Merge commit 'upstream-main' into master

Bug: 261600888
Test: none, build files to be updated in a follow-up CL
Change-Id: Ib520938290c6bbdee4a9f73b6419b6c947a96ec4
Jorge E. Moreira
2022-12-06 16:34:41 -08:00
5393 changed files with 541103 additions and 211666 deletions

.clang-format

@ -19,3 +19,6 @@ BreakBeforeTernaryOperators: false
IndentWrappedFunctionNames: true
ContinuationIndentWidth: 4
ObjCSpaceBeforeProtocolList: true
---
Language: Cpp
IncludeBlocks: Regroup

1
.gitignore vendored

@ -26,6 +26,7 @@
*_proto_cpp.xml
*~
.*.sw?
.cache
.cipd
.clangd
.classpath

50
.gn

@ -11,37 +11,24 @@ import("//build/dotfile_settings.gni")
# The location of the build configuration file.
buildconfig = "//build/config/BUILDCONFIG.gn"
# The python interpreter to use by default. On Windows, this will look
# for python3.exe and python3.bat.
script_executable = "python3"
# The secondary source root is a parallel directory tree where
# GN build files are placed when they can not be placed directly
# in the source tree, e.g. for third party source trees.
secondary_source = "//build/secondary/"
# These are the targets to check headers for by default. The files in targets
# matching these patterns (see "gn help label_pattern" for format) will have
# These are the targets to skip header checking by default. The files in targets
# matching these patterns (see "gn help label_pattern" for format) will not have
# their includes checked for proper dependencies when you run either
# "gn check" or "gn gen --check".
check_targets = [
":webrtc_common",
"//api/*",
"//audio/*",
"//backup/*",
"//call/*",
"//common_audio/*",
"//common_video/*",
"//examples/*",
"//logging/*",
"//media/*",
"//modules/*",
"//p2p/*",
"//pc/*",
"//rtc_base/*",
"//rtc_tools/*",
"//sdk/*",
"//stats/*",
"//system_wrappers/*",
"//test/*",
"//video/*",
"//third_party/libyuv/*",
no_check_targets = [
"//third_party/icu/*",
# TODO(crbug.com/1151236) Remove once fixed.
"//base/allocator/partition_allocator:partition_alloc",
]
# This is the list of GN files that run exec_script. This whitelist exists
@ -62,7 +49,7 @@ default_args = {
mac_sdk_min = "10.12"
ios_deployment_target = "10.0"
ios_deployment_target = "12.0"
# The SDK API level, in contrast, is set by build/android/AndroidManifest.xml.
android32_ndk_api_level = 16
@ -75,4 +62,17 @@ default_args = {
enable_libaom = true
gtest_enable_absl_printers = true
# Unlike Chromium, WebRTC still supports SDK 21.
default_min_sdk_version = 21
# Prevent jsoncpp from passing -Wno-deprecated-declarations to users
jsoncpp_no_deprecated_declarations = false
# Fixes the abi-revision issue.
# TODO(https://bugs.webrtc.org/14437): Remove this section if general
# Chromium fix resolves the problem.
fuchsia_sdk_readelf_exec =
"//third_party/llvm-build/Release+Asserts/bin/llvm-readelf"
fuchsia_target_api_level = 9
}

1
.mailmap Normal file

@ -0,0 +1 @@
Tommi <tommi@webrtc.org> Tomas Gunnarsson <tommi@webrtc.org>

4
.style.yapf Normal file

@ -0,0 +1,4 @@
[style]
based_on_style = pep8
indent_width = 2
column_limit = 80

.vpython

@ -9,7 +9,7 @@
# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
# this never requires the end-user machine to have a working python extension
# compilation environment. All of these packages are built using:
# https://chromium.googlesource.com/infra/infra/+/master/infra/tools/dockerbuild/
# https://chromium.googlesource.com/infra/infra/+/main/infra/tools/dockerbuild/
#
# All python scripts in the repo share this same spec, to avoid dependency
# fragmentation.
@ -20,7 +20,7 @@
# vpython path/to/script.py some --arguments
#
# Read more about `vpython` and how to modify this file here:
# https://chromium.googlesource.com/infra/infra/+/master/doc/users/vpython.md
# https://chromium.googlesource.com/infra/infra/+/main/doc/users/vpython.md
python_version: "2.7"
@ -31,7 +31,7 @@ wheel: <
version: "version:5.2.2"
>
# Used by tools_webrtc/perf/webrtc_dashboard_upload.py.
# Used by tools_webrtc/perf/process_perf_results.py.
wheel: <
name: "infra/python/wheels/httplib2-py2_py3"
version: "version:0.10.3"
@ -52,7 +52,7 @@ wheel: <
wheel: <
name: "infra/python/wheels/six-py2_py3"
version: "version:1.10.0"
version: "version:1.15.0"
>
wheel: <
name: "infra/python/wheels/pbr-py2_py3"
@ -66,3 +66,11 @@ wheel: <
name: "infra/python/wheels/mock-py2_py3"
version: "version:2.0.0"
>
wheel: <
name: "infra/python/wheels/protobuf-py2_py3"
version: "version:3.13.0"
>
wheel: <
name: "infra/python/wheels/requests-py2_py3"
version: "version:2.13.0"
>

88
.vpython3 Normal file

@ -0,0 +1,88 @@
# This is a vpython "spec" file.
#
# It describes patterns for python wheel dependencies of the python scripts in
# the chromium repo, particularly for dependencies that have compiled components
# (since pure-python dependencies can be easily vendored into third_party).
#
# When vpython is invoked, it finds this file and builds a python VirtualEnv,
# containing all of the dependencies described in this file, fetching them from
# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
# this never requires the end-user machine to have a working python extension
# compilation environment. All of these packages are built using:
# https://chromium.googlesource.com/infra/infra/+/main/infra/tools/dockerbuild/
#
# All python scripts in the repo share this same spec, to avoid dependency
# fragmentation.
#
# If you have depot_tools installed in your $PATH, you can invoke python scripts
# in this repo by running them as you normally would run them, except
# substituting `vpython` instead of `python` on the command line, e.g.:
# vpython path/to/script.py some --arguments
#
# Read more about `vpython` and how to modify this file here:
# https://chromium.googlesource.com/infra/infra/+/main/doc/users/vpython.md
python_version: "3.8"
# Used by:
# third_party/catapult
wheel: <
name: "infra/python/wheels/psutil/${vpython_platform}"
version: "version:5.8.0.chromium.2"
>
# Used by tools_webrtc/perf/process_perf_results.py.
wheel: <
name: "infra/python/wheels/httplib2-py3"
version: "version:0.19.1"
>
wheel: <
name: "infra/python/wheels/pyparsing-py2_py3"
version: "version:2.4.7"
>
# Used by:
# build/toolchain/win
wheel: <
name: "infra/python/wheels/pywin32/${vpython_platform}"
version: "version:300"
match_tag: <
platform: "win32"
>
match_tag: <
platform: "win_amd64"
>
>
# GRPC used by iOS test.
wheel: <
name: "infra/python/wheels/grpcio/${vpython_platform}"
version: "version:1.44.0"
>
wheel: <
name: "infra/python/wheels/six-py2_py3"
version: "version:1.15.0"
>
wheel: <
name: "infra/python/wheels/pbr-py2_py3"
version: "version:3.0.0"
>
wheel: <
name: "infra/python/wheels/funcsigs-py2_py3"
version: "version:1.0.2"
>
wheel: <
name: "infra/python/wheels/mock-py2_py3"
version: "version:2.0.0"
>
wheel: <
name: "infra/python/wheels/protobuf-py3"
version: "version:3.20.0"
>
wheel: <
name: "infra/python/wheels/requests-py2_py3"
version: "version:2.13.0"
>

125
AUTHORS

@ -1,117 +1,167 @@
# Names should be added to this file like so:
# Name or Organization <email address>
# Names should be added to this file with this pattern:
#
# For individuals:
# Name <email address>
#
# For organizations:
# Organization <fnmatch pattern>
#
# See python fnmatch module documentation for more information.
#
# Please keep the list sorted.
# BEGIN individuals section.
Aaron Clauson <aaron@sipsorcery.com>
Adam Fedor <adam.fedor@gmail.com>
Akshay Shah <meetakshay99@gmail.com>
Alex Henrie <alexhenrie24@gmail.com>
Alexander Brauckmann <a.brauckmann@gmail.com>
Alexandre Gouaillard <agouaillard@gmail.com>
Alex Henrie <alexhenrie24@gmail.com>
Andrew MacDonald <andrew@webrtc.org>
Andrey Efremov <yoklmnprst@ya.ru>
Andrew Johnson <ajohnson@draster.com>
Anil Kumar <an1kumar@gmail.com>
Ben Strong <bstrong@gmail.com>
Berthold Herrmann <bertholdherrmann08@googlemail.com>
Bob Withers <bwit@pobox.com>
Brett Hebert <brett@quebic.com>
Brett Hebert <hebert.brett@pm.me>
Bridger Maxwell <bridgeyman@gmail.com>
Chris Tserng <tserng@amazon.com>
Bruno Pitrus <brunopitrus@hotmail.com>
Cheng Qian <chengqian1521@qq.com>
Christophe Dumez <ch.dumez@samsung.com>
Chris Tserng <tserng@amazon.com>
Cody Barnes <conceptgenesis@gmail.com>
Colin Plumb
Cyril Lashkevich <notorca@gmail.com>
David Porter <david@porter.me>
Dax Booysen <dax@younow.com>
CZ Theng <cz.theng@gmail.com>
Danail Kirov <dkirovbroadsoft@gmail.com>
Dave Cowart <davecowart@gmail.com>
David Porter <david@porter.me>
David Sanders <dsanders11@ucsbalum.com>
Dax Booysen <dax@younow.com>
Dennis Angelo <dennis.angelo@gmail.com>
Dharmesh Chauhan <dharmesh.r.chauhan@gmail.com>
Di Wu <meetwudi@gmail.com>
Dirk-Jan C. Binnema <djcb@djcbsoftware.nl>
Dmitry Lizin <sdkdimon@gmail.com>
Eike Rathke <erathke@redhat.com>
Eric Rescorla, RTFM Inc. <ekr@rtfm.com>
Filip Hlasek <filip@orcamobility.ai>
Frederik Riedel, Frogg GmbH <frederik.riedel@frogg.io>
Giji Gangadharan <giji.g@samsung.com>
Graham Yoakum <gyoakum@skobalt.com>
Gustavo Garcia <gustavogb@gmail.com>
Hans Knoechel <hans@hans-knoechel.de>
Hugues Ekra <hekra01@gmail.com>
Jake Hilton <jakehilton@gmail.com>
James H. Brown <jbrown@burgoyne.com>
Jan Grulich <grulja@gmail.com>
Jan Kalab <pitlicek@gmail.com>
Jens Nielsen <jens.nielsen@berotec.se>
Jesús Leganés-Combarro <piranna@gmail.com>
Jiawei Ou <jiawei.ou@gmail.com>
Jie Mao <maojie0924@gmail.com>
Jiwon Kim <jwkim0000@gmail.com>
Johnny Wong <hellojinqiang@gmail.com>
Jose Antonio Olivera Ortega <josea.olivera@gmail.com>
Keiichi Enomoto <enm10k@gmail.com>
Kiran Thind <kiran.thind@gmail.com>
Korniltsev Anatoly <korniltsev.anatoly@gmail.com>
Kyutae Lee <gorisanson@gmail.com>
Lennart Grahl <lennart.grahl@gmail.com>
Luke Weber <luke.weber@gmail.com>
Maksim Khobat <maksimkhobat@gmail.com>
Mallikarjuna Rao V <vm.arjun@samsung.com>
Manish Jethani <manish.jethani@gmail.com>
Martin Storsjo <martin@martin.st>
Matthias Liebig <matthias.gcode@gmail.com>
Maksim Sisov <msisov@igalia.com>
Maxim Pavlov <pavllovmax@gmail.com>
Maxim Potapov <vopatop.skam@gmail.com>
Michael Iedema <michael@kapsulate.com>
Michał Zarach <michalzaq12@gmail.com>
Michel Promonet <michel.promonet.1@gmail.com>
Miguel Paris <mparisdiaz@gmail.com>
Mike Gilbert <floppymaster@gmail.com>
Mike Wei <Mike.WeiB@gmail.com>
Min Wang <mingewang@gmail.com>
Mo Zanaty <mzanaty@cisco.com>
Nico Schlumprecht <me@github.nico.onl>
Niek van der Maas <mail@niekvandermaas.nl>
Olivier Crête <olivier.crete@ocrete.ca>
Pali Rohar
Paul Kapustin <pkapustin@gmail.com>
Philipp Hancke <philipp.hancke@googlemail.com>
Peng Yu <yupeng323@gmail.com>
Philipp Hancke <philipp.hancke@googlemail.com>
Piasy Xu <xz4215@gmail.com>
Rafael Lopez Diez <rafalopezdiez@gmail.com>
Ralph Giles <giles@ghostscript.com>
Raman Budny <budnyjj@gmail.com>
Ramprakash Jelari <ennajelari@gmail.com>
Riku Voipio <riku.voipio@linaro.org>
Robert Bares <robert@bares.me>
Robert Mader <robert.mader@posteo.de>
Robert Nagy <robert.nagy@gmail.com>
Ryan Yoakum <ryoakum@skobalt.com>
Satender Saroha <ssaroha@yahoo.com>
Sarah Thompson <sarah@telergy.com>
Satender Saroha <ssaroha@yahoo.com>
Saul Kravitz <Saul.Kravitz@celera.com>
Sergio Garcia Murillo <sergio.garcia.murillo@gmail.com>
Shaofan Qi <vshaqi@gmail.com>
Shuhai Peng <shuhai.peng@intel.com>
Silviu Caragea <silviu.cpp@gmail.com>
Stefan Gula <steweg@gmail.com>
Stephan Hartmann <stha09@googlemail.com>
Steve Reid <sreid@sea-to-sky.net>
Takaaki Suzuki <takaakisuzuki.14@gmail.com>
Tarun Chawla <trnkumarchawla@gmail.com>
Todd Wong <todd.wong.ndq@gmail.com>
Tomas Popela <tomas.popela@gmail.com>
Trevor Hayes <trevor.axiom@gmail.com>
Uladzislau Susha <landby@gmail.com>
Vladimir Beloborodov <VladimirTechMan@gmail.com>
Vicken Simonian <vsimon@gmail.com>
Victor Costan <costan@gmail.com>
Vladimir Beloborodov <VladimirTechMan@gmail.com>
Xiaohong Xu <freemine@yeah.net>
Xiaolei Yu <dreifachstein@gmail.com>
Xinchao Tian <tianxinchao@360.cn>
Yaowen Guo <albertguo88@gmail.com>
Yura Yaroshevich <yura.yaroshevich@gmail.com>
Yuriy Pavlyshak <yuriy@appear.in>
Hans Knoechel <hans@hans-knoechel.de>
Korniltsev Anatoly <korniltsev.anatoly@gmail.com>
Todd Wong <todd.wong.ndq@gmail.com>
Sergio Garcia Murillo <sergio.garcia.murillo@gmail.com>
Maxim Pavlov <pavllovmax@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Piasy Xu <xz4215@gmail.com>
Tomas Popela <tomas.popela@gmail.com>
Jan Grulich <grulja@gmail.com>
Jiwon Kim <jwkim0000@gmail.com>
Eike Rathke <erathke@redhat.com>
Michel Promonet <michel.promonet.1@gmail.com>
Min Wang <mingewang@gmail.com>
Ramprakash Jelari <ennajelari@gmail.com>
CZ Theng <cz.theng@gmail.com>
Miguel Paris <mparisdiaz@gmail.com>
Raman Budny <budnyjj@gmail.com>
Stephan Hartmann <stha09@googlemail.com>
Lennart Grahl <lennart.grahl@gmail.com>
Zhaofeng Li <hello@zhaofeng.li>
Pengfei Han <hanpfei@gmail.com>
# END individuals section.
&yet LLC <*@andyet.com>
8x8 Inc. <*@sip-communicator.org>
# BEGIN organizations section.
8x8 Inc. <*@8x8.com>
8x8 Inc. <*@jitsi.org>
8x8 Inc. <*@sip-communicator.org>
Agora IO <*@agora.io>
ARM Holdings <*@arm.com>
BroadSoft Inc. <*@broadsoft.com>
Canonical Ltd <*@canonical.com>
CoSMo Software Consulting, Pte Ltd <*@cosmosoftware.io>
Facebook Inc. <*@fb.com>
Google Inc. <*@google.com>
Highfive, Inc. <*@highfive.com>
Hopin Ltd. <*@hopin.to>
HyperConnect Inc. <*@hpcnt.com>
Life On Air Inc. <*@lifeonair.com>
Intel Corporation <*@intel.com>
LG Electronics, Inc. <*@lge.com>
Life On Air Inc. <*@lifeonair.com>
Meta Platforms, Inc. <*@meta.com>
Microsoft Corporation <*@microsoft.com>
MIPS Technologies <*@mips.com>
Mozilla Foundation <*@mozilla.com>
Netgem S.A. <*@netgem.com>
Nutanix Inc. <*@nutanix.com>
NVIDIA Corporation <*@nvidia.com>
Opera Software ASA <*@opera.com>
Optical Tone Ltd <*@opticaltone.com>
Pengutronix e.K. <*@pengutronix.de>
Quebic Inc. <*@quebic.com>
Raptor Computing Systems, LLC <*@raptorcs.com>
RingCentral, Inc. <*@ringcentral.com>
Signal Messenger, LLC <*@signal.org>
Sinch AB <*@sinch.com>
@ -120,14 +170,15 @@ Telenor Digital AS <*@telenor.com>
Temasys Communications <*@temasys.io>
The Chromium Authors <*@chromium.org>
The WebRTC Authors <*@webrtc.org>
Threema GmbH <*@threema.ch>
Tuple, LLC <*@tuple.app>
Twilio, Inc. <*@twilio.com>
Vewd Software AS <*@vewd.com>
Videona Socialmedia <*@videona.com>
Videxio AS <*@videxio.com>
Vidyo, Inc. <*@vidyo.com>
Vonage Holdings Corp. <*@vonage.com>
Wang Qing <wangqing-hf@loongson.cn>
Wire Swiss GmbH <*@wire.com>
Vewd Software AS <*@vewd.com>
Highfive, Inc. <*@highfive.com>
CoSMo Software Consulting, Pte Ltd <*@cosmosoftware.io>
Tuple, LLC <*@tuple.app>
Videona Socialmedia <*@videona.com>
Threema GmbH <*@threema.ch>
&yet LLC <*@andyet.com>
# END organizations section.

221
BUILD.gn

@ -12,8 +12,18 @@
# you add a new build file, there must be some path of dependencies from this
# file to your new one or GN won't know about it.
# Use of visibility = clauses:
# The default visibility for all rtc_ targets is equivalent to "//*", or
# "all targets in webrtc can depend on this, nothing outside can".
#
# When overriding, the choices are:
# - visibility = [ "*" ] - public. Stuff outside webrtc can use this.
# - visibility = [ ":*" ] - directory private.
# As a general guideline, only targets in api/ should have public visibility.
import("//build/config/linux/pkg_config.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("//third_party/google_benchmark/buildconfig.gni")
import("webrtc.gni")
if (rtc_enable_protobuf) {
import("//third_party/protobuf/proto_library.gni")
@ -37,8 +47,8 @@ if (!build_with_chromium) {
}
if (rtc_include_tests) {
deps += [
":fuchsia_perf_tests",
":rtc_unittests",
":slow_tests",
":video_engine_tests",
":voip_unittests",
":webrtc_nonparallel_tests",
@ -54,8 +64,11 @@ if (!build_with_chromium) {
"modules/remote_bitrate_estimator:rtp_to_text",
"modules/rtp_rtcp:test_packet_masks_metrics",
"modules/video_capture:video_capture_internal_impl",
"net/dcsctp:dcsctp_unittests",
"pc:peerconnection_unittests",
"pc:rtc_pc_unittests",
"pc:slow_peer_connection_unittests",
"pc:svc_tests",
"rtc_tools:rtp_generator",
"rtc_tools:video_replay",
"stats:rtc_stats_unittests",
@ -71,6 +84,13 @@ if (!build_with_chromium) {
# see bugs.webrtc.org/11027#c5.
deps += [ ":webrtc_lib_link_test" ]
}
if (is_ios) {
deps += [
"examples:apprtcmobile_tests",
"sdk:sdk_framework_unittests",
"sdk:sdk_unittests",
]
}
if (is_android) {
deps += [
"examples:android_examples_junit_tests",
@ -82,11 +102,17 @@ if (!build_with_chromium) {
}
if (rtc_enable_protobuf) {
deps += [
"audio:low_bandwidth_audio_test",
"audio:low_bandwidth_audio_perf_test",
"logging:rtc_event_log_rtp_dump",
"tools_webrtc/perf:webrtc_dashboard_upload",
]
}
if ((is_linux || is_chromeos) && rtc_use_pipewire) {
deps += [ "modules/desktop_capture:shared_screencast_stream_test" ]
}
}
if (target_os == "android") {
deps += [ "tools_webrtc:binary_version_check" ]
}
}
}
@ -113,12 +139,15 @@ config("common_inherited_config") {
cflags = []
ldflags = []
if (rtc_enable_symbol_export || is_component_build) {
defines = [ "WEBRTC_ENABLE_SYMBOL_EXPORT" ]
if (rtc_dlog_always_on) {
defines += [ "DLOG_ALWAYS_ON" ]
}
if (build_with_mozilla) {
defines += [ "WEBRTC_MOZILLA_BUILD" ]
if (rtc_enable_symbol_export || is_component_build) {
defines += [ "WEBRTC_ENABLE_SYMBOL_EXPORT" ]
}
if (rtc_enable_objc_symbol_export) {
defines += [ "WEBRTC_ENABLE_OBJC_SYMBOL_EXPORT" ]
}
if (!rtc_builtin_ssl_root_certificates) {
@ -129,6 +158,14 @@ config("common_inherited_config") {
defines += [ "RTC_DISABLE_CHECK_MSG" ]
}
if (rtc_enable_avx2) {
defines += [ "WEBRTC_ENABLE_AVX2" ]
}
if (rtc_enable_win_wgc) {
defines += [ "RTC_ENABLE_WIN_WGC" ]
}
# Some tests need to declare their own trace event handlers. If this define is
# not set, the first time TRACE_EVENT_* is called it will store the return
# value for the current handler in a static variable, so that subsequent
@ -169,7 +206,7 @@ config("common_inherited_config") {
"WEBRTC_IOS",
]
}
if (is_linux) {
if (is_linux || is_chromeos) {
defines += [ "WEBRTC_LINUX" ]
}
if (is_mac) {
@ -206,14 +243,6 @@ config("common_inherited_config") {
}
}
# TODO(bugs.webrtc.org/9693): Remove the possibility to suppress this warning
# as soon as WebRTC compiles without it.
config("no_exit_time_destructors") {
if (is_clang) {
cflags = [ "-Wno-exit-time-destructors" ]
}
}
# TODO(bugs.webrtc.org/9693): Remove the possibility to suppress this warning
# as soon as WebRTC compiles without it.
config("no_global_constructors") {
@ -245,6 +274,12 @@ config("common_config") {
defines += [ "WEBRTC_ENABLE_PROTOBUF=0" ]
}
if (rtc_strict_field_trials) {
defines += [ "WEBRTC_STRICT_FIELD_TRIALS=1" ]
} else {
defines += [ "WEBRTC_STRICT_FIELD_TRIALS=0" ]
}
if (rtc_include_internal_audio_device) {
defines += [ "WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE" ]
}
@ -253,8 +288,12 @@ config("common_config") {
defines += [ "RTC_ENABLE_VP9" ]
}
if (rtc_include_dav1d_in_internal_decoder_factory) {
defines += [ "RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY" ]
}
if (rtc_enable_sctp) {
defines += [ "HAVE_SCTP" ]
defines += [ "WEBRTC_HAVE_SCTP" ]
}
if (rtc_enable_external_auth) {
@ -289,7 +328,16 @@ config("common_config") {
defines += [ "WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE" ]
}
cflags = []
if (is_clang) {
cflags += [
# TODO(webrtc:13219): Fix -Wshadow instances and enable.
"-Wno-shadow",
# See https://reviews.llvm.org/D56731 for details about this
# warning.
"-Wctad-maybe-unsupported",
]
}
if (build_with_chromium) {
defines += [
@ -325,20 +373,17 @@ config("common_config") {
}
if (is_clang) {
cflags += [
"-Wc++11-narrowing",
"-Wimplicit-fallthrough",
"-Wthread-safety",
"-Winconsistent-missing-override",
"-Wundef",
]
cflags += [ "-Wc++11-narrowing" ]
# use_xcode_clang only refers to the iOS toolchain, host binaries use
# chromium's clang always.
if (!is_nacl &&
(!use_xcode_clang || current_toolchain == host_toolchain)) {
# Flags NaCl (Clang 3.7) and Xcode 7.3 (Clang clang-703.0.31) do not
# recognize.
if (!is_fuchsia) {
# Compiling with the Fuchsia SDK results in Wundef errors
# TODO(bugs.fuchsia.dev/100722): Remove from (!is_fuchsia) branch when
# Fuchsia build errors are fixed.
cflags += [ "-Wundef" ]
}
if (!is_nacl) {
# Flags NaCl (Clang 3.7) do not recognize.
cflags += [ "-Wunused-lambda-capture" ]
}
}
@ -415,10 +460,6 @@ config("common_config") {
config("common_objc") {
frameworks = [ "Foundation.framework" ]
if (rtc_use_metal_rendering) {
defines = [ "RTC_SUPPORTS_METAL" ]
}
}
if (!build_with_chromium) {
@ -436,7 +477,6 @@ if (!build_with_chromium) {
defines = []
deps = [
":webrtc_common",
"api:create_peerconnection_factory",
"api:libjingle_peerconnection_api",
"api:rtc_error",
@ -445,6 +485,7 @@ if (!build_with_chromium) {
"api/rtc_event_log:rtc_event_log_factory",
"api/task_queue",
"api/task_queue:default_task_queue_factory",
"api/test/metrics",
"audio",
"call",
"common_audio",
@ -455,9 +496,7 @@ if (!build_with_chromium) {
"modules/video_capture:video_capture_internal_impl",
"p2p:rtc_p2p",
"pc:libjingle_peerconnection",
"pc:peerconnection",
"pc:rtc_pc",
"pc:rtc_pc_base",
"rtc_base",
"sdk",
"video",
@ -501,6 +540,10 @@ if (!build_with_chromium) {
rtc_executable("webrtc_lib_link_test") {
testonly = true
# This target is used for checking to link, so do not check dependencies
# on gn check.
check_includes = false # no-presubmit-check TODO(bugs.webrtc.org/12785)
sources = [ "webrtc_lib_link_test.cc" ]
deps = [
# NOTE: Don't add deps here. If this test fails to link, it means you
@ -511,15 +554,6 @@ if (!build_with_chromium) {
}
}
rtc_source_set("webrtc_common") {
# Client code SHOULD NOT USE THIS TARGET, but for now it needs to be public
# because there exists client code that uses it.
# TODO(bugs.webrtc.org/9808): Move to private visibility as soon as that
# client code gets updated.
visibility = [ "*" ]
sources = [ "common_types.h" ]
}
if (use_libfuzzer || use_afl) {
# This target is only here for gn to discover fuzzer build targets under
# webrtc/test/fuzzers/.
@ -529,22 +563,26 @@ if (use_libfuzzer || use_afl) {
}
}
if (rtc_include_tests) {
if (rtc_include_tests && !build_with_chromium) {
rtc_test("rtc_unittests") {
testonly = true
deps = [
":webrtc_common",
"api:compile_all_headers",
"api:rtc_api_unittests",
"api/audio/test:audio_api_unittests",
"api/audio_codecs/test:audio_codecs_api_unittests",
"api/numerics:numerics_unittests",
"api/task_queue:pending_task_safety_flag_unittests",
"api/test/metrics:metrics_unittests",
"api/transport:stun_unittest",
"api/video/test:rtc_api_video_unittests",
"api/video_codecs/test:video_codecs_api_unittests",
"api/voip:compile_all_headers",
"call:fake_network_pipe_unittests",
"p2p:libstunprober_unittests",
"p2p:rtc_p2p_unittests",
"rtc_base:callback_list_unittests",
"rtc_base:rtc_base_approved_unittests",
"rtc_base:rtc_base_unittests",
"rtc_base:rtc_json_unittests",
@ -552,11 +590,12 @@ if (rtc_include_tests) {
"rtc_base:rtc_operations_chain_unittests",
"rtc_base:rtc_task_queue_unittests",
"rtc_base:sigslot_unittest",
"rtc_base:untyped_function_unittest",
"rtc_base:weak_ptr_unittests",
"rtc_base/experiments:experiments_unittests",
"rtc_base/synchronization:sequence_checker_unittests",
"rtc_base/task_utils:pending_task_safety_flag_unittests",
"rtc_base/task_utils:to_queued_task_unittests",
"rtc_base/system:file_wrapper_unittests",
"rtc_base/task_utils:repeating_task_unittests",
"rtc_base/units:units_unittests",
"sdk:sdk_tests",
"test:rtp_test_utils",
"test:test_main",
@ -578,31 +617,18 @@ if (rtc_include_tests) {
]
shard_timeout = 900
}
}
if (is_ios || is_mac) {
deps += [ "sdk:rtc_unittests_objc" ]
if (enable_google_benchmarks) {
rtc_test("benchmarks") {
testonly = true
deps = [
"rtc_base/synchronization:mutex_benchmark",
"test:benchmark_main",
]
}
}
rtc_test("benchmarks") {
testonly = true
deps = [
"rtc_base/synchronization:mutex_benchmark",
"test:benchmark_main",
]
}
# This runs tests that must run in real time and therefore can take some
# time to execute. They are in a separate executable to avoid making the
# regular unittest suite too slow to run frequently.
rtc_test("slow_tests") {
testonly = true
deps = [
"rtc_base/task_utils:repeating_task_unittests",
"test:test_main",
]
}
# TODO(pbos): Rename test suite, this is no longer "just" for video targets.
video_engine_tests_resources = [
"resources/foreman_cif_short.yuv",
@ -634,7 +660,12 @@ if (rtc_include_tests) {
]
data = video_engine_tests_resources
if (is_android) {
deps += [ "//testing/android/native_test:native_test_native_code" ]
use_default_launcher = false
deps += [
"//build/android/gtest_apk:native_test_instrumentation_test_runner_java",
"//testing/android/native_test:native_test_java",
"//testing/android/native_test:native_test_support",
]
shard_timeout = 900
}
if (is_ios) {
@ -679,7 +710,12 @@ if (rtc_include_tests) {
data = webrtc_perf_tests_resources
if (is_android) {
deps += [ "//testing/android/native_test:native_test_native_code" ]
use_default_launcher = false
deps += [
"//build/android/gtest_apk:native_test_instrumentation_test_runner_java",
"//testing/android/native_test:native_test_java",
"//testing/android/native_test:native_test_support",
]
shard_timeout = 4500
}
if (is_ios) {
@ -687,6 +723,22 @@ if (rtc_include_tests) {
}
}
rtc_test("fuchsia_perf_tests") {
testonly = true
deps = [
#TODO(fxbug.dev/115601) - Enable when fixed
#"call:call_perf_tests",
#"video:video_pc_full_stack_tests",
"modules/audio_coding:audio_coding_perf_tests",
"modules/audio_processing:audio_processing_perf_tests",
"pc:peerconnection_perf_tests",
"test:test_main",
"video:video_full_stack_tests",
]
data = webrtc_perf_tests_resources
}
rtc_test("webrtc_nonparallel_tests") {
testonly = true
deps = [ "rtc_base:rtc_base_nonparallel_tests" ]
@ -699,6 +751,7 @@ if (rtc_include_tests) {
rtc_test("voip_unittests") {
testonly = true
deps = [
"api/voip:compile_all_headers",
"api/voip:voip_engine_factory_unittests",
"audio/voip/test:audio_channel_unittests",
"audio/voip/test:audio_egress_unittests",
@ -709,6 +762,23 @@ if (rtc_include_tests) {
}
}
# Build target for standalone dcsctp
rtc_static_library("dcsctp") {
# Only the root target should depend on this.
visibility = [ "//:default" ]
sources = []
complete_static_lib = true
suppressed_configs += [ "//build/config/compiler:thin_archive" ]
defines = []
deps = [
"net/dcsctp/public:factory",
"net/dcsctp/public:socket",
"net/dcsctp/public:types",
"net/dcsctp/socket:dcsctp_socket",
"net/dcsctp/timer:task_queue_timeout",
]
}
# ---- Poisons ----
#
# Here is one empty dummy target for each poison type (needed because
@ -724,6 +794,9 @@ group("poison_audio_codecs") {
group("poison_default_task_queue") {
}
group("poison_default_echo_detector") {
}
group("poison_rtc_json") {
}

CODE_OF_CONDUCT.md

@ -61,8 +61,8 @@ The decisions of the WebRTC community managers may be appealed via community-app
## Acknowledgements
This Code of Conduct is based on Contributor Covenant, version 1.4,
available [here](http://contributor-covenant.org/version/1/4) and [Chromium](https://chromium.googlesource.com/chromium/src/+/master/CODE_OF_CONDUCT.md)
available [here](http://contributor-covenant.org/version/1/4) and [Chromium](https://chromium.googlesource.com/chromium/src/+/main/CODE_OF_CONDUCT.md)
## License
This Code of Conduct is available for reuse under the Creative Commons Zero (CC0) license.
This Code of Conduct is available for reuse under the Creative Commons Zero (CC0) license.

2266
DEPS

File diff suppressed because it is too large

3
DIR_METADATA Normal file

@ -0,0 +1,3 @@
monorail {
project: "webrtc"
}


@ -6,6 +6,4 @@
# review owners to ensure that the added dependency was OK.
danilchap@webrtc.org
kwiberg@webrtc.org
mbonadei@webrtc.org
phoglund@webrtc.org


@ -1,22 +1,6 @@
henrika@webrtc.org
juberti@webrtc.org
kwiberg@webrtc.org
hta@webrtc.org
mflodman@webrtc.org
stefan@webrtc.org
tommi@webrtc.org
per-file .gitignore=*
per-file .gn=mbonadei@webrtc.org
per-file *.gn=mbonadei@webrtc.org
per-file *.gni=mbonadei@webrtc.org
per-file AUTHORS=*
per-file DEPS=*
per-file pylintrc=phoglund@webrtc.org
per-file WATCHLISTS=*
per-file abseil-in-webrtc.md=danilchap@webrtc.org
per-file abseil-in-webrtc.md=kwiberg@webrtc.org
per-file abseil-in-webrtc.md=mbonadei@webrtc.org
per-file style-guide.md=danilchap@webrtc.org
per-file style-guide.md=kwiberg@webrtc.org
per-file native-api.md=kwiberg@webrtc.org
# COMPONENT: Internals>WebRTC
include OWNERS_INFRA #{Owners for infra and repo related files}

17
OWNERS_INFRA.webrtc Normal file

@ -0,0 +1,17 @@
#Owners for infra and repo related files
per-file .gitignore=*
per-file .gn=mbonadei@webrtc.org,jansson@webrtc.org,jleconte@webrtc.org
per-file BUILD.gn=mbonadei@webrtc.org,jansson@webrtc.org,jleconte@webrtc.org
per-file .../BUILD.gn=mbonadei@webrtc.org,jansson@webrtc.org,jleconte@webrtc.org
per-file *.gni=mbonadei@webrtc.org,jansson@webrtc.org,jleconte@webrtc.org
per-file .../*.gni=mbonadei@webrtc.org,jansson@webrtc.org,jleconte@webrtc.org
per-file .vpython=mbonadei@webrtc.org,jansson@webrtc.org,jleconte@webrtc.org
per-file .vpython3=mbonadei@webrtc.org,jansson@webrtc.org,jleconte@webrtc.org
per-file AUTHORS=*
per-file DEPS=*
per-file pylintrc=mbonadei@webrtc.org,jansson@webrtc.org,jleconte@webrtc.org
per-file WATCHLISTS=*
per-file native-api.md=mbonadei@webrtc.org
per-file ....lua=titovartem@webrtc.org
per-file .style.yapf=jleconte@webrtc.org
per-file *.py=mbonadei@webrtc.org,jansson@webrtc.org,jleconte@webrtc.org

File diff suppressed because it is too large

README.md

@ -23,8 +23,10 @@ native API header files.
* Master source code repo: https://webrtc.googlesource.com/src
* Samples and reference apps: https://github.com/webrtc
* Mailing list: http://groups.google.com/group/discuss-webrtc
* Continuous build: http://build.chromium.org/p/client.webrtc
* [Coding style guide](style-guide.md)
* Continuous build: https://ci.chromium.org/p/webrtc/g/ci/console
* [Coding style guide](g3doc/style-guide.md)
* [Code of conduct](CODE_OF_CONDUCT.md)
* [Reporting bugs](docs/bug-reporting.md)
* [Documentation](g3doc/sitemap.md)
[native-dev]: https://webrtc.googlesource.com/src/+/refs/heads/master/docs/native-code/index.md
[native-dev]: https://webrtc.googlesource.com/src/+/main/docs/native-code/index.md

WATCHLISTS

@ -73,9 +73,6 @@
'video_coding': {
'filepath': 'modules/video_coding/.*',
},
'video_processing': {
'filepath': 'modules/video_processing/.*',
},
'bitrate_controller': {
'filepath': 'modules/bitrate_controller/.*'
},
@ -97,6 +94,9 @@
'pc': {
'filepath': '^pc/.*',
},
'logging' : {
'filepath': 'logging/.*',
},
},
'WATCHLISTS': {
@ -107,14 +107,13 @@
'yujie.mao@webrtc.org'],
'build_files': ['mbonadei@webrtc.org'],
'common_audio': ['alessiob@webrtc.org',
'aluebs@webrtc.org',
'audio-team@agora.io',
'minyue@webrtc.org',
'peah@webrtc.org',
'saza@webrtc.org'],
'audio': ['peah@webrtc.org'],
'api': ['kwiberg@webrtc.org','peah@webrtc.org'],
'base': ['kwiberg@webrtc.org'],
'api': ['hta@webrtc.org',
'peah@webrtc.org'],
'base': ['hta@webrtc.org'],
'call': ['mflodman@webrtc.org',
'stefan@webrtc.org'],
'video': ['mflodman@webrtc.org',
@ -134,45 +133,33 @@
'audio_coding': ['alessiob@webrtc.org',
'audio-team@agora.io',
'henrik.lundin@webrtc.org',
'kwiberg@webrtc.org',
'minyue@webrtc.org',
'peah@webrtc.org',
'saza@webrtc.org'],
'neteq': ['alessiob@webrtc.org',
'audio-team@agora.io',
'henrik.lundin@webrtc.org',
'minyue@webrtc.org',
'saza@webrtc.org'],
'audio_mixer': ['aleloi@webrtc.org',
'henrik.lundin@webrtc.org',
'peah@webrtc.org',
'saza@webrtc.org'],
'audio_processing': ['alessiob@webrtc.org',
'aluebs@webrtc.org',
'audio-team@agora.io',
'fhernqvist@webrtc.org',
'henrik.lundin@webrtc.org',
'kwiberg@webrtc.org',
'minyue@webrtc.org',
'peah@webrtc.org',
'saza@webrtc.org'],
'video_coding': ['mflodman@webrtc.org',
'stefan@webrtc.org',
'video-team@agora.io',
'zhengzhonghou@agora.io'],
'video_processing': ['stefan@webrtc.org',
'video-team@agora.io',
'zhengzhonghou@agora.io'],
'bitrate_controller': ['mflodman@webrtc.org',
'stefan@webrtc.org',
'srte@webrtc.org',
'zhuangzesen@agora.io'],
'congestion_controller': ['srte@webrtc.org'],
'congestion_controller': [],
'remote_bitrate_estimator': ['mflodman@webrtc.org',
'stefan@webrtc.org',
'zhuangzesen@agora.io'],
'pacing': ['mflodman@webrtc.org',
'srte@webrtc.org',
'stefan@webrtc.org',
'zhuangzesen@agora.io'],
'rtp_rtcp': ['mflodman@webrtc.org',
@ -185,5 +172,6 @@
'peah@webrtc.org',
'zhengzhonghou@agora.io'],
'pc': ['steveanton+watch@webrtc.org'],
'logging': ['terelius@webrtc.org'],
},
}

File diff suppressed because it is too large

109
api/DEPS

@ -11,10 +11,13 @@ include_rules = [
"-common_video",
"-data",
"-examples",
"-experiments",
"-g3doc",
"-ios",
"-infra",
"-logging",
"-media",
"-net",
"-modules",
"-out",
"-p2p",
@ -40,12 +43,16 @@ include_rules = [
specific_include_rules = {
# Some internal headers are allowed even in API headers:
"call_factory_interface\.h": [
"+call/rtp_transport_controller_send_factory_interface.h",
],
".*\.h": [
"+rtc_base/checks.h",
"+rtc_base/system/rtc_export.h",
"+rtc_base/system/rtc_export_template.h",
"+rtc_base/units/unit_base.h",
"+rtc_base/deprecation.h",
],
"array_view\.h": [
@ -63,6 +70,10 @@ specific_include_rules = {
"+rtc_base/async_resolver_interface.h",
],
"async_dns_resolver\.h": [
"+rtc_base/socket_address.h",
],
"candidate\.h": [
"+rtc_base/network_constants.h",
"+rtc_base/socket_address.h",
@ -102,14 +113,6 @@ specific_include_rules = {
"+rtc_base/ref_count.h",
],
"jsep_ice_candidate\.h": [
"+rtc_base/constructor_magic.h",
],
"jsep_session_description\.h": [
"+rtc_base/constructor_magic.h",
],
"media_stream_interface\.h": [
"+modules/audio_processing/include/audio_processing_statistics.h",
"+rtc_base/ref_count.h",
@ -120,31 +123,31 @@ specific_include_rules = {
"+rtc_base/async_packet_socket.h",
],
"peer_connection_factory_proxy\.h": [
"+rtc_base/bind.h",
],
"peer_connection_interface\.h": [
"+call/rtp_transport_controller_send_factory_interface.h",
"+media/base/media_config.h",
"+media/base/media_engine.h",
"+p2p/base/port.h",
"+p2p/base/port_allocator.h",
"+rtc_base/network.h",
"+rtc_base/network_constants.h",
"+rtc_base/network_monitor_factory.h",
"+rtc_base/ref_count.h",
"+rtc_base/rtc_certificate.h",
"+rtc_base/rtc_certificate_generator.h",
"+rtc_base/socket_address.h",
"+rtc_base/ssl_certificate.h",
"+rtc_base/ssl_stream_adapter.h",
"+rtc_base/thread.h",
],
"proxy\.h": [
"+rtc_base/event.h",
"+rtc_base/message_handler.h", # Inherits from it.
"+rtc_base/ref_counted_object.h",
"+rtc_base/thread.h",
],
"ref_counted_base\.h": [
"+rtc_base/constructor_magic.h",
"+rtc_base/ref_count.h",
"+rtc_base/ref_counter.h",
],
@ -172,14 +175,15 @@ specific_include_rules = {
"+rtc_base/ref_count.h",
],
"set_local_description_observer_interface\.h": [
"+rtc_base/ref_count.h",
],
"set_remote_description_observer_interface\.h": [
"+rtc_base/ref_count.h",
],
"stats_types\.h": [
"+rtc_base/constructor_magic.h",
"legacy_stats_types\.h": [
"+rtc_base/ref_count.h",
"+rtc_base/string_encode.h",
"+rtc_base/thread_checker.h",
],
@ -187,27 +191,18 @@ specific_include_rules = {
"+rtc_base/ref_count.h",
],
"audio_frame\.h": [
"+rtc_base/constructor_magic.h",
],
"audio_mixer\.h": [
"+rtc_base/ref_count.h",
],
"audio_decoder\.h": [
"+rtc_base/buffer.h",
"+rtc_base/constructor_magic.h",
],
"audio_decoder_factory\.h": [
"+rtc_base/ref_count.h",
],
"audio_decoder_factory_template\.h": [
"+rtc_base/ref_counted_object.h",
],
"audio_encoder\.h": [
"+rtc_base/buffer.h",
],
@ -216,10 +211,6 @@ specific_include_rules = {
"+rtc_base/ref_count.h",
],
"audio_encoder_factory_template\.h": [
"+rtc_base/ref_counted_object.h",
],
"frame_decryptor_interface\.h": [
"+rtc_base/ref_count.h",
],
@ -234,7 +225,6 @@ specific_include_rules = {
"rtc_stats_report\.h": [
"+rtc_base/ref_count.h",
"+rtc_base/ref_counted_object.h",
],
"audioproc_float\.h": [
@ -245,11 +235,14 @@ specific_include_rules = {
"+modules/audio_processing/include/audio_processing.h",
],
"fake_frame_decryptor\.h": [
"+rtc_base/ref_counted_object.h",
"fake_metronome\.h": [
"+rtc_base/synchronization/mutex.h",
"+rtc_base/task_queue.h",
"+rtc_base/task_utils/repeating_task.h",
"+rtc_base/thread_annotations.h",
],
"fake_frame_encryptor\.h": [
"make_ref_counted\.h": [
"+rtc_base/ref_counted_object.h",
],
@ -257,6 +250,18 @@ specific_include_rules = {
"+test/gmock.h",
],
"mock_peerconnectioninterface\.h": [
"+rtc_base/ref_counted_object.h",
],
"mock_video_track\.h": [
"+rtc_base/ref_counted_object.h",
],
"notifier\.h": [
"+rtc_base/system/no_unique_address.h",
],
"simulated_network\.h": [
"+rtc_base/random.h",
"+rtc_base/thread_annotations.h",
@ -278,6 +283,39 @@ specific_include_rules = {
"+rtc_base/ref_count.h",
],
"sequence_checker\.h": [
"+rtc_base/synchronization/sequence_checker_internal.h",
"+rtc_base/thread_annotations.h",
],
"wrapping_async_dns_resolver\.h": [
"+rtc_base/async_resolver.h",
"+rtc_base/async_resolver_interface.h",
"+rtc_base/socket_address.h",
"+rtc_base/third_party/sigslot/sigslot.h",
"+rtc_base/thread_annotations.h",
],
"video_encoder_factory_template.*\.h": [
"+modules/video_coding",
],
"video_decoder_factory_template.*\.h": [
"+modules/video_coding",
],
"field_trials\.h": [
"+rtc_base/containers/flat_map.h",
],
"video_track_source_proxy_factory.h": [
"+rtc_base/thread.h",
],
"field_trials_registry\.h": [
"+rtc_base/containers/flat_set.h",
],
# .cc files in api/ should not be restricted in what they can #include,
# so we re-add all the top-level directories here. (That's because .h
# files leak their #includes to whoever's #including them, but .cc files
@ -288,6 +326,7 @@ specific_include_rules = {
"+common_audio",
"+common_video",
"+examples",
"+experiments",
"+logging",
"+media",
"+modules",


@ -1,14 +1,14 @@
crodbro@webrtc.org
deadbeef@webrtc.org
hta@webrtc.org
juberti@webrtc.org
kwiberg@webrtc.org
magjed@webrtc.org
perkj@webrtc.org
tkchin@webrtc.org
tommi@webrtc.org
# For approvals that absolutely must be done on US Pacific time
deadbeef@webrtc.org
tkchin@webrtc.org
per-file peer_connection*=hbos@webrtc.org
per-file DEPS=mbonadei@webrtc.org
per-file DEPS=kwiberg@webrtc.org
per-file uma_metrics.h=kron@webrtc.org

api/README.md

@ -1,6 +1,6 @@
# How to write code in the `api/` directory
Mostly, just follow the regular [style guide](../style-guide.md), but:
Mostly, just follow the regular [style guide](../g3doc/style-guide.md), but:
* Note that `api/` code is not exempt from the “`.h` and `.cc` files come in
pairs” rule, so if you declare something in `api/path/to/foo.h`, it should be
@ -17,7 +17,7 @@ it from a `.cc` file, so that users of our API headers won’t transitively
For headers in `api/` that need to refer to non-public types, forward
declarations are often a lesser evil than including non-public header files. The
usual [rules](../style-guide.md#forward-declarations) still apply, though.
usual [rules](../g3doc/style-guide.md#forward-declarations) still apply, though.
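To make the forward-declaration advice above concrete, here is a hedged sketch; FrameSink and InternalFramePool are invented names, not real WebRTC API types:

// api/frame_sink.h (hypothetical public header). Only a pointer to the
// non-public type appears in the declaration, so a forward declaration keeps
// the internal header out of the API surface.
namespace webrtc {

class InternalFramePool;  // Forward declaration; definition stays non-public.

class FrameSink {
 public:
  explicit FrameSink(InternalFramePool* pool) : pool_(pool) {}
  void Flush();  // Defined in the .cc file, which includes the real header.

 private:
  InternalFramePool* pool_;
};

}  // namespace webrtc

The matching api/frame_sink.cc would #include the internal header and implement Flush(), so only the implementation, not every user of the API header, depends on the non-public type.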
`.cc` files in `api/` should preferably be kept reasonably small. If a
substantial implementation is needed, consider putting it with our non-public

api/adaptation/BUILD.gn

@ -16,8 +16,8 @@ rtc_source_set("resource_adaptation_api") {
]
deps = [
"../../api:scoped_refptr",
"../../rtc_base:checks",
"../../rtc_base:refcount",
"../../rtc_base:rtc_base_approved",
"../../rtc_base/system:rtc_export",
]
}

api/adaptation/DEPS

@ -1,6 +1,6 @@
specific_include_rules = {
"resource\.h": [
# ref_count.h is a public_deps of rtc_base_approved. Necessary because of
# ref_count.h is a public_deps of rtc_base:refcount. Necessary because of
# rtc::RefCountInterface.
"+rtc_base/ref_count.h",
],

api/adaptation/resource.cc

@ -10,6 +10,8 @@
#include "api/adaptation/resource.h"
#include "rtc_base/checks.h"
namespace webrtc {
const char* ResourceUsageStateToString(ResourceUsageState usage_state) {
@ -19,6 +21,7 @@ const char* ResourceUsageStateToString(ResourceUsageState usage_state) {
case ResourceUsageState::kUnderuse:
return "kUnderuse";
}
RTC_CHECK_NOTREACHED();
}
ResourceListener::~ResourceListener() {}

api/adaptation/resource.h

@ -57,7 +57,7 @@ class RTC_EXPORT Resource : public rtc::RefCountInterface {
~Resource() override;
virtual std::string Name() const = 0;
// The |listener| may be informed of resource usage measurements on any task
// The `listener` may be informed of resource usage measurements on any task
// queue, but not after this method is invoked with the null argument.
virtual void SetResourceListener(ResourceListener* listener) = 0;
};
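A hedged sketch of a custom Resource honoring the listener contract above; the class name and the OnOveruseDetected() hook are invented, and a real instance would be created via rtc::make_ref_counted so the ref-counting inherited from rtc::RefCountInterface is filled in:

#include <string>

#include "api/adaptation/resource.h"

class CpuTemperatureResource : public webrtc::Resource {
 public:
  std::string Name() const override { return "CpuTemperatureResource"; }

  // Per the interface comment: after SetResourceListener(nullptr) no further
  // measurements may be reported, so the pointer is simply cleared.
  void SetResourceListener(webrtc::ResourceListener* listener) override {
    listener_ = listener;
  }

  // Hypothetical hook called by the embedder's own monitoring code, on the
  // same sequence that calls SetResourceListener().
  void OnOveruseDetected() {
    if (listener_ != nullptr) {
      // Forward a ResourceUsageState::kOveruse measurement to listener_ here.
    }
  }

 private:
  webrtc::ResourceListener* listener_ = nullptr;
};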

api/array_view.h

@ -13,6 +13,7 @@
#include <algorithm>
#include <array>
#include <iterator>
#include <type_traits>
#include "rtc_base/checks.h"
@ -83,7 +84,7 @@ namespace rtc {
// a pointer if fix-sized) and trivially copyable, so it's probably cheaper to
// pass it by value than by const reference.
namespace impl {
namespace array_view_internal {
// Magic constant for indicating that the size of an ArrayView is variable
// instead of fixed.
@ -124,7 +125,7 @@ class ArrayViewBase<T, 0> {
// Specialized base class for ArrayViews of variable size.
template <typename T>
class ArrayViewBase<T, impl::kArrayViewVarSize> {
class ArrayViewBase<T, array_view_internal::kArrayViewVarSize> {
public:
ArrayViewBase(T* data, size_t size)
: data_(size == 0 ? nullptr : data), size_(size) {}
@ -141,10 +142,11 @@ class ArrayViewBase<T, impl::kArrayViewVarSize> {
size_t size_;
};
} // namespace impl
} // namespace array_view_internal
template <typename T, std::ptrdiff_t Size = impl::kArrayViewVarSize>
class ArrayView final : public impl::ArrayViewBase<T, Size> {
template <typename T,
std::ptrdiff_t Size = array_view_internal::kArrayViewVarSize>
class ArrayView final : public array_view_internal::ArrayViewBase<T, Size> {
public:
using value_type = T;
using const_iterator = const T*;
@ -152,7 +154,7 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
// Construct an ArrayView from a pointer and a length.
template <typename U>
ArrayView(U* data, size_t size)
: impl::ArrayViewBase<T, Size>::ArrayViewBase(data, size) {
: array_view_internal::ArrayViewBase<T, Size>::ArrayViewBase(data, size) {
RTC_DCHECK_EQ(size == 0 ? nullptr : data, this->data());
RTC_DCHECK_EQ(size, this->size());
RTC_DCHECK_EQ(!this->data(),
@ -166,7 +168,8 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
: ArrayView() {}
ArrayView(std::nullptr_t, size_t size)
: ArrayView(static_cast<T*>(nullptr), size) {
static_assert(Size == 0 || Size == impl::kArrayViewVarSize, "");
static_assert(Size == 0 || Size == array_view_internal::kArrayViewVarSize,
"");
RTC_DCHECK_EQ(0, size);
}
@ -174,7 +177,7 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
template <typename U, size_t N>
ArrayView(U (&array)[N]) // NOLINT
: ArrayView(array, N) {
static_assert(Size == N || Size == impl::kArrayViewVarSize,
static_assert(Size == N || Size == array_view_internal::kArrayViewVarSize,
"Array size must match ArrayView size");
}
@ -207,7 +210,7 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
// N> when M != N.
template <
typename U,
typename std::enable_if<Size != impl::kArrayViewVarSize &&
typename std::enable_if<Size != array_view_internal::kArrayViewVarSize &&
HasDataAndSize<U, T>::value>::type* = nullptr>
ArrayView(U& u) // NOLINT
: ArrayView(u.data(), u.size()) {
@ -215,7 +218,7 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
}
template <
typename U,
typename std::enable_if<Size != impl::kArrayViewVarSize &&
typename std::enable_if<Size != array_view_internal::kArrayViewVarSize &&
HasDataAndSize<U, T>::value>::type* = nullptr>
ArrayView(const U& u) // NOLINT(runtime/explicit)
: ArrayView(u.data(), u.size()) {
@ -235,13 +238,13 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
// const rtc::Buffer to ArrayView<const uint8_t>.
template <
typename U,
typename std::enable_if<Size == impl::kArrayViewVarSize &&
typename std::enable_if<Size == array_view_internal::kArrayViewVarSize &&
HasDataAndSize<U, T>::value>::type* = nullptr>
ArrayView(U& u) // NOLINT
: ArrayView(u.data(), u.size()) {}
template <
typename U,
typename std::enable_if<Size == impl::kArrayViewVarSize &&
typename std::enable_if<Size == array_view_internal::kArrayViewVarSize &&
HasDataAndSize<U, T>::value>::type* = nullptr>
ArrayView(const U& u) // NOLINT(runtime/explicit)
: ArrayView(u.data(), u.size()) {}
@ -258,6 +261,18 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
T* end() const { return this->data() + this->size(); }
const T* cbegin() const { return this->data(); }
const T* cend() const { return this->data() + this->size(); }
std::reverse_iterator<T*> rbegin() const {
return std::make_reverse_iterator(end());
}
std::reverse_iterator<T*> rend() const {
return std::make_reverse_iterator(begin());
}
std::reverse_iterator<const T*> crbegin() const {
return std::make_reverse_iterator(cend());
}
std::reverse_iterator<const T*> crend() const {
return std::make_reverse_iterator(cbegin());
}
ArrayView<T> subview(size_t offset, size_t size) const {
return offset < this->size()
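A small usage sketch for the reverse iterators added above; SumNewestFirst is an invented helper, and the implicit conversion from std::vector works because ArrayView accepts anything exposing data() and size():

#include <cstdint>
#include <vector>

#include "api/array_view.h"

// ArrayView is only a pointer plus a size, so it is passed by value.
int64_t SumNewestFirst(rtc::ArrayView<const int> samples) {
  int64_t sum = 0;
  for (auto it = samples.crbegin(); it != samples.crend(); ++it) {
    sum += *it;  // Walks the view from the last element back to the first.
  }
  return sum;
}

void Demo() {
  std::vector<int> samples = {1, 2, 3, 4};
  SumNewestFirst(samples);  // Converts implicitly to rtc::ArrayView<const int>.
}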

api/array_view_unittest.cc

@ -451,6 +451,20 @@ TEST(ArrayViewTest, TestIterationEmpty) {
}
}
TEST(ArrayViewTest, TestReverseIterationEmpty) {
// Variable-size.
ArrayView<std::vector<std::vector<std::vector<std::string>>>> av;
EXPECT_EQ(av.rbegin(), av.rend());
EXPECT_EQ(av.crbegin(), av.crend());
EXPECT_TRUE(av.empty());
// Fixed-size.
ArrayView<std::vector<std::vector<std::vector<std::string>>>, 0> af;
EXPECT_EQ(af.begin(), af.end());
EXPECT_EQ(af.cbegin(), af.cend());
EXPECT_TRUE(af.empty());
}
TEST(ArrayViewTest, TestIterationVariable) {
char arr[] = "Arrr!";
ArrayView<char> av(arr);
@ -472,6 +486,25 @@ TEST(ArrayViewTest, TestIterationVariable) {
}
}
TEST(ArrayViewTest, TestReverseIterationVariable) {
char arr[] = "Arrr!";
ArrayView<char> av(arr);
EXPECT_EQ('\0', *av.rbegin());
EXPECT_EQ('\0', *av.crbegin());
EXPECT_EQ('A', *(av.rend() - 1));
EXPECT_EQ('A', *(av.crend() - 1));
const char* cit = av.cend() - 1;
for (auto crit = av.crbegin(); crit != av.crend(); ++crit, --cit) {
EXPECT_EQ(*cit, *crit);
}
char* it = av.end() - 1;
for (auto rit = av.rbegin(); rit != av.rend(); ++rit, --it) {
EXPECT_EQ(*it, *rit);
}
}
TEST(ArrayViewTest, TestIterationFixed) {
char arr[] = "Arrr!";
ArrayView<char, 6> av(arr);
@ -493,6 +526,25 @@ TEST(ArrayViewTest, TestIterationFixed) {
}
}
TEST(ArrayViewTest, TestReverseIterationFixed) {
char arr[] = "Arrr!";
ArrayView<char, 6> av(arr);
EXPECT_EQ('\0', *av.rbegin());
EXPECT_EQ('\0', *av.crbegin());
EXPECT_EQ('A', *(av.rend() - 1));
EXPECT_EQ('A', *(av.crend() - 1));
const char* cit = av.cend() - 1;
for (auto crit = av.crbegin(); crit != av.crend(); ++crit, --cit) {
EXPECT_EQ(*cit, *crit);
}
char* it = av.end() - 1;
for (auto rit = av.rbegin(); rit != av.rend(); ++rit, --it) {
EXPECT_EQ(*it, *rit);
}
}
TEST(ArrayViewTest, TestEmpty) {
EXPECT_TRUE(ArrayView<int>().empty());
const int a[] = {1, 2, 3};

104
api/async_dns_resolver.h Normal file

@ -0,0 +1,104 @@
/*
* Copyright 2021 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_ASYNC_DNS_RESOLVER_H_
#define API_ASYNC_DNS_RESOLVER_H_
#include <functional>
#include <memory>
#include "rtc_base/checks.h"
#include "rtc_base/socket_address.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// This interface defines the methods to resolve a hostname asynchronously.
// The AsyncDnsResolverInterface class encapsulates a single name query.
//
// Usage:
// std::unique_ptr<AsyncDnsResolverInterface> resolver =
// factory->Create(address-to-be-resolved, [r = resolver.get()]() {
// if (r->result().GetResolvedAddress(AF_INET, &addr)) {
// // success
// } else {
// // failure
// error = r->result().GetError();
// }
// // Release resolver.
// resolver_list.erase(std::remove_if(resolver_list.begin(),
// resolver_list.end(),
// [r](const auto& refptr) { return refptr.get() == r; }));
// });
// resolver_list.push_back(std::move(resolver));
class AsyncDnsResolverResult {
public:
virtual ~AsyncDnsResolverResult() = default;
// Returns true iff the address from `Start` was successfully resolved.
// If the address was successfully resolved, sets `addr` to a copy of the
// address from `Start` with the IP address set to the top most resolved
// address of `family` (`addr` will have both hostname and the resolved ip).
virtual bool GetResolvedAddress(int family,
rtc::SocketAddress* addr) const = 0;
// Returns error from resolver.
virtual int GetError() const = 0;
};
// The API for a single name query.
// The constructor, destructor and all functions must be called from
// the same sequence, and the callback will also be called on that sequence.
// The class guarantees that the callback will not be called if the
// resolver's destructor has been called.
class RTC_EXPORT AsyncDnsResolverInterface {
public:
virtual ~AsyncDnsResolverInterface() = default;
// Start address resolution of the hostname in `addr`.
virtual void Start(const rtc::SocketAddress& addr,
std::function<void()> callback) = 0;
// Start address resolution of the hostname in `addr` matching `family`.
virtual void Start(const rtc::SocketAddress& addr,
int family,
std::function<void()> callback) = 0;
virtual const AsyncDnsResolverResult& result() const = 0;
};
// An abstract factory for creating AsyncDnsResolverInterfaces. This allows
// client applications to provide WebRTC with their own mechanism for
// performing DNS resolution.
class AsyncDnsResolverFactoryInterface {
public:
virtual ~AsyncDnsResolverFactoryInterface() = default;
// Creates an AsyncDnsResolver and starts resolving the name. The callback
// will be called when resolution is finished.
// The callback will be called on the sequence that the caller runs on.
virtual std::unique_ptr<webrtc::AsyncDnsResolverInterface> CreateAndResolve(
const rtc::SocketAddress& addr,
std::function<void()> callback) = 0;
// Creates an AsyncDnsResolver and starts resolving the name to an address
// matching the specified family. The callback will be called when resolution
// is finished. The callback will be called on the sequence that the caller
// runs on.
virtual std::unique_ptr<webrtc::AsyncDnsResolverInterface> CreateAndResolve(
const rtc::SocketAddress& addr,
int family,
std::function<void()> callback) = 0;
// Creates an AsyncDnsResolver and does not start it.
// For backwards compatibility, will be deprecated and removed.
// One has to do a separate Start() call on the
// resolver to start name resolution.
virtual std::unique_ptr<webrtc::AsyncDnsResolverInterface> Create() = 0;
};
} // namespace webrtc
#endif // API_ASYNC_DNS_RESOLVER_H_
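A hedged usage sketch for the interfaces above; HostnameLookup is an invented helper, the factory would normally be injected (for example through PeerConnection dependencies), and AF_INET comes from the platform socket headers that rtc_base/socket_address.h already pulls in:

#include <memory>

#include "api/async_dns_resolver.h"
#include "rtc_base/socket_address.h"

class HostnameLookup {
 public:
  void Start(webrtc::AsyncDnsResolverFactoryInterface& factory,
             const rtc::SocketAddress& hostname) {
    // Everything, including the callback, runs on the calling sequence. The
    // interface guarantees the callback never fires after resolver_ has been
    // destroyed, so capturing `this` is safe while resolver_ is owned here.
    resolver_ = factory.CreateAndResolve(hostname, [this] { OnResolved(); });
  }

 private:
  void OnResolved() {
    rtc::SocketAddress resolved;
    if (resolver_->result().GetResolvedAddress(AF_INET, &resolved)) {
      // Success: `resolved` keeps the hostname plus the top IPv4 address.
    } else {
      int error = resolver_->result().GetError();
      (void)error;  // Report or log the failure.
    }
  }

  std::unique_ptr<webrtc::AsyncDnsResolverInterface> resolver_;
};

The three-argument CreateAndResolve(addr, family, callback) overload is used the same way when only one address family is wanted.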

api/audio/BUILD.gn

@ -20,17 +20,25 @@ rtc_library("audio_frame_api") {
deps = [
"..:rtp_packet_info",
"../../rtc_base:checks",
"../../rtc_base:rtc_base_approved",
"../../rtc_base:logging",
"../../rtc_base:macromagic",
"../../rtc_base:timeutils",
]
}
rtc_source_set("audio_frame_processor") {
visibility = [ "*" ]
sources = [ "audio_frame_processor.h" ]
}
rtc_source_set("audio_mixer_api") {
visibility = [ "*" ]
sources = [ "audio_mixer.h" ]
deps = [
":audio_frame_api",
"../../rtc_base:rtc_base_approved",
"..:make_ref_counted",
"../../rtc_base:refcount",
]
}
@ -42,7 +50,6 @@ rtc_library("aec3_config") {
]
deps = [
"../../rtc_base:checks",
"../../rtc_base:rtc_base_approved",
"../../rtc_base:safe_minmax",
"../../rtc_base/system:rtc_export",
]
@ -58,8 +65,9 @@ rtc_library("aec3_config_json") {
deps = [
":aec3_config",
"../../rtc_base:checks",
"../../rtc_base:rtc_base_approved",
"../../rtc_base:logging",
"../../rtc_base:rtc_json",
"../../rtc_base:stringutils",
"../../rtc_base/system:rtc_export",
]
absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
@ -77,7 +85,6 @@ rtc_library("aec3_factory") {
":aec3_config",
":echo_control",
"../../modules/audio_processing/aec3",
"../../rtc_base:rtc_base_approved",
"../../rtc_base/system:rtc_export",
]
}
@ -90,14 +97,15 @@ rtc_source_set("echo_control") {
rtc_source_set("echo_detector_creator") {
visibility = [ "*" ]
allow_poison = [ "default_echo_detector" ]
sources = [
"echo_detector_creator.cc",
"echo_detector_creator.h",
]
deps = [
"..:make_ref_counted",
"../../api:scoped_refptr",
"../../modules/audio_processing:api",
"../../modules/audio_processing:audio_processing",
"../../rtc_base:refcount",
"../../modules/audio_processing:residual_echo_detector",
]
}

api/audio/audio_frame.cc

@ -11,8 +11,6 @@
#include "api/audio/audio_frame.h"
#include <string.h>
#include <algorithm>
#include <utility>
#include "rtc_base/checks.h"
#include "rtc_base/time_utils.h"
@ -24,35 +22,13 @@ AudioFrame::AudioFrame() {
static_assert(sizeof(data_) == kMaxDataSizeBytes, "kMaxDataSizeBytes");
}
void swap(AudioFrame& a, AudioFrame& b) {
using std::swap;
swap(a.timestamp_, b.timestamp_);
swap(a.elapsed_time_ms_, b.elapsed_time_ms_);
swap(a.ntp_time_ms_, b.ntp_time_ms_);
swap(a.samples_per_channel_, b.samples_per_channel_);
swap(a.sample_rate_hz_, b.sample_rate_hz_);
swap(a.num_channels_, b.num_channels_);
swap(a.channel_layout_, b.channel_layout_);
swap(a.speech_type_, b.speech_type_);
swap(a.vad_activity_, b.vad_activity_);
swap(a.profile_timestamp_ms_, b.profile_timestamp_ms_);
swap(a.packet_infos_, b.packet_infos_);
const size_t length_a = a.samples_per_channel_ * a.num_channels_;
const size_t length_b = b.samples_per_channel_ * b.num_channels_;
RTC_DCHECK_LE(length_a, AudioFrame::kMaxDataSizeSamples);
RTC_DCHECK_LE(length_b, AudioFrame::kMaxDataSizeSamples);
std::swap_ranges(a.data_, a.data_ + std::max(length_a, length_b), b.data_);
swap(a.muted_, b.muted_);
swap(a.absolute_capture_timestamp_ms_, b.absolute_capture_timestamp_ms_);
}
void AudioFrame::Reset() {
ResetWithoutMuting();
muted_ = true;
}
void AudioFrame::ResetWithoutMuting() {
// TODO(wu): Zero is a valid value for |timestamp_|. We should initialize
// TODO(wu): Zero is a valid value for `timestamp_`. We should initialize
// to an invalid value, or add a new member to indicate invalidity.
timestamp_ = 0;
elapsed_time_ms_ = -1;

api/audio/audio_frame.h

@ -14,11 +14,8 @@
#include <stddef.h>
#include <stdint.h>
#include <utility>
#include "api/audio/channel_layout.h"
#include "api/rtp_packet_infos.h"
#include "rtc_base/constructor_magic.h"
namespace webrtc {
@ -60,7 +57,8 @@ class AudioFrame {
AudioFrame();
friend void swap(AudioFrame& a, AudioFrame& b);
AudioFrame(const AudioFrame&) = delete;
AudioFrame& operator=(const AudioFrame&) = delete;
// Resets all members to their default state.
void Reset();
@ -139,7 +137,7 @@ class AudioFrame {
int64_t profile_timestamp_ms_ = 0;
// Information about packets used to assemble this audio frame. This is needed
// by |SourceTracker| when the frame is delivered to the RTCRtpReceiver's
// by `SourceTracker` when the frame is delivered to the RTCRtpReceiver's
// MediaStreamTrack, in order to implement getContributingSources(). See:
// https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources
//
@ -149,7 +147,7 @@ class AudioFrame {
// sync buffer is the small sample-holding buffer located after the audio
// decoder and before where samples are assembled into output frames.
//
// |RtpPacketInfos| may also be empty if the audio samples did not come from
// `RtpPacketInfos` may also be empty if the audio samples did not come from
// RTP packets. E.g. if the audio were locally generated by packet loss
// concealment, comfort noise generation, etc.
RtpPacketInfos packet_infos_;
@ -165,11 +163,9 @@ class AudioFrame {
// Absolute capture timestamp when this audio frame was originally captured.
// This is only valid for audio frames captured on this machine. The absolute
// capture timestamp of a received frame is found in |packet_infos_|.
// capture timestamp of a received frame is found in `packet_infos_`.
// This timestamp MUST be based on the same clock as rtc::TimeMillis().
absl::optional<int64_t> absolute_capture_timestamp_ms_;
RTC_DISALLOW_COPY_AND_ASSIGN(AudioFrame);
};
} // namespace webrtc

api/audio/audio_frame_processor.h

@ -0,0 +1,43 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_AUDIO_FRAME_PROCESSOR_H_
#define API_AUDIO_AUDIO_FRAME_PROCESSOR_H_
#include <functional>
#include <memory>
namespace webrtc {
class AudioFrame;
// If passed into PeerConnectionFactory, will be used for additional
// processing of captured audio frames, performed before encoding.
// Implementations must be thread-safe.
class AudioFrameProcessor {
public:
using OnAudioFrameCallback = std::function<void(std::unique_ptr<AudioFrame>)>;
virtual ~AudioFrameProcessor() = default;
// Processes the frame received from WebRTC, is called by WebRTC off the
// realtime audio capturing path. AudioFrameProcessor must reply with
// processed frames by calling `sink_callback` if it was provided in SetSink()
// call. `sink_callback` can be called in the context of Process().
virtual void Process(std::unique_ptr<AudioFrame> frame) = 0;
// Atomically replaces the current sink with the new one. Before the
// first call to this function, or if the provided `sink_callback` is nullptr,
// processed frames are simply discarded.
virtual void SetSink(OnAudioFrameCallback sink_callback) = 0;
};
} // namespace webrtc
#endif // API_AUDIO_AUDIO_FRAME_PROCESSOR_H_
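
For context, a minimal implementation sketch of the AudioFrameProcessor interface above. The class name, mutex, and pass-through behavior are illustrative assumptions, not part of the API; only Process(), SetSink(), and OnAudioFrameCallback come from the header.

#include <memory>
#include <mutex>
#include <utility>

#include "api/audio/audio_frame.h"
#include "api/audio/audio_frame_processor.h"

namespace {

// Pass-through processor: forwards every frame unchanged to the current sink.
class PassThroughAudioFrameProcessor : public webrtc::AudioFrameProcessor {
 public:
  void Process(std::unique_ptr<webrtc::AudioFrame> frame) override {
    OnAudioFrameCallback sink;
    {
      std::lock_guard<std::mutex> lock(mutex_);
      sink = sink_;
    }
    // Frames are discarded while no sink has been set, as documented above.
    if (sink)
      sink(std::move(frame));
  }

  void SetSink(OnAudioFrameCallback sink_callback) override {
    std::lock_guard<std::mutex> lock(mutex_);
    sink_ = std::move(sink_callback);
  }

 private:
  std::mutex mutex_;
  OnAudioFrameCallback sink_;
};

}  // namespace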

View File

@ -35,9 +35,9 @@ class AudioMixer : public rtc::RefCountInterface {
kError, // The audio_frame will not be used.
};
// Overwrites |audio_frame|. The data_ field is overwritten with
// Overwrites `audio_frame`. The data_ field is overwritten with
// 10 ms of new audio (either 1 or 2 interleaved channels) at
// |sample_rate_hz|. All fields in |audio_frame| must be updated.
// `sample_rate_hz`. All fields in `audio_frame` must be updated.
virtual AudioFrameInfo GetAudioFrameWithInfo(int sample_rate_hz,
AudioFrame* audio_frame) = 0;
@ -66,7 +66,7 @@ class AudioMixer : public rtc::RefCountInterface {
// should mix at a rate that doesn't cause quality loss of the
// sources' audio. The mixing rate is one of the rates listed in
// AudioProcessing::NativeRate. All fields in
// |audio_frame_for_mixing| must be updated.
// `audio_frame_for_mixing` must be updated.
virtual void Mix(size_t number_of_channels,
AudioFrame* audio_frame_for_mixing) = 0;
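
A small usage sketch of the Mix() contract above. The helper name and the stereo choice are assumptions; the mixer instance is obtained elsewhere.

#include "api/audio/audio_frame.h"
#include "api/audio/audio_mixer.h"
#include "api/scoped_refptr.h"

// Pulls one 10 ms block of mixed stereo audio; Mix() overwrites every field
// of `frame`, so the same AudioFrame can be reused on each tick.
void MixOneStereoBlock(const rtc::scoped_refptr<webrtc::AudioMixer>& mixer,
                       webrtc::AudioFrame* frame) {
  mixer->Mix(/*number_of_channels=*/2, frame);
}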

View File

@ -275,7 +275,7 @@ const char* ChannelLayoutToString(ChannelLayout layout) {
case CHANNEL_LAYOUT_BITSTREAM:
return "BITSTREAM";
}
RTC_NOTREACHED() << "Invalid channel layout provided: " << layout;
RTC_DCHECK_NOTREACHED() << "Invalid channel layout provided: " << layout;
return "";
}

View File

@ -153,6 +153,7 @@ bool EchoCanceller3Config::Validate(EchoCanceller3Config* config) {
res = res & Limit(&c->filter.config_change_duration_blocks, 0, 100000);
res = res & Limit(&c->filter.initial_state_seconds, 0.f, 100.f);
res = res & Limit(&c->filter.coarse_reset_hangover_blocks, 0, 250000);
res = res & Limit(&c->erle.min, 1.f, 100000.f);
res = res & Limit(&c->erle.max_l, 1.f, 100000.f);
@ -165,6 +166,7 @@ bool EchoCanceller3Config::Validate(EchoCanceller3Config* config) {
res = res & Limit(&c->ep_strength.default_gain, 0.f, 1000000.f);
res = res & Limit(&c->ep_strength.default_len, -1.f, 1.f);
res = res & Limit(&c->ep_strength.nearend_len, -1.0f, 1.0f);
res =
res & Limit(&c->echo_audibility.low_render_limit, 0.f, 32768.f * 32768.f);
@ -228,6 +230,12 @@ bool EchoCanceller3Config::Validate(EchoCanceller3Config* config) {
res =
res & Limit(&c->suppressor.nearend_tuning.max_dec_factor_lf, 0.f, 100.f);
res = res & Limit(&c->suppressor.last_permanent_lf_smoothing_band, 0, 64);
res = res & Limit(&c->suppressor.last_lf_smoothing_band, 0, 64);
res = res & Limit(&c->suppressor.last_lf_band, 0, 63);
res = res &
Limit(&c->suppressor.first_hf_band, c->suppressor.last_lf_band + 1, 64);
res = res & Limit(&c->suppressor.dominant_nearend_detection.enr_threshold,
0.f, 1000000.f);
res = res & Limit(&c->suppressor.dominant_nearend_detection.snr_threshold,

View File

@ -43,6 +43,7 @@ struct RTC_EXPORT EchoCanceller3Config {
size_t hysteresis_limit_blocks = 1;
size_t fixed_capture_delay_samples = 0;
float delay_estimate_smoothing = 0.7f;
float delay_estimate_smoothing_delay_found = 0.7f;
float delay_candidate_detection_threshold = 0.2f;
struct DelaySelectionThresholds {
int initial;
@ -58,6 +59,7 @@ struct RTC_EXPORT EchoCanceller3Config {
};
AlignmentMixing render_alignment_mixing = {false, true, 10000.f, true};
AlignmentMixing capture_alignment_mixing = {false, true, 10000.f, false};
bool detect_pre_echo = true;
} delay;
struct Filter {
@ -86,9 +88,11 @@ struct RTC_EXPORT EchoCanceller3Config {
size_t config_change_duration_blocks = 250;
float initial_state_seconds = 2.5f;
int coarse_reset_hangover_blocks = 25;
bool conservative_initial_phase = false;
bool enable_coarse_filter_output_usage = true;
bool use_linear_filter = true;
bool high_pass_filter_echo_reference = false;
bool export_linear_aec_output = false;
} filter;
@ -105,8 +109,11 @@ struct RTC_EXPORT EchoCanceller3Config {
struct EpStrength {
float default_gain = 1.f;
float default_len = 0.83f;
float nearend_len = 0.83f;
bool echo_can_saturate = true;
bool bounded_erl = false;
bool erle_onset_compensation_in_dominant_nearend = false;
bool use_conservative_tail_frequency_response = true;
} ep_strength;
struct EchoAudibility {
@ -143,6 +150,7 @@ struct RTC_EXPORT EchoCanceller3Config {
float noise_gate_slope = 0.3f;
size_t render_pre_window_size = 1;
size_t render_post_window_size = 1;
bool model_reverb_in_nonlinear_mode = true;
} echo_model;
struct ComfortNoise {
@ -189,6 +197,12 @@ struct RTC_EXPORT EchoCanceller3Config {
2.0f,
0.25f);
bool lf_smoothing_during_initial_phase = true;
int last_permanent_lf_smoothing_band = 0;
int last_lf_smoothing_band = 5;
int last_lf_band = 5;
int first_hf_band = 8;
struct DominantNearendDetection {
float enr_threshold = .25f;
float enr_exit_threshold = 10.f;
@ -196,6 +210,7 @@ struct RTC_EXPORT EchoCanceller3Config {
int hold_duration = 50;
int trigger_threshold = 12;
bool use_during_initial_phase = true;
bool use_unbounded_echo_spectrum = true;
} dominant_nearend_detection;
struct SubbandNearendDetection {
@ -215,12 +230,20 @@ struct RTC_EXPORT EchoCanceller3Config {
struct HighBandsSuppression {
float enr_threshold = 1.f;
float max_gain_during_echo = 1.f;
float anti_howling_activation_threshold = 25.f;
float anti_howling_gain = 0.01f;
float anti_howling_activation_threshold = 400.f;
float anti_howling_gain = 1.f;
} high_bands_suppression;
float floor_first_increase = 0.00001f;
bool conservative_hf_suppression = false;
} suppressor;
struct MultiChannel {
bool detect_stereo_content = true;
float stereo_detection_threshold = 0.0f;
int stereo_detection_timeout_threshold_seconds = 300;
float stereo_detection_hysteresis_seconds = 2.0f;
} multi_channel;
};
} // namespace webrtc
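
A sketch of exercising the new multi_channel fields together with the Validate() helper shown above; the function name and values are arbitrary examples.

#include "api/audio/echo_canceller3_config.h"

webrtc::EchoCanceller3Config MakeStereoAwareAec3Config() {
  webrtc::EchoCanceller3Config cfg;
  cfg.multi_channel.detect_stereo_content = true;
  cfg.multi_channel.stereo_detection_timeout_threshold_seconds = 120;
  // Validate() clamps out-of-range fields in place and returns whether all
  // original values were already within bounds.
  webrtc::EchoCanceller3Config::Validate(&cfg);
  return cfg;
}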

View File

@ -11,6 +11,7 @@
#include <stddef.h>
#include <memory>
#include <string>
#include <vector>
@ -156,9 +157,14 @@ void Aec3ConfigFromJsonString(absl::string_view json_string,
*parsing_successful = true;
Json::Value root;
bool success = Json::Reader().parse(std::string(json_string), root);
Json::CharReaderBuilder builder;
std::string error_message;
std::unique_ptr<Json::CharReader> reader(builder.newCharReader());
bool success =
reader->parse(json_string.data(), json_string.data() + json_string.size(),
&root, &error_message);
if (!success) {
RTC_LOG(LS_ERROR) << "Incorrect JSON format: " << json_string;
RTC_LOG(LS_ERROR) << "Incorrect JSON format: " << error_message;
*parsing_successful = false;
return;
}
@ -191,6 +197,8 @@ void Aec3ConfigFromJsonString(absl::string_view json_string,
&cfg.delay.fixed_capture_delay_samples);
ReadParam(section, "delay_estimate_smoothing",
&cfg.delay.delay_estimate_smoothing);
ReadParam(section, "delay_estimate_smoothing_delay_found",
&cfg.delay.delay_estimate_smoothing_delay_found);
ReadParam(section, "delay_candidate_detection_threshold",
&cfg.delay.delay_candidate_detection_threshold);
@ -212,6 +220,7 @@ void Aec3ConfigFromJsonString(absl::string_view json_string,
&cfg.delay.render_alignment_mixing);
ReadParam(section, "capture_alignment_mixing",
&cfg.delay.capture_alignment_mixing);
ReadParam(section, "detect_pre_echo", &cfg.delay.detect_pre_echo);
}
if (rtc::GetValueFromJsonObject(aec3_root, "filter", &section)) {
@ -223,11 +232,15 @@ void Aec3ConfigFromJsonString(absl::string_view json_string,
&cfg.filter.config_change_duration_blocks);
ReadParam(section, "initial_state_seconds",
&cfg.filter.initial_state_seconds);
ReadParam(section, "coarse_reset_hangover_blocks",
&cfg.filter.coarse_reset_hangover_blocks);
ReadParam(section, "conservative_initial_phase",
&cfg.filter.conservative_initial_phase);
ReadParam(section, "enable_coarse_filter_output_usage",
&cfg.filter.enable_coarse_filter_output_usage);
ReadParam(section, "use_linear_filter", &cfg.filter.use_linear_filter);
ReadParam(section, "high_pass_filter_echo_reference",
&cfg.filter.high_pass_filter_echo_reference);
ReadParam(section, "export_linear_aec_output",
&cfg.filter.export_linear_aec_output);
}
@ -247,8 +260,13 @@ void Aec3ConfigFromJsonString(absl::string_view json_string,
if (rtc::GetValueFromJsonObject(aec3_root, "ep_strength", &section)) {
ReadParam(section, "default_gain", &cfg.ep_strength.default_gain);
ReadParam(section, "default_len", &cfg.ep_strength.default_len);
ReadParam(section, "nearend_len", &cfg.ep_strength.nearend_len);
ReadParam(section, "echo_can_saturate", &cfg.ep_strength.echo_can_saturate);
ReadParam(section, "bounded_erl", &cfg.ep_strength.bounded_erl);
ReadParam(section, "erle_onset_compensation_in_dominant_nearend",
&cfg.ep_strength.erle_onset_compensation_in_dominant_nearend);
ReadParam(section, "use_conservative_tail_frequency_response",
&cfg.ep_strength.use_conservative_tail_frequency_response);
}
if (rtc::GetValueFromJsonObject(aec3_root, "echo_audibility", &section)) {
@ -302,6 +320,8 @@ void Aec3ConfigFromJsonString(absl::string_view json_string,
&cfg.echo_model.render_pre_window_size);
ReadParam(section, "render_post_window_size",
&cfg.echo_model.render_post_window_size);
ReadParam(section, "model_reverb_in_nonlinear_mode",
&cfg.echo_model.model_reverb_in_nonlinear_mode);
}
if (rtc::GetValueFromJsonObject(aec3_root, "comfort_noise", &section)) {
@ -331,6 +351,15 @@ void Aec3ConfigFromJsonString(absl::string_view json_string,
&cfg.suppressor.nearend_tuning.max_dec_factor_lf);
}
ReadParam(section, "lf_smoothing_during_initial_phase",
&cfg.suppressor.lf_smoothing_during_initial_phase);
ReadParam(section, "last_permanent_lf_smoothing_band",
&cfg.suppressor.last_permanent_lf_smoothing_band);
ReadParam(section, "last_lf_smoothing_band",
&cfg.suppressor.last_lf_smoothing_band);
ReadParam(section, "last_lf_band", &cfg.suppressor.last_lf_band);
ReadParam(section, "first_hf_band", &cfg.suppressor.first_hf_band);
if (rtc::GetValueFromJsonObject(section, "dominant_nearend_detection",
&subsection)) {
ReadParam(subsection, "enr_threshold",
@ -346,6 +375,9 @@ void Aec3ConfigFromJsonString(absl::string_view json_string,
ReadParam(
subsection, "use_during_initial_phase",
&cfg.suppressor.dominant_nearend_detection.use_during_initial_phase);
ReadParam(subsection, "use_unbounded_echo_spectrum",
&cfg.suppressor.dominant_nearend_detection
.use_unbounded_echo_spectrum);
}
if (rtc::GetValueFromJsonObject(section, "subband_nearend_detection",
@ -381,6 +413,19 @@ void Aec3ConfigFromJsonString(absl::string_view json_string,
ReadParam(section, "floor_first_increase",
&cfg.suppressor.floor_first_increase);
ReadParam(section, "conservative_hf_suppression",
&cfg.suppressor.conservative_hf_suppression);
}
if (rtc::GetValueFromJsonObject(aec3_root, "multi_channel", &section)) {
ReadParam(section, "detect_stereo_content",
&cfg.multi_channel.detect_stereo_content);
ReadParam(section, "stereo_detection_threshold",
&cfg.multi_channel.stereo_detection_threshold);
ReadParam(section, "stereo_detection_timeout_threshold_seconds",
&cfg.multi_channel.stereo_detection_timeout_threshold_seconds);
ReadParam(section, "stereo_detection_hysteresis_seconds",
&cfg.multi_channel.stereo_detection_hysteresis_seconds);
}
}
@ -415,6 +460,8 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) {
<< config.delay.fixed_capture_delay_samples << ",";
ost << "\"delay_estimate_smoothing\": "
<< config.delay.delay_estimate_smoothing << ",";
ost << "\"delay_estimate_smoothing_delay_found\": "
<< config.delay.delay_estimate_smoothing_delay_found << ",";
ost << "\"delay_candidate_detection_threshold\": "
<< config.delay.delay_candidate_detection_threshold << ",";
@ -459,7 +506,9 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) {
<< (config.delay.capture_alignment_mixing.prefer_first_two_channels
? "true"
: "false");
ost << "}";
ost << "},";
ost << "\"detect_pre_echo\": "
<< (config.delay.detect_pre_echo ? "true" : "false");
ost << "},";
ost << "\"filter\": {";
@ -498,6 +547,8 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) {
<< config.filter.config_change_duration_blocks << ",";
ost << "\"initial_state_seconds\": " << config.filter.initial_state_seconds
<< ",";
ost << "\"coarse_reset_hangover_blocks\": "
<< config.filter.coarse_reset_hangover_blocks << ",";
ost << "\"conservative_initial_phase\": "
<< (config.filter.conservative_initial_phase ? "true" : "false") << ",";
ost << "\"enable_coarse_filter_output_usage\": "
@ -505,6 +556,9 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) {
<< ",";
ost << "\"use_linear_filter\": "
<< (config.filter.use_linear_filter ? "true" : "false") << ",";
ost << "\"high_pass_filter_echo_reference\": "
<< (config.filter.high_pass_filter_echo_reference ? "true" : "false")
<< ",";
ost << "\"export_linear_aec_output\": "
<< (config.filter.export_linear_aec_output ? "true" : "false");
@ -526,11 +580,20 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) {
ost << "\"ep_strength\": {";
ost << "\"default_gain\": " << config.ep_strength.default_gain << ",";
ost << "\"default_len\": " << config.ep_strength.default_len << ",";
ost << "\"nearend_len\": " << config.ep_strength.nearend_len << ",";
ost << "\"echo_can_saturate\": "
<< (config.ep_strength.echo_can_saturate ? "true" : "false") << ",";
ost << "\"bounded_erl\": "
<< (config.ep_strength.bounded_erl ? "true" : "false");
<< (config.ep_strength.bounded_erl ? "true" : "false") << ",";
ost << "\"erle_onset_compensation_in_dominant_nearend\": "
<< (config.ep_strength.erle_onset_compensation_in_dominant_nearend
? "true"
: "false")
<< ",";
ost << "\"use_conservative_tail_frequency_response\": "
<< (config.ep_strength.use_conservative_tail_frequency_response
? "true"
: "false");
ost << "},";
ost << "\"echo_audibility\": {";
@ -585,7 +648,9 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) {
ost << "\"render_pre_window_size\": "
<< config.echo_model.render_pre_window_size << ",";
ost << "\"render_post_window_size\": "
<< config.echo_model.render_post_window_size;
<< config.echo_model.render_post_window_size << ",";
ost << "\"model_reverb_in_nonlinear_mode\": "
<< (config.echo_model.model_reverb_in_nonlinear_mode ? "true" : "false");
ost << "},";
ost << "\"comfort_noise\": {";
@ -627,20 +692,30 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) {
ost << "\"max_dec_factor_lf\": "
<< config.suppressor.nearend_tuning.max_dec_factor_lf;
ost << "},";
ost << "\"dominant_nearend_detection\": {";
ost << "\"enr_threshold\": "
<< config.suppressor.dominant_nearend_detection.enr_threshold << ",";
ost << "\"enr_exit_threshold\": "
<< config.suppressor.dominant_nearend_detection.enr_exit_threshold << ",";
ost << "\"snr_threshold\": "
<< config.suppressor.dominant_nearend_detection.snr_threshold << ",";
ost << "\"hold_duration\": "
<< config.suppressor.dominant_nearend_detection.hold_duration << ",";
ost << "\"trigger_threshold\": "
<< config.suppressor.dominant_nearend_detection.trigger_threshold << ",";
ost << "\"use_during_initial_phase\": "
<< config.suppressor.dominant_nearend_detection.use_during_initial_phase;
ost << "},";
ost << "\"lf_smoothing_during_initial_phase\": "
<< (config.suppressor.lf_smoothing_during_initial_phase ? "true"
: "false")
<< ",";
ost << "\"last_permanent_lf_smoothing_band\": "
<< config.suppressor.last_permanent_lf_smoothing_band << ",";
ost << "\"last_lf_smoothing_band\": "
<< config.suppressor.last_lf_smoothing_band << ",";
ost << "\"last_lf_band\": " << config.suppressor.last_lf_band << ",";
ost << "\"first_hf_band\": " << config.suppressor.first_hf_band << ",";
{
const auto& dnd = config.suppressor.dominant_nearend_detection;
ost << "\"dominant_nearend_detection\": {";
ost << "\"enr_threshold\": " << dnd.enr_threshold << ",";
ost << "\"enr_exit_threshold\": " << dnd.enr_exit_threshold << ",";
ost << "\"snr_threshold\": " << dnd.snr_threshold << ",";
ost << "\"hold_duration\": " << dnd.hold_duration << ",";
ost << "\"trigger_threshold\": " << dnd.trigger_threshold << ",";
ost << "\"use_during_initial_phase\": " << dnd.use_during_initial_phase
<< ",";
ost << "\"use_unbounded_echo_spectrum\": "
<< dnd.use_unbounded_echo_spectrum;
ost << "},";
}
ost << "\"subband_nearend_detection\": {";
ost << "\"nearend_average_blocks\": "
<< config.suppressor.subband_nearend_detection.nearend_average_blocks
@ -672,8 +747,23 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) {
ost << "\"anti_howling_gain\": "
<< config.suppressor.high_bands_suppression.anti_howling_gain;
ost << "},";
ost << "\"floor_first_increase\": " << config.suppressor.floor_first_increase;
ost << "\"floor_first_increase\": " << config.suppressor.floor_first_increase
<< ",";
ost << "\"conservative_hf_suppression\": "
<< config.suppressor.conservative_hf_suppression;
ost << "},";
ost << "\"multi_channel\": {";
ost << "\"detect_stereo_content\": "
<< (config.multi_channel.detect_stereo_content ? "true" : "false") << ",";
ost << "\"stereo_detection_threshold\": "
<< config.multi_channel.stereo_detection_threshold << ",";
ost << "\"stereo_detection_timeout_threshold_seconds\": "
<< config.multi_channel.stereo_detection_timeout_threshold_seconds << ",";
ost << "\"stereo_detection_hysteresis_seconds\": "
<< config.multi_channel.stereo_detection_hysteresis_seconds;
ost << "}";
ost << "}";
ost << "}";
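
A round-trip sketch for the serializer and parser above, checking that the new multi_channel section survives; the helper name is an illustrative assumption.

#include <string>

#include "api/audio/echo_canceller3_config.h"
#include "api/audio/echo_canceller3_config_json.h"

bool MultiChannelSectionRoundTrips() {
  webrtc::EchoCanceller3Config cfg;
  cfg.multi_channel.detect_stereo_content = false;
  const std::string json = webrtc::Aec3ConfigToJsonString(cfg);
  const webrtc::EchoCanceller3Config parsed =
      webrtc::Aec3ConfigFromJsonString(json);
  return parsed.multi_channel.detect_stereo_content ==
         cfg.multi_channel.detect_stereo_content;
}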

View File

@ -25,7 +25,8 @@ std::unique_ptr<EchoControl> EchoCanceller3Factory::Create(
int num_render_channels,
int num_capture_channels) {
return std::make_unique<EchoCanceller3>(
config_, sample_rate_hz, num_render_channels, num_capture_channels);
config_, /*multichannel_config=*/absl::nullopt, sample_rate_hz,
num_render_channels, num_capture_channels);
}
} // namespace webrtc

View File

@ -48,6 +48,13 @@ class EchoControl {
// Provides an optional external estimate of the audio buffer delay.
virtual void SetAudioBufferDelay(int delay_ms) = 0;
// Specifies whether the capture output will be used. The purpose of this is
// to allow the echo controller to deactivate some of the processing when the
// resulting output is anyway not used, for instance when the endpoint is
// muted.
// TODO(b/177830919): Make pure virtual.
virtual void SetCaptureOutputUsage(bool capture_output_used) {}
// Returns whether the signal is altered.
virtual bool ActiveProcessing() const = 0;
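
An illustrative call site for the new hook; the mute-state function is an assumption, and only the EchoControl method comes from the interface above.

#include "api/audio/echo_control.h"

// Lets the echo controller skip work whose capture output would be discarded
// anyway, e.g. while the endpoint is muted.
void OnMuteStateChanged(webrtc::EchoControl* echo_control, bool muted) {
  echo_control->SetCaptureOutputUsage(/*capture_output_used=*/!muted);
}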

View File

@ -9,13 +9,13 @@
*/
#include "api/audio/echo_detector_creator.h"
#include "api/make_ref_counted.h"
#include "modules/audio_processing/residual_echo_detector.h"
#include "rtc_base/ref_counted_object.h"
namespace webrtc {
rtc::scoped_refptr<EchoDetector> CreateEchoDetector() {
return new rtc::RefCountedObject<ResidualEchoDetector>();
return rtc::make_ref_counted<ResidualEchoDetector>();
}
} // namespace webrtc

View File

@ -24,7 +24,6 @@ if (rtc_include_tests) {
"..:aec3_config",
"..:aec3_config_json",
"..:audio_frame_api",
"../../../rtc_base:rtc_base_approved",
"../../../test:test_support",
]
}

View File

@ -133,54 +133,4 @@ TEST(AudioFrameTest, CopyFrom) {
EXPECT_EQ(0, memcmp(frame2.data(), frame1.data(), sizeof(samples)));
}
TEST(AudioFrameTest, SwapFrames) {
AudioFrame frame1, frame2;
int16_t samples1[kNumChannelsMono * kSamplesPerChannel];
for (size_t i = 0; i < kNumChannelsMono * kSamplesPerChannel; ++i) {
samples1[i] = i;
}
frame1.UpdateFrame(kTimestamp, samples1, kSamplesPerChannel, kSampleRateHz,
AudioFrame::kPLC, AudioFrame::kVadActive,
kNumChannelsMono);
frame1.set_absolute_capture_timestamp_ms(12345678);
const auto frame1_channel_layout = frame1.channel_layout();
int16_t samples2[(kNumChannelsMono + 1) * (kSamplesPerChannel + 1)];
for (size_t i = 0; i < (kNumChannelsMono + 1) * (kSamplesPerChannel + 1);
++i) {
samples2[i] = 1000 + i;
}
frame2.UpdateFrame(kTimestamp + 1, samples2, kSamplesPerChannel + 1,
kSampleRateHz + 1, AudioFrame::kNormalSpeech,
AudioFrame::kVadPassive, kNumChannelsMono + 1);
const auto frame2_channel_layout = frame2.channel_layout();
swap(frame1, frame2);
EXPECT_EQ(kTimestamp + 1, frame1.timestamp_);
ASSERT_EQ(kSamplesPerChannel + 1, frame1.samples_per_channel_);
EXPECT_EQ(kSampleRateHz + 1, frame1.sample_rate_hz_);
EXPECT_EQ(AudioFrame::kNormalSpeech, frame1.speech_type_);
EXPECT_EQ(AudioFrame::kVadPassive, frame1.vad_activity_);
ASSERT_EQ(kNumChannelsMono + 1, frame1.num_channels_);
for (size_t i = 0; i < (kNumChannelsMono + 1) * (kSamplesPerChannel + 1);
++i) {
EXPECT_EQ(samples2[i], frame1.data()[i]);
}
EXPECT_FALSE(frame1.absolute_capture_timestamp_ms());
EXPECT_EQ(frame2_channel_layout, frame1.channel_layout());
EXPECT_EQ(kTimestamp, frame2.timestamp_);
ASSERT_EQ(kSamplesPerChannel, frame2.samples_per_channel_);
EXPECT_EQ(kSampleRateHz, frame2.sample_rate_hz_);
EXPECT_EQ(AudioFrame::kPLC, frame2.speech_type_);
EXPECT_EQ(AudioFrame::kVadActive, frame2.vad_activity_);
ASSERT_EQ(kNumChannelsMono, frame2.num_channels_);
for (size_t i = 0; i < kNumChannelsMono * kSamplesPerChannel; ++i) {
EXPECT_EQ(samples1[i], frame2.data()[i]);
}
EXPECT_EQ(12345678, frame2.absolute_capture_timestamp_ms());
EXPECT_EQ(frame1_channel_layout, frame2.channel_layout());
}
} // namespace webrtc

View File

@ -21,19 +21,29 @@ TEST(EchoCanceller3JsonHelpers, ToStringAndParseJson) {
cfg.delay.log_warning_on_delay_changes = true;
cfg.filter.refined.error_floor = 2.f;
cfg.filter.coarse_initial.length_blocks = 3u;
cfg.filter.high_pass_filter_echo_reference =
!cfg.filter.high_pass_filter_echo_reference;
cfg.comfort_noise.noise_floor_dbfs = 100.f;
cfg.echo_model.model_reverb_in_nonlinear_mode = false;
cfg.suppressor.normal_tuning.mask_hf.enr_suppress = .5f;
cfg.suppressor.subband_nearend_detection.nearend_average_blocks = 3;
cfg.suppressor.subband_nearend_detection.subband1 = {1, 3};
cfg.suppressor.subband_nearend_detection.subband2 = {4, 5};
cfg.suppressor.subband_nearend_detection.nearend_threshold = 2.f;
cfg.suppressor.subband_nearend_detection.snr_threshold = 100.f;
cfg.multi_channel.detect_stereo_content =
!cfg.multi_channel.detect_stereo_content;
cfg.multi_channel.stereo_detection_threshold += 1.0f;
cfg.multi_channel.stereo_detection_timeout_threshold_seconds += 1;
cfg.multi_channel.stereo_detection_hysteresis_seconds += 1;
std::string json_string = Aec3ConfigToJsonString(cfg);
EchoCanceller3Config cfg_transformed = Aec3ConfigFromJsonString(json_string);
// Expect unchanged values to remain default.
EXPECT_EQ(cfg.ep_strength.default_len,
cfg_transformed.ep_strength.default_len);
EXPECT_EQ(cfg.ep_strength.nearend_len,
cfg_transformed.ep_strength.nearend_len);
EXPECT_EQ(cfg.suppressor.normal_tuning.mask_lf.enr_suppress,
cfg_transformed.suppressor.normal_tuning.mask_lf.enr_suppress);
@ -46,8 +56,12 @@ TEST(EchoCanceller3JsonHelpers, ToStringAndParseJson) {
cfg_transformed.filter.coarse_initial.length_blocks);
EXPECT_EQ(cfg.filter.refined.error_floor,
cfg_transformed.filter.refined.error_floor);
EXPECT_EQ(cfg.filter.high_pass_filter_echo_reference,
cfg_transformed.filter.high_pass_filter_echo_reference);
EXPECT_EQ(cfg.comfort_noise.noise_floor_dbfs,
cfg_transformed.comfort_noise.noise_floor_dbfs);
EXPECT_EQ(cfg.echo_model.model_reverb_in_nonlinear_mode,
cfg_transformed.echo_model.model_reverb_in_nonlinear_mode);
EXPECT_EQ(cfg.suppressor.normal_tuning.mask_hf.enr_suppress,
cfg_transformed.suppressor.normal_tuning.mask_hf.enr_suppress);
EXPECT_EQ(cfg.suppressor.subband_nearend_detection.nearend_average_blocks,
@ -66,5 +80,14 @@ TEST(EchoCanceller3JsonHelpers, ToStringAndParseJson) {
cfg_transformed.suppressor.subband_nearend_detection.nearend_threshold);
EXPECT_EQ(cfg.suppressor.subband_nearend_detection.snr_threshold,
cfg_transformed.suppressor.subband_nearend_detection.snr_threshold);
EXPECT_EQ(cfg.multi_channel.detect_stereo_content,
cfg_transformed.multi_channel.detect_stereo_content);
EXPECT_EQ(cfg.multi_channel.stereo_detection_threshold,
cfg_transformed.multi_channel.stereo_detection_threshold);
EXPECT_EQ(
cfg.multi_channel.stereo_detection_timeout_threshold_seconds,
cfg_transformed.multi_channel.stereo_detection_timeout_threshold_seconds);
EXPECT_EQ(cfg.multi_channel.stereo_detection_hysteresis_seconds,
cfg_transformed.multi_channel.stereo_detection_hysteresis_seconds);
}
} // namespace webrtc

View File

@ -31,15 +31,19 @@ rtc_library("audio_codecs_api") {
deps = [
"..:array_view",
"..:bitrate_allocation",
"..:make_ref_counted",
"..:scoped_refptr",
"../../api:field_trials_view",
"../../rtc_base:buffer",
"../../rtc_base:checks",
"../../rtc_base:deprecation",
"../../rtc_base:rtc_base_approved",
"../../rtc_base:event_tracer",
"../../rtc_base:refcount",
"../../rtc_base:sanitizer",
"../../rtc_base/system:rtc_export",
"../units:time_delta",
]
absl_deps = [
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
@ -55,11 +59,9 @@ rtc_library("builtin_audio_decoder_factory") {
deps = [
":audio_codecs_api",
"..:scoped_refptr",
"../../rtc_base:rtc_base_approved",
"L16:audio_decoder_L16",
"g711:audio_decoder_g711",
"g722:audio_decoder_g722",
"isac:audio_decoder_isac",
]
defines = []
if (rtc_include_ilbc) {
@ -89,11 +91,9 @@ rtc_library("builtin_audio_encoder_factory") {
deps = [
":audio_codecs_api",
"..:scoped_refptr",
"../../rtc_base:rtc_base_approved",
"L16:audio_encoder_L16",
"g711:audio_encoder_g711",
"g722:audio_encoder_g722",
"isac:audio_encoder_isac",
]
defines = []
if (rtc_include_ilbc) {
@ -123,7 +123,6 @@ rtc_library("opus_audio_decoder_factory") {
deps = [
":audio_codecs_api",
"..:scoped_refptr",
"../../rtc_base:rtc_base_approved",
"opus:audio_decoder_multiopus",
"opus:audio_decoder_opus",
]
@ -139,7 +138,6 @@ rtc_library("opus_audio_encoder_factory") {
deps = [
":audio_codecs_api",
"..:scoped_refptr",
"../../rtc_base:rtc_base_approved",
"opus:audio_encoder_multiopus",
"opus:audio_encoder_opus",
]

View File

@ -21,9 +21,11 @@ rtc_library("audio_encoder_L16") {
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:pcm16b",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base:safe_conversions",
"../../../rtc_base:safe_minmax",
"../../../rtc_base:stringutils",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
@ -41,8 +43,9 @@ rtc_library("audio_decoder_L16") {
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:pcm16b",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base:safe_conversions",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [

View File

@ -24,9 +24,10 @@ absl::optional<AudioDecoderL16::Config> AudioDecoderL16::SdpToConfig(
Config config;
config.sample_rate_hz = format.clockrate_hz;
config.num_channels = rtc::checked_cast<int>(format.num_channels);
return absl::EqualsIgnoreCase(format.name, "L16") && config.IsOk()
? absl::optional<Config>(config)
: absl::nullopt;
if (absl::EqualsIgnoreCase(format.name, "L16") && config.IsOk()) {
return config;
}
return absl::nullopt;
}
void AudioDecoderL16::AppendSupportedDecoders(
@ -36,10 +37,13 @@ void AudioDecoderL16::AppendSupportedDecoders(
std::unique_ptr<AudioDecoder> AudioDecoderL16::MakeAudioDecoder(
const Config& config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
return config.IsOk() ? std::make_unique<AudioDecoderPcm16B>(
config.sample_rate_hz, config.num_channels)
: nullptr;
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
if (!config.IsOk()) {
return nullptr;
}
return std::make_unique<AudioDecoderPcm16B>(config.sample_rate_hz,
config.num_channels);
}
} // namespace webrtc

View File

@ -18,6 +18,7 @@
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
@ -29,7 +30,8 @@ struct RTC_EXPORT AudioDecoderL16 {
bool IsOk() const {
return (sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
sample_rate_hz == 32000 || sample_rate_hz == 48000) &&
num_channels >= 1;
(num_channels >= 1 &&
num_channels <= AudioDecoder::kMaxNumberOfChannels);
}
int sample_rate_hz = 8000;
int num_channels = 1;
@ -38,7 +40,8 @@ struct RTC_EXPORT AudioDecoderL16 {
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const Config& config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc

View File

@ -24,6 +24,7 @@ namespace webrtc {
absl::optional<AudioEncoderL16::Config> AudioEncoderL16::SdpToConfig(
const SdpAudioFormat& format) {
if (!rtc::IsValueInRangeForNumericType<int>(format.num_channels)) {
RTC_DCHECK_NOTREACHED();
return absl::nullopt;
}
Config config;
@ -36,9 +37,10 @@ absl::optional<AudioEncoderL16::Config> AudioEncoderL16::SdpToConfig(
config.frame_size_ms = rtc::SafeClamp(10 * (*ptime / 10), 10, 60);
}
}
return absl::EqualsIgnoreCase(format.name, "L16") && config.IsOk()
? absl::optional<Config>(config)
: absl::nullopt;
if (absl::EqualsIgnoreCase(format.name, "L16") && config.IsOk()) {
return config;
}
return absl::nullopt;
}
void AudioEncoderL16::AppendSupportedEncoders(
@ -57,13 +59,17 @@ AudioCodecInfo AudioEncoderL16::QueryAudioEncoder(
std::unique_ptr<AudioEncoder> AudioEncoderL16::MakeAudioEncoder(
const AudioEncoderL16::Config& config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
RTC_DCHECK(config.IsOk());
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
AudioEncoderPcm16B::Config c;
c.sample_rate_hz = config.sample_rate_hz;
c.num_channels = config.num_channels;
c.frame_size_ms = config.frame_size_ms;
c.payload_type = payload_type;
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
return std::make_unique<AudioEncoderPcm16B>(c);
}

View File

@ -18,6 +18,7 @@
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
@ -29,7 +30,9 @@ struct RTC_EXPORT AudioEncoderL16 {
bool IsOk() const {
return (sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
sample_rate_hz == 32000 || sample_rate_hz == 48000) &&
num_channels >= 1 && frame_size_ms > 0 && frame_size_ms <= 120 &&
num_channels >= 1 &&
num_channels <= AudioEncoder::kMaxNumberOfChannels &&
frame_size_ms > 0 && frame_size_ms <= 120 &&
frame_size_ms % 10 == 0;
}
int sample_rate_hz = 8000;
@ -42,7 +45,8 @@ struct RTC_EXPORT AudioEncoderL16 {
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
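
A small usage sketch of the updated MakeAudioEncoder() signature above; the trailing codec_pair_id and field_trials arguments keep their defaults, and the payload type is an arbitrary example.

#include <memory>

#include "api/audio_codecs/L16/audio_encoder_L16.h"

std::unique_ptr<webrtc::AudioEncoder> MakeStereoL16Encoder() {
  webrtc::AudioEncoderL16::Config config;
  config.sample_rate_hz = 48000;
  config.num_channels = 2;  // Must not exceed AudioEncoder::kMaxNumberOfChannels.
  // With the new behavior, an invalid config triggers RTC_DCHECK_NOTREACHED()
  // and yields nullptr rather than constructing an encoder.
  return webrtc::AudioEncoderL16::MakeAudioEncoder(config, /*payload_type=*/96);
}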

View File

@ -1 +1,3 @@
kwiberg@webrtc.org
alessiob@webrtc.org
henrik.lundin@webrtc.org
jakobi@webrtc.org

View File

@ -10,7 +10,6 @@
#include "api/audio_codecs/audio_decoder.h"
#include <assert.h>
#include <memory>
#include <utility>
@ -162,9 +161,10 @@ AudioDecoder::SpeechType AudioDecoder::ConvertSpeechType(int16_t type) {
case 2:
return kComfortNoise;
default:
assert(false);
RTC_DCHECK_NOTREACHED();
return kSpeech;
}
}
constexpr int AudioDecoder::kMaxNumberOfChannels;
} // namespace webrtc

View File

@ -20,7 +20,6 @@
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "rtc_base/buffer.h"
#include "rtc_base/constructor_magic.h"
namespace webrtc {
@ -37,6 +36,9 @@ class AudioDecoder {
AudioDecoder() = default;
virtual ~AudioDecoder() = default;
AudioDecoder(const AudioDecoder&) = delete;
AudioDecoder& operator=(const AudioDecoder&) = delete;
class EncodedAudioFrame {
public:
struct DecodeResult {
@ -53,8 +55,8 @@ class AudioDecoder {
// Returns true if this packet contains DTX.
virtual bool IsDtxPacket() const;
// Decodes this frame of audio and writes the result in |decoded|.
// |decoded| must be large enough to store as many samples as indicated by a
// Decodes this frame of audio and writes the result in `decoded`.
// `decoded` must be large enough to store as many samples as indicated by a
// call to Duration() . On success, returns an absl::optional containing the
// total number of samples across all channels, as well as whether the
// decoder produced comfort noise or speech. On failure, returns an empty
@ -85,8 +87,8 @@ class AudioDecoder {
// Let the decoder parse this payload and prepare zero or more decodable
// frames. Each frame must be between 10 ms and 120 ms long. The caller must
// ensure that the AudioDecoder object outlives any frame objects returned by
// this call. The decoder is free to swap or move the data from the |payload|
// buffer. |timestamp| is the input timestamp, in samples, corresponding to
// this call. The decoder is free to swap or move the data from the `payload`
// buffer. `timestamp` is the input timestamp, in samples, corresponding to
// the start of the payload.
virtual std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
uint32_t timestamp);
@ -95,12 +97,12 @@ class AudioDecoder {
// obsolete; callers should call ParsePayload instead. For now, subclasses
// must still implement DecodeInternal.
// Decodes |encode_len| bytes from |encoded| and writes the result in
// |decoded|. The maximum bytes allowed to be written into |decoded| is
// |max_decoded_bytes|. Returns the total number of samples across all
// channels. If the decoder produced comfort noise, |speech_type|
// Decodes `encode_len` bytes from `encoded` and writes the result in
// `decoded`. The maximum bytes allowed to be written into `decoded` is
// `max_decoded_bytes`. Returns the total number of samples across all
// channels. If the decoder produced comfort noise, `speech_type`
// is set to kComfortNoise, otherwise it is kSpeech. The desired output
// sample rate is provided in |sample_rate_hz|, which must be valid for the
// sample rate is provided in `sample_rate_hz`, which must be valid for the
// codec at hand.
int Decode(const uint8_t* encoded,
size_t encoded_len,
@ -123,11 +125,11 @@ class AudioDecoder {
// Calls the packet-loss concealment of the decoder to update the state after
// one or several lost packets. The caller has to make sure that the
// memory allocated in |decoded| should accommodate |num_frames| frames.
// memory allocated in `decoded` should accommodate `num_frames` frames.
virtual size_t DecodePlc(size_t num_frames, int16_t* decoded);
// Asks the decoder to generate packet-loss concealment and append it to the
// end of |concealment_audio|. The concealment audio should be in
// end of `concealment_audio`. The concealment audio should be in
// channel-interleaved format, with as many channels as the last decoded
// packet produced. The implementation must produce at least
// requested_samples_per_channel, or nothing at all. This is a signal to the
@ -136,7 +138,7 @@ class AudioDecoder {
// with the decoded audio on either side of the concealment.
// Note: The default implementation of GeneratePlc will be deleted soon. All
// implementations must provide their own, which can be a simple as a no-op.
// TODO(bugs.webrtc.org/9676): Remove default impementation.
// TODO(bugs.webrtc.org/9676): Remove default implementation.
virtual void GeneratePlc(size_t requested_samples_per_channel,
rtc::BufferT<int16_t>* concealment_audio);
@ -146,19 +148,19 @@ class AudioDecoder {
// Returns the last error code from the decoder.
virtual int ErrorCode();
// Returns the duration in samples-per-channel of the payload in |encoded|
// which is |encoded_len| bytes long. Returns kNotImplemented if no duration
// Returns the duration in samples-per-channel of the payload in `encoded`
// which is `encoded_len` bytes long. Returns kNotImplemented if no duration
// estimate is available, or -1 in case of an error.
virtual int PacketDuration(const uint8_t* encoded, size_t encoded_len) const;
// Returns the duration in samples-per-channel of the redundant payload in
// |encoded| which is |encoded_len| bytes long. Returns kNotImplemented if no
// `encoded` which is `encoded_len` bytes long. Returns kNotImplemented if no
// duration estimate is available, or -1 in case of an error.
virtual int PacketDurationRedundant(const uint8_t* encoded,
size_t encoded_len) const;
// Detects whether a packet has forward error correction. The packet is
// comprised of the samples in |encoded| which is |encoded_len| bytes long.
// comprised of the samples in `encoded` which is `encoded_len` bytes long.
// Returns true if the packet has FEC and false otherwise.
virtual bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const;
@ -170,6 +172,9 @@ class AudioDecoder {
// during the lifetime of the decoder.
virtual size_t Channels() const = 0;
// The maximum number of audio channels supported by WebRTC decoders.
static constexpr int kMaxNumberOfChannels = 24;
protected:
static SpeechType ConvertSpeechType(int16_t type);
@ -184,9 +189,6 @@ class AudioDecoder {
int sample_rate_hz,
int16_t* decoded,
SpeechType* speech_type);
private:
RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
};
} // namespace webrtc
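
A hedged sketch of the ParsePayload()/Decode() flow documented above, for some already-constructed decoder; the helper name and the conservative output sizing are assumptions.

#include <utility>
#include <vector>

#include "api/audio_codecs/audio_decoder.h"
#include "rtc_base/buffer.h"

void DecodeRtpPayload(webrtc::AudioDecoder* decoder,
                      rtc::Buffer payload,
                      uint32_t rtp_timestamp,
                      std::vector<int16_t>* output) {
  for (auto& result : decoder->ParsePayload(std::move(payload), rtp_timestamp)) {
    if (!result.frame)
      continue;
    // Duration() is per channel; sizing by Channels() as well is a
    // conservative choice for interleaved multi-channel output.
    std::vector<int16_t> decoded(result.frame->Duration() * decoder->Channels());
    const auto decode_result = result.frame->Decode(decoded);
    if (decode_result) {
      output->insert(output->end(), decoded.begin(),
                     decoded.begin() + decode_result->num_decoded_samples);
    }
  }
}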

View File

@ -38,6 +38,8 @@ class AudioDecoderFactory : public rtc::RefCountInterface {
// communication between the AudioEncoder and AudioDecoder instances, which is
// needed for some codecs with built-in bandwidth adaptation.)
//
// Returns null if the format isn't supported.
//
// Note: Implementations need to be robust against combinations other than
// one encoder, one decoder getting the same ID; such decoders must still
// work.

View File

@ -15,8 +15,9 @@
#include <vector>
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/field_trials_view.h"
#include "api/make_ref_counted.h"
#include "api/scoped_refptr.h"
#include "rtc_base/ref_counted_object.h"
namespace webrtc {
@ -32,7 +33,8 @@ struct Helper<> {
static bool IsSupportedDecoder(const SdpAudioFormat& format) { return false; }
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) {
absl::optional<AudioCodecPairId> codec_pair_id,
const FieldTrialsView* field_trials) {
return nullptr;
}
};
@ -55,16 +57,22 @@ struct Helper<T, Ts...> {
}
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) {
absl::optional<AudioCodecPairId> codec_pair_id,
const FieldTrialsView* field_trials) {
auto opt_config = T::SdpToConfig(format);
return opt_config ? T::MakeAudioDecoder(*opt_config, codec_pair_id)
: Helper<Ts...>::MakeAudioDecoder(format, codec_pair_id);
: Helper<Ts...>::MakeAudioDecoder(format, codec_pair_id,
field_trials);
}
};
template <typename... Ts>
class AudioDecoderFactoryT : public AudioDecoderFactory {
public:
explicit AudioDecoderFactoryT(const FieldTrialsView* field_trials) {
field_trials_ = field_trials;
}
std::vector<AudioCodecSpec> GetSupportedDecoders() override {
std::vector<AudioCodecSpec> specs;
Helper<Ts...>::AppendSupportedDecoders(&specs);
@ -78,8 +86,11 @@ class AudioDecoderFactoryT : public AudioDecoderFactory {
std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) override {
return Helper<Ts...>::MakeAudioDecoder(format, codec_pair_id);
return Helper<Ts...>::MakeAudioDecoder(format, codec_pair_id,
field_trials_);
}
const FieldTrialsView* field_trials_;
};
} // namespace audio_decoder_factory_template_impl
@ -89,8 +100,8 @@ class AudioDecoderFactoryT : public AudioDecoderFactory {
// Each decoder type is given as a template argument to the function; it should
// be a struct with the following static member functions:
//
// // Converts |audio_format| to a ConfigType instance. Returns an empty
// // optional if |audio_format| doesn't correctly specify a decoder of our
// // Converts `audio_format` to a ConfigType instance. Returns an empty
// // optional if `audio_format` doesn't correctly specify a decoder of our
// // type.
// absl::optional<ConfigType> SdpToConfig(const SdpAudioFormat& audio_format);
//
@ -115,7 +126,8 @@ class AudioDecoderFactoryT : public AudioDecoderFactory {
// TODO(kwiberg): Point at CreateBuiltinAudioDecoderFactory() for an example of
// how it is used.
template <typename... Ts>
rtc::scoped_refptr<AudioDecoderFactory> CreateAudioDecoderFactory() {
rtc::scoped_refptr<AudioDecoderFactory> CreateAudioDecoderFactory(
const FieldTrialsView* field_trials = nullptr) {
// There's no technical reason we couldn't allow zero template parameters,
// but such a factory couldn't create any decoders, and callers can do this
// by mistake by simply forgetting the <> altogether. So we forbid it in
@ -123,9 +135,9 @@ rtc::scoped_refptr<AudioDecoderFactory> CreateAudioDecoderFactory() {
static_assert(sizeof...(Ts) >= 1,
"Caller must give at least one template parameter");
return rtc::scoped_refptr<AudioDecoderFactory>(
new rtc::RefCountedObject<
audio_decoder_factory_template_impl::AudioDecoderFactoryT<Ts...>>());
return rtc::make_ref_counted<
audio_decoder_factory_template_impl::AudioDecoderFactoryT<Ts...>>(
field_trials);
}
} // namespace webrtc
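
A usage sketch of the updated factory template, forwarding an optional FieldTrialsView; the codec selection and helper name are arbitrary examples.

#include "api/audio_codecs/L16/audio_decoder_L16.h"
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/audio_codecs/audio_decoder_factory_template.h"
#include "api/audio_codecs/g711/audio_decoder_g711.h"
#include "api/field_trials_view.h"
#include "api/scoped_refptr.h"

rtc::scoped_refptr<webrtc::AudioDecoderFactory> MakePcmDecoderFactory(
    const webrtc::FieldTrialsView* field_trials) {
  // `field_trials` may be nullptr; it is simply passed through to each codec
  // trait's MakeAudioDecoder().
  return webrtc::CreateAudioDecoderFactory<webrtc::AudioDecoderL16,
                                           webrtc::AudioDecoderG711>(
      field_trials);
}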

View File

@ -83,7 +83,7 @@ void AudioEncoder::OnReceivedUplinkPacketLossFraction(
void AudioEncoder::OnReceivedUplinkRecoverablePacketLossFraction(
float uplink_recoverable_packet_loss_fraction) {
RTC_NOTREACHED();
RTC_DCHECK_NOTREACHED();
}
void AudioEncoder::OnReceivedTargetAudioBitrate(int target_audio_bitrate_bps) {
@ -110,4 +110,5 @@ ANAStats AudioEncoder::GetANAStats() const {
return ANAStats();
}
constexpr int AudioEncoder::kMaxNumberOfChannels;
} // namespace webrtc

View File

@ -16,12 +16,12 @@
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/call/bitrate_allocation.h"
#include "api/units/time_delta.h"
#include "rtc_base/buffer.h"
#include "rtc_base/deprecation.h"
namespace webrtc {
@ -95,13 +95,13 @@ class AudioEncoder {
// This is the main struct for auxiliary encoding information. Each encoded
// packet should be accompanied by one EncodedInfo struct, containing the
// total number of |encoded_bytes|, the |encoded_timestamp| and the
// |payload_type|. If the packet contains redundant encodings, the |redundant|
// total number of `encoded_bytes`, the `encoded_timestamp` and the
// `payload_type`. If the packet contains redundant encodings, the `redundant`
// vector will be populated with EncodedInfoLeaf structs. Each struct in the
// vector represents one encoding; the order of structs in the vector is the
// same as the order in which the actual payloads are written to the byte
// stream. When EncoderInfoLeaf structs are present in the vector, the main
// struct's |encoded_bytes| will be the sum of all the |encoded_bytes| in the
// struct's `encoded_bytes` will be the sum of all the `encoded_bytes` in the
// vector.
struct EncodedInfo : public EncodedInfoLeaf {
EncodedInfo();
@ -143,7 +143,7 @@ class AudioEncoder {
// Accepts one 10 ms block of input audio (i.e., SampleRateHz() / 100 *
// NumChannels() samples). Multi-channel audio must be sample-interleaved.
// The encoder appends zero or more bytes of output to |encoded| and returns
// The encoder appends zero or more bytes of output to `encoded` and returns
// additional encoding information. Encode() checks some preconditions, calls
// EncodeImpl() which does the actual work, and then checks some
// postconditions.
@ -182,12 +182,11 @@ class AudioEncoder {
// implementation does nothing.
virtual void SetMaxPlaybackRate(int frequency_hz);
// This is to be deprecated. Please use |OnReceivedTargetAudioBitrate|
// instead.
// Tells the encoder what average bitrate we'd like it to produce. The
// encoder is free to adjust or disregard the given bitrate (the default
// implementation does the latter).
RTC_DEPRECATED virtual void SetTargetBitrate(int target_bps);
ABSL_DEPRECATED("Use OnReceivedTargetAudioBitrate instead")
virtual void SetTargetBitrate(int target_bps);
// Causes this encoder to let go of any other encoders it contains, and
// returns a pointer to an array where they are stored (which is required to
@ -206,11 +205,12 @@ class AudioEncoder {
virtual void DisableAudioNetworkAdaptor();
// Provides uplink packet loss fraction to this encoder to allow it to adapt.
// |uplink_packet_loss_fraction| is in the range [0.0, 1.0].
// `uplink_packet_loss_fraction` is in the range [0.0, 1.0].
virtual void OnReceivedUplinkPacketLossFraction(
float uplink_packet_loss_fraction);
RTC_DEPRECATED virtual void OnReceivedUplinkRecoverablePacketLossFraction(
ABSL_DEPRECATED("")
virtual void OnReceivedUplinkRecoverablePacketLossFraction(
float uplink_recoverable_packet_loss_fraction);
// Provides target audio bitrate to this encoder to allow it to adapt.
@ -246,6 +246,9 @@ class AudioEncoder {
virtual absl::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
const = 0;
// The maximum number of audio channels supported by WebRTC encoders.
static constexpr int kMaxNumberOfChannels = 24;
protected:
// Subclasses implement this to perform the actual encoding. Called by
// Encode().

View File

@ -44,6 +44,8 @@ class AudioEncoderFactory : public rtc::RefCountInterface {
// communication between the AudioEncoder and AudioDecoder instances, which is
// needed for some codecs with built-in bandwidth adaptation.)
//
// Returns null if the format isn't supported.
//
// Note: Implementations need to be robust against combinations other than
// one encoder, one decoder getting the same ID; such encoders must still
// work.

View File

@ -15,8 +15,9 @@
#include <vector>
#include "api/audio_codecs/audio_encoder_factory.h"
#include "api/field_trials_view.h"
#include "api/make_ref_counted.h"
#include "api/scoped_refptr.h"
#include "rtc_base/ref_counted_object.h"
namespace webrtc {
@ -36,7 +37,8 @@ struct Helper<> {
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
int payload_type,
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) {
absl::optional<AudioCodecPairId> codec_pair_id,
const FieldTrialsView* field_trials) {
return nullptr;
}
};
@ -63,13 +65,14 @@ struct Helper<T, Ts...> {
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
int payload_type,
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) {
absl::optional<AudioCodecPairId> codec_pair_id,
const FieldTrialsView* field_trials) {
auto opt_config = T::SdpToConfig(format);
if (opt_config) {
return T::MakeAudioEncoder(*opt_config, payload_type, codec_pair_id);
} else {
return Helper<Ts...>::MakeAudioEncoder(payload_type, format,
codec_pair_id);
codec_pair_id, field_trials);
}
}
};
@ -77,6 +80,10 @@ struct Helper<T, Ts...> {
template <typename... Ts>
class AudioEncoderFactoryT : public AudioEncoderFactory {
public:
explicit AudioEncoderFactoryT(const FieldTrialsView* field_trials) {
field_trials_ = field_trials;
}
std::vector<AudioCodecSpec> GetSupportedEncoders() override {
std::vector<AudioCodecSpec> specs;
Helper<Ts...>::AppendSupportedEncoders(&specs);
@ -92,8 +99,11 @@ class AudioEncoderFactoryT : public AudioEncoderFactory {
int payload_type,
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) override {
return Helper<Ts...>::MakeAudioEncoder(payload_type, format, codec_pair_id);
return Helper<Ts...>::MakeAudioEncoder(payload_type, format, codec_pair_id,
field_trials_);
}
const FieldTrialsView* field_trials_;
};
} // namespace audio_encoder_factory_template_impl
@ -103,8 +113,8 @@ class AudioEncoderFactoryT : public AudioEncoderFactory {
// Each encoder type is given as a template argument to the function; it should
// be a struct with the following static member functions:
//
// // Converts |audio_format| to a ConfigType instance. Returns an empty
// // optional if |audio_format| doesn't correctly specify an encoder of our
// // Converts `audio_format` to a ConfigType instance. Returns an empty
// // optional if `audio_format` doesn't correctly specify an encoder of our
// // type.
// absl::optional<ConfigType> SdpToConfig(const SdpAudioFormat& audio_format);
//
@ -134,7 +144,8 @@ class AudioEncoderFactoryT : public AudioEncoderFactory {
// TODO(kwiberg): Point at CreateBuiltinAudioEncoderFactory() for an example of
// how it is used.
template <typename... Ts>
rtc::scoped_refptr<AudioEncoderFactory> CreateAudioEncoderFactory() {
rtc::scoped_refptr<AudioEncoderFactory> CreateAudioEncoderFactory(
const FieldTrialsView* field_trials = nullptr) {
// There's no technical reason we couldn't allow zero template parameters,
// but such a factory couldn't create any encoders, and callers can do this
// by mistake by simply forgetting the <> altogether. So we forbid it in
@ -142,9 +153,9 @@ rtc::scoped_refptr<AudioEncoderFactory> CreateAudioEncoderFactory() {
static_assert(sizeof...(Ts) >= 1,
"Caller must give at least one template parameter");
return rtc::scoped_refptr<AudioEncoderFactory>(
new rtc::RefCountedObject<
audio_encoder_factory_template_impl::AudioEncoderFactoryT<Ts...>>());
return rtc::make_ref_counted<
audio_encoder_factory_template_impl::AudioEncoderFactoryT<Ts...>>(
field_trials);
}
} // namespace webrtc
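
A sketch of the per-codec contract described above, using AudioEncoderG711 as the trait; the PCMU format literal and payload type are arbitrary examples.

#include <memory>

#include "api/audio_codecs/audio_format.h"
#include "api/audio_codecs/g711/audio_encoder_g711.h"

std::unique_ptr<webrtc::AudioEncoder> TryMakePcmuEncoder(int payload_type) {
  const webrtc::SdpAudioFormat format("PCMU", 8000, 1);
  // SdpToConfig() returns nullopt for formats this trait does not handle.
  const auto config = webrtc::AudioEncoderG711::SdpToConfig(format);
  if (!config)
    return nullptr;
  return webrtc::AudioEncoderG711::MakeAudioEncoder(*config, payload_type);
}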

View File

@ -39,7 +39,7 @@ struct RTC_EXPORT SdpAudioFormat {
Parameters&& param);
~SdpAudioFormat();
// Returns true if this format is compatible with |o|. In SDP terminology:
// Returns true if this format is compatible with `o`. In SDP terminology:
// would it represent the same codec between an offer and an answer? As
// opposed to operator==, this method disregards codec parameters.
bool Matches(const SdpAudioFormat& o) const;

View File

@ -20,7 +20,6 @@
#if WEBRTC_USE_BUILTIN_ILBC
#include "api/audio_codecs/ilbc/audio_decoder_ilbc.h" // nogncheck
#endif
#include "api/audio_codecs/isac/audio_decoder_isac.h"
#if WEBRTC_USE_BUILTIN_OPUS
#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus.h"
#include "api/audio_codecs/opus/audio_decoder_opus.h" // nogncheck
@ -57,7 +56,7 @@ rtc::scoped_refptr<AudioDecoderFactory> CreateBuiltinAudioDecoderFactory() {
AudioDecoderOpus, NotAdvertised<AudioDecoderMultiChannelOpus>,
#endif
AudioDecoderIsac, AudioDecoderG722,
AudioDecoderG722,
#if WEBRTC_USE_BUILTIN_ILBC
AudioDecoderIlbc,

View File

@ -20,7 +20,6 @@
#if WEBRTC_USE_BUILTIN_ILBC
#include "api/audio_codecs/ilbc/audio_encoder_ilbc.h" // nogncheck
#endif
#include "api/audio_codecs/isac/audio_encoder_isac.h"
#if WEBRTC_USE_BUILTIN_OPUS
#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus.h"
#include "api/audio_codecs/opus/audio_encoder_opus.h" // nogncheck
@ -47,8 +46,10 @@ struct NotAdvertised {
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt) {
return T::MakeAudioEncoder(config, payload_type, codec_pair_id);
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr) {
return T::MakeAudioEncoder(config, payload_type, codec_pair_id,
field_trials);
}
};
@ -61,7 +62,7 @@ rtc::scoped_refptr<AudioEncoderFactory> CreateBuiltinAudioEncoderFactory() {
AudioEncoderOpus, NotAdvertised<AudioEncoderMultiChannelOpus>,
#endif
AudioEncoderIsac, AudioEncoderG722,
AudioEncoderG722,
#if WEBRTC_USE_BUILTIN_ILBC
AudioEncoderIlbc,

View File

@ -21,9 +21,11 @@ rtc_library("audio_encoder_g711") {
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:g711",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base:safe_conversions",
"../../../rtc_base:safe_minmax",
"../../../rtc_base:stringutils",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
@ -41,8 +43,9 @@ rtc_library("audio_decoder_g711") {
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:g711",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base:safe_conversions",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [

View File

@ -28,7 +28,10 @@ absl::optional<AudioDecoderG711::Config> AudioDecoderG711::SdpToConfig(
Config config;
config.type = is_pcmu ? Config::Type::kPcmU : Config::Type::kPcmA;
config.num_channels = rtc::dchecked_cast<int>(format.num_channels);
RTC_DCHECK(config.IsOk());
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return absl::nullopt;
}
return config;
} else {
return absl::nullopt;
@ -44,14 +47,19 @@ void AudioDecoderG711::AppendSupportedDecoders(
std::unique_ptr<AudioDecoder> AudioDecoderG711::MakeAudioDecoder(
const Config& config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
RTC_DCHECK(config.IsOk());
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
switch (config.type) {
case Config::Type::kPcmU:
return std::make_unique<AudioDecoderPcmU>(config.num_channels);
case Config::Type::kPcmA:
return std::make_unique<AudioDecoderPcmA>(config.num_channels);
default:
RTC_DCHECK_NOTREACHED();
return nullptr;
}
}

View File

@ -18,6 +18,7 @@
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
@ -28,7 +29,9 @@ struct RTC_EXPORT AudioDecoderG711 {
struct Config {
enum class Type { kPcmU, kPcmA };
bool IsOk() const {
return (type == Type::kPcmU || type == Type::kPcmA) && num_channels >= 1;
return (type == Type::kPcmU || type == Type::kPcmA) &&
num_channels >= 1 &&
num_channels <= AudioDecoder::kMaxNumberOfChannels;
}
Type type;
int num_channels;
@ -37,7 +40,8 @@ struct RTC_EXPORT AudioDecoderG711 {
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const Config& config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc

View File

@ -38,7 +38,10 @@ absl::optional<AudioEncoderG711::Config> AudioEncoderG711::SdpToConfig(
config.frame_size_ms = rtc::SafeClamp(10 * (*ptime / 10), 10, 60);
}
}
RTC_DCHECK(config.IsOk());
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return absl::nullopt;
}
return config;
} else {
return absl::nullopt;
@ -61,8 +64,12 @@ AudioCodecInfo AudioEncoderG711::QueryAudioEncoder(const Config& config) {
std::unique_ptr<AudioEncoder> AudioEncoderG711::MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
RTC_DCHECK(config.IsOk());
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
switch (config.type) {
case Config::Type::kPcmU: {
AudioEncoderPcmU::Config impl_config;
@ -79,6 +86,7 @@ std::unique_ptr<AudioEncoder> AudioEncoderG711::MakeAudioEncoder(
return std::make_unique<AudioEncoderPcmA>(impl_config);
}
default: {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
}

View File

@ -18,6 +18,7 @@
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
@ -29,7 +30,9 @@ struct RTC_EXPORT AudioEncoderG711 {
enum class Type { kPcmU, kPcmA };
bool IsOk() const {
return (type == Type::kPcmU || type == Type::kPcmA) &&
frame_size_ms > 0 && frame_size_ms % 10 == 0 && num_channels >= 1;
frame_size_ms > 0 && frame_size_ms % 10 == 0 &&
num_channels >= 1 &&
num_channels <= AudioEncoder::kMaxNumberOfChannels;
}
Type type = Type::kPcmU;
int num_channels = 1;
@ -42,7 +45,8 @@ struct RTC_EXPORT AudioEncoderG711 {
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc

View File

@ -15,6 +15,7 @@ if (is_android) {
rtc_source_set("audio_encoder_g722_config") {
visibility = [ "*" ]
sources = [ "audio_encoder_g722_config.h" ]
deps = [ "..:audio_codecs_api" ]
}
rtc_library("audio_encoder_g722") {
@ -27,9 +28,11 @@ rtc_library("audio_encoder_g722") {
deps = [
":audio_encoder_g722_config",
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:g722",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base:safe_conversions",
"../../../rtc_base:safe_minmax",
"../../../rtc_base:stringutils",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
@ -47,8 +50,9 @@ rtc_library("audio_decoder_g722") {
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:g722",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base:safe_conversions",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [

View File

@ -21,12 +21,12 @@ namespace webrtc {
absl::optional<AudioDecoderG722::Config> AudioDecoderG722::SdpToConfig(
const SdpAudioFormat& format) {
return absl::EqualsIgnoreCase(format.name, "G722") &&
format.clockrate_hz == 8000 &&
(format.num_channels == 1 || format.num_channels == 2)
? absl::optional<Config>(
Config{rtc::dchecked_cast<int>(format.num_channels)})
: absl::nullopt;
if (absl::EqualsIgnoreCase(format.name, "G722") &&
format.clockrate_hz == 8000 &&
(format.num_channels == 1 || format.num_channels == 2)) {
return Config{rtc::dchecked_cast<int>(format.num_channels)};
}
return absl::nullopt;
}
void AudioDecoderG722::AppendSupportedDecoders(
@ -36,13 +36,19 @@ void AudioDecoderG722::AppendSupportedDecoders(
std::unique_ptr<AudioDecoder> AudioDecoderG722::MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
switch (config.num_channels) {
case 1:
return std::make_unique<AudioDecoderG722Impl>();
case 2:
return std::make_unique<AudioDecoderG722StereoImpl>();
default:
RTC_DCHECK_NOTREACHED();
return nullptr;
}
}

View File

@ -18,6 +18,7 @@
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
@ -33,7 +34,8 @@ struct RTC_EXPORT AudioDecoderG722 {
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
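The rewritten SdpToConfig keeps the same acceptance rule, the name "G722", an 8000 Hz RTP clock rate and one or two channels, but in an early-return form that is easier to extend. An illustrative negotiation path, with the header location assumed:

#include <memory>

#include "api/audio_codecs/g722/audio_decoder_g722.h"  // assumed path

std::unique_ptr<webrtc::AudioDecoder> G722DecoderFromSdp() {
  // G722 is signalled with an 8000 Hz RTP clock rate even though it codes
  // 16 kHz audio; two channels selects the stereo decoder variant.
  const webrtc::SdpAudioFormat format("G722", 8000, 2);
  const auto config = webrtc::AudioDecoderG722::SdpToConfig(format);
  if (!config) {
    return nullptr;  // format rejected during negotiation
  }
  return webrtc::AudioDecoderG722::MakeAudioDecoder(*config);
}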

View File

@ -38,8 +38,11 @@ absl::optional<AudioEncoderG722Config> AudioEncoderG722::SdpToConfig(
config.frame_size_ms = rtc::SafeClamp<int>(whole_packets * 10, 10, 60);
}
}
return config.IsOk() ? absl::optional<AudioEncoderG722Config>(config)
: absl::nullopt;
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return absl::nullopt;
}
return config;
}
void AudioEncoderG722::AppendSupportedEncoders(
@ -59,8 +62,12 @@ AudioCodecInfo AudioEncoderG722::QueryAudioEncoder(
std::unique_ptr<AudioEncoder> AudioEncoderG722::MakeAudioEncoder(
const AudioEncoderG722Config& config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
RTC_DCHECK(config.IsOk());
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
return std::make_unique<AudioEncoderG722Impl>(config, payload_type);
}

View File

@ -19,6 +19,7 @@
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_codecs/g722/audio_encoder_g722_config.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
@ -34,7 +35,8 @@ struct RTC_EXPORT AudioEncoderG722 {
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const AudioEncoderG722Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc

View File

@ -11,11 +11,14 @@
#ifndef API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_CONFIG_H_
#define API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_CONFIG_H_
#include "api/audio_codecs/audio_encoder.h"
namespace webrtc {
struct AudioEncoderG722Config {
bool IsOk() const {
return frame_size_ms > 0 && frame_size_ms % 10 == 0 && num_channels >= 1;
return frame_size_ms > 0 && frame_size_ms % 10 == 0 && num_channels >= 1 &&
num_channels <= AudioEncoder::kMaxNumberOfChannels;
}
int frame_size_ms = 20;
int num_channels = 1;
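With the added upper bound, IsOk() now rejects oversized channel counts as well as malformed frame sizes. A small validation sketch using the fields shown above (the header path is assumed):

#include "api/audio_codecs/g722/audio_encoder_g722_config.h"  // assumed path

bool IsUsableG722Config(int frame_size_ms, int num_channels) {
  webrtc::AudioEncoderG722Config config;
  config.frame_size_ms = frame_size_ms;  // must be a positive multiple of 10 ms
  config.num_channels = num_channels;    // 1 .. AudioEncoder::kMaxNumberOfChannels
  // For example {25, 1} fails the multiple-of-10 rule and {20, 0} the lower bound.
  return config.IsOk();
}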

View File

@ -27,9 +27,11 @@ rtc_library("audio_encoder_ilbc") {
deps = [
":audio_encoder_ilbc_config",
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:ilbc",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base:safe_conversions",
"../../../rtc_base:safe_minmax",
"../../../rtc_base:stringutils",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
@ -46,8 +48,8 @@ rtc_library("audio_decoder_ilbc") {
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:ilbc",
"../../../rtc_base:rtc_base_approved",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",

View File

@ -20,10 +20,11 @@ namespace webrtc {
absl::optional<AudioDecoderIlbc::Config> AudioDecoderIlbc::SdpToConfig(
const SdpAudioFormat& format) {
return absl::EqualsIgnoreCase(format.name, "ILBC") &&
format.clockrate_hz == 8000 && format.num_channels == 1
? absl::optional<Config>(Config())
: absl::nullopt;
if (absl::EqualsIgnoreCase(format.name, "ILBC") &&
format.clockrate_hz == 8000 && format.num_channels == 1) {
return Config();
}
return absl::nullopt;
}
void AudioDecoderIlbc::AppendSupportedDecoders(
@ -33,7 +34,8 @@ void AudioDecoderIlbc::AppendSupportedDecoders(
std::unique_ptr<AudioDecoder> AudioDecoderIlbc::MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
return std::make_unique<AudioDecoderIlbcImpl>();
}

View File

@ -18,6 +18,7 @@
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
namespace webrtc {
@ -29,7 +30,8 @@ struct AudioDecoderIlbc {
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
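These per-codec trait structs are rarely used directly; they are normally passed as template parameters to the decoder-factory helper, which helps explain why the new defaulted field_trials parameter is added uniformly across the codec structs. A sketch of that aggregation, assuming the usual factory-template header:

#include "api/audio_codecs/audio_decoder_factory_template.h"  // assumed path
#include "api/audio_codecs/g711/audio_decoder_g711.h"         // assumed path
#include "api/audio_codecs/g722/audio_decoder_g722.h"         // assumed path
#include "api/audio_codecs/ilbc/audio_decoder_ilbc.h"         // assumed path
#include "api/scoped_refptr.h"

// One factory covering the listed codecs; the template forwards to each
// struct's SdpToConfig/MakeAudioDecoder under the hood.
rtc::scoped_refptr<webrtc::AudioDecoderFactory> MakeNarrowbandDecoderFactory() {
  return webrtc::CreateAudioDecoderFactory<webrtc::AudioDecoderG711,
                                           webrtc::AudioDecoderG722,
                                           webrtc::AudioDecoderIlbc>();
}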

View File

@ -32,7 +32,7 @@ int GetIlbcBitrate(int ptime) {
// 50 bytes per frame of 30 ms => (approx) 13333 bits/s.
return 13333;
default:
FATAL();
RTC_CHECK_NOTREACHED();
}
}
} // namespace
@ -53,8 +53,11 @@ absl::optional<AudioEncoderIlbcConfig> AudioEncoderIlbc::SdpToConfig(
config.frame_size_ms = rtc::SafeClamp<int>(whole_packets * 10, 20, 60);
}
}
return config.IsOk() ? absl::optional<AudioEncoderIlbcConfig>(config)
: absl::nullopt;
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return absl::nullopt;
}
return config;
}
void AudioEncoderIlbc::AppendSupportedEncoders(
@ -73,8 +76,12 @@ AudioCodecInfo AudioEncoderIlbc::QueryAudioEncoder(
std::unique_ptr<AudioEncoder> AudioEncoderIlbc::MakeAudioEncoder(
const AudioEncoderIlbcConfig& config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
RTC_DCHECK(config.IsOk());
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
return std::make_unique<AudioEncoderIlbcImpl>(config, payload_type);
}

View File

@ -19,6 +19,7 @@
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_codecs/ilbc/audio_encoder_ilbc_config.h"
#include "api/field_trials_view.h"
namespace webrtc {
@ -33,7 +34,8 @@ struct AudioEncoderIlbc {
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const AudioEncoderIlbcConfig& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
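The iLBC encoder still derives its frame size from the SDP ptime parameter (clamped to 20-60 ms in 10 ms steps) and its bitrate from that frame size, but an invalid derived config now produces absl::nullopt / nullptr rather than asserting in release builds. A hedged sketch of that path (the parameter-map SdpAudioFormat constructor and the header path are assumptions):

#include <memory>

#include "api/audio_codecs/ilbc/audio_encoder_ilbc.h"  // assumed path

std::unique_ptr<webrtc::AudioEncoder> IlbcEncoderFromSdp(int payload_type) {
  // "ptime":"30" maps to frame_size_ms = 30, which GetIlbcBitrate() turns
  // into roughly 13333 bits/s (50 bytes every 30 ms).
  const webrtc::SdpAudioFormat format("ILBC", 8000, 1, {{"ptime", "30"}});
  const auto config = webrtc::AudioEncoderIlbc::SdpToConfig(format);
  if (!config) {
    return nullptr;  // rejected format or invalid derived config
  }
  return webrtc::AudioEncoderIlbc::MakeAudioEncoder(*config, payload_type);
}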

View File

@ -1,133 +0,0 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
# The targets with _fix and _float suffixes unconditionally use the
# fixed-point and floating-point iSAC implementations, respectively.
# The targets without suffixes pick one of the implementations based
# on cleverly chosen criteria.
rtc_source_set("audio_encoder_isac") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
public = [ "audio_encoder_isac.h" ]
public_configs = [ ":isac_config" ]
if (current_cpu == "arm") {
deps = [ ":audio_encoder_isac_fix" ]
} else {
deps = [ ":audio_encoder_isac_float" ]
}
}
rtc_source_set("audio_decoder_isac") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
public = [ "audio_decoder_isac.h" ]
public_configs = [ ":isac_config" ]
if (current_cpu == "arm") {
deps = [ ":audio_decoder_isac_fix" ]
} else {
deps = [ ":audio_decoder_isac_float" ]
}
}
config("isac_config") {
visibility = [ ":*" ]
if (current_cpu == "arm") {
defines = [
"WEBRTC_USE_BUILTIN_ISAC_FIX=1",
"WEBRTC_USE_BUILTIN_ISAC_FLOAT=0",
]
} else {
defines = [
"WEBRTC_USE_BUILTIN_ISAC_FIX=0",
"WEBRTC_USE_BUILTIN_ISAC_FLOAT=1",
]
}
}
rtc_library("audio_encoder_isac_fix") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_encoder_isac_fix.cc",
"audio_encoder_isac_fix.h",
]
deps = [
"..:audio_codecs_api",
"../../../modules/audio_coding:isac_fix",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_decoder_isac_fix") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_isac_fix.cc",
"audio_decoder_isac_fix.h",
]
deps = [
"..:audio_codecs_api",
"../../../modules/audio_coding:isac_fix",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_encoder_isac_float") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_encoder_isac_float.cc",
"audio_encoder_isac_float.h",
]
deps = [
"..:audio_codecs_api",
"../../../modules/audio_coding:isac",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_decoder_isac_float") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_isac_float.cc",
"audio_decoder_isac_float.h",
]
deps = [
"..:audio_codecs_api",
"../../../modules/audio_coding:isac",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}

View File

@ -1,32 +0,0 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_H_
#define API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_H_
#if WEBRTC_USE_BUILTIN_ISAC_FIX && !WEBRTC_USE_BUILTIN_ISAC_FLOAT
#include "api/audio_codecs/isac/audio_decoder_isac_fix.h" // nogncheck
#elif WEBRTC_USE_BUILTIN_ISAC_FLOAT && !WEBRTC_USE_BUILTIN_ISAC_FIX
#include "api/audio_codecs/isac/audio_decoder_isac_float.h" // nogncheck
#else
#error "Must choose either fix or float"
#endif
namespace webrtc {
#if WEBRTC_USE_BUILTIN_ISAC_FIX
using AudioDecoderIsac = AudioDecoderIsacFix;
#elif WEBRTC_USE_BUILTIN_ISAC_FLOAT
using AudioDecoderIsac = AudioDecoderIsacFloat;
#endif
} // namespace webrtc
#endif // API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_H_

View File

@ -1,41 +0,0 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/isac/audio_decoder_isac_fix.h"
#include <memory>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"
namespace webrtc {
absl::optional<AudioDecoderIsacFix::Config> AudioDecoderIsacFix::SdpToConfig(
const SdpAudioFormat& format) {
return absl::EqualsIgnoreCase(format.name, "ISAC") &&
format.clockrate_hz == 16000 && format.num_channels == 1
? absl::optional<Config>(Config())
: absl::nullopt;
}
void AudioDecoderIsacFix::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
specs->push_back({{"ISAC", 16000, 1}, {16000, 1, 32000, 10000, 32000}});
}
std::unique_ptr<AudioDecoder> AudioDecoderIsacFix::MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
AudioDecoderIsacFixImpl::Config c;
c.sample_rate_hz = 16000;
return std::make_unique<AudioDecoderIsacFixImpl>(c);
}
} // namespace webrtc

View File

@ -1,38 +0,0 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FIX_H_
#define API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FIX_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// iSAC decoder API (fixed-point implementation) for use as a template
// parameter to CreateAudioDecoderFactory<...>().
struct RTC_EXPORT AudioDecoderIsacFix {
struct Config {}; // Empty---no config values needed!
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FIX_H_

View File

@ -1,48 +0,0 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/isac/audio_decoder_isac_float.h"
#include <memory>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"
namespace webrtc {
absl::optional<AudioDecoderIsacFloat::Config>
AudioDecoderIsacFloat::SdpToConfig(const SdpAudioFormat& format) {
if (absl::EqualsIgnoreCase(format.name, "ISAC") &&
(format.clockrate_hz == 16000 || format.clockrate_hz == 32000) &&
format.num_channels == 1) {
Config config;
config.sample_rate_hz = format.clockrate_hz;
return config;
} else {
return absl::nullopt;
}
}
void AudioDecoderIsacFloat::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
specs->push_back({{"ISAC", 16000, 1}, {16000, 1, 32000, 10000, 32000}});
specs->push_back({{"ISAC", 32000, 1}, {32000, 1, 56000, 10000, 56000}});
}
std::unique_ptr<AudioDecoder> AudioDecoderIsacFloat::MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
RTC_DCHECK(config.IsOk());
AudioDecoderIsacFloatImpl::Config c;
c.sample_rate_hz = config.sample_rate_hz;
return std::make_unique<AudioDecoderIsacFloatImpl>(c);
}
} // namespace webrtc

View File

@ -1,43 +0,0 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FLOAT_H_
#define API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FLOAT_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// iSAC decoder API (floating-point implementation) for use as a template
// parameter to CreateAudioDecoderFactory<...>().
struct RTC_EXPORT AudioDecoderIsacFloat {
struct Config {
bool IsOk() const {
return sample_rate_hz == 16000 || sample_rate_hz == 32000;
}
int sample_rate_hz = 16000;
};
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FLOAT_H_

View File

@ -1,32 +0,0 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_H_
#define API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_H_
#if WEBRTC_USE_BUILTIN_ISAC_FIX && !WEBRTC_USE_BUILTIN_ISAC_FLOAT
#include "api/audio_codecs/isac/audio_encoder_isac_fix.h" // nogncheck
#elif WEBRTC_USE_BUILTIN_ISAC_FLOAT && !WEBRTC_USE_BUILTIN_ISAC_FIX
#include "api/audio_codecs/isac/audio_encoder_isac_float.h" // nogncheck
#else
#error "Must choose either fix or float"
#endif
namespace webrtc {
#if WEBRTC_USE_BUILTIN_ISAC_FIX
using AudioEncoderIsac = AudioEncoderIsacFix;
#elif WEBRTC_USE_BUILTIN_ISAC_FLOAT
using AudioEncoderIsac = AudioEncoderIsacFloat;
#endif
} // namespace webrtc
#endif // API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_H_

View File

@ -1,64 +0,0 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/isac/audio_encoder_isac_fix.h"
#include <memory>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
absl::optional<AudioEncoderIsacFix::Config> AudioEncoderIsacFix::SdpToConfig(
const SdpAudioFormat& format) {
if (absl::EqualsIgnoreCase(format.name, "ISAC") &&
format.clockrate_hz == 16000 && format.num_channels == 1) {
Config config;
const auto ptime_iter = format.parameters.find("ptime");
if (ptime_iter != format.parameters.end()) {
const auto ptime = rtc::StringToNumber<int>(ptime_iter->second);
if (ptime && *ptime >= 60) {
config.frame_size_ms = 60;
}
}
return config;
} else {
return absl::nullopt;
}
}
void AudioEncoderIsacFix::AppendSupportedEncoders(
std::vector<AudioCodecSpec>* specs) {
const SdpAudioFormat fmt = {"ISAC", 16000, 1};
const AudioCodecInfo info = QueryAudioEncoder(*SdpToConfig(fmt));
specs->push_back({fmt, info});
}
AudioCodecInfo AudioEncoderIsacFix::QueryAudioEncoder(
AudioEncoderIsacFix::Config config) {
RTC_DCHECK(config.IsOk());
return {16000, 1, 32000, 10000, 32000};
}
std::unique_ptr<AudioEncoder> AudioEncoderIsacFix::MakeAudioEncoder(
AudioEncoderIsacFix::Config config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
RTC_DCHECK(config.IsOk());
AudioEncoderIsacFixImpl::Config c;
c.frame_size_ms = config.frame_size_ms;
c.bit_rate = config.bit_rate;
c.payload_type = payload_type;
return std::make_unique<AudioEncoderIsacFixImpl>(c);
}
} // namespace webrtc

View File

@ -1,52 +0,0 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FIX_H_
#define API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FIX_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// iSAC encoder API (fixed-point implementation) for use as a template
// parameter to CreateAudioEncoderFactory<...>().
struct RTC_EXPORT AudioEncoderIsacFix {
struct Config {
bool IsOk() const {
if (frame_size_ms != 30 && frame_size_ms != 60) {
return false;
}
if (bit_rate < 10000 || bit_rate > 32000) {
return false;
}
return true;
}
int frame_size_ms = 30;
int bit_rate = 32000; // Limit on short-term average bit rate, in bits/s.
};
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
static AudioCodecInfo QueryAudioEncoder(Config config);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
Config config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FIX_H_

View File

@ -1,77 +0,0 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/isac/audio_encoder_isac_float.h"
#include <memory>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
absl::optional<AudioEncoderIsacFloat::Config>
AudioEncoderIsacFloat::SdpToConfig(const SdpAudioFormat& format) {
if (absl::EqualsIgnoreCase(format.name, "ISAC") &&
(format.clockrate_hz == 16000 || format.clockrate_hz == 32000) &&
format.num_channels == 1) {
Config config;
config.sample_rate_hz = format.clockrate_hz;
config.bit_rate = format.clockrate_hz == 16000 ? 32000 : 56000;
if (config.sample_rate_hz == 16000) {
// For sample rate 16 kHz, optionally use 60 ms frames, instead of the
// default 30 ms.
const auto ptime_iter = format.parameters.find("ptime");
if (ptime_iter != format.parameters.end()) {
const auto ptime = rtc::StringToNumber<int>(ptime_iter->second);
if (ptime && *ptime >= 60) {
config.frame_size_ms = 60;
}
}
}
return config;
} else {
return absl::nullopt;
}
}
void AudioEncoderIsacFloat::AppendSupportedEncoders(
std::vector<AudioCodecSpec>* specs) {
for (int sample_rate_hz : {16000, 32000}) {
const SdpAudioFormat fmt = {"ISAC", sample_rate_hz, 1};
const AudioCodecInfo info = QueryAudioEncoder(*SdpToConfig(fmt));
specs->push_back({fmt, info});
}
}
AudioCodecInfo AudioEncoderIsacFloat::QueryAudioEncoder(
const AudioEncoderIsacFloat::Config& config) {
RTC_DCHECK(config.IsOk());
constexpr int min_bitrate = 10000;
const int max_bitrate = config.sample_rate_hz == 16000 ? 32000 : 56000;
const int default_bitrate = max_bitrate;
return {config.sample_rate_hz, 1, default_bitrate, min_bitrate, max_bitrate};
}
std::unique_ptr<AudioEncoder> AudioEncoderIsacFloat::MakeAudioEncoder(
const AudioEncoderIsacFloat::Config& config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
RTC_DCHECK(config.IsOk());
AudioEncoderIsacFloatImpl::Config c;
c.payload_type = payload_type;
c.sample_rate_hz = config.sample_rate_hz;
c.frame_size_ms = config.frame_size_ms;
c.bit_rate = config.bit_rate;
return std::make_unique<AudioEncoderIsacFloatImpl>(c);
}
} // namespace webrtc

View File

@ -1,66 +0,0 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FLOAT_H_
#define API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FLOAT_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// iSAC encoder API (floating-point implementation) for use as a template
// parameter to CreateAudioEncoderFactory<...>().
struct RTC_EXPORT AudioEncoderIsacFloat {
struct Config {
bool IsOk() const {
switch (sample_rate_hz) {
case 16000:
if (frame_size_ms != 30 && frame_size_ms != 60) {
return false;
}
if (bit_rate < 10000 || bit_rate > 32000) {
return false;
}
return true;
case 32000:
if (frame_size_ms != 30) {
return false;
}
if (bit_rate < 10000 || bit_rate > 56000) {
return false;
}
return true;
default:
return false;
}
}
int sample_rate_hz = 16000;
int frame_size_ms = 30;
int bit_rate = 32000; // Limit on short-term average bit rate, in bits/s.
};
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
static AudioCodecInfo QueryAudioEncoder(const Config& config);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FLOAT_H_

View File

@ -20,10 +20,7 @@ rtc_library("audio_encoder_opus_config") {
"audio_encoder_opus_config.cc",
"audio_encoder_opus_config.h",
]
deps = [
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
deps = [ "../../../rtc_base/system:rtc_export" ]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
defines = []
if (rtc_opus_variable_complexity) {
@ -36,6 +33,7 @@ rtc_library("audio_encoder_opus_config") {
rtc_source_set("audio_decoder_opus_config") {
visibility = [ "*" ]
sources = [ "audio_decoder_multi_channel_opus_config.h" ]
deps = [ "..:audio_codecs_api" ]
}
rtc_library("audio_encoder_opus") {
@ -46,8 +44,8 @@ rtc_library("audio_encoder_opus") {
deps = [
":audio_encoder_opus_config",
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:webrtc_opus",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
@ -65,8 +63,8 @@ rtc_library("audio_decoder_opus") {
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:webrtc_opus",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
@ -82,8 +80,8 @@ rtc_library("audio_encoder_multiopus") {
sources = [ "audio_encoder_multi_channel_opus.cc" ]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:webrtc_multiopus",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
"../opus:audio_encoder_opus_config",
]
@ -100,8 +98,8 @@ rtc_library("audio_decoder_multiopus") {
deps = [
":audio_decoder_opus_config",
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:webrtc_multiopus",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [

View File

@ -64,7 +64,8 @@ void AudioDecoderMultiChannelOpus::AppendSupportedDecoders(
std::unique_ptr<AudioDecoder> AudioDecoderMultiChannelOpus::MakeAudioDecoder(
AudioDecoderMultiChannelOpusConfig config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
return AudioDecoderMultiChannelOpusImpl::MakeAudioDecoder(config);
}
} // namespace webrtc

View File

@ -19,6 +19,7 @@
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus_config.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
@ -32,7 +33,8 @@ struct RTC_EXPORT AudioDecoderMultiChannelOpus {
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
AudioDecoderMultiChannelOpusConfig config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc

View File

@ -13,6 +13,8 @@
#include <vector>
#include "api/audio_codecs/audio_decoder.h"
namespace webrtc {
struct AudioDecoderMultiChannelOpusConfig {
// The number of channels that the decoder will output.
@ -30,7 +32,8 @@ struct AudioDecoderMultiChannelOpusConfig {
std::vector<unsigned char> channel_mapping;
bool IsOk() const {
if (num_channels < 0 || num_streams < 0 || coupled_streams < 0) {
if (num_channels < 1 || num_channels > AudioDecoder::kMaxNumberOfChannels ||
num_streams < 0 || coupled_streams < 0) {
return false;
}
if (num_streams < coupled_streams) {
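Beyond the existing stream-count consistency checks, IsOk() now also rejects channel counts of zero or above AudioDecoder::kMaxNumberOfChannels. A purely illustrative 5.1 configuration; the channel-mapping values follow the conventional Opus surround layout and are an assumption, not something taken from this diff:

#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus_config.h"  // assumed path

webrtc::AudioDecoderMultiChannelOpusConfig MakeSurroundConfig() {
  webrtc::AudioDecoderMultiChannelOpusConfig config;
  config.num_channels = 6;     // 5.1, within [1, AudioDecoder::kMaxNumberOfChannels]
  config.num_streams = 4;      // four Opus streams in total...
  config.coupled_streams = 2;  // ...two of them stereo-coupled
  config.channel_mapping = {0, 4, 1, 2, 3, 5};  // assumed conventional 5.1 mapping
  // config.IsOk() is expected to hold for this combination.
  return config;
}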

View File

@ -51,7 +51,10 @@ absl::optional<AudioDecoderOpus::Config> AudioDecoderOpus::SdpToConfig(
num_channels) {
Config config;
config.num_channels = *num_channels;
RTC_DCHECK(config.IsOk());
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return absl::nullopt;
}
return config;
} else {
return absl::nullopt;
@ -70,8 +73,12 @@ void AudioDecoderOpus::AppendSupportedDecoders(
std::unique_ptr<AudioDecoder> AudioDecoderOpus::MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
RTC_DCHECK(config.IsOk());
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
return std::make_unique<AudioDecoderOpusImpl>(config.num_channels,
config.sample_rate_hz);
}

View File

@ -18,6 +18,7 @@
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
@ -34,7 +35,8 @@ struct RTC_EXPORT AudioDecoderOpus {
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
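Both SdpToConfig and MakeAudioDecoder now fail softly on an invalid channel count instead of only asserting. A minimal construction sketch, assuming Config exposes the num_channels and sample_rate_hz members used in the .cc change above:

#include <memory>

#include "api/audio_codecs/opus/audio_decoder_opus.h"  // assumed path

std::unique_ptr<webrtc::AudioDecoder> MakeStereoOpusDecoder() {
  webrtc::AudioDecoderOpus::Config config;
  config.num_channels = 2;        // mono and stereo are the supported choices
  config.sample_rate_hz = 48000;  // full-band Opus output
  if (!config.IsOk()) {
    return nullptr;  // matches the release-mode behaviour of MakeAudioDecoder
  }
  return webrtc::AudioDecoderOpus::MakeAudioDecoder(config);
}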

View File

@ -66,7 +66,8 @@ AudioCodecInfo AudioEncoderMultiChannelOpus::QueryAudioEncoder(
std::unique_ptr<AudioEncoder> AudioEncoderMultiChannelOpus::MakeAudioEncoder(
const AudioEncoderMultiChannelOpusConfig& config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
return AudioEncoderMultiChannelOpusImpl::MakeAudioEncoder(config,
payload_type);
}

View File

@ -19,6 +19,7 @@
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus_config.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
@ -33,7 +34,8 @@ struct RTC_EXPORT AudioEncoderMultiChannelOpus {
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc

View File

@ -38,7 +38,7 @@ operator=(const AudioEncoderMultiChannelOpusConfig&) = default;
bool AudioEncoderMultiChannelOpusConfig::IsOk() const {
if (frame_size_ms <= 0 || frame_size_ms % 10 != 0)
return false;
if (num_channels < 0 || num_channels >= 255) {
if (num_channels >= 255) {
return false;
}
if (bitrate_bps < kMinBitrateBps || bitrate_bps > kMaxBitrateBps)
@ -47,7 +47,7 @@ bool AudioEncoderMultiChannelOpusConfig::IsOk() const {
return false;
// Check the lengths:
if (num_channels < 0 || num_streams < 0 || coupled_streams < 0) {
if (num_streams < 0 || coupled_streams < 0) {
return false;
}
if (num_streams < coupled_streams) {

View File

@ -32,7 +32,12 @@ AudioCodecInfo AudioEncoderOpus::QueryAudioEncoder(
std::unique_ptr<AudioEncoder> AudioEncoderOpus::MakeAudioEncoder(
const AudioEncoderOpusConfig& config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
return AudioEncoderOpusImpl::MakeAudioEncoder(config, payload_type);
}

View File

@ -19,6 +19,7 @@
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_codecs/opus/audio_encoder_opus_config.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
@ -34,7 +35,8 @@ struct RTC_EXPORT AudioEncoderOpus {
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const AudioEncoderOpusConfig& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
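The Opus encoder entry point gains the same defaulted field_trials parameter and now returns nullptr in release builds when the config fails IsOk(). A hedged usage sketch; AudioEncoderOpusConfig and its defaults live in audio_encoder_opus_config.h, which is not shown in this excerpt, so treat the field use as an assumption:

#include <memory>

#include "api/audio_codecs/opus/audio_encoder_opus.h"  // assumed path

std::unique_ptr<webrtc::AudioEncoder> MakeDefaultOpusEncoder(int payload_type) {
  webrtc::AudioEncoderOpusConfig config;  // library defaults, assumed valid
  config.frame_size_ms = 20;              // typical WebRTC packet time
  if (!config.IsOk()) {
    return nullptr;  // a bad config no longer yields an encoder in release builds
  }
  // codec_pair_id and field_trials keep their defaults (absl::nullopt / nullptr).
  return webrtc::AudioEncoderOpus::MakeAudioEncoder(config, payload_type);
}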

Some files were not shown because too many files have changed in this diff.