Bump to WebRTC M120 release

Some API deprecations -- ExperimentalAgc and ExperimentalNs are gone.
We're continuing to carry iSAC even though it's gone upstream, but maybe
we'll want to drop that soon.
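
For downstream users, a rough migration sketch for the removed knobs. The mapping below is our assumption about the closest M120 equivalents via AudioProcessing::Config, not something this commit prescribes; `apm` stands for an existing webrtc::AudioProcessing instance:

// Pre-M120 (now removed): experimental toggles were injected via webrtc::Config.
//   webrtc::Config config;
//   config.Set<webrtc::ExperimentalAgc>(new webrtc::ExperimentalAgc(true));
//   config.Set<webrtc::ExperimentalNs>(new webrtc::ExperimentalNs(true));
// M120: the surviving knobs live on webrtc::AudioProcessing::Config.
webrtc::AudioProcessing::Config apm_config;
apm_config.gain_controller2.enabled = true;   // rough analogue of ExperimentalAgc
apm_config.noise_suppression.enabled = true;  // rough analogue of ExperimentalNs
apm->ApplyConfig(apm_config);
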
Arun Raghavan 2023-12-12 10:42:58 -05:00
parent 9a202fb8c2
commit c6abf6cd3f
479 changed files with 20900 additions and 11996 deletions

meson.build
View File

@ -1,5 +1,5 @@
project('webrtc-audio-processing', 'c', 'cpp',
version : '1.3',
version : '1.0',
meson_version : '>= 0.63',
default_options : [ 'warning_level=1',
'buildtype=debugoptimized',
@ -166,6 +166,8 @@ endif
common_cflags = [
'-DWEBRTC_LIBRARY_IMPL',
'-DWEBRTC_ENABLE_SYMBOL_EXPORT',
# avoid windows.h/winsock2.h conflicts
'-D_WINSOCKAPI_',
'-DNDEBUG'
] + platform_cflags + os_cflags + arch_cflags
common_cxxflags = common_cflags

webrtc/BUILD.gn
View File

@ -12,6 +12,15 @@
# you add a new build file, there must be some path of dependencies from this
# file to your new one or GN won't know about it.
# Use of visibility = clauses:
# The default visibility for all rtc_ targets is equivalent to "//*", or
# "all targets in webrtc can depend on this, nothing outside can".
#
# When overriding, the choices are:
# - visibility = [ "*" ] - public. Stuff outside webrtc can use this.
# - visibility = [ ":*" ] - directory private.
# As a general guideline, only targets in api/ should have public visibility.
import("//build/config/linux/pkg_config.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("webrtc.gni")
@ -21,6 +30,7 @@ if (rtc_enable_protobuf) {
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
import("//third_party/jni_zero/jni_zero.gni")
}
if (!build_with_chromium) {
@ -38,7 +48,6 @@ if (!build_with_chromium) {
if (rtc_include_tests) {
deps += [
":rtc_unittests",
":slow_tests",
":video_engine_tests",
":voip_unittests",
":webrtc_nonparallel_tests",
@ -54,9 +63,14 @@ if (!build_with_chromium) {
"modules/remote_bitrate_estimator:rtp_to_text",
"modules/rtp_rtcp:test_packet_masks_metrics",
"modules/video_capture:video_capture_internal_impl",
"modules/video_coding:video_codec_perf_tests",
"net/dcsctp:dcsctp_unittests",
"pc:peerconnection_unittests",
"pc:rtc_pc_unittests",
"pc:slow_peer_connection_unittests",
"pc:svc_tests",
"rtc_tools:rtp_generator",
"rtc_tools:video_encoder",
"rtc_tools:video_replay",
"stats:rtc_stats_unittests",
"system_wrappers:system_wrappers_unittests",
@ -71,6 +85,13 @@ if (!build_with_chromium) {
# see bugs.webrtc.org/11027#c5.
deps += [ ":webrtc_lib_link_test" ]
}
if (is_ios) {
deps += [
"examples:apprtcmobile_tests",
"sdk:sdk_framework_unittests",
"sdk:sdk_unittests",
]
}
if (is_android) {
deps += [
"examples:android_examples_junit_tests",
@ -82,11 +103,16 @@ if (!build_with_chromium) {
}
if (rtc_enable_protobuf) {
deps += [
"audio:low_bandwidth_audio_test",
"logging:rtc_event_log_rtp_dump",
"tools_webrtc/perf:webrtc_dashboard_upload",
]
}
if ((is_linux || is_chromeos) && rtc_use_pipewire) {
deps += [ "modules/desktop_capture:shared_screencast_stream_test" ]
}
}
if (target_os == "android") {
deps += [ "tools_webrtc:binary_version_check" ]
}
}
}
@ -113,12 +139,23 @@ config("common_inherited_config") {
cflags = []
ldflags = []
if (rtc_enable_symbol_export || is_component_build) {
defines = [ "WEBRTC_ENABLE_SYMBOL_EXPORT" ]
if (rtc_jni_generator_legacy_symbols) {
defines += [ "RTC_JNI_GENERATOR_LEGACY_SYMBOLS" ]
}
if (build_with_mozilla) {
defines += [ "WEBRTC_MOZILLA_BUILD" ]
if (rtc_objc_prefix != "") {
defines += [ "RTC_OBJC_TYPE_PREFIX=${rtc_objc_prefix}" ]
}
if (rtc_dlog_always_on) {
defines += [ "DLOG_ALWAYS_ON" ]
}
if (rtc_enable_symbol_export || is_component_build) {
defines += [ "WEBRTC_ENABLE_SYMBOL_EXPORT" ]
}
if (rtc_enable_objc_symbol_export) {
defines += [ "WEBRTC_ENABLE_OBJC_SYMBOL_EXPORT" ]
}
if (!rtc_builtin_ssl_root_certificates) {
@ -133,6 +170,10 @@ config("common_inherited_config") {
defines += [ "WEBRTC_ENABLE_AVX2" ]
}
if (rtc_enable_win_wgc) {
defines += [ "RTC_ENABLE_WIN_WGC" ]
}
# Some tests need to declare their own trace event handlers. If this define is
# not set, the first time TRACE_EVENT_* is called it will store the return
# value for the current handler in a static variable, so that subsequent
@ -210,14 +251,6 @@ config("common_inherited_config") {
}
}
# TODO(bugs.webrtc.org/9693): Remove the possibility to suppress this warning
# as soon as WebRTC compiles without it.
config("no_exit_time_destructors") {
if (is_clang) {
cflags = [ "-Wno-exit-time-destructors" ]
}
}
# TODO(bugs.webrtc.org/9693): Remove the possibility to suppress this warning
# as soon as WebRTC compiles without it.
config("no_global_constructors") {
@ -249,6 +282,18 @@ config("common_config") {
defines += [ "WEBRTC_ENABLE_PROTOBUF=0" ]
}
if (rtc_strict_field_trials == "") {
defines += [ "WEBRTC_STRICT_FIELD_TRIALS=0" ]
} else if (rtc_strict_field_trials == "dcheck") {
defines += [ "WEBRTC_STRICT_FIELD_TRIALS=1" ]
} else if (rtc_strict_field_trials == "warn") {
defines += [ "WEBRTC_STRICT_FIELD_TRIALS=2" ]
} else {
assert(false,
"Unsupported value for rtc_strict_field_trials: " +
"$rtc_strict_field_trials")
}
if (rtc_include_internal_audio_device) {
defines += [ "WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE" ]
}
@ -257,8 +302,16 @@ config("common_config") {
defines += [ "RTC_ENABLE_VP9" ]
}
if (rtc_use_h265) {
defines += [ "RTC_ENABLE_H265" ]
}
if (rtc_include_dav1d_in_internal_decoder_factory) {
defines += [ "RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY" ]
}
if (rtc_enable_sctp) {
defines += [ "HAVE_SCTP" ]
defines += [ "WEBRTC_HAVE_SCTP" ]
}
if (rtc_enable_external_auth) {
@ -273,6 +326,10 @@ config("common_config") {
defines += [ "WEBRTC_ABSL_MUTEX" ]
}
if (rtc_enable_libevent) {
defines += [ "WEBRTC_ENABLE_LIBEVENT" ]
}
if (rtc_disable_logging) {
defines += [ "RTC_DISABLE_LOGGING" ]
}
@ -293,7 +350,16 @@ config("common_config") {
defines += [ "WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE" ]
}
cflags = []
if (is_clang) {
cflags += [
# TODO(webrtc:13219): Fix -Wshadow instances and enable.
"-Wno-shadow",
# See https://reviews.llvm.org/D56731 for details about this
# warning.
"-Wctad-maybe-unsupported",
]
}
if (build_with_chromium) {
defines += [
@ -329,20 +395,17 @@ config("common_config") {
}
if (is_clang) {
cflags += [
"-Wc++11-narrowing",
"-Wimplicit-fallthrough",
"-Wthread-safety",
"-Winconsistent-missing-override",
"-Wundef",
]
cflags += [ "-Wc++11-narrowing" ]
# use_xcode_clang only refers to the iOS toolchain, host binaries use
# chromium's clang always.
if (!is_nacl &&
(!use_xcode_clang || current_toolchain == host_toolchain)) {
# Flags NaCl (Clang 3.7) and Xcode 7.3 (Clang clang-703.0.31) do not
# recognize.
if (!is_fuchsia) {
# Compiling with the Fuchsia SDK results in Wundef errors
# TODO(bugs.fuchsia.dev/100722): Remove from (!is_fuchsia) branch when
# Fuchsia build errors are fixed.
cflags += [ "-Wundef" ]
}
if (!is_nacl) {
# Flags NaCl (Clang 3.7) do not recognize.
cflags += [ "-Wunused-lambda-capture" ]
}
}
@ -404,7 +467,7 @@ config("common_config") {
]
}
if (use_fuzzing_engine && optimize_for_fuzzing) {
if (use_fuzzing_engine) {
# Used in Chromium's overrides to disable logging
defines += [ "WEBRTC_UNSAFE_FUZZER_MODE" ]
}
@ -419,10 +482,6 @@ config("common_config") {
config("common_objc") {
frameworks = [ "Foundation.framework" ]
if (rtc_use_metal_rendering) {
defines = [ "RTC_SUPPORTS_METAL" ]
}
}
if (!build_with_chromium) {
@ -448,6 +507,17 @@ if (!build_with_chromium) {
"api/rtc_event_log:rtc_event_log_factory",
"api/task_queue",
"api/task_queue:default_task_queue_factory",
"api/test/metrics",
"api/video_codecs:video_decoder_factory_template",
"api/video_codecs:video_decoder_factory_template_dav1d_adapter",
"api/video_codecs:video_decoder_factory_template_libvpx_vp8_adapter",
"api/video_codecs:video_decoder_factory_template_libvpx_vp9_adapter",
"api/video_codecs:video_decoder_factory_template_open_h264_adapter",
"api/video_codecs:video_encoder_factory_template",
"api/video_codecs:video_encoder_factory_template_libaom_av1_adapter",
"api/video_codecs:video_encoder_factory_template_libvpx_vp8_adapter",
"api/video_codecs:video_encoder_factory_template_libvpx_vp9_adapter",
"api/video_codecs:video_encoder_factory_template_open_h264_adapter",
"audio",
"call",
"common_audio",
@ -458,10 +528,7 @@ if (!build_with_chromium) {
"modules/video_capture:video_capture_internal_impl",
"p2p:rtc_p2p",
"pc:libjingle_peerconnection",
"pc:peerconnection",
"pc:rtc_pc",
"pc:rtc_pc_base",
"rtc_base",
"sdk",
"video",
]
@ -473,13 +540,6 @@ if (!build_with_chromium) {
]
}
if (rtc_include_builtin_video_codecs) {
deps += [
"api/video_codecs:builtin_video_decoder_factory",
"api/video_codecs:builtin_video_encoder_factory",
]
}
if (build_with_mozilla) {
deps += [
"api/video:video_frame",
@ -504,6 +564,10 @@ if (!build_with_chromium) {
rtc_executable("webrtc_lib_link_test") {
testonly = true
# This target is only used to check that linking succeeds, so do not check
# dependencies in gn check.
check_includes = false # no-presubmit-check TODO(bugs.webrtc.org/12785)
sources = [ "webrtc_lib_link_test.cc" ]
deps = [
# NOTE: Don't add deps here. If this test fails to link, it means you
@ -523,7 +587,7 @@ if (use_libfuzzer || use_afl) {
}
}
if (rtc_include_tests) {
if (rtc_include_tests && !build_with_chromium) {
rtc_test("rtc_unittests") {
testonly = true
@ -533,13 +597,17 @@ if (rtc_include_tests) {
"api/audio/test:audio_api_unittests",
"api/audio_codecs/test:audio_codecs_api_unittests",
"api/numerics:numerics_unittests",
"api/task_queue:pending_task_safety_flag_unittests",
"api/test/metrics:metrics_unittests",
"api/transport:stun_unittest",
"api/video/test:rtc_api_video_unittests",
"api/video_codecs/test:video_codecs_api_unittests",
"api/voip:compile_all_headers",
"call:fake_network_pipe_unittests",
"p2p:libstunprober_unittests",
"p2p:rtc_p2p_unittests",
"rtc_base:robo_caller_unittests",
"rtc_base:async_dns_resolver_unittests",
"rtc_base:callback_list_unittests",
"rtc_base:rtc_base_approved_unittests",
"rtc_base:rtc_base_unittests",
"rtc_base:rtc_json_unittests",
@ -547,12 +615,13 @@ if (rtc_include_tests) {
"rtc_base:rtc_operations_chain_unittests",
"rtc_base:rtc_task_queue_unittests",
"rtc_base:sigslot_unittest",
"rtc_base:task_queue_stdlib_unittest",
"rtc_base:untyped_function_unittest",
"rtc_base:weak_ptr_unittests",
"rtc_base/experiments:experiments_unittests",
"rtc_base/synchronization:sequence_checker_unittests",
"rtc_base/task_utils:pending_task_safety_flag_unittests",
"rtc_base/task_utils:to_queued_task_unittests",
"rtc_base/system:file_wrapper_unittests",
"rtc_base/task_utils:repeating_task_unittests",
"rtc_base/units:units_unittests",
"sdk:sdk_tests",
"test:rtp_test_utils",
"test:test_main",
@ -574,12 +643,9 @@ if (rtc_include_tests) {
]
shard_timeout = 900
}
if (is_ios || is_mac) {
deps += [ "sdk:rtc_unittests_objc" ]
}
}
if (rtc_enable_google_benchmarks) {
rtc_test("benchmarks") {
testonly = true
deps = [
@ -587,16 +653,6 @@ if (rtc_include_tests) {
"test:benchmark_main",
]
}
# This runs tests that must run in real time and therefore can take some
# time to execute. They are in a separate executable to avoid making the
# regular unittest suite too slow to run frequently.
rtc_test("slow_tests") {
testonly = true
deps = [
"rtc_base/task_utils:repeating_task_unittests",
"test:test_main",
]
}
# TODO(pbos): Rename test suite, this is no longer "just" for video targets.
@ -630,7 +686,12 @@ if (rtc_include_tests) {
]
data = video_engine_tests_resources
if (is_android) {
deps += [ "//testing/android/native_test:native_test_native_code" ]
use_default_launcher = false
deps += [
"//build/android/gtest_apk:native_test_instrumentation_test_runner_java",
"//testing/android/native_test:native_test_java",
"//testing/android/native_test:native_test_support",
]
shard_timeout = 900
}
if (is_ios) {
@ -663,7 +724,6 @@ if (rtc_include_tests) {
rtc_test("webrtc_perf_tests") {
testonly = true
deps = [
"audio:audio_perf_tests",
"call:call_perf_tests",
"modules/audio_coding:audio_coding_perf_tests",
"modules/audio_processing:audio_processing_perf_tests",
@ -675,7 +735,12 @@ if (rtc_include_tests) {
data = webrtc_perf_tests_resources
if (is_android) {
deps += [ "//testing/android/native_test:native_test_native_code" ]
use_default_launcher = false
deps += [
"//build/android/gtest_apk:native_test_instrumentation_test_runner_java",
"//testing/android/native_test:native_test_java",
"//testing/android/native_test:native_test_support",
]
shard_timeout = 4500
}
if (is_ios) {
@ -695,6 +760,7 @@ if (rtc_include_tests) {
rtc_test("voip_unittests") {
testonly = true
deps = [
"api/voip:compile_all_headers",
"api/voip:voip_engine_factory_unittests",
"audio/voip/test:audio_channel_unittests",
"audio/voip/test:audio_egress_unittests",
@ -705,6 +771,23 @@ if (rtc_include_tests) {
}
}
# Build target for standalone dcsctp
rtc_static_library("dcsctp") {
# Only the root target should depend on this.
visibility = [ "//:default" ]
sources = []
complete_static_lib = true
suppressed_configs += [ "//build/config/compiler:thin_archive" ]
defines = []
deps = [
"net/dcsctp/public:factory",
"net/dcsctp/public:socket",
"net/dcsctp/public:types",
"net/dcsctp/socket:dcsctp_socket",
"net/dcsctp/timer:task_queue_timeout",
]
}
# ---- Poisons ----
#
# Here is one empty dummy target for each poison type (needed because
@ -720,7 +803,7 @@ group("poison_audio_codecs") {
group("poison_default_task_queue") {
}
group("poison_rtc_json") {
group("poison_default_echo_detector") {
}
group("poison_software_video_codecs") {

webrtc/api/array_view.h
View File

@ -13,6 +13,7 @@
#include <algorithm>
#include <array>
#include <iterator>
#include <type_traits>
#include "rtc_base/checks.h"
@ -83,7 +84,7 @@ namespace rtc {
// a pointer if fixed-size) and trivially copyable, so it's probably cheaper to
// pass it by value than by const reference.
namespace impl {
namespace array_view_internal {
// Magic constant for indicating that the size of an ArrayView is variable
// instead of fixed.
@ -124,7 +125,7 @@ class ArrayViewBase<T, 0> {
// Specialized base class for ArrayViews of variable size.
template <typename T>
class ArrayViewBase<T, impl::kArrayViewVarSize> {
class ArrayViewBase<T, array_view_internal::kArrayViewVarSize> {
public:
ArrayViewBase(T* data, size_t size)
: data_(size == 0 ? nullptr : data), size_(size) {}
@ -141,18 +142,23 @@ class ArrayViewBase<T, impl::kArrayViewVarSize> {
size_t size_;
};
} // namespace impl
} // namespace array_view_internal
template <typename T, std::ptrdiff_t Size = impl::kArrayViewVarSize>
class ArrayView final : public impl::ArrayViewBase<T, Size> {
template <typename T,
std::ptrdiff_t Size = array_view_internal::kArrayViewVarSize>
class ArrayView final : public array_view_internal::ArrayViewBase<T, Size> {
public:
using value_type = T;
using reference = value_type&;
using const_reference = const value_type&;
using pointer = value_type*;
using const_pointer = const value_type*;
using const_iterator = const T*;
// Construct an ArrayView from a pointer and a length.
template <typename U>
ArrayView(U* data, size_t size)
: impl::ArrayViewBase<T, Size>::ArrayViewBase(data, size) {
: array_view_internal::ArrayViewBase<T, Size>::ArrayViewBase(data, size) {
RTC_DCHECK_EQ(size == 0 ? nullptr : data, this->data());
RTC_DCHECK_EQ(size, this->size());
RTC_DCHECK_EQ(!this->data(),
@ -166,7 +172,8 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
: ArrayView() {}
ArrayView(std::nullptr_t, size_t size)
: ArrayView(static_cast<T*>(nullptr), size) {
static_assert(Size == 0 || Size == impl::kArrayViewVarSize, "");
static_assert(Size == 0 || Size == array_view_internal::kArrayViewVarSize,
"");
RTC_DCHECK_EQ(0, size);
}
@ -174,7 +181,7 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
template <typename U, size_t N>
ArrayView(U (&array)[N]) // NOLINT
: ArrayView(array, N) {
static_assert(Size == N || Size == impl::kArrayViewVarSize,
static_assert(Size == N || Size == array_view_internal::kArrayViewVarSize,
"Array size must match ArrayView size");
}
@ -207,7 +214,7 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
// N> when M != N.
template <
typename U,
typename std::enable_if<Size != impl::kArrayViewVarSize &&
typename std::enable_if<Size != array_view_internal::kArrayViewVarSize &&
HasDataAndSize<U, T>::value>::type* = nullptr>
ArrayView(U& u) // NOLINT
: ArrayView(u.data(), u.size()) {
@ -215,7 +222,7 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
}
template <
typename U,
typename std::enable_if<Size != impl::kArrayViewVarSize &&
typename std::enable_if<Size != array_view_internal::kArrayViewVarSize &&
HasDataAndSize<U, T>::value>::type* = nullptr>
ArrayView(const U& u) // NOLINT(runtime/explicit)
: ArrayView(u.data(), u.size()) {
@ -235,13 +242,13 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
// const rtc::Buffer to ArrayView<const uint8_t>.
template <
typename U,
typename std::enable_if<Size == impl::kArrayViewVarSize &&
typename std::enable_if<Size == array_view_internal::kArrayViewVarSize &&
HasDataAndSize<U, T>::value>::type* = nullptr>
ArrayView(U& u) // NOLINT
: ArrayView(u.data(), u.size()) {}
template <
typename U,
typename std::enable_if<Size == impl::kArrayViewVarSize &&
typename std::enable_if<Size == array_view_internal::kArrayViewVarSize &&
HasDataAndSize<U, T>::value>::type* = nullptr>
ArrayView(const U& u) // NOLINT(runtime/explicit)
: ArrayView(u.data(), u.size()) {}
@ -258,6 +265,18 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
T* end() const { return this->data() + this->size(); }
const T* cbegin() const { return this->data(); }
const T* cend() const { return this->data() + this->size(); }
std::reverse_iterator<T*> rbegin() const {
return std::make_reverse_iterator(end());
}
std::reverse_iterator<T*> rend() const {
return std::make_reverse_iterator(begin());
}
std::reverse_iterator<const T*> crbegin() const {
return std::make_reverse_iterator(cend());
}
std::reverse_iterator<const T*> crend() const {
return std::make_reverse_iterator(cbegin());
}
ArrayView<T> subview(size_t offset, size_t size) const {
return offset < this->size()
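
A minimal usage sketch for the reverse iterators added above (our example, not part of the diff; SumNewestFirst is a hypothetical helper):

#include <cstdint>

#include "api/array_view.h"

// Walk a view of samples newest-first with the new crbegin()/crend().
int64_t SumNewestFirst(rtc::ArrayView<const int16_t> samples) {
  int64_t sum = 0;
  for (auto it = samples.crbegin(); it != samples.crend(); ++it)
    sum += *it;
  return sum;
}

// std::vector<int16_t> v = {1, 2, 3};
// SumNewestFirst(v);  // an ArrayView binds implicitly to v.data()/v.size().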

webrtc/api/audio/audio_frame.cc
View File

@ -11,8 +11,6 @@
#include "api/audio/audio_frame.h"
#include <string.h>
#include <algorithm>
#include <utility>
#include "rtc_base/checks.h"
#include "rtc_base/time_utils.h"
@ -24,35 +22,13 @@ AudioFrame::AudioFrame() {
static_assert(sizeof(data_) == kMaxDataSizeBytes, "kMaxDataSizeBytes");
}
void swap(AudioFrame& a, AudioFrame& b) {
using std::swap;
swap(a.timestamp_, b.timestamp_);
swap(a.elapsed_time_ms_, b.elapsed_time_ms_);
swap(a.ntp_time_ms_, b.ntp_time_ms_);
swap(a.samples_per_channel_, b.samples_per_channel_);
swap(a.sample_rate_hz_, b.sample_rate_hz_);
swap(a.num_channels_, b.num_channels_);
swap(a.channel_layout_, b.channel_layout_);
swap(a.speech_type_, b.speech_type_);
swap(a.vad_activity_, b.vad_activity_);
swap(a.profile_timestamp_ms_, b.profile_timestamp_ms_);
swap(a.packet_infos_, b.packet_infos_);
const size_t length_a = a.samples_per_channel_ * a.num_channels_;
const size_t length_b = b.samples_per_channel_ * b.num_channels_;
RTC_DCHECK_LE(length_a, AudioFrame::kMaxDataSizeSamples);
RTC_DCHECK_LE(length_b, AudioFrame::kMaxDataSizeSamples);
std::swap_ranges(a.data_, a.data_ + std::max(length_a, length_b), b.data_);
swap(a.muted_, b.muted_);
swap(a.absolute_capture_timestamp_ms_, b.absolute_capture_timestamp_ms_);
}
void AudioFrame::Reset() {
ResetWithoutMuting();
muted_ = true;
}
void AudioFrame::ResetWithoutMuting() {
// TODO(wu): Zero is a valid value for |timestamp_|. We should initialize
// TODO(wu): Zero is a valid value for `timestamp_`. We should initialize
// to an invalid value, or add a new member to indicate invalidity.
timestamp_ = 0;
elapsed_time_ms_ = -1;

webrtc/api/audio/audio_frame.h
View File

@ -14,11 +14,8 @@
#include <stddef.h>
#include <stdint.h>
#include <utility>
#include "api/audio/channel_layout.h"
#include "api/rtp_packet_infos.h"
#include "rtc_base/constructor_magic.h"
namespace webrtc {
@ -60,7 +57,8 @@ class AudioFrame {
AudioFrame();
friend void swap(AudioFrame& a, AudioFrame& b);
AudioFrame(const AudioFrame&) = delete;
AudioFrame& operator=(const AudioFrame&) = delete;
// Resets all members to their default state.
void Reset();
@ -139,7 +137,7 @@ class AudioFrame {
int64_t profile_timestamp_ms_ = 0;
// Information about packets used to assemble this audio frame. This is needed
// by |SourceTracker| when the frame is delivered to the RTCRtpReceiver's
// by `SourceTracker` when the frame is delivered to the RTCRtpReceiver's
// MediaStreamTrack, in order to implement getContributingSources(). See:
// https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources
//
@ -149,7 +147,7 @@ class AudioFrame {
// sync buffer is the small sample-holding buffer located after the audio
// decoder and before where samples are assembled into output frames.
//
// |RtpPacketInfos| may also be empty if the audio samples did not come from
// `RtpPacketInfos` may also be empty if the audio samples did not come from
// RTP packets. E.g. if the audio were locally generated by packet loss
// concealment, comfort noise generation, etc.
RtpPacketInfos packet_infos_;
@ -165,11 +163,9 @@ class AudioFrame {
// Absolute capture timestamp when this audio frame was originally captured.
// This is only valid for audio frames captured on this machine. The absolute
// capture timestamp of a received frame is found in |packet_infos_|.
// capture timestamp of a received frame is found in `packet_infos_`.
// This timestamp MUST be based on the same clock as rtc::TimeMillis().
absl::optional<int64_t> absolute_capture_timestamp_ms_;
RTC_DISALLOW_COPY_AND_ASSIGN(AudioFrame);
};
} // namespace webrtc

webrtc/api/audio/channel_layout.cc
View File

@ -275,7 +275,7 @@ const char* ChannelLayoutToString(ChannelLayout layout) {
case CHANNEL_LAYOUT_BITSTREAM:
return "BITSTREAM";
}
RTC_NOTREACHED() << "Invalid channel layout provided: " << layout;
RTC_DCHECK_NOTREACHED() << "Invalid channel layout provided: " << layout;
return "";
}

webrtc/api/audio/echo_canceller3_config.cc
View File

@ -153,6 +153,7 @@ bool EchoCanceller3Config::Validate(EchoCanceller3Config* config) {
res = res & Limit(&c->filter.config_change_duration_blocks, 0, 100000);
res = res & Limit(&c->filter.initial_state_seconds, 0.f, 100.f);
res = res & Limit(&c->filter.coarse_reset_hangover_blocks, 0, 250000);
res = res & Limit(&c->erle.min, 1.f, 100000.f);
res = res & Limit(&c->erle.max_l, 1.f, 100000.f);
@ -165,6 +166,7 @@ bool EchoCanceller3Config::Validate(EchoCanceller3Config* config) {
res = res & Limit(&c->ep_strength.default_gain, 0.f, 1000000.f);
res = res & Limit(&c->ep_strength.default_len, -1.f, 1.f);
res = res & Limit(&c->ep_strength.nearend_len, -1.0f, 1.0f);
res =
res & Limit(&c->echo_audibility.low_render_limit, 0.f, 32768.f * 32768.f);
@ -228,6 +230,12 @@ bool EchoCanceller3Config::Validate(EchoCanceller3Config* config) {
res =
res & Limit(&c->suppressor.nearend_tuning.max_dec_factor_lf, 0.f, 100.f);
res = res & Limit(&c->suppressor.last_permanent_lf_smoothing_band, 0, 64);
res = res & Limit(&c->suppressor.last_lf_smoothing_band, 0, 64);
res = res & Limit(&c->suppressor.last_lf_band, 0, 63);
res = res &
Limit(&c->suppressor.first_hf_band, c->suppressor.last_lf_band + 1, 64);
res = res & Limit(&c->suppressor.dominant_nearend_detection.enr_threshold,
0.f, 1000000.f);
res = res & Limit(&c->suppressor.dominant_nearend_detection.snr_threshold,

webrtc/api/audio/echo_canceller3_config.h
View File

@ -43,6 +43,7 @@ struct RTC_EXPORT EchoCanceller3Config {
size_t hysteresis_limit_blocks = 1;
size_t fixed_capture_delay_samples = 0;
float delay_estimate_smoothing = 0.7f;
float delay_estimate_smoothing_delay_found = 0.7f;
float delay_candidate_detection_threshold = 0.2f;
struct DelaySelectionThresholds {
int initial;
@ -58,6 +59,7 @@ struct RTC_EXPORT EchoCanceller3Config {
};
AlignmentMixing render_alignment_mixing = {false, true, 10000.f, true};
AlignmentMixing capture_alignment_mixing = {false, true, 10000.f, false};
bool detect_pre_echo = true;
} delay;
struct Filter {
@ -86,9 +88,11 @@ struct RTC_EXPORT EchoCanceller3Config {
size_t config_change_duration_blocks = 250;
float initial_state_seconds = 2.5f;
int coarse_reset_hangover_blocks = 25;
bool conservative_initial_phase = false;
bool enable_coarse_filter_output_usage = true;
bool use_linear_filter = true;
bool high_pass_filter_echo_reference = false;
bool export_linear_aec_output = false;
} filter;
@ -105,8 +109,11 @@ struct RTC_EXPORT EchoCanceller3Config {
struct EpStrength {
float default_gain = 1.f;
float default_len = 0.83f;
float nearend_len = 0.83f;
bool echo_can_saturate = true;
bool bounded_erl = false;
bool erle_onset_compensation_in_dominant_nearend = false;
bool use_conservative_tail_frequency_response = true;
} ep_strength;
struct EchoAudibility {
@ -190,6 +197,12 @@ struct RTC_EXPORT EchoCanceller3Config {
2.0f,
0.25f);
bool lf_smoothing_during_initial_phase = true;
int last_permanent_lf_smoothing_band = 0;
int last_lf_smoothing_band = 5;
int last_lf_band = 5;
int first_hf_band = 8;
struct DominantNearendDetection {
float enr_threshold = .25f;
float enr_exit_threshold = 10.f;
@ -197,6 +210,7 @@ struct RTC_EXPORT EchoCanceller3Config {
int hold_duration = 50;
int trigger_threshold = 12;
bool use_during_initial_phase = true;
bool use_unbounded_echo_spectrum = true;
} dominant_nearend_detection;
struct SubbandNearendDetection {
@ -221,7 +235,15 @@ struct RTC_EXPORT EchoCanceller3Config {
} high_bands_suppression;
float floor_first_increase = 0.00001f;
bool conservative_hf_suppression = false;
} suppressor;
struct MultiChannel {
bool detect_stereo_content = true;
float stereo_detection_threshold = 0.0f;
int stereo_detection_timeout_threshold_seconds = 300;
float stereo_detection_hysteresis_seconds = 2.0f;
} multi_channel;
};
} // namespace webrtc
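
A short sketch of touching the new multi_channel knobs and re-running the Validate() pass from the .cc hunk above (the values are made up):

#include "api/audio/echo_canceller3_config.h"

bool ConfigureAec3(webrtc::EchoCanceller3Config* config) {
  config->multi_channel.detect_stereo_content = false;  // made-up choice
  config->multi_channel.stereo_detection_hysteresis_seconds = 1.0f;
  // Validate() applies the Limit() checks shown earlier in this commit.
  return webrtc::EchoCanceller3Config::Validate(config);
}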

webrtc/api/audio/echo_control.h
View File

@ -48,6 +48,13 @@ class EchoControl {
// Provides an optional external estimate of the audio buffer delay.
virtual void SetAudioBufferDelay(int delay_ms) = 0;
// Specifies whether the capture output will be used. The purpose of this is
// to allow the echo controller to deactivate some of the processing when the
// resulting output is not used anyway, for instance when the endpoint is
// muted.
// TODO(b/177830919): Make pure virtual.
virtual void SetCaptureOutputUsage(bool capture_output_used) {}
// Returns whether the signal is altered.
virtual bool ActiveProcessing() const = 0;
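
A hedged sketch of driving the new SetCaptureOutputUsage() hook around endpoint mute (the wrapper function is our invention):

#include "api/audio/echo_control.h"

// When the capture output is discarded anyway (endpoint muted), let the echo
// controller skip the processing that only affects that output.
void OnMuteStateChanged(webrtc::EchoControl* echo_controller, bool muted) {
  echo_controller->SetCaptureOutputUsage(/*capture_output_used=*/!muted);
}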

webrtc/api/audio_codecs/audio_decoder.cc
View File

@ -10,8 +10,6 @@
#include "api/audio_codecs/audio_decoder.h"
#include <assert.h>
#include <memory>
#include <utility>
@ -162,9 +160,10 @@ AudioDecoder::SpeechType AudioDecoder::ConvertSpeechType(int16_t type) {
case 2:
return kComfortNoise;
default:
assert(false);
RTC_DCHECK_NOTREACHED();
return kSpeech;
}
}
constexpr int AudioDecoder::kMaxNumberOfChannels;
} // namespace webrtc

webrtc/api/audio_codecs/audio_decoder.h
View File

@ -20,7 +20,6 @@
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "rtc_base/buffer.h"
#include "rtc_base/constructor_magic.h"
namespace webrtc {
@ -37,6 +36,9 @@ class AudioDecoder {
AudioDecoder() = default;
virtual ~AudioDecoder() = default;
AudioDecoder(const AudioDecoder&) = delete;
AudioDecoder& operator=(const AudioDecoder&) = delete;
class EncodedAudioFrame {
public:
struct DecodeResult {
@ -53,8 +55,8 @@ class AudioDecoder {
// Returns true if this packet contains DTX.
virtual bool IsDtxPacket() const;
// Decodes this frame of audio and writes the result in |decoded|.
// |decoded| must be large enough to store as many samples as indicated by a
// Decodes this frame of audio and writes the result in `decoded`.
// `decoded` must be large enough to store as many samples as indicated by a
// call to Duration(). On success, returns an absl::optional containing the
// total number of samples across all channels, as well as whether the
// decoder produced comfort noise or speech. On failure, returns an empty
@ -85,8 +87,8 @@ class AudioDecoder {
// Let the decoder parse this payload and prepare zero or more decodable
// frames. Each frame must be between 10 ms and 120 ms long. The caller must
// ensure that the AudioDecoder object outlives any frame objects returned by
// this call. The decoder is free to swap or move the data from the |payload|
// buffer. |timestamp| is the input timestamp, in samples, corresponding to
// this call. The decoder is free to swap or move the data from the `payload`
// buffer. `timestamp` is the input timestamp, in samples, corresponding to
// the start of the payload.
virtual std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
uint32_t timestamp);
@ -95,12 +97,12 @@ class AudioDecoder {
// obsolete; callers should call ParsePayload instead. For now, subclasses
// must still implement DecodeInternal.
// Decodes |encode_len| bytes from |encoded| and writes the result in
// |decoded|. The maximum bytes allowed to be written into |decoded| is
// |max_decoded_bytes|. Returns the total number of samples across all
// channels. If the decoder produced comfort noise, |speech_type|
// Decodes `encode_len` bytes from `encoded` and writes the result in
// `decoded`. The maximum bytes allowed to be written into `decoded` is
// `max_decoded_bytes`. Returns the total number of samples across all
// channels. If the decoder produced comfort noise, `speech_type`
// is set to kComfortNoise, otherwise it is kSpeech. The desired output
// sample rate is provided in |sample_rate_hz|, which must be valid for the
// sample rate is provided in `sample_rate_hz`, which must be valid for the
// codec at hand.
int Decode(const uint8_t* encoded,
size_t encoded_len,
@ -123,11 +125,11 @@ class AudioDecoder {
// Calls the packet-loss concealment of the decoder to update the state after
// one or several lost packets. The caller has to make sure that the
// memory allocated in |decoded| should accommodate |num_frames| frames.
// memory allocated in `decoded` should accommodate `num_frames` frames.
virtual size_t DecodePlc(size_t num_frames, int16_t* decoded);
// Asks the decoder to generate packet-loss concealment and append it to the
// end of |concealment_audio|. The concealment audio should be in
// end of `concealment_audio`. The concealment audio should be in
// channel-interleaved format, with as many channels as the last decoded
// packet produced. The implementation must produce at least
// requested_samples_per_channel, or nothing at all. This is a signal to the
@ -136,7 +138,7 @@ class AudioDecoder {
// with the decoded audio on either side of the concealment.
// Note: The default implementation of GeneratePlc will be deleted soon. All
// implementations must provide their own, which can be a simple as a no-op.
// TODO(bugs.webrtc.org/9676): Remove default impementation.
// TODO(bugs.webrtc.org/9676): Remove default implementation.
virtual void GeneratePlc(size_t requested_samples_per_channel,
rtc::BufferT<int16_t>* concealment_audio);
@ -146,19 +148,19 @@ class AudioDecoder {
// Returns the last error code from the decoder.
virtual int ErrorCode();
// Returns the duration in samples-per-channel of the payload in |encoded|
// which is |encoded_len| bytes long. Returns kNotImplemented if no duration
// Returns the duration in samples-per-channel of the payload in `encoded`
// which is `encoded_len` bytes long. Returns kNotImplemented if no duration
// estimate is available, or -1 in case of an error.
virtual int PacketDuration(const uint8_t* encoded, size_t encoded_len) const;
// Returns the duration in samples-per-channel of the redundant payload in
// |encoded| which is |encoded_len| bytes long. Returns kNotImplemented if no
// `encoded` which is `encoded_len` bytes long. Returns kNotImplemented if no
// duration estimate is available, or -1 in case of an error.
virtual int PacketDurationRedundant(const uint8_t* encoded,
size_t encoded_len) const;
// Detects whether a packet has forward error correction. The packet is
// comprised of the samples in |encoded| which is |encoded_len| bytes long.
// comprised of the samples in `encoded` which is `encoded_len` bytes long.
// Returns true if the packet has FEC and false otherwise.
virtual bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const;
@ -170,6 +172,9 @@ class AudioDecoder {
// during the lifetime of the decoder.
virtual size_t Channels() const = 0;
// The maximum number of audio channels supported by WebRTC decoders.
static constexpr int kMaxNumberOfChannels = 24;
protected:
static SpeechType ConvertSpeechType(int16_t type);
@ -184,9 +189,6 @@ class AudioDecoder {
int sample_rate_hz,
int16_t* decoded,
SpeechType* speech_type);
private:
RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
};
} // namespace webrtc
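
A trivial sketch against the new channel-count ceiling (the helper is hypothetical):

#include <cstddef>

#include "api/audio_codecs/audio_decoder.h"

// Sanity-check a requested channel count against the decoder-side limit.
bool IsSupportedChannelCount(size_t channels) {
  return channels >= 1 &&
         channels <= static_cast<size_t>(
                         webrtc::AudioDecoder::kMaxNumberOfChannels);
}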

webrtc/api/audio_codecs/audio_encoder.cc
View File

@ -83,7 +83,7 @@ void AudioEncoder::OnReceivedUplinkPacketLossFraction(
void AudioEncoder::OnReceivedUplinkRecoverablePacketLossFraction(
float uplink_recoverable_packet_loss_fraction) {
RTC_NOTREACHED();
RTC_DCHECK_NOTREACHED();
}
void AudioEncoder::OnReceivedTargetAudioBitrate(int target_audio_bitrate_bps) {
@ -110,4 +110,5 @@ ANAStats AudioEncoder::GetANAStats() const {
return ANAStats();
}
constexpr int AudioEncoder::kMaxNumberOfChannels;
} // namespace webrtc

webrtc/api/audio_codecs/audio_encoder.h
View File

@ -16,12 +16,12 @@
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/call/bitrate_allocation.h"
#include "api/units/time_delta.h"
#include "rtc_base/buffer.h"
#include "rtc_base/deprecation.h"
namespace webrtc {
@ -95,13 +95,13 @@ class AudioEncoder {
// This is the main struct for auxiliary encoding information. Each encoded
// packet should be accompanied by one EncodedInfo struct, containing the
// total number of |encoded_bytes|, the |encoded_timestamp| and the
// |payload_type|. If the packet contains redundant encodings, the |redundant|
// total number of `encoded_bytes`, the `encoded_timestamp` and the
// `payload_type`. If the packet contains redundant encodings, the `redundant`
// vector will be populated with EncodedInfoLeaf structs. Each struct in the
// vector represents one encoding; the order of structs in the vector is the
// same as the order in which the actual payloads are written to the byte
// stream. When EncoderInfoLeaf structs are present in the vector, the main
// struct's |encoded_bytes| will be the sum of all the |encoded_bytes| in the
// struct's `encoded_bytes` will be the sum of all the `encoded_bytes` in the
// vector.
struct EncodedInfo : public EncodedInfoLeaf {
EncodedInfo();
@ -143,7 +143,7 @@ class AudioEncoder {
// Accepts one 10 ms block of input audio (i.e., SampleRateHz() / 100 *
// NumChannels() samples). Multi-channel audio must be sample-interleaved.
// The encoder appends zero or more bytes of output to |encoded| and returns
// The encoder appends zero or more bytes of output to `encoded` and returns
// additional encoding information. Encode() checks some preconditions, calls
// EncodeImpl() which does the actual work, and then checks some
// postconditions.
@ -182,12 +182,11 @@ class AudioEncoder {
// implementation does nothing.
virtual void SetMaxPlaybackRate(int frequency_hz);
// This is to be deprecated. Please use |OnReceivedTargetAudioBitrate|
// instead.
// Tells the encoder what average bitrate we'd like it to produce. The
// encoder is free to adjust or disregard the given bitrate (the default
// implementation does the latter).
RTC_DEPRECATED virtual void SetTargetBitrate(int target_bps);
ABSL_DEPRECATED("Use OnReceivedTargetAudioBitrate instead")
virtual void SetTargetBitrate(int target_bps);
// Causes this encoder to let go of any other encoders it contains, and
// returns a pointer to an array where they are stored (which is required to
@ -206,11 +205,12 @@ class AudioEncoder {
virtual void DisableAudioNetworkAdaptor();
// Provides uplink packet loss fraction to this encoder to allow it to adapt.
// |uplink_packet_loss_fraction| is in the range [0.0, 1.0].
// `uplink_packet_loss_fraction` is in the range [0.0, 1.0].
virtual void OnReceivedUplinkPacketLossFraction(
float uplink_packet_loss_fraction);
RTC_DEPRECATED virtual void OnReceivedUplinkRecoverablePacketLossFraction(
ABSL_DEPRECATED("")
virtual void OnReceivedUplinkRecoverablePacketLossFraction(
float uplink_recoverable_packet_loss_fraction);
// Provides target audio bitrate to this encoder to allow it to adapt.
@ -246,6 +246,9 @@ class AudioEncoder {
virtual absl::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
const = 0;
// The maximum number of audio channels supported by WebRTC encoders.
static constexpr int kMaxNumberOfChannels = 24;
protected:
// Subclasses implement this to perform the actual encoding. Called by
// Encode().

webrtc/api/call/bitrate_allocation.h
View File

@ -32,7 +32,7 @@ struct BitrateAllocationUpdate {
double packet_loss_ratio = 0;
// Predicted round trip time.
TimeDelta round_trip_time = TimeDelta::PlusInfinity();
// |bwe_period| is deprecated, use |stable_target_bitrate| allocation instead.
// `bwe_period` is deprecated, use `stable_target_bitrate` allocation instead.
TimeDelta bwe_period = TimeDelta::PlusInfinity();
// Congestion window pushback bitrate reduction fraction. Used in
// VideoStreamEncoder to reduce the bitrate by the given fraction

webrtc/api/location.h Normal file
View File

@ -0,0 +1,31 @@
/*
* Copyright 2023 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_LOCATION_H_
#define API_LOCATION_H_
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Location provides basic info about where an object was constructed, or was
// significantly brought to life. This is a stripped-down version of
// https://source.chromium.org/chromium/chromium/src/+/main:base/location.h
// that only specifies an interface compatible with how base::Location is
// supposed to be used.
// The declaration is overridden inside the Chromium build.
class RTC_EXPORT Location {
public:
static Location Current() { return Location(); }
};
} // namespace webrtc
#endif // API_LOCATION_H_
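
A sketch of the intended call pattern, mirroring how base::Location is used in Chromium (DoAsyncWork is hypothetical):

#include "api/location.h"

// Callers default a Location parameter to Current(). In the stand-alone build
// Location carries no data; the Chromium build's override can capture
// file/line at the call site through this same signature.
void DoAsyncWork(webrtc::Location location = webrtc::Location::Current()) {
  (void)location;  // the stripped-down Location has nothing to inspect
}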

webrtc/api/make_ref_counted.h Normal file
View File

@ -0,0 +1,120 @@
/*
* Copyright 2022 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_MAKE_REF_COUNTED_H_
#define API_MAKE_REF_COUNTED_H_
#include <type_traits>
#include <utility>
#include "rtc_base/ref_counted_object.h"
namespace rtc {
namespace webrtc_make_ref_counted_internal {
// Determines if the given class has AddRef and Release methods.
template <typename T>
class HasAddRefAndRelease {
private:
template <typename C,
decltype(std::declval<C>().AddRef())* = nullptr,
decltype(std::declval<C>().Release())* = nullptr>
static int Test(int);
template <typename>
static char Test(...);
public:
static constexpr bool value = std::is_same_v<decltype(Test<T>(0)), int>;
};
} // namespace webrtc_make_ref_counted_internal
// General utilities for constructing a reference counted class and the
// appropriate reference count implementation for that class.
//
// These utilities select either the `RefCountedObject` implementation or
// `FinalRefCountedObject` depending on whether the to-be-shared class is
// derived from the RefCountInterface interface or not (respectively).
// `make_ref_counted`:
//
// Use this when you want to construct a reference counted object of type T and
// get a `scoped_refptr<>` back. Example:
//
// auto p = make_ref_counted<Foo>("bar", 123);
//
// For a class that inherits from RefCountInterface, this is equivalent to:
//
// auto p = scoped_refptr<Foo>(new RefCountedObject<Foo>("bar", 123));
//
// If the class does not inherit from RefCountInterface, but does have
// AddRef/Release methods (so a T* is convertible to rtc::scoped_refptr), this
// is equivalent to just
//
// auto p = scoped_refptr<Foo>(new Foo("bar", 123));
//
// Otherwise, the example is equivalent to:
//
// auto p = scoped_refptr<FinalRefCountedObject<Foo>>(
// new FinalRefCountedObject<Foo>("bar", 123));
//
// In these cases, `make_ref_counted` reduces the amount of boilerplate code but
// also helps with the most commonly intended usage of RefCountedObject whereby
// methods for reference counting are virtual and designed to satisfy the need
// of an interface. When such a need does not exist, it is more efficient to use
// the `FinalRefCountedObject` template, which does not add the vtable overhead.
//
// Note that in some cases, using RefCountedObject directly may still be what's
// needed.
// `make_ref_counted` for abstract classes that are convertible to
// RefCountInterface. The is_abstract requirement rejects classes that inherit
// both RefCountInterface and RefCountedObject, which is a discouraged
// pattern, and would result in double inheritance of RefCountedObject if this
// template was applied.
template <
typename T,
typename... Args,
typename std::enable_if<std::is_convertible_v<T*, RefCountInterface*> &&
std::is_abstract_v<T>,
T>::type* = nullptr>
scoped_refptr<T> make_ref_counted(Args&&... args) {
return scoped_refptr<T>(new RefCountedObject<T>(std::forward<Args>(args)...));
}
// `make_ref_counted` for complete classes that are not convertible to
// RefCountInterface and already carry a ref count.
template <
typename T,
typename... Args,
typename std::enable_if<
!std::is_convertible_v<T*, RefCountInterface*> &&
webrtc_make_ref_counted_internal::HasAddRefAndRelease<T>::value,
T>::type* = nullptr>
scoped_refptr<T> make_ref_counted(Args&&... args) {
return scoped_refptr<T>(new T(std::forward<Args>(args)...));
}
// `make_ref_counted` for complete classes that are not convertible to
// RefCountInterface and have no ref count of their own.
template <
typename T,
typename... Args,
typename std::enable_if<
!std::is_convertible_v<T*, RefCountInterface*> &&
!webrtc_make_ref_counted_internal::HasAddRefAndRelease<T>::value,
T>::type* = nullptr>
scoped_refptr<FinalRefCountedObject<T>> make_ref_counted(Args&&... args) {
return scoped_refptr<FinalRefCountedObject<T>>(
new FinalRefCountedObject<T>(std::forward<Args>(args)...));
}
} // namespace rtc
#endif // API_MAKE_REF_COUNTED_H_
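
A compact sketch of how the three overloads above dispatch (Interface, Impl, and Plain are hypothetical):

#include "api/make_ref_counted.h"
#include "rtc_base/ref_count.h"

class Interface : public rtc::RefCountInterface {
 public:
  virtual int Value() const = 0;
};

// Impl stays abstract here because AddRef()/Release() are only supplied by
// RefCountedObject<Impl>, so the first overload applies.
class Impl : public Interface {
 public:
  explicit Impl(int v) : v_(v) {}
  int Value() const override { return v_; }

 private:
  int v_;
};

struct Plain {  // no ref count of its own -> FinalRefCountedObject<Plain>
  int x = 0;
};

void Demo() {
  rtc::scoped_refptr<Interface> a = rtc::make_ref_counted<Impl>(42);
  auto b = rtc::make_ref_counted<Plain>();
  b->x = 1;  // FinalRefCountedObject<Plain> derives publicly from Plain
}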

webrtc/api/ref_counted_base.h
View File

@ -10,8 +10,8 @@
#ifndef API_REF_COUNTED_BASE_H_
#define API_REF_COUNTED_BASE_H_
#include "rtc_base/constructor_magic.h"
#include "rtc_base/ref_count.h"
#include <type_traits>
#include "rtc_base/ref_counter.h"
namespace rtc {
@ -20,6 +20,9 @@ class RefCountedBase {
public:
RefCountedBase() = default;
RefCountedBase(const RefCountedBase&) = delete;
RefCountedBase& operator=(const RefCountedBase&) = delete;
void AddRef() const { ref_count_.IncRef(); }
RefCountReleaseStatus Release() const {
const auto status = ref_count_.DecRef();
@ -30,12 +33,64 @@ class RefCountedBase {
}
protected:
// Provided for internal webrtc subclasses for corner cases where it's
// necessary to know whether or not a reference is exclusively held.
bool HasOneRef() const { return ref_count_.HasOneRef(); }
virtual ~RefCountedBase() = default;
private:
mutable webrtc::webrtc_impl::RefCounter ref_count_{0};
};
RTC_DISALLOW_COPY_AND_ASSIGN(RefCountedBase);
// Template based version of `RefCountedBase` for simple implementations that do
// not need (or want) destruction via virtual destructor or the overhead of a
// vtable.
//
// To use:
// struct MyInt : public rtc::RefCountedNonVirtual<MyInt> {
// int foo_ = 0;
// };
//
// rtc::scoped_refptr<MyInt> my_int(new MyInt());
//
// sizeof(MyInt) on a 32 bit system would then be 8, int + refcount and no
// vtable generated.
template <typename T>
class RefCountedNonVirtual {
public:
RefCountedNonVirtual() = default;
RefCountedNonVirtual(const RefCountedNonVirtual&) = delete;
RefCountedNonVirtual& operator=(const RefCountedNonVirtual&) = delete;
void AddRef() const { ref_count_.IncRef(); }
RefCountReleaseStatus Release() const {
// If you run into this assert, T has virtual methods. There are two
// options:
// 1) The class doesn't actually need virtual methods, the type is complete
// so the virtual attribute(s) can be removed.
// 2) The virtual methods are a part of the design of the class. In this
// case you can consider using `RefCountedBase` instead or alternatively
// use `rtc::RefCountedObject`.
static_assert(!std::is_polymorphic<T>::value,
"T has virtual methods. RefCountedBase is a better fit.");
const auto status = ref_count_.DecRef();
if (status == RefCountReleaseStatus::kDroppedLastRef) {
delete static_cast<const T*>(this);
}
return status;
}
protected:
// Provided for internal webrtc subclasses for corner cases where it's
// necessary to know whether or not a reference is exclusively held.
bool HasOneRef() const { return ref_count_.HasOneRef(); }
~RefCountedNonVirtual() = default;
private:
mutable webrtc::webrtc_impl::RefCounter ref_count_{0};
};
} // namespace rtc

webrtc/api/rtp_headers.cc
View File

@ -44,7 +44,6 @@ RTPHeader::RTPHeader()
arrOfCSRCs(),
paddingLength(0),
headerLength(0),
payload_type_frequency(0),
extension() {}
RTPHeader::RTPHeader(const RTPHeader& other) = default;

webrtc/api/rtp_headers.h
View File

@ -103,15 +103,6 @@ struct RTPHeaderExtension {
(1 << kAbsSendTimeFraction));
}
TimeDelta GetAbsoluteSendTimeDelta(uint32_t previous_sendtime) const {
RTC_DCHECK(hasAbsoluteSendTime);
RTC_DCHECK(absoluteSendTime < (1ul << 24));
RTC_DCHECK(previous_sendtime < (1ul << 24));
int32_t delta =
static_cast<int32_t>((absoluteSendTime - previous_sendtime) << 8) >> 8;
return TimeDelta::Micros((delta * 1000000ll) / (1 << kAbsSendTimeFraction));
}
bool hasTransmissionTimeOffset;
int32_t transmissionTimeOffset;
bool hasAbsoluteSendTime;
@ -144,13 +135,12 @@ struct RTPHeaderExtension {
VideoPlayoutDelay playout_delay;
// For identification of a stream when ssrc is not signaled. See
// https://tools.ietf.org/html/draft-ietf-avtext-rid-09
// TODO(danilchap): Update url from draft to release version.
// https://tools.ietf.org/html/rfc8852
std::string stream_id;
std::string repaired_stream_id;
// For identifying the media section used to interpret this RTP packet. See
// https://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-38
// https://tools.ietf.org/html/rfc8843
std::string mid;
absl::optional<ColorSpace> color_space;
@ -158,7 +148,7 @@ struct RTPHeaderExtension {
enum { kRtpCsrcSize = 15 }; // RFC 3550 page 13
struct RTPHeader {
struct RTC_EXPORT RTPHeader {
RTPHeader();
RTPHeader(const RTPHeader& other);
RTPHeader& operator=(const RTPHeader& other);
@ -172,7 +162,6 @@ struct RTPHeader {
uint32_t arrOfCSRCs[kRtpCsrcSize];
size_t paddingLength;
size_t headerLength;
int payload_type_frequency;
RTPHeaderExtension extension;
};

webrtc/api/rtp_packet_info.cc
View File

@ -16,27 +16,22 @@
namespace webrtc {
RtpPacketInfo::RtpPacketInfo()
: ssrc_(0), rtp_timestamp_(0), receive_time_ms_(-1) {}
: ssrc_(0), rtp_timestamp_(0), receive_time_(Timestamp::MinusInfinity()) {}
RtpPacketInfo::RtpPacketInfo(
uint32_t ssrc,
RtpPacketInfo::RtpPacketInfo(uint32_t ssrc,
std::vector<uint32_t> csrcs,
uint32_t rtp_timestamp,
absl::optional<uint8_t> audio_level,
absl::optional<AbsoluteCaptureTime> absolute_capture_time,
int64_t receive_time_ms)
Timestamp receive_time)
: ssrc_(ssrc),
csrcs_(std::move(csrcs)),
rtp_timestamp_(rtp_timestamp),
audio_level_(audio_level),
absolute_capture_time_(absolute_capture_time),
receive_time_ms_(receive_time_ms) {}
receive_time_(receive_time) {}
RtpPacketInfo::RtpPacketInfo(const RTPHeader& rtp_header,
int64_t receive_time_ms)
Timestamp receive_time)
: ssrc_(rtp_header.ssrc),
rtp_timestamp_(rtp_header.timestamp),
receive_time_ms_(receive_time_ms) {
receive_time_(receive_time) {
const auto& extension = rtp_header.extension;
const auto csrcs_count = std::min<size_t>(rtp_header.numCSRCs, kRtpCsrcSize);
@ -52,9 +47,10 @@ RtpPacketInfo::RtpPacketInfo(const RTPHeader& rtp_header,
bool operator==(const RtpPacketInfo& lhs, const RtpPacketInfo& rhs) {
return (lhs.ssrc() == rhs.ssrc()) && (lhs.csrcs() == rhs.csrcs()) &&
(lhs.rtp_timestamp() == rhs.rtp_timestamp()) &&
(lhs.receive_time() == rhs.receive_time()) &&
(lhs.audio_level() == rhs.audio_level()) &&
(lhs.absolute_capture_time() == rhs.absolute_capture_time()) &&
(lhs.receive_time_ms() == rhs.receive_time_ms());
(lhs.local_capture_clock_offset() == rhs.local_capture_clock_offset());
}
} // namespace webrtc

webrtc/api/rtp_packet_info.h
View File

@ -17,14 +17,16 @@
#include "absl/types/optional.h"
#include "api/rtp_headers.h"
#include "api/units/time_delta.h"
#include "api/units/timestamp.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
//
// Structure to hold information about a received |RtpPacket|. It is primarily
// Structure to hold information about a received `RtpPacket`. It is primarily
// used to carry per-packet information from when a packet is received until
// the information is passed to |SourceTracker|.
// the information is passed to `SourceTracker`.
//
class RTC_EXPORT RtpPacketInfo {
public:
@ -33,11 +35,9 @@ class RTC_EXPORT RtpPacketInfo {
RtpPacketInfo(uint32_t ssrc,
std::vector<uint32_t> csrcs,
uint32_t rtp_timestamp,
absl::optional<uint8_t> audio_level,
absl::optional<AbsoluteCaptureTime> absolute_capture_time,
int64_t receive_time_ms);
Timestamp receive_time);
RtpPacketInfo(const RTPHeader& rtp_header, int64_t receive_time_ms);
RtpPacketInfo(const RTPHeader& rtp_header, Timestamp receive_time);
RtpPacketInfo(const RtpPacketInfo& other) = default;
RtpPacketInfo(RtpPacketInfo&& other) = default;
@ -53,19 +53,32 @@ class RTC_EXPORT RtpPacketInfo {
uint32_t rtp_timestamp() const { return rtp_timestamp_; }
void set_rtp_timestamp(uint32_t value) { rtp_timestamp_ = value; }
Timestamp receive_time() const { return receive_time_; }
void set_receive_time(Timestamp value) { receive_time_ = value; }
absl::optional<uint8_t> audio_level() const { return audio_level_; }
void set_audio_level(absl::optional<uint8_t> value) { audio_level_ = value; }
RtpPacketInfo& set_audio_level(absl::optional<uint8_t> value) {
audio_level_ = value;
return *this;
}
const absl::optional<AbsoluteCaptureTime>& absolute_capture_time() const {
return absolute_capture_time_;
}
void set_absolute_capture_time(
RtpPacketInfo& set_absolute_capture_time(
const absl::optional<AbsoluteCaptureTime>& value) {
absolute_capture_time_ = value;
return *this;
}
int64_t receive_time_ms() const { return receive_time_ms_; }
void set_receive_time_ms(int64_t value) { receive_time_ms_ = value; }
const absl::optional<TimeDelta>& local_capture_clock_offset() const {
return local_capture_clock_offset_;
}
RtpPacketInfo& set_local_capture_clock_offset(
absl::optional<TimeDelta> value) {
local_capture_clock_offset_ = value;
return *this;
}
private:
// Fields from the RTP header:
@ -74,6 +87,9 @@ class RTC_EXPORT RtpPacketInfo {
std::vector<uint32_t> csrcs_;
uint32_t rtp_timestamp_;
// Local `webrtc::Clock`-based timestamp of when the packet was received.
Timestamp receive_time_;
// Fields from the Audio Level header extension:
// https://tools.ietf.org/html/rfc6464#section-3
absl::optional<uint8_t> audio_level_;
@ -82,8 +98,12 @@ class RTC_EXPORT RtpPacketInfo {
// http://www.webrtc.org/experiments/rtp-hdrext/abs-capture-time
absl::optional<AbsoluteCaptureTime> absolute_capture_time_;
// Local |webrtc::Clock|-based timestamp of when the packet was received.
int64_t receive_time_ms_;
// Clock offset between the local clock and the capturer's clock.
// Do not confuse with `AbsoluteCaptureTime::estimated_capture_clock_offset`
// which instead represents the clock offset between a remote sender and the
// capturer. The following holds:
// Capture's NTP Clock = Local NTP Clock + Local-Capture Clock Offset
absl::optional<TimeDelta> local_capture_clock_offset_;
};
bool operator==(const RtpPacketInfo& lhs, const RtpPacketInfo& rhs);
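
A small sketch of the updated construction and chainable setters (all values made up):

#include "absl/types/optional.h"
#include "api/rtp_packet_info.h"
#include "api/units/time_delta.h"
#include "api/units/timestamp.h"

// receive_time is now a webrtc::Timestamp rather than int64_t milliseconds,
// and the setters return RtpPacketInfo& so optional metadata can be chained.
webrtc::RtpPacketInfo MakeInfo() {
  webrtc::RtpPacketInfo info(/*ssrc=*/0x1234u, /*csrcs=*/{},
                             /*rtp_timestamp=*/960u,
                             /*receive_time=*/webrtc::Timestamp::Millis(10000));
  info.set_audio_level(absl::optional<uint8_t>(90))
      .set_local_capture_clock_offset(webrtc::TimeDelta::Millis(-3));
  return info;
}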

webrtc/api/rtp_packet_infos.h
View File

@ -15,6 +15,7 @@
#include <utility>
#include <vector>
#include "api/make_ref_counted.h"
#include "api/ref_counted_base.h"
#include "api/rtp_packet_info.h"
#include "api/scoped_refptr.h"
@ -26,8 +27,8 @@ namespace webrtc {
// an audio or video frame. Uses internal reference counting to make it very
// cheap to copy.
//
// We should ideally just use |std::vector<RtpPacketInfo>| and have it
// |std::move()|-ed as the per-packet information is transferred from one object
// We should ideally just use `std::vector<RtpPacketInfo>` and have it
// `std::move()`-ed as the per-packet information is transferred from one object
// to another. But moving the info, instead of copying it, is not easily done
// for the current video code.
class RTC_EXPORT RtpPacketInfos {
@ -79,7 +80,7 @@ class RTC_EXPORT RtpPacketInfos {
size_type size() const { return entries().size(); }
private:
class Data : public rtc::RefCountedBase {
class Data final : public rtc::RefCountedNonVirtual<Data> {
public:
static rtc::scoped_refptr<Data> Create(const vector_type& entries) {
// Performance optimization for the empty case.
@ -87,7 +88,7 @@ class RTC_EXPORT RtpPacketInfos {
return nullptr;
}
return new Data(entries);
return rtc::make_ref_counted<Data>(entries);
}
static rtc::scoped_refptr<Data> Create(vector_type&& entries) {
@ -96,16 +97,16 @@ class RTC_EXPORT RtpPacketInfos {
return nullptr;
}
return new Data(std::move(entries));
return rtc::make_ref_counted<Data>(std::move(entries));
}
const vector_type& entries() const { return entries_; }
private:
explicit Data(const vector_type& entries) : entries_(entries) {}
explicit Data(vector_type&& entries) : entries_(std::move(entries)) {}
~Data() override {}
~Data() = default;
private:
const vector_type entries_;
};

webrtc/api/scoped_refptr.h
View File

@ -24,13 +24,13 @@
// void some_function() {
// scoped_refptr<MyFoo> foo = new MyFoo();
// foo->Method(param);
// // |foo| is released when this function returns
// // `foo` is released when this function returns
// }
//
// void some_other_function() {
// scoped_refptr<MyFoo> foo = new MyFoo();
// ...
// foo = nullptr; // explicitly releases |foo|
// foo = nullptr; // explicitly releases `foo`
// ...
// if (foo)
// foo->Method(param);
@ -45,10 +45,10 @@
// scoped_refptr<MyFoo> b;
//
// b.swap(a);
// // now, |b| references the MyFoo object, and |a| references null.
// // now, `b` references the MyFoo object, and `a` references null.
// }
//
// To make both |a| and |b| in the above example reference the same MyFoo
// To make both `a` and `b` in the above example reference the same MyFoo
// object, simply use the assignment operator:
//
// {
@ -56,7 +56,7 @@
// scoped_refptr<MyFoo> b;
//
// b = a;
// // now, |a| and |b| each own a reference to the same MyFoo object.
// // now, `a` and `b` each own a reference to the same MyFoo object.
// }
//
@ -74,8 +74,9 @@ class scoped_refptr {
typedef T element_type;
scoped_refptr() : ptr_(nullptr) {}
scoped_refptr(std::nullptr_t) : ptr_(nullptr) {} // NOLINT(runtime/explicit)
scoped_refptr(T* p) : ptr_(p) { // NOLINT(runtime/explicit)
explicit scoped_refptr(T* p) : ptr_(p) {
if (ptr_)
ptr_->AddRef();
}
@ -103,7 +104,8 @@ class scoped_refptr {
}
T* get() const { return ptr_; }
operator T*() const { return ptr_; }
explicit operator bool() const { return ptr_ != nullptr; }
T& operator*() const { return *ptr_; }
T* operator->() const { return ptr_; }
// Returns the (possibly null) raw pointer, and makes the scoped_refptr hold a
@ -159,6 +161,62 @@ class scoped_refptr {
T* ptr_;
};
template <typename T, typename U>
bool operator==(const rtc::scoped_refptr<T>& a,
const rtc::scoped_refptr<U>& b) {
return a.get() == b.get();
}
template <typename T, typename U>
bool operator!=(const rtc::scoped_refptr<T>& a,
const rtc::scoped_refptr<U>& b) {
return !(a == b);
}
template <typename T>
bool operator==(const rtc::scoped_refptr<T>& a, std::nullptr_t) {
return a.get() == nullptr;
}
template <typename T>
bool operator!=(const rtc::scoped_refptr<T>& a, std::nullptr_t) {
return !(a == nullptr);
}
template <typename T>
bool operator==(std::nullptr_t, const rtc::scoped_refptr<T>& a) {
return a.get() == nullptr;
}
template <typename T>
bool operator!=(std::nullptr_t, const rtc::scoped_refptr<T>& a) {
return !(a == nullptr);
}
// Comparison with raw pointer.
template <typename T, typename U>
bool operator==(const rtc::scoped_refptr<T>& a, const U* b) {
return a.get() == b;
}
template <typename T, typename U>
bool operator!=(const rtc::scoped_refptr<T>& a, const U* b) {
return !(a == b);
}
template <typename T, typename U>
bool operator==(const T* a, const rtc::scoped_refptr<U>& b) {
return a == b.get();
}
template <typename T, typename U>
bool operator!=(const T* a, const rtc::scoped_refptr<U>& b) {
return !(a == b);
}
// Ordered comparison, needed for use as a std::map key.
template <typename T, typename U>
bool operator<(const rtc::scoped_refptr<T>& a, const rtc::scoped_refptr<U>& b) {
return a.get() < b.get();
}
} // namespace rtc
#endif // API_SCOPED_REFPTR_H_
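
For orientation, a minimal usage sketch of the factory and comparison operators shown above; `Foo`, `Example` and the include paths are illustrative assumptions, not part of the diff:

#include "api/make_ref_counted.h"
#include "api/scoped_refptr.h"
#include "rtc_base/ref_count.h"

// Hypothetical ref-counted type, assumed only for this sketch.
class Foo : public rtc::RefCountInterface {
 public:
  void Method() {}
};

void Example() {
  // make_ref_counted (as used by Data::Create above) wraps Foo and returns
  // an rtc::scoped_refptr<Foo> holding one reference.
  rtc::scoped_refptr<Foo> a = rtc::make_ref_counted<Foo>();
  rtc::scoped_refptr<Foo> b;       // Starts out null.
  // The operators defined above make null checks and map keys work:
  bool was_null = (b == nullptr);  // true
  b = a;                           // a and b now share one Foo.
  bool same = (a == b);            // true
  (void)was_null;
  (void)same;
}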

View File

@ -0,0 +1,140 @@
/*
* Copyright 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_SEQUENCE_CHECKER_H_
#define API_SEQUENCE_CHECKER_H_
#include "rtc_base/checks.h"
#include "rtc_base/synchronization/sequence_checker_internal.h"
#include "rtc_base/thread_annotations.h"
namespace webrtc {
// SequenceChecker is a helper class used to help verify that some methods
// of a class are called on the same task queue or thread. A
// SequenceChecker is bound to a task queue if the object is
// created on a task queue, or a thread otherwise.
//
//
// Example:
// class MyClass {
// public:
// void Foo() {
// RTC_DCHECK_RUN_ON(&sequence_checker_);
// ... (do stuff) ...
// }
//
// private:
// SequenceChecker sequence_checker_;
// }
//
// In Release mode, IsCurrent will always return true.
class RTC_LOCKABLE SequenceChecker
#if RTC_DCHECK_IS_ON
: public webrtc_sequence_checker_internal::SequenceCheckerImpl {
using Impl = webrtc_sequence_checker_internal::SequenceCheckerImpl;
#else
: public webrtc_sequence_checker_internal::SequenceCheckerDoNothing {
using Impl = webrtc_sequence_checker_internal::SequenceCheckerDoNothing;
#endif
public:
enum InitialState : bool { kDetached = false, kAttached = true };
// TODO(tommi): We could maybe join these two ctors and have fewer factory
// functions. At the moment they're separate to minimize code changes when
// we added the second ctor, and to avoid having unnecessary code in
// SequenceChecker that must only run for the SequenceCheckerImpl
// implementation.
// In theory we could have something like:
//
// SequenceChecker(InitialState initial_state = kAttached,
// TaskQueueBase* attached_queue = TaskQueueBase::Current());
//
// But the problem with that is having the call to `Current()` exist for
// `SequenceCheckerDoNothing`.
explicit SequenceChecker(InitialState initial_state = kAttached)
: Impl(initial_state) {}
explicit SequenceChecker(TaskQueueBase* attached_queue)
: Impl(attached_queue) {}
// Returns true if sequence checker is attached to the current sequence.
bool IsCurrent() const { return Impl::IsCurrent(); }
// Detaches checker from sequence to which it is attached. Next attempt
// to do a check with this checker will result in attaching this checker
// to the sequence on which check was performed.
void Detach() { Impl::Detach(); }
};
} // namespace webrtc
// The RTC_RUN_ON/RTC_GUARDED_BY/RTC_DCHECK_RUN_ON macros allow annotating
// variables that are accessed from the same thread/task queue.
// Using tools designed to check mutexes, they verify at compile time that
// every access to an annotated variable is checked, and at run time a dcheck
// verifies that the thread/task queue is correct.
//
// class SequenceCheckerExample {
// public:
// int CalledFromPacer() RTC_RUN_ON(pacer_sequence_checker_) {
// return pacer_var_;
// }
//
// void CallMeFromPacer() {
// RTC_DCHECK_RUN_ON(&pacer_sequence_checker_)
// << "Should be called from pacer";
// CalledFromPacer();
// }
//
// private:
// int pacer_var_ RTC_GUARDED_BY(pacer_sequence_checker_);
// SequenceChecker pacer_sequence_checker_;
// };
//
// class TaskQueueExample {
// public:
// class Encoder {
// public:
// rtc::TaskQueueBase& Queue() { return encoder_queue_; }
// void Encode() {
// RTC_DCHECK_RUN_ON(&encoder_queue_);
// DoSomething(var_);
// }
//
// private:
// rtc::TaskQueueBase& encoder_queue_;
// Frame var_ RTC_GUARDED_BY(encoder_queue_);
// };
//
// void Encode() {
// // Will fail at runtime when DCHECK is enabled:
// // encoder_->Encode();
// // Will work:
// rtc::scoped_refptr<Encoder> encoder = encoder_;
// encoder_->Queue().PostTask([encoder] { encoder->Encode(); });
// }
//
// private:
// rtc::scoped_refptr<Encoder> encoder_;
// }
// Documents that a function is expected to be called from the same
// thread/task queue.
#define RTC_RUN_ON(x) \
RTC_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(x))
// Checks current code is running on the desired sequence.
//
// First statement validates it is running on the sequence `x`.
// Second statement annotates for the thread safety analyzer the check was done.
// Such annotation has to be attached to a function, and that function has to be
// called. Thus the current implementation creates a no-op lambda and calls it.
#define RTC_DCHECK_RUN_ON(x) \
RTC_DCHECK((x)->IsCurrent()) \
<< webrtc::webrtc_sequence_checker_internal::ExpectationToString(x); \
[]() RTC_ASSERT_EXCLUSIVE_LOCK(x) {}()
#endif // API_SEQUENCE_CHECKER_H_
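
A compact sketch of the checker plus the annotation macros documented above (the class and member names are made up for illustration):

#include "api/sequence_checker.h"

class Counter {
 public:
  Counter() { checker_.Detach(); }  // Attach lazily to the first caller.

  void Increment() {
    // DCHECKs that we are on the attached sequence (re-attaching on first
    // use after Detach) and tells the thread-safety analyzer the access
    // below is safe.
    RTC_DCHECK_RUN_ON(&checker_);
    ++value_;
  }

 private:
  webrtc::SequenceChecker checker_;
  int value_ RTC_GUARDED_BY(checker_) = 0;
};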

View File

@ -11,6 +11,8 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/functional/any_invocable.h"
#include "api/units/time_delta.h"
#include "rtc_base/checks.h"
#if defined(ABSL_HAVE_THREAD_LOCAL)

View File

@ -11,8 +11,11 @@
#define API_TASK_QUEUE_TASK_QUEUE_BASE_H_
#include <memory>
#include <utility>
#include "api/task_queue/queued_task.h"
#include "absl/functional/any_invocable.h"
#include "api/location.h"
#include "api/units/time_delta.h"
#include "rtc_base/system/rtc_export.h"
#include "rtc_base/thread_annotations.h"
@ -24,41 +27,139 @@ namespace webrtc {
// known task queue, use IsCurrent().
class RTC_LOCKABLE RTC_EXPORT TaskQueueBase {
public:
enum class DelayPrecision {
// This may include up to a 17 ms leeway in addition to OS timer precision.
// See PostDelayedTask() for more information.
kLow,
// This does not have the additional delay that kLow has, but it is still
// limited by OS timer precision. See PostDelayedHighPrecisionTask() for
// more information.
kHigh,
};
// Starts destruction of the task queue.
// On return ensures no tasks are running and no new tasks are able to start
// on the task queue.
// Responsible for deallocation. Deallocation may happen syncrhoniously during
// Responsible for deallocation. Deallocation may happen synchronously during
// Delete or asynchronously after Delete returns.
// Code not running on the TaskQueue should not make any assumptions about
// when the TaskQueue is deallocated and thus should not call any methods
// after Delete.
// Code running on the TaskQueue should not call Delete, but can assume
// TaskQueue still exists and may call other methods, e.g. PostTask.
// Should be called on the same task queue or thread that this task queue
// was created on.
virtual void Delete() = 0;
// Schedules a task to execute. Tasks are executed in FIFO order.
// If |task->Run()| returns true, task is deleted on the task queue
// before next QueuedTask starts executing.
// Schedules a `task` to execute. Tasks are executed in FIFO order.
// When a TaskQueue is deleted, pending tasks will not be executed but they
// will be deleted. The deletion of tasks may happen synchronously on the
// TaskQueue or it may happen asynchronously after TaskQueue is deleted.
// This may vary from one implementation to the next so assumptions about
// lifetimes of pending tasks should not be made.
virtual void PostTask(std::unique_ptr<QueuedTask> task) = 0;
// will be deleted.
//
// As long as tasks are not posted from task destruction, posted tasks are
// guaranteed to be destroyed with Current() pointing to the task queue they
// were posted to, whether they're executed or not. That means SequenceChecker
// works during task destruction, a fact that can be used to guarantee
// thread-compatible object deletion happening on a particular task queue
// which can simplify class design.
// Note that this guarantee does not apply to delayed tasks.
//
// May be called on any thread or task queue, including this task queue.
void PostTask(absl::AnyInvocable<void() &&> task,
const Location& location = Location::Current()) {
PostTaskImpl(std::move(task), PostTaskTraits{}, location);
}
// Schedules a task to execute a specified number of milliseconds from when
// the call is made. The precision should be considered as "best effort"
// and in some cases, such as on Windows when all high precision timers have
// been used up, can be off by as much as 15 millseconds.
virtual void PostDelayedTask(std::unique_ptr<QueuedTask> task,
uint32_t milliseconds) = 0;
// Prefer PostDelayedTask() over PostDelayedHighPrecisionTask() whenever
// possible.
//
// Schedules a `task` to execute a specified `delay` from when the call is
// made, using "low" precision. All scheduling is affected by OS-specific
// leeway and current workloads which means that in terms of precision there
// are no hard guarantees, but in addition to the OS induced leeway, "low"
// precision adds up to a 17 ms additional leeway. The purpose of this leeway
// is to achieve more efficient CPU scheduling and reduce Idle Wake Up
// frequency.
//
// The task may execute with [-1, 17 + OS induced leeway) ms additional delay.
//
// Avoid making assumptions about the precision of the OS scheduler. On macOS,
// the OS induced leeway may be 10% of sleep interval. On Windows, 1 ms
// precision timers may be used but there are cases, such as when running on
// battery, when the timer precision can be as poor as 15 ms.
//
// "Low" precision is not implemented everywhere yet. Where not yet
// implemented, PostDelayedTask() has "high" precision. See
// https://crbug.com/webrtc/13583 for more information.
//
// May be called on any thread or task queue, including this task queue.
void PostDelayedTask(absl::AnyInvocable<void() &&> task,
TimeDelta delay,
const Location& location = Location::Current()) {
PostDelayedTaskImpl(std::move(task), delay, PostDelayedTaskTraits{},
location);
}
// Prefer PostDelayedTask() over PostDelayedHighPrecisionTask() whenever
// possible.
//
// Schedules a `task` to execute a specified `delay` from when the call is
// made, using "high" precision. All scheduling is affected by OS-specific
// leeway and current workloads which means that in terms of precision there
// are no hard guarantees.
//
// The task may execute with [-1, OS induced leeway] ms additional delay.
//
// Avoid making assumptions about the precision of the OS scheduler. On macOS,
// the OS induced leeway may be 10% of sleep interval. On Windows, 1 ms
// precision timers may be used but there are cases, such as when running on
// battery, when the timer precision can be as poor as 15 ms.
//
// May be called on any thread or task queue, including this task queue.
void PostDelayedHighPrecisionTask(
absl::AnyInvocable<void() &&> task,
TimeDelta delay,
const Location& location = Location::Current()) {
PostDelayedTaskTraits traits;
traits.high_precision = true;
PostDelayedTaskImpl(std::move(task), delay, traits, location);
}
// As specified by `precision`, calls either PostDelayedTask() or
// PostDelayedHighPrecisionTask().
void PostDelayedTaskWithPrecision(
DelayPrecision precision,
absl::AnyInvocable<void() &&> task,
TimeDelta delay,
const Location& location = Location::Current()) {
switch (precision) {
case DelayPrecision::kLow:
PostDelayedTask(std::move(task), delay, location);
break;
case DelayPrecision::kHigh:
PostDelayedHighPrecisionTask(std::move(task), delay, location);
break;
}
}
// Returns the task queue that is running the current thread.
// Returns nullptr if this thread is not associated with any task queue.
// May be called on any thread or task queue, including this task queue.
static TaskQueueBase* Current();
bool IsCurrent() const { return Current() == this; }
protected:
class CurrentTaskQueueSetter {
// This is currently only present here to simplify introduction of future
// planned task queue changes.
struct PostTaskTraits {};
struct PostDelayedTaskTraits {
// If `high_precision` is false, tasks may execute within up to a 17 ms
// leeway in addition to OS timer precision. Otherwise the task should be
// limited to OS timer precision. See PostDelayedTask() and
// PostDelayedHighPrecisionTask() for more information.
bool high_precision = false;
};
class RTC_EXPORT CurrentTaskQueueSetter {
public:
explicit CurrentTaskQueueSetter(TaskQueueBase* task_queue);
CurrentTaskQueueSetter(const CurrentTaskQueueSetter&) = delete;
@ -69,6 +170,20 @@ class RTC_LOCKABLE RTC_EXPORT TaskQueueBase {
TaskQueueBase* const previous_;
};
// Subclasses should implement this method to support the behavior defined in
// the PostTask and PostTaskTraits docs above.
virtual void PostTaskImpl(absl::AnyInvocable<void() &&> task,
const PostTaskTraits& traits,
const Location& location) = 0;
// Subclasses should implement this method to support the behavior defined in
// the PostDelayedTask/PostDelayedHighPrecisionTask and PostDelayedTaskTraits
// docs above.
virtual void PostDelayedTaskImpl(absl::AnyInvocable<void() &&> task,
TimeDelta delay,
const PostDelayedTaskTraits& traits,
const Location& location) = 0;
// Users of the TaskQueue should call Delete instead of directly deleting
// this object.
virtual ~TaskQueueBase() = default;
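
Pulling the new AnyInvocable-based surface together, a sketch of the three posting flavors (obtaining `queue` is outside the scope of this header; the function name is illustrative):

#include "api/task_queue/task_queue_base.h"
#include "api/units/time_delta.h"

void PostExamples(webrtc::TaskQueueBase* queue) {
  // FIFO post; runs as soon as earlier tasks have finished.
  queue->PostTask([] { /* work */ });
  // Low precision: may fire up to ~17 ms late beyond OS timer leeway, in
  // exchange for fewer idle wake-ups.
  queue->PostDelayedTask([] { /* work */ }, webrtc::TimeDelta::Millis(100));
  // Precision chosen at the call site.
  queue->PostDelayedTaskWithPrecision(
      webrtc::TaskQueueBase::DelayPrecision::kHigh, [] { /* work */ },
      webrtc::TimeDelta::Millis(10));
}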

View File

@ -11,9 +11,9 @@
#ifndef API_UNITS_DATA_RATE_H_
#define API_UNITS_DATA_RATE_H_
#ifdef UNIT_TEST
#ifdef WEBRTC_UNIT_TEST
#include <ostream> // no-presubmit-check TODO(webrtc:8982)
#endif // UNIT_TEST
#endif // WEBRTC_UNIT_TEST
#include <limits>
#include <string>
@ -23,7 +23,7 @@
#include "api/units/frequency.h"
#include "api/units/time_delta.h"
#include "rtc_base/checks.h"
#include "rtc_base/units/unit_base.h"
#include "rtc_base/units/unit_base.h" // IWYU pragma: export
namespace webrtc {
// DataRate is a class that represents a given data rate. This can be used to
@ -142,13 +142,13 @@ inline std::string ToLogString(DataRate value) {
return ToString(value);
}
#ifdef UNIT_TEST
#ifdef WEBRTC_UNIT_TEST
inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982)
std::ostream& stream, // no-presubmit-check TODO(webrtc:8982)
DataRate value) {
return stream << ToString(value);
}
#endif // UNIT_TEST
#endif // WEBRTC_UNIT_TEST
} // namespace webrtc
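
As a quick illustration of how the unit types in this and the neighboring headers compose (the DataSize/TimeDelta division overload is assumed from data_rate.h; values are arbitrary):

#include <string>

#include "api/units/data_rate.h"
#include "api/units/data_size.h"
#include "api/units/time_delta.h"

void UnitSketch() {
  webrtc::DataSize payload = webrtc::DataSize::Bytes(1500);
  webrtc::TimeDelta interval = webrtc::TimeDelta::Millis(10);
  // 1500 bytes / 10 ms = 150000 bytes/s = 1.2 Mbps.
  webrtc::DataRate rate = payload / interval;
  // ToString() is what the WEBRTC_UNIT_TEST operator<< above forwards to.
  std::string readable = webrtc::ToString(rate);
  (void)readable;
}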

View File

@ -11,14 +11,14 @@
#ifndef API_UNITS_DATA_SIZE_H_
#define API_UNITS_DATA_SIZE_H_
#ifdef UNIT_TEST
#ifdef WEBRTC_UNIT_TEST
#include <ostream> // no-presubmit-check TODO(webrtc:8982)
#endif // UNIT_TEST
#endif // WEBRTC_UNIT_TEST
#include <string>
#include <type_traits>
#include "rtc_base/units/unit_base.h"
#include "rtc_base/units/unit_base.h" // IWYU pragma: export
namespace webrtc {
// DataSize is a class representing a count of bytes.
@ -53,13 +53,13 @@ inline std::string ToLogString(DataSize value) {
return ToString(value);
}
#ifdef UNIT_TEST
#ifdef WEBRTC_UNIT_TEST
inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982)
std::ostream& stream, // no-presubmit-check TODO(webrtc:8982)
DataSize value) {
return stream << ToString(value);
}
#endif // UNIT_TEST
#endif // WEBRTC_UNIT_TEST
} // namespace webrtc

View File

@ -10,9 +10,9 @@
#ifndef API_UNITS_FREQUENCY_H_
#define API_UNITS_FREQUENCY_H_
#ifdef UNIT_TEST
#ifdef WEBRTC_UNIT_TEST
#include <ostream> // no-presubmit-check TODO(webrtc:8982)
#endif // UNIT_TEST
#endif // WEBRTC_UNIT_TEST
#include <cstdlib>
#include <limits>
@ -20,7 +20,7 @@
#include <type_traits>
#include "api/units/time_delta.h"
#include "rtc_base/units/unit_base.h"
#include "rtc_base/units/unit_base.h" // IWYU pragma: export
namespace webrtc {
@ -89,13 +89,13 @@ inline std::string ToLogString(Frequency value) {
return ToString(value);
}
#ifdef UNIT_TEST
#ifdef WEBRTC_UNIT_TEST
inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982)
std::ostream& stream, // no-presubmit-check TODO(webrtc:8982)
Frequency value) {
return stream << ToString(value);
}
#endif // UNIT_TEST
#endif // WEBRTC_UNIT_TEST
} // namespace webrtc
#endif // API_UNITS_FREQUENCY_H_

View File

@ -11,15 +11,15 @@
#ifndef API_UNITS_TIME_DELTA_H_
#define API_UNITS_TIME_DELTA_H_
#ifdef UNIT_TEST
#ifdef WEBRTC_UNIT_TEST
#include <ostream> // no-presubmit-check TODO(webrtc:8982)
#endif // UNIT_TEST
#endif // WEBRTC_UNIT_TEST
#include <cstdlib>
#include <string>
#include <type_traits>
#include "rtc_base/units/unit_base.h"
#include "rtc_base/units/unit_base.h" // IWYU pragma: export
namespace webrtc {
@ -32,6 +32,11 @@ namespace webrtc {
// microseconds (us).
class TimeDelta final : public rtc_units_impl::RelativeUnit<TimeDelta> {
public:
template <typename T>
static constexpr TimeDelta Minutes(T value) {
static_assert(std::is_arithmetic<T>::value, "");
return Seconds(value * 60);
}
template <typename T>
static constexpr TimeDelta Seconds(T value) {
static_assert(std::is_arithmetic<T>::value, "");
@ -92,13 +97,13 @@ inline std::string ToLogString(TimeDelta value) {
return ToString(value);
}
#ifdef UNIT_TEST
#ifdef WEBRTC_UNIT_TEST
inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982)
std::ostream& stream, // no-presubmit-check TODO(webrtc:8982)
TimeDelta value) {
return stream << ToString(value);
}
#endif // UNIT_TEST
#endif // WEBRTC_UNIT_TEST
} // namespace webrtc
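
The new Minutes() factory above is plain sugar over Seconds(); a quick sketch (constexpr evaluation of the accessors is assumed, as for the other factories):

#include "api/units/time_delta.h"

void MinutesSketch() {
  constexpr webrtc::TimeDelta kKeepalive = webrtc::TimeDelta::Minutes(2);
  static_assert(kKeepalive == webrtc::TimeDelta::Seconds(120), "");
  static_assert(kKeepalive.ms() == 120000, "");
}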

View File

@ -11,15 +11,16 @@
#ifndef API_UNITS_TIMESTAMP_H_
#define API_UNITS_TIMESTAMP_H_
#ifdef UNIT_TEST
#ifdef WEBRTC_UNIT_TEST
#include <ostream> // no-presubmit-check TODO(webrtc:8982)
#endif // UNIT_TEST
#endif // WEBRTC_UNIT_TEST
#include <string>
#include <type_traits>
#include "api/units/time_delta.h"
#include "rtc_base/checks.h"
#include "rtc_base/units/unit_base.h" // IWYU pragma: export
namespace webrtc {
// Timestamp represents the time that has passed since some unspecified epoch.
@ -125,13 +126,13 @@ inline std::string ToLogString(Timestamp value) {
return ToString(value);
}
#ifdef UNIT_TEST
#ifdef WEBRTC_UNIT_TEST
inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982)
std::ostream& stream, // no-presubmit-check TODO(webrtc:8982)
Timestamp value) {
return stream << ToString(value);
}
#endif // UNIT_TEST
#endif // WEBRTC_UNIT_TEST
} // namespace webrtc

View File

@ -10,9 +10,11 @@
#include "api/video/color_space.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
namespace {
// Try to convert |enum_value| into the enum class T. |enum_bitmask| is created
// Try to convert `enum_value` into the enum class T. `enum_bitmask` is created
// by the function below. Returns true if conversion was successful, false
// otherwise.
template <typename T>
@ -43,7 +45,7 @@ constexpr int MakeMask(const int index, const int length, T (&values)[N]) {
}
// Create a bitmask where each bit corresponds to one potential enum value.
// |values| should be an array listing all possible enum values. The bit is set
// `values` should be an array listing all possible enum values. The bit is set
// to one if the corresponding enum exists. Only works for enums with values
// less than 64.
template <typename T, size_t N>
@ -124,6 +126,80 @@ const HdrMetadata* ColorSpace::hdr_metadata() const {
return hdr_metadata_ ? &*hdr_metadata_ : nullptr;
}
#define PRINT_ENUM_CASE(TYPE, NAME) \
case TYPE::NAME: \
ss << #NAME; \
break;
std::string ColorSpace::AsString() const {
char buf[1024];
rtc::SimpleStringBuilder ss(buf);
ss << "{primaries:";
switch (primaries_) {
PRINT_ENUM_CASE(PrimaryID, kBT709)
PRINT_ENUM_CASE(PrimaryID, kUnspecified)
PRINT_ENUM_CASE(PrimaryID, kBT470M)
PRINT_ENUM_CASE(PrimaryID, kBT470BG)
PRINT_ENUM_CASE(PrimaryID, kSMPTE170M)
PRINT_ENUM_CASE(PrimaryID, kSMPTE240M)
PRINT_ENUM_CASE(PrimaryID, kFILM)
PRINT_ENUM_CASE(PrimaryID, kBT2020)
PRINT_ENUM_CASE(PrimaryID, kSMPTEST428)
PRINT_ENUM_CASE(PrimaryID, kSMPTEST431)
PRINT_ENUM_CASE(PrimaryID, kSMPTEST432)
PRINT_ENUM_CASE(PrimaryID, kJEDECP22)
}
ss << ", transfer:";
switch (transfer_) {
PRINT_ENUM_CASE(TransferID, kBT709)
PRINT_ENUM_CASE(TransferID, kUnspecified)
PRINT_ENUM_CASE(TransferID, kGAMMA22)
PRINT_ENUM_CASE(TransferID, kGAMMA28)
PRINT_ENUM_CASE(TransferID, kSMPTE170M)
PRINT_ENUM_CASE(TransferID, kSMPTE240M)
PRINT_ENUM_CASE(TransferID, kLINEAR)
PRINT_ENUM_CASE(TransferID, kLOG)
PRINT_ENUM_CASE(TransferID, kLOG_SQRT)
PRINT_ENUM_CASE(TransferID, kIEC61966_2_4)
PRINT_ENUM_CASE(TransferID, kBT1361_ECG)
PRINT_ENUM_CASE(TransferID, kIEC61966_2_1)
PRINT_ENUM_CASE(TransferID, kBT2020_10)
PRINT_ENUM_CASE(TransferID, kBT2020_12)
PRINT_ENUM_CASE(TransferID, kSMPTEST2084)
PRINT_ENUM_CASE(TransferID, kSMPTEST428)
PRINT_ENUM_CASE(TransferID, kARIB_STD_B67)
}
ss << ", matrix:";
switch (matrix_) {
PRINT_ENUM_CASE(MatrixID, kRGB)
PRINT_ENUM_CASE(MatrixID, kBT709)
PRINT_ENUM_CASE(MatrixID, kUnspecified)
PRINT_ENUM_CASE(MatrixID, kFCC)
PRINT_ENUM_CASE(MatrixID, kBT470BG)
PRINT_ENUM_CASE(MatrixID, kSMPTE170M)
PRINT_ENUM_CASE(MatrixID, kSMPTE240M)
PRINT_ENUM_CASE(MatrixID, kYCOCG)
PRINT_ENUM_CASE(MatrixID, kBT2020_NCL)
PRINT_ENUM_CASE(MatrixID, kBT2020_CL)
PRINT_ENUM_CASE(MatrixID, kSMPTE2085)
PRINT_ENUM_CASE(MatrixID, kCDNCLS)
PRINT_ENUM_CASE(MatrixID, kCDCLS)
PRINT_ENUM_CASE(MatrixID, kBT2100_ICTCP)
}
ss << ", range:";
switch (range_) {
PRINT_ENUM_CASE(RangeID, kInvalid)
PRINT_ENUM_CASE(RangeID, kLimited)
PRINT_ENUM_CASE(RangeID, kFull)
PRINT_ENUM_CASE(RangeID, kDerived)
}
ss << "}";
return ss.str();
}
#undef PRINT_ENUM_CASE
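
For illustration, assuming the four-argument ColorSpace constructor, the new AsString() renders a typical HD stream like this:

#include "api/video/color_space.h"
#include "rtc_base/logging.h"

void LogColorSpace() {
  webrtc::ColorSpace cs(webrtc::ColorSpace::PrimaryID::kBT709,
                        webrtc::ColorSpace::TransferID::kBT709,
                        webrtc::ColorSpace::MatrixID::kBT709,
                        webrtc::ColorSpace::RangeID::kLimited);
  // Logs "{primaries:kBT709, transfer:kBT709, matrix:kBT709, range:kLimited}".
  RTC_LOG(LS_INFO) << cs.AsString();
}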
bool ColorSpace::set_primaries_from_uint8(uint8_t enum_value) {
constexpr PrimaryID kPrimaryIds[] = {
PrimaryID::kBT709, PrimaryID::kUnspecified, PrimaryID::kBT470M,

View File

@ -13,6 +13,8 @@
#include <stdint.h>
#include <string>
#include "absl/types/optional.h"
#include "api/video/hdr_metadata.h"
#include "rtc_base/system/rtc_export.h"
@ -101,7 +103,7 @@ class RTC_EXPORT ColorSpace {
kInvalid = 0,
// Limited Rec. 709 color range with RGB values ranging from 16 to 235.
kLimited = 1,
// Full RGB color range with RGB valees from 0 to 255.
// Full RGB color range with RGB values from 0 to 255.
kFull = 2,
// Range is defined by MatrixCoefficients/TransferCharacteristics.
kDerived = 3,
@ -155,6 +157,7 @@ class RTC_EXPORT ColorSpace {
ChromaSiting chroma_siting_horizontal() const;
ChromaSiting chroma_siting_vertical() const;
const HdrMetadata* hdr_metadata() const;
std::string AsString() const;
bool set_primaries_from_uint8(uint8_t enum_value);
bool set_transfer_from_uint8(uint8_t enum_value);

View File

@ -10,21 +10,7 @@
#include "api/video/video_content_type.h"
// VideoContentType stored as a single byte, which is sent over the network.
// Structure:
//
// 0 1 2 3 4 5 6 7
// +---------------+
// |r r e e e s s c|
//
// where:
// r - reserved bits.
// e - 3-bit number of an experiment group counted from 1. 0 means there's no
// experiment ongoing.
// s - 2-bit simulcast stream id or spatial layer, counted from 1. 0 means that
// no simulcast information is set.
// c - content type. 0 means real-time video, 1 means screenshare.
//
#include "rtc_base/checks.h"
namespace webrtc {
namespace videocontenttypehelpers {
@ -33,57 +19,21 @@ namespace {
static constexpr uint8_t kScreenshareBitsSize = 1;
static constexpr uint8_t kScreenshareBitsMask =
(1u << kScreenshareBitsSize) - 1;
static constexpr uint8_t kSimulcastShift = 1;
static constexpr uint8_t kSimulcastBitsSize = 2;
static constexpr uint8_t kSimulcastBitsMask = ((1u << kSimulcastBitsSize) - 1)
<< kSimulcastShift; // 0b00000110
static constexpr uint8_t kExperimentShift = 3;
static constexpr uint8_t kExperimentBitsSize = 3;
static constexpr uint8_t kExperimentBitsMask =
((1u << kExperimentBitsSize) - 1) << kExperimentShift; // 0b00111000
static constexpr uint8_t kTotalBitsSize =
kScreenshareBitsSize + kSimulcastBitsSize + kExperimentBitsSize;
} // namespace
bool SetExperimentId(VideoContentType* content_type, uint8_t experiment_id) {
// Store in bits 2-4.
if (experiment_id >= (1 << kExperimentBitsSize))
return false;
*content_type = static_cast<VideoContentType>(
(static_cast<uint8_t>(*content_type) & ~kExperimentBitsMask) |
((experiment_id << kExperimentShift) & kExperimentBitsMask));
return true;
}
bool SetSimulcastId(VideoContentType* content_type, uint8_t simulcast_id) {
// Store in bits 5-6.
if (simulcast_id >= (1 << kSimulcastBitsSize))
return false;
*content_type = static_cast<VideoContentType>(
(static_cast<uint8_t>(*content_type) & ~kSimulcastBitsMask) |
((simulcast_id << kSimulcastShift) & kSimulcastBitsMask));
return true;
}
uint8_t GetExperimentId(const VideoContentType& content_type) {
return (static_cast<uint8_t>(content_type) & kExperimentBitsMask) >>
kExperimentShift;
}
uint8_t GetSimulcastId(const VideoContentType& content_type) {
return (static_cast<uint8_t>(content_type) & kSimulcastBitsMask) >>
kSimulcastShift;
}
bool IsScreenshare(const VideoContentType& content_type) {
// Ensure no bits apart from the screenshare bit are set.
// This CHECK is a temporary measure to detect code that introduces
// values according to old versions.
RTC_CHECK((static_cast<uint8_t>(content_type) & ~kScreenshareBitsMask) == 0);
return (static_cast<uint8_t>(content_type) & kScreenshareBitsMask) > 0;
}
bool IsValidContentType(uint8_t value) {
// Any 6-bit value is allowed.
return value < (1 << kTotalBitsSize);
// Only the screenshare bit is allowed.
// However, due to previous usage of the next 5 bits, we allow
// the lower 6 bits to be set.
return value < (1 << 6);
}
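
With the simulcast/experiment accessors gone, consuming the header extension reduces to a sketch like this (`ParseContentType` is a hypothetical helper, not part of the API):

#include <cstdint>

#include "api/video/video_content_type.h"

bool ParseContentType(uint8_t wire_value, webrtc::VideoContentType* out) {
  // Old senders may still set the legacy simulcast/experiment bits, so any
  // lower-6-bit value is accepted from the network.
  if (!webrtc::videocontenttypehelpers::IsValidContentType(wire_value))
    return false;
  // Keep only the screenshare bit; the rest is ignored.
  *out = static_cast<webrtc::VideoContentType>(wire_value & 0x01);
  return true;
}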
const char* ToString(const VideoContentType& content_type) {

View File

@ -15,18 +15,15 @@
namespace webrtc {
// VideoContentType stored as a single byte, which is sent over the network
// in the rtp-hdrext/video-content-type extension.
// Only the lowest bit is used, per the enum.
enum class VideoContentType : uint8_t {
UNSPECIFIED = 0,
SCREENSHARE = 1,
};
namespace videocontenttypehelpers {
bool SetExperimentId(VideoContentType* content_type, uint8_t experiment_id);
bool SetSimulcastId(VideoContentType* content_type, uint8_t simulcast_id);
uint8_t GetExperimentId(const VideoContentType& content_type);
uint8_t GetSimulcastId(const VideoContentType& content_type);
bool IsScreenshare(const VideoContentType& content_type);
bool IsValidContentType(uint8_t value);

View File

@ -10,7 +10,10 @@
#include "api/video/video_timing.h"
#include <algorithm>
#include "api/array_view.h"
#include "api/units/time_delta.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/strings/string_builder.h"
@ -25,6 +28,14 @@ uint16_t VideoSendTiming::GetDeltaCappedMs(int64_t base_ms, int64_t time_ms) {
return rtc::saturated_cast<uint16_t>(time_ms - base_ms);
}
uint16_t VideoSendTiming::GetDeltaCappedMs(TimeDelta delta) {
if (delta < TimeDelta::Zero()) {
RTC_DLOG(LS_ERROR) << "Delta " << delta.ms()
<< "ms expected to be positive";
}
return rtc::saturated_cast<uint16_t>(delta.ms());
}
TimingFrameInfo::TimingFrameInfo()
: rtp_timestamp(0),
capture_time_ms(-1),
@ -89,4 +100,23 @@ std::string TimingFrameInfo::ToString() const {
return sb.str();
}
VideoPlayoutDelay::VideoPlayoutDelay(TimeDelta min, TimeDelta max)
: min_(std::clamp(min, TimeDelta::Zero(), kMax)),
max_(std::clamp(max, min_, kMax)) {
if (!(TimeDelta::Zero() <= min && min <= max && max <= kMax)) {
RTC_LOG(LS_ERROR) << "Invalid video playout delay: [" << min << "," << max
<< "]. Clamped to [" << this->min() << "," << this->max()
<< "]";
}
}
bool VideoPlayoutDelay::Set(TimeDelta min, TimeDelta max) {
if (TimeDelta::Zero() <= min && min <= max && max <= kMax) {
min_ = min;
max_ = max;
return true;
}
return false;
}
} // namespace webrtc
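
The new TimeDelta overload of GetDeltaCappedMs() mirrors the int64_t version; a sketch of the saturation behavior (function name illustrative):

#include <cstdint>

#include "api/units/time_delta.h"
#include "api/video/video_timing.h"

void DeltaSketch() {
  using webrtc::TimeDelta;
  using webrtc::VideoSendTiming;
  // Fits in 16 bits: passes through unchanged.
  uint16_t small = VideoSendTiming::GetDeltaCappedMs(TimeDelta::Millis(250));
  // 120000 ms does not fit: saturates to 0xFFFF instead of wrapping.
  uint16_t capped = VideoSendTiming::GetDeltaCappedMs(TimeDelta::Seconds(120));
  (void)small;   // 250
  (void)capped;  // 65535
}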

View File

@ -16,11 +16,14 @@
#include <limits>
#include <string>
#include "api/units/time_delta.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Video timing timestamps in ms counted from capture_time_ms of a frame.
// This structure represents data sent in video-timing RTP header extension.
struct VideoSendTiming {
struct RTC_EXPORT VideoSendTiming {
enum TimingFrameFlags : uint8_t {
kNotTriggered = 0, // Timing info valid, but not to be transmitted.
// Used on send-side only.
@ -34,6 +37,7 @@ struct VideoSendTiming {
// https://webrtc.org/experiments/rtp-hdrext/video-timing/ extension stores
// 16-bit deltas of timestamps from packet capture time.
static uint16_t GetDeltaCappedMs(int64_t base_ms, int64_t time_ms);
static uint16_t GetDeltaCappedMs(TimeDelta delta);
uint16_t encode_start_delta_ms;
uint16_t encode_finish_delta_ms;
@ -41,21 +45,21 @@ struct VideoSendTiming {
uint16_t pacer_exit_delta_ms;
uint16_t network_timestamp_delta_ms;
uint16_t network2_timestamp_delta_ms;
uint8_t flags;
uint8_t flags = TimingFrameFlags::kInvalid;
};
// Used to report precise timings of 'timing frames'. Contains all important
// timestamps for the lifetime of that specific frame. Reported as a string via
// GetStats(). Only the frame which took the longest between two GetStats calls
// is reported.
struct TimingFrameInfo {
struct RTC_EXPORT TimingFrameInfo {
TimingFrameInfo();
// Returns end-to-end delay of a frame, if sender and receiver timestamps are
// synchronized, -1 otherwise.
int64_t EndToEndDelay() const;
// Returns true if current frame took longer to process than |other| frame.
// Returns true if current frame took longer to process than `other` frame.
// If other frame's clocks are not synchronized, current frame is always
// preferred.
bool IsLongerThan(const TimingFrameInfo& other) const;
@ -103,26 +107,43 @@ struct TimingFrameInfo {
// Minimum and maximum playout delay values from capture to render.
// These are best effort values.
//
// A value < 0 indicates no change from previous valid value.
//
// min = max = 0 indicates that the receiver should try to render the
// frame as soon as possible.
//
// min = x, max = y indicates that the receiver is free to adapt
// in the range (x, y) based on network jitter.
struct VideoPlayoutDelay {
VideoPlayoutDelay() = default;
VideoPlayoutDelay(int min_ms, int max_ms) : min_ms(min_ms), max_ms(max_ms) {}
int min_ms = -1;
int max_ms = -1;
// This class ensures invariant 0 <= min <= max <= kMax.
class RTC_EXPORT VideoPlayoutDelay {
public:
// Maximum supported value for the delay limit.
static constexpr TimeDelta kMax = TimeDelta::Millis(10) * 0xFFF;
bool operator==(const VideoPlayoutDelay& rhs) const {
return min_ms == rhs.min_ms && max_ms == rhs.max_ms;
// Creates delay limits that indicate the receiver should try to render the
// frame as soon as possible.
static VideoPlayoutDelay Minimal() {
return VideoPlayoutDelay(TimeDelta::Zero(), TimeDelta::Zero());
}
};
// TODO(bugs.webrtc.org/7660): Old name, delete after downstream use is updated.
using PlayoutDelay = VideoPlayoutDelay;
// Creates valid, but unspecified limits.
VideoPlayoutDelay() = default;
VideoPlayoutDelay(const VideoPlayoutDelay&) = default;
VideoPlayoutDelay& operator=(const VideoPlayoutDelay&) = default;
VideoPlayoutDelay(TimeDelta min, TimeDelta max);
bool Set(TimeDelta min, TimeDelta max);
TimeDelta min() const { return min_; }
TimeDelta max() const { return max_; }
friend bool operator==(const VideoPlayoutDelay& lhs,
const VideoPlayoutDelay& rhs) {
return lhs.min_ == rhs.min_ && lhs.max_ == rhs.max_;
}
private:
TimeDelta min_ = TimeDelta::Zero();
TimeDelta max_ = kMax;
};
} // namespace webrtc
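
A sketch of the invariant handling in the new class, per the constructor and Set() above (values arbitrary):

#include "api/units/time_delta.h"
#include "api/video/video_timing.h"

void PlayoutDelaySketch() {
  using webrtc::TimeDelta;
  using webrtc::VideoPlayoutDelay;
  // The constructor clamps into [0, kMax] and logs an error: min becomes 0.
  VideoPlayoutDelay clamped(TimeDelta::Millis(-5), TimeDelta::Millis(100));
  // clamped.min() == 0 ms, clamped.max() == 100 ms.
  // Set() rejects invalid input instead of clamping.
  VideoPlayoutDelay delay;
  bool ok = delay.Set(TimeDelta::Millis(200), TimeDelta::Millis(100));
  (void)ok;  // false: min > max, so `delay` is left unchanged.
}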

View File

@ -26,10 +26,11 @@ rtc_library("audio_frame_operations") {
"../../api/audio:audio_frame_api",
"../../common_audio",
"../../rtc_base:checks",
"../../rtc_base:deprecation",
"../../rtc_base:rtc_base_approved",
"../../rtc_base:logging",
"../../rtc_base:safe_conversions",
"../../system_wrappers:field_trial",
]
absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers" ]
}
if (rtc_include_tests) {
@ -44,7 +45,9 @@ if (rtc_include_tests) {
":audio_frame_operations",
"../../api/audio:audio_frame_api",
"../../rtc_base:checks",
"../../rtc_base:rtc_base_approved",
"../../rtc_base:logging",
"../../rtc_base:macromagic",
"../../rtc_base:stringutils",
"../../test:field_trial",
"../../test:test_support",
"//testing/gtest",

View File

@ -131,7 +131,7 @@ void AudioFrameOperations::DownmixChannels(const int16_t* src_audio,
return;
}
RTC_NOTREACHED() << "src_channels: " << src_channels
RTC_DCHECK_NOTREACHED() << "src_channels: " << src_channels
<< ", dst_channels: " << dst_channels;
}
@ -149,7 +149,7 @@ void AudioFrameOperations::DownmixChannels(size_t dst_channels,
int err = QuadToStereo(frame);
RTC_DCHECK_EQ(err, 0);
} else {
RTC_NOTREACHED() << "src_channels: " << frame->num_channels_
RTC_DCHECK_NOTREACHED() << "src_channels: " << frame->num_channels_
<< ", dst_channels: " << dst_channels;
}
}
@ -169,10 +169,10 @@ void AudioFrameOperations::UpmixChannels(size_t target_number_of_channels,
if (!frame->muted()) {
// Up-mixing is done in place. Going backwards through the frame ensures
// nothing is irrevocably overwritten.
int16_t* frame_data = frame->mutable_data();
for (int i = frame->samples_per_channel_ - 1; i >= 0; i--) {
for (size_t j = 0; j < target_number_of_channels; ++j) {
frame->mutable_data()[target_number_of_channels * i + j] =
frame->data()[i];
frame_data[target_number_of_channels * i + j] = frame_data[i];
}
}
}
@ -222,14 +222,14 @@ void AudioFrameOperations::Mute(AudioFrame* frame,
size_t end = count;
float start_g = 0.0f;
if (current_frame_muted) {
// Fade out the last |count| samples of frame.
// Fade out the last `count` samples of frame.
RTC_DCHECK(!previous_frame_muted);
start = frame->samples_per_channel_ - count;
end = frame->samples_per_channel_;
start_g = 1.0f;
inc = -inc;
} else {
// Fade in the first |count| samples of frame.
// Fade in the first `count` samples of frame.
RTC_DCHECK(previous_frame_muted);
}

View File

@ -14,8 +14,8 @@
#include <stddef.h>
#include <stdint.h>
#include "absl/base/attributes.h"
#include "api/audio/audio_frame.h"
#include "rtc_base/deprecation.h"
namespace webrtc {
@ -24,38 +24,40 @@ namespace webrtc {
// than a class.
class AudioFrameOperations {
public:
// Add samples in |frame_to_add| with samples in |result_frame|
// putting the results in |results_frame|. The fields
// |vad_activity_| and |speech_type_| of the result frame are
// updated. If |result_frame| is empty (|samples_per_channel_|==0),
// the samples in |frame_to_add| are added to it. The number of
// Add samples in `frame_to_add` with samples in `result_frame`
// putting the results in `result_frame`. The fields
// `vad_activity_` and `speech_type_` of the result frame are
// updated. If `result_frame` is empty (`samples_per_channel_`==0),
// the samples in `frame_to_add` are added to it. The number of
// channels and number of samples per channel must match except when
// |result_frame| is empty.
// `result_frame` is empty.
static void Add(const AudioFrame& frame_to_add, AudioFrame* result_frame);
// |frame.num_channels_| will be updated. This version checks for sufficient
// buffer size and that |num_channels_| is mono. Use UpmixChannels
// `frame.num_channels_` will be updated. This version checks for sufficient
// buffer size and that `num_channels_` is mono. Use UpmixChannels
// instead. TODO(bugs.webrtc.org/8649): remove.
RTC_DEPRECATED static int MonoToStereo(AudioFrame* frame);
ABSL_DEPRECATED("bugs.webrtc.org/8649")
static int MonoToStereo(AudioFrame* frame);
// |frame.num_channels_| will be updated. This version checks that
// |num_channels_| is stereo. Use DownmixChannels
// `frame.num_channels_` will be updated. This version checks that
// `num_channels_` is stereo. Use DownmixChannels
// instead. TODO(bugs.webrtc.org/8649): remove.
RTC_DEPRECATED static int StereoToMono(AudioFrame* frame);
ABSL_DEPRECATED("bugs.webrtc.org/8649")
static int StereoToMono(AudioFrame* frame);
// Downmixes 4 channels |src_audio| to stereo |dst_audio|. This is an in-place
// operation, meaning |src_audio| and |dst_audio| may point to the same
// Downmixes 4 channels `src_audio` to stereo `dst_audio`. This is an in-place
// operation, meaning `src_audio` and `dst_audio` may point to the same
// buffer.
static void QuadToStereo(const int16_t* src_audio,
size_t samples_per_channel,
int16_t* dst_audio);
// |frame.num_channels_| will be updated. This version checks that
// |num_channels_| is 4 channels.
// `frame.num_channels_` will be updated. This version checks that
// `num_channels_` is 4 channels.
static int QuadToStereo(AudioFrame* frame);
// Downmixes |src_channels| |src_audio| to |dst_channels| |dst_audio|.
// This is an in-place operation, meaning |src_audio| and |dst_audio|
// Downmixes `src_channels` `src_audio` to `dst_channels` `dst_audio`.
// This is an in-place operation, meaning `src_audio` and `dst_audio`
// may point to the same buffer. Supported channel combinations are
// Stereo to Mono, Quad to Mono, and Quad to Stereo.
static void DownmixChannels(const int16_t* src_audio,
@ -64,27 +66,27 @@ class AudioFrameOperations {
size_t dst_channels,
int16_t* dst_audio);
// |frame.num_channels_| will be updated. This version checks that
// |num_channels_| and |dst_channels| are valid and performs relevant downmix.
// `frame.num_channels_` will be updated. This version checks that
// `num_channels_` and `dst_channels` are valid and performs relevant downmix.
// Supported channel combinations are N channels to Mono, and Quad to Stereo.
static void DownmixChannels(size_t dst_channels, AudioFrame* frame);
// |frame.num_channels_| will be updated. This version checks that
// |num_channels_| and |dst_channels| are valid and performs relevant
// `frame.num_channels_` will be updated. This version checks that
// `num_channels_` and `dst_channels` are valid and performs relevant
// downmix. Supported channel combinations are Mono to N
// channels. The single channel is replicated.
static void UpmixChannels(size_t target_number_of_channels,
AudioFrame* frame);
// Swap the left and right channels of |frame|. Fails silently if |frame| is
// Swap the left and right channels of `frame`. Fails silently if `frame` is
// not stereo.
static void SwapStereoChannels(AudioFrame* frame);
// Conditionally zero out contents of |frame| for implementing audio mute:
// |previous_frame_muted| && |current_frame_muted| - Zero out whole frame.
// |previous_frame_muted| && !|current_frame_muted| - Fade-in at frame start.
// !|previous_frame_muted| && |current_frame_muted| - Fade-out at frame end.
// !|previous_frame_muted| && !|current_frame_muted| - Leave frame untouched.
// Conditionally zero out contents of `frame` for implementing audio mute:
// `previous_frame_muted` && `current_frame_muted` - Zero out whole frame.
// `previous_frame_muted` && !`current_frame_muted` - Fade-in at frame start.
// !`previous_frame_muted` && `current_frame_muted` - Fade-out at frame end.
// !`previous_frame_muted` && !`current_frame_muted` - Leave frame untouched.
static void Mute(AudioFrame* frame,
bool previous_frame_muted,
bool current_frame_muted);
@ -92,7 +94,7 @@ class AudioFrameOperations {
// Zero out contents of frame.
static void Mute(AudioFrame* frame);
// Halve samples in |frame|.
// Halve samples in `frame`.
static void ApplyHalfGain(AudioFrame* frame);
static int Scale(float left, float right, AudioFrame* frame);
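
A sketch of the in-place remixing entry points documented above (the frame is assumed to already hold valid samples; function name illustrative):

#include "api/audio/audio_frame.h"
#include "audio/utility/audio_frame_operations.h"

void RemixSketch(webrtc::AudioFrame* frame) {
  // Mix stereo or quad content down to mono in place;
  // frame->num_channels_ is updated by the call.
  webrtc::AudioFrameOperations::DownmixChannels(/*dst_channels=*/1, frame);
  // Replicate the mono channel back out to stereo.
  webrtc::AudioFrameOperations::UpmixChannels(/*target_number_of_channels=*/2,
                                              frame);
}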

View File

@ -48,8 +48,10 @@ rtc_library("common_audio") {
"../api:array_view",
"../rtc_base:checks",
"../rtc_base:gtest_prod",
"../rtc_base:rtc_base_approved",
"../rtc_base:logging",
"../rtc_base:safe_conversions",
"../rtc_base:sanitizer",
"../rtc_base:timeutils",
"../rtc_base/memory:aligned_malloc",
"../rtc_base/system:arch",
"../rtc_base/system:file_wrapper",
@ -180,7 +182,6 @@ rtc_library("common_audio_c") {
":common_audio_cc",
"../rtc_base:checks",
"../rtc_base:compile_assert_c",
"../rtc_base:rtc_base_approved",
"../rtc_base:sanitizer",
"../rtc_base/system:arch",
"../system_wrappers",
@ -196,7 +197,7 @@ rtc_library("common_audio_cc") {
]
deps = [
"../rtc_base:rtc_base_approved",
"../rtc_base:safe_conversions",
"../system_wrappers",
]
}
@ -205,7 +206,6 @@ rtc_source_set("sinc_resampler") {
sources = [ "resampler/sinc_resampler.h" ]
deps = [
"../rtc_base:gtest_prod",
"../rtc_base:rtc_base_approved",
"../rtc_base/memory:aligned_malloc",
"../rtc_base/system:arch",
"../system_wrappers",
@ -228,7 +228,6 @@ rtc_library("fir_filter_factory") {
deps = [
":fir_filter",
"../rtc_base:checks",
"../rtc_base:rtc_base_approved",
"../rtc_base/system:arch",
"../system_wrappers",
]
@ -257,7 +256,6 @@ if (current_cpu == "x86" || current_cpu == "x64") {
":fir_filter",
":sinc_resampler",
"../rtc_base:checks",
"../rtc_base:rtc_base_approved",
"../rtc_base/memory:aligned_malloc",
]
}
@ -282,7 +280,6 @@ if (current_cpu == "x86" || current_cpu == "x64") {
":fir_filter",
":sinc_resampler",
"../rtc_base:checks",
"../rtc_base:rtc_base_approved",
"../rtc_base/memory:aligned_malloc",
]
}
@ -307,7 +304,6 @@ if (rtc_build_with_neon) {
":fir_filter",
":sinc_resampler",
"../rtc_base:checks",
"../rtc_base:rtc_base_approved",
"../rtc_base/memory:aligned_malloc",
]
}
@ -329,13 +325,12 @@ if (rtc_build_with_neon) {
deps = [
":common_audio_c",
"../rtc_base:checks",
"../rtc_base:rtc_base_approved",
"../rtc_base/system:arch",
]
}
}
if (rtc_include_tests) {
if (rtc_include_tests && !build_with_chromium) {
rtc_test("common_audio_unittests") {
visibility += webrtc_default_visibility
testonly = true
@ -378,8 +373,10 @@ if (rtc_include_tests) {
":fir_filter_factory",
":sinc_resampler",
"../rtc_base:checks",
"../rtc_base:rtc_base_approved",
"../rtc_base:macromagic",
"../rtc_base:rtc_base_tests_utils",
"../rtc_base:stringutils",
"../rtc_base:timeutils",
"../rtc_base/system:arch",
"../system_wrappers",
"../test:fileutils",

View File

@ -15,12 +15,10 @@
#include <memory>
#include "rtc_base/constructor_magic.h"
namespace webrtc {
// Format conversion (remixing and resampling) for audio. Only simple remixing
// conversions are supported: downmix to mono (i.e. |dst_channels| == 1) or
// conversions are supported: downmix to mono (i.e. `dst_channels` == 1) or
// upmix from mono (i.e. `src_channels` == 1).
//
// The source and destination chunks have the same duration in time; specifying
@ -35,8 +33,11 @@ class AudioConverter {
size_t dst_frames);
virtual ~AudioConverter() {}
// Convert |src|, containing |src_size| samples, to |dst|, having a sample
// capacity of |dst_capacity|. Both point to a series of buffers containing
AudioConverter(const AudioConverter&) = delete;
AudioConverter& operator=(const AudioConverter&) = delete;
// Convert `src`, containing `src_size` samples, to `dst`, having a sample
// capacity of `dst_capacity`. Both point to a series of buffers containing
// the samples for each channel. The sizes must correspond to the format
// passed to Create().
virtual void Convert(const float* const* src,
@ -64,8 +65,6 @@ class AudioConverter {
const size_t src_frames_;
const size_t dst_channels_;
const size_t dst_frames_;
RTC_DISALLOW_COPY_AND_ASSIGN(AudioConverter);
};
} // namespace webrtc
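
A sketch of the Create()/Convert() contract described above, for 10 ms chunks of stereo 48 kHz down to mono 16 kHz (the exact factory signature is assumed from this header; buffer sizes per the comments):

#include <cstddef>
#include <memory>

#include "common_audio/audio_converter.h"

void ConvertSketch() {
  constexpr size_t kSrcFrames = 480;  // 10 ms at 48 kHz.
  constexpr size_t kDstFrames = 160;  // 10 ms at 16 kHz.
  std::unique_ptr<webrtc::AudioConverter> converter =
      webrtc::AudioConverter::Create(/*src_channels=*/2, kSrcFrames,
                                     /*dst_channels=*/1, kDstFrames);
  float src_left[kSrcFrames] = {};
  float src_right[kSrcFrames] = {};
  float dst_mono[kDstFrames];
  const float* src[] = {src_left, src_right};
  float* dst[] = {dst_mono};
  // src_size and dst_capacity count samples across all channels.
  converter->Convert(src, 2 * kSrcFrames, dst, 1 * kDstFrames);
}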

View File

@ -29,15 +29,15 @@ namespace webrtc {
//
// The buffer structure is showed below for a 2 channel and 2 bands case:
//
// |data_|:
// `data_`:
// { [ --- b1ch1 --- ] [ --- b2ch1 --- ] [ --- b1ch2 --- ] [ --- b2ch2 --- ] }
//
// The pointer arrays for the same example are as follows:
//
// |channels_|:
// `channels_`:
// { [ b1ch1* ] [ b1ch2* ] [ b2ch1* ] [ b2ch2* ] }
//
// |bands_|:
// `bands_`:
// { [ b1ch1* ] [ b2ch1* ] [ b1ch2* ] [ b2ch2* ] }
template <typename T>
class ChannelBuffer {
@ -81,15 +81,15 @@ class ChannelBuffer {
// If band is explicitly specified, the channels for a specific band are
// returned and the usage becomes: channels(band)[channel][sample].
// Where:
// 0 <= band < |num_bands_|
// 0 <= channel < |num_allocated_channels_|
// 0 <= sample < |num_frames_per_band_|
// 0 <= band < `num_bands_`
// 0 <= channel < `num_allocated_channels_`
// 0 <= sample < `num_frames_per_band_`
// If band is not explicitly specified, the full-band channels (or lower band
// channels) are returned and the usage becomes: channels()[channel][sample].
// Where:
// 0 <= channel < |num_allocated_channels_|
// 0 <= sample < |num_frames_|
// 0 <= channel < `num_allocated_channels_`
// 0 <= sample < `num_frames_`
const T* const* channels(size_t band = 0) const {
RTC_DCHECK_LT(band, num_bands_);
return &channels_[band * num_allocated_channels_];
@ -109,9 +109,9 @@ class ChannelBuffer {
// Usage:
// bands(channel)[band][sample].
// Where:
// 0 <= channel < |num_channels_|
// 0 <= band < |num_bands_|
// 0 <= sample < |num_frames_per_band_|
// 0 <= channel < `num_channels_`
// 0 <= band < `num_bands_`
// 0 <= sample < `num_frames_per_band_`
const T* const* bands(size_t channel) const {
RTC_DCHECK_LT(channel, num_channels_);
RTC_DCHECK_GE(channel, 0);
@ -129,8 +129,8 @@ class ChannelBuffer {
return bands_view_[channel];
}
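
The indexing rules above combine as in this sketch for the two-channel, two-band layout pictured earlier (constructor signature assumed from this header):

#include "common_audio/channel_buffer.h"

void IndexingSketch() {
  // 2 channels, 160 frames split into 2 bands of 80 frames each.
  webrtc::ChannelBuffer<float> buf(/*num_frames=*/160, /*num_channels=*/2,
                                   /*num_bands=*/2);
  // Band-major write: band 1 of channel 0, sample 3...
  buf.channels(1)[0][3] = 0.5f;
  // ...aliases the channel-major view: channel 0, band 1, sample 3.
  float same = buf.bands(0)[1][3];
  (void)same;  // 0.5f
}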
// Sets the |slice| pointers to the |start_frame| position for each channel.
// Returns |slice| for convenience.
// Sets the `slice` pointers to the `start_frame` position for each channel.
// Returns `slice` for convenience.
const T* const* Slice(T** slice, size_t start_frame) const {
RTC_DCHECK_LT(start_frame, num_frames_);
for (size_t i = 0; i < num_channels_; ++i)

View File

@ -20,8 +20,8 @@ class FIRFilter {
public:
virtual ~FIRFilter() {}
// Filters the |in| data supplied.
// |out| must be previously allocated and it must be at least of |length|.
// Filters the `in` data supplied.
// `out` must be previously allocated and it must be at least of `length`.
virtual void Filter(const float* in, size_t length, float* out) = 0;
};

View File

@ -52,7 +52,7 @@ void FIRFilterAVX2::Filter(const float* in, size_t length, float* out) {
memcpy(&state_[state_length_], in, length * sizeof(*in));
// Convolves the input signal |in| with the filter kernel |coefficients_|
// Convolves the input signal `in` with the filter kernel `coefficients_`
// taking into account the previous state.
for (size_t i = 0; i < length; ++i) {
float* in_ptr = &state_[i];

View File

@ -34,7 +34,7 @@ FIRFilterC::FIRFilterC(const float* coefficients, size_t coefficients_length)
void FIRFilterC::Filter(const float* in, size_t length, float* out) {
RTC_DCHECK_GT(length, 0);
// Convolves the input signal |in| with the filter kernel |coefficients_|
// Convolves the input signal `in` with the filter kernel `coefficients_`
// taking into account the previous state.
for (size_t i = 0; i < length; ++i) {
out[i] = 0.f;

View File

@ -28,7 +28,7 @@ FIRFilter* CreateFirFilter(const float* coefficients,
size_t coefficients_length,
size_t max_input_length) {
if (!coefficients || coefficients_length <= 0 || max_input_length <= 0) {
RTC_NOTREACHED();
RTC_DCHECK_NOTREACHED();
return nullptr;
}

View File

@ -20,7 +20,7 @@ class FIRFilter;
// Creates a filter with the given coefficients. All initial state values will
// be zeros.
// The length of the chunks fed to the filter should never be greater than
// |max_input_length|. This is needed because, when vectorizing it is
// `max_input_length`. This is needed because, when vectorizing it is
// necessary to concatenate the input after the state, and resizing this array
// dynamically is expensive.
FIRFilter* CreateFirFilter(const float* coefficients,

View File

@ -48,7 +48,7 @@ void FIRFilterNEON::Filter(const float* in, size_t length, float* out) {
memcpy(&state_[state_length_], in, length * sizeof(*in));
// Convolves the input signal |in| with the filter kernel |coefficients_|
// Convolves the input signal `in` with the filter kernel `coefficients_`
// taking into account the previous state.
for (size_t i = 0; i < length; ++i) {
float* in_ptr = &state_[i];

View File

@ -49,7 +49,7 @@ void FIRFilterSSE2::Filter(const float* in, size_t length, float* out) {
memcpy(&state_[state_length_], in, length * sizeof(*in));
// Convolves the input signal |in| with the filter kernel |coefficients_|
// Convolves the input signal `in` with the filter kernel `coefficients_`
// taking into account the previous state.
for (size_t i = 0; i < length; ++i) {
float* in_ptr = &state_[i];

View File

@ -91,9 +91,9 @@ inline float FloatS16ToDbfs(float v) {
return 20.0f * std::log10(v) + kMinDbfs;
}
// Copy audio from |src| channels to |dest| channels unless |src| and |dest|
// point to the same address. |src| and |dest| must have the same number of
// channels, and there must be sufficient space allocated in |dest|.
// Copy audio from `src` channels to `dest` channels unless `src` and `dest`
// point to the same address. `src` and `dest` must have the same number of
// channels, and there must be sufficient space allocated in `dest`.
template <typename T>
void CopyAudioIfNeeded(const T* const* src,
int num_frames,
@ -106,9 +106,9 @@ void CopyAudioIfNeeded(const T* const* src,
}
}
// Deinterleave audio from |interleaved| to the channel buffers pointed to
// by |deinterleaved|. There must be sufficient space allocated in the
// |deinterleaved| buffers (|num_channel| buffers with |samples_per_channel|
// Deinterleave audio from `interleaved` to the channel buffers pointed to
// by `deinterleaved`. There must be sufficient space allocated in the
// `deinterleaved` buffers (`num_channel` buffers with `samples_per_channel`
// per buffer).
template <typename T>
void Deinterleave(const T* interleaved,
@ -125,9 +125,9 @@ void Deinterleave(const T* interleaved,
}
}
// Interleave audio from the channel buffers pointed to by |deinterleaved| to
// |interleaved|. There must be sufficient space allocated in |interleaved|
// (|samples_per_channel| * |num_channels|).
// Interleave audio from the channel buffers pointed to by `deinterleaved` to
// `interleaved`. There must be sufficient space allocated in `interleaved`
// (`samples_per_channel` * `num_channels`).
template <typename T>
void Interleave(const T* const* deinterleaved,
size_t samples_per_channel,
@ -143,9 +143,9 @@ void Interleave(const T* const* deinterleaved,
}
}
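
A round-trip sketch of the helpers above, with the buffer-size requirements spelled out (parameter order assumed from this header):

#include <cstddef>
#include <cstdint>

#include "common_audio/include/audio_util.h"

void RoundTripSketch() {
  constexpr size_t kSamplesPerChannel = 4;
  constexpr size_t kNumChannels = 2;
  // Interleaved L/R pairs; sufficient space is 4 * 2 samples.
  int16_t interleaved[kSamplesPerChannel * kNumChannels] = {0, 100, 1, 101,
                                                            2, 102, 3, 103};
  int16_t left[kSamplesPerChannel];
  int16_t right[kSamplesPerChannel];
  int16_t* deinterleaved[] = {left, right};
  webrtc::Deinterleave(interleaved, kSamplesPerChannel, kNumChannels,
                       deinterleaved);
  // left == {0, 1, 2, 3}, right == {100, 101, 102, 103}.
  webrtc::Interleave(deinterleaved, kSamplesPerChannel, kNumChannels,
                     interleaved);
}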
// Copies audio from a single channel buffer pointed to by |mono| to each
// channel of |interleaved|. There must be sufficient space allocated in
// |interleaved| (|samples_per_channel| * |num_channels|).
// Copies audio from a single channel buffer pointed to by `mono` to each
// channel of `interleaved`. There must be sufficient space allocated in
// `interleaved` (`samples_per_channel` * `num_channels`).
template <typename T>
void UpmixMonoToInterleaved(const T* mono,
int num_frames,

View File

@ -50,7 +50,7 @@ class RealFourier {
// output (i.e. |2^order / 2 + 1|).
static size_t ComplexLength(int order);
// Buffer allocation helpers. The buffers are large enough to hold |count|
// Buffer allocation helpers. The buffers are large enough to hold `count`
// floats/complexes and suitably aligned for use by the implementation.
// The returned scopers are set up with proper deleters; the caller owns
// the allocated memory.

View File

@ -20,42 +20,6 @@
#include "rtc_base/checks.h"
namespace webrtc {
namespace {
// These checks were factored out into a non-templatized function
// due to problems with clang on Windows in debug builds.
// For some reason having the DCHECKs inline in the template code
// caused the compiler to generate code that threw off the linker.
// TODO(tommi): Re-enable when we've figured out what the problem is.
// http://crbug.com/615050
void CheckValidInitParams(int src_sample_rate_hz,
int dst_sample_rate_hz,
size_t num_channels) {
// The below checks are temporarily disabled on WEBRTC_WIN due to problems
// with clang debug builds.
#if !defined(WEBRTC_WIN) && defined(__clang__)
RTC_DCHECK_GT(src_sample_rate_hz, 0);
RTC_DCHECK_GT(dst_sample_rate_hz, 0);
RTC_DCHECK_GT(num_channels, 0);
#endif
}
void CheckExpectedBufferSizes(size_t src_length,
size_t dst_capacity,
size_t num_channels,
int src_sample_rate,
int dst_sample_rate) {
// The below checks are temporarily disabled on WEBRTC_WIN due to problems
// with clang debug builds.
// TODO(tommi): Re-enable when we've figured out what the problem is.
// http://crbug.com/615050
#if !defined(WEBRTC_WIN) && defined(__clang__)
const size_t src_size_10ms = src_sample_rate * num_channels / 100;
const size_t dst_size_10ms = dst_sample_rate * num_channels / 100;
RTC_DCHECK_EQ(src_length, src_size_10ms);
RTC_DCHECK_GE(dst_capacity, dst_size_10ms);
#endif
}
} // namespace
template <typename T>
PushResampler<T>::PushResampler()
@ -68,7 +32,11 @@ template <typename T>
int PushResampler<T>::InitializeIfNeeded(int src_sample_rate_hz,
int dst_sample_rate_hz,
size_t num_channels) {
CheckValidInitParams(src_sample_rate_hz, dst_sample_rate_hz, num_channels);
// These checks used to be factored out of this template function due to
// Windows debug build issues with clang. http://crbug.com/615050
RTC_DCHECK_GT(src_sample_rate_hz, 0);
RTC_DCHECK_GT(dst_sample_rate_hz, 0);
RTC_DCHECK_GT(num_channels, 0);
if (src_sample_rate_hz == src_sample_rate_hz_ &&
dst_sample_rate_hz == dst_sample_rate_hz_ &&
@ -109,8 +77,12 @@ int PushResampler<T>::Resample(const T* src,
size_t src_length,
T* dst,
size_t dst_capacity) {
CheckExpectedBufferSizes(src_length, dst_capacity, num_channels_,
src_sample_rate_hz_, dst_sample_rate_hz_);
// These checks used to be factored out of this template function due to
// Windows debug build issues with clang. http://crbug.com/615050
const size_t src_size_10ms = (src_sample_rate_hz_ / 100) * num_channels_;
const size_t dst_size_10ms = (dst_sample_rate_hz_ / 100) * num_channels_;
RTC_DCHECK_EQ(src_length, src_size_10ms);
RTC_DCHECK_GE(dst_capacity, dst_size_10ms);
if (src_sample_rate_hz_ == dst_sample_rate_hz_) {
// The old resampler provides this memcpy facility in the case of matching

View File

@ -63,12 +63,12 @@ size_t PushSincResampler::Resample(const float* source,
// request through Run().
//
// If this wasn't done, SincResampler would call Run() twice on the first
// pass, and we'd have to introduce an entire |source_frames| of delay, rather
// pass, and we'd have to introduce an entire `source_frames` of delay, rather
// than the minimum half kernel.
//
// It works out that ChunkSize() is exactly the amount of output we need to
// request in order to prime the buffer with a single Run() request for
// |source_frames|.
// `source_frames`.
if (first_pass_)
resampler_->Resample(resampler_->ChunkSize(), destination);

View File

@ -17,7 +17,6 @@
#include <memory>
#include "common_audio/resampler/sinc_resampler.h"
#include "rtc_base/constructor_magic.h"
namespace webrtc {
@ -33,11 +32,14 @@ class PushSincResampler : public SincResamplerCallback {
PushSincResampler(size_t source_frames, size_t destination_frames);
~PushSincResampler() override;
// Perform the resampling. |source_frames| must always equal the
// |source_frames| provided at construction. |destination_capacity| must be
// at least as large as |destination_frames|. Returns the number of samples
PushSincResampler(const PushSincResampler&) = delete;
PushSincResampler& operator=(const PushSincResampler&) = delete;
// Perform the resampling. `source_frames` must always equal the
// `source_frames` provided at construction. `destination_capacity` must be
// at least as large as `destination_frames`. Returns the number of samples
// provided in destination (for convenience, since this will always be equal
// to |destination_frames|).
// to `destination_frames`).
size_t Resample(const int16_t* source,
size_t source_frames,
int16_t* destination,
@ -72,8 +74,6 @@ class PushSincResampler : public SincResamplerCallback {
// Used to assert we are only requested for as much data as is available.
size_t source_available_;
RTC_DISALLOW_COPY_AND_ASSIGN(PushSincResampler);
};
} // namespace webrtc
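
A sketch of the fixed-chunk push API per the comments above, sized for 10 ms at 48 kHz in and 16 kHz out (function name illustrative):

#include <cstddef>
#include <cstdint>

#include "common_audio/resampler/push_sinc_resampler.h"

void PushSketch() {
  constexpr size_t kInFrames = 480;   // 10 ms at 48 kHz.
  constexpr size_t kOutFrames = 160;  // 10 ms at 16 kHz.
  webrtc::PushSincResampler resampler(kInFrames, kOutFrames);
  int16_t in[kInFrames] = {};
  int16_t out[kOutFrames];
  // source_frames must always equal the value given at construction; the
  // return value always equals kOutFrames.
  size_t produced = resampler.Resample(in, kInFrames, out, kOutFrames);
  (void)produced;
}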

View File

@ -916,7 +916,6 @@ int Resampler::Push(const int16_t* samplesIn,
outLen = (lengthIn * 8) / 11;
free(tmp_mem);
return 0;
break;
}
return 0;
}

View File

@ -80,7 +80,7 @@
// 8) Else, if we're not on the second load, goto (4).
//
// Note: we're glossing over how the sub-sample handling works with
// |virtual_source_idx_|, etc.
// `virtual_source_idx_`, etc.
// MSVC++ requires this to be set before any other includes to get M_PI.
#define _USE_MATH_DEFINES
@ -102,7 +102,7 @@ namespace webrtc {
namespace {
double SincScaleFactor(double io_ratio) {
// |sinc_scale_factor| is basically the normalized cutoff frequency of the
// `sinc_scale_factor` is basically the normalized cutoff frequency of the
// low-pass filter.
double sinc_scale_factor = io_ratio > 1.0 ? 1.0 / io_ratio : 1.0;
@ -126,8 +126,8 @@ void SincResampler::InitializeCPUSpecificFeatures() {
#if defined(WEBRTC_HAS_NEON)
convolve_proc_ = Convolve_NEON;
#elif defined(WEBRTC_ARCH_X86_FAMILY)
// Using AVX2 instead of SSE2 when AVX2 supported.
if (GetCPUInfo(kAVX2))
// Using AVX2 instead of SSE2 when AVX2/FMA3 supported.
if (GetCPUInfo(kAVX2) && GetCPUInfo(kFMA3))
convolve_proc_ = Convolve_AVX2;
else if (GetCPUInfo(kSSE2))
convolve_proc_ = Convolve_SSE;
@ -238,7 +238,7 @@ void SincResampler::SetRatio(double io_sample_rate_ratio) {
io_sample_rate_ratio_ = io_sample_rate_ratio;
// Optimize reinitialization by reusing values which are independent of
// |sinc_scale_factor|. Provides a 3x speedup.
// `sinc_scale_factor`. Provides a 3x speedup.
const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
for (size_t offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
for (size_t i = 0; i < kKernelSize; ++i) {
@ -268,8 +268,8 @@ void SincResampler::Resample(size_t frames, float* destination) {
const double current_io_ratio = io_sample_rate_ratio_;
const float* const kernel_ptr = kernel_storage_.get();
while (remaining_frames) {
// |i| may be negative if the last Resample() call ended on an iteration
// that put |virtual_source_idx_| over the limit.
// `i` may be negative if the last Resample() call ended on an iteration
// that put `virtual_source_idx_` over the limit.
//
// Note: The loop construct here can severely impact performance on ARM
// or when built with clang. See https://codereview.chromium.org/18566009/
@ -278,7 +278,7 @@ void SincResampler::Resample(size_t frames, float* destination) {
i > 0; --i) {
RTC_DCHECK_LT(virtual_source_idx_, block_size_);
// |virtual_source_idx_| lies in between two kernel offsets so figure out
// `virtual_source_idx_` lies in between two kernel offsets so figure out
// what they are.
const int source_idx = static_cast<int>(virtual_source_idx_);
const double subsample_remainder = virtual_source_idx_ - source_idx;
@ -288,16 +288,16 @@ void SincResampler::Resample(size_t frames, float* destination) {
const int offset_idx = static_cast<int>(virtual_offset_idx);
// We'll compute "convolutions" for the two kernels which straddle
// |virtual_source_idx_|.
// `virtual_source_idx_`.
const float* const k1 = kernel_ptr + offset_idx * kKernelSize;
const float* const k2 = k1 + kKernelSize;
// Ensure |k1|, |k2| are 32-byte aligned for SIMD usage. Should always be
// Ensure `k1`, `k2` are 32-byte aligned for SIMD usage. Should always be
// true so long as kKernelSize is a multiple of 32.
RTC_DCHECK_EQ(0, reinterpret_cast<uintptr_t>(k1) % 32);
RTC_DCHECK_EQ(0, reinterpret_cast<uintptr_t>(k2) % 32);
// Initialize input pointer based on quantized |virtual_source_idx_|.
// Initialize input pointer based on quantized `virtual_source_idx_`.
const float* const input_ptr = r1_ + source_idx;
// Figure out how much to weight each kernel's "convolution".
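
A scalar sketch of the convolve-and-interpolate step performed here (simplified from the portable Convolve_C path; kernel_size stands in for the kKernelSize constant):

#include <cstddef>

// Convolve the input against the two bracketing kernels, then linearly
// interpolate the two sums by the sub-sample distance between offsets.
float ConvolveAndInterpolate(const float* input_ptr, const float* k1,
                             const float* k2, size_t kernel_size,
                             double kernel_interpolation_factor) {
  float sum1 = 0.0f;
  float sum2 = 0.0f;
  for (size_t i = 0; i < kernel_size; ++i) {
    sum1 += input_ptr[i] * k1[i];
    sum2 += input_ptr[i] * k2[i];
  }
  return static_cast<float>((1.0 - kernel_interpolation_factor) * sum1 +
                            kernel_interpolation_factor * sum2);
}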

View File

@ -18,15 +18,14 @@
#include <memory>
#include "rtc_base/constructor_magic.h"
#include "rtc_base/gtest_prod_util.h"
#include "rtc_base/memory/aligned_malloc.h"
#include "rtc_base/system/arch.h"
namespace webrtc {
// Callback class for providing more data into the resampler. Expects |frames|
// of data to be rendered into |destination|; zero padded if not enough frames
// Callback class for providing more data into the resampler. Expects `frames`
// of data to be rendered into `destination`; zero padded if not enough frames
// are available to satisfy the request.
class SincResamplerCallback {
public:
@ -53,10 +52,10 @@ class SincResampler {
static const size_t kKernelStorageSize =
kKernelSize * (kKernelOffsetCount + 1);
// Constructs a SincResampler with the specified |read_cb|, which is used to
// acquire audio data for resampling. |io_sample_rate_ratio| is the ratio
// of input / output sample rates. |request_frames| controls the size in
// frames of the buffer requested by each |read_cb| call. The value must be
// Constructs a SincResampler with the specified `read_cb`, which is used to
// acquire audio data for resampling. `io_sample_rate_ratio` is the ratio
// of input / output sample rates. `request_frames` controls the size in
// frames of the buffer requested by each `read_cb` call. The value must be
// greater than kKernelSize. Specify kDefaultRequestSize if there are no
// request size constraints.
SincResampler(double io_sample_rate_ratio,
@ -64,11 +63,14 @@ class SincResampler {
SincResamplerCallback* read_cb);
virtual ~SincResampler();
// Resample |frames| of data from |read_cb_| into |destination|.
SincResampler(const SincResampler&) = delete;
SincResampler& operator=(const SincResampler&) = delete;
// Resample `frames` of data from `read_cb_` into `destination`.
void Resample(size_t frames, float* destination);
// The maximum size in frames that guarantees Resample() will only make a
// single call to |read_cb_| for more data.
// single call to `read_cb_` for more data.
size_t ChunkSize() const;
size_t request_frames() const { return request_frames_; }
@ -77,12 +79,12 @@ class SincResampler {
// not call while Resample() is in progress.
void Flush();
// Update |io_sample_rate_ratio_|. SetRatio() will cause a reconstruction of
// Update `io_sample_rate_ratio_`. SetRatio() will cause a reconstruction of
// the kernels used for resampling. Not thread safe, do not call while
// Resample() is in progress.
//
// TODO(ajm): Use this in PushSincResampler rather than reconstructing
// SincResampler. We would also need a way to update |request_frames_|.
// SincResampler. We would also need a way to update `request_frames_`.
void SetRatio(double io_sample_rate_ratio);
float* get_kernel_for_testing() { return kernel_storage_.get(); }
@ -97,11 +99,11 @@ class SincResampler {
// Selects runtime specific CPU features like SSE. Must be called before
// using SincResampler.
// TODO(ajm): Currently managed by the class internally. See the note with
// |convolve_proc_| below.
// `convolve_proc_` below.
void InitializeCPUSpecificFeatures();
// Compute convolution of |k1| and |k2| over |input_ptr|, resultant sums are
// linearly interpolated using |kernel_interpolation_factor|. On x86 and ARM
// Compute convolution of `k1` and `k2` over `input_ptr`, resultant sums are
// linearly interpolated using `kernel_interpolation_factor`. On x86 and ARM
// the underlying implementation is chosen at run time.
static float Convolve_C(const float* input_ptr,
const float* k1,
@ -136,7 +138,7 @@ class SincResampler {
// Source of data for resampling.
SincResamplerCallback* read_cb_;
// The size (in samples) to request from each |read_cb_| execution.
// The size (in samples) to request from each `read_cb_` execution.
const size_t request_frames_;
// The number of source frames processed per pass.
@ -165,15 +167,13 @@ class SincResampler {
double);
ConvolveProc convolve_proc_;
// Pointers to the various regions inside |input_buffer_|. See the diagram at
// Pointers to the various regions inside `input_buffer_`. See the diagram at
// the top of the .cc file for more information.
float* r0_;
float* const r1_;
float* const r2_;
float* r3_;
float* r4_;
RTC_DISALLOW_COPY_AND_ASSIGN(SincResampler);
};
} // namespace webrtc

View File

@ -25,7 +25,7 @@ float SincResampler::Convolve_AVX2(const float* input_ptr,
__m256 m_sums1 = _mm256_setzero_ps();
__m256 m_sums2 = _mm256_setzero_ps();
// Based on |input_ptr| alignment, we need to use loadu or load. Unrolling
// Based on `input_ptr` alignment, we need to use loadu or load. Unrolling
// these loops has not been tested or benchmarked.
bool aligned_input = (reinterpret_cast<uintptr_t>(input_ptr) & 0x1F) == 0;
if (!aligned_input) {

View File

@ -27,7 +27,7 @@ float SincResampler::Convolve_SSE(const float* input_ptr,
__m128 m_sums1 = _mm_setzero_ps();
__m128 m_sums2 = _mm_setzero_ps();
// Based on |input_ptr| alignment, we need to use loadu or load. Unrolling
// Based on `input_ptr` alignment, we need to use loadu or load. Unrolling
// these loops hurt performance in local testing.
if (reinterpret_cast<uintptr_t>(input_ptr) & 0x0F) {
for (size_t i = 0; i < kKernelSize; i += 4) {

View File

@ -15,7 +15,6 @@
#define COMMON_AUDIO_RESAMPLER_SINUSOIDAL_LINEAR_CHIRP_SOURCE_H_
#include "common_audio/resampler/sinc_resampler.h"
#include "rtc_base/constructor_magic.h"
namespace webrtc {
@ -24,7 +23,7 @@ namespace webrtc {
// resampler for the specific sample rate conversion being used.
class SinusoidalLinearChirpSource : public SincResamplerCallback {
public:
// |delay_samples| can be used to insert a fractional sample delay into the
// `delay_samples` can be used to insert a fractional sample delay into the
// source. It will produce zeros until non-negative time is reached.
SinusoidalLinearChirpSource(int sample_rate,
size_t samples,
@ -33,12 +32,16 @@ class SinusoidalLinearChirpSource : public SincResamplerCallback {
~SinusoidalLinearChirpSource() override {}
SinusoidalLinearChirpSource(const SinusoidalLinearChirpSource&) = delete;
SinusoidalLinearChirpSource& operator=(const SinusoidalLinearChirpSource&) =
delete;
void Run(size_t frames, float* destination) override;
double Frequency(size_t position);
private:
enum { kMinFrequency = 5 };
static constexpr int kMinFrequency = 5;
int sample_rate_;
size_t total_samples_;
@ -46,8 +49,6 @@ class SinusoidalLinearChirpSource : public SincResamplerCallback {
double k_;
size_t current_index_;
double delay_samples_;
RTC_DISALLOW_COPY_AND_ASSIGN(SinusoidalLinearChirpSource);
};
} // namespace webrtc

View File

@ -18,9 +18,9 @@
#include <string.h>
// Get address of region(s) from which we can read data.
// If the region is contiguous, |data_ptr_bytes_2| will be zero.
// If non-contiguous, |data_ptr_bytes_2| will be the size in bytes of the second
// region. Returns room available to be read or |element_count|, whichever is
// If the region is contiguous, `data_ptr_bytes_2` will be zero.
// If non-contiguous, `data_ptr_bytes_2` will be the size in bytes of the second
// region. Returns room available to be read or `element_count`, whichever is
// smaller.
static size_t GetBufferReadRegions(RingBuffer* buf,
size_t element_count,
@ -120,7 +120,7 @@ size_t WebRtc_ReadBuffer(RingBuffer* self,
&buf_ptr_bytes_2);
if (buf_ptr_bytes_2 > 0) {
// We have a wrap around when reading the buffer. Copy the buffer data to
// |data| and point to it.
// `data` and point to it.
memcpy(data, buf_ptr_1, buf_ptr_bytes_1);
memcpy(((char*) data) + buf_ptr_bytes_1, buf_ptr_2, buf_ptr_bytes_2);
buf_ptr_1 = data;
@ -129,7 +129,7 @@ size_t WebRtc_ReadBuffer(RingBuffer* self,
memcpy(data, buf_ptr_1, buf_ptr_bytes_1);
}
if (data_ptr) {
// |buf_ptr_1| == |data| in the case of a wrap.
// `buf_ptr_1` == `data` in the case of a wrap.
*data_ptr = read_count == 0 ? NULL : buf_ptr_1;
}

View File

@ -39,14 +39,14 @@ void WebRtc_InitBuffer(RingBuffer* handle);
void WebRtc_FreeBuffer(void* handle);
// Reads data from the buffer. Returns the number of elements that were read.
// The |data_ptr| will point to the address where the read data is located.
// If no data can be read, |data_ptr| is set to |NULL|. If all data can be read
// without buffer wrap around then |data_ptr| will point to the location in the
// buffer. Otherwise, the data will be copied to |data| (memory allocation done
// by the user) and |data_ptr| points to the address of |data|. |data_ptr| is
// The `data_ptr` will point to the address where the read data is located.
// If no data can be read, `data_ptr` is set to `NULL`. If all data can be read
// without buffer wrap around then `data_ptr` will point to the location in the
// buffer. Otherwise, the data will be copied to `data` (memory allocation done
// by the user) and `data_ptr` points to the address of `data`. `data_ptr` is
// only guaranteed to be valid until the next call to WebRtc_WriteBuffer().
//
// To force a copying to |data|, pass a null |data_ptr|.
// To force a copying to `data`, pass a null `data_ptr`.
//
// Returns number of elements read.
size_t WebRtc_ReadBuffer(RingBuffer* handle,
@ -54,14 +54,14 @@ size_t WebRtc_ReadBuffer(RingBuffer* handle,
void* data,
size_t element_count);
// Writes |data| to buffer and returns the number of elements written.
// Writes `data` to buffer and returns the number of elements written.
size_t WebRtc_WriteBuffer(RingBuffer* handle,
const void* data,
size_t element_count);
// Moves the buffer read position and returns the number of elements moved.
// Positive |element_count| moves the read position towards the write position,
// that is, flushing the buffer. Negative |element_count| moves the read
// Positive `element_count` moves the read position towards the write position,
// that is, flushing the buffer. Negative `element_count` moves the read
// position away from the write position, that is, stuffing the buffer.
// Returns number of elements moved.
int WebRtc_MoveReadPtr(RingBuffer* handle, int element_count);
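
Putting the contract together, a small round-trip sketch (WebRtc_CreateBuffer is assumed to be declared in this same header; sizes are arbitrary):

#include <cstddef>
#include <cstdint>
#include "common_audio/ring_buffer.h"

void RingBufferRoundTrip() {
  RingBuffer* rb = WebRtc_CreateBuffer(512, sizeof(int16_t));
  WebRtc_InitBuffer(rb);
  int16_t in[128] = {0};
  WebRtc_WriteBuffer(rb, in, 128);
  int16_t scratch[128];
  int16_t* data_ptr = nullptr;
  // On a wrap the data is copied into `scratch` and data_ptr points there;
  // otherwise data_ptr aliases the buffer's internal storage directly.
  size_t read = WebRtc_ReadBuffer(rb, reinterpret_cast<void**>(&data_ptr),
                                  scratch, 128);
  (void)read;  // 128 here, since exactly that much was written.
  WebRtc_FreeBuffer(rb);
}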

View File

@ -72,9 +72,9 @@ void WebRtcSpl_CrossCorrelationNeon(int32_t* cross_correlation,
size_t dim_cross_correlation,
int right_shifts,
int step_seq2) {
size_t i = 0;
int i = 0;
for (i = 0; i < dim_cross_correlation; i++) {
for (i = 0; i < (int)dim_cross_correlation; i++) {
const int16_t* seq1_ptr = seq1;
const int16_t* seq2_ptr = seq2 + (step_seq2 * i);

View File

@ -98,8 +98,7 @@ int32_t WebRtcSpl_DivResultInQ31(int32_t num, int32_t den)
return div;
}
int32_t RTC_NO_SANITIZE("signed-integer-overflow") // bugs.webrtc.org/5486
WebRtcSpl_DivW32HiLow(int32_t num, int16_t den_hi, int16_t den_low)
int32_t WebRtcSpl_DivW32HiLow(int32_t num, int16_t den_hi, int16_t den_low)
{
int16_t approx, tmp_hi, tmp_low, num_hi, num_low;
int32_t tmpW32;
@ -111,8 +110,8 @@ WebRtcSpl_DivW32HiLow(int32_t num, int16_t den_hi, int16_t den_low)
tmpW32 = (den_hi * approx << 1) + ((den_low * approx >> 15) << 1);
// tmpW32 = den * approx
tmpW32 = (int32_t)0x7fffffffL - tmpW32; // result in Q30 (tmpW32 = 2.0-(den*approx))
// UBSan: 2147483647 - -2 cannot be represented in type 'int'
// result in Q30 (tmpW32 = 2.0-(den*approx))
tmpW32 = (int32_t)((int64_t)0x7fffffffL - tmpW32);
// Store tmpW32 in hi and low format
tmp_hi = (int16_t)(tmpW32 >> 16);

View File

@ -26,7 +26,7 @@ extern "C" {
// - vector_length : Number of samples used in the dot product
// - scaling : The number of right bit shifts to apply on each term
// during calculation to avoid overflow, i.e., the
// output will be in Q(-|scaling|)
// output will be in Q(-`scaling`)
//
// Return value : The dot product in Q(-scaling)
int32_t WebRtcSpl_DotProductWithScale(const int16_t* vector1,

View File

@ -8,9 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
#include "common_audio/signal_processing/include/signal_processing_library.h"
#include <arm_neon.h>
#include "rtc_base/checks.h"
// NEON intrinsics version of WebRtcSpl_DownsampleFast()
// for ARM 32-bit/64-bit platforms.
@ -22,19 +24,24 @@ int WebRtcSpl_DownsampleFastNeon(const int16_t* data_in,
size_t coefficients_length,
int factor,
size_t delay) {
size_t i = 0;
size_t j = 0;
// Using signed indexes to be able to compute the negative index i - j
// that is used to index data_in.
int i = 0;
int j = 0;
int32_t out_s32 = 0;
size_t endpos = delay + factor * (data_out_length - 1) + 1;
int endpos = delay + factor * (data_out_length - 1) + 1;
size_t res = data_out_length & 0x7;
size_t endpos1 = endpos - factor * res;
int endpos1 = endpos - factor * res;
// Return error if any of the running conditions doesn't meet.
if (data_out_length == 0 || coefficients_length == 0
|| data_in_length < endpos) {
|| (int)data_in_length < endpos) {
return -1;
}
RTC_DCHECK_GE(endpos, 0);
RTC_DCHECK_GE(endpos1, 0);
// First part, unroll the loop 8 times, with 3 subcases
// (factor == 2, 4, others).
switch (factor) {
@ -46,7 +53,7 @@ int WebRtcSpl_DownsampleFastNeon(const int16_t* data_in,
#if defined(WEBRTC_ARCH_ARM64)
// Unroll the loop 2 times.
for (j = 0; j < coefficients_length - 1; j += 2) {
for (j = 0; j < (int)coefficients_length - 1; j += 2) {
int32x2_t coeff32 = vld1_dup_s32((int32_t*)&coefficients[j]);
int16x4_t coeff16x4 = vreinterpret_s16_s32(coeff32);
int16x8x2_t in16x8x2 = vld2q_s16(&data_in[i - j - 1]);
@ -68,7 +75,7 @@ int WebRtcSpl_DownsampleFastNeon(const int16_t* data_in,
out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_3, coeff16x4, 0);
}
for (; j < coefficients_length; j++) {
for (; j < (int)coefficients_length; j++) {
int16x4_t coeff16x4 = vld1_dup_s16(&coefficients[j]);
int16x8x2_t in16x8x2 = vld2q_s16(&data_in[i - j]);
@ -87,7 +94,7 @@ int WebRtcSpl_DownsampleFastNeon(const int16_t* data_in,
#else
// On ARMv7, the loop unrolling 2 times results in performance
// regression.
for (j = 0; j < coefficients_length; j++) {
for (j = 0; j < (int)coefficients_length; j++) {
int16x4_t coeff16x4 = vld1_dup_s16(&coefficients[j]);
int16x8x2_t in16x8x2 = vld2q_s16(&data_in[i - j]);
@ -114,7 +121,7 @@ int WebRtcSpl_DownsampleFastNeon(const int16_t* data_in,
int32x4_t out32x4_1 = vdupq_n_s32(2048);
// Unroll the loop 4 times.
for (j = 0; j < coefficients_length - 3; j += 4) {
for (j = 0; j < (int)coefficients_length - 3; j += 4) {
int16x4_t coeff16x4 = vld1_s16(&coefficients[j]);
int16x8x4_t in16x8x4 = vld4q_s16(&data_in[i - j - 3]);
@ -143,7 +150,7 @@ int WebRtcSpl_DownsampleFastNeon(const int16_t* data_in,
out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_7, coeff16x4, 0);
}
for (; j < coefficients_length; j++) {
for (; j < (int)coefficients_length; j++) {
int16x4_t coeff16x4 = vld1_dup_s16(&coefficients[j]);
int16x8x4_t in16x8x4 = vld4q_s16(&data_in[i - j]);
@ -174,7 +181,7 @@ int WebRtcSpl_DownsampleFastNeon(const int16_t* data_in,
int32x4_t out32x4_0 = vdupq_n_s32(2048);
int32x4_t out32x4_1 = vdupq_n_s32(2048);
for (j = 0; j < coefficients_length; j++) {
for (j = 0; j < (int)coefficients_length; j++) {
int16x4_t coeff16x4 = vld1_dup_s16(&coefficients[j]);
int16x4_t in16x4_0 = vld1_dup_s16(&data_in[i - j]);
in16x4_0 = vld1_lane_s16(&data_in[i + factor - j], in16x4_0, 1);
@ -204,7 +211,7 @@ int WebRtcSpl_DownsampleFastNeon(const int16_t* data_in,
for (; i < endpos; i += factor) {
out_s32 = 2048; // Round value, 0.5 in Q12.
for (j = 0; j < coefficients_length; j++) {
for (j = 0; j < (int)coefficients_length; j++) {
out_s32 = WebRtc_MulAccumW16(coefficients[j], data_in[i - j], out_s32);
}

View File

@ -81,7 +81,7 @@ int WebRtcSpl_RealForwardFFT(struct RealFFT* self,
// boundary.
//
// Return Value:
// 0 or a positive number - a value that the elements in the |real_data_out|
// 0 or a positive number - a value that the elements in the `real_data_out`
// should be shifted left with in order to get
// correct physical values.
// -1 - Error with bad arguments (null pointers).

View File

@ -166,7 +166,7 @@ int32_t WebRtcSpl_MaxAbsValueW32_mips(const int32_t* vector, size_t length);
// - vector : 16-bit input vector.
// - length : Number of samples in vector.
//
// Return value : Maximum sample value in |vector|.
// Return value : Maximum sample value in `vector`.
typedef int16_t (*MaxValueW16)(const int16_t* vector, size_t length);
extern const MaxValueW16 WebRtcSpl_MaxValueW16;
int16_t WebRtcSpl_MaxValueW16C(const int16_t* vector, size_t length);
@ -183,7 +183,7 @@ int16_t WebRtcSpl_MaxValueW16_mips(const int16_t* vector, size_t length);
// - vector : 32-bit input vector.
// - length : Number of samples in vector.
//
// Return value : Maximum sample value in |vector|.
// Return value : Maximum sample value in `vector`.
typedef int32_t (*MaxValueW32)(const int32_t* vector, size_t length);
extern const MaxValueW32 WebRtcSpl_MaxValueW32;
int32_t WebRtcSpl_MaxValueW32C(const int32_t* vector, size_t length);
@ -200,7 +200,7 @@ int32_t WebRtcSpl_MaxValueW32_mips(const int32_t* vector, size_t length);
// - vector : 16-bit input vector.
// - length : Number of samples in vector.
//
// Return value : Minimum sample value in |vector|.
// Return value : Minimum sample value in `vector`.
typedef int16_t (*MinValueW16)(const int16_t* vector, size_t length);
extern const MinValueW16 WebRtcSpl_MinValueW16;
int16_t WebRtcSpl_MinValueW16C(const int16_t* vector, size_t length);
@ -217,7 +217,7 @@ int16_t WebRtcSpl_MinValueW16_mips(const int16_t* vector, size_t length);
// - vector : 32-bit input vector.
// - length : Number of samples in vector.
//
// Return value : Minimum sample value in |vector|.
// Return value : Minimum sample value in `vector`.
typedef int32_t (*MinValueW32)(const int32_t* vector, size_t length);
extern const MinValueW32 WebRtcSpl_MinValueW32;
int32_t WebRtcSpl_MinValueW32C(const int32_t* vector, size_t length);
@ -228,6 +228,25 @@ int32_t WebRtcSpl_MinValueW32Neon(const int32_t* vector, size_t length);
int32_t WebRtcSpl_MinValueW32_mips(const int32_t* vector, size_t length);
#endif
// Returns both the minimum and maximum values of a 16-bit vector.
//
// Input:
// - vector : 16-bit input vector.
// - length : Number of samples in vector.
// Output:
// - max_val : Maximum sample value in `vector`.
// - min_val : Minimum sample value in `vector`.
void WebRtcSpl_MinMaxW16(const int16_t* vector,
size_t length,
int16_t* min_val,
int16_t* max_val);
#if defined(WEBRTC_HAS_NEON)
void WebRtcSpl_MinMaxW16Neon(const int16_t* vector,
size_t length,
int16_t* min_val,
int16_t* max_val);
#endif
// Returns the vector index to the largest absolute value of a 16-bit vector.
//
// Input:
@ -240,6 +259,17 @@ int32_t WebRtcSpl_MinValueW32_mips(const int32_t* vector, size_t length);
// -32768 presenting an int16 absolute value of 32767).
size_t WebRtcSpl_MaxAbsIndexW16(const int16_t* vector, size_t length);
// Returns the element with the largest absolute value of a 16-bit vector. Note
// that this function can return a negative value.
//
// Input:
// - vector : 16-bit input vector.
// - length : Number of samples in vector.
//
// Return value : The element with the largest absolute value. Note that this
// may be a negative value.
int16_t WebRtcSpl_MaxAbsElementW16(const int16_t* vector, size_t length);
// Returns the vector index to the maximum sample value of a 16-bit vector.
//
// Input:
@ -396,7 +426,7 @@ void WebRtcSpl_AffineTransformVector(int16_t* out_vector,
//
// Input:
// - in_vector : Vector to calculate autocorrelation upon
// - in_vector_length : Length (in samples) of |vector|
// - in_vector_length : Length (in samples) of `vector`
// - order : The order up to which the autocorrelation should be
// calculated
//
@ -408,7 +438,7 @@ void WebRtcSpl_AffineTransformVector(int16_t* out_vector,
// - scale : The number of left shifts required to obtain the
// auto-correlation in Q0
//
// Return value : Number of samples in |result|, i.e. (order+1)
// Return value : Number of samples in `result`, i.e. (order+1)
size_t WebRtcSpl_AutoCorrelation(const int16_t* in_vector,
size_t in_vector_length,
size_t order,
@ -419,7 +449,7 @@ size_t WebRtcSpl_AutoCorrelation(const int16_t* in_vector,
// does NOT use the 64 bit class
//
// Input:
// - auto_corr : Vector with autocorrelation values of length >= |order|+1
// - auto_corr : Vector with autocorrelation values of length >= `order`+1
// - order : The LPC filter order (support up to order 20)
//
// Output:
@ -432,7 +462,7 @@ int16_t WebRtcSpl_LevinsonDurbin(const int32_t* auto_corr,
int16_t* refl_coef,
size_t order);
// Converts reflection coefficients |refl_coef| to LPC coefficients |lpc_coef|.
// Converts reflection coefficients `refl_coef` to LPC coefficients `lpc_coef`.
// This version is a 16 bit operation.
//
// NOTE: The 16 bit refl_coef -> lpc_coef conversion might result in a
@ -442,7 +472,7 @@ int16_t WebRtcSpl_LevinsonDurbin(const int32_t* auto_corr,
// Input:
// - refl_coef : Reflection coefficients in Q15 that should be converted
// to LPC coefficients
// - use_order : Number of coefficients in |refl_coef|
// - use_order : Number of coefficients in `refl_coef`
//
// Output:
// - lpc_coef : LPC coefficients in Q12
@ -450,14 +480,14 @@ void WebRtcSpl_ReflCoefToLpc(const int16_t* refl_coef,
int use_order,
int16_t* lpc_coef);
// Converts LPC coefficients |lpc_coef| to reflection coefficients |refl_coef|.
// Converts LPC coefficients `lpc_coef` to reflection coefficients `refl_coef`.
// This version is a 16 bit operation.
// The conversion is implemented by the step-down algorithm.
//
// Input:
// - lpc_coef : LPC coefficients in Q12, that should be converted to
// reflection coefficients
// - use_order : Number of coefficients in |lpc_coef|
// - use_order : Number of coefficients in `lpc_coef`
//
// Output:
// - refl_coef : Reflection coefficients in Q15.
@ -478,24 +508,24 @@ void WebRtcSpl_AutoCorrToReflCoef(const int32_t* auto_corr,
int16_t* refl_coef);
// The functions (with related pointer) calculate the cross-correlation between
// two sequences |seq1| and |seq2|.
// |seq1| is fixed and |seq2| slides as the pointer is increased with the
// amount |step_seq2|. Note the arguments should obey the relationship:
// |dim_seq| - 1 + |step_seq2| * (|dim_cross_correlation| - 1) <
// buffer size of |seq2|
// two sequences `seq1` and `seq2`.
// `seq1` is fixed and `seq2` slides as the pointer is increased with the
// amount `step_seq2`. Note the arguments should obey the relationship:
// `dim_seq` - 1 + `step_seq2` * (`dim_cross_correlation` - 1) <
// buffer size of `seq2`
//
// Input:
// - seq1 : First sequence (fixed throughout the correlation)
// - seq2 : Second sequence (slides |step_vector2| for each
// - seq2 : Second sequence (slides `step_vector2` for each
// new correlation)
// - dim_seq : Number of samples to use in the cross-correlation
// - dim_cross_correlation : Number of cross-correlations to calculate (the
// start position for |vector2| is updated for each
// start position for `vector2` is updated for each
// new one)
// - right_shifts : Number of right bit shifts to use. This will
// become the output Q-domain.
// - step_seq2 : How many (positive or negative) steps the
// |vector2| pointer should be updated for each new
// `vector2` pointer should be updated for each new
// cross-correlation value.
//
// Output:
@ -545,11 +575,11 @@ void WebRtcSpl_CrossCorrelation_mips(int32_t* cross_correlation,
void WebRtcSpl_GetHanningWindow(int16_t* window, size_t size);
// Calculates y[k] = sqrt(1 - x[k]^2) for each element of the input vector
// |in_vector|. Input and output values are in Q15.
// `in_vector`. Input and output values are in Q15.
//
// Inputs:
// - in_vector : Values to calculate sqrt(1 - x^2) of
// - vector_length : Length of vector |in_vector|
// - vector_length : Length of vector `in_vector`
//
// Output:
// - out_vector : Output values in Q15
@ -637,9 +667,9 @@ void WebRtcSpl_FilterARFastQ12(const int16_t* data_in,
// Input:
// - data_in : Input samples (state in positions
// data_in[-order] .. data_in[-1])
// - data_in_length : Number of samples in |data_in| to be filtered.
// - data_in_length : Number of samples in `data_in` to be filtered.
// This must be at least
// |delay| + |factor|*(|out_vector_length|-1) + 1)
// `delay` + `factor`*(`out_vector_length`-1) + 1
// - data_out_length : Number of down sampled samples desired
// - coefficients : Filter coefficients (in Q12)
// - coefficients_length: Number of coefficients (order+1)
@ -647,7 +677,7 @@ void WebRtcSpl_FilterARFastQ12(const int16_t* data_in,
// - delay : Delay of filter (compensated for in out_vector)
// Output:
// - data_out : Filtered samples
// Return value : 0 if OK, -1 if |in_vector| is too short
// Return value : 0 if OK, -1 if `in_vector` is too short
typedef int (*DownsampleFast)(const int16_t* data_in,
size_t data_in_length,
int16_t* data_out,
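
A usage sketch for the length/delay contract above (the Q12 coefficients are illustrative; choosing `delay` equal to the filter order keeps every data_in index non-negative):

#include <cstddef>
#include <cstdint>
#include "common_audio/signal_processing/include/signal_processing_library.h"

int DecimateByTwo(const int16_t* data_in, size_t data_in_length,
                  int16_t* data_out, size_t data_out_length) {
  // 3-tap lowpass in Q12 (taps sum to 4096, i.e. unity gain); order = 2.
  static const int16_t kCoeffs[3] = {1024, 2048, 1024};
  // Caller must ensure:
  //   data_in_length >= delay + factor * (data_out_length - 1) + 1
  return WebRtcSpl_DownsampleFast(data_in, data_in_length, data_out,
                                  data_out_length, kCoeffs, 3,
                                  /*factor=*/2, /*delay=*/2);
}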
@ -693,12 +723,12 @@ int WebRtcSpl_DownsampleFast_mips(const int16_t* data_in,
int WebRtcSpl_ComplexFFT(int16_t vector[], int stages, int mode);
int WebRtcSpl_ComplexIFFT(int16_t vector[], int stages, int mode);
// Treat a 16-bit complex data buffer |complex_data| as an array of 32-bit
// Treat a 16-bit complex data buffer `complex_data` as an array of 32-bit
// values, and swap elements whose indexes are bit-reverses of each other.
//
// Input:
// - complex_data : Complex data buffer containing 2^|stages| real
// elements interleaved with 2^|stages| imaginary
// - complex_data : Complex data buffer containing 2^`stages` real
// elements interleaved with 2^`stages` imaginary
// elements: [Re Im Re Im Re Im....]
// - stages : Number of FFT stages. Must be at least 3 and at most
// 10, since the table WebRtcSpl_kSinTable1024[] is 1024
@ -908,7 +938,7 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// WebRtcSpl_AddSatW32(...)
//
// Returns the result of a saturated 16-bit, respectively 32-bit, addition of
// the numbers specified by the |var1| and |var2| parameters.
// the numbers specified by the `var1` and `var2` parameters.
//
// Input:
// - var1 : Input variable 1
@ -922,7 +952,7 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// WebRtcSpl_SubSatW32(...)
//
// Returns the result of a saturated 16-bit, respectively 32-bit, subtraction
// of the numbers specified by the |var1| and |var2| parameters.
// of the numbers specified by the `var1` and `var2` parameters.
//
// Input:
// - var1 : Input variable 1
@ -935,61 +965,61 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// WebRtcSpl_GetSizeInBits(...)
//
// Returns the # of bits that are needed at the most to represent the number
// specified by the |value| parameter.
// specified by the `value` parameter.
//
// Input:
// - value : Input value
//
// Return value : Number of bits needed to represent |value|
// Return value : Number of bits needed to represent `value`
//
//
// WebRtcSpl_NormW32(...)
//
// Norm returns the # of left shifts required to 32-bit normalize the 32-bit
// signed number specified by the |value| parameter.
// signed number specified by the `value` parameter.
//
// Input:
// - value : Input value
//
// Return value : Number of bit shifts needed to 32-bit normalize |value|
// Return value : Number of bit shifts needed to 32-bit normalize `value`
//
//
// WebRtcSpl_NormW16(...)
//
// Norm returns the # of left shifts required to 16-bit normalize the 16-bit
// signed number specified by the |value| parameter.
// signed number specified by the `value` parameter.
//
// Input:
// - value : Input value
//
// Return value : Number of bit shifts needed to 32-bit normalize |value|
// Return value : Number of bit shifts needed to 16-bit normalize `value`
//
//
// WebRtcSpl_NormU32(...)
//
// Norm returns the # of left shifts required to 32-bit normalize the unsigned
// 32-bit number specified by the |value| parameter.
// 32-bit number specified by the `value` parameter.
//
// Input:
// - value : Input value
//
// Return value : Number of bit shifts needed to 32-bit normalize |value|
// Return value : Number of bit shifts needed to 32-bit normalize `value`
//
//
// WebRtcSpl_GetScalingSquare(...)
//
// Returns the # of bits required to scale the samples specified in the
// |in_vector| parameter so that, if the squares of the samples are added the
// # of times specified by the |times| parameter, the 32-bit addition will not
// `in_vector` parameter so that, if the squares of the samples are added the
// # of times specified by the `times` parameter, the 32-bit addition will not
// overflow (result in int32_t).
//
// Input:
// - in_vector : Input vector to check scaling on
// - in_vector_length : Samples in |in_vector|
// - in_vector_length : Samples in `in_vector`
// - times : Number of additions to be performed
//
// Return value : Number of right bit shifts needed to avoid
@ -999,8 +1029,8 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
//
// WebRtcSpl_MemSetW16(...)
//
// Sets all the values in the int16_t vector |vector| of length
// |vector_length| to the specified value |set_value|
// Sets all the values in the int16_t vector `vector` of length
// `vector_length` to the specified value `set_value`
//
// Input:
// - vector : Pointer to the int16_t vector
@ -1011,8 +1041,8 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
//
// WebRtcSpl_MemSetW32(...)
//
// Sets all the values in the int32_t vector |vector| of length
// |vector_length| to the specified value |set_value|
// Sets all the values in the int32_t vector `vector` of length
// `vector_length` to the specified value `set_value`
//
// Input:
// - vector : Pointer to the int16_t vector
@ -1023,34 +1053,34 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
//
// WebRtcSpl_MemCpyReversedOrder(...)
//
// Copies all the values from the source int16_t vector |in_vector| to a
// destination int16_t vector |out_vector|. It is done in reversed order,
// meaning that the first sample of |in_vector| is copied to the last sample of
// the |out_vector|. The procedure continues until the last sample of
// |in_vector| has been copied to the first sample of |out_vector|. This
// Copies all the values from the source int16_t vector `in_vector` to a
// destination int16_t vector `out_vector`. It is done in reversed order,
// meaning that the first sample of `in_vector` is copied to the last sample of
// the `out_vector`. The procedure continues until the last sample of
// `in_vector` has been copied to the first sample of `out_vector`. This
// creates a reversed vector. Used in e.g. prediction in iLBC.
//
// Input:
// - in_vector : Pointer to the first sample in a int16_t vector
// of length |length|
// of length `length`
// - vector_length : Number of elements to copy
//
// Output:
// - out_vector : Pointer to the last sample in a int16_t vector
// of length |length|
// of length `length`
//
//
// WebRtcSpl_CopyFromEndW16(...)
//
// Copies the rightmost |samples| of |in_vector| (of length |in_vector_length|)
// to the vector |out_vector|.
// Copies the rightmost `samples` of `in_vector` (of length `in_vector_length`)
// to the vector `out_vector`.
//
// Input:
// - in_vector : Input vector
// - in_vector_length : Number of samples in |in_vector|
// - in_vector_length : Number of samples in `in_vector`
// - samples : Number of samples to extract (from right side)
// from |in_vector|
// from `in_vector`
//
// Output:
// - out_vector : Vector with the requested samples
@ -1085,7 +1115,7 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
//
// Output:
// - out_vector : Pointer to the result vector (can be the same as
// |in_vector|)
// `in_vector`)
//
//
@ -1103,7 +1133,7 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
//
// Output:
// - out_vector : Pointer to the result vector (can be the same as
// |in_vector|)
// `in_vector`)
//
//
@ -1115,11 +1145,11 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// Input:
// - in_vector : Input vector
// - gain : Scaling gain
// - vector_length : Elements in the |in_vector|
// - vector_length : Elements in the `in_vector`
// - right_shifts : Number of right bit shifts applied
//
// Output:
// - out_vector : Output vector (can be the same as |in_vector|)
// - out_vector : Output vector (can be the same as `in_vector`)
//
//
@ -1131,11 +1161,11 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// Input:
// - in_vector : Input vector
// - gain : Scaling gain
// - vector_length : Elements in the |in_vector|
// - vector_length : Elements in the `in_vector`
// - right_shifts : Number of right bit shifts applied
//
// Output:
// - out_vector : Output vector (can be the same as |in_vector|)
// - out_vector : Output vector (can be the same as `in_vector`)
//
//
@ -1170,10 +1200,10 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// should be set to the last value in the vector
// - right_shifts : Number of right bit shift to be applied after the
// multiplication
// - vector_length : Number of elements in |in_vector|
// - vector_length : Number of elements in `in_vector`
//
// Output:
// - out_vector : Output vector (can be same as |in_vector|)
// - out_vector : Output vector (can be same as `in_vector`)
//
//
@ -1187,10 +1217,10 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// - window : Window vector.
// - right_shifts : Number of right bit shift to be applied after the
// multiplication
// - vector_length : Number of elements in |in_vector|
// - vector_length : Number of elements in `in_vector`
//
// Output:
// - out_vector : Output vector (can be same as |in_vector|)
// - out_vector : Output vector (can be same as `in_vector`)
//
//
@ -1204,16 +1234,16 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// - in_vector2 : Input vector 2
// - right_shifts : Number of right bit shift to be applied after the
// multiplication
// - vector_length : Number of elements in |in_vector1| and |in_vector2|
// - vector_length : Number of elements in `in_vector1` and `in_vector2`
//
// Output:
// - out_vector : Output vector (can be same as |in_vector1|)
// - out_vector : Output vector (can be same as `in_vector1`)
//
//
// WebRtcSpl_AddAffineVectorToVector(...)
//
// Adds an affine transformed vector to another vector |out_vector|, i.e,
// Adds an affine transformed vector to another vector `out_vector`, i.e,
// performs
// out_vector[k] += (in_vector[k]*gain+add_constant)>>right_shifts
//
@ -1223,7 +1253,7 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// - add_constant : Constant value to add (usually 1<<(right_shifts-1),
// but others can be used as well
// - right_shifts : Number of right bit shifts (0-16)
// - vector_length : Number of samples in |in_vector| and |out_vector|
// - vector_length : Number of samples in `in_vector` and `out_vector`
//
// Output:
// - out_vector : Vector with the output
@ -1241,7 +1271,7 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// - add_constant : Constant value to add (usually 1<<(right_shifts-1),
// but others can be used as well
// - right_shifts : Number of right bit shifts (0-16)
// - vector_length : Number of samples in |in_vector| and |out_vector|
// - vector_length : Number of samples in `in_vector` and `out_vector`
//
// Output:
// - out_vector : Vector with the output
@ -1304,15 +1334,15 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// - vector : Vector with the uniform values
// - seed : Updated seed value
//
// Return value : Number of samples in vector, i.e., |vector_length|
// Return value : Number of samples in vector, i.e., `vector_length`
//
//
// WebRtcSpl_Sqrt(...)
//
// Returns the square root of the input value |value|. The precision of this
// Returns the square root of the input value `value`. The precision of this
// function is integer precision, i.e., sqrt(8) gives 2 as answer.
// If |value| is a negative number then 0 is returned.
// If `value` is a negative number then 0 is returned.
//
// Algorithm:
//
@ -1332,9 +1362,9 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
//
// WebRtcSpl_DivU32U16(...)
//
// Divides a uint32_t |num| by a uint16_t |den|.
// Divides a uint32_t `num` by a uint16_t `den`.
//
// If |den|==0, (uint32_t)0xFFFFFFFF is returned.
// If `den`==0, (uint32_t)0xFFFFFFFF is returned.
//
// Input:
// - num : Numerator
@ -1347,9 +1377,9 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
//
// WebRtcSpl_DivW32W16(...)
//
// Divides a int32_t |num| by a int16_t |den|.
// Divides a int32_t `num` by a int16_t `den`.
//
// If |den|==0, (int32_t)0x7FFFFFFF is returned.
// If `den`==0, (int32_t)0x7FFFFFFF is returned.
//
// Input:
// - num : Numerator
@ -1362,10 +1392,10 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
//
// WebRtcSpl_DivW32W16ResW16(...)
//
// Divides a int32_t |num| by a int16_t |den|, assuming that the
// Divides a int32_t `num` by a int16_t `den`, assuming that the
// result is less than 32768, otherwise an unpredictable result will occur.
//
// If |den|==0, (int16_t)0x7FFF is returned.
// If `den`==0, (int16_t)0x7FFF is returned.
//
// Input:
// - num : Numerator
@ -1378,7 +1408,7 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
//
// WebRtcSpl_DivResultInQ31(...)
//
// Divides a int32_t |num| by a int16_t |den|, assuming that the
// Divides a int32_t `num` by a int16_t `den`, assuming that the
// absolute value of the denominator is larger than the numerator, otherwise
// an unpredictable result will occur.
//
@ -1392,7 +1422,7 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
//
// WebRtcSpl_DivW32HiLow(...)
//
// Divides a int32_t |num| by a denominator in hi, low format. The
// Divides a int32_t `num` by a denominator in hi, low format. The
// absolute value of the denominator has to be larger (or equal to) the
// numerator.
//
@ -1417,7 +1447,7 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// - scale_factor : Number of left bit shifts needed to get the physical
// energy value, i.e, to get the Q0 value
//
// Return value : Energy value in Q(-|scale_factor|)
// Return value : Energy value in Q(-`scale_factor`)
//
//
@ -1428,15 +1458,15 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// Input:
// - ar_coef : AR-coefficient vector (values in Q12),
// ar_coef[0] must be 4096.
// - ar_coef_length : Number of coefficients in |ar_coef|.
// - ar_coef_length : Number of coefficients in `ar_coef`.
// - in_vector : Vector to be filtered.
// - in_vector_length : Number of samples in |in_vector|.
// - in_vector_length : Number of samples in `in_vector`.
// - filter_state : Current state (higher part) of the filter.
// - filter_state_length : Length (in samples) of |filter_state|.
// - filter_state_length : Length (in samples) of `filter_state`.
// - filter_state_low : Current state (lower part) of the filter.
// - filter_state_low_length : Length (in samples) of |filter_state_low|.
// - filter_state_low_length : Length (in samples) of `filter_state_low`.
// - out_vector_low_length : Maximum length (in samples) of
// |out_vector_low|.
// `out_vector_low`.
//
// Output:
// - filter_state : Updated state (upper part) vector.
@ -1446,7 +1476,7 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// - out_vector_low : Vector containing the lower part of the
// filtered values.
//
// Return value : Number of samples in the |out_vector|.
// Return value : Number of samples in the `out_vector`.
//
//
@ -1454,11 +1484,11 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
//
// Complex Inverse FFT
//
// Computes an inverse complex 2^|stages|-point FFT on the input vector, which
// Computes an inverse complex 2^`stages`-point FFT on the input vector, which
// is in bit-reversed order. The original content of the vector is destroyed in
// the process, since the input is overwritten by the output, normal-ordered,
// FFT vector. With X as the input complex vector, y as the output complex
// vector and with M = 2^|stages|, the following is computed:
// vector and with M = 2^`stages`, the following is computed:
//
// M-1
// y(k) = sum[X(i)*[cos(2*pi*i*k/M) + j*sin(2*pi*i*k/M)]]
@ -1468,8 +1498,8 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// decimation-in-time algorithm with radix-2 butterfly technique.
//
// Input:
// - vector : In pointer to complex vector containing 2^|stages|
// real elements interleaved with 2^|stages| imaginary
// - vector : In pointer to complex vector containing 2^`stages`
// real elements interleaved with 2^`stages` imaginary
// elements.
// [ReImReImReIm....]
// The elements are in Q(-scale) domain, see more on Return
@ -1488,10 +1518,10 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// - vector : Out pointer to the FFT vector (the same as input).
//
// Return Value : The scale value that tells the number of left bit shifts
// that the elements in the |vector| should be shifted with
// that the elements in the `vector` should be shifted with
// in order to get Q0 values, i.e. the physically correct
// values. The scale parameter is always 0 or positive,
// except if N>1024 (|stages|>10), which returns a scale
// except if N>1024 (`stages`>10), which returns a scale
// value of -1, indicating error.
//
@ -1500,11 +1530,11 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
//
// Complex FFT
//
// Computes a complex 2^|stages|-point FFT on the input vector, which is in
// Computes a complex 2^`stages`-point FFT on the input vector, which is in
// bit-reversed order. The original content of the vector is destroyed in
// the process, since the input is overwritten by the output, normal-ordered,
// FFT vector. With x as the input complex vector, Y as the output complex
// vector and with M = 2^|stages|, the following is computed:
// vector and with M = 2^`stages`, the following is computed:
//
// M-1
// Y(k) = 1/M * sum[x(i)*[cos(2*pi*i*k/M) + j*sin(2*pi*i*k/M)]]
@ -1519,8 +1549,8 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// accuracy.
//
// Input:
// - vector : In pointer to complex vector containing 2^|stages| real
// elements interleaved with 2^|stages| imaginary elements.
// - vector : In pointer to complex vector containing 2^`stages` real
// elements interleaved with 2^`stages` imaginary elements.
// [ReImReImReIm....]
// The output is in the Q0 domain.
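
Since the FFT expects bit-reversed input, the call sequence pairs the two helpers (a sketch; 256 points means stages = 8):

#include <cstdint>
#include "common_audio/signal_processing/include/signal_processing_library.h"

int ForwardFft256(int16_t* complex_data /* 256 interleaved Re/Im pairs */) {
  WebRtcSpl_ComplexBitReverse(complex_data, 8);  // in-place reordering
  // Returns the scale value described above; negative indicates an error.
  return WebRtcSpl_ComplexFFT(complex_data, 8, /*mode=*/1);
}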
//

View File

@ -14,6 +14,8 @@
#ifndef COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_H_
#define COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_H_
#include <stdint.h>
#include "rtc_base/compile_assert_c.h"
extern const int8_t kWebRtcSpl_CountLeadingZeros32_Table[64];

View File

@ -15,6 +15,8 @@
#ifndef COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_ARMV7_H_
#define COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_ARMV7_H_
#include <stdint.h>
/* TODO(kma): Replace some assembly code with GCC intrinsics
* (e.g. __builtin_clz).
*/

View File

@ -25,6 +25,7 @@
*/
#include <stdlib.h>
#include <limits.h>
#include "rtc_base/checks.h"
#include "common_audio/signal_processing/include/signal_processing_library.h"
@ -67,7 +68,8 @@ int32_t WebRtcSpl_MaxAbsValueW32C(const int32_t* vector, size_t length) {
RTC_DCHECK_GT(length, 0);
for (i = 0; i < length; i++) {
absolute = abs((int)vector[i]);
absolute =
(vector[i] != INT_MIN) ? abs((int)vector[i]) : INT_MAX + (uint32_t)1;
if (absolute > maximum) {
maximum = absolute;
}
@ -155,6 +157,15 @@ size_t WebRtcSpl_MaxAbsIndexW16(const int16_t* vector, size_t length) {
return index;
}
int16_t WebRtcSpl_MaxAbsElementW16(const int16_t* vector, size_t length) {
int16_t min_val, max_val;
WebRtcSpl_MinMaxW16(vector, length, &min_val, &max_val);
if (min_val == max_val || min_val < -max_val) {
return min_val;
}
return max_val;
}
// Index of maximum value in a word16 vector.
size_t WebRtcSpl_MaxIndexW16(const int16_t* vector, size_t length) {
size_t i = 0, index = 0;
@ -222,3 +233,26 @@ size_t WebRtcSpl_MinIndexW32(const int32_t* vector, size_t length) {
return index;
}
// Finds both the minimum and maximum elements in an array of 16-bit integers.
void WebRtcSpl_MinMaxW16(const int16_t* vector, size_t length,
int16_t* min_val, int16_t* max_val) {
#if defined(WEBRTC_HAS_NEON)
return WebRtcSpl_MinMaxW16Neon(vector, length, min_val, max_val);
#else
int16_t minimum = WEBRTC_SPL_WORD16_MAX;
int16_t maximum = WEBRTC_SPL_WORD16_MIN;
size_t i = 0;
RTC_DCHECK_GT(length, 0);
for (i = 0; i < length; i++) {
if (vector[i] < minimum)
minimum = vector[i];
if (vector[i] > maximum)
maximum = vector[i];
}
*min_val = minimum;
*max_val = maximum;
#endif
}
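
A quick sketch of how the two new helpers relate (values are illustrative):

#include <cstdint>
#include "common_audio/signal_processing/include/signal_processing_library.h"

void MinMaxDemo() {
  const int16_t v[4] = {-5, 3, -9, 7};
  int16_t min_val = 0;
  int16_t max_val = 0;
  WebRtcSpl_MinMaxW16(v, 4, &min_val, &max_val);  // min_val = -9, max_val = 7
  // Built on the same single pass: |-9| > |7|, so the negative element wins.
  int16_t dominant = WebRtcSpl_MaxAbsElementW16(v, 4);  // returns -9
  (void)dominant;
}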

View File

@ -281,3 +281,53 @@ int32_t WebRtcSpl_MinValueW32Neon(const int32_t* vector, size_t length) {
return minimum;
}
// Finds both the minimum and maximum elements in an array of 16-bit integers.
void WebRtcSpl_MinMaxW16Neon(const int16_t* vector, size_t length,
int16_t* min_val, int16_t* max_val) {
int16_t minimum = WEBRTC_SPL_WORD16_MAX;
int16_t maximum = WEBRTC_SPL_WORD16_MIN;
size_t i = 0;
size_t residual = length & 0x7;
RTC_DCHECK_GT(length, 0);
const int16_t* p_start = vector;
int16x8_t min16x8 = vdupq_n_s16(WEBRTC_SPL_WORD16_MAX);
int16x8_t max16x8 = vdupq_n_s16(WEBRTC_SPL_WORD16_MIN);
// First part, unroll the loop 8 times.
for (i = 0; i < length - residual; i += 8) {
int16x8_t in16x8 = vld1q_s16(p_start);
min16x8 = vminq_s16(min16x8, in16x8);
max16x8 = vmaxq_s16(max16x8, in16x8);
p_start += 8;
}
#if defined(WEBRTC_ARCH_ARM64)
minimum = vminvq_s16(min16x8);
maximum = vmaxvq_s16(max16x8);
#else
int16x4_t min16x4 = vmin_s16(vget_low_s16(min16x8), vget_high_s16(min16x8));
min16x4 = vpmin_s16(min16x4, min16x4);
min16x4 = vpmin_s16(min16x4, min16x4);
minimum = vget_lane_s16(min16x4, 0);
int16x4_t max16x4 = vmax_s16(vget_low_s16(max16x8), vget_high_s16(max16x8));
max16x4 = vpmax_s16(max16x4, max16x4);
max16x4 = vpmax_s16(max16x4, max16x4);
maximum = vget_lane_s16(max16x4, 0);
#endif
// Second part, do the remaining iterations (if any).
for (i = residual; i > 0; i--) {
if (*p_start < minimum)
minimum = *p_start;
if (*p_start > maximum)
maximum = *p_start;
p_start++;
}
*min_val = minimum;
*max_val = maximum;
}

View File

@ -41,35 +41,39 @@ static const uint16_t WebRtcSpl_kAllPassFilter2[3] = {21333, 49062, 63010};
//
// Output:
// - out_data : Output data sequence (Q10), length equal to
// |data_length|
// `data_length`
//
void WebRtcSpl_AllPassQMF(int32_t* in_data, size_t data_length,
int32_t* out_data, const uint16_t* filter_coefficients,
static void WebRtcSpl_AllPassQMF(int32_t* in_data,
size_t data_length,
int32_t* out_data,
const uint16_t* filter_coefficients,
int32_t* filter_state)
{
// The procedure is to filter the input with three first order all pass filters
// (cascade operations).
// The procedure is to filter the input with three first order all pass
// filters (cascade operations).
//
// a_3 + q^-1 a_2 + q^-1 a_1 + q^-1
// y[n] = ----------- ----------- ----------- x[n]
// 1 + a_3q^-1 1 + a_2q^-1 1 + a_1q^-1
//
// The input vector |filter_coefficients| includes these three filter coefficients.
// The filter state contains the in_data state, in_data[-1], followed by
// the out_data state, out_data[-1]. This is repeated for each cascade.
// The first cascade filter will filter the |in_data| and store the output in
// |out_data|. The second will the take the |out_data| as input and make an
// intermediate storage in |in_data|, to save memory. The third, and final, cascade
// filter operation takes the |in_data| (which is the output from the previous cascade
// filter) and store the output in |out_data|.
// Note that the input vector values are changed during the process.
// The input vector `filter_coefficients` includes these three filter
// coefficients. The filter state contains the in_data state, in_data[-1],
// followed by the out_data state, out_data[-1]. This is repeated for each
// cascade. The first cascade filter will filter the `in_data` and store
// the output in `out_data`. The second will then take the `out_data` as
// input and make an intermediate storage in `in_data`, to save memory. The
// third, and final, cascade filter operation takes the `in_data` (which is
// the output from the previous cascade filter) and store the output in
// `out_data`. Note that the input vector values are changed during the
// process.
size_t k;
int32_t diff;
// First all-pass cascade; filter from in_data to out_data.
// Let y_i[n] indicate the output of cascade filter i (with filter coefficient a_i) at
// vector position n. Then the final output will be y[n] = y_3[n]
// Let y_i[n] indicate the output of cascade filter i (with filter
// coefficient a_i) at vector position n. Then the final output will be
// y[n] = y_3[n]
// First loop, use the states stored in memory.
// "diff" should be safe from wrap around since max values are 2^25

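In floating point, one stage of that cascade reduces to a two-line recurrence; a sketch of the relation the fixed-point loops below implement:

#include <cstddef>

// One first-order all-pass stage, H(q) = (a + q^-1) / (1 + a*q^-1),
// i.e. y[n] = a*x[n] + x[n-1] - a*y[n-1]. The states persist across calls.
void AllPassStage(const float* x, size_t n, float a,
                  float* x_state, float* y_state, float* y) {
  for (size_t i = 0; i < n; ++i) {
    y[i] = a * x[i] + *x_state - a * *y_state;
    *x_state = x[i];
    *y_state = y[i];
  }
}
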
View File

@ -23,12 +23,12 @@ SmoothingFilterImpl::SmoothingFilterImpl(int init_time_ms)
: init_time_ms_(init_time_ms),
// During the initialization time, we use an increasing alpha. Specifically,
// alpha(n) = exp(-powf(init_factor_, n)),
// where |init_factor_| is chosen such that
// where `init_factor_` is chosen such that
// alpha(init_time_ms_) = exp(-1.0f / init_time_ms_),
init_factor_(init_time_ms_ == 0
? 0.0f
: powf(init_time_ms_, -1.0f / init_time_ms_)),
// |init_const_| is to a factor to help the calculation during
// `init_const_` is a factor to help the calculation during
// initialization phase.
init_const_(init_time_ms_ == 0
? 0.0f
@ -57,7 +57,7 @@ void SmoothingFilterImpl::AddSample(float sample) {
absl::optional<float> SmoothingFilterImpl::GetAverage() {
if (!init_end_time_ms_) {
// |init_end_time_ms_| undefined since we have not received any sample.
// `init_end_time_ms_` undefined since we have not received any sample.
return absl::nullopt;
}
ExtrapolateLastSample(rtc::TimeMillis());
@ -84,17 +84,17 @@ void SmoothingFilterImpl::ExtrapolateLastSample(int64_t time_ms) {
if (time_ms <= *init_end_time_ms_) {
// Current update is to be made during initialization phase.
// We update the state as if the |alpha| has been increased according to
// We update the state as if the `alpha` has been increased according
// alpha(n) = exp(-powf(init_factor_, n)),
// where n is the time (in millisecond) since the first sample received.
// With algebraic derivation as shown in the Appendix, we can find that the
// state can be updated in a similar manner as if alpha is a constant,
// except for a different multiplier.
if (init_time_ms_ == 0) {
// This means |init_factor_| = 0.
// This means `init_factor_` = 0.
multiplier = 0.0f;
} else if (init_time_ms_ == 1) {
// This means |init_factor_| = 1.
// This means `init_factor_` = 1.
multiplier = std::exp(last_state_time_ms_ - time_ms);
} else {
multiplier = std::exp(
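
The relations above check out numerically; a standalone sketch (init_time_ms = 100 is an arbitrary choice):

#include <cmath>
#include <cstdio>

int main() {
  const double init_time_ms = 100.0;
  // Chosen so that alpha(init_time_ms) == exp(-1.0 / init_time_ms).
  const double init_factor = std::pow(init_time_ms, -1.0 / init_time_ms);
  const double ns[] = {1.0, 10.0, 100.0};
  for (double n : ns) {
    // Early samples see a small alpha (fast adaptation); at n == init_time_ms
    // alpha reaches the steady-state constant exp(-0.01) ~= 0.990.
    std::printf("alpha(%5.0f) = %f\n", n, std::exp(-std::pow(init_factor, n)));
  }
  return 0;
}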

View File

@ -33,13 +33,13 @@ class SmoothingFilter {
// assumed to equal the last received sample.
class SmoothingFilterImpl final : public SmoothingFilter {
public:
// |init_time_ms| is initialization time. It defines a period starting from
// `init_time_ms` is initialization time. It defines a period starting from
// the arriving time of the first sample. During this period, the exponential
// filter uses a varying time constant so that a smaller time constant will be
// applied to the earlier samples. This is to allow the filter to adapt to
// earlier samples quickly. After the initialization period, the time constant
// will be set to |init_time_ms| first and can be changed through
// |SetTimeConstantMs|.
// will be set to `init_time_ms` first and can be changed through
// `SetTimeConstantMs`.
explicit SmoothingFilterImpl(int init_time_ms);
SmoothingFilterImpl() = delete;

View File

@ -286,11 +286,11 @@ Appendix :
w[] and ip[] are compatible with all routines.
*/
#include "common_audio/third_party/ooura/fft_size_256/fft4g.h"
#include <math.h>
#include <stddef.h>
#include "common_audio/third_party/ooura/fft_size_256/fft4g.h"
namespace webrtc {
namespace {

View File

@ -11,6 +11,8 @@
#ifndef COMMON_AUDIO_THIRD_PARTY_OOURA_FFT_SIZE_256_FFT4G_H_
#define COMMON_AUDIO_THIRD_PARTY_OOURA_FFT_SIZE_256_FFT4G_H_
#include <stddef.h>
namespace webrtc {
// Refer to fft4g.c for documentation.

View File

@ -13,9 +13,9 @@
//
// WebRtcSpl_SqrtFloor(...)
//
// Returns the square root of the input value |value|. The precision of this
// Returns the square root of the input value `value`. The precision of this
// function is rounding down integer precision, i.e., sqrt(8) gives 2 as answer.
// If |value| is a negative number then 0 is returned.
// If `value` is a negative number then 0 is returned.
//
// Algorithm:
//

View File

@ -54,7 +54,7 @@ int WebRtcVad_Init(VadInst* handle);
// has not been initialized).
int WebRtcVad_set_mode(VadInst* handle, int mode);
// Calculates a VAD decision for the |audio_frame|. For valid sampling rates
// Calculates a VAD decision for the `audio_frame`. For valid sampling rates
// and frame lengths, see the description of WebRtcVad_ValidRatesAndFrameLengths().
//
// - handle [i/o] : VAD Instance. Needs to be initialized by
@ -71,7 +71,7 @@ int WebRtcVad_Process(VadInst* handle,
const int16_t* audio_frame,
size_t frame_length);
// Checks for valid combinations of |rate| and |frame_length|. We support 10,
// Checks for valid combinations of `rate` and `frame_length`. We support 10,
// 20 and 30 ms frames and the rates 8000, 16000 and 32000 Hz.
//
// - rate [i] : Sampling frequency (Hz).
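
End to end, the call sequence is short (a sketch; WebRtcVad_Create and WebRtcVad_Free are assumed to be declared earlier in this header):

#include <cstddef>
#include <cstdint>
#include "common_audio/vad/include/webrtc_vad.h"

int ClassifyFrame16k(const int16_t* frame, size_t frame_length /* 160 = 10 ms */) {
  VadInst* vad = WebRtcVad_Create();
  if (vad == nullptr || WebRtcVad_Init(vad) != 0) return -1;
  WebRtcVad_set_mode(vad, 2);  // 0..3; higher modes are more aggressive
  const int result = WebRtcVad_Process(vad, 16000, frame, frame_length);
  WebRtcVad_Free(vad);
  return result;  // 1 = active voice, 0 = non-active, -1 = error
}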

View File

@ -38,7 +38,7 @@ class VadImpl final : public Vad {
case 1:
return kActive;
default:
RTC_NOTREACHED() << "WebRtcVad_Process returned an error.";
RTC_DCHECK_NOTREACHED() << "WebRtcVad_Process returned an error.";
return kError;
}
}

View File

@ -90,11 +90,11 @@ static const int16_t kOverHangMax2VAG[3] = { 9, 5, 3 };
static const int16_t kLocalThresholdVAG[3] = { 94, 94, 94 };
static const int16_t kGlobalThresholdVAG[3] = { 1100, 1050, 1100 };
// Calculates the weighted average w.r.t. number of Gaussians. The |data| are
// updated with an |offset| before averaging.
// Calculates the weighted average w.r.t. number of Gaussians. The `data` are
// updated with an `offset` before averaging.
//
// - data [i/o] : Data to average.
// - offset [i] : An offset added to |data|.
// - offset [i] : An offset added to `data`.
// - weights [i] : Weights used for averaging.
//
// returns : The weighted average.
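
A simplified scalar sketch of that helper (the real code strides `data` by kNumChannels so each channel's Gaussians sit in one flat array; the striding is dropped here):

#include <cstdint>

// Offset each Gaussian's value in place, then form the weighted sum.
// In the fixed-point code the result is Q14 (Q7 data * Q7 weights).
int32_t WeightedAverage(int16_t* data, int16_t offset,
                        const int16_t* weights, int num_gaussians) {
  int32_t weighted_average = 0;
  for (int k = 0; k < num_gaussians; ++k) {
    data[k] += offset;
    weighted_average += data[k] * weights[k];
  }
  return weighted_average;
}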
@ -124,7 +124,7 @@ static inline int32_t RTC_NO_SANITIZE("signed-integer-overflow")
// type of signal is most probable.
//
// - self [i/o] : Pointer to VAD instance
// - features [i] : Feature vector of length |kNumChannels|
// - features [i] : Feature vector of length `kNumChannels`
// = log10(energy in frequency band)
// - total_power [i] : Total power in audio frame.
// - frame_length [i] : Number of input samples
@ -183,10 +183,10 @@ static int16_t GmmProbability(VadInstT* self, int16_t* features,
// H1: Speech
//
// We combine a global LRT with local tests, for each frequency sub-band,
// here defined as |channel|.
// here defined as `channel`.
for (channel = 0; channel < kNumChannels; channel++) {
// For each channel we model the probability with a GMM consisting of
// |kNumGaussians|, with different means and standard deviations depending
// `kNumGaussians`, with different means and standard deviations depending
// on H0 or H1.
h0_test = 0;
h1_test = 0;
@ -234,7 +234,7 @@ static int16_t GmmProbability(VadInstT* self, int16_t* features,
}
log_likelihood_ratio = shifts_h0 - shifts_h1;
// Update |sum_log_likelihood_ratios| with spectrum weighting. This is
// Update `sum_log_likelihood_ratios` with spectrum weighting. This is
// used for the global VAD decision.
sum_log_likelihood_ratios +=
(int32_t) (log_likelihood_ratio * kSpectrumWeight[channel]);
@ -298,8 +298,8 @@ static int16_t GmmProbability(VadInstT* self, int16_t* features,
nmk2 = nmk;
if (!vadflag) {
// deltaN = (x-mu)/sigma^2
// ngprvec[k] = |noise_probability[k]| /
// (|noise_probability[0]| + |noise_probability[1]|)
// ngprvec[k] = `noise_probability[k]` /
// (`noise_probability[0]` + `noise_probability[1]`)
// (Q14 * Q11 >> 11) = Q14.
delt = (int16_t)((ngprvec[gaussian] * deltaN[gaussian]) >> 11);
@ -326,9 +326,9 @@ static int16_t GmmProbability(VadInstT* self, int16_t* features,
if (vadflag) {
// Update speech mean vector:
// |deltaS| = (x-mu)/sigma^2
// sgprvec[k] = |speech_probability[k]| /
// (|speech_probability[0]| + |speech_probability[1]|)
// `deltaS` = (x-mu)/sigma^2
// sgprvec[k] = `speech_probability[k]` /
// (`speech_probability[0]` + `speech_probability[1]`)
// (Q14 * Q11) >> 11 = Q14.
delt = (int16_t)((sgprvec[gaussian] * deltaS[gaussian]) >> 11);
@ -409,35 +409,35 @@ static int16_t GmmProbability(VadInstT* self, int16_t* features,
}
// Separate models if they are too close.
// |noise_global_mean| in Q14 (= Q7 * Q7).
// `noise_global_mean` in Q14 (= Q7 * Q7).
noise_global_mean = WeightedAverage(&self->noise_means[channel], 0,
&kNoiseDataWeights[channel]);
// |speech_global_mean| in Q14 (= Q7 * Q7).
// `speech_global_mean` in Q14 (= Q7 * Q7).
speech_global_mean = WeightedAverage(&self->speech_means[channel], 0,
&kSpeechDataWeights[channel]);
// |diff| = "global" speech mean - "global" noise mean.
// `diff` = "global" speech mean - "global" noise mean.
// (Q14 >> 9) - (Q14 >> 9) = Q5.
diff = (int16_t) (speech_global_mean >> 9) -
(int16_t) (noise_global_mean >> 9);
if (diff < kMinimumDifference[channel]) {
tmp_s16 = kMinimumDifference[channel] - diff;
// |tmp1_s16| = ~0.8 * (kMinimumDifference - diff) in Q7.
// |tmp2_s16| = ~0.2 * (kMinimumDifference - diff) in Q7.
// `tmp1_s16` = ~0.8 * (kMinimumDifference - diff) in Q7.
// `tmp2_s16` = ~0.2 * (kMinimumDifference - diff) in Q7.
tmp1_s16 = (int16_t)((13 * tmp_s16) >> 2);
tmp2_s16 = (int16_t)((3 * tmp_s16) >> 2);
// Move Gaussian means for speech model by |tmp1_s16| and update
// |speech_global_mean|. Note that |self->speech_means[channel]| is
// Move Gaussian means for speech model by `tmp1_s16` and update
// `speech_global_mean`. Note that `self->speech_means[channel]` is
// changed after the call.
speech_global_mean = WeightedAverage(&self->speech_means[channel],
tmp1_s16,
&kSpeechDataWeights[channel]);
// Move Gaussian means for noise model by -|tmp2_s16| and update
// |noise_global_mean|. Note that |self->noise_means[channel]| is
// Move Gaussian means for noise model by -`tmp2_s16` and update
// `noise_global_mean`. Note that `self->noise_means[channel]` is
// changed after the call.
noise_global_mean = WeightedAverage(&self->noise_means[channel],
-tmp2_s16,
@ -534,7 +534,7 @@ int WebRtcVad_InitCore(VadInstT* self) {
self->mean_value[i] = 1600;
}
// Set aggressiveness mode to default (=|kDefaultMode|).
// Set aggressiveness mode to default (=`kDefaultMode`).
if (WebRtcVad_set_mode_core(self, kDefaultMode) != 0) {
return -1;
}
@ -609,7 +609,7 @@ int WebRtcVad_CalcVad48khz(VadInstT* inst, const int16_t* speech_frame,
int vad;
size_t i;
int16_t speech_nb[240]; // 30 ms in 8 kHz.
// |tmp_mem| is a temporary memory used by resample function, length is
// `tmp_mem` is temporary memory used by the resample function; its length is
// the 10 ms frame length (480 samples) + 256 extra.
int32_t tmp_mem[480 + 256] = { 0 };
const size_t kFrameLen10ms48khz = 480;

View File

@ -17,10 +17,19 @@
#include "common_audio/signal_processing/include/signal_processing_library.h"
enum { kNumChannels = 6 }; // Number of frequency bands (named channels).
enum { kNumGaussians = 2 }; // Number of Gaussians per channel in the GMM.
enum { kTableSize = kNumChannels * kNumGaussians };
enum { kMinEnergy = 10 }; // Minimum energy required to trigger audio signal.
// TODO(https://bugs.webrtc.org/14476): When converted to C++, remove the macro.
#if defined(__cplusplus)
#define CONSTEXPR_INT(x) constexpr int x
#else
#define CONSTEXPR_INT(x) enum { x }
#endif
CONSTEXPR_INT(kNumChannels = 6); // Number of frequency bands (named channels).
CONSTEXPR_INT(
kNumGaussians = 2); // Number of Gaussians per channel in the GMM.
CONSTEXPR_INT(kTableSize = kNumChannels * kNumGaussians);
CONSTEXPR_INT(
kMinEnergy = 10); // Minimum energy required to trigger audio signal.
typedef struct VadInstT_ {
int vad;
@ -30,14 +39,14 @@ typedef struct VadInstT_ {
int16_t speech_means[kTableSize];
int16_t noise_stds[kTableSize];
int16_t speech_stds[kTableSize];
// TODO(bjornv): Change to |frame_count|.
// TODO(bjornv): Change to `frame_count`.
int32_t frame_counter;
int16_t over_hang; // Over Hang
int16_t num_of_speech;
// TODO(bjornv): Change to |age_vector|.
// TODO(bjornv): Change to `age_vector`.
int16_t index_vector[16 * kNumChannels];
int16_t low_value_vector[16 * kNumChannels];
// TODO(bjornv): Change to |median|.
// TODO(bjornv): Change to `median`.
int16_t mean_value[kNumChannels];
int16_t upper_state[5];
int16_t lower_state[5];
@ -51,7 +60,7 @@ typedef struct VadInstT_ {
} VadInstT;
// Initializes the core VAD component. The default aggressiveness mode is
// controlled by |kDefaultMode| in vad_core.c.
// controlled by `kDefaultMode` in vad_core.c.
//
// - self [i/o] : Instance that should be initialized
//
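For reference, the CONSTEXPR_INT macro above expands the same declaration two
ways depending on the language of the translation unit:

    CONSTEXPR_INT(kMinEnergy = 10);
    // C++: constexpr int kMinEnergy = 10;
    // C:   enum { kMinEnergy = 10 };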

View File

@ -28,7 +28,7 @@ static const int16_t kAllPassCoefsQ15[2] = { 20972, 5571 };
// Adjustment for division with two in SplitFilter.
static const int16_t kOffsetVector[6] = { 368, 368, 272, 176, 176, 176 };
// High pass filtering, with a cut-off frequency at 80 Hz, if the |data_in| is
// High pass filtering, with a cut-off frequency at 80 Hz, if the `data_in` is
// sampled at 500 Hz.
//
// - data_in [i] : Input audio data sampled at 500 Hz.
@ -69,9 +69,9 @@ static void HighPassFilter(const int16_t* data_in, size_t data_length,
}
}
// All pass filtering of |data_in|, used before splitting the signal into two
// All pass filtering of `data_in`, used before splitting the signal into two
// frequency bands (low pass vs high pass).
// Note that |data_in| and |data_out| can NOT correspond to the same address.
// Note that `data_in` and `data_out` can NOT correspond to the same address.
//
// - data_in [i] : Input audio signal given in Q0.
// - data_length [i] : Length of input and output data.
@ -104,17 +104,17 @@ static void AllPassFilter(const int16_t* data_in, size_t data_length,
*filter_state = (int16_t) (state32 >> 16); // Q(-1)
}
// Splits |data_in| into |hp_data_out| and |lp_data_out| corresponding to
// Splits `data_in` into `hp_data_out` and `lp_data_out` corresponding to
// an upper (high pass) part and a lower (low pass) part respectively.
//
// - data_in [i] : Input audio data to be split into two frequency bands.
// - data_length [i] : Length of |data_in|.
// - data_length [i] : Length of `data_in`.
// - upper_state [i/o] : State of the upper filter, given in Q(-1).
// - lower_state [i/o] : State of the lower filter, given in Q(-1).
// - hp_data_out [o] : Output audio data of the upper half of the spectrum.
// The length is |data_length| / 2.
// The length is `data_length` / 2.
// - lp_data_out [o] : Output audio data of the lower half of the spectrum.
// The length is |data_length| / 2.
// The length is `data_length` / 2.
static void SplitFilter(const int16_t* data_in, size_t data_length,
int16_t* upper_state, int16_t* lower_state,
int16_t* hp_data_out, int16_t* lp_data_out) {
@ -138,23 +138,23 @@ static void SplitFilter(const int16_t* data_in, size_t data_length,
}
}
// Calculates the energy of |data_in| in dB, and also updates an overall
// |total_energy| if necessary.
// Calculates the energy of `data_in` in dB, and also updates an overall
// `total_energy` if necessary.
//
// - data_in [i] : Input audio data for energy calculation.
// - data_length [i] : Length of input data.
// - offset [i] : Offset value added to |log_energy|.
// - offset [i] : Offset value added to `log_energy`.
// - total_energy [i/o] : An external energy updated with the energy of
// |data_in|.
// NOTE: |total_energy| is only updated if
// |total_energy| <= |kMinEnergy|.
// - log_energy [o] : 10 * log10("energy of |data_in|") given in Q4.
// `data_in`.
// NOTE: `total_energy` is only updated if
// `total_energy` <= `kMinEnergy`.
// - log_energy [o] : 10 * log10("energy of `data_in`") given in Q4.
static void LogOfEnergy(const int16_t* data_in, size_t data_length,
int16_t offset, int16_t* total_energy,
int16_t* log_energy) {
// |tot_rshifts| accumulates the number of right shifts performed on |energy|.
// `tot_rshifts` accumulates the number of right shifts performed on `energy`.
int tot_rshifts = 0;
// The |energy| will be normalized to 15 bits. We use unsigned integer because
// The `energy` will be normalized to 15 bits. We use an unsigned integer because
// we eventually will mask out the fractional part.
uint32_t energy = 0;
@ -169,14 +169,14 @@ static void LogOfEnergy(const int16_t* data_in, size_t data_length,
// zeros of an unsigned 32 bit value.
int normalizing_rshifts = 17 - WebRtcSpl_NormU32(energy);
// In a 15 bit representation the leading bit is 2^14. log2(2^14) in Q10 is
// (14 << 10), which is what we initialize |log2_energy| with. For a more
// (14 << 10), which is what we initialize `log2_energy` with. For a more
// detailed derivation, see below.
int16_t log2_energy = kLogEnergyIntPart;
tot_rshifts += normalizing_rshifts;
// Normalize |energy| to 15 bits.
// |tot_rshifts| is now the total number of right shifts performed on
// |energy| after normalization. This means that |energy| is in
// Normalize `energy` to 15 bits.
// `tot_rshifts` is now the total number of right shifts performed on
// `energy` after normalization. This means that `energy` is in
// Q(-tot_rshifts).
if (normalizing_rshifts < 0) {
energy <<= -normalizing_rshifts;
@ -184,30 +184,30 @@ static void LogOfEnergy(const int16_t* data_in, size_t data_length,
energy >>= normalizing_rshifts;
}
// Calculate the energy of |data_in| in dB, in Q4.
// Calculate the energy of `data_in` in dB, in Q4.
//
// 10 * log10("true energy") in Q4 = 2^4 * 10 * log10("true energy") =
// 160 * log10(|energy| * 2^|tot_rshifts|) =
// 160 * log10(2) * log2(|energy| * 2^|tot_rshifts|) =
// 160 * log10(2) * (log2(|energy|) + log2(2^|tot_rshifts|)) =
// (160 * log10(2)) * (log2(|energy|) + |tot_rshifts|) =
// |kLogConst| * (|log2_energy| + |tot_rshifts|)
// 160 * log10(`energy` * 2^`tot_rshifts`) =
// 160 * log10(2) * log2(`energy` * 2^`tot_rshifts`) =
// 160 * log10(2) * (log2(`energy`) + log2(2^`tot_rshifts`)) =
// (160 * log10(2)) * (log2(`energy`) + `tot_rshifts`) =
// `kLogConst` * (`log2_energy` + `tot_rshifts`)
//
// We know by construction that |energy| is normalized to 15 bits. Hence,
// |energy| = 2^14 + frac_Q15, where frac_Q15 is a fractional part in Q15.
// Further, we'd like |log2_energy| in Q10
// log2(|energy|) in Q10 = 2^10 * log2(2^14 + frac_Q15) =
// We know by construction that `energy` is normalized to 15 bits. Hence,
// `energy` = 2^14 + frac_Q15, where frac_Q15 is a fractional part in Q15.
// Further, we'd like `log2_energy` in Q10
// log2(`energy`) in Q10 = 2^10 * log2(2^14 + frac_Q15) =
// 2^10 * log2(2^14 * (1 + frac_Q15 * 2^-14)) =
// 2^10 * (14 + log2(1 + frac_Q15 * 2^-14)) ~=
// (14 << 10) + 2^10 * (frac_Q15 * 2^-14) =
// (14 << 10) + (frac_Q15 * 2^-4) = (14 << 10) + (frac_Q15 >> 4)
//
// Note that frac_Q15 = (|energy| & 0x00003FFF)
// Note that frac_Q15 = (`energy` & 0x00003FFF)
// Calculate and add the fractional part to |log2_energy|.
// Calculate and add the fractional part to `log2_energy`.
log2_energy += (int16_t) ((energy & 0x00003FFF) >> 4);
// |kLogConst| is in Q9, |log2_energy| in Q10 and |tot_rshifts| in Q0.
// `kLogConst` is in Q9, `log2_energy` in Q10 and `tot_rshifts` in Q0.
// Note that we in our derivation above have accounted for an output in Q4.
*log_energy = (int16_t)(((kLogConst * log2_energy) >> 19) +
((tot_rshifts * kLogConst) >> 9));
@ -222,19 +222,19 @@ static void LogOfEnergy(const int16_t* data_in, size_t data_length,
*log_energy += offset;
// Update the approximate |total_energy| with the energy of |data_in|, if
// |total_energy| has not exceeded |kMinEnergy|. |total_energy| is used as an
// Update the approximate `total_energy` with the energy of `data_in`, if
// `total_energy` has not exceeded `kMinEnergy`. `total_energy` is used as an
// energy indicator in WebRtcVad_GmmProbability() in vad_core.c.
if (*total_energy <= kMinEnergy) {
if (tot_rshifts >= 0) {
// We know by construction that the |energy| > |kMinEnergy| in Q0, so add
// an arbitrary value such that |total_energy| exceeds |kMinEnergy|.
// We know by construction that the `energy` > `kMinEnergy` in Q0, so add
// an arbitrary value such that `total_energy` exceeds `kMinEnergy`.
*total_energy += kMinEnergy + 1;
} else {
// By construction |energy| is represented by 15 bits, hence any number of
// right shifted |energy| will fit in an int16_t. In addition, adding the
// value to |total_energy| is wrap around safe as long as
// |kMinEnergy| < 8192.
// By construction `energy` is represented by 15 bits, hence any number of
// right shifted `energy` will fit in an int16_t. In addition, adding the
// value to `total_energy` is wrap around safe as long as
// `kMinEnergy` < 8192.
*total_energy += (int16_t) (energy >> -tot_rshifts); // Q0.
}
}
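A floating-point reference for the Q4 value the fixed-point path above
approximates (a sketch following the derivation in the comments, not the
shipped code; `energy` and `tot_rshifts` as defined above):

    #include <math.h>
    // "true energy" = energy * 2^tot_rshifts, and Q4 means the value times 2^4:
    double true_energy = (double)energy * pow(2.0, (double)tot_rshifts);
    double log_energy_q4 = 16.0 * 10.0 * log10(true_energy);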
@ -243,14 +243,14 @@ static void LogOfEnergy(const int16_t* data_in, size_t data_length,
int16_t WebRtcVad_CalculateFeatures(VadInstT* self, const int16_t* data_in,
size_t data_length, int16_t* features) {
int16_t total_energy = 0;
// We expect |data_length| to be 80, 160 or 240 samples, which corresponds to
// We expect `data_length` to be 80, 160 or 240 samples, which corresponds to
// 10, 20 or 30 ms in 8 kHz. Therefore, the intermediate downsampled data will
// have at most 120 samples after the first split and at most 60 samples after
// the second split.
int16_t hp_120[120], lp_120[120];
int16_t hp_60[60], lp_60[60];
const size_t half_data_length = data_length >> 1;
size_t length = half_data_length; // |data_length| / 2, corresponds to
size_t length = half_data_length; // `data_length` / 2, corresponds to
// bandwidth = 2000 Hz after downsampling.
// Initialize variables for the first SplitFilter().
@ -260,7 +260,7 @@ int16_t WebRtcVad_CalculateFeatures(VadInstT* self, const int16_t* data_in,
int16_t* lp_out_ptr = lp_120; // [0 - 2000] Hz.
RTC_DCHECK_LE(data_length, 240);
RTC_DCHECK_LT(4, kNumChannels - 1); // Checking maximum |frequency_band|.
RTC_DCHECK_LT(4, kNumChannels - 1); // Checking maximum `frequency_band`.
// Split at 2000 Hz and downsample.
SplitFilter(in_ptr, data_length, &self->upper_state[frequency_band],
@ -275,7 +275,7 @@ int16_t WebRtcVad_CalculateFeatures(VadInstT* self, const int16_t* data_in,
&self->lower_state[frequency_band], hp_out_ptr, lp_out_ptr);
// Energy in 3000 Hz - 4000 Hz.
length >>= 1; // |data_length| / 4 <=> bandwidth = 1000 Hz.
length >>= 1; // `data_length` / 4 <=> bandwidth = 1000 Hz.
LogOfEnergy(hp_60, length, kOffsetVector[5], &total_energy, &features[5]);
@ -287,12 +287,12 @@ int16_t WebRtcVad_CalculateFeatures(VadInstT* self, const int16_t* data_in,
in_ptr = lp_120; // [0 - 2000] Hz.
hp_out_ptr = hp_60; // [1000 - 2000] Hz.
lp_out_ptr = lp_60; // [0 - 1000] Hz.
length = half_data_length; // |data_length| / 2 <=> bandwidth = 2000 Hz.
length = half_data_length; // `data_length` / 2 <=> bandwidth = 2000 Hz.
SplitFilter(in_ptr, length, &self->upper_state[frequency_band],
&self->lower_state[frequency_band], hp_out_ptr, lp_out_ptr);
// Energy in 1000 Hz - 2000 Hz.
length >>= 1; // |data_length| / 4 <=> bandwidth = 1000 Hz.
length >>= 1; // `data_length` / 4 <=> bandwidth = 1000 Hz.
LogOfEnergy(hp_60, length, kOffsetVector[3], &total_energy, &features[3]);
// For the lower band (0 Hz - 1000 Hz) split at 500 Hz and downsample.
@ -304,7 +304,7 @@ int16_t WebRtcVad_CalculateFeatures(VadInstT* self, const int16_t* data_in,
&self->lower_state[frequency_band], hp_out_ptr, lp_out_ptr);
// Energy in 500 Hz - 1000 Hz.
length >>= 1; // |data_length| / 8 <=> bandwidth = 500 Hz.
length >>= 1; // `data_length` / 8 <=> bandwidth = 500 Hz.
LogOfEnergy(hp_120, length, kOffsetVector[2], &total_energy, &features[2]);
// For the lower band (0 Hz - 500 Hz) split at 250 Hz and downsample.
@ -316,7 +316,7 @@ int16_t WebRtcVad_CalculateFeatures(VadInstT* self, const int16_t* data_in,
&self->lower_state[frequency_band], hp_out_ptr, lp_out_ptr);
// Energy in 250 Hz - 500 Hz.
length >>= 1; // |data_length| / 16 <=> bandwidth = 250 Hz.
length >>= 1; // `data_length` / 16 <=> bandwidth = 250 Hz.
LogOfEnergy(hp_60, length, kOffsetVector[1], &total_energy, &features[1]);
// Remove 0 Hz - 80 Hz, by high pass filtering the lower band.

View File

@ -17,8 +17,8 @@
#include "common_audio/vad/vad_core.h"
// Takes |data_length| samples of |data_in| and calculates the logarithm of the
// energy of each of the |kNumChannels| = 6 frequency bands used by the VAD:
// Takes `data_length` samples of `data_in` and calculates the logarithm of the
// energy of each of the `kNumChannels` = 6 frequency bands used by the VAD:
// 80 Hz - 250 Hz
// 250 Hz - 500 Hz
// 500 Hz - 1000 Hz
@ -26,10 +26,10 @@
// 2000 Hz - 3000 Hz
// 3000 Hz - 4000 Hz
//
// The values are given in Q4 and written to |features|. Further, an approximate
// The values are given in Q4 and written to `features`. Further, an approximate
// overall energy is returned. The return value is used in
// WebRtcVad_GmmProbability() as a signal indicator, hence it is arbitrary above
// the threshold |kMinEnergy|.
// the threshold `kMinEnergy`.
//
// - self [i/o] : State information of the VAD.
// - data_in [i] : Input audio data, for feature extraction.

View File

@ -15,16 +15,16 @@
static const int32_t kCompVar = 22005;
static const int16_t kLog2Exp = 5909; // log2(exp(1)) in Q12.
// For a normal distribution, the probability of |input| is calculated and
// For a normal distribution, the probability of `input` is calculated and
// returned (in Q20). The formula for normal distributed probability is
//
// 1 / s * exp(-(x - m)^2 / (2 * s^2))
//
// where the parameters are given in the following Q domains:
// m = |mean| (Q7)
// s = |std| (Q7)
// x = |input| (Q4)
// in addition to the probability we output |delta| (in Q11) used when updating
// m = `mean` (Q7)
// s = `std` (Q7)
// x = `input` (Q4)
// In addition to the probability, we output `delta` (in Q11), used when updating
// the noise/speech model.
int32_t WebRtcVad_GaussianProbability(int16_t input,
int16_t mean,
@ -33,13 +33,13 @@ int32_t WebRtcVad_GaussianProbability(int16_t input,
int16_t tmp16, inv_std, inv_std2, exp_value = 0;
int32_t tmp32;
// Calculate |inv_std| = 1 / s, in Q10.
// 131072 = 1 in Q17, and (|std| >> 1) is for rounding instead of truncation.
// Calculate `inv_std` = 1 / s, in Q10.
// 131072 = 1 in Q17, and (`std` >> 1) is for rounding instead of truncation.
// Q-domain: Q17 / Q7 = Q10.
tmp32 = (int32_t) 131072 + (int32_t) (std >> 1);
inv_std = (int16_t) WebRtcSpl_DivW32W16(tmp32, std);
// Calculate |inv_std2| = 1 / s^2, in Q14.
// Calculate `inv_std2` = 1 / s^2, in Q14.
tmp16 = (inv_std >> 2); // Q10 -> Q8.
// Q-domain: (Q8 * Q8) >> 2 = Q14.
inv_std2 = (int16_t)((tmp16 * tmp16) >> 2);
@ -51,20 +51,20 @@ int32_t WebRtcVad_GaussianProbability(int16_t input,
tmp16 = tmp16 - mean; // Q7 - Q7 = Q7
// To be used later, when updating noise/speech model.
// |delta| = (x - m) / s^2, in Q11.
// `delta` = (x - m) / s^2, in Q11.
// Q-domain: (Q14 * Q7) >> 10 = Q11.
*delta = (int16_t)((inv_std2 * tmp16) >> 10);
// Calculate the exponent |tmp32| = (x - m)^2 / (2 * s^2), in Q10. Replacing
// Calculate the exponent `tmp32` = (x - m)^2 / (2 * s^2), in Q10. Replacing
// division by two with one shift.
// Q-domain: (Q11 * Q7) >> 8 = Q10.
tmp32 = (*delta * tmp16) >> 9;
// If the exponent is small enough to give a non-zero probability we calculate
// |exp_value| ~= exp(-(x - m)^2 / (2 * s^2))
// ~= exp2(-log2(exp(1)) * |tmp32|).
// `exp_value` ~= exp(-(x - m)^2 / (2 * s^2))
// ~= exp2(-log2(exp(1)) * `tmp32`).
if (tmp32 < kCompVar) {
// Calculate |tmp16| = log2(exp(1)) * |tmp32|, in Q10.
// Calculate `tmp16` = log2(exp(1)) * `tmp32`, in Q10.
// Q-domain: (Q12 * Q10) >> 12 = Q10.
tmp16 = (int16_t)((kLog2Exp * tmp32) >> 12);
tmp16 = -tmp16;
@ -72,7 +72,7 @@ int32_t WebRtcVad_GaussianProbability(int16_t input,
tmp16 ^= 0xFFFF;
tmp16 >>= 10;
tmp16 += 1;
// Get |exp_value| = exp(-|tmp32|) in Q10.
// Get `exp_value` = exp(-`tmp32`) in Q10.
exp_value >>= tmp16;
}
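Ignoring the Q domains, the computation above corresponds to this float
reference (a sketch of the documented formula, not the fixed-point
implementation):

    #include <math.h>
    double GaussianProbabilityFloat(double input, double mean, double std,
                                    double* delta) {
      *delta = (input - mean) / (std * std);  // Used when updating the models.
      return (1.0 / std) *
             exp(-(input - mean) * (input - mean) / (2.0 * std * std));
    }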

View File

@ -15,8 +15,8 @@
#include <stdint.h>
// Calculates the probability for |input|, given that |input| comes from a
// normal distribution with mean and standard deviation (|mean|, |std|).
// Calculates the probability for `input`, given that `input` comes from a
// normal distribution with mean and standard deviation (`mean`, `std`).
//
// Inputs:
// - input : input sample in Q4.
@ -26,11 +26,11 @@
// Output:
//
// - delta : input used when updating the model, Q11.
// |delta| = (|input| - |mean|) / |std|^2.
// `delta` = (`input` - `mean`) / `std`^2.
//
// Return:
// (probability for |input|) =
// 1 / |std| * exp(-(|input| - |mean|)^2 / (2 * |std|^2));
// (probability for `input`) =
// 1 / `std` * exp(-(`input` - `mean`)^2 / (2 * `std`^2));
int32_t WebRtcVad_GaussianProbability(int16_t input,
int16_t mean,
int16_t std,

View File

@ -52,7 +52,7 @@ void WebRtcVad_Downsampling(const int16_t* signal_in,
filter_state[1] = tmp32_2;
}
// Inserts |feature_value| into |low_value_vector|, if it is one of the 16
// Inserts `feature_value` into `low_value_vector`, if it is one of the 16
// smallest values in the last 100 frames. Then calculates and returns the median
// of the five smallest values.
int16_t WebRtcVad_FindMinimum(VadInstT* self,
@ -66,13 +66,13 @@ int16_t WebRtcVad_FindMinimum(VadInstT* self,
int16_t alpha = 0;
int32_t tmp32 = 0;
// Pointer to memory for the 16 minimum values and the age of each value of
// the |channel|.
// the `channel`.
int16_t* age = &self->index_vector[offset];
int16_t* smallest_values = &self->low_value_vector[offset];
RTC_DCHECK_LT(channel, kNumChannels);
// Each value in |smallest_values| is getting 1 loop older. Update |age|, and
// Each value in `smallest_values` is getting 1 loop older. Update `age`, and
// remove old values.
for (i = 0; i < 16; i++) {
if (age[i] != 100) {
@ -88,9 +88,9 @@ int16_t WebRtcVad_FindMinimum(VadInstT* self,
}
}
// Check if |feature_value| is smaller than any of the values in
// |smallest_values|. If so, find the |position| where to insert the new value
// (|feature_value|).
// Check if `feature_value` is smaller than any of the values in
// `smallest_values`. If so, find the `position` where to insert the new value
// (`feature_value`).
if (feature_value < smallest_values[7]) {
if (feature_value < smallest_values[3]) {
if (feature_value < smallest_values[1]) {
@ -152,7 +152,7 @@ int16_t WebRtcVad_FindMinimum(VadInstT* self,
age[position] = 1;
}
// Get |current_median|.
// Get `current_median`.
if (self->frame_counter > 2) {
current_median = smallest_values[2];
} else if (self->frame_counter > 0) {

View File

@ -23,11 +23,11 @@
//
// Input & Output:
// - filter_state : Current filter states of the two all-pass filters. The
// |filter_state| is updated after all samples have been
// `filter_state` is updated after all samples have been
// processed.
//
// Output:
// - signal_out : Downsampled signal (of length |in_length| / 2).
// - signal_out : Downsampled signal (of length `in_length` / 2).
void WebRtcVad_Downsampling(const int16_t* signal_in,
int16_t* signal_out,
int32_t* filter_state,
@ -35,7 +35,7 @@ void WebRtcVad_Downsampling(const int16_t* signal_in,
// Updates and returns the smoothed feature minimum. As the minimum we use the
// median of the five smallest feature values in a 100-frame-long window.
// As long as |handle->frame_counter| is zero, that is, we haven't received any
// As long as `handle->frame_counter` is zero, that is, we haven't received any
// "valid" data, FindMinimum() outputs the default value of 1600.
//
// Inputs:

View File

@ -21,7 +21,7 @@ static const int kValidRates[] = { 8000, 16000, 32000, 48000 };
static const size_t kRatesSize = sizeof(kValidRates) / sizeof(*kValidRates);
static const int kMaxFrameLengthMs = 30;
VadInst* WebRtcVad_Create() {
VadInst* WebRtcVad_Create(void) {
VadInstT* self = (VadInstT*)malloc(sizeof(VadInstT));
self->init_flag = 0;
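The `(void)` change matters in C: an empty parameter list leaves the argument
types unspecified, whereas `(void)` is a real prototype, so the compiler can
diagnose stray arguments (in C++ the two forms are already equivalent):

    VadInst* WebRtcVad_Create();      /* old: unspecified parameters in C  */
    VadInst* WebRtcVad_Create(void);  /* new: prototype taking no arguments */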

View File

@ -65,7 +65,7 @@ constexpr size_t kMaxChunksize = 4096;
} // namespace
WavReader::WavReader(const std::string& filename)
WavReader::WavReader(absl::string_view filename)
: WavReader(FileWrapper::OpenReadOnly(filename)) {}
WavReader::WavReader(FileWrapper file) : file_(std::move(file)) {
@ -178,7 +178,7 @@ void WavReader::Close() {
file_.Close();
}
WavWriter::WavWriter(const std::string& filename,
WavWriter::WavWriter(absl::string_view filename,
int sample_rate,
size_t num_channels,
SampleFormat sample_format)

View File

@ -39,7 +39,7 @@ class WavFile {
class WavWriter final : public WavFile {
public:
// Opens a new WAV file for writing.
WavWriter(const std::string& filename,
WavWriter(absl::string_view filename,
int sample_rate,
size_t num_channels,
SampleFormat sample_format = SampleFormat::kInt16);
@ -77,7 +77,7 @@ class WavWriter final : public WavFile {
class WavReader final : public WavFile {
public:
// Opens an existing WAV file for reading.
explicit WavReader(const std::string& filename);
explicit WavReader(absl::string_view filename);
explicit WavReader(FileWrapper file);
// Close the WAV file.
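With the switch to absl::string_view, both classes still accept a string
literal or a std::string at the call site; a minimal sketch with a
hypothetical file name:

    webrtc::WavWriter writer("tone.wav", 48000, 2);  // PCM16 by default.
    webrtc::WavReader reader("tone.wav");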

View File

@ -80,8 +80,6 @@ const uint32_t kFmtIeeeFloatSubchunkSize =
// read audio samples.
#pragma pack(2)
struct WavHeaderPcm {
WavHeaderPcm(const WavHeaderPcm&) = default;
WavHeaderPcm& operator=(const WavHeaderPcm&) = default;
RiffHeader riff;
FmtPcmSubchunk fmt;
struct {
@ -95,8 +93,6 @@ static_assert(sizeof(WavHeaderPcm) == kPcmWavHeaderSize,
// WAV implementation.
#pragma pack(2)
struct WavHeaderIeeeFloat {
WavHeaderIeeeFloat(const WavHeaderIeeeFloat&) = default;
WavHeaderIeeeFloat& operator=(const WavHeaderIeeeFloat&) = default;
RiffHeader riff;
FmtIeeeFloatSubchunk fmt;
struct {
@ -132,7 +128,7 @@ uint16_t MapWavFormatToHeaderField(WavFormat format) {
case WavFormat::kWavFormatMuLaw:
return 7;
}
RTC_CHECK(false);
RTC_CHECK_NOTREACHED();
}
WavFormat MapHeaderFieldToWavFormat(uint16_t format_header_value) {
@ -161,7 +157,7 @@ uint16_t BlockAlign(size_t num_channels, size_t bytes_per_sample) {
return static_cast<uint16_t>(num_channels * bytes_per_sample);
}
// Finds a chunk having the sought ID. If found, then |readable| points to the
// Finds a chunk having the sought ID. If found, then `readable` points to the
// first byte of the sought chunk data. If not found, the end of the file is
// reached.
bool FindWaveChunk(ChunkHeader* chunk_header,
@ -278,10 +274,8 @@ size_t GetFormatBytesPerSample(WavFormat format) {
return 1;
case WavFormat::kWavFormatIeeeFloat:
return 4;
default:
RTC_CHECK(false);
return 2;
}
RTC_CHECK_NOTREACHED();
}
bool CheckWavParameters(size_t num_channels,
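Because RTC_CHECK_NOTREACHED() terminates unconditionally and is treated as
noreturn (which is what lets the `default: RTC_CHECK(false); return 2;` arm
above be deleted), a fully covered switch no longer needs a dummy return.
A hypothetical helper mirroring GetFormatBytesPerSample:

    size_t BytesFor(WavFormat format) {  // Hypothetical name.
      switch (format) {
        case WavFormat::kWavFormatPcm:
          return 2;
        case WavFormat::kWavFormatALaw:
        case WavFormat::kWavFormatMuLaw:
          return 1;
        case WavFormat::kWavFormatIeeeFloat:
          return 4;
      }
      RTC_CHECK_NOTREACHED();  // No dummy return needed after a noreturn check.
    }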

View File

@ -13,6 +13,7 @@
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include "rtc_base/checks.h"

View File

@ -0,0 +1,297 @@
// This file was automatically generated. Do not edit.
#ifndef GEN_REGISTERED_FIELD_TRIALS_H_
#define GEN_REGISTERED_FIELD_TRIALS_H_
#include "absl/strings/string_view.h"
namespace webrtc {
inline constexpr absl::string_view kRegisteredFieldTrials[] = {
"UseTwccPlrForAna",
"WebRTC-AddNetworkCostToVpn",
"WebRTC-AddPacingToCongestionWindowPushback",
"WebRTC-AdjustOpusBandwidth",
"WebRTC-Aec3AecStateFullResetKillSwitch",
"WebRTC-Aec3AecStateSubtractorAnalyzerResetKillSwitch",
"WebRTC-Aec3AntiHowlingMinimizationKillSwitch",
"WebRTC-Aec3ClampInstQualityToOneKillSwitch",
"WebRTC-Aec3ClampInstQualityToZeroKillSwitch",
"WebRTC-Aec3CoarseFilterResetHangoverKillSwitch",
"WebRTC-Aec3ConservativeTailFreqResponse",
"WebRTC-Aec3DeactivateInitialStateResetKillSwitch",
"WebRTC-Aec3DelayEstimateSmoothingDelayFoundOverride",
"WebRTC-Aec3DelayEstimateSmoothingOverride",
"WebRTC-Aec3DelayEstimatorDetectPreEcho",
"WebRTC-Aec3EchoSaturationDetectionKillSwitch",
"WebRTC-Aec3EnforceCaptureDelayEstimationDownmixing",
"WebRTC-Aec3EnforceCaptureDelayEstimationLeftRightPrioritization",
"WebRTC-Aec3EnforceConservativeHfSuppression",
"WebRTC-Aec3EnforceLowActiveRenderLimit",
"WebRTC-Aec3EnforceMoreTransparentNearendSuppressorHfTuning",
"WebRTC-Aec3EnforceMoreTransparentNearendSuppressorTuning",
"WebRTC-Aec3EnforceMoreTransparentNormalSuppressorHfTuning",
"WebRTC-Aec3EnforceMoreTransparentNormalSuppressorTuning",
"WebRTC-Aec3EnforceRapidlyAdjustingNearendSuppressorTunings",
"WebRTC-Aec3EnforceRapidlyAdjustingNormalSuppressorTunings",
"WebRTC-Aec3EnforceRenderDelayEstimationDownmixing",
"WebRTC-Aec3EnforceSlowlyAdjustingNearendSuppressorTunings",
"WebRTC-Aec3EnforceSlowlyAdjustingNormalSuppressorTunings",
"WebRTC-Aec3EnforceStationarityProperties",
"WebRTC-Aec3EnforceStationarityPropertiesAtInit",
"WebRTC-Aec3EnforceVeryLowActiveRenderLimit",
"WebRTC-Aec3HighPassFilterEchoReference",
"WebRTC-Aec3MinErleDuringOnsetsKillSwitch",
"WebRTC-Aec3NonlinearModeReverbKillSwitch",
"WebRTC-Aec3OnsetDetectionKillSwitch",
"WebRTC-Aec3PenalyzeHighDelaysInitialPhase",
"WebRTC-Aec3PreEchoConfiguration",
"WebRTC-Aec3RenderDelayEstimationLeftRightPrioritizationKillSwitch",
"WebRTC-Aec3SensitiveDominantNearendActivation",
"WebRTC-Aec3SetupSpecificDefaultConfigDefaultsKillSwitch",
"WebRTC-Aec3ShortHeadroomKillSwitch",
"WebRTC-Aec3StereoContentDetectionKillSwitch",
"WebRTC-Aec3SuppressorAntiHowlingGainOverride",
"WebRTC-Aec3SuppressorDominantNearendEnrExitThresholdOverride",
"WebRTC-Aec3SuppressorDominantNearendEnrThresholdOverride",
"WebRTC-Aec3SuppressorDominantNearendHoldDurationOverride",
"WebRTC-Aec3SuppressorDominantNearendSnrThresholdOverride",
"WebRTC-Aec3SuppressorDominantNearendTriggerThresholdOverride",
"WebRTC-Aec3SuppressorNearendHfMaskSuppressOverride",
"WebRTC-Aec3SuppressorNearendHfMaskTransparentOverride",
"WebRTC-Aec3SuppressorNearendLfMaskSuppressOverride",
"WebRTC-Aec3SuppressorNearendLfMaskTransparentOverride",
"WebRTC-Aec3SuppressorNearendMaxDecFactorLfOverride",
"WebRTC-Aec3SuppressorNearendMaxIncFactorOverride",
"WebRTC-Aec3SuppressorNormalHfMaskSuppressOverride",
"WebRTC-Aec3SuppressorNormalHfMaskTransparentOverride",
"WebRTC-Aec3SuppressorNormalLfMaskSuppressOverride",
"WebRTC-Aec3SuppressorNormalLfMaskTransparentOverride",
"WebRTC-Aec3SuppressorNormalMaxDecFactorLfOverride",
"WebRTC-Aec3SuppressorNormalMaxIncFactorOverride",
"WebRTC-Aec3SuppressorTuningOverride",
"WebRTC-Aec3TransparentAntiHowlingGain",
"WebRTC-Aec3TransparentModeHmm",
"WebRTC-Aec3TransparentModeKillSwitch",
"WebRTC-Aec3Use1Dot2SecondsInitialStateDuration",
"WebRTC-Aec3Use1Dot6SecondsInitialStateDuration",
"WebRTC-Aec3Use2Dot0SecondsInitialStateDuration",
"WebRTC-Aec3UseDot1SecondsInitialStateDuration",
"WebRTC-Aec3UseDot2SecondsInitialStateDuration",
"WebRTC-Aec3UseDot3SecondsInitialStateDuration",
"WebRTC-Aec3UseDot6SecondsInitialStateDuration",
"WebRTC-Aec3UseDot9SecondsInitialStateDuration",
"WebRTC-Aec3UseErleOnsetCompensationInDominantNearend",
"WebRTC-Aec3UseLowEarlyReflectionsDefaultGain",
"WebRTC-Aec3UseLowLateReflectionsDefaultGain",
"WebRTC-Aec3UseNearendReverbLen",
"WebRTC-Aec3UseShortConfigChangeDuration",
"WebRTC-Aec3UseZeroInitialStateDuration",
"WebRTC-Aec3VerySensitiveDominantNearendActivation",
"WebRTC-Agc2SimdAvx2KillSwitch",
"WebRTC-Agc2SimdNeonKillSwitch",
"WebRTC-Agc2SimdSse2KillSwitch",
"WebRTC-AllowMACBasedIPv6",
"WebRTC-AlrDetectorParameters",
"WebRTC-AndroidNetworkMonitor-IsAdapterAvailable",
"WebRTC-ApmExperimentalMultiChannelCaptureKillSwitch",
"WebRTC-ApmExperimentalMultiChannelRenderKillSwitch",
"WebRTC-Audio-2ndAgcMinMicLevelExperiment",
"WebRTC-Audio-ABWENoTWCC",
"WebRTC-Audio-AdaptivePtime",
"WebRTC-Audio-Allocation",
"WebRTC-Audio-AlrProbing",
"WebRTC-Audio-FecAdaptation",
"WebRTC-Audio-GainController2",
"WebRTC-Audio-LegacyOverhead",
"WebRTC-Audio-MinimizeResamplingOnMobile",
"WebRTC-Audio-NetEqDecisionLogicConfig",
"WebRTC-Audio-NetEqDelayManagerConfig",
"WebRTC-Audio-NetEqNackTrackerConfig",
"WebRTC-Audio-NetEqSmartFlushing",
"WebRTC-Audio-OpusAvoidNoisePumpingDuringDtx",
"WebRTC-Audio-OpusBitrateMultipliers",
"WebRTC-Audio-OpusPlcUsePrevDecodedSamples",
"WebRTC-Audio-OpusSetSignalVoiceWithDtx",
"WebRTC-Audio-Red-For-Opus",
"WebRTC-Audio-StableTargetAdaptation",
"WebRTC-Audio-iOS-Holding",
"WebRTC-AudioDevicePlayoutBufferSizeFactor",
"WebRTC-AutomaticAnimationDetectionScreenshare",
"WebRTC-Av1-GetEncoderInfoOverride",
"WebRTC-Avx2SupportKillSwitch",
"WebRTC-BindUsingInterfaceName",
"WebRTC-BoostedScreenshareQp",
"WebRTC-BurstyPacer",
"WebRTC-Bwe-AllocationProbing",
"WebRTC-Bwe-AlrProbing",
"WebRTC-Bwe-EstimateBoundedIncrease",
"WebRTC-Bwe-ExponentialProbing",
"WebRTC-Bwe-IgnoreProbesLowerThanNetworkStateEstimate",
"WebRTC-Bwe-InitialProbing",
"WebRTC-Bwe-InjectedCongestionController",
"WebRTC-Bwe-LimitProbesLowerThanThroughputEstimate",
"WebRTC-Bwe-LinkCapacity",
"WebRTC-Bwe-LossBasedBweV2",
"WebRTC-Bwe-LossBasedControl",
"WebRTC-Bwe-MaxRttLimit",
"WebRTC-Bwe-MinAllocAsLowerBound",
"WebRTC-Bwe-NetworkRouteConstraints",
"WebRTC-Bwe-NoFeedbackReset",
"WebRTC-Bwe-PaceAtMaxOfBweAndLowerLinkCapacity",
"WebRTC-Bwe-ProbingBehavior",
"WebRTC-Bwe-ProbingConfiguration",
"WebRTC-Bwe-ReceiveTimeFix",
"WebRTC-Bwe-ReceiverLimitCapsOnly",
"WebRTC-Bwe-RobustThroughputEstimatorSettings",
"WebRTC-Bwe-SafeResetOnRouteChange",
"WebRTC-Bwe-SeparateAudioPackets",
"WebRTC-Bwe-SubtractAdditionalBackoffTerm",
"WebRTC-Bwe-TrendlineEstimatorSettings",
"WebRTC-BweBackOffFactor",
"WebRTC-BweLossExperiment",
"WebRTC-BweRapidRecoveryExperiment",
"WebRTC-BweThroughputWindowConfig",
"WebRTC-BweWindowSizeInPackets",
"WebRTC-CongestionWindow",
"WebRTC-CpuLoadEstimator",
"WebRTC-Debugging-RtpDump",
"WebRTC-DecoderDataDumpDirectory",
"WebRTC-DefaultBitrateLimitsKillSwitch",
"WebRTC-DependencyDescriptorAdvertised",
"WebRTC-DisablePacerEmergencyStop",
"WebRTC-DisableRtxRateLimiter",
"WebRTC-DisableUlpFecExperiment",
"WebRTC-DontIncreaseDelayBasedBweInAlr",
"WebRTC-DscpFieldTrial",
"WebRTC-EncoderDataDumpDirectory",
"WebRTC-ExtraICEPing",
"WebRTC-FakeNetworkReceiveConfig",
"WebRTC-FakeNetworkSendConfig",
"WebRTC-FilterAbsSendTimeExtension",
"WebRTC-FindNetworkHandleWithoutIpv6TemporaryPart",
"WebRTC-FlexFEC-03",
"WebRTC-FlexFEC-03-Advertised",
"WebRTC-ForcePlayoutDelay",
"WebRTC-ForceSendPlayoutDelay",
"WebRTC-ForceSimulatedOveruseIntervalMs",
"WebRTC-FrameDropper",
"WebRTC-FullBandHpfKillSwitch",
"WebRTC-GenericCodecDependencyDescriptor",
"WebRTC-GenericDescriptorAdvertised",
"WebRTC-GenericDescriptorAuth",
"WebRTC-GenericPictureId",
"WebRTC-GetEncoderInfoOverride",
"WebRTC-H264HighProfile",
"WebRTC-IPv6Default",
"WebRTC-IPv6NetworkResolutionFixes",
"WebRTC-IceControllerFieldTrials",
"WebRTC-IceFieldTrials",
"WebRTC-IncomingTimestampOnMarkerBitOnly",
"WebRTC-IncreaseIceCandidatePriorityHostSrflx",
"WebRTC-JitterEstimatorConfig",
"WebRTC-KeyframeInterval",
"WebRTC-LegacyFrameIdJumpBehavior",
"WebRTC-LegacySimulcastLayerLimit",
"WebRTC-LegacyTlsProtocols",
"WebRTC-LibaomAv1Encoder-DisableFrameDropping",
"WebRTC-LowresSimulcastBitrateInterpolation",
"WebRTC-MutedStateKillSwitch",
"WebRTC-Network-UseNWPathMonitor",
"WebRTC-NetworkMonitorAutoDetect",
"WebRTC-NormalizeSimulcastResolution",
"WebRTC-Pacer-BlockAudio",
"WebRTC-Pacer-DrainQueue",
"WebRTC-Pacer-FastRetransmissions",
"WebRTC-Pacer-IgnoreTransportOverhead",
"WebRTC-Pacer-KeyframeFlushing",
"WebRTC-Pacer-PadInSilence",
"WebRTC-PacketBufferMaxSize",
"WebRTC-PaddingMode-RecentLargePacket",
"WebRTC-PcFactoryDefaultBitrates",
"WebRTC-PermuteTlsClientHello",
"WebRTC-PiggybackIceCheckAcknowledgement",
"WebRTC-PixelLimitResource",
"WebRTC-PreventSsrcGroupsWithUnexpectedSize",
"WebRTC-ProbingScreenshareBwe",
"WebRTC-ProtectionOverheadRateThreshold",
"WebRTC-QpParsingKillSwitch",
"WebRTC-ReceiveBufferSize",
"WebRTC-RtcEventLogEncodeDependencyDescriptor",
"WebRTC-RtcEventLogEncodeNetEqSetMinimumDelayKillSwitch",
"WebRTC-RtcEventLogKillSwitch",
"WebRTC-RtcEventLogNewFormat",
"WebRTC-RtcpLossNotification",
"WebRTC-RttMult",
"WebRTC-SCM-Timestamp",
"WebRTC-SendBufferSizeBytes",
"WebRTC-SendNackDelayMs",
"WebRTC-SendPacketsOnWorkerThread",
"WebRTC-SetSocketReceiveBuffer",
"WebRTC-SignalNetworkPreferenceChange",
"WebRTC-SimulcastEncoderAdapter-GetEncoderInfoOverride",
"WebRTC-SimulcastLayerLimitRoundUp",
"WebRTC-SpsPpsIdrIsH264Keyframe",
"WebRTC-StableTargetRate",
"WebRTC-Stats-RtxReceiveStats",
"WebRTC-StrictPacingAndProbing",
"WebRTC-StunInterPacketDelay",
"WebRTC-SurfaceCellularTypes",
"WebRTC-SwitchEncoderOnInitializationFailures",
"WebRTC-Target-Bitrate-Rtcp",
"WebRTC-TaskQueue-ReplaceLibeventWithStdlib",
"WebRTC-TransientSuppressorForcedOff",
"WebRTC-UseBaseHeavyVP8TL3RateAllocation",
"WebRTC-UseDifferentiatedCellularCosts",
"WebRTC-UseShortVP8TL2Pattern",
"WebRTC-UseShortVP8TL3Pattern",
"WebRTC-UseStandardBytesStats",
"WebRTC-UseTurnServerAsStunServer",
"WebRTC-VP8-CpuSpeed-Arm",
"WebRTC-VP8-ForcePartitionResilience",
"WebRTC-VP8-Forced-Fallback-Encoder-v2",
"WebRTC-VP8-GetEncoderInfoOverride",
"WebRTC-VP8-MaxFrameInterval",
"WebRTC-VP8-Postproc-Config",
"WebRTC-VP8-Postproc-Config-Arm",
"WebRTC-VP8ConferenceTemporalLayers",
"WebRTC-VP8IosMaxNumberOfThread",
"WebRTC-VP8VariableFramerateScreenshare",
"WebRTC-VP9-GetEncoderInfoOverride",
"WebRTC-VP9-LowTierOptimizations",
"WebRTC-VP9-PerformanceFlags",
"WebRTC-VP9QualityScaler",
"WebRTC-VP9VariableFramerateScreenshare",
"WebRTC-Video-BalancedDegradation",
"WebRTC-Video-BalancedDegradationSettings",
"WebRTC-Video-BandwidthQualityScalerSettings",
"WebRTC-Video-DisableAutomaticResize",
"WebRTC-Video-DiscardPacketsWithUnknownSsrc",
"WebRTC-Video-EnableRetransmitAllLayers",
"WebRTC-Video-EncoderFallbackSettings",
"WebRTC-Video-ForcedSwDecoderFallback",
"WebRTC-Video-InitialDecoderResolution",
"WebRTC-Video-MinVideoBitrate",
"WebRTC-Video-Pacing",
"WebRTC-Video-PreferTemporalSupportOnBaseLayer",
"WebRTC-Video-QualityRampupSettings",
"WebRTC-Video-QualityScalerSettings",
"WebRTC-Video-QualityScaling",
"WebRTC-Video-RequestedResolutionOverrideOutputFormatRequest",
"WebRTC-Video-UseFrameRateForOverhead",
"WebRTC-Video-VariableStartScaleFactor",
"WebRTC-VideoEncoderSettings",
"WebRTC-VideoFrameTrackingIdAdvertised",
"WebRTC-VideoLayersAllocationAdvertised",
"WebRTC-VideoRateControl",
"WebRTC-VoIPChannelRemixingAdjustmentKillSwitch",
"WebRTC-Vp9ExternalRefCtrl",
"WebRTC-Vp9InterLayerPred",
"WebRTC-Vp9IssueKeyFrameOnLayerDeactivation",
"WebRTC-ZeroHertzScreenshare",
"WebRTC-ZeroPlayoutDelay",
};
} // namespace webrtc
#endif // GEN_REGISTERED_FIELD_TRIALS_H_
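One way this generated table might be consumed (a sketch; IsRegistered is a
hypothetical helper, absl::c_linear_search is the Abseil container algorithm):

    #include "absl/algorithm/container.h"
    #include "absl/strings/string_view.h"

    bool IsRegistered(absl::string_view trial) {
      return absl::c_linear_search(webrtc::kRegisteredFieldTrials, trial);
    }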

View File

@ -21,7 +21,6 @@ group("modules") {
"rtp_rtcp",
"utility",
"video_coding",
"video_processing",
]
if (rtc_desktop_capture_supported) {
@ -36,10 +35,7 @@ rtc_source_set("module_api_public") {
rtc_source_set("module_api") {
visibility = [ "*" ]
sources = [
"include/module.h",
"include/module_common_types.h",
]
sources = [ "include/module_common_types.h" ]
}
rtc_source_set("module_fec_api") {
@ -47,7 +43,7 @@ rtc_source_set("module_fec_api") {
sources = [ "include/module_fec_types.h" ]
}
if (rtc_include_tests) {
if (rtc_include_tests && !build_with_chromium) {
modules_tests_resources = [
"../resources/audio_coding/testfile16kHz.pcm",
"../resources/audio_coding/testfile32kHz.pcm",
@ -82,12 +78,14 @@ if (rtc_include_tests) {
data = modules_tests_resources
if (is_android) {
use_default_launcher = false
deps += [
# NOTE(brandtr): Including Java classes seems only to be possible from
# rtc_test targets. Therefore we include this target here, instead of
# in video_coding_modules_tests, where it is actually used.
"../sdk/android:libjingle_peerconnection_java",
"//testing/android/native_test:native_test_native_code",
"//sdk/android:native_test_jni_onload",
"//testing/android/native_test:native_test_support",
]
shard_timeout = 900
}
@ -147,18 +145,28 @@ if (rtc_include_tests) {
"../resources/audio_processing/transient/wpd7.dat",
"../resources/deflicker_before_cif_short.yuv",
"../resources/far16_stereo.pcm",
"../resources/far176_stereo.pcm",
"../resources/far192_stereo.pcm",
"../resources/far22_stereo.pcm",
"../resources/far32_stereo.pcm",
"../resources/far44_stereo.pcm",
"../resources/far48_stereo.pcm",
"../resources/far88_stereo.pcm",
"../resources/far8_stereo.pcm",
"../resources/far96_stereo.pcm",
"../resources/foremanColorEnhanced_cif_short.yuv",
"../resources/foreman_cif.yuv",
"../resources/foreman_cif_short.yuv",
"../resources/near16_stereo.pcm",
"../resources/near176_stereo.pcm",
"../resources/near192_stereo.pcm",
"../resources/near22_stereo.pcm",
"../resources/near32_stereo.pcm",
"../resources/near44_stereo.pcm",
"../resources/near48_stereo.pcm",
"../resources/near88_stereo.pcm",
"../resources/near8_stereo.pcm",
"../resources/near96_stereo.pcm",
"../resources/ref03.aecdump",
"../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingChoke1_0_AST.bin",
"../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingChoke1_0_TOF.bin",
@ -219,9 +227,9 @@ if (rtc_include_tests) {
"pacing:pacing_unittests",
"remote_bitrate_estimator:remote_bitrate_estimator_unittests",
"rtp_rtcp:rtp_rtcp_unittests",
"utility:utility_unittests",
"video_coding:video_coding_unittests",
"video_processing:video_processing_unittests",
"video_coding/deprecated:deprecated_unittests",
"video_coding/timing:timing_unittests",
]
if (rtc_desktop_capture_supported) {
@ -231,6 +239,7 @@ if (rtc_include_tests) {
data = modules_unittests_resources
if (is_android) {
use_default_launcher = false
deps += [
"../sdk/android:libjingle_peerconnection_java",
"//testing/android/native_test:native_test_support",

File diff suppressed because it is too large

View File

@ -11,6 +11,8 @@
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_FILTER_FUNCTIONS_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_FILTER_FUNCTIONS_H_
#include <stddef.h>
#include "modules/audio_coding/codecs/isac/main/source/structs.h"
void WebRtcIsac_AutoCorr(double* r, const double* x, size_t N, size_t order);

View File

@ -25,8 +25,8 @@
* Post-filtering:
* y(z) = x(z) - damper(z) * gain * (x(z) + y(z)) * z ^ (-lag);
*
* Note that |lag| is a floating number so we perform an interpolation to
* obtain the correct |lag|.
 * Note that `lag` is a floating-point number, so we perform an interpolation to
* obtain the correct `lag`.
*
*/
@ -86,7 +86,7 @@ typedef enum {
* buffer : a buffer where the sum of previous inputs and outputs
* are stored.
* damper_state : the state of the damping filter. The filter is defined by
* |kDampFilter|.
* `kDampFilter`.
* interpol_coeff : pointer to a set of coefficient which are used to utilize
* fractional pitch by interpolation.
* gain : pitch-gain to be applied to the current segment of input.
@ -140,9 +140,9 @@ static void FilterSegment(const double* in_data, PitchFilterParam* parameters,
int j;
double sum;
double sum2;
/* Index of |parameters->buffer| where the output is written to. */
/* Index of `parameters->buffer` where the output is written to. */
int pos = parameters->index + PITCH_BUFFSIZE;
/* Index of |parameters->buffer| where samples are read for fractional-lag
/* Index of `parameters->buffer` where samples are read for fractional-lag
* computation. */
int pos_lag = pos - parameters->lag_offset;
@ -174,9 +174,9 @@ static void FilterSegment(const double* in_data, PitchFilterParam* parameters,
/* Filter for fractional pitch. */
sum2 = 0.0;
for (m = PITCH_FRACORDER-1; m >= m_tmp; --m) {
/* |lag_index + m| is always larger than or equal to zero, see how
/* `lag_index + m` is always larger than or equal to zero, see how
 * m_tmp is computed. This is equivalent to assuming samples outside
* |out_dg[j]| are zero. */
* `out_dg[j]` are zero. */
sum2 += out_dg[j][lag_index + m] * parameters->interpol_coeff[m];
}
/* Add the contribution of differential gain change. */
@ -353,7 +353,7 @@ static void FilterFrame(const double* in_data, PitchFiltstr* filter_state,
if ((mode == kPitchFilterPreGain) || (mode == kPitchFilterPreLa)) {
/* Filter the lookahead segment, this is treated as the last sub-frame. So
* set |pf_param| to last sub-frame. */
* set `pf_param` to last sub-frame. */
filter_parameters.sub_frame = PITCH_SUBFRAMES - 1;
filter_parameters.num_samples = QLOOKAHEAD;
FilterSegment(in_data, &filter_parameters, out_data, out_dg);
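Specialized to an integer `lag`, and with the damping filter and the
fractional-lag interpolation stripped out, the post-filter recursion quoted at
the top of this file reduces to the following sketch (not the shipped code):

    /* y(z) = x(z) - gain * (x(z) + y(z)) * z^(-lag), in the time domain: */
    for (size_t n = lag; n < length; ++n) {
      y[n] = x[n] - gain * (x[n - lag] + y[n - lag]);
    }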

View File

@ -19,15 +19,6 @@ config("apm_debug_dump") {
}
}
rtc_library("config") {
visibility = [ ":*" ]
sources = [
"include/config.cc",
"include/config.h",
]
deps = [ "../../rtc_base/system:rtc_export" ]
}
rtc_library("api") {
visibility = [ "*" ]
sources = [
@ -37,20 +28,23 @@ rtc_library("api") {
deps = [
":audio_frame_view",
":audio_processing_statistics",
":config",
"../../api:array_view",
"../../api:scoped_refptr",
"../../api/audio:aec3_config",
"../../api/audio:audio_frame_api",
"../../api/audio:echo_control",
"../../rtc_base:deprecation",
"../../rtc_base:rtc_base_approved",
"../../rtc_base:macromagic",
"../../rtc_base:refcount",
"../../rtc_base:stringutils",
"../../rtc_base/system:arch",
"../../rtc_base/system:file_wrapper",
"../../rtc_base/system:rtc_export",
"agc:gain_control_interface",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_frame_proxies") {
@ -119,7 +113,41 @@ rtc_source_set("aec_dump_interface") {
deps = [
":api",
":audio_frame_view",
"../../rtc_base:deprecation",
]
absl_deps = [
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("gain_controller2") {
configs += [ ":apm_debug_dump" ]
sources = [
"gain_controller2.cc",
"gain_controller2.h",
]
defines = []
deps = [
":aec_dump_interface",
":api",
":apm_logging",
":audio_buffer",
":audio_frame_view",
"../../common_audio",
"../../rtc_base:checks",
"../../rtc_base:logging",
"../../rtc_base:stringutils",
"../../system_wrappers:field_trial",
"agc2:adaptive_digital_gain_controller",
"agc2:common",
"agc2:cpu_features",
"agc2:fixed_digital",
"agc2:gain_applier",
"agc2:input_volume_controller",
"agc2:noise_level_estimator",
"agc2:saturation_protector",
"agc2:speech_level_estimator",
"agc2:vad_wrapper",
]
}
@ -130,28 +158,11 @@ rtc_library("audio_processing") {
"audio_processing_builder_impl.cc",
"audio_processing_impl.cc",
"audio_processing_impl.h",
"common.h",
"echo_control_mobile_impl.cc",
"echo_control_mobile_impl.h",
"echo_detector/circular_buffer.cc",
"echo_detector/circular_buffer.h",
"echo_detector/mean_variance_estimator.cc",
"echo_detector/mean_variance_estimator.h",
"echo_detector/moving_max.cc",
"echo_detector/moving_max.h",
"echo_detector/normalized_covariance_estimator.cc",
"echo_detector/normalized_covariance_estimator.h",
"gain_control_impl.cc",
"gain_control_impl.h",
"gain_controller2.cc",
"gain_controller2.h",
"level_estimator.cc",
"level_estimator.h",
"render_queue_item_verifier.h",
"residual_echo_detector.cc",
"residual_echo_detector.h",
"typing_detection.cc",
"typing_detection.h",
]
defines = []
@ -163,13 +174,13 @@ rtc_library("audio_processing") {
":audio_frame_proxies",
":audio_frame_view",
":audio_processing_statistics",
":config",
":gain_controller2",
":high_pass_filter",
":optionally_built_submodule_creators",
":rms_level",
":voice_detection",
"../../api:array_view",
"../../api:function_view",
"../../api:make_ref_counted",
"../../api/audio:aec3_config",
"../../api/audio:audio_frame_api",
"../../api/audio:echo_control",
@ -177,15 +188,20 @@ rtc_library("audio_processing") {
"../../common_audio:common_audio_c",
"../../common_audio/third_party/ooura:fft_size_256",
"../../rtc_base:checks",
"../../rtc_base:deprecation",
"../../rtc_base:event_tracer",
"../../rtc_base:gtest_prod",
"../../rtc_base:ignore_wundef",
"../../rtc_base:refcount",
"../../rtc_base:logging",
"../../rtc_base:macromagic",
"../../rtc_base:safe_minmax",
"../../rtc_base:sanitizer",
"../../rtc_base:swap_queue",
"../../rtc_base:timeutils",
"../../rtc_base/experiments:field_trial_parser",
"../../rtc_base/synchronization:mutex",
"../../rtc_base/system:rtc_export",
"../../system_wrappers",
"../../system_wrappers:denormal_disabler",
"../../system_wrappers:field_trial",
"../../system_wrappers:metrics",
"aec3",
@ -194,20 +210,21 @@ rtc_library("audio_processing") {
"agc",
"agc:gain_control_interface",
"agc:legacy_agc",
"agc2:adaptive_digital",
"agc2:fixed_digital",
"agc2:gain_applier",
"agc2:input_volume_stats_reporter",
"capture_levels_adjuster",
"ns",
"transient:transient_suppressor_api",
"vad",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
deps += [
"../../common_audio",
"../../common_audio:fir_filter",
"../../common_audio:fir_filter_factory",
"../../rtc_base:rtc_base_approved",
"../../system_wrappers",
]
@ -218,18 +235,30 @@ rtc_library("audio_processing") {
}
}
rtc_library("voice_detection") {
rtc_library("residual_echo_detector") {
poisonous = [ "default_echo_detector" ]
configs += [ ":apm_debug_dump" ]
sources = [
"voice_detection.cc",
"voice_detection.h",
"echo_detector/circular_buffer.cc",
"echo_detector/circular_buffer.h",
"echo_detector/mean_variance_estimator.cc",
"echo_detector/mean_variance_estimator.h",
"echo_detector/moving_max.cc",
"echo_detector/moving_max.h",
"echo_detector/normalized_covariance_estimator.cc",
"echo_detector/normalized_covariance_estimator.h",
"residual_echo_detector.cc",
"residual_echo_detector.h",
]
deps = [
":api",
":audio_buffer",
"../../api/audio:audio_frame_api",
"../../common_audio:common_audio_c",
":apm_logging",
"../../api:array_view",
"../../rtc_base:checks",
"../../rtc_base:logging",
"../../system_wrappers:metrics",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
rtc_library("optionally_built_submodule_creators") {
@ -289,7 +318,11 @@ rtc_library("apm_logging") {
"../../api:array_view",
"../../common_audio",
"../../rtc_base:checks",
"../../rtc_base:rtc_base_approved",
"../../rtc_base:stringutils",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
defines = []
}
@ -306,8 +339,10 @@ if (rtc_include_tests) {
":audio_processing_statistics",
"../../test:test_support",
]
absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
}
if (!build_with_chromium) {
group("audio_processing_tests") {
testonly = true
deps = [
@ -333,14 +368,15 @@ if (rtc_include_tests) {
sources = [
"audio_buffer_unittest.cc",
"audio_frame_view_unittest.cc",
"config_unittest.cc",
"echo_control_mobile_unittest.cc",
"gain_controller2_unittest.cc",
"splitting_filter_unittest.cc",
"test/echo_canceller3_config_json_unittest.cc",
"test/fake_recording_device_unittest.cc",
]
deps = [
":aec3_config_json",
":analog_mic_simulation",
":api",
":apm_logging",
@ -348,42 +384,58 @@ if (rtc_include_tests) {
":audio_frame_view",
":audio_processing",
":audioproc_test_utils",
":config",
":gain_controller2",
":high_pass_filter",
":mocks",
":voice_detection",
"../../api:array_view",
"../../api:make_ref_counted",
"../../api:scoped_refptr",
"../../api/audio:aec3_config",
"../../api/audio:aec3_factory",
"../../api/audio:echo_detector_creator",
"../../common_audio",
"../../common_audio:common_audio_c",
"../../rtc_base",
"../../rtc_base:checks",
"../../rtc_base:gtest_prod",
"../../rtc_base:ignore_wundef",
"../../rtc_base:macromagic",
"../../rtc_base:platform_thread",
"../../rtc_base:protobuf_utils",
"../../rtc_base:rtc_base_approved",
"../../rtc_base:random",
"../../rtc_base:rtc_base_tests_utils",
"../../rtc_base:rtc_event",
"../../rtc_base:safe_conversions",
"../../rtc_base:safe_minmax",
"../../rtc_base:stringutils",
"../../rtc_base:swap_queue",
"../../rtc_base:task_queue_for_test",
"../../rtc_base:threading",
"../../rtc_base/synchronization:mutex",
"../../rtc_base/system:arch",
"../../rtc_base/system:file_wrapper",
"../../system_wrappers",
"../../system_wrappers:denormal_disabler",
"../../test:field_trial",
"../../test:fileutils",
"../../test:rtc_expect_death",
"../../test:test_support",
"../audio_coding:neteq_input_audio_tools",
"aec_dump:mock_aec_dump_unittests",
"agc:agc_unittests",
"agc2:adaptive_digital_unittests",
"agc2:adaptive_digital_gain_controller_unittest",
"agc2:biquad_filter_unittests",
"agc2:fixed_digital_unittests",
"agc2:gain_applier_unittest",
"agc2:input_volume_controller_unittests",
"agc2:input_volume_stats_reporter_unittests",
"agc2:noise_estimator_unittests",
"agc2:rnn_vad_with_level_unittests",
"agc2:saturation_protector_unittest",
"agc2:speech_level_estimator_unittest",
"agc2:test_utils",
"agc2:vad_wrapper_unittests",
"agc2/rnn_vad:unittests",
"capture_levels_adjuster",
"capture_levels_adjuster:capture_levels_adjuster_unittests",
"test/conversational_speech:unittest",
"transient:transient_suppression_unittests",
"utility:legacy_delay_estimator_unittest",
@ -391,7 +443,10 @@ if (rtc_include_tests) {
"vad:vad_unittests",
"//testing/gtest",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
defines = []
@ -409,6 +464,7 @@ if (rtc_include_tests) {
":audioproc_test_utils",
":audioproc_unittest_proto",
":optionally_built_submodule_creators",
":residual_echo_detector",
":rms_level",
":runtime_settings_protobuf_utils",
"../../api/audio:audio_frame_api",
@ -430,7 +486,6 @@ if (rtc_include_tests) {
"echo_detector/normalized_covariance_estimator_unittest.cc",
"gain_control_unittest.cc",
"high_pass_filter_unittest.cc",
"level_estimator_unittest.cc",
"residual_echo_detector_unittest.cc",
"rms_level_unittest.cc",
"test/debug_dump_replayer.cc",
@ -441,10 +496,10 @@ if (rtc_include_tests) {
"test/echo_canceller_test_tools_unittest.cc",
"test/echo_control_mock.h",
"test/test_utils.h",
"voice_detection_unittest.cc",
]
}
}
}
rtc_library("audio_processing_perf_tests") {
testonly = true
@ -455,12 +510,18 @@ if (rtc_include_tests) {
":audio_processing",
":audioproc_test_utils",
"../../api:array_view",
"../../api/numerics",
"../../api/test/metrics:global_metrics_logger_and_exporter",
"../../api/test/metrics:metric",
"../../rtc_base:platform_thread",
"../../rtc_base:protobuf_utils",
"../../rtc_base:rtc_base_approved",
"../../rtc_base:random",
"../../rtc_base:rtc_event",
"../../rtc_base:safe_conversions",
"../../system_wrappers",
"../../test:perf_test",
"../../test:test_support",
]
absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
}
rtc_library("analog_mic_simulation") {
@ -473,14 +534,15 @@ if (rtc_include_tests) {
"../../api/audio:audio_frame_api",
"../../common_audio",
"../../rtc_base:checks",
"../../rtc_base:rtc_base_approved",
"../../rtc_base:logging",
"../../rtc_base:safe_conversions",
"../../rtc_base:safe_minmax",
"agc:gain_map",
"agc2:gain_map",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
if (rtc_enable_protobuf) {
if (rtc_enable_protobuf && !build_with_chromium) {
rtc_library("audioproc_f_impl") {
testonly = true
configs += [ ":apm_debug_dump" ]
@ -498,6 +560,7 @@ if (rtc_include_tests) {
]
deps = [
":aec3_config_json",
":analog_mic_simulation",
":api",
":apm_logging",
@ -506,15 +569,18 @@ if (rtc_include_tests) {
":audioproc_protobuf_utils",
":audioproc_test_utils",
":runtime_settings_protobuf_utils",
"../../api/audio:aec3_config_json",
"../../api/audio:aec3_factory",
"../../api/audio:echo_detector_creator",
"../../common_audio",
"../../rtc_base:checks",
"../../rtc_base:ignore_wundef",
"../../rtc_base:logging",
"../../rtc_base:protobuf_utils",
"../../rtc_base:rtc_base_approved",
"../../rtc_base:rtc_json",
"../../rtc_base:safe_conversions",
"../../rtc_base:stringutils",
"../../rtc_base:task_queue_for_test",
"../../rtc_base:timeutils",
"../../rtc_base/system:file_wrapper",
"../../system_wrappers",
"../../system_wrappers:field_trial",
@ -549,7 +615,6 @@ if (rtc_include_tests) {
"../../rtc_base:checks",
"../../rtc_base:ignore_wundef",
"../../rtc_base:protobuf_utils",
"../../rtc_base:rtc_base_approved",
"../../rtc_base/system:arch",
]
}
@ -599,7 +664,7 @@ rtc_library("audioproc_test_utils") {
"../../api/audio:audio_frame_api",
"../../common_audio",
"../../rtc_base:checks",
"../../rtc_base:rtc_base_approved",
"../../rtc_base:random",
"../../rtc_base/system:arch",
"../../system_wrappers",
"../../test:fileutils",
@ -607,5 +672,26 @@ rtc_library("audioproc_test_utils") {
"../audio_coding:neteq_input_audio_tools",
"//testing/gtest",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("aec3_config_json") {
visibility = [ "*" ]
testonly = true
sources = [
"test/echo_canceller3_config_json.cc",
"test/echo_canceller3_config_json.h",
]
deps = [
"../../api/audio:aec3_config",
"../../rtc_base:checks",
"../../rtc_base:logging",
"../../rtc_base:rtc_json",
"../../rtc_base:stringutils",
"../../rtc_base/system:rtc_export",
]
absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
}

Some files were not shown because too many files have changed in this diff