Bump to WebRTC M120 release
Some API deprecations: ExperimentalAgc and ExperimentalNs are gone. We're continuing to carry iSAC even though it has been removed upstream, but we may want to drop it soon.
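For downstream code that still sets the removed knobs, the replacement is the AudioProcessing::Config API that the new AnalogGainController plumbing in the diff below is built around. The sketch that follows is illustrative only and not part of this commit: the analog_gain_controller fields mirror the AnalogGainController config referenced in the diff, while the include path and the noise-suppression mapping are assumptions to check against the M120 headers.

// Hypothetical migration sketch (not from this commit): the old
// webrtc::Config / SetExtraOptions knobs move to AudioProcessing::Config.
#include "modules/audio_processing/include/audio_processing.h"

void ConfigureApm(webrtc::AudioProcessing& apm) {
  webrtc::AudioProcessing::Config config = apm.GetConfig();
  // Roughly what ExperimentalAgc used to toggle: the analog (input-volume)
  // controller plus adaptive digital gain, now under gain_controller1.
  config.gain_controller1.enabled = true;
  config.gain_controller1.analog_gain_controller.enabled = true;
  config.gain_controller1.analog_gain_controller.enable_digital_adaptive = true;
  // ExperimentalNs has no one-to-one replacement; noise suppression is
  // configured through its own section of the config instead.
  config.noise_suppression.enabled = true;
  apm.ApplyConfig(config);
}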
@@ -13,11 +13,12 @@
#include <algorithm>
#include <cmath>

#include "api/array_view.h"
#include "common_audio/include/audio_util.h"
#include "modules/audio_processing/agc/gain_control.h"
#include "modules/audio_processing/agc/gain_map_internal.h"
#include "modules/audio_processing/agc2/adaptive_mode_level_estimator_agc.h"
#include "rtc_base/atomic_ops.h"
#include "modules/audio_processing/agc2/gain_map_internal.h"
#include "modules/audio_processing/agc2/input_volume_stats_reporter.h"
#include "modules/audio_processing/include/audio_frame_view.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_minmax.h"
@@ -28,69 +29,65 @@ namespace webrtc {

namespace {

// Amount the microphone level is lowered with every clipping event.
const int kClippedLevelStep = 15;
// Proportion of clipped samples required to declare a clipping event.
const float kClippedRatioThreshold = 0.1f;
// Time in frames to wait after a clipping event before checking again.
const int kClippedWaitFrames = 300;

// Amount of error we tolerate in the microphone level (presumably due to OS
// quantization) before we assume the user has manually adjusted the microphone.
const int kLevelQuantizationSlack = 25;
constexpr int kLevelQuantizationSlack = 25;

const int kDefaultCompressionGain = 7;
const int kMaxCompressionGain = 12;
const int kMinCompressionGain = 2;
constexpr int kDefaultCompressionGain = 7;
constexpr int kMaxCompressionGain = 12;
constexpr int kMinCompressionGain = 2;
// Controls the rate of compression changes towards the target.
const float kCompressionGainStep = 0.05f;
constexpr float kCompressionGainStep = 0.05f;

const int kMaxMicLevel = 255;
constexpr int kMaxMicLevel = 255;
static_assert(kGainMapSize > kMaxMicLevel, "gain map too small");
const int kMinMicLevel = 12;
constexpr int kMinMicLevel = 12;

// Prevent very large microphone level changes.
const int kMaxResidualGainChange = 15;
constexpr int kMaxResidualGainChange = 15;

// Maximum additional gain allowed to compensate for microphone level
// restrictions from clipping events.
const int kSurplusCompressionGain = 6;
constexpr int kSurplusCompressionGain = 6;

// Returns whether a fall-back solution to choose the maximum level should be
// chosen.
bool UseMaxAnalogChannelLevel() {
return field_trial::IsEnabled("WebRTC-UseMaxAnalogAgcChannelLevel");
}
// Target speech level (dBFs) and speech probability threshold used to compute
// the RMS error override in `GetSpeechLevelErrorDb()`. These are only used for
// computing the error override and they are not passed to `agc_`.
// TODO(webrtc:7494): Move these to a config and pass in the ctor.
constexpr float kOverrideTargetSpeechLevelDbfs = -18.0f;
constexpr float kOverrideSpeechProbabilitySilenceThreshold = 0.5f;
// The minimum number of frames between `UpdateGain()` calls.
// TODO(webrtc:7494): Move this to a config and pass in the ctor with
// kOverrideWaitFrames = 100. Default value zero needed for the unit tests.
constexpr int kOverrideWaitFrames = 0;

// Returns kMinMicLevel if no field trial exists or if it has been disabled.
// Returns a value between 0 and 255 depending on the field-trial string.
// Example: 'WebRTC-Audio-AgcMinMicLevelExperiment/Enabled-80' => returns 80.
int GetMinMicLevel() {
RTC_LOG(LS_INFO) << "[agc] GetMinMicLevel";
using AnalogAgcConfig =
AudioProcessing::Config::GainController1::AnalogGainController;

// If the "WebRTC-Audio-2ndAgcMinMicLevelExperiment" field trial is specified,
// parses it and returns a value between 0 and 255 depending on the field-trial
// string. Returns an unspecified value if the field trial is not specified, if
// disabled or if it cannot be parsed. Example:
// 'WebRTC-Audio-2ndAgcMinMicLevelExperiment/Enabled-80' => returns 80.
absl::optional<int> GetMinMicLevelOverride() {
constexpr char kMinMicLevelFieldTrial[] =
"WebRTC-Audio-AgcMinMicLevelExperiment";
"WebRTC-Audio-2ndAgcMinMicLevelExperiment";
if (!webrtc::field_trial::IsEnabled(kMinMicLevelFieldTrial)) {
RTC_LOG(LS_INFO) << "[agc] Using default min mic level: " << kMinMicLevel;
return kMinMicLevel;
return absl::nullopt;
}
const auto field_trial_string =
webrtc::field_trial::FindFullName(kMinMicLevelFieldTrial);
int min_mic_level = -1;
sscanf(field_trial_string.c_str(), "Enabled-%d", &min_mic_level);
if (min_mic_level >= 0 && min_mic_level <= 255) {
RTC_LOG(LS_INFO) << "[agc] Experimental min mic level: " << min_mic_level;
return min_mic_level;
} else {
RTC_LOG(LS_WARNING) << "[agc] Invalid parameter for "
<< kMinMicLevelFieldTrial << ", ignored.";
return kMinMicLevel;
return absl::nullopt;
}
}

int ClampLevel(int mic_level, int min_mic_level) {
return rtc::SafeClamp(mic_level, min_mic_level, kMaxMicLevel);
}

int LevelFromGainError(int gain_error, int level, int min_mic_level) {
RTC_DCHECK_GE(level, 0);
RTC_DCHECK_LE(level, kMaxMicLevel);
@@ -124,7 +121,7 @@ float ComputeClippedRatio(const float* const* audio,
int num_clipped_in_ch = 0;
for (size_t i = 0; i < samples_per_channel; ++i) {
RTC_DCHECK(audio[ch]);
if (audio[ch][i] >= 32767.f || audio[ch][i] <= -32768.f) {
if (audio[ch][i] >= 32767.0f || audio[ch][i] <= -32768.0f) {
++num_clipped_in_ch;
}
}
@@ -133,29 +130,49 @@ float ComputeClippedRatio(const float* const* audio,
return static_cast<float>(num_clipped) / (samples_per_channel);
}

void LogClippingMetrics(int clipping_rate) {
RTC_LOG(LS_INFO) << "Input clipping rate: " << clipping_rate << "%";
RTC_HISTOGRAM_COUNTS_LINEAR(/*name=*/"WebRTC.Audio.Agc.InputClippingRate",
/*sample=*/clipping_rate, /*min=*/0, /*max=*/100,
/*bucket_count=*/50);
}

// Computes the speech level error in dB. `speech_level_dbfs` is required to be
// in the range [-90.0f, 30.0f] and `speech_probability` in the range
// [0.0f, 1.0f].
int GetSpeechLevelErrorDb(float speech_level_dbfs, float speech_probability) {
constexpr float kMinSpeechLevelDbfs = -90.0f;
constexpr float kMaxSpeechLevelDbfs = 30.0f;
RTC_DCHECK_GE(speech_level_dbfs, kMinSpeechLevelDbfs);
RTC_DCHECK_LE(speech_level_dbfs, kMaxSpeechLevelDbfs);
RTC_DCHECK_GE(speech_probability, 0.0f);
RTC_DCHECK_LE(speech_probability, 1.0f);

if (speech_probability < kOverrideSpeechProbabilitySilenceThreshold) {
return 0;
}

const float speech_level = rtc::SafeClamp<float>(
speech_level_dbfs, kMinSpeechLevelDbfs, kMaxSpeechLevelDbfs);

return std::round(kOverrideTargetSpeechLevelDbfs - speech_level);
}

} // namespace

MonoAgc::MonoAgc(ApmDataDumper* data_dumper,
int startup_min_level,
int clipped_level_min,
bool use_agc2_level_estimation,
bool disable_digital_adaptive,
int min_mic_level)
: min_mic_level_(min_mic_level),
disable_digital_adaptive_(disable_digital_adaptive),
agc_(std::make_unique<Agc>()),
max_level_(kMaxMicLevel),
max_compression_gain_(kMaxCompressionGain),
target_compression_(kDefaultCompressionGain),
compression_(target_compression_),
compression_accumulator_(compression_),
startup_min_level_(ClampLevel(startup_min_level, min_mic_level_)),
clipped_level_min_(clipped_level_min) {
if (use_agc2_level_estimation) {
agc_ = std::make_unique<AdaptiveModeLevelEstimatorAgc>(data_dumper);
} else {
agc_ = std::make_unique<Agc>();
}
}
clipped_level_min_(clipped_level_min) {}

MonoAgc::~MonoAgc() = default;

@@ -165,13 +182,14 @@ void MonoAgc::Initialize() {
target_compression_ = disable_digital_adaptive_ ? 0 : kDefaultCompressionGain;
compression_ = disable_digital_adaptive_ ? 0 : target_compression_;
compression_accumulator_ = compression_;
capture_muted_ = false;
capture_output_used_ = true;
check_volume_on_next_process_ = true;
frames_since_update_gain_ = 0;
is_first_frame_ = true;
}

void MonoAgc::Process(const int16_t* audio,
size_t samples_per_channel,
int sample_rate_hz) {
void MonoAgc::Process(rtc::ArrayView<const int16_t> audio,
absl::optional<int> rms_error_override) {
new_compression_to_set_ = absl::nullopt;

if (check_volume_on_next_process_) {
@@ -181,34 +199,60 @@ void MonoAgc::Process(const int16_t* audio,
CheckVolumeAndReset();
}

agc_->Process(audio, samples_per_channel, sample_rate_hz);
agc_->Process(audio);

// Always check if `agc_` has a new error available. If yes, `agc_` gets
// reset.
// TODO(webrtc:7494) Replace the `agc_` call `GetRmsErrorDb()` with `Reset()`
// if an error override is used.
int rms_error = 0;
bool update_gain = agc_->GetRmsErrorDb(&rms_error);
if (rms_error_override.has_value()) {
if (is_first_frame_ || frames_since_update_gain_ < kOverrideWaitFrames) {
update_gain = false;
} else {
rms_error = *rms_error_override;
update_gain = true;
}
}

if (update_gain) {
UpdateGain(rms_error);
}

UpdateGain();
if (!disable_digital_adaptive_) {
UpdateCompressor();
}

is_first_frame_ = false;
if (frames_since_update_gain_ < kOverrideWaitFrames) {
++frames_since_update_gain_;
}
}

void MonoAgc::HandleClipping() {
void MonoAgc::HandleClipping(int clipped_level_step) {
RTC_DCHECK_GT(clipped_level_step, 0);
// Always decrease the maximum level, even if the current level is below
// threshold.
SetMaxLevel(std::max(clipped_level_min_, max_level_ - kClippedLevelStep));
SetMaxLevel(std::max(clipped_level_min_, max_level_ - clipped_level_step));
if (log_to_histograms_) {
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.AgcClippingAdjustmentAllowed",
level_ - kClippedLevelStep >= clipped_level_min_);
level_ - clipped_level_step >= clipped_level_min_);
}
if (level_ > clipped_level_min_) {
// Don't try to adjust the level if we're already below the limit. As
// a consequence, if the user has brought the level above the limit, we
// will still not react until the postproc updates the level.
SetLevel(std::max(clipped_level_min_, level_ - kClippedLevelStep));
SetLevel(std::max(clipped_level_min_, level_ - clipped_level_step));
// Reset the AGCs for all channels since the level has changed.
agc_->Reset();
frames_since_update_gain_ = 0;
is_first_frame_ = false;
}
}

void MonoAgc::SetLevel(int new_level) {
int voe_level = stream_analog_level_;
int voe_level = recommended_input_volume_;
if (voe_level == 0) {
RTC_DLOG(LS_INFO)
<< "[agc] VolumeCallbacks returned level=0, taking no action.";
@@ -220,6 +264,10 @@ void MonoAgc::SetLevel(int new_level) {
return;
}

// Detect manual input volume adjustments by checking if the current level
// `voe_level` is outside of the `[level_ - kLevelQuantizationSlack, level_ +
// kLevelQuantizationSlack]` range where `level_` is the last input volume
// known by this gain controller.
if (voe_level > level_ + kLevelQuantizationSlack ||
voe_level < level_ - kLevelQuantizationSlack) {
RTC_DLOG(LS_INFO) << "[agc] Mic volume was manually adjusted. Updating "
@@ -234,7 +282,8 @@ void MonoAgc::SetLevel(int new_level) {
// was manually adjusted. The compressor will still provide some of the
// desired gain change.
agc_->Reset();

frames_since_update_gain_ = 0;
is_first_frame_ = false;
return;
}

@@ -243,7 +292,7 @@ void MonoAgc::SetLevel(int new_level) {
return;
}

stream_analog_level_ = new_level;
recommended_input_volume_ = new_level;
RTC_DLOG(LS_INFO) << "[agc] voe_level=" << voe_level << ", level_=" << level_
<< ", new_level=" << new_level;
level_ = new_level;
@@ -252,7 +301,7 @@ void MonoAgc::SetLevel(int new_level) {
void MonoAgc::SetMaxLevel(int level) {
RTC_DCHECK_GE(level, clipped_level_min_);
max_level_ = level;
// Scale the |kSurplusCompressionGain| linearly across the restricted
// Scale the `kSurplusCompressionGain` linearly across the restricted
// level range.
max_compression_gain_ =
kMaxCompressionGain + std::floor((1.f * kMaxMicLevel - max_level_) /
@@ -263,23 +312,23 @@ void MonoAgc::SetMaxLevel(int level) {
<< ", max_compression_gain_=" << max_compression_gain_;
}

void MonoAgc::SetCaptureMuted(bool muted) {
if (capture_muted_ == muted) {
void MonoAgc::HandleCaptureOutputUsedChange(bool capture_output_used) {
if (capture_output_used_ == capture_output_used) {
return;
}
capture_muted_ = muted;
capture_output_used_ = capture_output_used;

if (!muted) {
// When we unmute, we should reset things to be safe.
if (capture_output_used) {
// When we start using the output, we should reset things to be safe.
check_volume_on_next_process_ = true;
}
}

int MonoAgc::CheckVolumeAndReset() {
int level = stream_analog_level_;
int level = recommended_input_volume_;
// Reasons for taking action at startup:
// 1) A person starting a call is expected to be heard.
// 2) Independent of interpretation of |level| == 0 we should raise it so the
// 2) Independent of interpretation of `level` == 0 we should raise it so the
// AGC can do its job properly.
if (level == 0 && !startup_) {
RTC_DLOG(LS_INFO)
@@ -293,31 +342,33 @@ int MonoAgc::CheckVolumeAndReset() {
}
RTC_DLOG(LS_INFO) << "[agc] Initial GetMicVolume()=" << level;

int minLevel = startup_ ? startup_min_level_ : min_mic_level_;
if (level < minLevel) {
level = minLevel;
if (level < min_mic_level_) {
level = min_mic_level_;
RTC_DLOG(LS_INFO) << "[agc] Initial volume too low, raising to " << level;
stream_analog_level_ = level;
recommended_input_volume_ = level;
}
agc_->Reset();
level_ = level;
startup_ = false;
frames_since_update_gain_ = 0;
is_first_frame_ = true;
return 0;
}

// Requests the RMS error from AGC and distributes the required gain change
// between the digital compression stage and volume slider. We use the
// compressor first, providing a slack region around the current slider
// position to reduce movement.
// Distributes the required gain change between the digital compression stage
// and volume slider. We use the compressor first, providing a slack region
// around the current slider position to reduce movement.
//
// If the slider needs to be moved, we check first if the user has adjusted
// it, in which case we take no action and cache the updated level.
void MonoAgc::UpdateGain() {
int rms_error = 0;
if (!agc_->GetRmsErrorDb(&rms_error)) {
// No error update ready.
return;
}
void MonoAgc::UpdateGain(int rms_error_db) {
int rms_error = rms_error_db;

// Always reset the counter regardless of whether the gain is changed
// or not. This matches the behavior of `agc_` where the histogram is
// reset every time an RMS error is successfully read.
frames_since_update_gain_ = 0;

// The compressor will always add at least kMinCompressionGain. In effect,
// this adjusts our target gain upward by the same amount and rms_error
// needs to reflect that.
@@ -357,22 +408,12 @@ void MonoAgc::UpdateGain() {
int old_level = level_;
SetLevel(LevelFromGainError(residual_gain, level_, min_mic_level_));
if (old_level != level_) {
// level_ was updated by SetLevel; log the new value.
RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.AgcSetLevel", level_, 1,
kMaxMicLevel, 50);
// Reset the AGC since the level has changed.
agc_->Reset();
}
}

void MonoAgc::UpdateCompressor() {
calls_since_last_gain_log_++;
if (calls_since_last_gain_log_ == 100) {
calls_since_last_gain_log_ = 0;
RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.Agc.DigitalGainApplied",
compression_, 0, kMaxCompressionGain,
kMaxCompressionGain + 1);
}
if (compression_ == target_compression_) {
return;
}
@@ -397,57 +438,66 @@ void MonoAgc::UpdateCompressor() {

// Set the new compression gain.
if (new_compression != compression_) {
RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.Agc.DigitalGainUpdated",
new_compression, 0, kMaxCompressionGain,
kMaxCompressionGain + 1);
compression_ = new_compression;
compression_accumulator_ = new_compression;
new_compression_to_set_ = compression_;
}
}

int AgcManagerDirect::instance_counter_ = 0;
std::atomic<int> AgcManagerDirect::instance_counter_(0);

AgcManagerDirect::AgcManagerDirect(Agc* agc,
int startup_min_level,
int clipped_level_min,
int sample_rate_hz)
: AgcManagerDirect(/*num_capture_channels*/ 1,
startup_min_level,
clipped_level_min,
/*use_agc2_level_estimation*/ false,
/*disable_digital_adaptive*/ false,
sample_rate_hz) {
AgcManagerDirect::AgcManagerDirect(
const AudioProcessing::Config::GainController1::AnalogGainController&
analog_config,
Agc* agc)
: AgcManagerDirect(/*num_capture_channels=*/1, analog_config) {
RTC_DCHECK(channel_agcs_[0]);
RTC_DCHECK(agc);
channel_agcs_[0]->set_agc(agc);
}

AgcManagerDirect::AgcManagerDirect(int num_capture_channels,
int startup_min_level,
int clipped_level_min,
bool use_agc2_level_estimation,
bool disable_digital_adaptive,
int sample_rate_hz)
: data_dumper_(
new ApmDataDumper(rtc::AtomicOps::Increment(&instance_counter_))),
use_min_channel_level_(!UseMaxAnalogChannelLevel()),
sample_rate_hz_(sample_rate_hz),
const AnalogAgcConfig& analog_config)
: analog_controller_enabled_(analog_config.enabled),
min_mic_level_override_(GetMinMicLevelOverride()),
data_dumper_(new ApmDataDumper(instance_counter_.fetch_add(1) + 1)),
num_capture_channels_(num_capture_channels),
disable_digital_adaptive_(disable_digital_adaptive),
frames_since_clipped_(kClippedWaitFrames),
capture_muted_(false),
disable_digital_adaptive_(!analog_config.enable_digital_adaptive),
frames_since_clipped_(analog_config.clipped_wait_frames),
capture_output_used_(true),
clipped_level_step_(analog_config.clipped_level_step),
clipped_ratio_threshold_(analog_config.clipped_ratio_threshold),
clipped_wait_frames_(analog_config.clipped_wait_frames),
channel_agcs_(num_capture_channels),
new_compressions_to_set_(num_capture_channels) {
const int min_mic_level = GetMinMicLevel();
new_compressions_to_set_(num_capture_channels),
clipping_predictor_(
CreateClippingPredictor(num_capture_channels,
analog_config.clipping_predictor)),
use_clipping_predictor_step_(
!!clipping_predictor_ &&
analog_config.clipping_predictor.use_predicted_step),
clipping_rate_log_(0.0f),
clipping_rate_log_counter_(0) {
RTC_LOG(LS_INFO) << "[agc] analog controller enabled: "
<< (analog_controller_enabled_ ? "yes" : "no");
const int min_mic_level = min_mic_level_override_.value_or(kMinMicLevel);
RTC_LOG(LS_INFO) << "[agc] Min mic level: " << min_mic_level
<< " (overridden: "
<< (min_mic_level_override_.has_value() ? "yes" : "no")
<< ")";
for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) {
ApmDataDumper* data_dumper_ch = ch == 0 ? data_dumper_.get() : nullptr;

channel_agcs_[ch] = std::make_unique<MonoAgc>(
data_dumper_ch, startup_min_level, clipped_level_min,
use_agc2_level_estimation, disable_digital_adaptive_, min_mic_level);
data_dumper_ch, analog_config.clipped_level_min,
disable_digital_adaptive_, min_mic_level);
}
RTC_DCHECK_LT(0, channel_agcs_.size());
RTC_DCHECK(!channel_agcs_.empty());
RTC_DCHECK_GT(clipped_level_step_, 0);
RTC_DCHECK_LE(clipped_level_step_, 255);
RTC_DCHECK_GT(clipped_ratio_threshold_, 0.0f);
RTC_DCHECK_LT(clipped_ratio_threshold_, 1.0f);
RTC_DCHECK_GT(clipped_wait_frames_, 0);
channel_agcs_[0]->ActivateLogging();
}

@@ -459,48 +509,47 @@ void AgcManagerDirect::Initialize() {
for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) {
channel_agcs_[ch]->Initialize();
}
capture_muted_ = false;
capture_output_used_ = true;

AggregateChannelLevels();
clipping_rate_log_ = 0.0f;
clipping_rate_log_counter_ = 0;
}

void AgcManagerDirect::SetupDigitalGainControl(
GainControl* gain_control) const {
RTC_DCHECK(gain_control);
if (gain_control->set_mode(GainControl::kFixedDigital) != 0) {
GainControl& gain_control) const {
if (gain_control.set_mode(GainControl::kFixedDigital) != 0) {
RTC_LOG(LS_ERROR) << "set_mode(GainControl::kFixedDigital) failed.";
}
const int target_level_dbfs = disable_digital_adaptive_ ? 0 : 2;
if (gain_control->set_target_level_dbfs(target_level_dbfs) != 0) {
if (gain_control.set_target_level_dbfs(target_level_dbfs) != 0) {
RTC_LOG(LS_ERROR) << "set_target_level_dbfs() failed.";
}
const int compression_gain_db =
disable_digital_adaptive_ ? 0 : kDefaultCompressionGain;
if (gain_control->set_compression_gain_db(compression_gain_db) != 0) {
if (gain_control.set_compression_gain_db(compression_gain_db) != 0) {
RTC_LOG(LS_ERROR) << "set_compression_gain_db() failed.";
}
const bool enable_limiter = !disable_digital_adaptive_;
if (gain_control->enable_limiter(enable_limiter) != 0) {
if (gain_control.enable_limiter(enable_limiter) != 0) {
RTC_LOG(LS_ERROR) << "enable_limiter() failed.";
}
}

void AgcManagerDirect::AnalyzePreProcess(const AudioBuffer* audio) {
void AgcManagerDirect::AnalyzePreProcess(const AudioBuffer& audio_buffer) {
const float* const* audio = audio_buffer.channels_const();
size_t samples_per_channel = audio_buffer.num_frames();
RTC_DCHECK(audio);
AnalyzePreProcess(audio->channels_const(), audio->num_frames());
}

void AgcManagerDirect::AnalyzePreProcess(const float* const* audio,
size_t samples_per_channel) {
RTC_DCHECK(audio);
AggregateChannelLevels();
if (capture_muted_) {
if (!capture_output_used_) {
return;
}

if (frames_since_clipped_ < kClippedWaitFrames) {
++frames_since_clipped_;
return;
if (!!clipping_predictor_) {
AudioFrameView<const float> frame = AudioFrameView<const float>(
audio, num_capture_channels_, static_cast<int>(samples_per_channel));
clipping_predictor_->Analyze(frame);
}

// Check for clipped samples, as the AGC has difficulty detecting pitch
@@ -514,55 +563,108 @@ void AgcManagerDirect::AnalyzePreProcess(const float* const* audio,
// gain is increased, through SetMaxLevel().
float clipped_ratio =
ComputeClippedRatio(audio, num_capture_channels_, samples_per_channel);
clipping_rate_log_ = std::max(clipped_ratio, clipping_rate_log_);
clipping_rate_log_counter_++;
constexpr int kNumFramesIn30Seconds = 3000;
if (clipping_rate_log_counter_ == kNumFramesIn30Seconds) {
LogClippingMetrics(std::round(100.0f * clipping_rate_log_));
clipping_rate_log_ = 0.0f;
clipping_rate_log_counter_ = 0;
}

if (clipped_ratio > kClippedRatioThreshold) {
if (frames_since_clipped_ < clipped_wait_frames_) {
++frames_since_clipped_;
return;
}

const bool clipping_detected = clipped_ratio > clipped_ratio_threshold_;
bool clipping_predicted = false;
int predicted_step = 0;
if (!!clipping_predictor_) {
for (int channel = 0; channel < num_capture_channels_; ++channel) {
const auto step = clipping_predictor_->EstimateClippedLevelStep(
channel, recommended_input_volume_, clipped_level_step_,
channel_agcs_[channel]->min_mic_level(), kMaxMicLevel);
if (step.has_value()) {
predicted_step = std::max(predicted_step, step.value());
clipping_predicted = true;
}
}
}
if (clipping_detected) {
RTC_DLOG(LS_INFO) << "[agc] Clipping detected. clipped_ratio="
<< clipped_ratio;
}
int step = clipped_level_step_;
if (clipping_predicted) {
predicted_step = std::max(predicted_step, clipped_level_step_);
RTC_DLOG(LS_INFO) << "[agc] Clipping predicted. step=" << predicted_step;
if (use_clipping_predictor_step_) {
step = predicted_step;
}
}
if (clipping_detected ||
(clipping_predicted && use_clipping_predictor_step_)) {
for (auto& state_ch : channel_agcs_) {
state_ch->HandleClipping();
state_ch->HandleClipping(step);
}
frames_since_clipped_ = 0;
if (!!clipping_predictor_) {
clipping_predictor_->Reset();
}
}
AggregateChannelLevels();
}

void AgcManagerDirect::Process(const AudioBuffer* audio) {
AggregateChannelLevels();
void AgcManagerDirect::Process(const AudioBuffer& audio_buffer) {
Process(audio_buffer, /*speech_probability=*/absl::nullopt,
/*speech_level_dbfs=*/absl::nullopt);
}

if (capture_muted_) {
void AgcManagerDirect::Process(const AudioBuffer& audio_buffer,
absl::optional<float> speech_probability,
absl::optional<float> speech_level_dbfs) {
AggregateChannelLevels();
const int volume_after_clipping_handling = recommended_input_volume_;

if (!capture_output_used_) {
return;
}

const size_t num_frames_per_band = audio_buffer.num_frames_per_band();
absl::optional<int> rms_error_override = absl::nullopt;
if (speech_probability.has_value() && speech_level_dbfs.has_value()) {
rms_error_override =
GetSpeechLevelErrorDb(*speech_level_dbfs, *speech_probability);
}
for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) {
int16_t* audio_use = nullptr;
std::array<int16_t, AudioBuffer::kMaxSampleRate / 100> audio_data;
int num_frames_per_band;
if (audio) {
FloatS16ToS16(audio->split_bands_const_f(ch)[0],
audio->num_frames_per_band(), audio_data.data());
audio_use = audio_data.data();
num_frames_per_band = audio->num_frames_per_band();
} else {
// Only used for testing.
// TODO(peah): Change unittests to only allow on non-null audio input.
num_frames_per_band = 320;
}
channel_agcs_[ch]->Process(audio_use, num_frames_per_band, sample_rate_hz_);
int16_t* audio_use = audio_data.data();
FloatS16ToS16(audio_buffer.split_bands_const_f(ch)[0], num_frames_per_band,
audio_use);
channel_agcs_[ch]->Process({audio_use, num_frames_per_band},
rms_error_override);
new_compressions_to_set_[ch] = channel_agcs_[ch]->new_compression();
}

AggregateChannelLevels();
if (volume_after_clipping_handling != recommended_input_volume_) {
// The recommended input volume was adjusted in order to match the target
// level.
UpdateHistogramOnRecommendedInputVolumeChangeToMatchTarget(
recommended_input_volume_);
}
}

absl::optional<int> AgcManagerDirect::GetDigitalComressionGain() {
return new_compressions_to_set_[channel_controlling_gain_];
}

void AgcManagerDirect::SetCaptureMuted(bool muted) {
void AgcManagerDirect::HandleCaptureOutputUsedChange(bool capture_output_used) {
for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) {
channel_agcs_[ch]->SetCaptureMuted(muted);
channel_agcs_[ch]->HandleCaptureOutputUsedChange(capture_output_used);
}
capture_muted_ = muted;
capture_output_used_ = capture_output_used;
}

float AgcManagerDirect::voice_probability() const {
@@ -575,6 +677,10 @@ float AgcManagerDirect::voice_probability() const {
}

void AgcManagerDirect::set_stream_analog_level(int level) {
if (!analog_controller_enabled_) {
recommended_input_volume_ = level;
}

for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) {
channel_agcs_[ch]->set_stream_analog_level(level);
}
@@ -583,25 +689,25 @@ void AgcManagerDirect::set_stream_analog_level(int level) {
}

void AgcManagerDirect::AggregateChannelLevels() {
stream_analog_level_ = channel_agcs_[0]->stream_analog_level();
int new_recommended_input_volume =
channel_agcs_[0]->recommended_analog_level();
channel_controlling_gain_ = 0;
if (use_min_channel_level_) {
for (size_t ch = 1; ch < channel_agcs_.size(); ++ch) {
int level = channel_agcs_[ch]->stream_analog_level();
if (level < stream_analog_level_) {
stream_analog_level_ = level;
channel_controlling_gain_ = static_cast<int>(ch);
}
}
} else {
for (size_t ch = 1; ch < channel_agcs_.size(); ++ch) {
int level = channel_agcs_[ch]->stream_analog_level();
if (level > stream_analog_level_) {
stream_analog_level_ = level;
channel_controlling_gain_ = static_cast<int>(ch);
}
for (size_t ch = 1; ch < channel_agcs_.size(); ++ch) {
int level = channel_agcs_[ch]->recommended_analog_level();
if (level < new_recommended_input_volume) {
new_recommended_input_volume = level;
channel_controlling_gain_ = static_cast<int>(ch);
}
}

if (min_mic_level_override_.has_value() && new_recommended_input_volume > 0) {
new_recommended_input_volume =
std::max(new_recommended_input_volume, *min_mic_level_override_);
}

if (analog_controller_enabled_) {
recommended_input_volume_ = new_recommended_input_volume;
}
}

} // namespace webrtc
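For orientation, here is a sketch of how the reworked AgcManagerDirect above is typically driven once per 10 ms capture frame. It is not part of the diff: the call order mirrors how audio_processing_impl.cc uses the class, and the recommended_analog_level() getter at the end is an assumption to verify against agc_manager_direct.h for this release.

// Hypothetical per-frame driver for AgcManagerDirect (not from this commit).
#include "absl/types/optional.h"
#include "modules/audio_processing/agc/agc_manager_direct.h"
#include "modules/audio_processing/audio_buffer.h"

void RunAgcOnCaptureFrame(webrtc::AgcManagerDirect& agc_manager,
                          webrtc::AudioBuffer& capture_buffer,
                          int mic_volume_from_os,  // 0-255, mapped from the OS mixer
                          absl::optional<float> speech_probability,
                          absl::optional<float> speech_level_dbfs) {
  // Report the input volume the OS is currently applying.
  agc_manager.set_stream_analog_level(mic_volume_from_os);
  // Clipping detection (and optional prediction) on the unprocessed capture audio.
  agc_manager.AnalyzePreProcess(capture_buffer);
  // ...the rest of the capture chain (echo cancellation, noise suppression) runs here...
  // Gain update; the optional speech stats enable the RMS error override path.
  agc_manager.Process(capture_buffer, speech_probability, speech_level_dbfs);
  // Getter name assumed; feed the recommendation back to the OS mixer.
  const int recommended_volume = agc_manager.recommended_analog_level();
  static_cast<void>(recommended_volume);
}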