Update common_audio
Corresponds to upstream commit 524e9b043e7e86fd72353b987c9d5f6a1ebf83e1

Update notes:
* Moved src/ to webrtc/ to easily diff against the third_party/webrtc in the chromium tree
* ARM/NEON/MIPS support is not yet hooked up
* Tests have not been copied
webrtc/modules/audio_processing/agc/Makefile.am | new file | 10 lines added
@@ -0,0 +1,10 @@
noinst_LTLIBRARIES = libagc.la

libagc_la_SOURCES = interface/gain_control.h \
                    analog_agc.c \
                    analog_agc.h \
                    digital_agc.c \
                    digital_agc.h
libagc_la_CFLAGS = $(AM_CFLAGS) $(COMMON_CFLAGS) \
                   -I$(top_srcdir)/src/common_audio/signal_processing_library/main/interface \
                   -I$(top_srcdir)/src/modules/audio_processing/utility
webrtc/modules/audio_processing/agc/agc.gypi | new file | 34 lines added
@@ -0,0 +1,34 @@
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.

{
  'targets': [
    {
      'target_name': 'agc',
      'type': '<(library)',
      'dependencies': [
        '<(webrtc_root)/common_audio/common_audio.gyp:spl',
      ],
      'include_dirs': [
        'interface',
      ],
      'direct_dependent_settings': {
        'include_dirs': [
          'interface',
        ],
      },
      'sources': [
        'interface/gain_control.h',
        'analog_agc.c',
        'analog_agc.h',
        'digital_agc.c',
        'digital_agc.h',
      ],
    },
  ],
}
webrtc/modules/audio_processing/agc/analog_agc.c | new file | 1709 lines added
File diff suppressed because it is too large
webrtc/modules/audio_processing/agc/analog_agc.h | new file | 133 lines added
@@ -0,0 +1,133 @@
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_SOURCE_ANALOG_AGC_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_SOURCE_ANALOG_AGC_H_

#include "typedefs.h"
#include "gain_control.h"
#include "digital_agc.h"

//#define AGC_DEBUG
//#define MIC_LEVEL_FEEDBACK
#ifdef AGC_DEBUG
#include <stdio.h>
#endif

/* Analog Automatic Gain Control variables:
 * Constant declarations (inner limits inside which no changes are done)
 * In the beginning the range is narrower to widen as soon as the measure
 * 'Rxx160_LP' is inside it. Currently the starting limits are -22.2+/-1dBm0
 * and the final limits -22.2+/-2.5dBm0. These levels makes the speech signal
 * go towards -25.4dBm0 (-31.4dBov). Tuned with wbfile-31.4dBov.pcm
 * The limits are created by running the AGC with a file having the desired
 * signal level and thereafter plotting Rxx160_LP in the dBm0-domain defined
 * by out=10*log10(in/260537279.7); Set the target level to the average level
 * of our measure Rxx160_LP. Remember that the levels are in blocks of 16 in
 * Q(-7). (Example matlab code: round(db2pow(-21.2)*16/2^7) )
 */
#define RXX_BUFFER_LEN 10

static const WebRtc_Word16 kMsecSpeechInner = 520;
static const WebRtc_Word16 kMsecSpeechOuter = 340;

static const WebRtc_Word16 kNormalVadThreshold = 400;

static const WebRtc_Word16 kAlphaShortTerm = 6; // 1 >> 6 = 0.0156
static const WebRtc_Word16 kAlphaLongTerm = 10; // 1 >> 10 = 0.000977

typedef struct
{
    // Configurable parameters/variables
    WebRtc_UWord32 fs;                  // Sampling frequency
    WebRtc_Word16 compressionGaindB;    // Fixed gain level in dB
    WebRtc_Word16 targetLevelDbfs;      // Target level in -dBfs of envelope (default -3)
    WebRtc_Word16 agcMode;              // Hard coded mode (adaptAna/adaptDig/fixedDig)
    WebRtc_UWord8 limiterEnable;        // Enabling limiter (on/off (default off))
    WebRtcAgc_config_t defaultConfig;
    WebRtcAgc_config_t usedConfig;

    // General variables
    WebRtc_Word16 initFlag;
    WebRtc_Word16 lastError;

    // Target level parameters
    // Based on the above: analogTargetLevel = round((32767*10^(-22/20))^2*16/2^7)
    WebRtc_Word32 analogTargetLevel;    // = RXX_BUFFER_LEN * 846805;  -22 dBfs
    WebRtc_Word32 startUpperLimit;      // = RXX_BUFFER_LEN * 1066064; -21 dBfs
    WebRtc_Word32 startLowerLimit;      // = RXX_BUFFER_LEN * 672641;  -23 dBfs
    WebRtc_Word32 upperPrimaryLimit;    // = RXX_BUFFER_LEN * 1342095; -20 dBfs
    WebRtc_Word32 lowerPrimaryLimit;    // = RXX_BUFFER_LEN * 534298;  -24 dBfs
    WebRtc_Word32 upperSecondaryLimit;  // = RXX_BUFFER_LEN * 2677832; -17 dBfs
    WebRtc_Word32 lowerSecondaryLimit;  // = RXX_BUFFER_LEN * 267783;  -27 dBfs
    WebRtc_UWord16 targetIdx;           // Table index for corresponding target level
#ifdef MIC_LEVEL_FEEDBACK
    WebRtc_UWord16 targetIdxOffset;     // Table index offset for level compensation
#endif
    WebRtc_Word16 analogTarget;         // Digital reference level in ENV scale

    // Analog AGC specific variables
    WebRtc_Word32 filterState[8];       // For downsampling wb to nb
    WebRtc_Word32 upperLimit;           // Upper limit for mic energy
    WebRtc_Word32 lowerLimit;           // Lower limit for mic energy
    WebRtc_Word32 Rxx160w32;            // Average energy for one frame
    WebRtc_Word32 Rxx16_LPw32;          // Low pass filtered subframe energies
    WebRtc_Word32 Rxx160_LPw32;         // Low pass filtered frame energies
    WebRtc_Word32 Rxx16_LPw32Max;       // Keeps track of largest energy subframe
    WebRtc_Word32 Rxx16_vectorw32[RXX_BUFFER_LEN]; // Array with subframe energies
    WebRtc_Word32 Rxx16w32_array[2][5]; // Energy values of microphone signal
    WebRtc_Word32 env[2][10];           // Envelope values of subframes

    WebRtc_Word16 Rxx16pos;             // Current position in the Rxx16_vectorw32
    WebRtc_Word16 envSum;               // Filtered scaled envelope in subframes
    WebRtc_Word16 vadThreshold;         // Threshold for VAD decision
    WebRtc_Word16 inActive;             // Inactive time in milliseconds
    WebRtc_Word16 msTooLow;             // Milliseconds of speech at a too low level
    WebRtc_Word16 msTooHigh;            // Milliseconds of speech at a too high level
    WebRtc_Word16 changeToSlowMode;     // Change to slow mode after some time at target
    WebRtc_Word16 firstCall;            // First call to the process-function
    WebRtc_Word16 msZero;               // Milliseconds of zero input
    WebRtc_Word16 msecSpeechOuterChange;// Min ms of speech between volume changes
    WebRtc_Word16 msecSpeechInnerChange;// Min ms of speech between volume changes
    WebRtc_Word16 activeSpeech;         // Milliseconds of active speech
    WebRtc_Word16 muteGuardMs;          // Counter to prevent mute action
    WebRtc_Word16 inQueue;              // 10 ms batch indicator

    // Microphone level variables
    WebRtc_Word32 micRef;               // Remember ref. mic level for virtual mic
    WebRtc_UWord16 gainTableIdx;        // Current position in virtual gain table
    WebRtc_Word32 micGainIdx;           // Gain index of mic level to increase slowly
    WebRtc_Word32 micVol;               // Remember volume between frames
    WebRtc_Word32 maxLevel;             // Max possible vol level, incl dig gain
    WebRtc_Word32 maxAnalog;            // Maximum possible analog volume level
    WebRtc_Word32 maxInit;              // Initial value of "max"
    WebRtc_Word32 minLevel;             // Minimum possible volume level
    WebRtc_Word32 minOutput;            // Minimum output volume level
    WebRtc_Word32 zeroCtrlMax;          // Remember max gain => don't amp low input

    WebRtc_Word16 scale;                // Scale factor for internal volume levels
#ifdef MIC_LEVEL_FEEDBACK
    WebRtc_Word16 numBlocksMicLvlSat;
    WebRtc_UWord8 micLvlSat;
#endif
    // Structs for VAD and digital_agc
    AgcVad_t vadMic;
    DigitalAgc_t digitalAgc;

#ifdef AGC_DEBUG
    FILE* fpt;
    FILE* agcLog;
    WebRtc_Word32 fcount;
#endif

    WebRtc_Word16 lowLevelSignal;
} Agc_t;

#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_SOURCE_ANALOG_AGC_H_
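The Rxx160_LP limit constants in Agc_t follow from the dBFS levels quoted in the comments (analogTargetLevel = round((32767*10^(-22/20))^2*16/2^7), and likewise for the other limits). The following is a minimal standalone C sketch, not part of the commit, that derives such per-block values from a dBFS target; differences of a few units against the quoted constants are possible due to rounding choices in the original derivation.

/* Standalone sketch (not part of the commit): derive the per-block Rxx160_LP
 * limit constants of analog_agc.h from a dBFS target level. */
#include <math.h>
#include <stdio.h>

static long rxx_limit_from_dbfs(double dbfs)
{
    /* Peak full scale is 32767; the energy of a 16-sample block is kept in
     * Q(-7), i.e. divided by 2^7, as described in the header comment. */
    double amplitude = 32767.0 * pow(10.0, dbfs / 20.0);
    return (long)floor(amplitude * amplitude * 16.0 / 128.0 + 0.5);
}

int main(void)
{
    /* Expected to land close to the constants quoted in analog_agc.h:
     * -22 dBFS -> ~846805, -21 dBFS -> ~1066064, -23 dBFS -> ~672641. */
    printf("-22 dBFS: %ld\n", rxx_limit_from_dbfs(-22.0));
    printf("-21 dBFS: %ld\n", rxx_limit_from_dbfs(-21.0));
    printf("-23 dBFS: %ld\n", rxx_limit_from_dbfs(-23.0));
    return 0;
}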
webrtc/modules/audio_processing/agc/digital_agc.c | new file | 786 lines added
@@ -0,0 +1,786 @@
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

/* digital_agc.c
 *
 */

#include <string.h>
#ifdef AGC_DEBUG
#include <stdio.h>
#endif
#include "digital_agc.h"
#include "gain_control.h"

// To generate the gaintable, copy&paste the following lines to a Matlab window:
// MaxGain = 6; MinGain = 0; CompRatio = 3; Knee = 1;
// zeros = 0:31; lvl = 2.^(1-zeros);
// A = -10*log10(lvl) * (CompRatio - 1) / CompRatio;
// B = MaxGain - MinGain;
// gains = round(2^16*10.^(0.05 * (MinGain + B * ( log(exp(-Knee*A)+exp(-Knee*B)) - log(1+exp(-Knee*B)) ) / log(1/(1+exp(Knee*B))))));
// fprintf(1, '\t%i, %i, %i, %i,\n', gains);
// % Matlab code for plotting the gain and input/output level characteristic (copy/paste the following 3 lines):
// in = 10*log10(lvl); out = 20*log10(gains/65536);
// subplot(121); plot(in, out); axis([-30, 0, -5, 20]); grid on; xlabel('Input (dB)'); ylabel('Gain (dB)');
// subplot(122); plot(in, in+out); axis([-30, 0, -30, 5]); grid on; xlabel('Input (dB)'); ylabel('Output (dB)');
// zoom on;

// Generator table for y=log2(1+e^x) in Q8.
static const WebRtc_UWord16 kGenFuncTable[128] = {
    256, 485, 786, 1126, 1484, 1849, 2217, 2586,
    2955, 3324, 3693, 4063, 4432, 4801, 5171, 5540,
    5909, 6279, 6648, 7017, 7387, 7756, 8125, 8495,
    8864, 9233, 9603, 9972, 10341, 10711, 11080, 11449,
    11819, 12188, 12557, 12927, 13296, 13665, 14035, 14404,
    14773, 15143, 15512, 15881, 16251, 16620, 16989, 17359,
    17728, 18097, 18466, 18836, 19205, 19574, 19944, 20313,
    20682, 21052, 21421, 21790, 22160, 22529, 22898, 23268,
    23637, 24006, 24376, 24745, 25114, 25484, 25853, 26222,
    26592, 26961, 27330, 27700, 28069, 28438, 28808, 29177,
    29546, 29916, 30285, 30654, 31024, 31393, 31762, 32132,
    32501, 32870, 33240, 33609, 33978, 34348, 34717, 35086,
    35456, 35825, 36194, 36564, 36933, 37302, 37672, 38041,
    38410, 38780, 39149, 39518, 39888, 40257, 40626, 40996,
    41365, 41734, 42104, 42473, 42842, 43212, 43581, 43950,
    44320, 44689, 45058, 45428, 45797, 46166, 46536, 46905
};

static const WebRtc_Word16 kAvgDecayTime = 250; // frames; < 3000

WebRtc_Word32 WebRtcAgc_CalculateGainTable(WebRtc_Word32 *gainTable, // Q16
                                           WebRtc_Word16 digCompGaindB, // Q0
                                           WebRtc_Word16 targetLevelDbfs,// Q0
                                           WebRtc_UWord8 limiterEnable,
                                           WebRtc_Word16 analogTarget) // Q0
{
    // This function generates the compressor gain table used in the fixed digital part.
    WebRtc_UWord32 tmpU32no1, tmpU32no2, absInLevel, logApprox;
    WebRtc_Word32 inLevel, limiterLvl;
    WebRtc_Word32 tmp32, tmp32no1, tmp32no2, numFIX, den, y32;
    const WebRtc_UWord16 kLog10 = 54426; // log2(10) in Q14
    const WebRtc_UWord16 kLog10_2 = 49321; // 10*log10(2) in Q14
    const WebRtc_UWord16 kLogE_1 = 23637; // log2(e) in Q14
    WebRtc_UWord16 constMaxGain;
    WebRtc_UWord16 tmpU16, intPart, fracPart;
    const WebRtc_Word16 kCompRatio = 3;
    const WebRtc_Word16 kSoftLimiterLeft = 1;
    WebRtc_Word16 limiterOffset = 0; // Limiter offset
    WebRtc_Word16 limiterIdx, limiterLvlX;
    WebRtc_Word16 constLinApprox, zeroGainLvl, maxGain, diffGain;
    WebRtc_Word16 i, tmp16, tmp16no1;
    int zeros, zerosScale;

    // Constants
    //  kLogE_1 = 23637; // log2(e) in Q14
    //  kLog10 = 54426; // log2(10) in Q14
    //  kLog10_2 = 49321; // 10*log10(2) in Q14

    // Calculate maximum digital gain and zero gain level
    tmp32no1 = WEBRTC_SPL_MUL_16_16(digCompGaindB - analogTarget, kCompRatio - 1);
    tmp16no1 = analogTarget - targetLevelDbfs;
    tmp16no1 += WebRtcSpl_DivW32W16ResW16(tmp32no1 + (kCompRatio >> 1), kCompRatio);
    maxGain = WEBRTC_SPL_MAX(tmp16no1, (analogTarget - targetLevelDbfs));
    tmp32no1 = WEBRTC_SPL_MUL_16_16(maxGain, kCompRatio);
    zeroGainLvl = digCompGaindB;
    zeroGainLvl -= WebRtcSpl_DivW32W16ResW16(tmp32no1 + ((kCompRatio - 1) >> 1),
                                             kCompRatio - 1);
    if ((digCompGaindB <= analogTarget) && (limiterEnable))
    {
        zeroGainLvl += (analogTarget - digCompGaindB + kSoftLimiterLeft);
        limiterOffset = 0;
    }

    // Calculate the difference between maximum gain and gain at 0dB0v:
    //  diffGain = maxGain + (compRatio-1)*zeroGainLvl/compRatio
    //           = (compRatio-1)*digCompGaindB/compRatio
    tmp32no1 = WEBRTC_SPL_MUL_16_16(digCompGaindB, kCompRatio - 1);
    diffGain = WebRtcSpl_DivW32W16ResW16(tmp32no1 + (kCompRatio >> 1), kCompRatio);
    if (diffGain < 0)
    {
        return -1;
    }

    // Calculate the limiter level and index:
    //  limiterLvlX = analogTarget - limiterOffset
    //  limiterLvl  = targetLevelDbfs + limiterOffset/compRatio
    limiterLvlX = analogTarget - limiterOffset;
    limiterIdx = 2
        + WebRtcSpl_DivW32W16ResW16(WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)limiterLvlX, 13),
                                    WEBRTC_SPL_RSHIFT_U16(kLog10_2, 1));
    tmp16no1 = WebRtcSpl_DivW32W16ResW16(limiterOffset + (kCompRatio >> 1), kCompRatio);
    limiterLvl = targetLevelDbfs + tmp16no1;

    // Calculate (through table lookup):
    //  constMaxGain = log2(1+2^(log2(e)*diffGain)); (in Q8)
    constMaxGain = kGenFuncTable[diffGain]; // in Q8

    // Calculate a parameter used to approximate the fractional part of 2^x with a
    // piecewise linear function in Q14:
    //  constLinApprox = round(3/2*(4*(3-2*sqrt(2))/(log(2)^2)-0.5)*2^14);
    constLinApprox = 22817; // in Q14

    // Calculate a denominator used in the exponential part to convert from dB to linear scale:
    //  den = 20*constMaxGain (in Q8)
    den = WEBRTC_SPL_MUL_16_U16(20, constMaxGain); // in Q8

    for (i = 0; i < 32; i++)
    {
        // Calculate scaled input level (compressor):
        //  inLevel = fix((-constLog10_2*(compRatio-1)*(1-i)+fix(compRatio/2))/compRatio)
        tmp16 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16(kCompRatio - 1, i - 1); // Q0
        tmp32 = WEBRTC_SPL_MUL_16_U16(tmp16, kLog10_2) + 1; // Q14
        inLevel = WebRtcSpl_DivW32W16(tmp32, kCompRatio); // Q14

        // Calculate diffGain-inLevel, to map using the genFuncTable
        inLevel = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)diffGain, 14) - inLevel; // Q14

        // Make calculations on abs(inLevel) and compensate for the sign afterwards.
        absInLevel = (WebRtc_UWord32)WEBRTC_SPL_ABS_W32(inLevel); // Q14

        // LUT with interpolation
        intPart = (WebRtc_UWord16)WEBRTC_SPL_RSHIFT_U32(absInLevel, 14);
        fracPart = (WebRtc_UWord16)(absInLevel & 0x00003FFF); // extract the fractional part
        tmpU16 = kGenFuncTable[intPart + 1] - kGenFuncTable[intPart]; // Q8
        tmpU32no1 = WEBRTC_SPL_UMUL_16_16(tmpU16, fracPart); // Q22
        tmpU32no1 += WEBRTC_SPL_LSHIFT_U32((WebRtc_UWord32)kGenFuncTable[intPart], 14); // Q22
        logApprox = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 8); // Q14
        // Compensate for negative exponent using the relation:
        //  log2(1 + 2^-x) = log2(1 + 2^x) - x
        if (inLevel < 0)
        {
            zeros = WebRtcSpl_NormU32(absInLevel);
            zerosScale = 0;
            if (zeros < 15)
            {
                // Not enough space for multiplication
                tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(absInLevel, 15 - zeros); // Q(zeros-1)
                tmpU32no2 = WEBRTC_SPL_UMUL_32_16(tmpU32no2, kLogE_1); // Q(zeros+13)
                if (zeros < 9)
                {
                    tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 9 - zeros); // Q(zeros+13)
                    zerosScale = 9 - zeros;
                } else
                {
                    tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(tmpU32no2, zeros - 9); // Q22
                }
            } else
            {
                tmpU32no2 = WEBRTC_SPL_UMUL_32_16(absInLevel, kLogE_1); // Q28
                tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(tmpU32no2, 6); // Q22
            }
            logApprox = 0;
            if (tmpU32no2 < tmpU32no1)
            {
                logApprox = WEBRTC_SPL_RSHIFT_U32(tmpU32no1 - tmpU32no2, 8 - zerosScale); //Q14
            }
        }
        numFIX = WEBRTC_SPL_LSHIFT_W32(WEBRTC_SPL_MUL_16_U16(maxGain, constMaxGain), 6); // Q14
        numFIX -= WEBRTC_SPL_MUL_32_16((WebRtc_Word32)logApprox, diffGain); // Q14

        // Calculate ratio
        // Shift numFIX as much as possible
        zeros = WebRtcSpl_NormW32(numFIX);
        numFIX = WEBRTC_SPL_LSHIFT_W32(numFIX, zeros); // Q(14+zeros)

        // Shift den so we end up in Qy1
        tmp32no1 = WEBRTC_SPL_SHIFT_W32(den, zeros - 8); // Q(zeros)
        if (numFIX < 0)
        {
            numFIX -= WEBRTC_SPL_RSHIFT_W32(tmp32no1, 1);
        } else
        {
            numFIX += WEBRTC_SPL_RSHIFT_W32(tmp32no1, 1);
        }
        y32 = WEBRTC_SPL_DIV(numFIX, tmp32no1); // in Q14
        if (limiterEnable && (i < limiterIdx))
        {
            tmp32 = WEBRTC_SPL_MUL_16_U16(i - 1, kLog10_2); // Q14
            tmp32 -= WEBRTC_SPL_LSHIFT_W32(limiterLvl, 14); // Q14
            y32 = WebRtcSpl_DivW32W16(tmp32 + 10, 20);
        }
        if (y32 > 39000)
        {
            tmp32 = WEBRTC_SPL_MUL(y32 >> 1, kLog10) + 4096; // in Q27
            tmp32 = WEBRTC_SPL_RSHIFT_W32(tmp32, 13); // in Q14
        } else
        {
            tmp32 = WEBRTC_SPL_MUL(y32, kLog10) + 8192; // in Q28
            tmp32 = WEBRTC_SPL_RSHIFT_W32(tmp32, 14); // in Q14
        }
        tmp32 += WEBRTC_SPL_LSHIFT_W32(16, 14); // in Q14 (Make sure final output is in Q16)

        // Calculate power
        if (tmp32 > 0)
        {
            intPart = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 14);
            fracPart = (WebRtc_UWord16)(tmp32 & 0x00003FFF); // in Q14
            if (WEBRTC_SPL_RSHIFT_W32(fracPart, 13))
            {
                tmp16 = WEBRTC_SPL_LSHIFT_W16(2, 14) - constLinApprox;
                tmp32no2 = WEBRTC_SPL_LSHIFT_W32(1, 14) - fracPart;
                tmp32no2 = WEBRTC_SPL_MUL_32_16(tmp32no2, tmp16);
                tmp32no2 = WEBRTC_SPL_RSHIFT_W32(tmp32no2, 13);
                tmp32no2 = WEBRTC_SPL_LSHIFT_W32(1, 14) - tmp32no2;
            } else
            {
                tmp16 = constLinApprox - WEBRTC_SPL_LSHIFT_W16(1, 14);
                tmp32no2 = WEBRTC_SPL_MUL_32_16(fracPart, tmp16);
                tmp32no2 = WEBRTC_SPL_RSHIFT_W32(tmp32no2, 13);
            }
            fracPart = (WebRtc_UWord16)tmp32no2;
            gainTable[i] = WEBRTC_SPL_LSHIFT_W32(1, intPart)
                           + WEBRTC_SPL_SHIFT_W32(fracPart, intPart - 14);
        } else
        {
            gainTable[i] = 0;
        }
    }

    return 0;
}

WebRtc_Word32 WebRtcAgc_InitDigital(DigitalAgc_t *stt, WebRtc_Word16 agcMode)
{

    if (agcMode == kAgcModeFixedDigital)
    {
        // start at minimum to find correct gain faster
        stt->capacitorSlow = 0;
    } else
    {
        // start out with 0 dB gain
        stt->capacitorSlow = 134217728; // (WebRtc_Word32)(0.125f * 32768.0f * 32768.0f);
    }
    stt->capacitorFast = 0;
    stt->gain = 65536;
    stt->gatePrevious = 0;
    stt->agcMode = agcMode;
#ifdef AGC_DEBUG
    stt->frameCounter = 0;
#endif

    // initialize VADs
    WebRtcAgc_InitVad(&stt->vadNearend);
    WebRtcAgc_InitVad(&stt->vadFarend);

    return 0;
}

WebRtc_Word32 WebRtcAgc_AddFarendToDigital(DigitalAgc_t *stt, const WebRtc_Word16 *in_far,
                                           WebRtc_Word16 nrSamples)
{
    // Check for valid pointer
    if (&stt->vadFarend == NULL)
    {
        return -1;
    }

    // VAD for far end
    WebRtcAgc_ProcessVad(&stt->vadFarend, in_far, nrSamples);

    return 0;
}

WebRtc_Word32 WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const WebRtc_Word16 *in_near,
                                       const WebRtc_Word16 *in_near_H, WebRtc_Word16 *out,
                                       WebRtc_Word16 *out_H, WebRtc_UWord32 FS,
                                       WebRtc_Word16 lowlevelSignal)
{
    // array for gains (one value per ms, incl start & end)
    WebRtc_Word32 gains[11];

    WebRtc_Word32 out_tmp, tmp32;
    WebRtc_Word32 env[10];
    WebRtc_Word32 nrg, max_nrg;
    WebRtc_Word32 cur_level;
    WebRtc_Word32 gain32, delta;
    WebRtc_Word16 logratio;
    WebRtc_Word16 lower_thr, upper_thr;
    WebRtc_Word16 zeros, zeros_fast, frac;
    WebRtc_Word16 decay;
    WebRtc_Word16 gate, gain_adj;
    WebRtc_Word16 k, n;
    WebRtc_Word16 L, L2; // samples/subframe

    // determine number of samples per ms
    if (FS == 8000)
    {
        L = 8;
        L2 = 3;
    } else if (FS == 16000)
    {
        L = 16;
        L2 = 4;
    } else if (FS == 32000)
    {
        L = 16;
        L2 = 4;
    } else
    {
        return -1;
    }

    // TODO(andrew): again, we don't need input and output pointers...
    if (in_near != out)
    {
        // Only needed if they don't already point to the same place.
        memcpy(out, in_near, 10 * L * sizeof(WebRtc_Word16));
    }
    if (FS == 32000)
    {
        if (in_near_H != out_H)
        {
            memcpy(out_H, in_near_H, 10 * L * sizeof(WebRtc_Word16));
        }
    }
    // VAD for near end
    logratio = WebRtcAgc_ProcessVad(&stt->vadNearend, out, L * 10);

    // Account for far end VAD
    if (stt->vadFarend.counter > 10)
    {
        tmp32 = WEBRTC_SPL_MUL_16_16(3, logratio);
        logratio = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32 - stt->vadFarend.logRatio, 2);
    }

    // Determine decay factor depending on VAD
    //  upper_thr = 1.0f;
    //  lower_thr = 0.25f;
    upper_thr = 1024; // Q10
    lower_thr = 0; // Q10
    if (logratio > upper_thr)
    {
        // decay = -2^17 / DecayTime;  ->  -65
        decay = -65;
    } else if (logratio < lower_thr)
    {
        decay = 0;
    } else
    {
        // decay = (WebRtc_Word16)(((lower_thr - logratio)
        //       * (2^27/(DecayTime*(upper_thr-lower_thr)))) >> 10);
        // SUBSTITUTED: 2^27/(DecayTime*(upper_thr-lower_thr))  ->  65
        tmp32 = WEBRTC_SPL_MUL_16_16((lower_thr - logratio), 65);
        decay = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 10);
    }

    // adjust decay factor for long silence (detected as low standard deviation)
    // This is only done in the adaptive modes
    if (stt->agcMode != kAgcModeFixedDigital)
    {
        if (stt->vadNearend.stdLongTerm < 4000)
        {
            decay = 0;
        } else if (stt->vadNearend.stdLongTerm < 8096)
        {
            // decay = (WebRtc_Word16)(((stt->vadNearend.stdLongTerm - 4000) * decay) >> 12);
            tmp32 = WEBRTC_SPL_MUL_16_16((stt->vadNearend.stdLongTerm - 4000), decay);
            decay = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 12);
        }

        if (lowlevelSignal != 0)
        {
            decay = 0;
        }
    }
#ifdef AGC_DEBUG
    stt->frameCounter++;
    fprintf(stt->logFile, "%5.2f\t%d\t%d\t%d\t", (float)(stt->frameCounter) / 100, logratio, decay, stt->vadNearend.stdLongTerm);
#endif
    // Find max amplitude per sub frame
    // iterate over sub frames
    for (k = 0; k < 10; k++)
    {
        // iterate over samples
        max_nrg = 0;
        for (n = 0; n < L; n++)
        {
            nrg = WEBRTC_SPL_MUL_16_16(out[k * L + n], out[k * L + n]);
            if (nrg > max_nrg)
            {
                max_nrg = nrg;
            }
        }
        env[k] = max_nrg;
    }

    // Calculate gain per sub frame
    gains[0] = stt->gain;
    for (k = 0; k < 10; k++)
    {
        // Fast envelope follower
        //  decay time = -131000 / -1000 = 131 (ms)
        stt->capacitorFast = AGC_SCALEDIFF32(-1000, stt->capacitorFast, stt->capacitorFast);
        if (env[k] > stt->capacitorFast)
        {
            stt->capacitorFast = env[k];
        }
        // Slow envelope follower
        if (env[k] > stt->capacitorSlow)
        {
            // increase capacitorSlow
            stt->capacitorSlow
                = AGC_SCALEDIFF32(500, (env[k] - stt->capacitorSlow), stt->capacitorSlow);
        } else
        {
            // decrease capacitorSlow
            stt->capacitorSlow
                = AGC_SCALEDIFF32(decay, stt->capacitorSlow, stt->capacitorSlow);
        }

        // use maximum of both capacitors as current level
        if (stt->capacitorFast > stt->capacitorSlow)
        {
            cur_level = stt->capacitorFast;
        } else
        {
            cur_level = stt->capacitorSlow;
        }
        // Translate signal level into gain, using a piecewise linear approximation
        // find number of leading zeros
        zeros = WebRtcSpl_NormU32((WebRtc_UWord32)cur_level);
        if (cur_level == 0)
        {
            zeros = 31;
        }
        tmp32 = (WEBRTC_SPL_LSHIFT_W32(cur_level, zeros) & 0x7FFFFFFF);
        frac = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 19); // Q12
        tmp32 = WEBRTC_SPL_MUL((stt->gainTable[zeros-1] - stt->gainTable[zeros]), frac);
        gains[k + 1] = stt->gainTable[zeros] + WEBRTC_SPL_RSHIFT_W32(tmp32, 12);
#ifdef AGC_DEBUG
        if (k == 0)
        {
            fprintf(stt->logFile, "%d\t%d\t%d\t%d\t%d\n", env[0], cur_level, stt->capacitorFast, stt->capacitorSlow, zeros);
        }
#endif
    }

    // Gate processing (lower gain during absence of speech)
    zeros = WEBRTC_SPL_LSHIFT_W16(zeros, 9) - WEBRTC_SPL_RSHIFT_W16(frac, 3);
    // find number of leading zeros
    zeros_fast = WebRtcSpl_NormU32((WebRtc_UWord32)stt->capacitorFast);
    if (stt->capacitorFast == 0)
    {
        zeros_fast = 31;
    }
    tmp32 = (WEBRTC_SPL_LSHIFT_W32(stt->capacitorFast, zeros_fast) & 0x7FFFFFFF);
    zeros_fast = WEBRTC_SPL_LSHIFT_W16(zeros_fast, 9);
    zeros_fast -= (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 22);

    gate = 1000 + zeros_fast - zeros - stt->vadNearend.stdShortTerm;

    if (gate < 0)
    {
        stt->gatePrevious = 0;
    } else
    {
        tmp32 = WEBRTC_SPL_MUL_16_16(stt->gatePrevious, 7);
        gate = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32((WebRtc_Word32)gate + tmp32, 3);
        stt->gatePrevious = gate;
    }
    // gate < 0     -> no gate
    // gate > 2500  -> max gate
    if (gate > 0)
    {
        if (gate < 2500)
        {
            gain_adj = WEBRTC_SPL_RSHIFT_W16(2500 - gate, 5);
        } else
        {
            gain_adj = 0;
        }
        for (k = 0; k < 10; k++)
        {
            if ((gains[k + 1] - stt->gainTable[0]) > 8388608)
            {
                // To prevent wraparound
                tmp32 = WEBRTC_SPL_RSHIFT_W32((gains[k+1] - stt->gainTable[0]), 8);
                tmp32 = WEBRTC_SPL_MUL(tmp32, (178 + gain_adj));
            } else
            {
                tmp32 = WEBRTC_SPL_MUL((gains[k+1] - stt->gainTable[0]), (178 + gain_adj));
                tmp32 = WEBRTC_SPL_RSHIFT_W32(tmp32, 8);
            }
            gains[k + 1] = stt->gainTable[0] + tmp32;
        }
    }

    // Limit gain to avoid overload distortion
    for (k = 0; k < 10; k++)
    {
        // To prevent wrap around
        zeros = 10;
        if (gains[k + 1] > 47453132)
        {
            zeros = 16 - WebRtcSpl_NormW32(gains[k + 1]);
        }
        gain32 = WEBRTC_SPL_RSHIFT_W32(gains[k+1], zeros) + 1;
        gain32 = WEBRTC_SPL_MUL(gain32, gain32);
        // check for overflow
        while (AGC_MUL32(WEBRTC_SPL_RSHIFT_W32(env[k], 12) + 1, gain32)
               > WEBRTC_SPL_SHIFT_W32((WebRtc_Word32)32767, 2 * (1 - zeros + 10)))
        {
            // multiply by 253/256 ==> -0.1 dB
            if (gains[k + 1] > 8388607)
            {
                // Prevent wrap around
                gains[k + 1] = WEBRTC_SPL_MUL(WEBRTC_SPL_RSHIFT_W32(gains[k+1], 8), 253);
            } else
            {
                gains[k + 1] = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(gains[k+1], 253), 8);
            }
            gain32 = WEBRTC_SPL_RSHIFT_W32(gains[k+1], zeros) + 1;
            gain32 = WEBRTC_SPL_MUL(gain32, gain32);
        }
    }
    // gain reductions should be done 1 ms earlier than gain increases
    for (k = 1; k < 10; k++)
    {
        if (gains[k] > gains[k + 1])
        {
            gains[k] = gains[k + 1];
        }
    }
    // save start gain for next frame
    stt->gain = gains[10];

    // Apply gain
    // handle first sub frame separately
    delta = WEBRTC_SPL_LSHIFT_W32(gains[1] - gains[0], (4 - L2));
    gain32 = WEBRTC_SPL_LSHIFT_W32(gains[0], 4);
    // iterate over samples
    for (n = 0; n < L; n++)
    {
        // For lower band
        tmp32 = WEBRTC_SPL_MUL((WebRtc_Word32)out[n], WEBRTC_SPL_RSHIFT_W32(gain32 + 127, 7));
        out_tmp = WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
        if (out_tmp > 4095)
        {
            out[n] = (WebRtc_Word16)32767;
        } else if (out_tmp < -4096)
        {
            out[n] = (WebRtc_Word16)-32768;
        } else
        {
            tmp32 = WEBRTC_SPL_MUL((WebRtc_Word32)out[n], WEBRTC_SPL_RSHIFT_W32(gain32, 4));
            out[n] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
        }
        // For higher band
        if (FS == 32000)
        {
            tmp32 = WEBRTC_SPL_MUL((WebRtc_Word32)out_H[n],
                                   WEBRTC_SPL_RSHIFT_W32(gain32 + 127, 7));
            out_tmp = WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
            if (out_tmp > 4095)
            {
                out_H[n] = (WebRtc_Word16)32767;
            } else if (out_tmp < -4096)
            {
                out_H[n] = (WebRtc_Word16)-32768;
            } else
            {
                tmp32 = WEBRTC_SPL_MUL((WebRtc_Word32)out_H[n],
                                       WEBRTC_SPL_RSHIFT_W32(gain32, 4));
                out_H[n] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
            }
        }
        //

        gain32 += delta;
    }
    // iterate over subframes
    for (k = 1; k < 10; k++)
    {
        delta = WEBRTC_SPL_LSHIFT_W32(gains[k+1] - gains[k], (4 - L2));
        gain32 = WEBRTC_SPL_LSHIFT_W32(gains[k], 4);
        // iterate over samples
        for (n = 0; n < L; n++)
        {
            // For lower band
            tmp32 = WEBRTC_SPL_MUL((WebRtc_Word32)out[k * L + n],
                                   WEBRTC_SPL_RSHIFT_W32(gain32, 4));
            out[k * L + n] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
            // For higher band
            if (FS == 32000)
            {
                tmp32 = WEBRTC_SPL_MUL((WebRtc_Word32)out_H[k * L + n],
                                       WEBRTC_SPL_RSHIFT_W32(gain32, 4));
                out_H[k * L + n] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
            }
            gain32 += delta;
        }
    }

    return 0;
}

void WebRtcAgc_InitVad(AgcVad_t *state)
{
    WebRtc_Word16 k;

    state->HPstate = 0; // state of high pass filter
    state->logRatio = 0; // log( P(active) / P(inactive) )
    // average input level (Q10)
    state->meanLongTerm = WEBRTC_SPL_LSHIFT_W16(15, 10);

    // variance of input level (Q8)
    state->varianceLongTerm = WEBRTC_SPL_LSHIFT_W32(500, 8);

    state->stdLongTerm = 0; // standard deviation of input level in dB
    // short-term average input level (Q10)
    state->meanShortTerm = WEBRTC_SPL_LSHIFT_W16(15, 10);

    // short-term variance of input level (Q8)
    state->varianceShortTerm = WEBRTC_SPL_LSHIFT_W32(500, 8);

    state->stdShortTerm = 0; // short-term standard deviation of input level in dB
    state->counter = 3; // counts updates
    for (k = 0; k < 8; k++)
    {
        // downsampling filter
        state->downState[k] = 0;
    }
}

WebRtc_Word16 WebRtcAgc_ProcessVad(AgcVad_t *state, // (i) VAD state
                                   const WebRtc_Word16 *in, // (i) Speech signal
                                   WebRtc_Word16 nrSamples) // (i) number of samples
{
    WebRtc_Word32 out, nrg, tmp32, tmp32b;
    WebRtc_UWord16 tmpU16;
    WebRtc_Word16 k, subfr, tmp16;
    WebRtc_Word16 buf1[8];
    WebRtc_Word16 buf2[4];
    WebRtc_Word16 HPstate;
    WebRtc_Word16 zeros, dB;

    // process in 10 sub frames of 1 ms (to save on memory)
    nrg = 0;
    HPstate = state->HPstate;
    for (subfr = 0; subfr < 10; subfr++)
    {
        // downsample to 4 kHz
        if (nrSamples == 160)
        {
            for (k = 0; k < 8; k++)
            {
                tmp32 = (WebRtc_Word32)in[2 * k] + (WebRtc_Word32)in[2 * k + 1];
                tmp32 = WEBRTC_SPL_RSHIFT_W32(tmp32, 1);
                buf1[k] = (WebRtc_Word16)tmp32;
            }
            in += 16;

            WebRtcSpl_DownsampleBy2(buf1, 8, buf2, state->downState);
        } else
        {
            WebRtcSpl_DownsampleBy2(in, 8, buf2, state->downState);
            in += 8;
        }

        // high pass filter and compute energy
        for (k = 0; k < 4; k++)
        {
            out = buf2[k] + HPstate;
            tmp32 = WEBRTC_SPL_MUL(600, out);
            HPstate = (WebRtc_Word16)(WEBRTC_SPL_RSHIFT_W32(tmp32, 10) - buf2[k]);
            tmp32 = WEBRTC_SPL_MUL(out, out);
            nrg += WEBRTC_SPL_RSHIFT_W32(tmp32, 6);
        }
    }
    state->HPstate = HPstate;

    // find number of leading zeros
    if (!(0xFFFF0000 & nrg))
    {
        zeros = 16;
    } else
    {
        zeros = 0;
    }
    if (!(0xFF000000 & (nrg << zeros)))
    {
        zeros += 8;
    }
    if (!(0xF0000000 & (nrg << zeros)))
    {
        zeros += 4;
    }
    if (!(0xC0000000 & (nrg << zeros)))
    {
        zeros += 2;
    }
    if (!(0x80000000 & (nrg << zeros)))
    {
        zeros += 1;
    }

    // energy level (range {-32..30}) (Q10)
    dB = WEBRTC_SPL_LSHIFT_W16(15 - zeros, 11);

    // Update statistics

    if (state->counter < kAvgDecayTime)
    {
        // decay time = AvgDecTime * 10 ms
        state->counter++;
    }

    // update short-term estimate of mean energy level (Q10)
    tmp32 = (WEBRTC_SPL_MUL_16_16(state->meanShortTerm, 15) + (WebRtc_Word32)dB);
    state->meanShortTerm = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 4);

    // update short-term estimate of variance in energy level (Q8)
    tmp32 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_16_16(dB, dB), 12);
    tmp32 += WEBRTC_SPL_MUL(state->varianceShortTerm, 15);
    state->varianceShortTerm = WEBRTC_SPL_RSHIFT_W32(tmp32, 4);

    // update short-term estimate of standard deviation in energy level (Q10)
    tmp32 = WEBRTC_SPL_MUL_16_16(state->meanShortTerm, state->meanShortTerm);
    tmp32 = WEBRTC_SPL_LSHIFT_W32(state->varianceShortTerm, 12) - tmp32;
    state->stdShortTerm = (WebRtc_Word16)WebRtcSpl_Sqrt(tmp32);

    // update long-term estimate of mean energy level (Q10)
    tmp32 = WEBRTC_SPL_MUL_16_16(state->meanLongTerm, state->counter) + (WebRtc_Word32)dB;
    state->meanLongTerm = WebRtcSpl_DivW32W16ResW16(tmp32,
                                                    WEBRTC_SPL_ADD_SAT_W16(state->counter, 1));

    // update long-term estimate of variance in energy level (Q8)
    tmp32 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_16_16(dB, dB), 12);
    tmp32 += WEBRTC_SPL_MUL(state->varianceLongTerm, state->counter);
    state->varianceLongTerm = WebRtcSpl_DivW32W16(tmp32,
                                                  WEBRTC_SPL_ADD_SAT_W16(state->counter, 1));

    // update long-term estimate of standard deviation in energy level (Q10)
    tmp32 = WEBRTC_SPL_MUL_16_16(state->meanLongTerm, state->meanLongTerm);
    tmp32 = WEBRTC_SPL_LSHIFT_W32(state->varianceLongTerm, 12) - tmp32;
    state->stdLongTerm = (WebRtc_Word16)WebRtcSpl_Sqrt(tmp32);

    // update voice activity measure (Q10)
    tmp16 = WEBRTC_SPL_LSHIFT_W16(3, 12);
    tmp32 = WEBRTC_SPL_MUL_16_16(tmp16, (dB - state->meanLongTerm));
    tmp32 = WebRtcSpl_DivW32W16(tmp32, state->stdLongTerm);
    tmpU16 = WEBRTC_SPL_LSHIFT_U16((WebRtc_UWord16)13, 12);
    tmp32b = WEBRTC_SPL_MUL_16_U16(state->logRatio, tmpU16);
    tmp32 += WEBRTC_SPL_RSHIFT_W32(tmp32b, 10);

    state->logRatio = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 6);

    // limit
    if (state->logRatio > 2048)
    {
        state->logRatio = 2048;
    }
    if (state->logRatio < -2048)
    {
        state->logRatio = -2048;
    }

    return state->logRatio; // Q10
}
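The kGenFuncTable and compressor gain tables above are generated offline; the Matlab snippet in the file comments is the reference. The following is a floating-point C sketch, not part of the commit, that mirrors that Matlab code (MaxGain = 6, MinGain = 0, CompRatio = 3, Knee = 1) and the log2(1+e^x) generator table.

/* Standalone sketch (not part of the commit): floating-point versions of the
 * offline table generation described in the Matlab comments above. */
#include <math.h>
#include <stdio.h>

int main(void)
{
    int i;

    /* kGenFuncTable[i] is round(2^8 * log2(1 + e^i)); the first entries should
     * come out as 256, 485, 786, ... as in the table above. */
    for (i = 0; i < 128; i++)
    {
        printf("%d, ", (int)floor(256.0 * log2(1.0 + exp((double)i)) + 0.5));
    }
    printf("\n");

    /* Compressor gains in Q16, following the Matlab comment:
     * MaxGain = 6; MinGain = 0; CompRatio = 3; Knee = 1; */
    {
        const double MaxGain = 6.0, MinGain = 0.0, CompRatio = 3.0, Knee = 1.0;
        const double B = MaxGain - MinGain;
        for (i = 0; i < 32; i++)
        {
            double lvl = pow(2.0, 1.0 - i);
            double A = -10.0 * log10(lvl) * (CompRatio - 1.0) / CompRatio;
            double gain_db = MinGain + B * (log(exp(-Knee * A) + exp(-Knee * B))
                             - log(1.0 + exp(-Knee * B))) / log(1.0 / (1.0 + exp(Knee * B)));
            printf("%d, ", (int)floor(65536.0 * pow(10.0, 0.05 * gain_db) + 0.5));
        }
        printf("\n");
    }
    return 0;
}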
webrtc/modules/audio_processing/agc/digital_agc.h | new file | 76 lines added
@@ -0,0 +1,76 @@
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_SOURCE_DIGITAL_AGC_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_SOURCE_DIGITAL_AGC_H_

#ifdef AGC_DEBUG
#include <stdio.h>
#endif
#include "typedefs.h"
#include "signal_processing_library.h"

// the 32 most significant bits of A(19) * B(26) >> 13
#define AGC_MUL32(A, B) (((B)>>13)*(A) + ( ((0x00001FFF & (B))*(A)) >> 13 ))
// C + the 32 most significant bits of A * B
#define AGC_SCALEDIFF32(A, B, C) ((C) + ((B)>>16)*(A) + ( ((0x0000FFFF & (B))*(A)) >> 16 ))

typedef struct
{
    WebRtc_Word32 downState[8];
    WebRtc_Word16 HPstate;
    WebRtc_Word16 counter;
    WebRtc_Word16 logRatio; // log( P(active) / P(inactive) ) (Q10)
    WebRtc_Word16 meanLongTerm; // Q10
    WebRtc_Word32 varianceLongTerm; // Q8
    WebRtc_Word16 stdLongTerm; // Q10
    WebRtc_Word16 meanShortTerm; // Q10
    WebRtc_Word32 varianceShortTerm; // Q8
    WebRtc_Word16 stdShortTerm; // Q10
} AgcVad_t; // total = 54 bytes

typedef struct
{
    WebRtc_Word32 capacitorSlow;
    WebRtc_Word32 capacitorFast;
    WebRtc_Word32 gain;
    WebRtc_Word32 gainTable[32];
    WebRtc_Word16 gatePrevious;
    WebRtc_Word16 agcMode;
    AgcVad_t vadNearend;
    AgcVad_t vadFarend;
#ifdef AGC_DEBUG
    FILE* logFile;
    int frameCounter;
#endif
} DigitalAgc_t;

WebRtc_Word32 WebRtcAgc_InitDigital(DigitalAgc_t *digitalAgcInst, WebRtc_Word16 agcMode);

WebRtc_Word32 WebRtcAgc_ProcessDigital(DigitalAgc_t *digitalAgcInst, const WebRtc_Word16 *inNear,
                                       const WebRtc_Word16 *inNear_H, WebRtc_Word16 *out,
                                       WebRtc_Word16 *out_H, WebRtc_UWord32 FS,
                                       WebRtc_Word16 lowLevelSignal);

WebRtc_Word32 WebRtcAgc_AddFarendToDigital(DigitalAgc_t *digitalAgcInst, const WebRtc_Word16 *inFar,
                                           WebRtc_Word16 nrSamples);

void WebRtcAgc_InitVad(AgcVad_t *vadInst);

WebRtc_Word16 WebRtcAgc_ProcessVad(AgcVad_t *vadInst, // (i) VAD state
                                   const WebRtc_Word16 *in, // (i) Speech signal
                                   WebRtc_Word16 nrSamples); // (i) number of samples

WebRtc_Word32 WebRtcAgc_CalculateGainTable(WebRtc_Word32 *gainTable, // Q16
                                           WebRtc_Word16 compressionGaindB, // Q0 (in dB)
                                           WebRtc_Word16 targetLevelDbfs,// Q0 (in dB)
                                           WebRtc_UWord8 limiterEnable, WebRtc_Word16 analogTarget);

#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_SOURCE_ANALOG_AGC_H_
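AGC_MUL32 and AGC_SCALEDIFF32 build a wide multiply from 32x16-bit pieces so the result approximates (A*B) >> 13 and C + ((A*B) >> 16) without requiring a 64-bit type. A small self-check, not part of the commit, compares them against a 64-bit reference; for arbitrary operands the macros can differ from the reference by rounding in the lowest bits.

/* Standalone sketch (not part of the commit): compare the split-multiply
 * macros of digital_agc.h against a straightforward 64-bit reference. */
#include <stdint.h>
#include <stdio.h>

#define AGC_MUL32(A, B) (((B)>>13)*(A) + ( ((0x00001FFF & (B))*(A)) >> 13 ))
#define AGC_SCALEDIFF32(A, B, C) ((C) + ((B)>>16)*(A) + ( ((0x0000FFFF & (B))*(A)) >> 16 ))

int main(void)
{
    int32_t A = -1000;      /* e.g. the fast-capacitor decay factor */
    int32_t B = 134217728;  /* e.g. the capacitorSlow start value (2^27) */
    int32_t C = B;

    int64_t ref_mul = ((int64_t)A * B) >> 13;
    int64_t ref_scalediff = (int64_t)C + (((int64_t)A * B) >> 16);

    printf("AGC_MUL32:       %d (64-bit reference %lld)\n",
           (int)AGC_MUL32(A, B), (long long)ref_mul);
    printf("AGC_SCALEDIFF32: %d (64-bit reference %lld)\n",
           (int)AGC_SCALEDIFF32(A, B, C), (long long)ref_scalediff);
    return 0;
}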
webrtc/modules/audio_processing/agc/interface/gain_control.h | new file | 273 lines added
@@ -0,0 +1,273 @@
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_INTERFACE_GAIN_CONTROL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_INTERFACE_GAIN_CONTROL_H_

#include "typedefs.h"

// Errors
#define AGC_UNSPECIFIED_ERROR           18000
#define AGC_UNSUPPORTED_FUNCTION_ERROR  18001
#define AGC_UNINITIALIZED_ERROR         18002
#define AGC_NULL_POINTER_ERROR          18003
#define AGC_BAD_PARAMETER_ERROR         18004

// Warnings
#define AGC_BAD_PARAMETER_WARNING       18050

enum
{
    kAgcModeUnchanged,
    kAgcModeAdaptiveAnalog,
    kAgcModeAdaptiveDigital,
    kAgcModeFixedDigital
};

enum
{
    kAgcFalse = 0,
    kAgcTrue
};

typedef struct
{
    WebRtc_Word16 targetLevelDbfs;   // default 3 (-3 dBOv)
    WebRtc_Word16 compressionGaindB; // default 9 dB
    WebRtc_UWord8 limiterEnable;     // default kAgcTrue (on)
} WebRtcAgc_config_t;

#if defined(__cplusplus)
extern "C"
{
#endif

/*
 * This function processes a 10/20ms frame of far-end speech to determine
 * if there is active speech. Far-end speech length can be either 10ms or
 * 20ms. The length of the input speech vector must be given in samples
 * (80/160 when FS=8000, and 160/320 when FS=16000 or FS=32000).
 *
 * Input:
 *      - agcInst           : AGC instance.
 *      - inFar             : Far-end input speech vector (10 or 20ms)
 *      - samples           : Number of samples in input vector
 *
 * Return value:
 *                          :  0 - Normal operation.
 *                          : -1 - Error
 */
int WebRtcAgc_AddFarend(void* agcInst,
                        const WebRtc_Word16* inFar,
                        WebRtc_Word16 samples);

/*
 * This function processes a 10/20ms frame of microphone speech to determine
 * if there is active speech. Microphone speech length can be either 10ms or
 * 20ms. The length of the input speech vector must be given in samples
 * (80/160 when FS=8000, and 160/320 when FS=16000 or FS=32000). For very low
 * input levels, the input signal is increased in level by multiplying and
 * overwriting the samples in inMic[].
 *
 * This function should be called before any further processing of the
 * near-end microphone signal.
 *
 * Input:
 *      - agcInst           : AGC instance.
 *      - inMic             : Microphone input speech vector (10 or 20 ms) for
 *                            L band
 *      - inMic_H           : Microphone input speech vector (10 or 20 ms) for
 *                            H band
 *      - samples           : Number of samples in input vector
 *
 * Return value:
 *                          :  0 - Normal operation.
 *                          : -1 - Error
 */
int WebRtcAgc_AddMic(void* agcInst,
                     WebRtc_Word16* inMic,
                     WebRtc_Word16* inMic_H,
                     WebRtc_Word16 samples);

/*
 * This function replaces the analog microphone with a virtual one.
 * It is a digital gain applied to the input signal and is used in the
 * agcAdaptiveDigital mode where no microphone level is adjustable.
 * Microphone speech length can be either 10ms or 20ms. The length of the
 * input speech vector must be given in samples (80/160 when FS=8000, and
 * 160/320 when FS=16000 or FS=32000).
 *
 * Input:
 *      - agcInst           : AGC instance.
 *      - inMic             : Microphone input speech vector for (10 or 20 ms)
 *                            L band
 *      - inMic_H           : Microphone input speech vector for (10 or 20 ms)
 *                            H band
 *      - samples           : Number of samples in input vector
 *      - micLevelIn        : Input level of microphone (static)
 *
 * Output:
 *      - inMic             : Microphone output after processing (L band)
 *      - inMic_H           : Microphone output after processing (H band)
 *      - micLevelOut       : Adjusted microphone level after processing
 *
 * Return value:
 *                          :  0 - Normal operation.
 *                          : -1 - Error
 */
int WebRtcAgc_VirtualMic(void* agcInst,
                         WebRtc_Word16* inMic,
                         WebRtc_Word16* inMic_H,
                         WebRtc_Word16 samples,
                         WebRtc_Word32 micLevelIn,
                         WebRtc_Word32* micLevelOut);

/*
 * This function processes a 10/20ms frame and adjusts (normalizes) the gain
 * both analog and digitally. The gain adjustments are done only during
 * active periods of speech. The input speech length can be either 10ms or
 * 20ms and the output is of the same length. The length of the speech
 * vectors must be given in samples (80/160 when FS=8000, and 160/320 when
 * FS=16000 or FS=32000). The echo parameter can be used to ensure the AGC will
 * not adjust upward in the presence of echo.
 *
 * This function should be called after processing the near-end microphone
 * signal, in any case after any echo cancellation.
 *
 * Input:
 *      - agcInst           : AGC instance
 *      - inNear            : Near-end input speech vector (10 or 20 ms) for
 *                            L band
 *      - inNear_H          : Near-end input speech vector (10 or 20 ms) for
 *                            H band
 *      - samples           : Number of samples in input/output vector
 *      - inMicLevel        : Current microphone volume level
 *      - echo              : Set to 0 if the signal passed to add_mic is
 *                            almost certainly free of echo; otherwise set
 *                            to 1. If you have no information regarding echo
 *                            set to 0.
 *
 * Output:
 *      - outMicLevel       : Adjusted microphone volume level
 *      - out               : Gain-adjusted near-end speech vector (L band)
 *                          : May be the same vector as the input.
 *      - out_H             : Gain-adjusted near-end speech vector (H band)
 *      - saturationWarning : A returned value of 1 indicates a saturation event
 *                            has occurred and the volume cannot be further
 *                            reduced. Otherwise will be set to 0.
 *
 * Return value:
 *                          :  0 - Normal operation.
 *                          : -1 - Error
 */
int WebRtcAgc_Process(void* agcInst,
                      const WebRtc_Word16* inNear,
                      const WebRtc_Word16* inNear_H,
                      WebRtc_Word16 samples,
                      WebRtc_Word16* out,
                      WebRtc_Word16* out_H,
                      WebRtc_Word32 inMicLevel,
                      WebRtc_Word32* outMicLevel,
                      WebRtc_Word16 echo,
                      WebRtc_UWord8* saturationWarning);

/*
 * This function sets the config parameters (targetLevelDbfs,
 * compressionGaindB and limiterEnable).
 *
 * Input:
 *      - agcInst           : AGC instance
 *      - config            : config struct
 *
 * Output:
 *
 * Return value:
 *                          :  0 - Normal operation.
 *                          : -1 - Error
 */
int WebRtcAgc_set_config(void* agcInst, WebRtcAgc_config_t config);

/*
 * This function returns the config parameters (targetLevelDbfs,
 * compressionGaindB and limiterEnable).
 *
 * Input:
 *      - agcInst           : AGC instance
 *
 * Output:
 *      - config            : config struct
 *
 * Return value:
 *                          :  0 - Normal operation.
 *                          : -1 - Error
 */
int WebRtcAgc_get_config(void* agcInst, WebRtcAgc_config_t* config);

/*
 * This function creates an AGC instance, which will contain the state
 * information for one (duplex) channel.
 *
 * Return value             : AGC instance if successful
 *                          : 0 (i.e., a NULL pointer) if unsuccessful
 */
int WebRtcAgc_Create(void **agcInst);

/*
 * This function frees the AGC instance created at the beginning.
 *
 * Input:
 *      - agcInst           : AGC instance.
 *
 * Return value             :  0 - Ok
 *                            -1 - Error
 */
int WebRtcAgc_Free(void *agcInst);

/*
 * This function initializes an AGC instance.
 *
 * Input:
 *      - agcInst           : AGC instance.
 *      - minLevel          : Minimum possible mic level
 *      - maxLevel          : Maximum possible mic level
 *      - agcMode           : 0 - Unchanged
 *                          : 1 - Adaptive Analog Automatic Gain Control -3dBOv
 *                          : 2 - Adaptive Digital Automatic Gain Control -3dBOv
 *                          : 3 - Fixed Digital Gain 0dB
 *      - fs                : Sampling frequency
 *
 * Return value             :  0 - Ok
 *                            -1 - Error
 */
int WebRtcAgc_Init(void *agcInst,
                   WebRtc_Word32 minLevel,
                   WebRtc_Word32 maxLevel,
                   WebRtc_Word16 agcMode,
                   WebRtc_UWord32 fs);

/*
 * This function returns a text string containing the version.
 *
 * Input:
 *      - length            : Length of the char array pointed to by version
 * Output:
 *      - version           : Pointer to a char array of to which the version
 *                          : string will be copied.
 *
 * Return value             :  0 - OK
 *                            -1 - Error
 */
int WebRtcAgc_Version(WebRtc_Word8 *versionStr, WebRtc_Word16 length);

#if defined(__cplusplus)
}
#endif

#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_INTERFACE_GAIN_CONTROL_H_
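Taken together, the functions declared above are driven once per 10 ms (or 20 ms) frame. The following is a minimal usage sketch, not part of the commit, following the call order described in the comments (create, init, configure, then AddFarend/AddMic/Process per frame, finally free); the 0..255 mic level range, the adaptive analog mode, and the buffer handling here are illustrative, and exact integration (e.g. use of WebRtcAgc_VirtualMic in adaptive digital mode) depends on the surrounding capture pipeline.

/* Standalone sketch (not part of the commit): typical call sequence for the
 * gain_control.h API at 16 kHz, adaptive analog mode, 10 ms frames. */
#include <string.h>
#include "gain_control.h"

int run_agc_frame_loop(const WebRtc_Word16 *near_frames,
                       const WebRtc_Word16 *far_frames,
                       int num_frames)
{
    void *agc = NULL;
    WebRtcAgc_config_t cfg;
    WebRtc_Word32 micLevelIn = 128, micLevelOut = 128; /* illustrative 0..255 range */
    WebRtc_UWord8 saturationWarning = 0;
    WebRtc_Word16 out[160];
    WebRtc_Word16 dummy_H[160] = {0}; /* H band unused at 16 kHz */
    int i;

    if (WebRtcAgc_Create(&agc) != 0)
        return -1;
    if (WebRtcAgc_Init(agc, 0, 255, kAgcModeAdaptiveAnalog, 16000) != 0)
        goto fail;

    cfg.targetLevelDbfs = 3;        /* -3 dBFS target, per the header comment */
    cfg.compressionGaindB = 9;
    cfg.limiterEnable = kAgcTrue;
    if (WebRtcAgc_set_config(agc, cfg) != 0)
        goto fail;

    for (i = 0; i < num_frames; i++)
    {
        const WebRtc_Word16 *far = far_frames + i * 160;  /* 10 ms at 16 kHz */
        WebRtc_Word16 mic[160];

        memcpy(mic, near_frames + i * 160, sizeof(mic));  /* AddMic may modify in place */
        if (WebRtcAgc_AddFarend(agc, far, 160) != 0 ||
            WebRtcAgc_AddMic(agc, mic, dummy_H, 160) != 0 ||
            WebRtcAgc_Process(agc, mic, dummy_H, 160, out, dummy_H, micLevelIn,
                              &micLevelOut, 0, &saturationWarning) != 0)
            goto fail;
        /* micLevelOut is the suggested new analog volume for the next frame. */
        micLevelIn = micLevelOut;
    }

    WebRtcAgc_Free(agc);
    return 0;

fail:
    WebRtcAgc_Free(agc);
    return -1;
}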