Update common_audio

Corresponds to upstream commit 524e9b043e7e86fd72353b987c9d5f6a1ebf83e1

Update notes:

 * Moved src/ to webrtc/ to easily diff against the third_party/webrtc
   in the chromium tree

 * ARM/NEON/MIPS support is not yet hooked up

 * Tests have not been copied
Author: Arun Raghavan
Date: 2015-10-13 12:16:16 +05:30
parent 9413986e79
commit c4fb4e38de
230 changed files with 11201 additions and 8656 deletions


@@ -0,0 +1,30 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- BEGIN_INCLUDE(manifest) -->
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="com.example.native_activity"
android:versionCode="1"
android:versionName="1.0">
<!-- This is the platform API where NativeActivity was introduced. -->
<uses-sdk android:minSdkVersion="8" />
<!-- This .apk has no Java code itself, so set hasCode to false. -->
<application android:label="@string/app_name" android:hasCode="false" android:debuggable="true">
<!-- Our activity is the built-in NativeActivity framework class.
This will take care of integrating with our NDK code. -->
<activity android:name="android.app.NativeActivity"
android:label="@string/app_name"
android:configChanges="orientation|keyboardHidden">
<!-- Tell NativeActivity the name of our .so -->
<meta-data android:name="android.app.lib_name"
android:value="apmtest-activity" />
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
</manifest>
<!-- END_INCLUDE(manifest) -->


@@ -0,0 +1,11 @@
# This file is automatically generated by Android Tools.
# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
#
# This file must be checked in Version Control Systems.
#
# To customize properties used by the Ant build system, use
# "build.properties" and override values to adapt the script to your
# project structure.
# Project target.
target=android-9


@@ -0,0 +1 @@
APP_PLATFORM := android-9


@@ -0,0 +1,307 @@
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
//BEGIN_INCLUDE(all)
#include <jni.h>
#include <errno.h>
#include <EGL/egl.h>
#include <GLES/gl.h>
#include <android/sensor.h>
#include <android/log.h>
#include <android_native_app_glue.h>
#define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, "native-activity", __VA_ARGS__))
#define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, "native-activity", __VA_ARGS__))
/**
* Our saved state data.
*/
struct saved_state {
float angle;
int32_t x;
int32_t y;
};
/**
* Shared state for our app.
*/
struct engine {
struct android_app* app;
ASensorManager* sensorManager;
const ASensor* accelerometerSensor;
ASensorEventQueue* sensorEventQueue;
int animating;
EGLDisplay display;
EGLSurface surface;
EGLContext context;
int32_t width;
int32_t height;
struct saved_state state;
};
/**
* Initialize an EGL context for the current display.
*/
static int engine_init_display(struct engine* engine) {
// initialize OpenGL ES and EGL
/*
* Here specify the attributes of the desired configuration.
* Below, we select an EGLConfig with at least 8 bits per color
* component compatible with on-screen windows
*/
const EGLint attribs[] = {
EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
EGL_BLUE_SIZE, 8,
EGL_GREEN_SIZE, 8,
EGL_RED_SIZE, 8,
EGL_NONE
};
EGLint w, h, dummy, format;
EGLint numConfigs;
EGLConfig config;
EGLSurface surface;
EGLContext context;
EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
eglInitialize(display, 0, 0);
/* Here, the application chooses the configuration it desires. In this
* sample, we have a very simplified selection process, where we pick
* the first EGLConfig that matches our criteria */
eglChooseConfig(display, attribs, &config, 1, &numConfigs);
/* EGL_NATIVE_VISUAL_ID is an attribute of the EGLConfig that is
* guaranteed to be accepted by ANativeWindow_setBuffersGeometry().
* As soon as we have picked an EGLConfig, we can safely reconfigure the
* ANativeWindow buffers to match, using EGL_NATIVE_VISUAL_ID. */
eglGetConfigAttrib(display, config, EGL_NATIVE_VISUAL_ID, &format);
ANativeWindow_setBuffersGeometry(engine->app->window, 0, 0, format);
surface = eglCreateWindowSurface(display, config, engine->app->window, NULL);
context = eglCreateContext(display, config, NULL, NULL);
if (eglMakeCurrent(display, surface, surface, context) == EGL_FALSE) {
LOGW("Unable to eglMakeCurrent");
return -1;
}
eglQuerySurface(display, surface, EGL_WIDTH, &w);
eglQuerySurface(display, surface, EGL_HEIGHT, &h);
engine->display = display;
engine->context = context;
engine->surface = surface;
engine->width = w;
engine->height = h;
engine->state.angle = 0;
// Initialize GL state.
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_FASTEST);
glEnable(GL_CULL_FACE);
glShadeModel(GL_SMOOTH);
glDisable(GL_DEPTH_TEST);
return 0;
}
/**
* Draw the current frame in the display.
*/
static void engine_draw_frame(struct engine* engine) {
if (engine->display == NULL) {
// No display.
return;
}
// Just fill the screen with a color.
glClearColor(((float)engine->state.x)/engine->width, engine->state.angle,
((float)engine->state.y)/engine->height, 1);
glClear(GL_COLOR_BUFFER_BIT);
eglSwapBuffers(engine->display, engine->surface);
}
/**
* Tear down the EGL context currently associated with the display.
*/
static void engine_term_display(struct engine* engine) {
if (engine->display != EGL_NO_DISPLAY) {
eglMakeCurrent(engine->display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
if (engine->context != EGL_NO_CONTEXT) {
eglDestroyContext(engine->display, engine->context);
}
if (engine->surface != EGL_NO_SURFACE) {
eglDestroySurface(engine->display, engine->surface);
}
eglTerminate(engine->display);
}
engine->animating = 0;
engine->display = EGL_NO_DISPLAY;
engine->context = EGL_NO_CONTEXT;
engine->surface = EGL_NO_SURFACE;
}
/**
* Process the next input event.
*/
static int32_t engine_handle_input(struct android_app* app, AInputEvent* event) {
struct engine* engine = (struct engine*)app->userData;
if (AInputEvent_getType(event) == AINPUT_EVENT_TYPE_MOTION) {
engine->animating = 1;
engine->state.x = AMotionEvent_getX(event, 0);
engine->state.y = AMotionEvent_getY(event, 0);
return 1;
}
return 0;
}
/**
* Process the next main command.
*/
static void engine_handle_cmd(struct android_app* app, int32_t cmd) {
struct engine* engine = (struct engine*)app->userData;
switch (cmd) {
case APP_CMD_SAVE_STATE:
// The system has asked us to save our current state. Do so.
engine->app->savedState = malloc(sizeof(struct saved_state));
*((struct saved_state*)engine->app->savedState) = engine->state;
engine->app->savedStateSize = sizeof(struct saved_state);
break;
case APP_CMD_INIT_WINDOW:
// The window is being shown, get it ready.
if (engine->app->window != NULL) {
engine_init_display(engine);
engine_draw_frame(engine);
}
break;
case APP_CMD_TERM_WINDOW:
// The window is being hidden or closed, clean it up.
engine_term_display(engine);
break;
case APP_CMD_GAINED_FOCUS:
// When our app gains focus, we start monitoring the accelerometer.
if (engine->accelerometerSensor != NULL) {
ASensorEventQueue_enableSensor(engine->sensorEventQueue,
engine->accelerometerSensor);
// We'd like to get 60 events per second (in us).
ASensorEventQueue_setEventRate(engine->sensorEventQueue,
engine->accelerometerSensor, (1000L/60)*1000);
}
break;
case APP_CMD_LOST_FOCUS:
// When our app loses focus, we stop monitoring the accelerometer.
// This is to avoid consuming battery while not being used.
if (engine->accelerometerSensor != NULL) {
ASensorEventQueue_disableSensor(engine->sensorEventQueue,
engine->accelerometerSensor);
}
// Also stop animating.
engine->animating = 0;
engine_draw_frame(engine);
break;
}
}
/**
* This is the main entry point of a native application that is using
* android_native_app_glue. It runs in its own thread, with its own
* event loop for receiving input events and doing other things.
*/
void android_main(struct android_app* state) {
struct engine engine;
// Make sure glue isn't stripped.
app_dummy();
memset(&engine, 0, sizeof(engine));
state->userData = &engine;
state->onAppCmd = engine_handle_cmd;
state->onInputEvent = engine_handle_input;
engine.app = state;
// Prepare to monitor accelerometer
engine.sensorManager = ASensorManager_getInstance();
engine.accelerometerSensor = ASensorManager_getDefaultSensor(engine.sensorManager,
ASENSOR_TYPE_ACCELEROMETER);
engine.sensorEventQueue = ASensorManager_createEventQueue(engine.sensorManager,
state->looper, LOOPER_ID_USER, NULL, NULL);
if (state->savedState != NULL) {
// We are starting with a previous saved state; restore from it.
engine.state = *(struct saved_state*)state->savedState;
}
// Loop waiting for stuff to do.
while (1) {
// Read all pending events.
int ident;
int events;
struct android_poll_source* source;
// If not animating, we will block forever waiting for events.
// If animating, we loop until all events are read, then continue
// to draw the next frame of animation.
while ((ident=ALooper_pollAll(engine.animating ? 0 : -1, NULL, &events,
(void**)&source)) >= 0) {
// Process this event.
if (source != NULL) {
source->process(state, source);
}
// If a sensor has data, process it now.
if (ident == LOOPER_ID_USER) {
if (engine.accelerometerSensor != NULL) {
ASensorEvent event;
while (ASensorEventQueue_getEvents(engine.sensorEventQueue,
&event, 1) > 0) {
LOGI("accelerometer: x=%f y=%f z=%f",
event.acceleration.x, event.acceleration.y,
event.acceleration.z);
}
}
}
// Check if we are exiting.
if (state->destroyRequested != 0) {
engine_term_display(&engine);
return;
}
}
if (engine.animating) {
// Done with events; draw next animation frame.
engine.state.angle += .01f;
if (engine.state.angle > 1) {
engine.state.angle = 0;
}
// Drawing is throttled to the screen update rate, so there
// is no need to do timing here.
engine_draw_frame(&engine);
}
}
}
//END_INCLUDE(all)


@@ -0,0 +1,4 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<string name="app_name">apmtest</string>
</resources>


@@ -0,0 +1,355 @@
function apmtest(task, testname, filepath, casenumber, legacy)
%APMTEST is a tool to process APM file sets and easily display the output.
% APMTEST(TASK, TESTNAME, FILEPATH, CASENUMBER, LEGACY) performs one of
% several TASKs:
% 'test' Processes the files to produce test output.
% 'list' Prints a list of cases in the test set, preceded by their
% CASENUMBERs.
% 'show' Uses spclab to show the test case specified by the
% CASENUMBER parameter.
%
% using a set of test files determined by TESTNAME:
% 'all' All tests.
% 'apm' The standard APM test set (default).
% 'apmm' The mobile APM test set.
% 'aec' The AEC test set.
% 'aecm' The AECM test set.
% 'agc' The AGC test set.
% 'ns' The NS test set.
% 'vad' The VAD test set.
%
% FILEPATH specifies the path to the test data files.
%
% CASENUMBER can be used to select a single test case. Omit CASENUMBER,
% or set to zero, to use all test cases.
%
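% Example calls (illustrative only; they assume a compiled process_test
% binary in the working directory and test data under 'data/'):
%   apmtest('list', 'aec');             % list the AEC test cases
%   apmtest('test', 'apm', 'data/');    % process the standard APM set
%   apmtest('show', 'apm', 'data/', 3); % display case 3 with spclab
%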
if nargin < 5 || isempty(legacy)
% Set to true to run old VQE recordings.
legacy = false;
end
if nargin < 4 || isempty(casenumber)
casenumber = 0;
end
if nargin < 3 || isempty(filepath)
filepath = 'data/';
end
if nargin < 2 || isempty(testname)
testname = 'all';
end
if nargin < 1 || isempty(task)
task = 'test';
end
if ~strcmp(task, 'test') && ~strcmp(task, 'list') && ~strcmp(task, 'show')
error(['TASK ' task ' is not recognized']);
end
if casenumber == 0 && strcmp(task, 'show')
error(['CASENUMBER must be specified for TASK ' task]);
end
inpath = [filepath 'input/'];
outpath = [filepath 'output/'];
refpath = [filepath 'reference/'];
if strcmp(testname, 'all')
tests = {'apm','apmm','aec','aecm','agc','ns','vad'};
else
tests = {testname};
end
if legacy
progname = './test';
else
progname = './process_test';
end
global farFile;
global nearFile;
global eventFile;
global delayFile;
global driftFile;
if legacy
farFile = 'vqeFar.pcm';
nearFile = 'vqeNear.pcm';
eventFile = 'vqeEvent.dat';
delayFile = 'vqeBuf.dat';
driftFile = 'vqeDrift.dat';
else
farFile = 'apm_far.pcm';
nearFile = 'apm_near.pcm';
eventFile = 'apm_event.dat';
delayFile = 'apm_delay.dat';
driftFile = 'apm_drift.dat';
end
simulateMode = false;
nErr = 0;
nCases = 0;
for i=1:length(tests)
simulateMode = false;
if strcmp(tests{i}, 'apm')
testdir = ['apm/'];
outfile = ['out'];
if legacy
opt = ['-ec 1 -agc 2 -nc 2 -vad 3'];
else
opt = ['--no_progress -hpf' ...
' -aec --drift_compensation -agc --fixed_digital' ...
' -ns --ns_moderate -vad'];
end
elseif strcmp(tests{i}, 'apm-swb')
simulateMode = true;
testdir = ['apm-swb/'];
outfile = ['out'];
if legacy
opt = ['-fs 32000 -ec 1 -agc 2 -nc 2'];
else
opt = ['--no_progress -fs 32000 -hpf' ...
' -aec --drift_compensation -agc --adaptive_digital' ...
' -ns --ns_moderate -vad'];
end
elseif strcmp(tests{i}, 'apmm')
testdir = ['apmm/'];
outfile = ['out'];
opt = ['-aec --drift_compensation -agc --fixed_digital -hpf -ns ' ...
'--ns_moderate'];
else
error(['TESTNAME ' tests{i} ' is not recognized']);
end
inpathtest = [inpath testdir];
outpathtest = [outpath testdir];
refpathtest = [refpath testdir];
if ~exist(inpathtest,'dir')
error(['Input directory ' inpathtest ' does not exist']);
end
if ~exist(refpathtest,'dir')
warning(['Reference directory ' refpathtest ' does not exist']);
end
[status, errMsg] = mkdir(outpathtest);
if (status == 0)
error(errMsg);
end
[nErr, nCases] = recurseDir(inpathtest, outpathtest, refpathtest, outfile, ...
progname, opt, simulateMode, nErr, nCases, task, casenumber, legacy);
if strcmp(task, 'test') || strcmp(task, 'show')
system(['rm ' farFile]);
system(['rm ' nearFile]);
if simulateMode == false
system(['rm ' eventFile]);
system(['rm ' delayFile]);
system(['rm ' driftFile]);
end
end
end
if ~strcmp(task, 'list')
if nErr == 0
fprintf(1, '\nAll files are bit-exact to reference\n');
else
fprintf(1, '\n%d files are NOT bit-exact to reference\n', nErr);
end
end
function [nErrOut, nCases] = recurseDir(inpath, outpath, refpath, ...
outfile, progname, opt, simulateMode, nErr, nCases, task, casenumber, ...
legacy)
global farFile;
global nearFile;
global eventFile;
global delayFile;
global driftFile;
dirs = dir(inpath);
nDirs = 0;
nErrOut = nErr;
for i=3:length(dirs) % skip . and ..
nDirs = nDirs + dirs(i).isdir;
end
if nDirs == 0
nCases = nCases + 1;
if casenumber == nCases || casenumber == 0
if strcmp(task, 'list')
fprintf([num2str(nCases) '. ' outfile '\n'])
else
vadoutfile = ['vad_' outfile '.dat'];
outfile = [outfile '.pcm'];
% Check for VAD test
vadTest = 0;
if ~isempty(findstr(opt, '-vad'))
vadTest = 1;
if legacy
opt = [opt ' ' outpath vadoutfile];
else
opt = [opt ' --vad_out_file ' outpath vadoutfile];
end
end
if exist([inpath 'vqeFar.pcm'])
system(['ln -s -f ' inpath 'vqeFar.pcm ' farFile]);
elseif exist([inpath 'apm_far.pcm'])
system(['ln -s -f ' inpath 'apm_far.pcm ' farFile]);
end
if exist([inpath 'vqeNear.pcm'])
system(['ln -s -f ' inpath 'vqeNear.pcm ' nearFile]);
elseif exist([inpath 'apm_near.pcm'])
system(['ln -s -f ' inpath 'apm_near.pcm ' nearFile]);
end
if exist([inpath 'vqeEvent.dat'])
system(['ln -s -f ' inpath 'vqeEvent.dat ' eventFile]);
elseif exist([inpath 'apm_event.dat'])
system(['ln -s -f ' inpath 'apm_event.dat ' eventFile]);
end
if exist([inpath 'vqeBuf.dat'])
system(['ln -s -f ' inpath 'vqeBuf.dat ' delayFile]);
elseif exist([inpath 'apm_delay.dat'])
system(['ln -s -f ' inpath 'apm_delay.dat ' delayFile]);
end
if exist([inpath 'vqeSkew.dat'])
system(['ln -s -f ' inpath 'vqeSkew.dat ' driftFile]);
elseif exist([inpath 'vqeDrift.dat'])
system(['ln -s -f ' inpath 'vqeDrift.dat ' driftFile]);
elseif exist([inpath 'apm_drift.dat'])
system(['ln -s -f ' inpath 'apm_drift.dat ' driftFile]);
end
if simulateMode == false
command = [progname ' -o ' outpath outfile ' ' opt];
else
if legacy
inputCmd = [' -in ' nearFile];
else
inputCmd = [' -i ' nearFile];
end
if exist([farFile])
if legacy
inputCmd = [' -if ' farFile inputCmd];
else
inputCmd = [' -ir ' farFile inputCmd];
end
end
command = [progname inputCmd ' -o ' outpath outfile ' ' opt];
end
% This prevents MATLAB from using its own C libraries.
shellcmd = ['bash -c "unset LD_LIBRARY_PATH;'];
fprintf([command '\n']);
[status, result] = system([shellcmd command '"']);
fprintf(result);
fprintf(['Reference file: ' refpath outfile '\n']);
if vadTest == 1
equal_to_ref = are_files_equal([outpath vadoutfile], ...
[refpath vadoutfile], ...
'int8');
if ~equal_to_ref
nErr = nErr + 1;
end
end
[equal_to_ref, diffvector] = are_files_equal([outpath outfile], ...
[refpath outfile], ...
'int16');
if ~equal_to_ref
nErr = nErr + 1;
end
if strcmp(task, 'show')
% Assume the last init gives the sample rate of interest.
str_idx = strfind(result, 'Sample rate:');
fs = str2num(result(str_idx(end) + 13:str_idx(end) + 17));
fprintf('Using %d Hz\n', fs);
if exist([farFile])
spclab(fs, farFile, nearFile, [refpath outfile], ...
[outpath outfile], diffvector);
%spclab(fs, diffvector);
else
spclab(fs, nearFile, [refpath outfile], [outpath outfile], ...
diffvector);
%spclab(fs, diffvector);
end
end
end
end
else
for i=3:length(dirs)
if dirs(i).isdir
[nErr, nCases] = recurseDir([inpath dirs(i).name '/'], outpath, ...
refpath,[outfile '_' dirs(i).name], progname, opt, ...
simulateMode, nErr, nCases, task, casenumber, legacy);
end
end
end
nErrOut = nErr;
function [are_equal, diffvector] = ...
are_files_equal(newfile, reffile, precision, diffvector)
are_equal = false;
diffvector = 0;
if ~exist(newfile,'file')
warning(['Output file ' newfile ' does not exist']);
return
end
if ~exist(reffile,'file')
warning(['Reference file ' reffile ' does not exist']);
return
end
fid = fopen(newfile,'rb');
new = fread(fid,inf,precision);
fclose(fid);
fid = fopen(reffile,'rb');
ref = fread(fid,inf,precision);
fclose(fid);
if length(new) ~= length(ref)
warning('Reference is not the same length as output');
minlength = min(length(new), length(ref));
new = new(1:minlength);
ref = ref(1:minlength);
end
diffvector = new - ref;
if isequal(new, ref)
fprintf([newfile ' is bit-exact to reference\n']);
are_equal = true;
else
if isempty(new)
warning([newfile ' is empty']);
return
end
snr = snrseg(new,ref,80);
fprintf('\n');
are_equal = false;
end


@@ -0,0 +1,948 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdio.h>
#include <string.h>
#ifdef WEBRTC_ANDROID
#include <sys/stat.h>
#endif
#include "gtest/gtest.h"
#include "audio_processing.h"
#include "cpu_features_wrapper.h"
#include "module_common_types.h"
#include "tick_util.h"
#ifdef WEBRTC_ANDROID
#include "external/webrtc/src/modules/audio_processing/debug.pb.h"
#else
#include "webrtc/audio_processing/debug.pb.h"
#endif
using webrtc::AudioFrame;
using webrtc::AudioProcessing;
using webrtc::EchoCancellation;
using webrtc::GainControl;
using webrtc::NoiseSuppression;
using webrtc::TickInterval;
using webrtc::TickTime;
using webrtc::audioproc::Event;
using webrtc::audioproc::Init;
using webrtc::audioproc::ReverseStream;
using webrtc::audioproc::Stream;
namespace {
// Returns true on success, false on error or end-of-file.
bool ReadMessageFromFile(FILE* file,
::google::protobuf::MessageLite* msg) {
// The "wire format" for the size is little-endian.
// Assume process_test is running on a little-endian machine.
int32_t size;
if (fread(&size, sizeof(int32_t), 1, file) != 1) {
return false;
}
if (size <= 0) {
return false;
}
size_t usize = static_cast<size_t>(size);
char array[usize];
if (fread(array, sizeof(char), usize, file) != usize) {
return false;
}
msg->Clear();
return msg->ParseFromArray(array, usize);
}
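// Note: as read above, each record in the debug dump is a 4-byte
// little-endian size field followed by that many bytes of a serialized
// protobuf message (an audioproc Event in this test).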
void PrintStat(const AudioProcessing::Statistic& stat) {
printf("%d, %d, %d\n", stat.average,
stat.maximum,
stat.minimum);
}
void usage() {
printf(
"Usage: process_test [options] [-pb PROTOBUF_FILE]\n"
" [-ir REVERSE_FILE] [-i PRIMARY_FILE] [-o OUT_FILE]\n");
printf(
"process_test is a test application for AudioProcessing.\n\n"
"When a protobuf debug file is available, specify it with -pb.\n"
"Alternately, when -ir or -i is used, the specified files will be\n"
"processed directly in a simulation mode. Otherwise the full set of\n"
"legacy test files is expected to be present in the working directory.\n");
printf("\n");
printf("Options\n");
printf("General configuration (only used for the simulation mode):\n");
printf(" -fs SAMPLE_RATE_HZ\n");
printf(" -ch CHANNELS_IN CHANNELS_OUT\n");
printf(" -rch REVERSE_CHANNELS\n");
printf("\n");
printf("Component configuration:\n");
printf(
"All components are disabled by default. Each block below begins with a\n"
"flag to enable the component with default settings. The subsequent flags\n"
"in the block are used to provide configuration settings.\n");
printf("\n -aec Echo cancellation\n");
printf(" --drift_compensation\n");
printf(" --no_drift_compensation\n");
printf(" --no_echo_metrics\n");
printf(" --no_delay_logging\n");
printf("\n -aecm Echo control mobile\n");
printf(" --aecm_echo_path_in_file FILE\n");
printf(" --aecm_echo_path_out_file FILE\n");
printf("\n -agc Gain control\n");
printf(" --analog\n");
printf(" --adaptive_digital\n");
printf(" --fixed_digital\n");
printf(" --target_level LEVEL\n");
printf(" --compression_gain GAIN\n");
printf(" --limiter\n");
printf(" --no_limiter\n");
printf("\n -hpf High pass filter\n");
printf("\n -ns Noise suppression\n");
printf(" --ns_low\n");
printf(" --ns_moderate\n");
printf(" --ns_high\n");
printf(" --ns_very_high\n");
printf("\n -vad Voice activity detection\n");
printf(" --vad_out_file FILE\n");
printf("\n");
printf("Modifiers:\n");
printf(" --noasm Disable SSE optimization.\n");
printf(" --perf Measure performance.\n");
printf(" --quiet Suppress text output.\n");
printf(" --no_progress Suppress progress.\n");
printf(" --version Print version information and exit.\n");
}
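// Example invocations (illustrative only; the file names are placeholders):
//   process_test -pb apm_debug.pb -o out.pcm
//   process_test -i near.pcm -ir far.pcm -fs 16000 -aec -ns --ns_moderate -o out.pcm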
// void function for gtest.
void void_main(int argc, char* argv[]) {
if (argc > 1 && strcmp(argv[1], "--help") == 0) {
usage();
return;
}
if (argc < 2) {
printf("Did you mean to run without arguments?\n");
printf("Try `process_test --help' for more information.\n\n");
}
AudioProcessing* apm = AudioProcessing::Create(0);
ASSERT_TRUE(apm != NULL);
WebRtc_Word8 version[1024];
WebRtc_UWord32 version_bytes_remaining = sizeof(version);
WebRtc_UWord32 version_position = 0;
const char* pb_filename = NULL;
const char* far_filename = NULL;
const char* near_filename = NULL;
const char* out_filename = NULL;
const char* vad_out_filename = NULL;
const char* aecm_echo_path_in_filename = NULL;
const char* aecm_echo_path_out_filename = NULL;
int32_t sample_rate_hz = 16000;
int32_t device_sample_rate_hz = 16000;
int num_capture_input_channels = 1;
int num_capture_output_channels = 1;
int num_render_channels = 1;
int samples_per_channel = sample_rate_hz / 100;
bool simulating = false;
bool perf_testing = false;
bool verbose = true;
bool progress = true;
//bool interleaved = true;
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "-pb") == 0) {
i++;
ASSERT_LT(i, argc) << "Specify protobuf filename after -pb";
pb_filename = argv[i];
} else if (strcmp(argv[i], "-ir") == 0) {
i++;
ASSERT_LT(i, argc) << "Specify filename after -ir";
far_filename = argv[i];
simulating = true;
} else if (strcmp(argv[i], "-i") == 0) {
i++;
ASSERT_LT(i, argc) << "Specify filename after -i";
near_filename = argv[i];
simulating = true;
} else if (strcmp(argv[i], "-o") == 0) {
i++;
ASSERT_LT(i, argc) << "Specify filename after -o";
out_filename = argv[i];
} else if (strcmp(argv[i], "-fs") == 0) {
i++;
ASSERT_LT(i, argc) << "Specify sample rate after -fs";
ASSERT_EQ(1, sscanf(argv[i], "%d", &sample_rate_hz));
samples_per_channel = sample_rate_hz / 100;
ASSERT_EQ(apm->kNoError,
apm->set_sample_rate_hz(sample_rate_hz));
} else if (strcmp(argv[i], "-ch") == 0) {
i++;
ASSERT_LT(i + 1, argc) << "Specify number of channels after -ch";
ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_input_channels));
i++;
ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_output_channels));
ASSERT_EQ(apm->kNoError,
apm->set_num_channels(num_capture_input_channels,
num_capture_output_channels));
} else if (strcmp(argv[i], "-rch") == 0) {
i++;
ASSERT_LT(i, argc) << "Specify number of channels after -rch";
ASSERT_EQ(1, sscanf(argv[i], "%d", &num_render_channels));
ASSERT_EQ(apm->kNoError,
apm->set_num_reverse_channels(num_render_channels));
} else if (strcmp(argv[i], "-aec") == 0) {
ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->enable_metrics(true));
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->enable_delay_logging(true));
} else if (strcmp(argv[i], "--drift_compensation") == 0) {
ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
// TODO(ajm): this is enabled in the VQE test app by default. Investigate
// why it can give better performance despite passing zeros.
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->enable_drift_compensation(true));
} else if (strcmp(argv[i], "--no_drift_compensation") == 0) {
ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->enable_drift_compensation(false));
} else if (strcmp(argv[i], "--no_echo_metrics") == 0) {
ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->enable_metrics(false));
} else if (strcmp(argv[i], "--no_delay_logging") == 0) {
ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->enable_delay_logging(false));
} else if (strcmp(argv[i], "-aecm") == 0) {
ASSERT_EQ(apm->kNoError, apm->echo_control_mobile()->Enable(true));
} else if (strcmp(argv[i], "--aecm_echo_path_in_file") == 0) {
i++;
ASSERT_LT(i, argc) << "Specify filename after --aecm_echo_path_in_file";
aecm_echo_path_in_filename = argv[i];
} else if (strcmp(argv[i], "--aecm_echo_path_out_file") == 0) {
i++;
ASSERT_LT(i, argc) << "Specify filename after --aecm_echo_path_out_file";
aecm_echo_path_out_filename = argv[i];
} else if (strcmp(argv[i], "-agc") == 0) {
ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
} else if (strcmp(argv[i], "--analog") == 0) {
ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
ASSERT_EQ(apm->kNoError,
apm->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
} else if (strcmp(argv[i], "--adaptive_digital") == 0) {
ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
ASSERT_EQ(apm->kNoError,
apm->gain_control()->set_mode(GainControl::kAdaptiveDigital));
} else if (strcmp(argv[i], "--fixed_digital") == 0) {
ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
ASSERT_EQ(apm->kNoError,
apm->gain_control()->set_mode(GainControl::kFixedDigital));
} else if (strcmp(argv[i], "--target_level") == 0) {
i++;
int level;
ASSERT_EQ(1, sscanf(argv[i], "%d", &level));
ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
ASSERT_EQ(apm->kNoError,
apm->gain_control()->set_target_level_dbfs(level));
} else if (strcmp(argv[i], "--compression_gain") == 0) {
i++;
int gain;
ASSERT_EQ(1, sscanf(argv[i], "%d", &gain));
ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
ASSERT_EQ(apm->kNoError,
apm->gain_control()->set_compression_gain_db(gain));
} else if (strcmp(argv[i], "--limiter") == 0) {
ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
ASSERT_EQ(apm->kNoError,
apm->gain_control()->enable_limiter(true));
} else if (strcmp(argv[i], "--no_limiter") == 0) {
ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
ASSERT_EQ(apm->kNoError,
apm->gain_control()->enable_limiter(false));
} else if (strcmp(argv[i], "-hpf") == 0) {
ASSERT_EQ(apm->kNoError, apm->high_pass_filter()->Enable(true));
} else if (strcmp(argv[i], "-ns") == 0) {
ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
} else if (strcmp(argv[i], "--ns_low") == 0) {
ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
ASSERT_EQ(apm->kNoError,
apm->noise_suppression()->set_level(NoiseSuppression::kLow));
} else if (strcmp(argv[i], "--ns_moderate") == 0) {
ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
ASSERT_EQ(apm->kNoError,
apm->noise_suppression()->set_level(NoiseSuppression::kModerate));
} else if (strcmp(argv[i], "--ns_high") == 0) {
ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
ASSERT_EQ(apm->kNoError,
apm->noise_suppression()->set_level(NoiseSuppression::kHigh));
} else if (strcmp(argv[i], "--ns_very_high") == 0) {
ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
ASSERT_EQ(apm->kNoError,
apm->noise_suppression()->set_level(NoiseSuppression::kVeryHigh));
} else if (strcmp(argv[i], "-vad") == 0) {
ASSERT_EQ(apm->kNoError, apm->voice_detection()->Enable(true));
} else if (strcmp(argv[i], "--vad_out_file") == 0) {
i++;
ASSERT_LT(i, argc) << "Specify filename after --vad_out_file";
vad_out_filename = argv[i];
} else if (strcmp(argv[i], "--noasm") == 0) {
WebRtc_GetCPUInfo = WebRtc_GetCPUInfoNoASM;
// We need to reinitialize here if components have already been enabled.
ASSERT_EQ(apm->kNoError, apm->Initialize());
} else if (strcmp(argv[i], "--perf") == 0) {
perf_testing = true;
} else if (strcmp(argv[i], "--quiet") == 0) {
verbose = false;
progress = false;
} else if (strcmp(argv[i], "--no_progress") == 0) {
progress = false;
} else if (strcmp(argv[i], "--version") == 0) {
ASSERT_EQ(apm->kNoError, apm->Version(version,
version_bytes_remaining,
version_position));
printf("%s\n", version);
return;
} else if (strcmp(argv[i], "--debug_recording") == 0) {
i++;
ASSERT_LT(i, argc) << "Specify filename after --debug_recording";
ASSERT_EQ(apm->kNoError, apm->StartDebugRecording(argv[i]));
} else {
FAIL() << "Unrecognized argument " << argv[i];
}
}
// If we're reading a protobuf file, ensure a simulation hasn't also
// been requested (which makes no sense...)
ASSERT_FALSE(pb_filename && simulating);
if (verbose) {
printf("Sample rate: %d Hz\n", sample_rate_hz);
printf("Primary channels: %d (in), %d (out)\n",
num_capture_input_channels,
num_capture_output_channels);
printf("Reverse channels: %d \n", num_render_channels);
}
const char far_file_default[] = "apm_far.pcm";
const char near_file_default[] = "apm_near.pcm";
const char out_file_default[] = "out.pcm";
const char event_filename[] = "apm_event.dat";
const char delay_filename[] = "apm_delay.dat";
const char drift_filename[] = "apm_drift.dat";
const char vad_file_default[] = "vad_out.dat";
if (!simulating) {
far_filename = far_file_default;
near_filename = near_file_default;
}
if (!out_filename) {
out_filename = out_file_default;
}
if (!vad_out_filename) {
vad_out_filename = vad_file_default;
}
FILE* pb_file = NULL;
FILE* far_file = NULL;
FILE* near_file = NULL;
FILE* out_file = NULL;
FILE* event_file = NULL;
FILE* delay_file = NULL;
FILE* drift_file = NULL;
FILE* vad_out_file = NULL;
FILE* aecm_echo_path_in_file = NULL;
FILE* aecm_echo_path_out_file = NULL;
if (pb_filename) {
pb_file = fopen(pb_filename, "rb");
ASSERT_TRUE(NULL != pb_file) << "Unable to open protobuf file "
<< pb_filename;
} else {
if (far_filename) {
far_file = fopen(far_filename, "rb");
ASSERT_TRUE(NULL != far_file) << "Unable to open far-end audio file "
<< far_filename;
}
near_file = fopen(near_filename, "rb");
ASSERT_TRUE(NULL != near_file) << "Unable to open near-end audio file "
<< near_filename;
if (!simulating) {
event_file = fopen(event_filename, "rb");
ASSERT_TRUE(NULL != event_file) << "Unable to open event file "
<< event_filename;
delay_file = fopen(delay_filename, "rb");
ASSERT_TRUE(NULL != delay_file) << "Unable to open buffer file "
<< delay_filename;
drift_file = fopen(drift_filename, "rb");
ASSERT_TRUE(NULL != drift_file) << "Unable to open drift file "
<< drift_filename;
}
}
out_file = fopen(out_filename, "wb");
ASSERT_TRUE(NULL != out_file) << "Unable to open output audio file "
<< out_filename;
int near_size_samples = 0;
if (pb_file) {
struct stat st;
stat(pb_filename, &st);
// Crude estimate, but should be good enough.
near_size_samples = st.st_size / 3 / sizeof(int16_t);
} else {
struct stat st;
stat(near_filename, &st);
near_size_samples = st.st_size / sizeof(int16_t);
}
if (apm->voice_detection()->is_enabled()) {
vad_out_file = fopen(vad_out_filename, "wb");
ASSERT_TRUE(NULL != vad_out_file) << "Unable to open VAD output file "
<< vad_out_filename;
}
if (aecm_echo_path_in_filename != NULL) {
aecm_echo_path_in_file = fopen(aecm_echo_path_in_filename, "rb");
ASSERT_TRUE(NULL != aecm_echo_path_in_file) << "Unable to open file "
<< aecm_echo_path_in_filename;
const size_t path_size =
apm->echo_control_mobile()->echo_path_size_bytes();
unsigned char echo_path[path_size];
ASSERT_EQ(path_size, fread(echo_path,
sizeof(unsigned char),
path_size,
aecm_echo_path_in_file));
EXPECT_EQ(apm->kNoError,
apm->echo_control_mobile()->SetEchoPath(echo_path, path_size));
fclose(aecm_echo_path_in_file);
aecm_echo_path_in_file = NULL;
}
if (aecm_echo_path_out_filename != NULL) {
aecm_echo_path_out_file = fopen(aecm_echo_path_out_filename, "wb");
ASSERT_TRUE(NULL != aecm_echo_path_out_file) << "Unable to open file "
<< aecm_echo_path_out_filename;
}
size_t read_count = 0;
int reverse_count = 0;
int primary_count = 0;
int near_read_samples = 0;
TickInterval acc_ticks;
AudioFrame far_frame;
far_frame._frequencyInHz = sample_rate_hz;
AudioFrame near_frame;
near_frame._frequencyInHz = sample_rate_hz;
int delay_ms = 0;
int drift_samples = 0;
int capture_level = 127;
int8_t stream_has_voice = 0;
TickTime t0 = TickTime::Now();
TickTime t1 = t0;
WebRtc_Word64 max_time_us = 0;
WebRtc_Word64 max_time_reverse_us = 0;
WebRtc_Word64 min_time_us = 1e6;
WebRtc_Word64 min_time_reverse_us = 1e6;
// TODO(ajm): Ideally we would refactor this block into separate functions,
// but for now we want to share the variables.
if (pb_file) {
Event event_msg;
while (ReadMessageFromFile(pb_file, &event_msg)) {
std::ostringstream trace_stream;
trace_stream << "Processed frames: " << reverse_count << " (reverse), "
<< primary_count << " (primary)";
SCOPED_TRACE(trace_stream.str());
if (event_msg.type() == Event::INIT) {
ASSERT_TRUE(event_msg.has_init());
const Init msg = event_msg.init();
ASSERT_TRUE(msg.has_sample_rate());
ASSERT_EQ(apm->kNoError,
apm->set_sample_rate_hz(msg.sample_rate()));
ASSERT_TRUE(msg.has_device_sample_rate());
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->set_device_sample_rate_hz(
msg.device_sample_rate()));
ASSERT_TRUE(msg.has_num_input_channels());
ASSERT_TRUE(msg.has_num_output_channels());
ASSERT_EQ(apm->kNoError,
apm->set_num_channels(msg.num_input_channels(),
msg.num_output_channels()));
ASSERT_TRUE(msg.has_num_reverse_channels());
ASSERT_EQ(apm->kNoError,
apm->set_num_reverse_channels(msg.num_reverse_channels()));
samples_per_channel = msg.sample_rate() / 100;
far_frame._frequencyInHz = msg.sample_rate();
far_frame._payloadDataLengthInSamples =
msg.num_reverse_channels() * samples_per_channel;
near_frame._frequencyInHz = msg.sample_rate();
if (verbose) {
printf("Init at frame: %d (primary), %d (reverse)\n",
primary_count, reverse_count);
printf(" Sample rate: %d Hz\n", sample_rate_hz);
}
} else if (event_msg.type() == Event::REVERSE_STREAM) {
ASSERT_TRUE(event_msg.has_reverse_stream());
const ReverseStream msg = event_msg.reverse_stream();
reverse_count++;
ASSERT_TRUE(msg.has_data());
ASSERT_EQ(sizeof(int16_t) * far_frame._payloadDataLengthInSamples,
msg.data().size());
memcpy(far_frame._payloadData, msg.data().data(), msg.data().size());
if (perf_testing) {
t0 = TickTime::Now();
}
ASSERT_EQ(apm->kNoError,
apm->AnalyzeReverseStream(&far_frame));
if (perf_testing) {
t1 = TickTime::Now();
TickInterval tick_diff = t1 - t0;
acc_ticks += tick_diff;
if (tick_diff.Microseconds() > max_time_reverse_us) {
max_time_reverse_us = tick_diff.Microseconds();
}
if (tick_diff.Microseconds() < min_time_reverse_us) {
min_time_reverse_us = tick_diff.Microseconds();
}
}
} else if (event_msg.type() == Event::STREAM) {
ASSERT_TRUE(event_msg.has_stream());
const Stream msg = event_msg.stream();
primary_count++;
near_frame._audioChannel = apm->num_input_channels();
near_frame._payloadDataLengthInSamples =
apm->num_input_channels() * samples_per_channel;
ASSERT_TRUE(msg.has_input_data());
ASSERT_EQ(sizeof(int16_t) * near_frame._payloadDataLengthInSamples,
msg.input_data().size());
memcpy(near_frame._payloadData,
msg.input_data().data(),
msg.input_data().size());
near_read_samples += near_frame._payloadDataLengthInSamples;
if (progress && primary_count % 100 == 0) {
printf("%.0f%% complete\r",
(near_read_samples * 100.0) / near_size_samples);
fflush(stdout);
}
if (perf_testing) {
t0 = TickTime::Now();
}
ASSERT_EQ(apm->kNoError,
apm->gain_control()->set_stream_analog_level(msg.level()));
ASSERT_EQ(apm->kNoError,
apm->set_stream_delay_ms(msg.delay()));
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->set_stream_drift_samples(msg.drift()));
int err = apm->ProcessStream(&near_frame);
if (err == apm->kBadStreamParameterWarning) {
printf("Bad parameter warning. %s\n", trace_stream.str().c_str());
}
ASSERT_TRUE(err == apm->kNoError ||
err == apm->kBadStreamParameterWarning);
capture_level = apm->gain_control()->stream_analog_level();
stream_has_voice =
static_cast<int8_t>(apm->voice_detection()->stream_has_voice());
if (vad_out_file != NULL) {
ASSERT_EQ(1u, fwrite(&stream_has_voice,
sizeof(stream_has_voice),
1,
vad_out_file));
}
if (apm->gain_control()->mode() != GainControl::kAdaptiveAnalog) {
ASSERT_EQ(msg.level(), capture_level);
}
if (perf_testing) {
t1 = TickTime::Now();
TickInterval tick_diff = t1 - t0;
acc_ticks += tick_diff;
if (tick_diff.Microseconds() > max_time_us) {
max_time_us = tick_diff.Microseconds();
}
if (tick_diff.Microseconds() < min_time_us) {
min_time_us = tick_diff.Microseconds();
}
}
ASSERT_EQ(near_frame._payloadDataLengthInSamples,
fwrite(near_frame._payloadData,
sizeof(int16_t),
near_frame._payloadDataLengthInSamples,
out_file));
}
}
ASSERT_TRUE(feof(pb_file));
} else {
enum Events {
kInitializeEvent,
kRenderEvent,
kCaptureEvent,
kResetEventDeprecated
};
int16_t event = 0;
while (simulating || feof(event_file) == 0) {
std::ostringstream trace_stream;
trace_stream << "Processed frames: " << reverse_count << " (reverse), "
<< primary_count << " (primary)";
SCOPED_TRACE(trace_stream.str());
if (simulating) {
if (far_file == NULL) {
event = kCaptureEvent;
} else {
if (event == kRenderEvent) {
event = kCaptureEvent;
} else {
event = kRenderEvent;
}
}
} else {
read_count = fread(&event, sizeof(event), 1, event_file);
if (read_count != 1) {
break;
}
}
if (event == kInitializeEvent || event == kResetEventDeprecated) {
ASSERT_EQ(1u,
fread(&sample_rate_hz, sizeof(sample_rate_hz), 1, event_file));
samples_per_channel = sample_rate_hz / 100;
ASSERT_EQ(1u,
fread(&device_sample_rate_hz,
sizeof(device_sample_rate_hz),
1,
event_file));
ASSERT_EQ(apm->kNoError,
apm->set_sample_rate_hz(sample_rate_hz));
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->set_device_sample_rate_hz(
device_sample_rate_hz));
far_frame._frequencyInHz = sample_rate_hz;
near_frame._frequencyInHz = sample_rate_hz;
if (verbose) {
printf("Init at frame: %d (primary), %d (reverse)\n",
primary_count, reverse_count);
printf(" Sample rate: %d Hz\n", sample_rate_hz);
}
} else if (event == kRenderEvent) {
reverse_count++;
far_frame._audioChannel = num_render_channels;
far_frame._payloadDataLengthInSamples =
num_render_channels * samples_per_channel;
read_count = fread(far_frame._payloadData,
sizeof(WebRtc_Word16),
far_frame._payloadDataLengthInSamples,
far_file);
if (simulating) {
if (read_count != far_frame._payloadDataLengthInSamples) {
// Read an equal amount from the near file to avoid errors due to
// not reaching end-of-file.
EXPECT_EQ(0, fseek(near_file, read_count * sizeof(WebRtc_Word16),
SEEK_CUR));
break; // This is expected.
}
} else {
ASSERT_EQ(read_count,
far_frame._payloadDataLengthInSamples);
}
if (perf_testing) {
t0 = TickTime::Now();
}
ASSERT_EQ(apm->kNoError,
apm->AnalyzeReverseStream(&far_frame));
if (perf_testing) {
t1 = TickTime::Now();
TickInterval tick_diff = t1 - t0;
acc_ticks += tick_diff;
if (tick_diff.Microseconds() > max_time_reverse_us) {
max_time_reverse_us = tick_diff.Microseconds();
}
if (tick_diff.Microseconds() < min_time_reverse_us) {
min_time_reverse_us = tick_diff.Microseconds();
}
}
} else if (event == kCaptureEvent) {
primary_count++;
near_frame._audioChannel = num_capture_input_channels;
near_frame._payloadDataLengthInSamples =
num_capture_input_channels * samples_per_channel;
read_count = fread(near_frame._payloadData,
sizeof(WebRtc_Word16),
near_frame._payloadDataLengthInSamples,
near_file);
near_read_samples += read_count;
if (progress && primary_count % 100 == 0) {
printf("%.0f%% complete\r",
(near_read_samples * 100.0) / near_size_samples);
fflush(stdout);
}
if (simulating) {
if (read_count != near_frame._payloadDataLengthInSamples) {
break; // This is expected.
}
delay_ms = 0;
drift_samples = 0;
} else {
ASSERT_EQ(read_count,
near_frame._payloadDataLengthInSamples);
// TODO(ajm): sizeof(delay_ms) for current files?
ASSERT_EQ(1u,
fread(&delay_ms, 2, 1, delay_file));
ASSERT_EQ(1u,
fread(&drift_samples, sizeof(drift_samples), 1, drift_file));
}
if (perf_testing) {
t0 = TickTime::Now();
}
// TODO(ajm): fake an analog gain while simulating.
int capture_level_in = capture_level;
ASSERT_EQ(apm->kNoError,
apm->gain_control()->set_stream_analog_level(capture_level));
ASSERT_EQ(apm->kNoError,
apm->set_stream_delay_ms(delay_ms));
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->set_stream_drift_samples(drift_samples));
int err = apm->ProcessStream(&near_frame);
if (err == apm->kBadStreamParameterWarning) {
printf("Bad parameter warning. %s\n", trace_stream.str().c_str());
}
ASSERT_TRUE(err == apm->kNoError ||
err == apm->kBadStreamParameterWarning);
capture_level = apm->gain_control()->stream_analog_level();
stream_has_voice =
static_cast<int8_t>(apm->voice_detection()->stream_has_voice());
if (vad_out_file != NULL) {
ASSERT_EQ(1u, fwrite(&stream_has_voice,
sizeof(stream_has_voice),
1,
vad_out_file));
}
if (apm->gain_control()->mode() != GainControl::kAdaptiveAnalog) {
ASSERT_EQ(capture_level_in, capture_level);
}
if (perf_testing) {
t1 = TickTime::Now();
TickInterval tick_diff = t1 - t0;
acc_ticks += tick_diff;
if (tick_diff.Microseconds() > max_time_us) {
max_time_us = tick_diff.Microseconds();
}
if (tick_diff.Microseconds() < min_time_us) {
min_time_us = tick_diff.Microseconds();
}
}
ASSERT_EQ(near_frame._payloadDataLengthInSamples,
fwrite(near_frame._payloadData,
sizeof(WebRtc_Word16),
near_frame._payloadDataLengthInSamples,
out_file));
}
else {
FAIL() << "Event " << event << " is unrecognized";
}
}
}
printf("100%% complete\r");
if (aecm_echo_path_out_file != NULL) {
const size_t path_size =
apm->echo_control_mobile()->echo_path_size_bytes();
unsigned char echo_path[path_size];
apm->echo_control_mobile()->GetEchoPath(echo_path, path_size);
ASSERT_EQ(path_size, fwrite(echo_path,
sizeof(unsigned char),
path_size,
aecm_echo_path_out_file));
fclose(aecm_echo_path_out_file);
aecm_echo_path_out_file = NULL;
}
if (verbose) {
printf("\nProcessed frames: %d (primary), %d (reverse)\n",
primary_count, reverse_count);
if (apm->echo_cancellation()->are_metrics_enabled()) {
EchoCancellation::Metrics metrics;
apm->echo_cancellation()->GetMetrics(&metrics);
printf("\n--Echo metrics--\n");
printf("(avg, max, min)\n");
printf("ERL: ");
PrintStat(metrics.echo_return_loss);
printf("ERLE: ");
PrintStat(metrics.echo_return_loss_enhancement);
printf("ANLP: ");
PrintStat(metrics.a_nlp);
}
if (apm->echo_cancellation()->is_delay_logging_enabled()) {
int median = 0;
int std = 0;
apm->echo_cancellation()->GetDelayMetrics(&median, &std);
printf("\n--Delay metrics--\n");
printf("Median: %3d\n", median);
printf("Standard deviation: %3d\n", std);
}
}
if (!pb_file) {
int8_t temp_int8;
if (far_file) {
read_count = fread(&temp_int8, sizeof(temp_int8), 1, far_file);
EXPECT_NE(0, feof(far_file)) << "Far-end file not fully processed";
}
read_count = fread(&temp_int8, sizeof(temp_int8), 1, near_file);
EXPECT_NE(0, feof(near_file)) << "Near-end file not fully processed";
if (!simulating) {
read_count = fread(&temp_int8, sizeof(temp_int8), 1, event_file);
EXPECT_NE(0, feof(event_file)) << "Event file not fully processed";
read_count = fread(&temp_int8, sizeof(temp_int8), 1, delay_file);
EXPECT_NE(0, feof(delay_file)) << "Delay file not fully processed";
read_count = fread(&temp_int8, sizeof(temp_int8), 1, drift_file);
EXPECT_NE(0, feof(drift_file)) << "Drift file not fully processed";
}
}
if (perf_testing) {
if (primary_count > 0) {
WebRtc_Word64 exec_time = acc_ticks.Milliseconds();
printf("\nTotal time: %.3f s, file time: %.2f s\n",
exec_time * 0.001, primary_count * 0.01);
printf("Time per frame: %.3f ms (average), %.3f ms (max),"
" %.3f ms (min)\n",
(exec_time * 1.0) / primary_count,
(max_time_us + max_time_reverse_us) / 1000.0,
(min_time_us + min_time_reverse_us) / 1000.0);
} else {
printf("Warning: no capture frames\n");
}
}
AudioProcessing::Destroy(apm);
apm = NULL;
}
} // namespace
int main(int argc, char* argv[])
{
void_main(argc, argv);
// Optional, but removes memory leak noise from Valgrind.
google::protobuf::ShutdownProtobufLibrary();
return 0;
}

File diff suppressed because it is too large.


@@ -0,0 +1,50 @@
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package webrtc.audioproc;
message Test {
optional int32 num_reverse_channels = 1;
optional int32 num_input_channels = 2;
optional int32 num_output_channels = 3;
optional int32 sample_rate = 4;
message Frame {
}
repeated Frame frame = 5;
optional int32 analog_level_average = 6;
optional int32 max_output_average = 7;
optional int32 has_echo_count = 8;
optional int32 has_voice_count = 9;
optional int32 is_saturated_count = 10;
message Statistic {
optional int32 instant = 1;
optional int32 average = 2;
optional int32 maximum = 3;
optional int32 minimum = 4;
}
message EchoMetrics {
optional Statistic residual_echo_return_loss = 1;
optional Statistic echo_return_loss = 2;
optional Statistic echo_return_loss_enhancement = 3;
optional Statistic a_nlp = 4;
}
optional EchoMetrics echo_metrics = 11;
message DelayMetrics {
optional int32 median = 1;
optional int32 std = 2;
}
optional DelayMetrics delay_metrics = 12;
}
message OutputData {
repeated Test test = 1;
}