Update audio_processing module

Corresponds to upstream commit 524e9b043e7e86fd72353b987c9d5f6a1ebf83e1

Update notes:

 * Pull in third party license file

 * Replace .gypi files with BUILD.gn to keep track of what changes
   upstream

 * Bunch of new files pulled in as dependencies

 * Won't build yet due to changes needed on top of these
Arun Raghavan 2015-10-13 17:25:22 +05:30
parent 5ae7a5d6cd
commit 753eada3aa
324 changed files with 52533 additions and 16117 deletions

configure.ac

@@ -24,8 +24,8 @@ AS_CASE(["x${with_ns_mode}"],
   [NS_FIXED=0])
 AM_CONDITIONAL(NS_FIXED, [test "x${NS_FIXED}" = "x1"])
-COMMON_CFLAGS="-DNDEBUG -I\$(top_srcdir)"
-COMMON_CXXFLAGS="-std=c++11 -DNDEBUG -I\$(top_srcdir)"
+COMMON_CFLAGS="-DWEBRTC_POSIX -DWEBRTC_LINUX -DNDEBUG -I\$(top_srcdir)"
+COMMON_CXXFLAGS="-std=c++11 -DWEBRTC_POSIX -DWEBRTC_LINUX -DNDEBUG -I\$(top_srcdir)"
 AC_SUBST([COMMON_CFLAGS])
 AC_SUBST([COMMON_CXXFLAGS])
@@ -33,15 +33,12 @@ AC_CONFIG_FILES([
 webrtc-audio-processing.pc
 Makefile
 webrtc/Makefile
+webrtc/base/Makefile
 webrtc/common_audio/Makefile
 webrtc/system_wrappers/Makefile
 webrtc/modules/Makefile
+webrtc/modules/audio_coding/Makefile
 webrtc/modules/audio_processing/Makefile
-webrtc/modules/audio_processing/utility/Makefile
-webrtc/modules/audio_processing/ns/Makefile
-webrtc/modules/audio_processing/aec/Makefile
-webrtc/modules/audio_processing/aecm/Makefile
-webrtc/modules/audio_processing/agc/Makefile
 ])
 AC_OUTPUT
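
For context (not part of the diff): the imported upstream sources select their platform backends from exactly these preprocessor symbols, so they have to come in through the compiler flags. A minimal illustration of the pattern used throughout the new files:

#if defined(WEBRTC_POSIX)
#include <pthread.h>    // POSIX backends (mutexes, condition variables, events)
#endif
#if defined(WEBRTC_LINUX)
#include <sys/prctl.h>  // Linux-only facilities such as prctl(PR_SET_NAME)
#endif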

webrtc/BUILD.gn Normal file

@ -0,0 +1,281 @@
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# TODO(kjellander): Rebase this to webrtc/build/common.gypi changes after r6330.
import("//build/config/crypto.gni")
import("//build/config/linux/pkg_config.gni")
import("build/webrtc.gni")
import("//third_party/protobuf/proto_library.gni")
# Contains the defines and includes in common.gypi that are duplicated both as
# target_defaults and direct_dependent_settings.
config("common_inherited_config") {
defines = []
if (build_with_mozilla) {
defines += [ "WEBRTC_MOZILLA_BUILD" ]
}
if (build_with_chromium) {
defines = [ "WEBRTC_CHROMIUM_BUILD" ]
include_dirs = [
# The overrides must be included first as that is the mechanism for
# selecting the override headers in Chromium.
"../webrtc_overrides",
# Allow includes to be prefixed with webrtc/ in case it is not an
# immediate subdirectory of the top-level.
"..",
]
}
if (is_posix) {
defines += [ "WEBRTC_POSIX" ]
}
if (is_ios) {
defines += [
"WEBRTC_MAC",
"WEBRTC_IOS",
]
}
if (is_ios && rtc_use_objc_h264) {
defines += [ "WEBRTC_OBJC_H264" ]
}
if (is_linux) {
defines += [ "WEBRTC_LINUX" ]
}
if (is_mac) {
defines += [ "WEBRTC_MAC" ]
}
if (is_win) {
defines += [ "WEBRTC_WIN" ]
}
if (is_android) {
defines += [
"WEBRTC_LINUX",
"WEBRTC_ANDROID",
]
}
}
if (rtc_have_dbus_glib) {
pkg_config("dbus-glib") {
packages = [ "dbus-glib-1" ]
}
}
config("common_config") {
cflags = []
cflags_cc = []
if (rtc_restrict_logging) {
defines = [ "WEBRTC_RESTRICT_LOGGING" ]
}
if (rtc_have_dbus_glib) {
defines += [ "HAVE_DBUS_GLIB" ]
# TODO(kjellander): Investigate this, it seems like include <dbus/dbus.h>
# is still not found even if the execution of
# build/config/linux/pkg-config.py dbus-glib-1 returns correct include
# dirs on Linux.
all_dependent_configs = [ "dbus-glib" ]
}
if (build_with_chromium) {
defines += [ "LOGGING_INSIDE_WEBRTC" ]
} else {
if (is_posix) {
# -Wextra is currently disabled in Chromium's common.gypi. Enable
# for targets that can handle it. For Android/arm64 right now
# there will be an "enumeral and non-enumeral type in conditional
# expression" warning in android_tools/ndk_experimental's version
# of stlport.
# See: https://code.google.com/p/chromium/issues/detail?id=379699
if (current_cpu != "arm64" || !is_android) {
cflags = [
"-Wextra",
# We need to repeat some flags from Chromium's common.gypi
# here that get overridden by -Wextra.
"-Wno-unused-parameter",
"-Wno-missing-field-initializers",
"-Wno-strict-overflow",
]
cflags_cc = [
"-Wnon-virtual-dtor",
# This is enabled for clang; enable for gcc as well.
"-Woverloaded-virtual",
]
}
}
if (is_clang) {
cflags += [ "-Wthread-safety" ]
}
}
if (current_cpu == "arm64") {
defines += [ "WEBRTC_ARCH_ARM64" ]
defines += [ "WEBRTC_HAS_NEON" ]
}
if (current_cpu == "arm") {
defines += [ "WEBRTC_ARCH_ARM" ]
if (arm_version >= 7) {
defines += [ "WEBRTC_ARCH_ARM_V7" ]
if (arm_use_neon) {
defines += [ "WEBRTC_HAS_NEON" ]
} else if (arm_optionally_use_neon) {
defines += [ "WEBRTC_DETECT_NEON" ]
}
}
}
if (current_cpu == "mipsel") {
defines += [ "MIPS32_LE" ]
if (mips_float_abi == "hard") {
defines += [ "MIPS_FPU_LE" ]
}
if (mips_arch_variant == "r2") {
defines += [ "MIPS32_R2_LE" ]
}
if (mips_dsp_rev == 1) {
defines += [ "MIPS_DSP_R1_LE" ]
} else if (mips_dsp_rev == 2) {
defines += [
"MIPS_DSP_R1_LE",
"MIPS_DSP_R2_LE",
]
}
}
if (is_android && !is_clang) {
# The Android NDK doesn't provide optimized versions of these
# functions. Ensure they are disabled for all compilers.
cflags += [
"-fno-builtin-cos",
"-fno-builtin-sin",
"-fno-builtin-cosf",
"-fno-builtin-sinf",
]
}
}
source_set("webrtc") {
sources = [
"call.h",
"config.h",
"frame_callback.h",
"transport.h",
]
defines = []
configs += [ ":common_config" ]
public_configs = [ ":common_inherited_config" ]
deps = [
"audio",
":webrtc_common",
"base:rtc_base",
"call",
"common_audio",
"common_video",
"modules/audio_coding",
"modules/audio_conference_mixer",
"modules/audio_device",
"modules/audio_processing",
"modules/bitrate_controller",
"modules/desktop_capture",
"modules/media_file",
"modules/rtp_rtcp",
"modules/utility",
"modules/video_coding",
"modules/video_processing",
"system_wrappers",
"tools",
"video",
"voice_engine",
]
if (build_with_chromium) {
deps += [
"modules/video_capture",
"modules/video_render",
]
}
if (rtc_enable_protobuf) {
defines += [ "ENABLE_RTC_EVENT_LOG" ]
deps += [ ":rtc_event_log_proto" ]
}
}
if (!build_with_chromium) {
executable("webrtc_tests") {
testonly = true
deps = [
":webrtc",
"modules/video_render:video_render_internal_impl",
"modules/video_capture:video_capture_internal_impl",
"test",
]
}
}
source_set("webrtc_common") {
sources = [
"common_types.cc",
"common_types.h",
"config.cc",
"config.h",
"engine_configurations.h",
"typedefs.h",
]
configs += [ ":common_config" ]
public_configs = [ ":common_inherited_config" ]
}
source_set("gtest_prod") {
sources = [
"test/testsupport/gtest_prod_util.h",
]
}
if (rtc_enable_protobuf) {
proto_library("rtc_event_log_proto") {
sources = [
"call/rtc_event_log.proto",
]
proto_out_dir = "webrtc/call"
}
}
source_set("rtc_event_log") {
sources = [
"call/rtc_event_log.cc",
"call/rtc_event_log.h",
]
defines = []
configs += [ ":common_config" ]
public_configs = [ ":common_inherited_config" ]
deps = [
":webrtc_common",
]
if (rtc_enable_protobuf) {
defines += [ "ENABLE_RTC_EVENT_LOG" ]
deps += [ ":rtc_event_log_proto" ]
}
if (is_clang && !is_nacl) {
# Suppress warnings from Chrome's Clang plugins.
# See http://code.google.com/p/webrtc/issues/detail?id=163 for details.
configs -= [ "//build/config/clang:find_bad_constructs" ]
}
}

webrtc/LICENSE_THIRD_PARTY Normal file

@ -0,0 +1,419 @@
This source tree contains third party source code which is governed by third
party licenses. Paths to the files and associated licenses are collected here.
Files governed by third party licenses:
common_audio/fft4g.c
common_audio/signal_processing/spl_sqrt_floor.c
common_audio/signal_processing/spl_sqrt_floor_arm.S
modules/audio_coding/codecs/g711/main/source/g711.c
modules/audio_coding/codecs/g711/main/source/g711.h
modules/audio_coding/codecs/g722/main/source/g722_decode.c
modules/audio_coding/codecs/g722/main/source/g722_enc_dec.h
modules/audio_coding/codecs/g722/main/source/g722_encode.c
modules/audio_coding/codecs/isac/main/source/fft.c
modules/audio_device/mac/portaudio/pa_memorybarrier.h
modules/audio_device/mac/portaudio/pa_ringbuffer.c
modules/audio_device/mac/portaudio/pa_ringbuffer.h
modules/audio_processing/aec/aec_rdft.c
system_wrappers/source/condition_variable_event_win.cc
system_wrappers/source/set_thread_name_win.h
system_wrappers/source/spreadsortlib/constants.hpp
system_wrappers/source/spreadsortlib/spreadsort.hpp
Individual licenses for each file:
-------------------------------------------------------------------------------
Files:
common_audio/signal_processing/spl_sqrt_floor.c
common_audio/signal_processing/spl_sqrt_floor_arm.S
License:
/*
* Written by Wilco Dijkstra, 1996. The following email exchange establishes the
* license.
*
* From: Wilco Dijkstra <Wilco.Dijkstra@ntlworld.com>
* Date: Fri, Jun 24, 2011 at 3:20 AM
* Subject: Re: sqrt routine
* To: Kevin Ma <kma@google.com>
* Hi Kevin,
* Thanks for asking. Those routines are public domain (originally posted to
* comp.sys.arm a long time ago), so you can use them freely for any purpose.
* Cheers,
* Wilco
*
* ----- Original Message -----
* From: "Kevin Ma" <kma@google.com>
* To: <Wilco.Dijkstra@ntlworld.com>
* Sent: Thursday, June 23, 2011 11:44 PM
* Subject: Fwd: sqrt routine
* Hi Wilco,
* I saw your sqrt routine from several web sites, including
* http://www.finesse.demon.co.uk/steven/sqrt.html.
* Just wonder if there's any copyright information with your Successive
* approximation routines, or if I can freely use it for any purpose.
* Thanks.
* Kevin
*/
-------------------------------------------------------------------------------
Files:
modules/audio_coding/codecs/g711/main/source/g711.c
modules/audio_coding/codecs/g711/main/source/g711.h
License:
/*
* SpanDSP - a series of DSP components for telephony
*
* g711.h - In line A-law and u-law conversion routines
*
* Written by Steve Underwood <steveu@coppice.org>
*
* Copyright (C) 2001 Steve Underwood
*
* Despite my general liking of the GPL, I place this code in the
* public domain for the benefit of all mankind - even the slimy
* ones who might try to proprietize my work and use it to my
* detriment.
*/
-------------------------------------------------------------------------------
Files:
modules/audio_coding/codecs/g722/main/source/g722_decode.c
modules/audio_coding/codecs/g722/main/source/g722_enc_dec.h
modules/audio_coding/codecs/g722/main/source/g722_encode.c
License:
/*
* SpanDSP - a series of DSP components for telephony
*
* g722_decode.c - The ITU G.722 codec, decode part.
*
* Written by Steve Underwood <steveu@coppice.org>
*
* Copyright (C) 2005 Steve Underwood
*
* Despite my general liking of the GPL, I place my own contributions
* to this code in the public domain for the benefit of all mankind -
* even the slimy ones who might try to proprietize my work and use it
* to my detriment.
*
* Based in part on a single channel G.722 codec which is:
*
* Copyright (c) CMU 1993
* Computer Science, Speech Group
* Chengxiang Lu and Alex Hauptmann
*/
-------------------------------------------------------------------------------
Files:
modules/audio_coding/codecs/isac/main/source/fft.c
License:
/*
* Copyright(c)1995,97 Mark Olesen <olesen@me.QueensU.CA>
* Queen's Univ at Kingston (Canada)
*
* Permission to use, copy, modify, and distribute this software for
* any purpose without fee is hereby granted, provided that this
* entire notice is included in all copies of any software which is
* or includes a copy or modification of this software and in all
* copies of the supporting documentation for such software.
*
* THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTY. IN PARTICULAR, NEITHER THE AUTHOR NOR QUEEN'S
* UNIVERSITY AT KINGSTON MAKES ANY REPRESENTATION OR WARRANTY OF ANY
* KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS
* FITNESS FOR ANY PARTICULAR PURPOSE.
*
* All of which is to say that you can do what you like with this
* source code provided you don't try to sell it as your own and you
* include an unaltered copy of this message (including the
* copyright).
*
* It is also implicitly understood that bug fixes and improvements
* should make their way back to the general Internet community so
* that everyone benefits.
*/
-------------------------------------------------------------------------------
Files:
modules/audio_device/mac/portaudio/pa_memorybarrier.h
modules/audio_device/mac/portaudio/pa_ringbuffer.c
modules/audio_device/mac/portaudio/pa_ringbuffer.h
License:
/*
* $Id: pa_memorybarrier.h 1240 2007-07-17 13:05:07Z bjornroche $
* Portable Audio I/O Library
* Memory barrier utilities
*
* Author: Bjorn Roche, XO Audio, LLC
*
* This program uses the PortAudio Portable Audio Library.
* For more information see: http://www.portaudio.com
* Copyright (c) 1999-2000 Ross Bencina and Phil Burk
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files
* (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* The text above constitutes the entire PortAudio license; however,
* the PortAudio community also makes the following non-binding requests:
*
* Any person wishing to distribute modifications to the Software is
* requested to send the modifications to the original developer so that
* they can be incorporated into the canonical version. It is also
* requested that these non-binding requests be included along with the
* license above.
*/
/*
* $Id: pa_ringbuffer.c 1421 2009-11-18 16:09:05Z bjornroche $
* Portable Audio I/O Library
* Ring Buffer utility.
*
* Author: Phil Burk, http://www.softsynth.com
* modified for SMP safety on Mac OS X by Bjorn Roche
* modified for SMP safety on Linux by Leland Lucius
* also, allowed for const where possible
* modified for multiple-byte-sized data elements by Sven Fischer
*
* Note that this is safe only for a single-thread reader and a
* single-thread writer.
*
* This program uses the PortAudio Portable Audio Library.
* For more information see: http://www.portaudio.com
* Copyright (c) 1999-2000 Ross Bencina and Phil Burk
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files
* (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* The text above constitutes the entire PortAudio license; however,
* the PortAudio community also makes the following non-binding requests:
*
* Any person wishing to distribute modifications to the Software is
* requested to send the modifications to the original developer so that
* they can be incorporated into the canonical version. It is also
* requested that these non-binding requests be included along with the
* license above.
*/
-------------------------------------------------------------------------------
Files:
common_audio/fft4g.c
modules/audio_processing/aec/aec_rdft.c
License:
/*
* http://www.kurims.kyoto-u.ac.jp/~ooura/fft.html
* Copyright Takuya OOURA, 1996-2001
*
* You may use, copy, modify and distribute this code for any purpose (include
* commercial use) and without fee. Please refer to this package when you modify
* this code.
*/
-------------------------------------------------------------------------------
Files:
system_wrappers/source/condition_variable_event_win.cc
Source:
http://www1.cse.wustl.edu/~schmidt/ACE-copying.html
License:
Copyright and Licensing Information for ACE(TM), TAO(TM), CIAO(TM), DAnCE(TM),
and CoSMIC(TM)
ACE(TM), TAO(TM), CIAO(TM), DAnCE(TM), and CoSMIC(TM) (henceforth referred to
as "DOC software") are copyrighted by Douglas C. Schmidt and his research
group at Washington University, University of California, Irvine, and
Vanderbilt University, Copyright (c) 1993-2009, all rights reserved. Since DOC
software is open-source, freely available software, you are free to use,
modify, copy, and distribute--perpetually and irrevocably--the DOC software
source code and object code produced from the source, as well as copy and
distribute modified versions of this software. You must, however, include this
copyright statement along with any code built using DOC software that you
release. No copyright statement needs to be provided if you just ship binary
executables of your software products.
You can use DOC software in commercial and/or binary software releases and are
under no obligation to redistribute any of your source code that is built
using DOC software. Note, however, that you may not misappropriate the DOC
software code, such as copyrighting it yourself or claiming authorship of the
DOC software code, in a way that will prevent DOC software from being
distributed freely using an open-source development model. You needn't inform
anyone that you're using DOC software in your software, though we encourage
you to let us know so we can promote your project in the DOC software success
stories.
The ACE, TAO, CIAO, DAnCE, and CoSMIC web sites are maintained by the DOC
Group at the Institute for Software Integrated Systems (ISIS) and the Center
for Distributed Object Computing of Washington University, St. Louis for the
development of open-source software as part of the open-source software
community. Submissions are provided by the submitter ``as is'' with no
warranties whatsoever, including any warranty of merchantability,
noninfringement of third party intellectual property, or fitness for any
particular purpose. In no event shall the submitter be liable for any direct,
indirect, special, exemplary, punitive, or consequential damages, including
without limitation, lost profits, even if advised of the possibility of such
damages. Likewise, DOC software is provided as is with no warranties of any
kind, including the warranties of design, merchantability, and fitness for a
particular purpose, noninfringement, or arising from a course of dealing,
usage or trade practice. Washington University, UC Irvine, Vanderbilt
University, their employees, and students shall have no liability with respect
to the infringement of copyrights, trade secrets or any patents by DOC
software or any part thereof. Moreover, in no event will Washington
University, UC Irvine, or Vanderbilt University, their employees, or students
be liable for any lost revenue or profits or other special, indirect and
consequential damages.
DOC software is provided with no support and without any obligation on the
part of Washington University, UC Irvine, Vanderbilt University, their
employees, or students to assist in its use, correction, modification, or
enhancement. A number of companies around the world provide commercial support
for DOC software, however. DOC software is Y2K-compliant, as long as the
underlying OS platform is Y2K-compliant. Likewise, DOC software is compliant
with the new US daylight savings rule passed by Congress as "The Energy Policy
Act of 2005," which established new daylight savings times (DST) rules for the
United States that expand DST as of March 2007. Since DOC software obtains
time/date and calendaring information from operating systems users will not be
affected by the new DST rules as long as they upgrade their operating systems
accordingly.
The names ACE(TM), TAO(TM), CIAO(TM), DAnCE(TM), CoSMIC(TM), Washington
University, UC Irvine, and Vanderbilt University, may not be used to endorse
or promote products or services derived from this source without express
written permission from Washington University, UC Irvine, or Vanderbilt
University. This license grants no permission to call products or services
derived from this source ACE(TM), TAO(TM), CIAO(TM), DAnCE(TM), or CoSMIC(TM),
nor does it grant permission for the name Washington University, UC Irvine, or
Vanderbilt University to appear in their names.
-------------------------------------------------------------------------------
Files:
system_wrappers/source/set_thread_name_win.h
Source:
http://msdn.microsoft.com/en-us/cc300389.aspx#P
License:
This license governs use of code marked as “sample” or “example” available on
this web site without a license agreement, as provided under the section above
titled “NOTICE SPECIFIC TO SOFTWARE AVAILABLE ON THIS WEB SITE.” If you use
such code (the “software”), you accept this license. If you do not accept the
license, do not use the software.
1. Definitions
The terms “reproduce,” “reproduction,” “derivative works,” and “distribution”
have the same meaning here as under U.S. copyright law.
A “contribution” is the original software, or any additions or changes to the
software.
A “contributor” is any person that distributes its contribution under this
license.
“Licensed patents” are a contributors patent claims that read directly on its
contribution.
2. Grant of Rights
(A) Copyright Grant - Subject to the terms of this license, including the
license conditions and limitations in section 3, each contributor grants you a
non-exclusive, worldwide, royalty-free copyright license to reproduce its
contribution, prepare derivative works of its contribution, and distribute its
contribution or any derivative works that you create.
(B) Patent Grant - Subject to the terms of this license, including the license
conditions and limitations in section 3, each contributor grants you a
non-exclusive, worldwide, royalty-free license under its licensed patents to
make, have made, use, sell, offer for sale, import, and/or otherwise dispose
of its contribution in the software or derivative works of the contribution in
the software.
3. Conditions and Limitations
(A) No Trademark License- This license does not grant you rights to use any
contributors name, logo, or trademarks.
(B) If you bring a patent claim against any contributor over patents that you
claim are infringed by the software, your patent license from such contributor
to the software ends automatically.
(C) If you distribute any portion of the software, you must retain all
copyright, patent, trademark, and attribution notices that are present in the
software.
(D) If you distribute any portion of the software in source code form, you may
do so only under this license by including a complete copy of this license
with your distribution. If you distribute any portion of the software in
compiled or object code form, you may only do so under a license that complies
with this license.
(E) The software is licensed “as-is.” You bear the risk of using it. The
contributors give no express warranties, guarantees or conditions. You may
have additional consumer rights under your local laws which this license
cannot change. To the extent permitted under your local laws, the contributors
exclude the implied warranties of merchantability, fitness for a particular
purpose and non-infringement.
(F) Platform Limitation - The licenses granted in sections 2(A) and 2(B)
extend only to the software or derivative works that you create that run on a
Microsoft Windows operating system product.
-------------------------------------------------------------------------------
Files:
system_wrappers/source/spreadsortlib/constants.hpp
system_wrappers/source/spreadsortlib/spreadsort.hpp
License:
/*Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.*/

webrtc/Makefile.am

@@ -1 +1,3 @@
-SUBDIRS = common_audio system_wrappers modules
+SUBDIRS = base common_audio system_wrappers modules
+noinst_HEADERS = common.h

webrtc/base/BUILD.gn Normal file

@ -0,0 +1,592 @@
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("//build/config/crypto.gni")
import("//build/config/ui.gni")
import("../build/webrtc.gni")
config("rtc_base_config") {
include_dirs = [
"//third_party/jsoncpp/overrides/include",
"//third_party/jsoncpp/source/include",
]
defines = [
"FEATURE_ENABLE_SSL",
"LOGGING=1",
]
if (is_posix) {
# TODO(henrike): issue 3307, make rtc_base build without disabling
# these flags.
cflags_cc = [ "-Wno-non-virtual-dtor" ]
}
}
config("rtc_base_chromium_config") {
defines = [ "NO_MAIN_THREAD_WRAPPING" ]
}
config("openssl_config") {
defines = [
"SSL_USE_OPENSSL",
"HAVE_OPENSSL_SSL_H",
]
}
config("ios_config") {
libs = [
"CFNetwork.framework",
#"Foundation.framework", # Already included in //build/config:default_libs.
"Security.framework",
"SystemConfiguration.framework",
#"UIKit.framework", # Already included in //build/config:default_libs.
]
}
config("mac_config") {
libs = [
"Cocoa.framework",
#"Foundation.framework", # Already included in //build/config:default_libs.
#"IOKit.framework", # Already included in //build/config:default_libs.
#"Security.framework", # Already included in //build/config:default_libs.
"SystemConfiguration.framework",
]
}
config("mac_x86_config") {
libs = [
#"Carbon.framework", # Already included in //build/config:default_libs.
]
}
if (is_linux && !build_with_chromium) {
# Provides the same functionality as the //crypto:platform target, which
# WebRTC cannot use as we don't sync src/crypto from Chromium.
group("linux_system_ssl") {
if (use_openssl) {
deps = [
"//third_party/boringssl",
]
}
}
}
if (rtc_build_ssl == 0) {
config("external_ssl_library") {
assert(rtc_ssl_root != "",
"You must specify rtc_ssl_root when rtc_build_ssl==0.")
include_dirs = [ rtc_ssl_root ]
}
}
# The subset of rtc_base approved for use outside of libjingle.
static_library("rtc_base_approved") {
configs += [ "..:common_config" ]
public_configs = [ "..:common_inherited_config" ]
sources = [
"atomicops.h",
"bitbuffer.cc",
"bitbuffer.h",
"buffer.cc",
"buffer.h",
"bufferqueue.cc",
"bufferqueue.h",
"bytebuffer.cc",
"bytebuffer.h",
"byteorder.h",
"checks.cc",
"checks.h",
"criticalsection.cc",
"criticalsection.h",
"event.cc",
"event.h",
"event_tracer.cc",
"event_tracer.h",
"exp_filter.cc",
"exp_filter.h",
"md5.cc",
"md5.h",
"md5digest.cc",
"md5digest.h",
"platform_file.cc",
"platform_file.h",
"platform_thread.cc",
"platform_thread.h",
"safe_conversions.h",
"safe_conversions_impl.h",
"scoped_ptr.h",
"stringencode.cc",
"stringencode.h",
"stringutils.cc",
"stringutils.h",
"systeminfo.cc",
"systeminfo.h",
"template_util.h",
"thread_annotations.h",
"thread_checker.h",
"thread_checker_impl.cc",
"thread_checker_impl.h",
"timeutils.cc",
"timeutils.h",
"trace_event.h",
]
if (!build_with_chromium) {
sources += [
"basictypes.h",
"constructormagic.h",
"logging.cc",
"logging.h",
]
}
}
static_library("rtc_base") {
cflags = []
cflags_cc = []
libs = []
deps = [
":rtc_base_approved",
]
configs += [
"..:common_config",
":rtc_base_config",
]
public_configs = [
"..:common_inherited_config",
":rtc_base_config",
]
defines = [ "LOGGING=1" ]
sources = [
"arraysize.h",
"asyncfile.cc",
"asyncfile.h",
"asyncinvoker-inl.h",
"asyncinvoker.cc",
"asyncinvoker.h",
"asyncpacketsocket.cc",
"asyncpacketsocket.h",
"asyncresolverinterface.cc",
"asyncresolverinterface.h",
"asyncsocket.cc",
"asyncsocket.h",
"asynctcpsocket.cc",
"asynctcpsocket.h",
"asyncudpsocket.cc",
"asyncudpsocket.h",
"autodetectproxy.cc",
"autodetectproxy.h",
"base64.cc",
"base64.h",
"basicdefs.h",
"common.cc",
"common.h",
"crc32.cc",
"crc32.h",
"cryptstring.cc",
"cryptstring.h",
"diskcache.cc",
"diskcache.h",
"filerotatingstream.cc",
"filerotatingstream.h",
"fileutils.cc",
"fileutils.h",
"firewallsocketserver.cc",
"firewallsocketserver.h",
"flags.cc",
"flags.h",
"format_macros.h",
"gunit_prod.h",
"helpers.cc",
"helpers.h",
"httpbase.cc",
"httpbase.h",
"httpclient.cc",
"httpclient.h",
"httpcommon-inl.h",
"httpcommon.cc",
"httpcommon.h",
"httprequest.cc",
"httprequest.h",
"iosfilesystem.mm",
"ipaddress.cc",
"ipaddress.h",
"linked_ptr.h",
"mathutils.h",
"messagedigest.cc",
"messagedigest.h",
"messagehandler.cc",
"messagehandler.h",
"messagequeue.cc",
"messagequeue.h",
"nethelpers.cc",
"nethelpers.h",
"network.cc",
"network.h",
"nullsocketserver.h",
"pathutils.cc",
"pathutils.h",
"physicalsocketserver.cc",
"physicalsocketserver.h",
"proxydetect.cc",
"proxydetect.h",
"proxyinfo.cc",
"proxyinfo.h",
"ratelimiter.cc",
"ratelimiter.h",
"ratetracker.cc",
"ratetracker.h",
"rtccertificate.cc",
"rtccertificate.h",
"scoped_autorelease_pool.h",
"scoped_autorelease_pool.mm",
"sha1.cc",
"sha1.h",
"sha1digest.cc",
"sha1digest.h",
"signalthread.cc",
"signalthread.h",
"sigslot.cc",
"sigslot.h",
"sigslotrepeater.h",
"socket.h",
"socketadapters.cc",
"socketadapters.h",
"socketaddress.cc",
"socketaddress.h",
"socketaddresspair.cc",
"socketaddresspair.h",
"socketfactory.h",
"socketpool.cc",
"socketpool.h",
"socketserver.h",
"socketstream.cc",
"socketstream.h",
"ssladapter.cc",
"ssladapter.h",
"sslfingerprint.cc",
"sslfingerprint.h",
"sslidentity.cc",
"sslidentity.h",
"sslsocketfactory.cc",
"sslsocketfactory.h",
"sslstreamadapter.cc",
"sslstreamadapter.h",
"sslstreamadapterhelper.cc",
"sslstreamadapterhelper.h",
"stream.cc",
"stream.h",
"task.cc",
"task.h",
"taskparent.cc",
"taskparent.h",
"taskrunner.cc",
"taskrunner.h",
"thread.cc",
"thread.h",
"timing.cc",
"timing.h",
"urlencode.cc",
"urlencode.h",
"worker.cc",
"worker.h",
]
if (is_posix) {
sources += [
"unixfilesystem.cc",
"unixfilesystem.h",
]
}
if (build_with_chromium) {
sources += [
"../../webrtc_overrides/webrtc/base/logging.cc",
"../../webrtc_overrides/webrtc/base/logging.h",
]
deps += [ "..:webrtc_common" ]
if (is_win) {
sources += [ "../../webrtc_overrides/webrtc/base/win32socketinit.cc" ]
}
include_dirs = [
"../../webrtc_overrides",
"../../boringssl/src/include",
]
public_configs += [ ":rtc_base_chromium_config" ]
} else {
sources += [
"bandwidthsmoother.cc",
"bandwidthsmoother.h",
"bind.h",
"bind.h.pump",
"callback.h",
"callback.h.pump",
"fileutils_mock.h",
"genericslot.h",
"genericslot.h.pump",
"httpserver.cc",
"httpserver.h",
"json.cc",
"json.h",
"logsinks.cc",
"logsinks.h",
"mathutils.h",
"multipart.cc",
"multipart.h",
"natserver.cc",
"natserver.h",
"natsocketfactory.cc",
"natsocketfactory.h",
"nattypes.cc",
"nattypes.h",
"optionsfile.cc",
"optionsfile.h",
"profiler.cc",
"profiler.h",
"proxyserver.cc",
"proxyserver.h",
"refcount.h",
"referencecountedsingletonfactory.h",
"rollingaccumulator.h",
"scoped_ref_ptr.h",
"scopedptrcollection.h",
"sec_buffer.h",
"sharedexclusivelock.cc",
"sharedexclusivelock.h",
"sslconfig.h",
"sslroots.h",
"testclient.cc",
"testclient.h",
"transformadapter.cc",
"transformadapter.h",
"versionparsing.cc",
"versionparsing.h",
"virtualsocketserver.cc",
"virtualsocketserver.h",
"window.h",
"windowpicker.h",
"windowpickerfactory.h",
]
deps += [ "..:webrtc_common" ]
if (is_posix) {
sources += [
"latebindingsymboltable.cc",
"latebindingsymboltable.cc.def",
"latebindingsymboltable.h",
"latebindingsymboltable.h.def",
"posix.cc",
"posix.h",
]
}
if (is_linux) {
sources += [
"dbus.cc",
"dbus.h",
"libdbusglibsymboltable.cc",
"libdbusglibsymboltable.h",
"linuxfdwalk.c",
"linuxfdwalk.h",
]
}
if (is_mac) {
sources += [
"macasyncsocket.cc",
"macasyncsocket.h",
"maccocoasocketserver.h",
"maccocoasocketserver.mm",
"macsocketserver.cc",
"macsocketserver.h",
"macwindowpicker.cc",
"macwindowpicker.h",
]
}
if (is_win) {
sources += [
"diskcache_win32.cc",
"diskcache_win32.h",
"win32regkey.cc",
"win32regkey.h",
"win32socketinit.cc",
"win32socketinit.h",
"win32socketserver.cc",
"win32socketserver.h",
]
}
if (rtc_build_json) {
deps += [ "//third_party/jsoncpp" ]
} else {
include_dirs += [ rtc_jsoncpp_root ]
# When defined changes the include path for json.h to where it is
# expected to be when building json outside of the standalone build.
defines += [ "WEBRTC_EXTERNAL_JSON" ]
}
} # !build_with_chromium
# TODO(henrike): issue 3307, make rtc_base build with the Chromium default
# compiler settings.
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
if (!is_win) {
cflags += [ "-Wno-uninitialized" ]
cflags_cc += [ "-Wno-non-virtual-dtor" ]
}
if (use_openssl) {
public_configs += [ ":openssl_config" ]
if (rtc_build_ssl) {
deps += [ "//third_party/boringssl" ]
} else {
configs += [ "external_ssl_library" ]
}
sources += [
"openssl.h",
"openssladapter.cc",
"openssladapter.h",
"openssldigest.cc",
"openssldigest.h",
"opensslidentity.cc",
"opensslidentity.h",
"opensslstreamadapter.cc",
"opensslstreamadapter.h",
]
}
if (is_android) {
sources += [
"ifaddrs-android.cc",
"ifaddrs-android.h",
]
libs += [
"log",
"GLESv2",
]
}
if (is_ios) {
all_dependent_configs = [ ":ios_config" ]
sources += [
"macconversion.cc",
"macconversion.h",
]
}
if (use_x11) {
sources += [
"x11windowpicker.cc",
"x11windowpicker.h",
]
libs += [
"dl",
"rt",
"Xext",
"X11",
"Xcomposite",
"Xrender",
]
}
if (is_linux) {
libs += [
"dl",
"rt",
]
}
if (is_mac) {
sources += [
"maccocoathreadhelper.h",
"maccocoathreadhelper.mm",
"macconversion.cc",
"macconversion.h",
"macutils.cc",
"macutils.h",
]
all_dependent_configs = [ ":mac_config" ]
if (current_cpu == "x86") {
all_dependent_configs += [ ":mac_x86_config" ]
}
}
if (is_win) {
sources += [
"win32.cc",
"win32.h",
"win32filesystem.cc",
"win32filesystem.h",
"win32securityerrors.cc",
"win32window.cc",
"win32window.h",
"win32windowpicker.cc",
"win32windowpicker.h",
"winfirewall.cc",
"winfirewall.h",
"winping.cc",
"winping.h",
]
libs += [
"crypt32.lib",
"iphlpapi.lib",
"secur32.lib",
]
cflags += [
# Suppress warnings about WIN32_LEAN_AND_MEAN.
"/wd4005",
"/wd4703",
]
defines += [ "_CRT_NONSTDC_NO_DEPRECATE" ]
}
if (is_posix && is_debug) {
# The Chromium build/common.gypi defines this for all posix
# _except_ for ios & mac. We want it there as well, e.g.
# because ASSERT and friends trigger off of it.
defines += [ "_DEBUG" ]
}
if (is_ios || (is_mac && current_cpu != "x86")) {
defines += [ "CARBON_DEPRECATED=YES" ]
}
if (is_linux || is_android) {
sources += [
"linux.cc",
"linux.h",
]
}
if (is_nacl) {
deps += [ "//native_client_sdk/src/libraries/nacl_io" ]
defines += [ "timezone=_timezone" ]
}
}

webrtc/base/Makefile.am Normal file

@ -0,0 +1,27 @@
noinst_LTLIBRARIES = libbase.la
noinst_HEADERS = arraysize.h \
basictypes.h \
checks.h \
constructormagic.h \
safe_conversions.h \
safe_conversions_impl.h \
scoped_ptr.h \
template_util.h \
thread_annotations.h
libbase_la_SOURCES = criticalsection.cc \
criticalsection.h \
event.cc \
event.h \
platform_thread.cc \
platform_thread.h \
platform_file.cc \
platform_file.h \
stringutils.cc \
stringutils.h \
thread_checker.h \
thread_checker_impl.cc \
thread_checker_impl.h
libbase_la_CXXFLAGS = $(AM_CXXFLAGS) $(COMMON_CXXFLAGS)

webrtc/base/arraysize.h Normal file

@ -0,0 +1,31 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_BASE_ARRAYSIZE_H_
#define WEBRTC_BASE_ARRAYSIZE_H_
#include <stddef.h>
// This file defines the arraysize() macro and is derived from Chromium's
// base/macros.h.
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example. If you use arraysize on
// a pointer by mistake, you will get a compile-time error.
// This template function declaration is used in defining arraysize.
// Note that the function doesn't need an implementation, as we only
// use its type.
template <typename T, size_t N> char (&ArraySizeHelper(T (&array)[N]))[N];
#define arraysize(array) (sizeof(ArraySizeHelper(array)))
#endif // WEBRTC_BASE_ARRAYSIZE_H_
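
A minimal usage sketch for the arraysize() macro above (illustrative only, not part of the commit; the sample array is made up):

#include <cstdio>
#include "webrtc/base/arraysize.h"

int main() {
  static const int kRates[] = {8000, 16000, 32000, 48000};
  // Compile-time constant: 4. Applying arraysize() to an int* would not compile.
  std::printf("%zu sample rates\n", arraysize(kRates));
  return 0;
}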

webrtc/base/atomicops.h Normal file

@ -0,0 +1,68 @@
/*
* Copyright 2011 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_BASE_ATOMICOPS_H_
#define WEBRTC_BASE_ATOMICOPS_H_
#if defined(WEBRTC_WIN)
// Include winsock2.h before including <windows.h> to maintain consistency with
// win32.h. We can't include win32.h directly here since it pulls in
// headers such as basictypes.h which causes problems in Chromium where webrtc
// exists as two separate projects, webrtc and libjingle.
#include <winsock2.h>
#include <windows.h>
#endif // defined(WEBRTC_WIN)
namespace rtc {
class AtomicOps {
public:
#if defined(WEBRTC_WIN)
// Assumes sizeof(int) == sizeof(LONG), which it is on Win32 and Win64.
static int Increment(volatile int* i) {
return ::InterlockedIncrement(reinterpret_cast<volatile LONG*>(i));
}
static int Decrement(volatile int* i) {
return ::InterlockedDecrement(reinterpret_cast<volatile LONG*>(i));
}
static int AcquireLoad(volatile const int* i) {
return *i;
}
static void ReleaseStore(volatile int* i, int value) {
*i = value;
}
static int CompareAndSwap(volatile int* i, int old_value, int new_value) {
return ::InterlockedCompareExchange(reinterpret_cast<volatile LONG*>(i),
new_value,
old_value);
}
#else
static int Increment(volatile int* i) {
return __sync_add_and_fetch(i, 1);
}
static int Decrement(volatile int* i) {
return __sync_sub_and_fetch(i, 1);
}
static int AcquireLoad(volatile const int* i) {
return __atomic_load_n(i, __ATOMIC_ACQUIRE);
}
static void ReleaseStore(volatile int* i, int value) {
__atomic_store_n(i, value, __ATOMIC_RELEASE);
}
static int CompareAndSwap(volatile int* i, int old_value, int new_value) {
return __sync_val_compare_and_swap(i, old_value, new_value);
}
#endif
};
}
#endif // WEBRTC_BASE_ATOMICOPS_H_
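
A hypothetical helper (not part of the commit) showing how the AtomicOps wrappers above are meant to be used, for example as a simple reference count:

#include "webrtc/base/atomicops.h"

class RefCount {
 public:
  RefCount() : count_(0) {}
  void AddRef() { rtc::AtomicOps::Increment(&count_); }
  // Returns true when the last reference is dropped.
  bool Release() { return rtc::AtomicOps::Decrement(&count_) == 0; }

 private:
  volatile int count_;
};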

webrtc/base/basictypes.h Normal file

@ -0,0 +1,74 @@
/*
* Copyright 2004 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_BASE_BASICTYPES_H_
#define WEBRTC_BASE_BASICTYPES_H_
#include <stddef.h> // for NULL, size_t
#include <stdint.h> // for uintptr_t and (u)int_t types.
#ifdef HAVE_CONFIG_H
#include "config.h" // NOLINT
#endif
// Detect compiler is for x86 or x64.
#if defined(__x86_64__) || defined(_M_X64) || \
defined(__i386__) || defined(_M_IX86)
#define CPU_X86 1
#endif
// Detect compiler is for arm.
#if defined(__arm__) || defined(_M_ARM)
#define CPU_ARM 1
#endif
#if defined(CPU_X86) && defined(CPU_ARM)
#error CPU_X86 and CPU_ARM both defined.
#endif
#if !defined(RTC_ARCH_CPU_BIG_ENDIAN) && !defined(RTC_ARCH_CPU_LITTLE_ENDIAN)
// x86, arm or GCC provided __BYTE_ORDER__ macros
#if CPU_X86 || CPU_ARM || \
(defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define RTC_ARCH_CPU_LITTLE_ENDIAN
#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define RTC_ARCH_CPU_BIG_ENDIAN
#else
#error RTC_ARCH_CPU_BIG_ENDIAN or RTC_ARCH_CPU_LITTLE_ENDIAN should be defined.
#endif
#endif
#if defined(RTC_ARCH_CPU_BIG_ENDIAN) && defined(RTC_ARCH_CPU_LITTLE_ENDIAN)
#error RTC_ARCH_CPU_BIG_ENDIAN and RTC_ARCH_CPU_LITTLE_ENDIAN both defined.
#endif
#if defined(WEBRTC_WIN)
typedef int socklen_t;
#endif
// The following only works for C++
#ifdef __cplusplus
#ifndef ALIGNP
#define ALIGNP(p, t) \
(reinterpret_cast<uint8_t*>(((reinterpret_cast<uintptr_t>(p) + \
((t) - 1)) & ~((t) - 1))))
#endif
#define RTC_IS_ALIGNED(p, a) (!((uintptr_t)(p) & ((a) - 1)))
// Use these to declare and define a static local variable that gets leaked so
// that its destructors are not called at exit.
#define RTC_DEFINE_STATIC_LOCAL(type, name, arguments) \
static type& name = *new type arguments
#endif // __cplusplus
#endif // WEBRTC_BASE_BASICTYPES_H_
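
Two small sketches (illustrative only, not part of the commit; the function names are made up) of the macros defined above:

#include <string>
#include "webrtc/base/basictypes.h"

// Round a raw pointer up to a 16-byte boundary with ALIGNP.
uint8_t* Align16(uint8_t* p) {
  return ALIGNP(p, 16);
}

// A deliberately leaked static local: no destructor runs at process exit.
std::string& SharedName() {
  RTC_DEFINE_STATIC_LOCAL(std::string, name, ("webrtc"));
  return name;
}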

webrtc/base/criticalsection.cc Normal file

@ -0,0 +1,169 @@
/*
* Copyright 2015 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/checks.h"
namespace rtc {
CriticalSection::CriticalSection() {
#if defined(WEBRTC_WIN)
InitializeCriticalSection(&crit_);
#else
pthread_mutexattr_t mutex_attribute;
pthread_mutexattr_init(&mutex_attribute);
pthread_mutexattr_settype(&mutex_attribute, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(&mutex_, &mutex_attribute);
pthread_mutexattr_destroy(&mutex_attribute);
CS_DEBUG_CODE(thread_ = 0);
CS_DEBUG_CODE(recursion_count_ = 0);
#endif
}
CriticalSection::~CriticalSection() {
#if defined(WEBRTC_WIN)
DeleteCriticalSection(&crit_);
#else
pthread_mutex_destroy(&mutex_);
#endif
}
void CriticalSection::Enter() EXCLUSIVE_LOCK_FUNCTION() {
#if defined(WEBRTC_WIN)
EnterCriticalSection(&crit_);
#else
pthread_mutex_lock(&mutex_);
#if CS_DEBUG_CHECKS
if (!recursion_count_) {
RTC_DCHECK(!thread_);
thread_ = pthread_self();
} else {
RTC_DCHECK(CurrentThreadIsOwner());
}
++recursion_count_;
#endif
#endif
}
bool CriticalSection::TryEnter() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
#if defined(WEBRTC_WIN)
return TryEnterCriticalSection(&crit_) != FALSE;
#else
if (pthread_mutex_trylock(&mutex_) != 0)
return false;
#if CS_DEBUG_CHECKS
if (!recursion_count_) {
RTC_DCHECK(!thread_);
thread_ = pthread_self();
} else {
RTC_DCHECK(CurrentThreadIsOwner());
}
++recursion_count_;
#endif
return true;
#endif
}
void CriticalSection::Leave() UNLOCK_FUNCTION() {
RTC_DCHECK(CurrentThreadIsOwner());
#if defined(WEBRTC_WIN)
LeaveCriticalSection(&crit_);
#else
#if CS_DEBUG_CHECKS
--recursion_count_;
RTC_DCHECK(recursion_count_ >= 0);
if (!recursion_count_)
thread_ = 0;
#endif
pthread_mutex_unlock(&mutex_);
#endif
}
bool CriticalSection::CurrentThreadIsOwner() const {
#if defined(WEBRTC_WIN)
// OwningThread has type HANDLE but actually contains the Thread ID:
// http://stackoverflow.com/questions/12675301/why-is-the-owningthread-member-of-critical-section-of-type-handle-when-it-is-de
// Converting through size_t avoids the VS 2015 warning C4312: conversion from
// 'type1' to 'type2' of greater size
return crit_.OwningThread ==
reinterpret_cast<HANDLE>(static_cast<size_t>(GetCurrentThreadId()));
#else
#if CS_DEBUG_CHECKS
return pthread_equal(thread_, pthread_self());
#else
return true;
#endif // CS_DEBUG_CHECKS
#endif
}
bool CriticalSection::IsLocked() const {
#if defined(WEBRTC_WIN)
return crit_.LockCount != -1;
#else
#if CS_DEBUG_CHECKS
return thread_ != 0;
#else
return true;
#endif
#endif
}
CritScope::CritScope(CriticalSection* cs) : cs_(cs) { cs_->Enter(); }
CritScope::~CritScope() { cs_->Leave(); }
TryCritScope::TryCritScope(CriticalSection* cs)
: cs_(cs), locked_(cs->TryEnter()) {
CS_DEBUG_CODE(lock_was_called_ = false);
}
TryCritScope::~TryCritScope() {
CS_DEBUG_CODE(RTC_DCHECK(lock_was_called_));
if (locked_)
cs_->Leave();
}
bool TryCritScope::locked() const {
CS_DEBUG_CODE(lock_was_called_ = true);
return locked_;
}
void GlobalLockPod::Lock() {
#if !defined(WEBRTC_WIN)
const struct timespec ts_null = {0};
#endif
while (AtomicOps::CompareAndSwap(&lock_acquired, 0, 1)) {
#if defined(WEBRTC_WIN)
::Sleep(0);
#else
nanosleep(&ts_null, nullptr);
#endif
}
}
void GlobalLockPod::Unlock() {
int old_value = AtomicOps::CompareAndSwap(&lock_acquired, 1, 0);
RTC_DCHECK_EQ(1, old_value) << "Unlock called without calling Lock first";
}
GlobalLock::GlobalLock() {
lock_acquired = 0;
}
GlobalLockScope::GlobalLockScope(GlobalLockPod* lock)
: lock_(lock) {
lock_->Lock();
}
GlobalLockScope::~GlobalLockScope() {
lock_->Unlock();
}
} // namespace rtc

webrtc/base/criticalsection.h Normal file

@ -0,0 +1,129 @@
/*
* Copyright 2004 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_BASE_CRITICALSECTION_H_
#define WEBRTC_BASE_CRITICALSECTION_H_
#include "webrtc/base/atomicops.h"
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/thread_annotations.h"
#if defined(WEBRTC_WIN)
// Include winsock2.h before including <windows.h> to maintain consistency with
// win32.h. We can't include win32.h directly here since it pulls in
// headers such as basictypes.h which causes problems in Chromium where webrtc
// exists as two separate projects, webrtc and libjingle.
#include <winsock2.h>
#include <windows.h>
#include <sal.h> // must come after windows headers.
#endif // defined(WEBRTC_WIN)
#if defined(WEBRTC_POSIX)
#include <pthread.h>
#endif
#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
#define CS_DEBUG_CHECKS 1
#endif
#if CS_DEBUG_CHECKS
#define CS_DEBUG_CODE(x) x
#else // !CS_DEBUG_CHECKS
#define CS_DEBUG_CODE(x)
#endif // !CS_DEBUG_CHECKS
namespace rtc {
class LOCKABLE CriticalSection {
public:
CriticalSection();
~CriticalSection();
void Enter() EXCLUSIVE_LOCK_FUNCTION();
bool TryEnter() EXCLUSIVE_TRYLOCK_FUNCTION(true);
void Leave() UNLOCK_FUNCTION();
// Use only for RTC_DCHECKing.
bool CurrentThreadIsOwner() const;
// Use only for RTC_DCHECKing.
bool IsLocked() const;
private:
#if defined(WEBRTC_WIN)
CRITICAL_SECTION crit_;
#elif defined(WEBRTC_POSIX)
pthread_mutex_t mutex_;
CS_DEBUG_CODE(pthread_t thread_);
CS_DEBUG_CODE(int recursion_count_);
#endif
};
// CritScope, for serializing execution through a scope.
class SCOPED_LOCKABLE CritScope {
public:
explicit CritScope(CriticalSection* cs) EXCLUSIVE_LOCK_FUNCTION(cs);
~CritScope() UNLOCK_FUNCTION();
private:
CriticalSection* const cs_;
RTC_DISALLOW_COPY_AND_ASSIGN(CritScope);
};
// Tries to lock a critical section on construction via
// CriticalSection::TryEnter, and unlocks on destruction if the
// lock was taken. Never blocks.
//
// IMPORTANT: Unlike CritScope, the lock may not be owned by this thread in
// subsequent code. Users *must* check locked() to determine if the
// lock was taken. If you're not calling locked(), you're doing it wrong!
class TryCritScope {
public:
explicit TryCritScope(CriticalSection* cs);
~TryCritScope();
#if defined(WEBRTC_WIN)
_Check_return_ bool locked() const;
#else
bool locked() const __attribute__((warn_unused_result));
#endif
private:
CriticalSection* const cs_;
const bool locked_;
CS_DEBUG_CODE(mutable bool lock_was_called_);
RTC_DISALLOW_COPY_AND_ASSIGN(TryCritScope);
};
// A POD lock used to protect global variables. Do NOT use for other purposes.
// No custom constructor or private data member should be added.
class LOCKABLE GlobalLockPod {
public:
void Lock() EXCLUSIVE_LOCK_FUNCTION();
void Unlock() UNLOCK_FUNCTION();
volatile int lock_acquired;
};
class GlobalLock : public GlobalLockPod {
public:
GlobalLock();
};
// GlobalLockScope, for serializing execution through a scope.
class SCOPED_LOCKABLE GlobalLockScope {
public:
explicit GlobalLockScope(GlobalLockPod* lock) EXCLUSIVE_LOCK_FUNCTION(lock);
~GlobalLockScope() UNLOCK_FUNCTION();
private:
GlobalLockPod* const lock_;
RTC_DISALLOW_COPY_AND_ASSIGN(GlobalLockScope);
};
} // namespace rtc
#endif // WEBRTC_BASE_CRITICALSECTION_H_
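
A usage sketch (illustrative only, not part of the commit; the class is made up) for the two scoped lock types above. Note that TryCritScope callers must check locked():

#include "webrtc/base/criticalsection.h"

class Counter {
 public:
  void Add(int delta) {
    rtc::CritScope lock(&crit_);  // blocks until the lock is held
    value_ += delta;
  }
  bool TryAdd(int delta) {
    rtc::TryCritScope lock(&crit_);  // never blocks
    if (!lock.locked())
      return false;
    value_ += delta;
    return true;
  }

 private:
  rtc::CriticalSection crit_;
  int value_ = 0;
};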

webrtc/base/event.cc Normal file

@ -0,0 +1,135 @@
/*
* Copyright 2004 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/base/event.h"
#if defined(WEBRTC_WIN)
#include <windows.h>
#elif defined(WEBRTC_POSIX)
#include <pthread.h>
#include <sys/time.h>
#include <time.h>
#else
#error "Must define either WEBRTC_WIN or WEBRTC_POSIX."
#endif
#include "webrtc/base/checks.h"
namespace rtc {
#if defined(WEBRTC_WIN)
Event::Event(bool manual_reset, bool initially_signaled) {
event_handle_ = ::CreateEvent(NULL, // Security attributes.
manual_reset,
initially_signaled,
NULL); // Name.
RTC_CHECK(event_handle_);
}
Event::~Event() {
CloseHandle(event_handle_);
}
void Event::Set() {
SetEvent(event_handle_);
}
void Event::Reset() {
ResetEvent(event_handle_);
}
bool Event::Wait(int milliseconds) {
DWORD ms = (milliseconds == kForever) ? INFINITE : milliseconds;
return (WaitForSingleObject(event_handle_, ms) == WAIT_OBJECT_0);
}
#elif defined(WEBRTC_POSIX)
Event::Event(bool manual_reset, bool initially_signaled)
: is_manual_reset_(manual_reset),
event_status_(initially_signaled) {
RTC_CHECK(pthread_mutex_init(&event_mutex_, NULL) == 0);
RTC_CHECK(pthread_cond_init(&event_cond_, NULL) == 0);
}
Event::~Event() {
pthread_mutex_destroy(&event_mutex_);
pthread_cond_destroy(&event_cond_);
}
void Event::Set() {
pthread_mutex_lock(&event_mutex_);
event_status_ = true;
pthread_cond_broadcast(&event_cond_);
pthread_mutex_unlock(&event_mutex_);
}
void Event::Reset() {
pthread_mutex_lock(&event_mutex_);
event_status_ = false;
pthread_mutex_unlock(&event_mutex_);
}
bool Event::Wait(int milliseconds) {
pthread_mutex_lock(&event_mutex_);
int error = 0;
if (milliseconds != kForever) {
// Converting from seconds and microseconds (1e-6) plus
// milliseconds (1e-3) to seconds and nanoseconds (1e-9).
struct timespec ts;
#if HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE
// Use relative time version, which tends to be more efficient for
// pthread implementations where provided (like on Android).
ts.tv_sec = milliseconds / 1000;
ts.tv_nsec = (milliseconds % 1000) * 1000000;
#else
struct timeval tv;
gettimeofday(&tv, NULL);
ts.tv_sec = tv.tv_sec + (milliseconds / 1000);
ts.tv_nsec = tv.tv_usec * 1000 + (milliseconds % 1000) * 1000000;
// Handle overflow.
if (ts.tv_nsec >= 1000000000) {
ts.tv_sec++;
ts.tv_nsec -= 1000000000;
}
#endif
while (!event_status_ && error == 0) {
#if HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE
error = pthread_cond_timedwait_relative_np(
&event_cond_, &event_mutex_, &ts);
#else
error = pthread_cond_timedwait(&event_cond_, &event_mutex_, &ts);
#endif
}
} else {
while (!event_status_ && error == 0)
error = pthread_cond_wait(&event_cond_, &event_mutex_);
}
// NOTE(liulk): Exactly one thread will auto-reset this event. All
// the other threads will think it's unsignaled. This seems to be
// consistent with auto-reset events in WEBRTC_WIN
if (error == 0 && !is_manual_reset_)
event_status_ = false;
pthread_mutex_unlock(&event_mutex_);
return (error == 0);
}
#endif
} // namespace rtc

webrtc/base/event.h Normal file

@ -0,0 +1,53 @@
/*
* Copyright 2004 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_BASE_EVENT_H__
#define WEBRTC_BASE_EVENT_H__
#if defined(WEBRTC_WIN)
#include "webrtc/base/win32.h" // NOLINT: consider this a system header.
#elif defined(WEBRTC_POSIX)
#include <pthread.h>
#else
#error "Must define either WEBRTC_WIN or WEBRTC_POSIX."
#endif
#include "webrtc/base/basictypes.h"
namespace rtc {
class Event {
public:
static const int kForever = -1;
Event(bool manual_reset, bool initially_signaled);
~Event();
void Set();
void Reset();
// Wait for the event to become signaled, for the specified number of
// |milliseconds|. To wait indefinitely, pass kForever.
bool Wait(int milliseconds);
private:
#if defined(WEBRTC_WIN)
HANDLE event_handle_;
#elif defined(WEBRTC_POSIX)
pthread_mutex_t event_mutex_;
pthread_cond_t event_cond_;
const bool is_manual_reset_;
bool event_status_;
#endif
};
} // namespace rtc
#endif // WEBRTC_BASE_EVENT_H__
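
A usage sketch (illustrative only, not part of the commit): a manual-reset Event used as a one-shot "ready" flag shared between two threads:

#include "webrtc/base/event.h"

rtc::Event ready(true /* manual_reset */, false /* initially_signaled */);

void Producer() {
  // ... prepare shared state ...
  ready.Set();                       // wakes all current and future waiters
}

bool ConsumerWaitUpToOneSecond() {
  return ready.Wait(1000);           // false if the timeout expires first
}

void ConsumerWaitForever() {
  ready.Wait(rtc::Event::kForever);
}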

webrtc/base/platform_file.cc Normal file

@ -0,0 +1,49 @@
/*
* Copyright 2014 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/base/platform_file.h"
#if defined(WEBRTC_WIN)
#include <io.h>
#else
#include <unistd.h>
#endif
namespace rtc {
#if defined(WEBRTC_WIN)
const PlatformFile kInvalidPlatformFileValue = INVALID_HANDLE_VALUE;
FILE* FdopenPlatformFileForWriting(PlatformFile file) {
if (file == kInvalidPlatformFileValue)
return NULL;
int fd = _open_osfhandle(reinterpret_cast<intptr_t>(file), 0);
if (fd < 0)
return NULL;
return _fdopen(fd, "w");
}
bool ClosePlatformFile(PlatformFile file) {
return CloseHandle(file) != 0;
}
#else
const PlatformFile kInvalidPlatformFileValue = -1;
FILE* FdopenPlatformFileForWriting(PlatformFile file) {
return fdopen(file, "w");
}
bool ClosePlatformFile(PlatformFile file) {
return close(file);
}
#endif
} // namespace rtc

View File

@ -0,0 +1,44 @@
/*
* Copyright 2014 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_BASE_PLATFORM_FILE_H_
#define WEBRTC_BASE_PLATFORM_FILE_H_
#include <stdio.h>
#if defined(WEBRTC_WIN)
#include <windows.h>
#endif
namespace rtc {
#if defined(WEBRTC_WIN)
typedef HANDLE PlatformFile;
#elif defined(WEBRTC_POSIX)
typedef int PlatformFile;
#else
#error Unsupported platform
#endif
extern const PlatformFile kInvalidPlatformFileValue;
// Associates a standard FILE stream with an existing PlatformFile.
// Note that after this function has returned a valid FILE stream,
// the PlatformFile should no longer be used.
FILE* FdopenPlatformFileForWriting(PlatformFile file);
// Closes a PlatformFile.
// Don't use ClosePlatformFile to close a file opened with FdopenPlatformFile.
// Use fclose instead.
bool ClosePlatformFile(PlatformFile file);
} // namespace rtc
#endif // WEBRTC_BASE_PLATFORM_FILE_H_
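// Illustrative usage sketch (not part of the upstream change): wrapping a
// PlatformFile in a FILE* for writing. Per the comments above, once the
// stream has been created it owns the descriptor, so close with fclose()
// rather than ClosePlatformFile(). The function name is an arbitrary example.
#include <stdio.h>
#include "webrtc/base/platform_file.h"

bool WriteGreetingExample(rtc::PlatformFile file) {
  FILE* stream = rtc::FdopenPlatformFileForWriting(file);
  if (!stream) {
    rtc::ClosePlatformFile(file);  // Conversion failed; we still own |file|.
    return false;
  }
  fputs("hello\n", stream);
  return fclose(stream) == 0;  // Do not also call ClosePlatformFile().
}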

View File

@ -0,0 +1,82 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/base/platform_thread.h"
#include <string.h>
#include "webrtc/base/checks.h"
#if defined(WEBRTC_LINUX)
#include <sys/prctl.h>
#include <sys/syscall.h>
#endif
namespace rtc {
PlatformThreadId CurrentThreadId() {
PlatformThreadId ret;
#if defined(WEBRTC_WIN)
ret = GetCurrentThreadId();
#elif defined(WEBRTC_POSIX)
#if defined(WEBRTC_MAC) || defined(WEBRTC_IOS)
ret = pthread_mach_thread_np(pthread_self());
#elif defined(WEBRTC_LINUX)
ret = syscall(__NR_gettid);
#elif defined(WEBRTC_ANDROID)
ret = gettid();
#else
// Default implementation for nacl and solaris.
ret = reinterpret_cast<pid_t>(pthread_self());
#endif
#endif // defined(WEBRTC_POSIX)
RTC_DCHECK(ret);
return ret;
}
PlatformThreadRef CurrentThreadRef() {
#if defined(WEBRTC_WIN)
return GetCurrentThreadId();
#elif defined(WEBRTC_POSIX)
return pthread_self();
#endif
}
bool IsThreadRefEqual(const PlatformThreadRef& a, const PlatformThreadRef& b) {
#if defined(WEBRTC_WIN)
return a == b;
#elif defined(WEBRTC_POSIX)
return pthread_equal(a, b);
#endif
}
void SetCurrentThreadName(const char* name) {
RTC_DCHECK(strlen(name) < 64);
#if defined(WEBRTC_WIN)
struct {
DWORD dwType;
LPCSTR szName;
DWORD dwThreadID;
DWORD dwFlags;
} threadname_info = {0x1000, name, static_cast<DWORD>(-1), 0};
__try {
::RaiseException(0x406D1388, 0, sizeof(threadname_info) / sizeof(DWORD),
reinterpret_cast<ULONG_PTR*>(&threadname_info));
} __except (EXCEPTION_EXECUTE_HANDLER) {
}
#elif defined(WEBRTC_LINUX) || defined(WEBRTC_ANDROID)
prctl(PR_SET_NAME, reinterpret_cast<unsigned long>(name));
#elif defined(WEBRTC_MAC) || defined(WEBRTC_IOS)
pthread_setname_np(name);
#endif
}
} // namespace rtc

View File

@ -0,0 +1,43 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_BASE_PLATFORM_THREAD_H_
#define WEBRTC_BASE_PLATFORM_THREAD_H_
#if defined(WEBRTC_WIN)
#include <winsock2.h>
#include <windows.h>
#elif defined(WEBRTC_POSIX)
#include <pthread.h>
#include <unistd.h>
#endif
namespace rtc {
#if defined(WEBRTC_WIN)
typedef DWORD PlatformThreadId;
typedef DWORD PlatformThreadRef;
#elif defined(WEBRTC_POSIX)
typedef pid_t PlatformThreadId;
typedef pthread_t PlatformThreadRef;
#endif
PlatformThreadId CurrentThreadId();
PlatformThreadRef CurrentThreadRef();
// Compares two thread identifiers for equality.
bool IsThreadRefEqual(const PlatformThreadRef& a, const PlatformThreadRef& b);
// Sets the current thread name.
void SetCurrentThreadName(const char* name);
} // namespace rtc
#endif // WEBRTC_BASE_PLATFORM_THREAD_H_
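// Illustrative usage sketch (not part of the upstream change): naming the
// current thread and capturing its ref for a later identity check. The thread
// name used here is an arbitrary example.
#include "webrtc/base/platform_thread.h"

void LabelCurrentThreadExample() {
  rtc::SetCurrentThreadName("rtc_worker_example");
  const rtc::PlatformThreadRef ref = rtc::CurrentThreadRef();
  // Later, code can check whether it is still running on the same thread:
  const bool on_same_thread = rtc::IsThreadRefEqual(ref, rtc::CurrentThreadRef());
  (void)on_same_thread;
}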

View File

@ -0,0 +1,70 @@
/*
* Copyright 2014 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Borrowed from Chromium's src/base/numerics/safe_conversions.h.
#ifndef WEBRTC_BASE_SAFE_CONVERSIONS_H_
#define WEBRTC_BASE_SAFE_CONVERSIONS_H_
#include <limits>
#include "webrtc/base/checks.h"
#include "webrtc/base/safe_conversions_impl.h"
namespace rtc {
// Convenience function that returns true if the supplied value is in range
// for the destination type.
template <typename Dst, typename Src>
inline bool IsValueInRangeForNumericType(Src value) {
return internal::RangeCheck<Dst>(value) == internal::TYPE_VALID;
}
// checked_cast<> is analogous to static_cast<> for numeric types,
// except that it CHECKs that the specified numeric conversion will not
// overflow or underflow. NaN source will always trigger a CHECK.
template <typename Dst, typename Src>
inline Dst checked_cast(Src value) {
RTC_CHECK(IsValueInRangeForNumericType<Dst>(value));
return static_cast<Dst>(value);
}
// saturated_cast<> is analogous to static_cast<> for numeric types, except
// that the specified numeric conversion will saturate rather than overflow or
// underflow. NaN assignment to an integral will trigger a RTC_CHECK condition.
template <typename Dst, typename Src>
inline Dst saturated_cast(Src value) {
// Optimization for floating point values, which already saturate.
if (std::numeric_limits<Dst>::is_iec559)
return static_cast<Dst>(value);
switch (internal::RangeCheck<Dst>(value)) {
case internal::TYPE_VALID:
return static_cast<Dst>(value);
case internal::TYPE_UNDERFLOW:
return std::numeric_limits<Dst>::min();
case internal::TYPE_OVERFLOW:
return std::numeric_limits<Dst>::max();
// Should fail only on attempting to assign NaN to a saturated integer.
case internal::TYPE_INVALID:
FATAL();
return std::numeric_limits<Dst>::max();
}
FATAL();
return static_cast<Dst>(value);
}
} // namespace rtc
#endif // WEBRTC_BASE_SAFE_CONVERSIONS_H_
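// Illustrative usage sketch (not part of the upstream change): the difference
// between checked_cast (crashes on out-of-range input) and saturated_cast
// (clamps). The values below are arbitrary examples.
#include <stdint.h>
#include "webrtc/base/safe_conversions.h"

void SafeConversionsExample() {
  int16_t a = rtc::checked_cast<int16_t>(1000);   // OK: in range for int16_t.
  int8_t b = rtc::saturated_cast<int8_t>(1000);   // Clamped to 127.
  int8_t c = rtc::saturated_cast<int8_t>(-1000);  // Clamped to -128.
  // rtc::checked_cast<int8_t>(1000) would hit the RTC_CHECK and abort.
  (void)a; (void)b; (void)c;
}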

View File

@ -0,0 +1,188 @@
/*
* Copyright 2014 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Borrowed from Chromium's src/base/numerics/safe_conversions_impl.h.
#ifndef WEBRTC_BASE_SAFE_CONVERSIONS_IMPL_H_
#define WEBRTC_BASE_SAFE_CONVERSIONS_IMPL_H_
#include <limits>
namespace rtc {
namespace internal {
enum DstSign {
DST_UNSIGNED,
DST_SIGNED
};
enum SrcSign {
SRC_UNSIGNED,
SRC_SIGNED
};
enum DstRange {
OVERLAPS_RANGE,
CONTAINS_RANGE
};
// Helper templates to statically determine if our destination type can contain
// all values represented by the source type.
template <typename Dst, typename Src,
DstSign IsDstSigned = std::numeric_limits<Dst>::is_signed ?
DST_SIGNED : DST_UNSIGNED,
SrcSign IsSrcSigned = std::numeric_limits<Src>::is_signed ?
SRC_SIGNED : SRC_UNSIGNED>
struct StaticRangeCheck {};
template <typename Dst, typename Src>
struct StaticRangeCheck<Dst, Src, DST_SIGNED, SRC_SIGNED> {
typedef std::numeric_limits<Dst> DstLimits;
typedef std::numeric_limits<Src> SrcLimits;
// Compare based on max_exponent, which we must compute for integrals.
static const size_t kDstMaxExponent = DstLimits::is_iec559 ?
DstLimits::max_exponent :
(sizeof(Dst) * 8 - 1);
static const size_t kSrcMaxExponent = SrcLimits::is_iec559 ?
SrcLimits::max_exponent :
(sizeof(Src) * 8 - 1);
static const DstRange value = kDstMaxExponent >= kSrcMaxExponent ?
CONTAINS_RANGE : OVERLAPS_RANGE;
};
template <typename Dst, typename Src>
struct StaticRangeCheck<Dst, Src, DST_UNSIGNED, SRC_UNSIGNED> {
static const DstRange value = sizeof(Dst) >= sizeof(Src) ?
CONTAINS_RANGE : OVERLAPS_RANGE;
};
template <typename Dst, typename Src>
struct StaticRangeCheck<Dst, Src, DST_SIGNED, SRC_UNSIGNED> {
typedef std::numeric_limits<Dst> DstLimits;
typedef std::numeric_limits<Src> SrcLimits;
// Compare based on max_exponent, which we must compute for integrals.
static const size_t kDstMaxExponent = DstLimits::is_iec559 ?
DstLimits::max_exponent :
(sizeof(Dst) * 8 - 1);
static const size_t kSrcMaxExponent = sizeof(Src) * 8;
static const DstRange value = kDstMaxExponent >= kSrcMaxExponent ?
CONTAINS_RANGE : OVERLAPS_RANGE;
};
template <typename Dst, typename Src>
struct StaticRangeCheck<Dst, Src, DST_UNSIGNED, SRC_SIGNED> {
static const DstRange value = OVERLAPS_RANGE;
};
enum RangeCheckResult {
TYPE_VALID = 0, // Value can be represented by the destination type.
TYPE_UNDERFLOW = 1, // Value would underflow.
TYPE_OVERFLOW = 2, // Value would overflow.
TYPE_INVALID = 3 // Source value is invalid (i.e. NaN).
};
// This macro creates a RangeCheckResult from an upper and lower bound
// check by taking advantage of the fact that only NaN can be out of range in
// both directions at once.
#define BASE_NUMERIC_RANGE_CHECK_RESULT(is_in_upper_bound, is_in_lower_bound) \
RangeCheckResult(((is_in_upper_bound) ? 0 : TYPE_OVERFLOW) | \
((is_in_lower_bound) ? 0 : TYPE_UNDERFLOW))
template <typename Dst,
typename Src,
DstSign IsDstSigned = std::numeric_limits<Dst>::is_signed ?
DST_SIGNED : DST_UNSIGNED,
SrcSign IsSrcSigned = std::numeric_limits<Src>::is_signed ?
SRC_SIGNED : SRC_UNSIGNED,
DstRange IsSrcRangeContained = StaticRangeCheck<Dst, Src>::value>
struct RangeCheckImpl {};
// The following templates are for ranges that must be verified at runtime. We
// split it into checks based on signedness to avoid confusing casts and
// compiler warnings on signed and unsigned comparisons.
// Dst range always contains the result: nothing to check.
template <typename Dst, typename Src, DstSign IsDstSigned, SrcSign IsSrcSigned>
struct RangeCheckImpl<Dst, Src, IsDstSigned, IsSrcSigned, CONTAINS_RANGE> {
static RangeCheckResult Check(Src value) {
return TYPE_VALID;
}
};
// Signed to signed narrowing.
template <typename Dst, typename Src>
struct RangeCheckImpl<Dst, Src, DST_SIGNED, SRC_SIGNED, OVERLAPS_RANGE> {
static RangeCheckResult Check(Src value) {
typedef std::numeric_limits<Dst> DstLimits;
return DstLimits::is_iec559 ?
BASE_NUMERIC_RANGE_CHECK_RESULT(
value <= static_cast<Src>(DstLimits::max()),
value >= static_cast<Src>(DstLimits::max() * -1)) :
BASE_NUMERIC_RANGE_CHECK_RESULT(
value <= static_cast<Src>(DstLimits::max()),
value >= static_cast<Src>(DstLimits::min()));
}
};
// Unsigned to unsigned narrowing.
template <typename Dst, typename Src>
struct RangeCheckImpl<Dst, Src, DST_UNSIGNED, SRC_UNSIGNED, OVERLAPS_RANGE> {
static RangeCheckResult Check(Src value) {
typedef std::numeric_limits<Dst> DstLimits;
return BASE_NUMERIC_RANGE_CHECK_RESULT(
value <= static_cast<Src>(DstLimits::max()), true);
}
};
// Unsigned to signed.
template <typename Dst, typename Src>
struct RangeCheckImpl<Dst, Src, DST_SIGNED, SRC_UNSIGNED, OVERLAPS_RANGE> {
static RangeCheckResult Check(Src value) {
typedef std::numeric_limits<Dst> DstLimits;
return sizeof(Dst) > sizeof(Src) ? TYPE_VALID :
BASE_NUMERIC_RANGE_CHECK_RESULT(
value <= static_cast<Src>(DstLimits::max()), true);
}
};
// Signed to unsigned.
template <typename Dst, typename Src>
struct RangeCheckImpl<Dst, Src, DST_UNSIGNED, SRC_SIGNED, OVERLAPS_RANGE> {
static RangeCheckResult Check(Src value) {
typedef std::numeric_limits<Dst> DstLimits;
typedef std::numeric_limits<Src> SrcLimits;
// Compare based on max_exponent, which we must compute for integrals.
static const size_t kDstMaxExponent = sizeof(Dst) * 8;
static const size_t kSrcMaxExponent = SrcLimits::is_iec559 ?
SrcLimits::max_exponent :
(sizeof(Src) * 8 - 1);
return (kDstMaxExponent >= kSrcMaxExponent) ?
BASE_NUMERIC_RANGE_CHECK_RESULT(true, value >= static_cast<Src>(0)) :
BASE_NUMERIC_RANGE_CHECK_RESULT(
value <= static_cast<Src>(DstLimits::max()),
value >= static_cast<Src>(0));
}
};
template <typename Dst, typename Src>
inline RangeCheckResult RangeCheck(Src value) {
static_assert(std::numeric_limits<Src>::is_specialized,
"argument must be numeric");
static_assert(std::numeric_limits<Dst>::is_specialized,
"result must be numeric");
return RangeCheckImpl<Dst, Src>::Check(value);
}
} // namespace internal
} // namespace rtc
#endif // WEBRTC_BASE_SAFE_CONVERSIONS_IMPL_H_
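// Illustrative sketch (not part of the upstream change) of what the internal
// RangeCheck helper reports for a narrowing int -> int8_t conversion; the
// values are arbitrary examples.
#include <stdint.h>
#include "webrtc/base/safe_conversions_impl.h"

void RangeCheckExample() {
  using rtc::internal::RangeCheck;
  bool overflows = RangeCheck<int8_t>(300) == rtc::internal::TYPE_OVERFLOW;
  bool underflows = RangeCheck<int8_t>(-300) == rtc::internal::TYPE_UNDERFLOW;
  bool fits = RangeCheck<int8_t>(100) == rtc::internal::TYPE_VALID;
  (void)overflows; (void)underflows; (void)fits;
}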

133
webrtc/base/stringutils.cc Normal file
View File

@ -0,0 +1,133 @@
/*
* Copyright 2004 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/base/checks.h"
#include "webrtc/base/stringutils.h"
namespace rtc {
bool memory_check(const void* memory, int c, size_t count) {
const char* char_memory = static_cast<const char*>(memory);
char char_c = static_cast<char>(c);
for (size_t i = 0; i < count; ++i) {
if (char_memory[i] != char_c) {
return false;
}
}
return true;
}
bool string_match(const char* target, const char* pattern) {
while (*pattern) {
if (*pattern == '*') {
if (!*++pattern) {
return true;
}
while (*target) {
if ((toupper(*pattern) == toupper(*target))
&& string_match(target + 1, pattern + 1)) {
return true;
}
++target;
}
return false;
} else {
if (toupper(*pattern) != toupper(*target)) {
return false;
}
++target;
++pattern;
}
}
return !*target;
}
#if defined(WEBRTC_WIN)
int ascii_string_compare(const wchar_t* s1, const char* s2, size_t n,
CharacterTransformation transformation) {
wchar_t c1, c2;
while (true) {
if (n-- == 0) return 0;
c1 = transformation(*s1);
// Double check that characters are not UTF-8
RTC_DCHECK_LT(static_cast<unsigned char>(*s2), 128);
// Note: *s2 gets implicitly promoted to wchar_t
c2 = transformation(*s2);
if (c1 != c2) return (c1 < c2) ? -1 : 1;
if (!c1) return 0;
++s1;
++s2;
}
}
size_t asccpyn(wchar_t* buffer, size_t buflen,
const char* source, size_t srclen) {
if (buflen <= 0)
return 0;
if (srclen == SIZE_UNKNOWN) {
srclen = strlenn(source, buflen - 1);
} else if (srclen >= buflen) {
srclen = buflen - 1;
}
#if _DEBUG
// Double check that characters are not UTF-8
for (size_t pos = 0; pos < srclen; ++pos)
RTC_DCHECK_LT(static_cast<unsigned char>(source[pos]), 128);
#endif // _DEBUG
std::copy(source, source + srclen, buffer);
buffer[srclen] = 0;
return srclen;
}
#endif // WEBRTC_WIN
void replace_substrs(const char *search,
size_t search_len,
const char *replace,
size_t replace_len,
std::string *s) {
size_t pos = 0;
while ((pos = s->find(search, pos, search_len)) != std::string::npos) {
s->replace(pos, search_len, replace, replace_len);
pos += replace_len;
}
}
bool starts_with(const char *s1, const char *s2) {
return strncmp(s1, s2, strlen(s2)) == 0;
}
bool ends_with(const char *s1, const char *s2) {
size_t s1_length = strlen(s1);
size_t s2_length = strlen(s2);
if (s2_length > s1_length) {
return false;
}
const char* start = s1 + (s1_length - s2_length);
return strncmp(start, s2, s2_length) == 0;
}
static const char kWhitespace[] = " \n\r\t";
std::string string_trim(const std::string& s) {
std::string::size_type first = s.find_first_not_of(kWhitespace);
std::string::size_type last = s.find_last_not_of(kWhitespace);
if (first == std::string::npos || last == std::string::npos) {
return std::string("");
}
return s.substr(first, last - first + 1);
}
} // namespace rtc

318
webrtc/base/stringutils.h Normal file
View File

@ -0,0 +1,318 @@
/*
* Copyright 2004 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_BASE_STRINGUTILS_H__
#define WEBRTC_BASE_STRINGUTILS_H__
#include <ctype.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#if defined(WEBRTC_WIN)
#include <malloc.h>
#include <wchar.h>
#define alloca _alloca
#endif // WEBRTC_WIN
#if defined(WEBRTC_POSIX)
#ifdef BSD
#include <stdlib.h>
#else // BSD
#include <alloca.h>
#endif // !BSD
#endif // WEBRTC_POSIX
#include <string>
#include "webrtc/base/basictypes.h"
///////////////////////////////////////////////////////////////////////////////
// Generic string/memory utilities
///////////////////////////////////////////////////////////////////////////////
#define STACK_ARRAY(TYPE, LEN) static_cast<TYPE*>(::alloca((LEN)*sizeof(TYPE)))
namespace rtc {
// Complement to memset. Verifies memory consists of count bytes of value c.
bool memory_check(const void* memory, int c, size_t count);
// Determines whether the simple wildcard pattern matches target.
// Alpha characters in pattern match case-insensitively.
// Asterisks in pattern match 0 or more characters.
// Ex: string_match("www.TEST.GOOGLE.COM", "www.*.com") -> true
bool string_match(const char* target, const char* pattern);
} // namespace rtc
///////////////////////////////////////////////////////////////////////////////
// Rename a bunch of common string functions so they are consistent across
// platforms and between char and wchar_t variants.
// Here is the full list of functions that are unified:
// strlen, strcmp, stricmp, strncmp, strnicmp
// strchr, vsnprintf, strtoul, tolowercase
// tolowercase is like tolower, but not compatible with end-of-file value
//
// It's not clear if we will ever use wchar_t strings on unix. In theory,
// all strings should be Utf8 all the time, except when interfacing with Win32
// APIs that require Utf16.
///////////////////////////////////////////////////////////////////////////////
inline char tolowercase(char c) {
return static_cast<char>(tolower(c));
}
#if defined(WEBRTC_WIN)
inline size_t strlen(const wchar_t* s) {
return wcslen(s);
}
inline int strcmp(const wchar_t* s1, const wchar_t* s2) {
return wcscmp(s1, s2);
}
inline int stricmp(const wchar_t* s1, const wchar_t* s2) {
return _wcsicmp(s1, s2);
}
inline int strncmp(const wchar_t* s1, const wchar_t* s2, size_t n) {
return wcsncmp(s1, s2, n);
}
inline int strnicmp(const wchar_t* s1, const wchar_t* s2, size_t n) {
return _wcsnicmp(s1, s2, n);
}
inline const wchar_t* strchr(const wchar_t* s, wchar_t c) {
return wcschr(s, c);
}
inline const wchar_t* strstr(const wchar_t* haystack, const wchar_t* needle) {
return wcsstr(haystack, needle);
}
#ifndef vsnprintf
inline int vsnprintf(wchar_t* buf, size_t n, const wchar_t* fmt, va_list args) {
return _vsnwprintf(buf, n, fmt, args);
}
#endif // !vsnprintf
inline unsigned long strtoul(const wchar_t* snum, wchar_t** end, int base) {
return wcstoul(snum, end, base);
}
inline wchar_t tolowercase(wchar_t c) {
return static_cast<wchar_t>(towlower(c));
}
#endif // WEBRTC_WIN
#if defined(WEBRTC_POSIX)
inline int _stricmp(const char* s1, const char* s2) {
return strcasecmp(s1, s2);
}
inline int _strnicmp(const char* s1, const char* s2, size_t n) {
return strncasecmp(s1, s2, n);
}
#endif // WEBRTC_POSIX
///////////////////////////////////////////////////////////////////////////////
// Traits simplifies porting string functions to be CTYPE-agnostic
///////////////////////////////////////////////////////////////////////////////
namespace rtc {
const size_t SIZE_UNKNOWN = static_cast<size_t>(-1);
template<class CTYPE>
struct Traits {
// STL string type
//typedef XXX string;
// Null-terminated string
//inline static const CTYPE* empty_str();
};
///////////////////////////////////////////////////////////////////////////////
// String utilities which work with char or wchar_t
///////////////////////////////////////////////////////////////////////////////
template<class CTYPE>
inline const CTYPE* nonnull(const CTYPE* str, const CTYPE* def_str = NULL) {
return str ? str : (def_str ? def_str : Traits<CTYPE>::empty_str());
}
template<class CTYPE>
const CTYPE* strchr(const CTYPE* str, const CTYPE* chs) {
for (size_t i=0; str[i]; ++i) {
for (size_t j=0; chs[j]; ++j) {
if (str[i] == chs[j]) {
return str + i;
}
}
}
return 0;
}
template<class CTYPE>
const CTYPE* strchrn(const CTYPE* str, size_t slen, CTYPE ch) {
for (size_t i=0; i<slen && str[i]; ++i) {
if (str[i] == ch) {
return str + i;
}
}
return 0;
}
template<class CTYPE>
size_t strlenn(const CTYPE* buffer, size_t buflen) {
size_t bufpos = 0;
while (buffer[bufpos] && (bufpos < buflen)) {
++bufpos;
}
return bufpos;
}
// Safe versions of strncpy, strncat, snprintf and vsnprintf that always
// null-terminate.
template<class CTYPE>
size_t strcpyn(CTYPE* buffer, size_t buflen,
const CTYPE* source, size_t srclen = SIZE_UNKNOWN) {
if (buflen <= 0)
return 0;
if (srclen == SIZE_UNKNOWN) {
srclen = strlenn(source, buflen - 1);
} else if (srclen >= buflen) {
srclen = buflen - 1;
}
memcpy(buffer, source, srclen * sizeof(CTYPE));
buffer[srclen] = 0;
return srclen;
}
template<class CTYPE>
size_t strcatn(CTYPE* buffer, size_t buflen,
const CTYPE* source, size_t srclen = SIZE_UNKNOWN) {
if (buflen <= 0)
return 0;
size_t bufpos = strlenn(buffer, buflen - 1);
return bufpos + strcpyn(buffer + bufpos, buflen - bufpos, source, srclen);
}
// Some compilers (clang specifically) require vsprintfn be defined before
// sprintfn.
template<class CTYPE>
size_t vsprintfn(CTYPE* buffer, size_t buflen, const CTYPE* format,
va_list args) {
int len = vsnprintf(buffer, buflen, format, args);
if ((len < 0) || (static_cast<size_t>(len) >= buflen)) {
len = static_cast<int>(buflen - 1);
buffer[len] = 0;
}
return len;
}
template<class CTYPE>
size_t sprintfn(CTYPE* buffer, size_t buflen, const CTYPE* format, ...);
template<class CTYPE>
size_t sprintfn(CTYPE* buffer, size_t buflen, const CTYPE* format, ...) {
va_list args;
va_start(args, format);
size_t len = vsprintfn(buffer, buflen, format, args);
va_end(args);
return len;
}
///////////////////////////////////////////////////////////////////////////////
// Allow safe comparing and copying ascii (not UTF-8) with both wide and
// non-wide character strings.
///////////////////////////////////////////////////////////////////////////////
inline int asccmp(const char* s1, const char* s2) {
return strcmp(s1, s2);
}
inline int ascicmp(const char* s1, const char* s2) {
return _stricmp(s1, s2);
}
inline int ascncmp(const char* s1, const char* s2, size_t n) {
return strncmp(s1, s2, n);
}
inline int ascnicmp(const char* s1, const char* s2, size_t n) {
return _strnicmp(s1, s2, n);
}
inline size_t asccpyn(char* buffer, size_t buflen,
const char* source, size_t srclen = SIZE_UNKNOWN) {
return strcpyn(buffer, buflen, source, srclen);
}
#if defined(WEBRTC_WIN)
typedef wchar_t(*CharacterTransformation)(wchar_t);
inline wchar_t identity(wchar_t c) { return c; }
int ascii_string_compare(const wchar_t* s1, const char* s2, size_t n,
CharacterTransformation transformation);
inline int asccmp(const wchar_t* s1, const char* s2) {
return ascii_string_compare(s1, s2, static_cast<size_t>(-1), identity);
}
inline int ascicmp(const wchar_t* s1, const char* s2) {
return ascii_string_compare(s1, s2, static_cast<size_t>(-1), tolowercase);
}
inline int ascncmp(const wchar_t* s1, const char* s2, size_t n) {
return ascii_string_compare(s1, s2, n, identity);
}
inline int ascnicmp(const wchar_t* s1, const char* s2, size_t n) {
return ascii_string_compare(s1, s2, n, tolowercase);
}
size_t asccpyn(wchar_t* buffer, size_t buflen,
const char* source, size_t srclen = SIZE_UNKNOWN);
#endif // WEBRTC_WIN
///////////////////////////////////////////////////////////////////////////////
// Traits<char> specializations
///////////////////////////////////////////////////////////////////////////////
template<>
struct Traits<char> {
typedef std::string string;
inline static const char* empty_str() { return ""; }
};
///////////////////////////////////////////////////////////////////////////////
// Traits<wchar_t> specializations (Windows only, currently)
///////////////////////////////////////////////////////////////////////////////
#if defined(WEBRTC_WIN)
template<>
struct Traits<wchar_t> {
typedef std::wstring string;
inline static const wchar_t* empty_str() { return L""; }
};
#endif // WEBRTC_WIN
// Replaces all occurrences of "search" with "replace".
void replace_substrs(const char *search,
size_t search_len,
const char *replace,
size_t replace_len,
std::string *s);
// True iff s1 starts with s2.
bool starts_with(const char *s1, const char *s2);
// True iff s1 ends with s2.
bool ends_with(const char *s1, const char *s2);
// Remove leading and trailing whitespaces.
std::string string_trim(const std::string& s);
} // namespace rtc
#endif // WEBRTC_BASE_STRINGUTILS_H__
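// Illustrative usage sketch (not part of the upstream change) for a few of
// the helpers above; the strings and buffer size are arbitrary examples.
#include "webrtc/base/stringutils.h"

void StringUtilsExample() {
  // Case-insensitive wildcard match, as documented for string_match().
  bool matched = rtc::string_match("www.TEST.GOOGLE.COM", "www.*.com");  // true

  // strcpyn always null-terminates, even when the source is too long.
  char buf[8];
  rtc::strcpyn(buf, sizeof(buf), "truncate me");  // buf == "truncat"

  bool prefixed = rtc::starts_with("webrtc-audio", "webrtc");  // true
  std::string trimmed = rtc::string_trim("  hello \n");        // "hello"
  (void)matched; (void)prefixed; (void)trimmed;
}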

View File

@ -0,0 +1,91 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Borrowed from Chromium's src/base/threading/thread_checker.h.
#ifndef WEBRTC_BASE_THREAD_CHECKER_H_
#define WEBRTC_BASE_THREAD_CHECKER_H_
// Apart from debug builds, we also enable the thread checker in
// builds with DCHECK_ALWAYS_ON so that trybots and waterfall bots
// with this define will get the same level of thread checking as
// debug bots.
//
// Note that this does not perfectly match situations where RTC_DCHECK is
// enabled. For example a non-official release build may have
// DCHECK_ALWAYS_ON undefined (and therefore ThreadChecker would be
// disabled) but have RTC_DCHECKs enabled at runtime.
#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
#define ENABLE_THREAD_CHECKER 1
#else
#define ENABLE_THREAD_CHECKER 0
#endif
#include "webrtc/base/thread_checker_impl.h"
namespace rtc {
// Do nothing implementation, for use in release mode.
//
// Note: You should almost always use the ThreadChecker class to get the
// right version for your build configuration.
class ThreadCheckerDoNothing {
public:
bool CalledOnValidThread() const {
return true;
}
void DetachFromThread() {}
};
// ThreadChecker is a helper class used to help verify that some methods of a
// class are called from the same thread. It provides identical functionality to
// base::NonThreadSafe, but it is meant to be held as a member variable, rather
// than inherited from base::NonThreadSafe.
//
// While inheriting from base::NonThreadSafe may give a clear indication about
// the thread-safety of a class, it may also lead to violations of the style
// guide with regard to multiple inheritance. The choice between having a
// ThreadChecker member and inheriting from base::NonThreadSafe should be based
// on whether:
// - Derived classes need to know the thread they belong to, as opposed to
// having that functionality fully encapsulated in the base class.
// - Derived classes should be able to reassign the base class to another
// thread, via DetachFromThread.
//
// If neither of these are true, then having a ThreadChecker member and calling
// CalledOnValidThread is the preferable solution.
//
// Example:
// class MyClass {
// public:
// void Foo() {
// RTC_DCHECK(thread_checker_.CalledOnValidThread());
// ... (do stuff) ...
// }
//
// private:
// ThreadChecker thread_checker_;
// }
//
// In Release mode, CalledOnValidThread will always return true.
#if ENABLE_THREAD_CHECKER
class ThreadChecker : public ThreadCheckerImpl {
};
#else
class ThreadChecker : public ThreadCheckerDoNothing {
};
#endif // ENABLE_THREAD_CHECKER
#undef ENABLE_THREAD_CHECKER
} // namespace rtc
#endif // WEBRTC_BASE_THREAD_CHECKER_H_

View File

@ -0,0 +1,36 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Borrowed from Chromium's src/base/threading/thread_checker_impl.cc.
#include "webrtc/base/thread_checker_impl.h"
namespace rtc {
ThreadCheckerImpl::ThreadCheckerImpl() : valid_thread_(CurrentThreadRef()) {
}
ThreadCheckerImpl::~ThreadCheckerImpl() {
}
bool ThreadCheckerImpl::CalledOnValidThread() const {
const PlatformThreadRef current_thread = CurrentThreadRef();
CritScope scoped_lock(&lock_);
if (!valid_thread_) // Set if previously detached.
valid_thread_ = current_thread;
return IsThreadRefEqual(valid_thread_, current_thread);
}
void ThreadCheckerImpl::DetachFromThread() {
CritScope scoped_lock(&lock_);
valid_thread_ = 0;
}
} // namespace rtc

View File

@ -0,0 +1,48 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Borrowed from Chromium's src/base/threading/thread_checker_impl.h.
#ifndef WEBRTC_BASE_THREAD_CHECKER_IMPL_H_
#define WEBRTC_BASE_THREAD_CHECKER_IMPL_H_
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/platform_thread.h"
namespace rtc {
// Real implementation of ThreadChecker, for use in debug mode, or
// for temporary use in release mode (e.g. to RTC_CHECK on a threading issue
// seen only in the wild).
//
// Note: You should almost always use the ThreadChecker class to get the
// right version for your build configuration.
class ThreadCheckerImpl {
public:
ThreadCheckerImpl();
~ThreadCheckerImpl();
bool CalledOnValidThread() const;
// Changes the thread that is checked for in CalledOnValidThread. This may
// be useful when an object may be created on one thread and then used
// exclusively on another thread.
void DetachFromThread();
private:
mutable CriticalSection lock_;
// This is mutable so that CalledOnValidThread can set it.
// It's guarded by |lock_|.
mutable PlatformThreadRef valid_thread_;
};
} // namespace rtc
#endif // WEBRTC_BASE_THREAD_CHECKER_IMPL_H_
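// Illustrative sketch (not part of the upstream change): DetachFromThread
// lets an object be constructed on one thread and then bound to whichever
// thread first calls CalledOnValidThread afterwards. The class below is a
// made-up example.
#include "webrtc/base/checks.h"
#include "webrtc/base/thread_checker.h"

class DetachExample {
 public:
  // Called on the construction thread before handing the object off.
  void PrepareForHandoff() { checker_.DetachFromThread(); }

  // The first call after the handoff re-binds the checker to that thread.
  void UseOnWorkerThread() { RTC_DCHECK(checker_.CalledOnValidThread()); }

 private:
  rtc::ThreadChecker checker_;
};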

124
webrtc/common.h Normal file
View File

@ -0,0 +1,124 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_H_
#define WEBRTC_COMMON_H_
#include <map>
#include "webrtc/base/basictypes.h"
namespace webrtc {
// Class Config is designed to ease passing a set of options across webrtc code.
// Options are identified by typename in order to avoid incorrect casts.
//
// Usage:
// * declaring an option:
// struct Algo1_CostFunction {
// virtual float cost(int x) const { return x; }
// virtual ~Algo1_CostFunction() {}
// };
//
// * accessing an option:
// config.Get<Algo1_CostFunction>().cost(value);
//
// * setting an option:
// struct SqrCost : Algo1_CostFunction {
// virtual float cost(int x) const { return x*x; }
// };
// config.Set<Algo1_CostFunction>(new SqrCost());
//
// Note: This class is thread-compatible (like STL containers).
class Config {
public:
// Returns the option if set or a default constructed one.
// Callers that access options too often are encouraged to cache the result.
// Returned references are owned by this.
//
// Requires std::is_default_constructible<T>
template<typename T> const T& Get() const;
// Set the option, deleting any previous instance of the same.
// This instance gets ownership of the newly set value.
template<typename T> void Set(T* value);
Config() {}
~Config() {
// Note: this method is inline so webrtc public API depends only
// on the headers.
for (OptionMap::iterator it = options_.begin();
it != options_.end(); ++it) {
delete it->second;
}
}
private:
typedef void* OptionIdentifier;
struct BaseOption {
virtual ~BaseOption() {}
};
template<typename T>
struct Option : BaseOption {
explicit Option(T* v): value(v) {}
~Option() {
delete value;
}
T* value;
};
// Own implementation of rtti-subset to avoid depending on rtti and its costs.
template<typename T>
static OptionIdentifier identifier() {
static char id_placeholder;
return &id_placeholder;
}
// Used to instantiate a default constructed object that doesn't need to be
// owned. This allows Get<T> to be implemented without requiring explicit
// locks.
template<typename T>
static const T& default_value() {
RTC_DEFINE_STATIC_LOCAL(const T, def, ());
return def;
}
typedef std::map<OptionIdentifier, BaseOption*> OptionMap;
OptionMap options_;
// RTC_DISALLOW_COPY_AND_ASSIGN
Config(const Config&);
void operator=(const Config&);
};
template<typename T>
const T& Config::Get() const {
OptionMap::const_iterator it = options_.find(identifier<T>());
if (it != options_.end()) {
const T* t = static_cast<Option<T>*>(it->second)->value;
if (t) {
return *t;
}
}
return default_value<T>();
}
template<typename T>
void Config::Set(T* value) {
BaseOption*& it = options_[identifier<T>()];
delete it;
it = new Option<T>(value);
}
} // namespace webrtc
#endif // WEBRTC_COMMON_H_
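// Illustrative sketch (not part of the upstream change) tying together the
// usage notes above; Algo1_CostFunction and SqrCost mirror the example in the
// class comment and are not real webrtc options.
#include "webrtc/common.h"

struct Algo1_CostFunction {
  virtual float cost(int x) const { return x; }
  virtual ~Algo1_CostFunction() {}
};

struct SqrCost : Algo1_CostFunction {
  virtual float cost(int x) const { return x * x; }
};

float ConfigExample() {
  webrtc::Config config;
  float def = config.Get<Algo1_CostFunction>().cost(3);   // 3: default option.
  config.Set<Algo1_CostFunction>(new SqrCost());          // Config takes ownership.
  return def + config.Get<Algo1_CostFunction>().cost(3);  // 3 + 9 = 12.
}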

View File

@ -1,6 +1,18 @@
noinst_LTLIBRARIES = libcommon_audio.la
libcommon_audio_la_SOURCES = \
resampler/include/push_resampler.h \
resampler/include/resampler.h \
resampler/push_sinc_resampler.h \
resampler/sinc_resampler.h \
resampler/sinusoidal_linear_chirp_source.h \
resampler/push_resampler.cc \
resampler/push_sinc_resampler.cc \
resampler/resampler.cc \
resampler/sinc_resampler.cc \
resampler/sinc_resampler_sse.cc \
resampler/sinusoidal_linear_chirp_source.cc \
signal_processing/include/real_fft.h \
signal_processing/include/signal_processing_library.h \
signal_processing/include/spl_inl.h \
signal_processing/include/spl_inl_armv7.h \
@ -51,24 +63,59 @@ libcommon_audio_la_SOURCES = signal_processing/include/real_fft.h \
vad/vad_gmm.h \
vad/vad_sp.c \
vad/vad_sp.h \
vad/webrtc_vad.c \
audio_converter.cc \
audio_converter.h \
audio_ring_buffer.cc \
audio_ring_buffer.h \
blocker.cc \
blocker.h \
channel_buffer.cc \
channel_buffer.h \
fft4g.c \
fft4g.h \
fir_filter.cc \
fir_filter.h \
fir_filter_sse.cc \
fir_filter_sse.h \
lapped_transform.cc \
lapped_transform.h \
real_fourier.cc \
real_fourier.h \
real_fourier_ooura.cc \
real_fourier_ooura.h \
real_fourier_openmax.h \
ring_buffer.h \
ring_buffer.c \
sparse_fir_filter.cc \
sparse_fir_filter.h \
wav_file.h \
wav_file.cc \
wav_header.h \
wav_header.cc \
window_generator.h \
window_generator.cc
libcommon_audio_la_CFLAGS = $(AM_CFLAGS) $(COMMON_CFLAGS)
libcommon_audio_la_CXXFLAGS = $(AM_CXXFLAGS) $(COMMON_CXXFLAGS)
# FIXME:
# x86 - resampler/sinc_resampler_sse.cc
#       fir_filter_sse.cc
# ARM - signal_processing/complex_bit_reverse_arm.S
#       signal_processing/spl_sqrt_floor_arm.S
# ARM7 - signal_processing/filter_ar_fast_q12_armv7.S
# NEON - resampler/sinc_resampler_neon.cc \
#        signal_processing/cross_correlation_neon.c
#        signal_processing/downsample_fast_neon.c
#        signal_processing/min_max_operations_neon.c
#        fir_filter_neon.c
# MIPS - signal_processing/complex_bit_reverse_mips.c
#        signal_processing/complex_fft_mips.c
#        signal_processing/cross_correlation_mips.c
#        signal_processing/downsample_fast_mips.c
#        signal_processing/filter_ar_fast_q12_mips.c
#        signal_processing/min_max_operations_mips.c
#        signal_processing/resample_by_2_mips.c
#        signal_processing/spl_sqrt_floor_mips.c
#        signal_processing/vector_scaling_operations_mips.c

View File

@ -0,0 +1,200 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_audio/audio_converter.h"
#include <cstring>
#include "webrtc/base/checks.h"
#include "webrtc/base/safe_conversions.h"
#include "webrtc/common_audio/channel_buffer.h"
#include "webrtc/common_audio/resampler/push_sinc_resampler.h"
#include "webrtc/system_wrappers/interface/scoped_vector.h"
using rtc::checked_cast;
namespace webrtc {
class CopyConverter : public AudioConverter {
public:
CopyConverter(int src_channels, size_t src_frames, int dst_channels,
size_t dst_frames)
: AudioConverter(src_channels, src_frames, dst_channels, dst_frames) {}
~CopyConverter() override {};
void Convert(const float* const* src, size_t src_size, float* const* dst,
size_t dst_capacity) override {
CheckSizes(src_size, dst_capacity);
if (src != dst) {
for (int i = 0; i < src_channels(); ++i)
std::memcpy(dst[i], src[i], dst_frames() * sizeof(*dst[i]));
}
}
};
class UpmixConverter : public AudioConverter {
public:
UpmixConverter(int src_channels, size_t src_frames, int dst_channels,
size_t dst_frames)
: AudioConverter(src_channels, src_frames, dst_channels, dst_frames) {}
~UpmixConverter() override {};
void Convert(const float* const* src, size_t src_size, float* const* dst,
size_t dst_capacity) override {
CheckSizes(src_size, dst_capacity);
for (size_t i = 0; i < dst_frames(); ++i) {
const float value = src[0][i];
for (int j = 0; j < dst_channels(); ++j)
dst[j][i] = value;
}
}
};
class DownmixConverter : public AudioConverter {
public:
DownmixConverter(int src_channels, size_t src_frames, int dst_channels,
size_t dst_frames)
: AudioConverter(src_channels, src_frames, dst_channels, dst_frames) {
}
~DownmixConverter() override {};
void Convert(const float* const* src, size_t src_size, float* const* dst,
size_t dst_capacity) override {
CheckSizes(src_size, dst_capacity);
float* dst_mono = dst[0];
for (size_t i = 0; i < src_frames(); ++i) {
float sum = 0;
for (int j = 0; j < src_channels(); ++j)
sum += src[j][i];
dst_mono[i] = sum / src_channels();
}
}
};
class ResampleConverter : public AudioConverter {
public:
ResampleConverter(int src_channels, size_t src_frames, int dst_channels,
size_t dst_frames)
: AudioConverter(src_channels, src_frames, dst_channels, dst_frames) {
resamplers_.reserve(src_channels);
for (int i = 0; i < src_channels; ++i)
resamplers_.push_back(new PushSincResampler(src_frames, dst_frames));
}
~ResampleConverter() override {};
void Convert(const float* const* src, size_t src_size, float* const* dst,
size_t dst_capacity) override {
CheckSizes(src_size, dst_capacity);
for (size_t i = 0; i < resamplers_.size(); ++i)
resamplers_[i]->Resample(src[i], src_frames(), dst[i], dst_frames());
}
private:
ScopedVector<PushSincResampler> resamplers_;
};
// Apply a vector of converters in serial, in the order given. At least two
// converters must be provided.
class CompositionConverter : public AudioConverter {
public:
CompositionConverter(ScopedVector<AudioConverter> converters)
: converters_(converters.Pass()) {
RTC_CHECK_GE(converters_.size(), 2u);
// We need an intermediate buffer after every converter.
for (auto it = converters_.begin(); it != converters_.end() - 1; ++it)
buffers_.push_back(new ChannelBuffer<float>((*it)->dst_frames(),
(*it)->dst_channels()));
}
~CompositionConverter() override {};
void Convert(const float* const* src, size_t src_size, float* const* dst,
size_t dst_capacity) override {
converters_.front()->Convert(src, src_size, buffers_.front()->channels(),
buffers_.front()->size());
for (size_t i = 2; i < converters_.size(); ++i) {
auto src_buffer = buffers_[i - 2];
auto dst_buffer = buffers_[i - 1];
converters_[i]->Convert(src_buffer->channels(),
src_buffer->size(),
dst_buffer->channels(),
dst_buffer->size());
}
converters_.back()->Convert(buffers_.back()->channels(),
buffers_.back()->size(), dst, dst_capacity);
}
private:
ScopedVector<AudioConverter> converters_;
ScopedVector<ChannelBuffer<float>> buffers_;
};
rtc::scoped_ptr<AudioConverter> AudioConverter::Create(int src_channels,
size_t src_frames,
int dst_channels,
size_t dst_frames) {
rtc::scoped_ptr<AudioConverter> sp;
if (src_channels > dst_channels) {
if (src_frames != dst_frames) {
ScopedVector<AudioConverter> converters;
converters.push_back(new DownmixConverter(src_channels, src_frames,
dst_channels, src_frames));
converters.push_back(new ResampleConverter(dst_channels, src_frames,
dst_channels, dst_frames));
sp.reset(new CompositionConverter(converters.Pass()));
} else {
sp.reset(new DownmixConverter(src_channels, src_frames, dst_channels,
dst_frames));
}
} else if (src_channels < dst_channels) {
if (src_frames != dst_frames) {
ScopedVector<AudioConverter> converters;
converters.push_back(new ResampleConverter(src_channels, src_frames,
src_channels, dst_frames));
converters.push_back(new UpmixConverter(src_channels, dst_frames,
dst_channels, dst_frames));
sp.reset(new CompositionConverter(converters.Pass()));
} else {
sp.reset(new UpmixConverter(src_channels, src_frames, dst_channels,
dst_frames));
}
} else if (src_frames != dst_frames) {
sp.reset(new ResampleConverter(src_channels, src_frames, dst_channels,
dst_frames));
} else {
sp.reset(new CopyConverter(src_channels, src_frames, dst_channels,
dst_frames));
}
return sp.Pass();
}
// For CompositionConverter.
AudioConverter::AudioConverter()
: src_channels_(0),
src_frames_(0),
dst_channels_(0),
dst_frames_(0) {}
AudioConverter::AudioConverter(int src_channels, size_t src_frames,
int dst_channels, size_t dst_frames)
: src_channels_(src_channels),
src_frames_(src_frames),
dst_channels_(dst_channels),
dst_frames_(dst_frames) {
RTC_CHECK(dst_channels == src_channels || dst_channels == 1 ||
src_channels == 1);
}
void AudioConverter::CheckSizes(size_t src_size, size_t dst_capacity) const {
RTC_CHECK_EQ(src_size, src_channels() * src_frames());
RTC_CHECK_GE(dst_capacity, dst_channels() * dst_frames());
}
} // namespace webrtc

View File

@ -0,0 +1,66 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_AUDIO_CONVERTER_H_
#define WEBRTC_COMMON_AUDIO_AUDIO_CONVERTER_H_
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/scoped_ptr.h"
namespace webrtc {
// Format conversion (remixing and resampling) for audio. Only simple remixing
// conversions are supported: downmix to mono (i.e. |dst_channels| == 1) or
// upmix from mono (i.e. |src_channels| == 1).
//
// The source and destination chunks have the same duration in time; specifying
// the number of frames is equivalent to specifying the sample rates.
class AudioConverter {
public:
// Returns a new AudioConverter, which will use the supplied format for its
// lifetime. Caller is responsible for the memory.
static rtc::scoped_ptr<AudioConverter> Create(int src_channels,
size_t src_frames,
int dst_channels,
size_t dst_frames);
virtual ~AudioConverter() {};
// Convert |src|, containing |src_size| samples, to |dst|, having a sample
// capacity of |dst_capacity|. Both point to a series of buffers containing
// the samples for each channel. The sizes must correspond to the format
// passed to Create().
virtual void Convert(const float* const* src, size_t src_size,
float* const* dst, size_t dst_capacity) = 0;
int src_channels() const { return src_channels_; }
size_t src_frames() const { return src_frames_; }
int dst_channels() const { return dst_channels_; }
size_t dst_frames() const { return dst_frames_; }
protected:
AudioConverter();
AudioConverter(int src_channels, size_t src_frames, int dst_channels,
size_t dst_frames);
// Helper to RTC_CHECK that inputs are correctly sized.
void CheckSizes(size_t src_size, size_t dst_capacity) const;
private:
const int src_channels_;
const size_t src_frames_;
const int dst_channels_;
const size_t dst_frames_;
RTC_DISALLOW_COPY_AND_ASSIGN(AudioConverter);
};
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_AUDIO_CONVERTER_H_
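// Illustrative sketch (not part of the upstream change): downmixing a stereo
// 10 ms chunk at 48 kHz to mono at 32 kHz (480 -> 320 frames). The buffer
// sizes and rates are arbitrary examples.
#include "webrtc/common_audio/audio_converter.h"
#include "webrtc/common_audio/channel_buffer.h"

void AudioConverterExample() {
  const int kSrcChannels = 2, kDstChannels = 1;
  const size_t kSrcFrames = 480, kDstFrames = 320;
  webrtc::ChannelBuffer<float> src(kSrcFrames, kSrcChannels);
  webrtc::ChannelBuffer<float> dst(kDstFrames, kDstChannels);
  rtc::scoped_ptr<webrtc::AudioConverter> converter =
      webrtc::AudioConverter::Create(kSrcChannels, kSrcFrames,
                                     kDstChannels, kDstFrames);
  // |src_size| and |dst_capacity| are total samples across all channels.
  converter->Convert(src.channels(), src.size(),
                     dst.channels(), dst.size());
}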

View File

@ -0,0 +1,75 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_audio/audio_ring_buffer.h"
#include "webrtc/base/checks.h"
#include "webrtc/common_audio/ring_buffer.h"
// This is a simple multi-channel wrapper over the ring_buffer.h C interface.
namespace webrtc {
AudioRingBuffer::AudioRingBuffer(size_t channels, size_t max_frames) {
buffers_.reserve(channels);
for (size_t i = 0; i < channels; ++i)
buffers_.push_back(WebRtc_CreateBuffer(max_frames, sizeof(float)));
}
AudioRingBuffer::~AudioRingBuffer() {
for (auto buf : buffers_)
WebRtc_FreeBuffer(buf);
}
void AudioRingBuffer::Write(const float* const* data, size_t channels,
size_t frames) {
RTC_DCHECK_EQ(buffers_.size(), channels);
for (size_t i = 0; i < channels; ++i) {
const size_t written = WebRtc_WriteBuffer(buffers_[i], data[i], frames);
RTC_CHECK_EQ(written, frames);
}
}
void AudioRingBuffer::Read(float* const* data, size_t channels, size_t frames) {
RTC_DCHECK_EQ(buffers_.size(), channels);
for (size_t i = 0; i < channels; ++i) {
const size_t read =
WebRtc_ReadBuffer(buffers_[i], nullptr, data[i], frames);
RTC_CHECK_EQ(read, frames);
}
}
size_t AudioRingBuffer::ReadFramesAvailable() const {
// All buffers have the same amount available.
return WebRtc_available_read(buffers_[0]);
}
size_t AudioRingBuffer::WriteFramesAvailable() const {
// All buffers have the same amount available.
return WebRtc_available_write(buffers_[0]);
}
void AudioRingBuffer::MoveReadPositionForward(size_t frames) {
for (auto buf : buffers_) {
const size_t moved =
static_cast<size_t>(WebRtc_MoveReadPtr(buf, static_cast<int>(frames)));
RTC_CHECK_EQ(moved, frames);
}
}
void AudioRingBuffer::MoveReadPositionBackward(size_t frames) {
for (auto buf : buffers_) {
const size_t moved = static_cast<size_t>(
-WebRtc_MoveReadPtr(buf, -static_cast<int>(frames)));
RTC_CHECK_EQ(moved, frames);
}
}
} // namespace webrtc

View File

@ -0,0 +1,56 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_AUDIO_RING_BUFFER_H_
#define WEBRTC_COMMON_AUDIO_AUDIO_RING_BUFFER_H_
#include <stddef.h>
#include <vector>
struct RingBuffer;
namespace webrtc {
// A ring buffer tailored for float deinterleaved audio. Any operation that
// cannot be performed as requested will cause a crash (e.g. insufficient data
// in the buffer to fulfill a read request.)
class AudioRingBuffer final {
public:
// Specify the number of channels and maximum number of frames the buffer will
// contain.
AudioRingBuffer(size_t channels, size_t max_frames);
~AudioRingBuffer();
// Copies |data| to the buffer and advances the write pointer. |channels| must
// be the same as at creation time.
void Write(const float* const* data, size_t channels, size_t frames);
// Copies from the buffer to |data| and advances the read pointer. |channels|
// must be the same as at creation time.
void Read(float* const* data, size_t channels, size_t frames);
size_t ReadFramesAvailable() const;
size_t WriteFramesAvailable() const;
// Moves the read position. The forward version advances the read pointer
// towards the write pointer and the backward version withdraws the read
// pointer away from the write pointer (i.e. flushing and stuffing the buffer
// respectively.)
void MoveReadPositionForward(size_t frames);
void MoveReadPositionBackward(size_t frames);
private:
// We don't use a ScopedVector because it doesn't support a specialized
// deleter (like scoped_ptr for instance.)
std::vector<RingBuffer*> buffers_;
};
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_AUDIO_RING_BUFFER_H_
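// Illustrative sketch (not part of the upstream change): buffering two-channel
// float audio and reading it back in smaller blocks. Sizes are arbitrary
// examples.
#include "webrtc/common_audio/audio_ring_buffer.h"

void AudioRingBufferExample(const float* const* chunk /* 2 x 480 frames */) {
  webrtc::AudioRingBuffer buffer(2, 960);  // 2 channels, up to 960 frames.
  buffer.Write(chunk, 2, 480);
  float left[160], right[160];
  float* block[] = {left, right};
  while (buffer.ReadFramesAvailable() >= 160)
    buffer.Read(block, 2, 160);  // Crashes (RTC_CHECK) if underfilled.
}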

View File

@ -0,0 +1,236 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_audio/blocker.h"
#include <string.h>
#include "webrtc/base/checks.h"
namespace {
// Adds |a| and |b| frame by frame into |result| (basically matrix addition).
void AddFrames(const float* const* a,
size_t a_start_index,
const float* const* b,
int b_start_index,
size_t num_frames,
int num_channels,
float* const* result,
size_t result_start_index) {
for (int i = 0; i < num_channels; ++i) {
for (size_t j = 0; j < num_frames; ++j) {
result[i][j + result_start_index] =
a[i][j + a_start_index] + b[i][j + b_start_index];
}
}
}
// Copies |src| into |dst| channel by channel.
void CopyFrames(const float* const* src,
size_t src_start_index,
size_t num_frames,
int num_channels,
float* const* dst,
size_t dst_start_index) {
for (int i = 0; i < num_channels; ++i) {
memcpy(&dst[i][dst_start_index],
&src[i][src_start_index],
num_frames * sizeof(dst[i][dst_start_index]));
}
}
// Moves |src| into |dst| channel by channel.
void MoveFrames(const float* const* src,
size_t src_start_index,
size_t num_frames,
int num_channels,
float* const* dst,
size_t dst_start_index) {
for (int i = 0; i < num_channels; ++i) {
memmove(&dst[i][dst_start_index],
&src[i][src_start_index],
num_frames * sizeof(dst[i][dst_start_index]));
}
}
void ZeroOut(float* const* buffer,
size_t starting_idx,
size_t num_frames,
int num_channels) {
for (int i = 0; i < num_channels; ++i) {
memset(&buffer[i][starting_idx], 0,
num_frames * sizeof(buffer[i][starting_idx]));
}
}
// Pointwise multiplies each channel of |frames| with |window|. Results are
// stored in |frames|.
void ApplyWindow(const float* window,
size_t num_frames,
int num_channels,
float* const* frames) {
for (int i = 0; i < num_channels; ++i) {
for (size_t j = 0; j < num_frames; ++j) {
frames[i][j] = frames[i][j] * window[j];
}
}
}
size_t gcd(size_t a, size_t b) {
size_t tmp;
while (b) {
tmp = a;
a = b;
b = tmp % b;
}
return a;
}
} // namespace
namespace webrtc {
Blocker::Blocker(size_t chunk_size,
size_t block_size,
int num_input_channels,
int num_output_channels,
const float* window,
size_t shift_amount,
BlockerCallback* callback)
: chunk_size_(chunk_size),
block_size_(block_size),
num_input_channels_(num_input_channels),
num_output_channels_(num_output_channels),
initial_delay_(block_size_ - gcd(chunk_size, shift_amount)),
frame_offset_(0),
input_buffer_(num_input_channels_, chunk_size_ + initial_delay_),
output_buffer_(chunk_size_ + initial_delay_, num_output_channels_),
input_block_(block_size_, num_input_channels_),
output_block_(block_size_, num_output_channels_),
window_(new float[block_size_]),
shift_amount_(shift_amount),
callback_(callback) {
RTC_CHECK_LE(num_output_channels_, num_input_channels_);
RTC_CHECK_LE(shift_amount_, block_size_);
memcpy(window_.get(), window, block_size_ * sizeof(*window_.get()));
input_buffer_.MoveReadPositionBackward(initial_delay_);
}
// When block_size < chunk_size the input and output buffers look like this:
//
// delay* chunk_size chunk_size + delay*
// buffer: <-------------|---------------------|---------------|>
// _a_ _b_ _c_
//
// On each call to ProcessChunk():
// 1. New input gets read into sections _b_ and _c_ of the input buffer.
// 2. We block starting from frame_offset.
// 3. We block until we reach a block |bl| that doesn't contain any frames
// from sections _a_ or _b_ of the input buffer.
// 4. We window the current block, fire the callback for processing, window
// again, and overlap/add to the output buffer.
// 5. We copy sections _a_ and _b_ of the output buffer into output.
// 6. For both the input and the output buffers, we copy section _c_ into
// section _a_.
// 7. We set the new frame_offset to be the difference between the first frame
// of |bl| and the border between sections _b_ and _c_.
//
// When block_size > chunk_size the input and output buffers look like this:
//
// chunk_size delay* chunk_size + delay*
// buffer: <-------------|---------------------|---------------|>
// _a_ _b_ _c_
//
// On each call to ProcessChunk():
// The procedure is the same as above, except for:
// 1. New input gets read into section _c_ of the input buffer.
// 3. We block until we reach a block |bl| that doesn't contain any frames
// from section _a_ of the input buffer.
// 5. We copy section _a_ of the output buffer into output.
// 6. For both the input and the output buffers, we copy sections _b_ and _c_
// into section _a_ and _b_.
// 7. We set the new frame_offset to be the difference between the first frame
// of |bl| and the border between sections _a_ and _b_.
//
// * delay here refers to initial_delay_
//
// TODO(claguna): Look at using ring buffers to eliminate some copies.
void Blocker::ProcessChunk(const float* const* input,
size_t chunk_size,
int num_input_channels,
int num_output_channels,
float* const* output) {
RTC_CHECK_EQ(chunk_size, chunk_size_);
RTC_CHECK_EQ(num_input_channels, num_input_channels_);
RTC_CHECK_EQ(num_output_channels, num_output_channels_);
input_buffer_.Write(input, num_input_channels, chunk_size_);
size_t first_frame_in_block = frame_offset_;
// Loop through blocks.
while (first_frame_in_block < chunk_size_) {
input_buffer_.Read(input_block_.channels(), num_input_channels,
block_size_);
input_buffer_.MoveReadPositionBackward(block_size_ - shift_amount_);
ApplyWindow(window_.get(),
block_size_,
num_input_channels_,
input_block_.channels());
callback_->ProcessBlock(input_block_.channels(),
block_size_,
num_input_channels_,
num_output_channels_,
output_block_.channels());
ApplyWindow(window_.get(),
block_size_,
num_output_channels_,
output_block_.channels());
AddFrames(output_buffer_.channels(),
first_frame_in_block,
output_block_.channels(),
0,
block_size_,
num_output_channels_,
output_buffer_.channels(),
first_frame_in_block);
first_frame_in_block += shift_amount_;
}
// Copy output buffer to output
CopyFrames(output_buffer_.channels(),
0,
chunk_size_,
num_output_channels_,
output,
0);
// Copy output buffer [chunk_size_, chunk_size_ + initial_delay]
// to output buffer [0, initial_delay], zero the rest.
MoveFrames(output_buffer_.channels(),
chunk_size,
initial_delay_,
num_output_channels_,
output_buffer_.channels(),
0);
ZeroOut(output_buffer_.channels(),
initial_delay_,
chunk_size_,
num_output_channels_);
// Calculate new starting frames.
frame_offset_ = first_frame_in_block - chunk_size_;
}
} // namespace webrtc


@ -0,0 +1,123 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_INTERNAL_BEAMFORMER_BLOCKER_H_
#define WEBRTC_INTERNAL_BEAMFORMER_BLOCKER_H_
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/audio_ring_buffer.h"
#include "webrtc/common_audio/channel_buffer.h"
namespace webrtc {
// The callback function to process audio in the time domain. Input has already
// been windowed, and output will be windowed. The number of input channels
// must be >= the number of output channels.
class BlockerCallback {
public:
virtual ~BlockerCallback() {}
virtual void ProcessBlock(const float* const* input,
size_t num_frames,
int num_input_channels,
int num_output_channels,
float* const* output) = 0;
};
// The main purpose of Blocker is to abstract away the fact that often we
// receive a different number of audio frames than our transform takes. For
// example, most FFTs work best when the fft-size is a power of 2, but suppose
// we receive 20ms of audio at a sample rate of 48000. That comes to 960 frames
// of audio, which is not a power of 2. Blocker allows us to specify the
// transform and all other necessary processing via the Process() callback
// function without any constraints on the transform-size
// (read: |block_size_|) or received-audio-size (read: |chunk_size_|).
// We handle this for the multichannel audio case, allowing for different
// numbers of input and output channels (for example, beamforming takes 2 or
// more input channels and returns 1 output channel). Audio signals are
// represented as deinterleaved floats in the range [-1, 1].
//
// Blocker is responsible for:
// - blocking audio while handling potential discontinuities on the edges
// of chunks
// - windowing blocks before sending them to Process()
// - windowing processed blocks, and overlap-adding them together before
// sending back a processed chunk
//
// To use blocker:
// 1. Implement a BlockerCallback object |bc| (see the usage sketch after this
//    header).
// 2. Instantiate a Blocker object |b|, passing in |bc|.
// 3. As you receive audio, call b.ProcessChunk() to get processed audio.
//
// A small amount of delay is added to the first received chunk to deal with
// the difference in chunk/block sizes. This delay is <= chunk_size.
//
// Ownership of window is retained by the caller. That is, Blocker makes a
// copy of window and does not attempt to delete it.
class Blocker {
public:
Blocker(size_t chunk_size,
size_t block_size,
int num_input_channels,
int num_output_channels,
const float* window,
size_t shift_amount,
BlockerCallback* callback);
void ProcessChunk(const float* const* input,
size_t chunk_size,
int num_input_channels,
int num_output_channels,
float* const* output);
private:
const size_t chunk_size_;
const size_t block_size_;
const int num_input_channels_;
const int num_output_channels_;
// The number of frames of delay to add at the beginning of the first chunk.
const size_t initial_delay_;
// The frame index into the input buffer where the first block should be read
// from. This is necessary because shift_amount_ is not necessarily a
// multiple of chunk_size_, so blocks won't line up at the start of the
// buffer.
size_t frame_offset_;
// Since blocks nearly always overlap, there are certain blocks that require
// frames from the end of one chunk and the beginning of the next chunk. The
// input and output buffers are responsible for saving those frames between
// calls to ProcessChunk().
//
// Both contain |initial_delay_| + |chunk_size_| frames. The input is a fairly
// standard FIFO, but due to the overlap-add it's harder to use an
// AudioRingBuffer for the output.
AudioRingBuffer input_buffer_;
ChannelBuffer<float> output_buffer_;
// Space for the input block (can't wrap because of windowing).
ChannelBuffer<float> input_block_;
// Space for the output block (can't wrap because of overlap/add).
ChannelBuffer<float> output_block_;
rtc::scoped_ptr<float[]> window_;
// The number of frames between the starts of contiguous blocks. For example,
// |shift_amount_| = |block_size_| / 2 for a Hann window.
size_t shift_amount_;
BlockerCallback* callback_;
};
} // namespace webrtc
#endif // WEBRTC_INTERNAL_BEAMFORMER_BLOCKER_H_
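For reference, a minimal usage sketch of the interface above (illustrative only, not part of the upstream sources). The callback, channel counts, sizes and the Hann window are assumptions; only the BlockerCallback signature and the Blocker constructor/ProcessChunk parameters come from this header.

#include <algorithm>
#include <cmath>
#include <vector>
#include "webrtc/common_audio/blocker.h"

// Hypothetical callback: copies channel 0 of each windowed block to every
// output channel. A real processor would do its work here.
class CopyBlocks : public webrtc::BlockerCallback {
 public:
  void ProcessBlock(const float* const* input, size_t num_frames,
                    int num_input_channels, int num_output_channels,
                    float* const* output) override {
    for (int c = 0; c < num_output_channels; ++c)
      std::copy(input[0], input[0] + num_frames, output[c]);
  }
};

void ExampleBlockerUse(const float* const* in, float* const* out) {
  const size_t kChunkSize = 480;  // e.g. 10 ms at 48 kHz (assumption)
  const size_t kBlockSize = 256;  // transform-friendly power of 2 (assumption)
  const size_t kShift = 128;      // 50% overlap
  const float kPi = 3.14159265f;
  std::vector<float> window(kBlockSize);  // Hann window, owned by the caller.
  for (size_t i = 0; i < kBlockSize; ++i)
    window[i] = 0.5f * (1.f - std::cos(2.f * kPi * i / kBlockSize));
  CopyBlocks callback;
  webrtc::Blocker blocker(kChunkSize, kBlockSize, 2 /* in */, 1 /* out */,
                          window.data(), kShift, &callback);
  // Call once per received chunk of audio.
  blocker.ProcessChunk(in, kChunkSize, 2, 1, out);
}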


@ -0,0 +1,73 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_audio/channel_buffer.h"
namespace webrtc {
IFChannelBuffer::IFChannelBuffer(size_t num_frames,
int num_channels,
size_t num_bands)
: ivalid_(true),
ibuf_(num_frames, num_channels, num_bands),
fvalid_(true),
fbuf_(num_frames, num_channels, num_bands) {}
ChannelBuffer<int16_t>* IFChannelBuffer::ibuf() {
RefreshI();
fvalid_ = false;
return &ibuf_;
}
ChannelBuffer<float>* IFChannelBuffer::fbuf() {
RefreshF();
ivalid_ = false;
return &fbuf_;
}
const ChannelBuffer<int16_t>* IFChannelBuffer::ibuf_const() const {
RefreshI();
return &ibuf_;
}
const ChannelBuffer<float>* IFChannelBuffer::fbuf_const() const {
RefreshF();
return &fbuf_;
}
void IFChannelBuffer::RefreshF() const {
if (!fvalid_) {
assert(ivalid_);
const int16_t* const* int_channels = ibuf_.channels();
float* const* float_channels = fbuf_.channels();
for (int i = 0; i < ibuf_.num_channels(); ++i) {
for (size_t j = 0; j < ibuf_.num_frames(); ++j) {
float_channels[i][j] = int_channels[i][j];
}
}
fvalid_ = true;
}
}
void IFChannelBuffer::RefreshI() const {
if (!ivalid_) {
assert(fvalid_);
int16_t* const* int_channels = ibuf_.channels();
const float* const* float_channels = fbuf_.channels();
for (int i = 0; i < ibuf_.num_channels(); ++i) {
FloatS16ToS16(float_channels[i],
ibuf_.num_frames(),
int_channels[i]);
}
ivalid_ = true;
}
}
} // namespace webrtc


@ -0,0 +1,169 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_CHANNEL_BUFFER_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_CHANNEL_BUFFER_H_
#include <string.h>
#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/test/testsupport/gtest_prod_util.h"
namespace webrtc {
// Helper to encapsulate a contiguous data buffer, full or split into frequency
// bands, with access to pointer arrays of the deinterleaved channels and
// bands. The buffer is zero initialized at creation.
//
// The buffer structure is shown below for a 2-channel, 2-band case:
//
// |data_|:
// { [ --- b1ch1 --- ] [ --- b2ch1 --- ] [ --- b1ch2 --- ] [ --- b2ch2 --- ] }
//
// The pointer arrays for the same example are as follows:
//
// |channels_|:
// { [ b1ch1* ] [ b1ch2* ] [ b2ch1* ] [ b2ch2* ] }
//
// |bands_|:
// { [ b1ch1* ] [ b2ch1* ] [ b1ch2* ] [ b2ch2* ] }
template <typename T>
class ChannelBuffer {
public:
ChannelBuffer(size_t num_frames,
int num_channels,
size_t num_bands = 1)
: data_(new T[num_frames * num_channels]()),
channels_(new T*[num_channels * num_bands]),
bands_(new T*[num_channels * num_bands]),
num_frames_(num_frames),
num_frames_per_band_(num_frames / num_bands),
num_channels_(num_channels),
num_bands_(num_bands) {
for (int i = 0; i < num_channels_; ++i) {
for (size_t j = 0; j < num_bands_; ++j) {
channels_[j * num_channels_ + i] =
&data_[i * num_frames_ + j * num_frames_per_band_];
bands_[i * num_bands_ + j] = channels_[j * num_channels_ + i];
}
}
}
// Returns a pointer array to the full-band channels (or lower band channels).
// Usage:
// channels()[channel][sample].
// Where:
// 0 <= channel < |num_channels_|
// 0 <= sample < |num_frames_|
T* const* channels() { return channels(0); }
const T* const* channels() const { return channels(0); }
// Returns a pointer array to the channels for a specific band.
// Usage:
// channels(band)[channel][sample].
// Where:
// 0 <= band < |num_bands_|
// 0 <= channel < |num_channels_|
// 0 <= sample < |num_frames_per_band_|
const T* const* channels(size_t band) const {
RTC_DCHECK_LT(band, num_bands_);
return &channels_[band * num_channels_];
}
T* const* channels(size_t band) {
const ChannelBuffer<T>* t = this;
return const_cast<T* const*>(t->channels(band));
}
// Returns a pointer array to the bands for a specific channel.
// Usage:
// bands(channel)[band][sample].
// Where:
// 0 <= channel < |num_channels_|
// 0 <= band < |num_bands_|
// 0 <= sample < |num_frames_per_band_|
const T* const* bands(int channel) const {
RTC_DCHECK_LT(channel, num_channels_);
RTC_DCHECK_GE(channel, 0);
return &bands_[channel * num_bands_];
}
T* const* bands(int channel) {
const ChannelBuffer<T>* t = this;
return const_cast<T* const*>(t->bands(channel));
}
// Sets the |slice| pointers to the |start_frame| position for each channel.
// Returns |slice| for convenience.
const T* const* Slice(T** slice, size_t start_frame) const {
RTC_DCHECK_LT(start_frame, num_frames_);
for (int i = 0; i < num_channels_; ++i)
slice[i] = &channels_[i][start_frame];
return slice;
}
T** Slice(T** slice, size_t start_frame) {
const ChannelBuffer<T>* t = this;
return const_cast<T**>(t->Slice(slice, start_frame));
}
size_t num_frames() const { return num_frames_; }
size_t num_frames_per_band() const { return num_frames_per_band_; }
int num_channels() const { return num_channels_; }
size_t num_bands() const { return num_bands_; }
size_t size() const { return num_frames_ * num_channels_; }
void SetDataForTesting(const T* data, size_t size) {
RTC_CHECK_EQ(size, this->size());
memcpy(data_.get(), data, size * sizeof(*data));
}
private:
rtc::scoped_ptr<T[]> data_;
rtc::scoped_ptr<T* []> channels_;
rtc::scoped_ptr<T* []> bands_;
const size_t num_frames_;
const size_t num_frames_per_band_;
const int num_channels_;
const size_t num_bands_;
};
// One int16_t and one float ChannelBuffer that are kept in sync. The sync is
// broken when someone requests write access to either ChannelBuffer, and
// reestablished when someone requests the outdated ChannelBuffer. It is
// therefore safe to use the return value of ibuf_const() and fbuf_const()
// until the next call to ibuf() or fbuf(), and the return value of ibuf() and
// fbuf() until the next call to any of the other functions.
class IFChannelBuffer {
public:
IFChannelBuffer(size_t num_frames, int num_channels, size_t num_bands = 1);
ChannelBuffer<int16_t>* ibuf();
ChannelBuffer<float>* fbuf();
const ChannelBuffer<int16_t>* ibuf_const() const;
const ChannelBuffer<float>* fbuf_const() const;
size_t num_frames() const { return ibuf_.num_frames(); }
size_t num_frames_per_band() const { return ibuf_.num_frames_per_band(); }
int num_channels() const { return ibuf_.num_channels(); }
size_t num_bands() const { return ibuf_.num_bands(); }
private:
void RefreshF() const;
void RefreshI() const;
mutable bool ivalid_;
mutable ChannelBuffer<int16_t> ibuf_;
mutable bool fvalid_;
mutable ChannelBuffer<float> fbuf_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_CHANNEL_BUFFER_H_
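A small sketch, not from the commit, showing how the channels()/bands() pointer arrays above alias the same storage, and how IFChannelBuffer lazily converts between its int16_t and float copies; the sizes and sample values are arbitrary.

#include "webrtc/common_audio/channel_buffer.h"

void ExampleChannelBuffer() {
  // 160 frames, 2 channels, 2 bands -> 80 frames per band.
  webrtc::ChannelBuffer<float> buf(160, 2, 2);
  // Full-band view: channels()[channel][sample], 0 <= sample < 160.
  buf.channels()[0][0] = 1.f;
  // Per-band view of the same storage: channels(band)[channel][sample].
  float* band1_ch0 = buf.channels(1)[0];
  // Per-channel view: bands(channel)[band][sample].
  float* ch0_band1 = buf.bands(0)[1];
  bool alias = (band1_ch0 == ch0_band1);  // true: both point at b2ch1 above.

  // IFChannelBuffer: writing the float side invalidates the int16_t side;
  // reading the int16_t side triggers RefreshI() and converts on demand.
  webrtc::IFChannelBuffer ifbuf(160, 2);
  ifbuf.fbuf()->channels()[0][0] = 8000.f;
  int16_t converted = ifbuf.ibuf_const()->channels()[0][0];  // == 8000
  static_cast<void>(alias);
  static_cast<void>(converted);
}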


@ -27,7 +27,7 @@ functions
dfst: Sine Transform of RDFT (Real Anti-symmetric DFT)
function prototypes
void cdft(int, int, float *, int *, float *);
-void rdft(int, int, float *, int *, float *);
+void rdft(size_t, int, float *, size_t *, float *);
void ddct(int, int, float *, int *, float *);
void ddst(int, int, float *, int *, float *);
void dfct(int, float *, float *, int *, float *);
@ -94,7 +94,7 @@ function prototypes
ip[0] = 0; // first time only
rdft(n, -1, a, ip, w);
[parameters]
-n :data length (int)
+n :data length (size_t)
n >= 2, n = power of 2
a[0...n-1] :input/output data (float *)
<case1>
@ -107,7 +107,7 @@ function prototypes
a[2*j] = R[j], 0<=j<n/2
a[2*j+1] = I[j], 0<j<n/2
a[1] = R[n/2]
-ip[0...*] :work area for bit reversal (int *)
+ip[0...*] :work area for bit reversal (size_t *)
length of ip >= 2+sqrt(n/2)
strictly,
length of ip >=
@ -286,14 +286,29 @@ Appendix :
w[] and ip[] are compatible with all routines.
*/
-void cdft(int n, int isgn, float *a, int *ip, float *w)
-{
-void makewt(int nw, int *ip, float *w);
-void bitrv2(int n, int *ip, float *a);
-void bitrv2conj(int n, int *ip, float *a);
-void cftfsub(int n, float *a, float *w);
-void cftbsub(int n, float *a, float *w);
+#include <stddef.h>
+static void makewt(size_t nw, size_t *ip, float *w);
+static void makect(size_t nc, size_t *ip, float *c);
+static void bitrv2(size_t n, size_t *ip, float *a);
+#if 0 // Not used.
+static void bitrv2conj(int n, int *ip, float *a);
+#endif
+static void cftfsub(size_t n, float *a, float *w);
+static void cftbsub(size_t n, float *a, float *w);
+static void cft1st(size_t n, float *a, float *w);
+static void cftmdl(size_t n, size_t l, float *a, float *w);
+static void rftfsub(size_t n, float *a, size_t nc, float *c);
+static void rftbsub(size_t n, float *a, size_t nc, float *c);
+#if 0 // Not used.
+static void dctsub(int n, float *a, int nc, float *c)
+static void dstsub(int n, float *a, int nc, float *c)
+#endif
+#if 0 // Not used.
+void WebRtc_cdft(int n, int isgn, float *a, int *ip, float *w)
+{
if (n > (ip[0] << 2)) {
makewt(n >> 2, ip, w);
}
@ -309,18 +324,12 @@ void cdft(int n, int isgn, float *a, int *ip, float *w)
cftfsub(n, a, w);
}
}
+#endif
-void rdft(int n, int isgn, float *a, int *ip, float *w)
+void WebRtc_rdft(size_t n, int isgn, float *a, size_t *ip, float *w)
{
-void makewt(int nw, int *ip, float *w);
-void makect(int nc, int *ip, float *c);
-void bitrv2(int n, int *ip, float *a);
-void cftfsub(int n, float *a, float *w);
-void cftbsub(int n, float *a, float *w);
-void rftfsub(int n, float *a, int nc, float *c);
-void rftbsub(int n, float *a, int nc, float *c);
-int nw, nc;
+size_t nw, nc;
float xi;
nw = ip[0];
@ -357,17 +366,9 @@ void rdft(int n, int isgn, float *a, int *ip, float *w)
}
}
+#if 0 // Not used.
-void ddct(int n, int isgn, float *a, int *ip, float *w)
+static void ddct(int n, int isgn, float *a, int *ip, float *w)
{
-void makewt(int nw, int *ip, float *w);
-void makect(int nc, int *ip, float *c);
-void bitrv2(int n, int *ip, float *a);
-void cftfsub(int n, float *a, float *w);
-void cftbsub(int n, float *a, float *w);
-void rftfsub(int n, float *a, int nc, float *c);
-void rftbsub(int n, float *a, int nc, float *c);
-void dctsub(int n, float *a, int nc, float *c);
int j, nw, nc;
float xr;
@ -417,16 +418,8 @@ void ddct(int n, int isgn, float *a, int *ip, float *w)
}
-void ddst(int n, int isgn, float *a, int *ip, float *w)
+static void ddst(int n, int isgn, float *a, int *ip, float *w)
{
-void makewt(int nw, int *ip, float *w);
-void makect(int nc, int *ip, float *c);
-void bitrv2(int n, int *ip, float *a);
-void cftfsub(int n, float *a, float *w);
-void cftbsub(int n, float *a, float *w);
-void rftfsub(int n, float *a, int nc, float *c);
-void rftbsub(int n, float *a, int nc, float *c);
-void dstsub(int n, float *a, int nc, float *c);
int j, nw, nc;
float xr;
@ -476,14 +469,8 @@ void ddst(int n, int isgn, float *a, int *ip, float *w)
}
-void dfct(int n, float *a, float *t, int *ip, float *w)
+static void dfct(int n, float *a, float *t, int *ip, float *w)
{
-void makewt(int nw, int *ip, float *w);
-void makect(int nc, int *ip, float *c);
-void bitrv2(int n, int *ip, float *a);
-void cftfsub(int n, float *a, float *w);
-void rftfsub(int n, float *a, int nc, float *c);
-void dctsub(int n, float *a, int nc, float *c);
int j, k, l, m, mh, nw, nc;
float xr, xi, yr, yi;
@ -571,15 +558,8 @@ void dfct(int n, float *a, float *t, int *ip, float *w)
}
}
-void dfst(int n, float *a, float *t, int *ip, float *w)
+static void dfst(int n, float *a, float *t, int *ip, float *w)
{
-void makewt(int nw, int *ip, float *w);
-void makect(int nc, int *ip, float *c);
-void bitrv2(int n, int *ip, float *a);
-void cftfsub(int n, float *a, float *w);
-void rftfsub(int n, float *a, int nc, float *c);
-void dstsub(int n, float *a, int nc, float *c);
int j, k, l, m, mh, nw, nc;
float xr, xi, yr, yi;
@ -657,6 +637,7 @@ void dfst(int n, float *a, float *t, int *ip, float *w)
}
a[0] = 0;
}
+#endif // Not used.
/* -------- initializing routines -------- */
@ -664,17 +645,16 @@ void dfst(int n, float *a, float *t, int *ip, float *w)
#include <math.h>
-void makewt(int nw, int *ip, float *w)
+static void makewt(size_t nw, size_t *ip, float *w)
{
-void bitrv2(int n, int *ip, float *a);
-int j, nwh;
+size_t j, nwh;
float delta, x, y;
ip[0] = nw;
ip[1] = 1;
if (nw > 2) {
nwh = nw >> 1;
-delta = (float)atan(1.0f) / nwh;
+delta = atanf(1.0f) / nwh;
w[0] = 1;
w[1] = 0;
w[nwh] = (float)cos(delta * nwh);
@ -694,15 +674,15 @@ void makewt(int nw, int *ip, float *w)
}
-void makect(int nc, int *ip, float *c)
+static void makect(size_t nc, size_t *ip, float *c)
{
-int j, nch;
+size_t j, nch;
float delta;
ip[1] = nc;
if (nc > 1) {
nch = nc >> 1;
-delta = (float)atan(1.0f) / nch;
+delta = atanf(1.0f) / nch;
c[0] = (float)cos(delta * nch);
c[nch] = 0.5f * c[0];
for (j = 1; j < nch; j++) {
@ -716,9 +696,9 @@ void makect(int nc, int *ip, float *c)
/* -------- child routines -------- */
-void bitrv2(int n, int *ip, float *a)
+static void bitrv2(size_t n, size_t *ip, float *a)
{
-int j, j1, k, k1, l, m, m2;
+size_t j, j1, k, k1, l, m, m2;
float xr, xi, yr, yi;
ip[0] = 0;
@ -815,8 +795,8 @@ void bitrv2(int n, int *ip, float *a)
}
}
+#if 0 // Not used.
-void bitrv2conj(int n, int *ip, float *a)
+static void bitrv2conj(int n, int *ip, float *a)
{
int j, j1, k, k1, l, m, m2;
float xr, xi, yr, yi;
@ -923,13 +903,11 @@ void bitrv2conj(int n, int *ip, float *a)
}
}
}
+#endif
-void cftfsub(int n, float *a, float *w)
+static void cftfsub(size_t n, float *a, float *w)
{
-void cft1st(int n, float *a, float *w);
-void cftmdl(int n, int l, float *a, float *w);
-int j, j1, j2, j3, l;
+size_t j, j1, j2, j3, l;
float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
l = 2;
@ -977,11 +955,9 @@ void cftfsub(int n, float *a, float *w)
}
-void cftbsub(int n, float *a, float *w)
+static void cftbsub(size_t n, float *a, float *w)
{
-void cft1st(int n, float *a, float *w);
-void cftmdl(int n, int l, float *a, float *w);
-int j, j1, j2, j3, l;
+size_t j, j1, j2, j3, l;
float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
l = 2;
@ -1029,9 +1005,9 @@ void cftbsub(int n, float *a, float *w)
}
-void cft1st(int n, float *a, float *w)
+static void cft1st(size_t n, float *a, float *w)
{
-int j, k1, k2;
+size_t j, k1, k2;
float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
@ -1134,9 +1110,9 @@ void cft1st(int n, float *a, float *w)
}
-void cftmdl(int n, int l, float *a, float *w)
+static void cftmdl(size_t n, size_t l, float *a, float *w)
{
-int j, j1, j2, j3, k, k1, k2, m, m2;
+size_t j, j1, j2, j3, k, k1, k2, m, m2;
float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
@ -1261,9 +1237,9 @@ void cftmdl(int n, int l, float *a, float *w)
}
-void rftfsub(int n, float *a, int nc, float *c)
+static void rftfsub(size_t n, float *a, size_t nc, float *c)
{
-int j, k, kk, ks, m;
+size_t j, k, kk, ks, m;
float wkr, wki, xr, xi, yr, yi;
m = n >> 1;
@ -1286,9 +1262,9 @@ void rftfsub(int n, float *a, int nc, float *c)
}
-void rftbsub(int n, float *a, int nc, float *c)
+static void rftbsub(size_t n, float *a, size_t nc, float *c)
{
-int j, k, kk, ks, m;
+size_t j, k, kk, ks, m;
float wkr, wki, xr, xi, yr, yi;
a[1] = -a[1];
@ -1312,8 +1288,8 @@ void rftbsub(int n, float *a, int nc, float *c)
a[m + 1] = -a[m + 1];
}
+#if 0 // Not used.
-void dctsub(int n, float *a, int nc, float *c)
+static void dctsub(int n, float *a, int nc, float *c)
{
int j, k, kk, ks, m;
float wkr, wki, xr;
@ -1334,7 +1310,7 @@ void dctsub(int n, float *a, int nc, float *c)
}
-void dstsub(int n, float *a, int nc, float *c)
+static void dstsub(int n, float *a, int nc, float *c)
{
int j, k, kk, ks, m;
float wkr, wki, xr;
@ -1353,4 +1329,4 @@ void dstsub(int n, float *a, int nc, float *c)
}
a[m] *= c[0];
}
+#endif // Not used.


@ -8,11 +8,18 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_FFT4G_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_FFT4G_H_
+#ifndef WEBRTC_COMMON_AUDIO_FFT4G_H_
+#define WEBRTC_COMMON_AUDIO_FFT4G_H_
-void rdft(int, int, float *, int *, float *);
-void cdft(int, int, float *, int *, float *);
+#if defined(__cplusplus)
+extern "C" {
#endif
+// Refer to fft4g.c for documentation.
+void WebRtc_rdft(size_t n, int isgn, float *a, size_t *ip, float *w);
+#if defined(__cplusplus)
+}
+#endif
+#endif // WEBRTC_COMMON_AUDIO_FFT4G_H_
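An illustrative calling sketch for the renamed entry point (not part of the commit). The work-area sizes follow the fft4g.c documentation quoted above; the forward/inverse sign convention and the 2/n inverse scaling mirror real_fourier_ooura.cc further down.

#include <cmath>
#include <cstddef>
#include <vector>
#include "webrtc/common_audio/fft4g.h"

void ExampleRdft() {
  const size_t n = 16;  // data length: n >= 2 and a power of 2
  std::vector<float> a(n, 0.f);
  a[1] = 1.f;  // arbitrary test signal
  // ip needs at least 2 + sqrt(n/2) entries; ip[0] = 0 requests initialization
  // of the work tables on the first call. w needs at least n/2 floats.
  std::vector<size_t> ip(2 + static_cast<size_t>(std::sqrt(n / 2.0)) + 1, 0);
  std::vector<float> w(n / 2, 0.f);
  WebRtc_rdft(n, 1, a.data(), ip.data(), w.data());   // forward transform
  WebRtc_rdft(n, -1, a.data(), ip.data(), w.data());  // inverse transform
  for (size_t i = 0; i < n; ++i)
    a[i] *= 2.0f / n;  // undo the scaling applied by the inverse transform
}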


@ -0,0 +1,116 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_audio/fir_filter.h"
#include <assert.h>
#include <string.h>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/fir_filter_neon.h"
#include "webrtc/common_audio/fir_filter_sse.h"
#include "webrtc/system_wrappers/interface/cpu_features_wrapper.h"
namespace webrtc {
class FIRFilterC : public FIRFilter {
public:
FIRFilterC(const float* coefficients,
size_t coefficients_length);
void Filter(const float* in, size_t length, float* out) override;
private:
size_t coefficients_length_;
size_t state_length_;
rtc::scoped_ptr<float[]> coefficients_;
rtc::scoped_ptr<float[]> state_;
};
FIRFilter* FIRFilter::Create(const float* coefficients,
size_t coefficients_length,
size_t max_input_length) {
if (!coefficients || coefficients_length <= 0 || max_input_length <= 0) {
assert(false);
return NULL;
}
FIRFilter* filter = NULL;
// If we know the minimum architecture at compile time, avoid CPU detection.
#if defined(WEBRTC_ARCH_X86_FAMILY)
#if defined(__SSE2__)
filter =
new FIRFilterSSE2(coefficients, coefficients_length, max_input_length);
#else
// x86 CPU detection required.
if (WebRtc_GetCPUInfo(kSSE2)) {
filter =
new FIRFilterSSE2(coefficients, coefficients_length, max_input_length);
} else {
filter = new FIRFilterC(coefficients, coefficients_length);
}
#endif
#elif defined(WEBRTC_HAS_NEON)
filter =
new FIRFilterNEON(coefficients, coefficients_length, max_input_length);
#elif defined(WEBRTC_DETECT_NEON)
if (WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON) {
filter =
new FIRFilterNEON(coefficients, coefficients_length, max_input_length);
} else {
filter = new FIRFilterC(coefficients, coefficients_length);
}
#else
filter = new FIRFilterC(coefficients, coefficients_length);
#endif
return filter;
}
FIRFilterC::FIRFilterC(const float* coefficients, size_t coefficients_length)
: coefficients_length_(coefficients_length),
state_length_(coefficients_length - 1),
coefficients_(new float[coefficients_length_]),
state_(new float[state_length_]) {
for (size_t i = 0; i < coefficients_length_; ++i) {
coefficients_[i] = coefficients[coefficients_length_ - i - 1];
}
memset(state_.get(), 0, state_length_ * sizeof(state_[0]));
}
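// For illustration: a kernel {c0, c1, c2} is stored here as {c2, c1, c0}, so
// that Filter() below can walk the state + input samples and the kernel in the
// same direction, giving out[i] = c0*in[i] + c1*in[i-1] + c2*in[i-2] once
// enough history is available.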
void FIRFilterC::Filter(const float* in, size_t length, float* out) {
assert(length > 0);
// Convolves the input signal |in| with the filter kernel |coefficients_|
// taking into account the previous state.
for (size_t i = 0; i < length; ++i) {
out[i] = 0.f;
size_t j;
for (j = 0; state_length_ > i && j < state_length_ - i; ++j) {
out[i] += state_[i + j] * coefficients_[j];
}
for (; j < coefficients_length_; ++j) {
out[i] += in[j + i - state_length_] * coefficients_[j];
}
}
// Update current state.
if (length >= state_length_) {
memcpy(
state_.get(), &in[length - state_length_], state_length_ * sizeof(*in));
} else {
memmove(state_.get(),
&state_[length],
(state_length_ - length) * sizeof(state_[0]));
memcpy(&state_[state_length_ - length], in, length * sizeof(*in));
}
}
} // namespace webrtc


@ -0,0 +1,40 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_FIR_FILTER_H_
#define WEBRTC_COMMON_AUDIO_FIR_FILTER_H_
#include <string.h>
namespace webrtc {
// Finite Impulse Response filter using floating-point arithmetic.
class FIRFilter {
public:
// Creates a filter with the given coefficients. All initial state values will
// be zeros.
// The length of the chunks fed to the filter should never be greater than
// |max_input_length|. This is needed because, when vectorizing, it is
// necessary to concatenate the input after the state, and resizing this array
// dynamically is expensive.
static FIRFilter* Create(const float* coefficients,
size_t coefficients_length,
size_t max_input_length);
virtual ~FIRFilter() {}
// Filters the |in| data supplied.
// |out| must be previously allocated and must hold at least |length| elements.
virtual void Filter(const float* in, size_t length, float* out) = 0;
};
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_FIR_FILTER_H_
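A minimal usage sketch under assumed parameters (the kernel, chunk bound and names are illustrative); only Create() and Filter() come from this header.

#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/fir_filter.h"

void ExampleFirFilter(const float* in, size_t length, float* out) {
  // Hypothetical 4-tap moving-average kernel.
  const float kCoefficients[] = {0.25f, 0.25f, 0.25f, 0.25f};
  const size_t kMaxChunk = 480;  // |length| must never exceed this bound
  rtc::scoped_ptr<webrtc::FIRFilter> filter(
      webrtc::FIRFilter::Create(kCoefficients, 4, kMaxChunk));
  // |out| must already hold at least |length| elements.
  filter->Filter(in, length, out);
}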


@ -0,0 +1,72 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_audio/fir_filter_neon.h"
#include <arm_neon.h>
#include <assert.h>
#include <string.h>
#include "webrtc/system_wrappers/interface/aligned_malloc.h"
namespace webrtc {
FIRFilterNEON::FIRFilterNEON(const float* coefficients,
size_t coefficients_length,
size_t max_input_length)
: // Closest higher multiple of four.
coefficients_length_((coefficients_length + 3) & ~0x03),
state_length_(coefficients_length_ - 1),
coefficients_(static_cast<float*>(
AlignedMalloc(sizeof(float) * coefficients_length_, 16))),
state_(static_cast<float*>(
AlignedMalloc(sizeof(float) * (max_input_length + state_length_),
16))) {
// Add zeros at the end of the coefficients.
size_t padding = coefficients_length_ - coefficients_length;
memset(coefficients_.get(), 0.f, padding * sizeof(coefficients_[0]));
// The coefficients are reversed to compensate for the order in which the
// input samples are acquired (most recent last).
for (size_t i = 0; i < coefficients_length; ++i) {
coefficients_[i + padding] = coefficients[coefficients_length - i - 1];
}
memset(state_.get(),
0.f,
(max_input_length + state_length_) * sizeof(state_[0]));
}
void FIRFilterNEON::Filter(const float* in, size_t length, float* out) {
assert(length > 0);
memcpy(&state_[state_length_], in, length * sizeof(*in));
// Convolves the input signal |in| with the filter kernel |coefficients_|
// taking into account the previous state.
for (size_t i = 0; i < length; ++i) {
float* in_ptr = &state_[i];
float* coef_ptr = coefficients_.get();
float32x4_t m_sum = vmovq_n_f32(0);
float32x4_t m_in;
for (size_t j = 0; j < coefficients_length_; j += 4) {
m_in = vld1q_f32(in_ptr + j);
m_sum = vmlaq_f32(m_sum, m_in, vld1q_f32(coef_ptr + j));
}
float32x2_t m_half = vadd_f32(vget_high_f32(m_sum), vget_low_f32(m_sum));
out[i] = vget_lane_f32(vpadd_f32(m_half, m_half), 0);
}
// Update current state.
memmove(state_.get(), &state_[length], state_length_ * sizeof(state_[0]));
}
} // namespace webrtc


@ -0,0 +1,37 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_FIR_FILTER_NEON_H_
#define WEBRTC_COMMON_AUDIO_FIR_FILTER_NEON_H_
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/fir_filter.h"
#include "webrtc/system_wrappers/interface/aligned_malloc.h"
namespace webrtc {
class FIRFilterNEON : public FIRFilter {
public:
FIRFilterNEON(const float* coefficients,
size_t coefficients_length,
size_t max_input_length);
void Filter(const float* in, size_t length, float* out) override;
private:
size_t coefficients_length_;
size_t state_length_;
rtc::scoped_ptr<float[], AlignedFreeDeleter> coefficients_;
rtc::scoped_ptr<float[], AlignedFreeDeleter> state_;
};
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_FIR_FILTER_NEON_H_


@ -0,0 +1,80 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_audio/fir_filter_sse.h"
#include <assert.h>
#include <string.h>
#include <xmmintrin.h>
#include "webrtc/system_wrappers/interface/aligned_malloc.h"
namespace webrtc {
FIRFilterSSE2::FIRFilterSSE2(const float* coefficients,
size_t coefficients_length,
size_t max_input_length)
: // Closest higher multiple of four.
coefficients_length_((coefficients_length + 3) & ~0x03),
state_length_(coefficients_length_ - 1),
coefficients_(static_cast<float*>(
AlignedMalloc(sizeof(float) * coefficients_length_, 16))),
state_(static_cast<float*>(
AlignedMalloc(sizeof(float) * (max_input_length + state_length_),
16))) {
// Add zeros at the end of the coefficients.
size_t padding = coefficients_length_ - coefficients_length;
memset(coefficients_.get(), 0, padding * sizeof(coefficients_[0]));
// The coefficients are reversed to compensate for the order in which the
// input samples are acquired (most recent last).
for (size_t i = 0; i < coefficients_length; ++i) {
coefficients_[i + padding] = coefficients[coefficients_length - i - 1];
}
memset(state_.get(),
0,
(max_input_length + state_length_) * sizeof(state_[0]));
}
void FIRFilterSSE2::Filter(const float* in, size_t length, float* out) {
assert(length > 0);
memcpy(&state_[state_length_], in, length * sizeof(*in));
// Convolves the input signal |in| with the filter kernel |coefficients_|
// taking into account the previous state.
for (size_t i = 0; i < length; ++i) {
float* in_ptr = &state_[i];
float* coef_ptr = coefficients_.get();
__m128 m_sum = _mm_setzero_ps();
__m128 m_in;
// Depending on whether the pointer is 16-byte aligned or not, it is loaded
// differently.
if (reinterpret_cast<uintptr_t>(in_ptr) & 0x0F) {
for (size_t j = 0; j < coefficients_length_; j += 4) {
m_in = _mm_loadu_ps(in_ptr + j);
m_sum = _mm_add_ps(m_sum, _mm_mul_ps(m_in, _mm_load_ps(coef_ptr + j)));
}
} else {
for (size_t j = 0; j < coefficients_length_; j += 4) {
m_in = _mm_load_ps(in_ptr + j);
m_sum = _mm_add_ps(m_sum, _mm_mul_ps(m_in, _mm_load_ps(coef_ptr + j)));
}
}
m_sum = _mm_add_ps(_mm_movehl_ps(m_sum, m_sum), m_sum);
_mm_store_ss(out + i, _mm_add_ss(m_sum, _mm_shuffle_ps(m_sum, m_sum, 1)));
}
// Update current state.
memmove(state_.get(), &state_[length], state_length_ * sizeof(state_[0]));
}
} // namespace webrtc


@ -0,0 +1,37 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_FIR_FILTER_SSE_H_
#define WEBRTC_COMMON_AUDIO_FIR_FILTER_SSE_H_
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/fir_filter.h"
#include "webrtc/system_wrappers/interface/aligned_malloc.h"
namespace webrtc {
class FIRFilterSSE2 : public FIRFilter {
public:
FIRFilterSSE2(const float* coefficients,
size_t coefficients_length,
size_t max_input_length);
void Filter(const float* in, size_t length, float* out) override;
private:
size_t coefficients_length_;
size_t state_length_;
rtc::scoped_ptr<float[], AlignedFreeDeleter> coefficients_;
rtc::scoped_ptr<float[], AlignedFreeDeleter> state_;
};
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_FIR_FILTER_SSE_H_


@ -0,0 +1,188 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
#define WEBRTC_COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
#include <limits>
#include <cstring>
#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/typedefs.h"
namespace webrtc {
typedef std::numeric_limits<int16_t> limits_int16;
// The conversion functions use the following naming convention:
// S16: int16_t [-32768, 32767]
// Float: float [-1.0, 1.0]
// FloatS16: float [-32768.0, 32767.0]
static inline int16_t FloatToS16(float v) {
if (v > 0)
return v >= 1 ? limits_int16::max()
: static_cast<int16_t>(v * limits_int16::max() + 0.5f);
return v <= -1 ? limits_int16::min()
: static_cast<int16_t>(-v * limits_int16::min() - 0.5f);
}
static inline float S16ToFloat(int16_t v) {
static const float kMaxInt16Inverse = 1.f / limits_int16::max();
static const float kMinInt16Inverse = 1.f / limits_int16::min();
return v * (v > 0 ? kMaxInt16Inverse : -kMinInt16Inverse);
}
static inline int16_t FloatS16ToS16(float v) {
static const float kMaxRound = limits_int16::max() - 0.5f;
static const float kMinRound = limits_int16::min() + 0.5f;
if (v > 0)
return v >= kMaxRound ? limits_int16::max()
: static_cast<int16_t>(v + 0.5f);
return v <= kMinRound ? limits_int16::min() : static_cast<int16_t>(v - 0.5f);
}
static inline float FloatToFloatS16(float v) {
return v * (v > 0 ? limits_int16::max() : -limits_int16::min());
}
static inline float FloatS16ToFloat(float v) {
static const float kMaxInt16Inverse = 1.f / limits_int16::max();
static const float kMinInt16Inverse = 1.f / limits_int16::min();
return v * (v > 0 ? kMaxInt16Inverse : -kMinInt16Inverse);
}
void FloatToS16(const float* src, size_t size, int16_t* dest);
void S16ToFloat(const int16_t* src, size_t size, float* dest);
void FloatS16ToS16(const float* src, size_t size, int16_t* dest);
void FloatToFloatS16(const float* src, size_t size, float* dest);
void FloatS16ToFloat(const float* src, size_t size, float* dest);
// Copy audio from |src| channels to |dest| channels unless |src| and |dest|
// point to the same address. |src| and |dest| must have the same number of
// channels, and there must be sufficient space allocated in |dest|.
template <typename T>
void CopyAudioIfNeeded(const T* const* src,
int num_frames,
int num_channels,
T* const* dest) {
for (int i = 0; i < num_channels; ++i) {
if (src[i] != dest[i]) {
std::copy(src[i], src[i] + num_frames, dest[i]);
}
}
}
// Deinterleave audio from |interleaved| to the channel buffers pointed to
// by |deinterleaved|. There must be sufficient space allocated in the
// |deinterleaved| buffers (|num_channels| buffers with |samples_per_channel| samples
// per buffer).
template <typename T>
void Deinterleave(const T* interleaved,
size_t samples_per_channel,
int num_channels,
T* const* deinterleaved) {
for (int i = 0; i < num_channels; ++i) {
T* channel = deinterleaved[i];
int interleaved_idx = i;
for (size_t j = 0; j < samples_per_channel; ++j) {
channel[j] = interleaved[interleaved_idx];
interleaved_idx += num_channels;
}
}
}
// Interleave audio from the channel buffers pointed to by |deinterleaved| to
// |interleaved|. There must be sufficient space allocated in |interleaved|
// (|samples_per_channel| * |num_channels|).
template <typename T>
void Interleave(const T* const* deinterleaved,
size_t samples_per_channel,
int num_channels,
T* interleaved) {
for (int i = 0; i < num_channels; ++i) {
const T* channel = deinterleaved[i];
int interleaved_idx = i;
for (size_t j = 0; j < samples_per_channel; ++j) {
interleaved[interleaved_idx] = channel[j];
interleaved_idx += num_channels;
}
}
}
// Copies audio from a single channel buffer pointed to by |mono| to each
// channel of |interleaved|. There must be sufficient space allocated in
// |interleaved| (|samples_per_channel| * |num_channels|).
template <typename T>
void UpmixMonoToInterleaved(const T* mono,
int num_frames,
int num_channels,
T* interleaved) {
int interleaved_idx = 0;
for (int i = 0; i < num_frames; ++i) {
for (int j = 0; j < num_channels; ++j) {
interleaved[interleaved_idx++] = mono[i];
}
}
}
template <typename T, typename Intermediate>
void DownmixToMono(const T* const* input_channels,
size_t num_frames,
int num_channels,
T* out) {
for (size_t i = 0; i < num_frames; ++i) {
Intermediate value = input_channels[0][i];
for (int j = 1; j < num_channels; ++j) {
value += input_channels[j][i];
}
out[i] = value / num_channels;
}
}
// Downmixes an interleaved multichannel signal to a single channel by averaging
// all channels.
template <typename T, typename Intermediate>
void DownmixInterleavedToMonoImpl(const T* interleaved,
size_t num_frames,
int num_channels,
T* deinterleaved) {
RTC_DCHECK_GT(num_channels, 0);
RTC_DCHECK_GT(num_frames, 0u);
const T* const end = interleaved + num_frames * num_channels;
while (interleaved < end) {
const T* const frame_end = interleaved + num_channels;
Intermediate value = *interleaved++;
while (interleaved < frame_end) {
value += *interleaved++;
}
*deinterleaved++ = value / num_channels;
}
}
template <typename T>
void DownmixInterleavedToMono(const T* interleaved,
size_t num_frames,
int num_channels,
T* deinterleaved);
template <>
void DownmixInterleavedToMono<int16_t>(const int16_t* interleaved,
size_t num_frames,
int num_channels,
int16_t* deinterleaved);
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
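A short sketch (not from the commit) exercising the scalar conversions and Deinterleave(); the sample values are arbitrary and the expected results follow directly from the definitions above.

#include <vector>
#include "webrtc/common_audio/include/audio_util.h"

void ExampleAudioUtil() {
  // Scalar conversions between the three representations listed above.
  int16_t a = webrtc::FloatToS16(0.5f);        // 16384
  float b = webrtc::S16ToFloat(-32768);        // -1.0f
  int16_t c = webrtc::FloatS16ToS16(40000.f);  // clamped to 32767

  // Deinterleave an interleaved stereo int16_t buffer into per-channel arrays.
  const int16_t interleaved[] = {10, -10, 20, -20, 30, -30};  // L R L R L R
  const size_t kSamplesPerChannel = 3;
  std::vector<int16_t> left(kSamplesPerChannel);
  std::vector<int16_t> right(kSamplesPerChannel);
  int16_t* channels[] = {left.data(), right.data()};
  webrtc::Deinterleave(interleaved, kSamplesPerChannel, 2, channels);
  // left == {10, 20, 30}, right == {-10, -20, -30}.
  static_cast<void>(a);
  static_cast<void>(b);
  static_cast<void>(c);
}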


@ -0,0 +1,101 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_audio/lapped_transform.h"
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include "webrtc/base/checks.h"
#include "webrtc/common_audio/real_fourier.h"
namespace webrtc {
void LappedTransform::BlockThunk::ProcessBlock(const float* const* input,
size_t num_frames,
int num_input_channels,
int num_output_channels,
float* const* output) {
RTC_CHECK_EQ(num_input_channels, parent_->num_in_channels_);
RTC_CHECK_EQ(num_output_channels, parent_->num_out_channels_);
RTC_CHECK_EQ(parent_->block_length_, num_frames);
for (int i = 0; i < num_input_channels; ++i) {
memcpy(parent_->real_buf_.Row(i), input[i],
num_frames * sizeof(*input[0]));
parent_->fft_->Forward(parent_->real_buf_.Row(i),
parent_->cplx_pre_.Row(i));
}
size_t block_length = RealFourier::ComplexLength(
RealFourier::FftOrder(num_frames));
RTC_CHECK_EQ(parent_->cplx_length_, block_length);
parent_->block_processor_->ProcessAudioBlock(parent_->cplx_pre_.Array(),
num_input_channels,
parent_->cplx_length_,
num_output_channels,
parent_->cplx_post_.Array());
for (int i = 0; i < num_output_channels; ++i) {
parent_->fft_->Inverse(parent_->cplx_post_.Row(i),
parent_->real_buf_.Row(i));
memcpy(output[i], parent_->real_buf_.Row(i),
num_frames * sizeof(*input[0]));
}
}
LappedTransform::LappedTransform(int num_in_channels,
int num_out_channels,
size_t chunk_length,
const float* window,
size_t block_length,
size_t shift_amount,
Callback* callback)
: blocker_callback_(this),
num_in_channels_(num_in_channels),
num_out_channels_(num_out_channels),
block_length_(block_length),
chunk_length_(chunk_length),
block_processor_(callback),
blocker_(chunk_length_,
block_length_,
num_in_channels_,
num_out_channels_,
window,
shift_amount,
&blocker_callback_),
fft_(RealFourier::Create(RealFourier::FftOrder(block_length_))),
cplx_length_(RealFourier::ComplexLength(fft_->order())),
real_buf_(num_in_channels,
block_length_,
RealFourier::kFftBufferAlignment),
cplx_pre_(num_in_channels,
cplx_length_,
RealFourier::kFftBufferAlignment),
cplx_post_(num_out_channels,
cplx_length_,
RealFourier::kFftBufferAlignment) {
RTC_CHECK(num_in_channels_ > 0 && num_out_channels_ > 0);
RTC_CHECK_GT(block_length_, 0u);
RTC_CHECK_GT(chunk_length_, 0u);
RTC_CHECK(block_processor_);
// block_length_ must be a power of 2.
RTC_CHECK_EQ(0u, block_length_ & (block_length_ - 1));
}
void LappedTransform::ProcessChunk(const float* const* in_chunk,
float* const* out_chunk) {
blocker_.ProcessChunk(in_chunk, chunk_length_, num_in_channels_,
num_out_channels_, out_chunk);
}
} // namespace webrtc


@ -0,0 +1,123 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_LAPPED_TRANSFORM_H_
#define WEBRTC_COMMON_AUDIO_LAPPED_TRANSFORM_H_
#include <complex>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/blocker.h"
#include "webrtc/common_audio/real_fourier.h"
#include "webrtc/system_wrappers/interface/aligned_array.h"
namespace webrtc {
// Helper class for audio processing modules which operate on frequency domain
// input derived from the windowed time domain audio stream.
//
// The input audio chunk is sliced into possibly overlapping blocks, multiplied
// by a window and transformed with an FFT implementation. The transformed data
// is supplied to the given callback for processing. The processed output is
// then inverse transformed into the time domain and spliced back into a chunk
// which constitutes the final output of this processing module.
class LappedTransform {
public:
class Callback {
public:
virtual ~Callback() {}
virtual void ProcessAudioBlock(const std::complex<float>* const* in_block,
int num_in_channels, size_t frames,
int num_out_channels,
std::complex<float>* const* out_block) = 0;
};
// Construct a transform instance. |chunk_length| is the number of samples in
// each channel. |window| defines the window, owned by the caller (a copy is
// made internally); |window| should have length equal to |block_length|.
// |block_length| defines the length of a block, in samples.
// |shift_amount| is in samples. |callback| is the caller-owned audio
// processing function called for each block of the input chunk.
LappedTransform(int num_in_channels,
int num_out_channels,
size_t chunk_length,
const float* window,
size_t block_length,
size_t shift_amount,
Callback* callback);
~LappedTransform() {}
// Main audio processing helper method. Internally slices |in_chunk| into
// blocks, transforms them to frequency domain, calls the callback for each
// block and returns a de-blocked time domain chunk of audio through
// |out_chunk|. Both buffers are caller-owned.
void ProcessChunk(const float* const* in_chunk, float* const* out_chunk);
// Get the chunk length.
//
// The chunk length is the number of samples per channel that must be passed
// to ProcessChunk via the parameter in_chunk.
//
// Returns the same chunk_length passed to the LappedTransform constructor.
size_t chunk_length() const { return chunk_length_; }
// Get the number of input channels.
//
// This is the number of arrays that must be passed to ProcessChunk via
// in_chunk.
//
// Returns the same num_in_channels passed to the LappedTransform constructor.
int num_in_channels() const { return num_in_channels_; }
// Get the number of output channels.
//
// This is the number of arrays that must be passed to ProcessChunk via
// out_chunk.
//
// Returns the same num_out_channels passed to the LappedTransform
// constructor.
int num_out_channels() const { return num_out_channels_; }
private:
// Internal middleware callback, given to the blocker. Transforms each block
// and hands it over to the processing method given at construction time.
class BlockThunk : public BlockerCallback {
public:
explicit BlockThunk(LappedTransform* parent) : parent_(parent) {}
virtual void ProcessBlock(const float* const* input, size_t num_frames,
int num_input_channels, int num_output_channels,
float* const* output);
private:
LappedTransform* const parent_;
} blocker_callback_;
const int num_in_channels_;
const int num_out_channels_;
const size_t block_length_;
const size_t chunk_length_;
Callback* const block_processor_;
Blocker blocker_;
rtc::scoped_ptr<RealFourier> fft_;
const size_t cplx_length_;
AlignedArray<float> real_buf_;
AlignedArray<std::complex<float> > cplx_pre_;
AlignedArray<std::complex<float> > cplx_post_;
};
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_LAPPED_TRANSFORM_H_
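A usage sketch under assumed parameters (the identity callback, channel counts, sizes and the externally supplied Hann window are illustrative); the Callback signature and constructor arguments are taken from this header.

#include <algorithm>
#include <complex>
#include "webrtc/common_audio/lapped_transform.h"

// Hypothetical callback: copies the input spectrum to the output unchanged.
// A real processor would modify the frequency bins here.
class IdentityCallback : public webrtc::LappedTransform::Callback {
 public:
  void ProcessAudioBlock(const std::complex<float>* const* in_block,
                         int num_in_channels, size_t frames,
                         int num_out_channels,
                         std::complex<float>* const* out_block) override {
    for (int c = 0; c < num_out_channels; ++c)
      std::copy(in_block[c], in_block[c] + frames, out_block[c]);
  }
};

void ExampleLappedTransform(const float* const* in, float* const* out,
                            const float* hann_window /* kBlockLength taps */) {
  const size_t kChunkLength = 480;  // samples per channel per ProcessChunk()
  const size_t kBlockLength = 256;  // must be a power of 2
  IdentityCallback callback;
  webrtc::LappedTransform transform(1, 1, kChunkLength, hann_window,
                                    kBlockLength, kBlockLength / 2, &callback);
  transform.ProcessChunk(in, out);  // 1 channel of kChunkLength samples in/out
}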


@ -0,0 +1,57 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_audio/real_fourier.h"
#include "webrtc/base/checks.h"
#include "webrtc/common_audio/real_fourier_ooura.h"
#include "webrtc/common_audio/real_fourier_openmax.h"
#include "webrtc/common_audio/signal_processing/include/spl_inl.h"
namespace webrtc {
using std::complex;
const int RealFourier::kFftBufferAlignment = 32;
rtc::scoped_ptr<RealFourier> RealFourier::Create(int fft_order) {
#if defined(RTC_USE_OPENMAX_DL)
return rtc::scoped_ptr<RealFourier>(new RealFourierOpenmax(fft_order));
#else
return rtc::scoped_ptr<RealFourier>(new RealFourierOoura(fft_order));
#endif
}
int RealFourier::FftOrder(size_t length) {
RTC_CHECK_GT(length, 0U);
return WebRtcSpl_GetSizeInBits(static_cast<uint32_t>(length - 1));
}
size_t RealFourier::FftLength(int order) {
RTC_CHECK_GE(order, 0);
return static_cast<size_t>(1 << order);
}
size_t RealFourier::ComplexLength(int order) {
return FftLength(order) / 2 + 1;
}
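// Worked example (illustrative): for a 960-sample chunk, FftOrder(960) == 10,
// since 2^10 = 1024 is the smallest power of 2 that is >= 960; then
// FftLength(10) == 1024 and ComplexLength(10) == 513 complex bins.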
RealFourier::fft_real_scoper RealFourier::AllocRealBuffer(int count) {
return fft_real_scoper(static_cast<float*>(
AlignedMalloc(sizeof(float) * count, kFftBufferAlignment)));
}
RealFourier::fft_cplx_scoper RealFourier::AllocCplxBuffer(int count) {
return fft_cplx_scoper(static_cast<complex<float>*>(
AlignedMalloc(sizeof(complex<float>) * count, kFftBufferAlignment)));
}
} // namespace webrtc


@ -0,0 +1,75 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_REAL_FOURIER_H_
#define WEBRTC_COMMON_AUDIO_REAL_FOURIER_H_
#include <complex>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/system_wrappers/interface/aligned_malloc.h"
// Uniform interface class for the real DFT and its inverse, for power-of-2
// input lengths. Also contains helper functions for buffer allocation, taking
// care of any memory alignment requirements the underlying library might have.
namespace webrtc {
class RealFourier {
public:
// Shorthand typenames for the scopers used by the buffer allocation helpers.
typedef rtc::scoped_ptr<float[], AlignedFreeDeleter> fft_real_scoper;
typedef rtc::scoped_ptr<std::complex<float>[], AlignedFreeDeleter>
fft_cplx_scoper;
// The alignment required for all input and output buffers, in bytes.
static const int kFftBufferAlignment;
// Construct a wrapper instance for the given input order, which must be
// between 1 and kMaxFftOrder, inclusive.
static rtc::scoped_ptr<RealFourier> Create(int fft_order);
virtual ~RealFourier() {}
// Helper to compute the smallest FFT order (a power of 2) which will contain
// the given input length.
static int FftOrder(size_t length);
// Helper to compute the input length from the FFT order.
static size_t FftLength(int order);
// Helper to compute the exact length, in complex floats, of the transform
// output (i.e. |2^order / 2 + 1|).
static size_t ComplexLength(int order);
// Buffer allocation helpers. The buffers are large enough to hold |count|
// floats/complexes and suitably aligned for use by the implementation.
// The returned scopers are set up with proper deleters; the caller owns
// the allocated memory.
static fft_real_scoper AllocRealBuffer(int count);
static fft_cplx_scoper AllocCplxBuffer(int count);
// Main forward transform interface. The output array need only be big
// enough for |2^order / 2 + 1| elements - the conjugate pairs are not
// returned. Input and output must be properly aligned (e.g. through
// AllocRealBuffer and AllocCplxBuffer) and input length must be
// |2^order| (same as given at construction time).
virtual void Forward(const float* src, std::complex<float>* dest) const = 0;
// Inverse transform. Same input format as output above, conjugate pairs
// not needed.
virtual void Inverse(const std::complex<float>* src, float* dest) const = 0;
virtual int order() const = 0;
};
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_REAL_FOURIER_H_
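A minimal allocation-and-transform sketch (not from the commit); the 300-sample block length is an arbitrary assumption, everything else follows the interface above.

#include <cstring>
#include "webrtc/common_audio/real_fourier.h"

void ExampleRealFourier() {
  const int order = webrtc::RealFourier::FftOrder(300);                  // 9
  const size_t fft_length = webrtc::RealFourier::FftLength(order);       // 512
  const size_t cplx_length = webrtc::RealFourier::ComplexLength(order);  // 257
  rtc::scoped_ptr<webrtc::RealFourier> fft(webrtc::RealFourier::Create(order));
  // Buffers aligned to kFftBufferAlignment, as the implementation may require.
  webrtc::RealFourier::fft_real_scoper real =
      webrtc::RealFourier::AllocRealBuffer(static_cast<int>(fft_length));
  webrtc::RealFourier::fft_cplx_scoper cplx =
      webrtc::RealFourier::AllocCplxBuffer(static_cast<int>(cplx_length));
  memset(real.get(), 0, fft_length * sizeof(float));
  real.get()[0] = 1.f;                   // arbitrary 2^order-sample input
  fft->Forward(real.get(), cplx.get());  // cplx_length bins out
  fft->Inverse(cplx.get(), real.get());  // back to fft_length samples
}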


@ -0,0 +1,85 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_audio/real_fourier_ooura.h"
#include <cmath>
#include <algorithm>
#include "webrtc/base/checks.h"
#include "webrtc/common_audio/fft4g.h"
namespace webrtc {
using std::complex;
namespace {
void Conjugate(complex<float>* array, size_t complex_length) {
std::for_each(array, array + complex_length,
[=](complex<float>& v) { v = std::conj(v); });
}
size_t ComputeWorkIpSize(size_t fft_length) {
return static_cast<size_t>(2 + std::ceil(std::sqrt(
static_cast<float>(fft_length))));
}
} // namespace
RealFourierOoura::RealFourierOoura(int fft_order)
: order_(fft_order),
length_(FftLength(order_)),
complex_length_(ComplexLength(order_)),
// Zero-initializing work_ip_ will cause rdft to initialize these work
// arrays on the first call.
work_ip_(new size_t[ComputeWorkIpSize(length_)]()),
work_w_(new float[complex_length_]()) {
RTC_CHECK_GE(fft_order, 1);
}
void RealFourierOoura::Forward(const float* src, complex<float>* dest) const {
{
// This cast is well-defined since C++11. See "Non-static data members" at:
// http://en.cppreference.com/w/cpp/numeric/complex
auto dest_float = reinterpret_cast<float*>(dest);
std::copy(src, src + length_, dest_float);
WebRtc_rdft(length_, 1, dest_float, work_ip_.get(), work_w_.get());
}
// Ooura places real[n/2] in imag[0].
dest[complex_length_ - 1] = complex<float>(dest[0].imag(), 0.0f);
dest[0] = complex<float>(dest[0].real(), 0.0f);
// Ooura returns the conjugate of the usual Fourier definition.
Conjugate(dest, complex_length_);
}
void RealFourierOoura::Inverse(const complex<float>* src, float* dest) const {
{
auto dest_complex = reinterpret_cast<complex<float>*>(dest);
// The real output array is shorter than the input complex array by one
// complex element.
const size_t dest_complex_length = complex_length_ - 1;
std::copy(src, src + dest_complex_length, dest_complex);
// Restore Ooura's conjugate definition.
Conjugate(dest_complex, dest_complex_length);
// Restore real[n/2] to imag[0].
dest_complex[0] = complex<float>(dest_complex[0].real(),
src[complex_length_ - 1].real());
}
WebRtc_rdft(length_, -1, dest, work_ip_.get(), work_w_.get());
// Ooura returns a scaled version.
const float scale = 2.0f / length_;
std::for_each(dest, dest + length_, [scale](float& v) { v *= scale; });
}
} // namespace webrtc


@ -0,0 +1,45 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_REAL_FOURIER_OOURA_H_
#define WEBRTC_COMMON_AUDIO_REAL_FOURIER_OOURA_H_
#include <complex>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/real_fourier.h"
namespace webrtc {
class RealFourierOoura : public RealFourier {
public:
explicit RealFourierOoura(int fft_order);
void Forward(const float* src, std::complex<float>* dest) const override;
void Inverse(const std::complex<float>* src, float* dest) const override;
int order() const override {
return order_;
}
private:
const int order_;
const size_t length_;
const size_t complex_length_;
// These are work arrays for Ooura. The names are based on the comments in
// fft4g.c.
const rtc::scoped_ptr<size_t[]> work_ip_;
const rtc::scoped_ptr<float[]> work_w_;
};
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_REAL_FOURIER_OOURA_H_

View File

@ -0,0 +1,44 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_REAL_FOURIER_OPENMAX_H_
#define WEBRTC_COMMON_AUDIO_REAL_FOURIER_OPENMAX_H_
#include <complex>
#include "webrtc/common_audio/real_fourier.h"
namespace webrtc {
class RealFourierOpenmax : public RealFourier {
public:
explicit RealFourierOpenmax(int fft_order);
~RealFourierOpenmax() override;
void Forward(const float* src, std::complex<float>* dest) const override;
void Inverse(const std::complex<float>* src, float* dest) const override;
int order() const override {
return order_;
}
private:
// Effectively a forward declaration of OMXFFTSpec_R_F32, used to avoid a
// dependency on the OpenMAX headers.
typedef void OMXFFTSpec_R_F32_;
const int order_;
OMXFFTSpec_R_F32_* const omx_spec_;
};
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_REAL_FOURIER_OPENMAX_H_

View File

@ -0,0 +1,52 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_RESAMPLER_INCLUDE_PUSH_RESAMPLER_H_
#define WEBRTC_COMMON_AUDIO_RESAMPLER_INCLUDE_PUSH_RESAMPLER_H_
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class PushSincResampler;
// Wraps PushSincResampler to provide stereo support.
// TODO(ajm): add support for an arbitrary number of channels.
template <typename T>
class PushResampler {
public:
PushResampler();
virtual ~PushResampler();
// Must be called whenever the parameters change. Free to be called at any
// time as it is a no-op if parameters have not changed since the last call.
int InitializeIfNeeded(int src_sample_rate_hz, int dst_sample_rate_hz,
int num_channels);
// Returns the total number of samples provided in destination (e.g. 32 kHz,
// 2 channel audio gives 640 samples).
int Resample(const T* src, size_t src_length, T* dst, size_t dst_capacity);
private:
rtc::scoped_ptr<PushSincResampler> sinc_resampler_;
rtc::scoped_ptr<PushSincResampler> sinc_resampler_right_;
int src_sample_rate_hz_;
int dst_sample_rate_hz_;
int num_channels_;
rtc::scoped_ptr<T[]> src_left_;
rtc::scoped_ptr<T[]> src_right_;
rtc::scoped_ptr<T[]> dst_left_;
rtc::scoped_ptr<T[]> dst_right_;
};
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_RESAMPLER_INCLUDE_PUSH_RESAMPLER_H_
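
A minimal usage sketch of this interface (editor's illustration, not part of the upstream diff), resampling one 10 ms block of stereo audio from 32 kHz to 48 kHz:

#include "webrtc/common_audio/resampler/include/push_resampler.h"

void PushResamplerStereoExample() {
  webrtc::PushResampler<int16_t> resampler;
  // 10 ms of stereo audio: 32000 * 0.01 * 2 = 640 samples in,
  // 48000 * 0.01 * 2 = 960 samples out.
  if (resampler.InitializeIfNeeded(32000, 48000, 2) != 0)
    return;
  int16_t in[640] = {0};
  int16_t out[960];
  int produced = resampler.Resample(in, 640, out, 960);
  // produced is 960 on success, or -1 on a length mismatch.
  (void)produced;
}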

View File

@ -0,0 +1,95 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* A wrapper for resampling between a large number of sampling rate combinations.
*/
#ifndef WEBRTC_RESAMPLER_RESAMPLER_H_
#define WEBRTC_RESAMPLER_RESAMPLER_H_
#include <stddef.h>
#include "webrtc/typedefs.h"
namespace webrtc {
// All methods return 0 on success and -1 on failure.
class Resampler
{
public:
Resampler();
Resampler(int inFreq, int outFreq, int num_channels);
~Resampler();
// Reset all states
int Reset(int inFreq, int outFreq, int num_channels);
// Reset all states if any parameter has changed
int ResetIfNeeded(int inFreq, int outFreq, int num_channels);
// Resample samplesIn to samplesOut.
int Push(const int16_t* samplesIn, size_t lengthIn, int16_t* samplesOut,
size_t maxLen, size_t &outLen);
private:
enum ResamplerMode
{
kResamplerMode1To1,
kResamplerMode1To2,
kResamplerMode1To3,
kResamplerMode1To4,
kResamplerMode1To6,
kResamplerMode1To12,
kResamplerMode2To3,
kResamplerMode2To11,
kResamplerMode4To11,
kResamplerMode8To11,
kResamplerMode11To16,
kResamplerMode11To32,
kResamplerMode2To1,
kResamplerMode3To1,
kResamplerMode4To1,
kResamplerMode6To1,
kResamplerMode12To1,
kResamplerMode3To2,
kResamplerMode11To2,
kResamplerMode11To4,
kResamplerMode11To8
};
// Generic pointers since we don't know what states we'll need
void* state1_;
void* state2_;
void* state3_;
// Storage if needed
int16_t* in_buffer_;
int16_t* out_buffer_;
size_t in_buffer_size_;
size_t out_buffer_size_;
size_t in_buffer_size_max_;
size_t out_buffer_size_max_;
int my_in_frequency_khz_;
int my_out_frequency_khz_;
ResamplerMode my_mode_;
int num_channels_;
// Extra instance for stereo
Resampler* slave_left_;
Resampler* slave_right_;
};
} // namespace webrtc
#endif // WEBRTC_RESAMPLER_RESAMPLER_H_
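
A brief usage sketch of Push() (editor's illustration, not part of the upstream diff), downsampling one 10 ms block of mono 48 kHz audio to 16 kHz:

#include "webrtc/common_audio/resampler/include/resampler.h"

void ResamplerPushExample() {
  webrtc::Resampler resampler(48000, 16000, 1);  // Selects kResamplerMode3To1.
  int16_t in[480] = {0};                         // 10 ms of mono 48 kHz audio.
  int16_t out[160];
  size_t out_len = 0;
  int err = resampler.Push(in, 480, out, 160, out_len);
  // On success err == 0 and out_len == 160; this mode requires the input
  // length to be a multiple of 480 samples.
  (void)err;
}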

View File

@ -0,0 +1,110 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_audio/resampler/include/push_resampler.h"
#include <string.h>
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/resampler/include/resampler.h"
#include "webrtc/common_audio/resampler/push_sinc_resampler.h"
namespace webrtc {
template <typename T>
PushResampler<T>::PushResampler()
: src_sample_rate_hz_(0),
dst_sample_rate_hz_(0),
num_channels_(0) {
}
template <typename T>
PushResampler<T>::~PushResampler() {
}
template <typename T>
int PushResampler<T>::InitializeIfNeeded(int src_sample_rate_hz,
int dst_sample_rate_hz,
int num_channels) {
if (src_sample_rate_hz == src_sample_rate_hz_ &&
dst_sample_rate_hz == dst_sample_rate_hz_ &&
num_channels == num_channels_)
// No-op if settings haven't changed.
return 0;
if (src_sample_rate_hz <= 0 || dst_sample_rate_hz <= 0 ||
num_channels <= 0 || num_channels > 2)
return -1;
src_sample_rate_hz_ = src_sample_rate_hz;
dst_sample_rate_hz_ = dst_sample_rate_hz;
num_channels_ = num_channels;
const size_t src_size_10ms_mono =
static_cast<size_t>(src_sample_rate_hz / 100);
const size_t dst_size_10ms_mono =
static_cast<size_t>(dst_sample_rate_hz / 100);
sinc_resampler_.reset(new PushSincResampler(src_size_10ms_mono,
dst_size_10ms_mono));
if (num_channels_ == 2) {
src_left_.reset(new T[src_size_10ms_mono]);
src_right_.reset(new T[src_size_10ms_mono]);
dst_left_.reset(new T[dst_size_10ms_mono]);
dst_right_.reset(new T[dst_size_10ms_mono]);
sinc_resampler_right_.reset(new PushSincResampler(src_size_10ms_mono,
dst_size_10ms_mono));
}
return 0;
}
template <typename T>
int PushResampler<T>::Resample(const T* src, size_t src_length, T* dst,
size_t dst_capacity) {
const size_t src_size_10ms =
static_cast<size_t>(src_sample_rate_hz_ * num_channels_ / 100);
const size_t dst_size_10ms =
static_cast<size_t>(dst_sample_rate_hz_ * num_channels_ / 100);
if (src_length != src_size_10ms || dst_capacity < dst_size_10ms)
return -1;
if (src_sample_rate_hz_ == dst_sample_rate_hz_) {
// The old resampler provides this memcpy facility in the case of matching
// sample rates, so reproduce it here for the sinc resampler.
memcpy(dst, src, src_length * sizeof(T));
return static_cast<int>(src_length);
}
if (num_channels_ == 2) {
const size_t src_length_mono = src_length / num_channels_;
const size_t dst_capacity_mono = dst_capacity / num_channels_;
T* deinterleaved[] = {src_left_.get(), src_right_.get()};
Deinterleave(src, src_length_mono, num_channels_, deinterleaved);
size_t dst_length_mono =
sinc_resampler_->Resample(src_left_.get(), src_length_mono,
dst_left_.get(), dst_capacity_mono);
sinc_resampler_right_->Resample(src_right_.get(), src_length_mono,
dst_right_.get(), dst_capacity_mono);
deinterleaved[0] = dst_left_.get();
deinterleaved[1] = dst_right_.get();
Interleave(deinterleaved, dst_length_mono, num_channels_, dst);
return static_cast<int>(dst_length_mono * num_channels_);
} else {
return static_cast<int>(
sinc_resampler_->Resample(src, src_length, dst, dst_capacity));
}
}
// Explicitly generate required instantiations.
template class PushResampler<int16_t>;
template class PushResampler<float>;
} // namespace webrtc

View File

@ -0,0 +1,103 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_audio/resampler/push_sinc_resampler.h"
#include <cstring>
#include "webrtc/base/checks.h"
#include "webrtc/common_audio/include/audio_util.h"
namespace webrtc {
PushSincResampler::PushSincResampler(size_t source_frames,
size_t destination_frames)
: resampler_(new SincResampler(source_frames * 1.0 / destination_frames,
source_frames,
this)),
source_ptr_(nullptr),
source_ptr_int_(nullptr),
destination_frames_(destination_frames),
first_pass_(true),
source_available_(0) {}
PushSincResampler::~PushSincResampler() {
}
size_t PushSincResampler::Resample(const int16_t* source,
size_t source_length,
int16_t* destination,
size_t destination_capacity) {
if (!float_buffer_.get())
float_buffer_.reset(new float[destination_frames_]);
source_ptr_int_ = source;
// Pass nullptr as the float source to have Run() read from the int16 source.
Resample(nullptr, source_length, float_buffer_.get(), destination_frames_);
FloatS16ToS16(float_buffer_.get(), destination_frames_, destination);
source_ptr_int_ = nullptr;
return destination_frames_;
}
size_t PushSincResampler::Resample(const float* source,
size_t source_length,
float* destination,
size_t destination_capacity) {
RTC_CHECK_EQ(source_length, resampler_->request_frames());
RTC_CHECK_GE(destination_capacity, destination_frames_);
// Cache the source pointer. Calling Resample() will immediately trigger
// the Run() callback whereupon we provide the cached value.
source_ptr_ = source;
source_available_ = source_length;
// On the first pass, we call Resample() twice. During the first call, we
// provide dummy input and discard the output. This is done to prime the
// SincResampler buffer with the correct delay (half the kernel size), thereby
// ensuring that all later Resample() calls will only result in one input
// request through Run().
//
// If this wasn't done, SincResampler would call Run() twice on the first
// pass, and we'd have to introduce an entire |source_frames| of delay, rather
// than the minimum half kernel.
//
// It works out that ChunkSize() is exactly the amount of output we need to
// request in order to prime the buffer with a single Run() request for
// |source_frames|.
if (first_pass_)
resampler_->Resample(resampler_->ChunkSize(), destination);
resampler_->Resample(destination_frames_, destination);
source_ptr_ = nullptr;
return destination_frames_;
}
void PushSincResampler::Run(size_t frames, float* destination) {
// Ensure we are only asked for the available samples. This would fail if
// Run() was triggered more than once per Resample() call.
RTC_CHECK_EQ(source_available_, frames);
if (first_pass_) {
// Provide dummy input on the first pass, the output of which will be
// discarded, as described in Resample().
std::memset(destination, 0, frames * sizeof(*destination));
first_pass_ = false;
return;
}
if (source_ptr_) {
std::memcpy(destination, source_ptr_, frames * sizeof(*destination));
} else {
for (size_t i = 0; i < frames; ++i)
destination[i] = static_cast<float>(source_ptr_int_[i]);
}
source_available_ -= frames;
}
} // namespace webrtc
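
As a worked example of the priming logic above (editor's note, derived from the constants in this file and in SincResampler): pushing 10 ms blocks from 16 kHz to 48 kHz gives source_frames = 160 and destination_frames = 480, so the wrapped SincResampler runs with io ratio 160 / 480 = 1/3 and request_frames = 160. Its block size is request_frames - kKernelSize / 2 = 160 - 16 = 144, hence ChunkSize() = 144 / (1/3) = 432. Requesting those 432 output frames on the first pass consumes exactly the single zero-filled Run() of 160 frames, after which every Resample(480) call triggers one Run() for the next 160 real input samples.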

View File

@ -0,0 +1,76 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_RESAMPLER_PUSH_SINC_RESAMPLER_H_
#define WEBRTC_COMMON_AUDIO_RESAMPLER_PUSH_SINC_RESAMPLER_H_
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/resampler/sinc_resampler.h"
#include "webrtc/typedefs.h"
namespace webrtc {
// A thin wrapper over SincResampler to provide a push-based interface as
// required by WebRTC. SincResampler uses a pull-based interface, and will
// use SincResamplerCallback::Run() to request data upon a call to Resample().
// These Run() calls will happen on the same thread Resample() is called on.
class PushSincResampler : public SincResamplerCallback {
public:
// Provide the size of the source and destination blocks in samples. These
// must correspond to the same time duration (typically 10 ms) as the sample
// ratio is inferred from them.
PushSincResampler(size_t source_frames, size_t destination_frames);
~PushSincResampler() override;
// Perform the resampling. |source_frames| must always equal the
// |source_frames| provided at construction. |destination_capacity| must be
// at least as large as |destination_frames|. Returns the number of samples
// provided in destination (for convenience, since this will always be equal
// to |destination_frames|).
size_t Resample(const int16_t* source, size_t source_frames,
int16_t* destination, size_t destination_capacity);
size_t Resample(const float* source,
size_t source_frames,
float* destination,
size_t destination_capacity);
// Delay due to the filter kernel. Essentially, the time after which an input
// sample will appear in the resampled output.
static float AlgorithmicDelaySeconds(int source_rate_hz) {
return 1.f / source_rate_hz * SincResampler::kKernelSize / 2;
}
protected:
// Implements SincResamplerCallback.
void Run(size_t frames, float* destination) override;
private:
friend class PushSincResamplerTest;
SincResampler* get_resampler_for_testing() { return resampler_.get(); }
rtc::scoped_ptr<SincResampler> resampler_;
rtc::scoped_ptr<float[]> float_buffer_;
const float* source_ptr_;
const int16_t* source_ptr_int_;
const size_t destination_frames_;
// True on the first call to Resample(), to prime the SincResampler buffer.
bool first_pass_;
// Used to assert we are only asked for as much data as is available.
size_t source_available_;
RTC_DISALLOW_COPY_AND_ASSIGN(PushSincResampler);
};
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_RESAMPLER_PUSH_SINC_RESAMPLER_H_
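
A short usage sketch of the push interface declared above (editor's illustration, not part of the upstream diff), converting 10 ms mono blocks from 44.1 kHz to 16 kHz with the float overload:

#include <cstddef>
#include "webrtc/common_audio/resampler/push_sinc_resampler.h"

void PushSincResamplerExample() {
  // 10 ms blocks: 441 samples in at 44.1 kHz, 160 samples out at 16 kHz.
  webrtc::PushSincResampler resampler(441, 160);
  float in[441] = {0.0f};
  float out[160];
  size_t produced = resampler.Resample(in, 441, out, 160);
  // produced is always 160 here; the filter adds
  // AlgorithmicDelaySeconds(44100) = 16 / 44100 s (about 0.36 ms) of delay.
  (void)produced;
}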

View File

@ -0,0 +1,959 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* A wrapper for resampling between a large number of sampling rate combinations.
*/
#include <stdlib.h>
#include <string.h>
#include "webrtc/common_audio/resampler/include/resampler.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
namespace webrtc {
Resampler::Resampler()
: state1_(nullptr),
state2_(nullptr),
state3_(nullptr),
in_buffer_(nullptr),
out_buffer_(nullptr),
in_buffer_size_(0),
out_buffer_size_(0),
in_buffer_size_max_(0),
out_buffer_size_max_(0),
my_in_frequency_khz_(0),
my_out_frequency_khz_(0),
my_mode_(kResamplerMode1To1),
num_channels_(0),
slave_left_(nullptr),
slave_right_(nullptr) {
}
Resampler::Resampler(int inFreq, int outFreq, int num_channels)
: Resampler() {
Reset(inFreq, outFreq, num_channels);
}
Resampler::~Resampler()
{
if (state1_)
{
free(state1_);
}
if (state2_)
{
free(state2_);
}
if (state3_)
{
free(state3_);
}
if (in_buffer_)
{
free(in_buffer_);
}
if (out_buffer_)
{
free(out_buffer_);
}
if (slave_left_)
{
delete slave_left_;
}
if (slave_right_)
{
delete slave_right_;
}
}
int Resampler::ResetIfNeeded(int inFreq, int outFreq, int num_channels)
{
int tmpInFreq_kHz = inFreq / 1000;
int tmpOutFreq_kHz = outFreq / 1000;
if ((tmpInFreq_kHz != my_in_frequency_khz_) || (tmpOutFreq_kHz != my_out_frequency_khz_)
|| (num_channels != num_channels_))
{
return Reset(inFreq, outFreq, num_channels);
} else
{
return 0;
}
}
int Resampler::Reset(int inFreq, int outFreq, int num_channels)
{
if (num_channels != 1 && num_channels != 2) {
return -1;
}
num_channels_ = num_channels;
if (state1_)
{
free(state1_);
state1_ = NULL;
}
if (state2_)
{
free(state2_);
state2_ = NULL;
}
if (state3_)
{
free(state3_);
state3_ = NULL;
}
if (in_buffer_)
{
free(in_buffer_);
in_buffer_ = NULL;
}
if (out_buffer_)
{
free(out_buffer_);
out_buffer_ = NULL;
}
if (slave_left_)
{
delete slave_left_;
slave_left_ = NULL;
}
if (slave_right_)
{
delete slave_right_;
slave_right_ = NULL;
}
in_buffer_size_ = 0;
out_buffer_size_ = 0;
in_buffer_size_max_ = 0;
out_buffer_size_max_ = 0;
// Start with a math exercise, Euclid's algorithm to find the gcd:
int a = inFreq;
int b = outFreq;
int c = a % b;
while (c != 0)
{
a = b;
b = c;
c = a % b;
}
// b is now the gcd;
// We need to track what domain we're in.
my_in_frequency_khz_ = inFreq / 1000;
my_out_frequency_khz_ = outFreq / 1000;
// Scale with GCD
inFreq = inFreq / b;
outFreq = outFreq / b;
if (num_channels_ == 2)
{
// Create two mono resamplers.
slave_left_ = new Resampler(inFreq, outFreq, 1);
slave_right_ = new Resampler(inFreq, outFreq, 1);
}
if (inFreq == outFreq)
{
my_mode_ = kResamplerMode1To1;
} else if (inFreq == 1)
{
switch (outFreq)
{
case 2:
my_mode_ = kResamplerMode1To2;
break;
case 3:
my_mode_ = kResamplerMode1To3;
break;
case 4:
my_mode_ = kResamplerMode1To4;
break;
case 6:
my_mode_ = kResamplerMode1To6;
break;
case 12:
my_mode_ = kResamplerMode1To12;
break;
default:
return -1;
}
} else if (outFreq == 1)
{
switch (inFreq)
{
case 2:
my_mode_ = kResamplerMode2To1;
break;
case 3:
my_mode_ = kResamplerMode3To1;
break;
case 4:
my_mode_ = kResamplerMode4To1;
break;
case 6:
my_mode_ = kResamplerMode6To1;
break;
case 12:
my_mode_ = kResamplerMode12To1;
break;
default:
return -1;
}
} else if ((inFreq == 2) && (outFreq == 3))
{
my_mode_ = kResamplerMode2To3;
} else if ((inFreq == 2) && (outFreq == 11))
{
my_mode_ = kResamplerMode2To11;
} else if ((inFreq == 4) && (outFreq == 11))
{
my_mode_ = kResamplerMode4To11;
} else if ((inFreq == 8) && (outFreq == 11))
{
my_mode_ = kResamplerMode8To11;
} else if ((inFreq == 3) && (outFreq == 2))
{
my_mode_ = kResamplerMode3To2;
} else if ((inFreq == 11) && (outFreq == 2))
{
my_mode_ = kResamplerMode11To2;
} else if ((inFreq == 11) && (outFreq == 4))
{
my_mode_ = kResamplerMode11To4;
} else if ((inFreq == 11) && (outFreq == 16))
{
my_mode_ = kResamplerMode11To16;
} else if ((inFreq == 11) && (outFreq == 32))
{
my_mode_ = kResamplerMode11To32;
} else if ((inFreq == 11) && (outFreq == 8))
{
my_mode_ = kResamplerMode11To8;
} else
{
return -1;
}
// Now create the states we need
switch (my_mode_)
{
case kResamplerMode1To1:
// No state needed;
break;
case kResamplerMode1To2:
state1_ = malloc(8 * sizeof(int32_t));
memset(state1_, 0, 8 * sizeof(int32_t));
break;
case kResamplerMode1To3:
state1_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state1_);
break;
case kResamplerMode1To4:
// 1:2
state1_ = malloc(8 * sizeof(int32_t));
memset(state1_, 0, 8 * sizeof(int32_t));
// 2:4
state2_ = malloc(8 * sizeof(int32_t));
memset(state2_, 0, 8 * sizeof(int32_t));
break;
case kResamplerMode1To6:
// 1:2
state1_ = malloc(8 * sizeof(int32_t));
memset(state1_, 0, 8 * sizeof(int32_t));
// 2:6
state2_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state2_);
break;
case kResamplerMode1To12:
// 1:2
state1_ = malloc(8 * sizeof(int32_t));
memset(state1_, 0, 8 * sizeof(int32_t));
// 2:4
state2_ = malloc(8 * sizeof(int32_t));
memset(state2_, 0, 8 * sizeof(int32_t));
// 4:12
state3_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
WebRtcSpl_ResetResample16khzTo48khz(
(WebRtcSpl_State16khzTo48khz*) state3_);
break;
case kResamplerMode2To3:
// 2:6
state1_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state1_);
// 6:3
state2_ = malloc(8 * sizeof(int32_t));
memset(state2_, 0, 8 * sizeof(int32_t));
break;
case kResamplerMode2To11:
state1_ = malloc(8 * sizeof(int32_t));
memset(state1_, 0, 8 * sizeof(int32_t));
state2_ = malloc(sizeof(WebRtcSpl_State8khzTo22khz));
WebRtcSpl_ResetResample8khzTo22khz((WebRtcSpl_State8khzTo22khz *)state2_);
break;
case kResamplerMode4To11:
state1_ = malloc(sizeof(WebRtcSpl_State8khzTo22khz));
WebRtcSpl_ResetResample8khzTo22khz((WebRtcSpl_State8khzTo22khz *)state1_);
break;
case kResamplerMode8To11:
state1_ = malloc(sizeof(WebRtcSpl_State16khzTo22khz));
WebRtcSpl_ResetResample16khzTo22khz((WebRtcSpl_State16khzTo22khz *)state1_);
break;
case kResamplerMode11To16:
state1_ = malloc(8 * sizeof(int32_t));
memset(state1_, 0, 8 * sizeof(int32_t));
state2_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state2_);
break;
case kResamplerMode11To32:
// 11 -> 22
state1_ = malloc(8 * sizeof(int32_t));
memset(state1_, 0, 8 * sizeof(int32_t));
// 22 -> 16
state2_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state2_);
// 16 -> 32
state3_ = malloc(8 * sizeof(int32_t));
memset(state3_, 0, 8 * sizeof(int32_t));
break;
case kResamplerMode2To1:
state1_ = malloc(8 * sizeof(int32_t));
memset(state1_, 0, 8 * sizeof(int32_t));
break;
case kResamplerMode3To1:
state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state1_);
break;
case kResamplerMode4To1:
// 4:2
state1_ = malloc(8 * sizeof(int32_t));
memset(state1_, 0, 8 * sizeof(int32_t));
// 2:1
state2_ = malloc(8 * sizeof(int32_t));
memset(state2_, 0, 8 * sizeof(int32_t));
break;
case kResamplerMode6To1:
// 6:2
state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state1_);
// 2:1
state2_ = malloc(8 * sizeof(int32_t));
memset(state2_, 0, 8 * sizeof(int32_t));
break;
case kResamplerMode12To1:
// 12:4
state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
WebRtcSpl_ResetResample48khzTo16khz(
(WebRtcSpl_State48khzTo16khz*) state1_);
// 4:2
state2_ = malloc(8 * sizeof(int32_t));
memset(state2_, 0, 8 * sizeof(int32_t));
// 2:1
state3_ = malloc(8 * sizeof(int32_t));
memset(state3_, 0, 8 * sizeof(int32_t));
break;
case kResamplerMode3To2:
// 3:6
state1_ = malloc(8 * sizeof(int32_t));
memset(state1_, 0, 8 * sizeof(int32_t));
// 6:2
state2_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state2_);
break;
case kResamplerMode11To2:
state1_ = malloc(sizeof(WebRtcSpl_State22khzTo8khz));
WebRtcSpl_ResetResample22khzTo8khz((WebRtcSpl_State22khzTo8khz *)state1_);
state2_ = malloc(8 * sizeof(int32_t));
memset(state2_, 0, 8 * sizeof(int32_t));
break;
case kResamplerMode11To4:
state1_ = malloc(sizeof(WebRtcSpl_State22khzTo8khz));
WebRtcSpl_ResetResample22khzTo8khz((WebRtcSpl_State22khzTo8khz *)state1_);
break;
case kResamplerMode11To8:
state1_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state1_);
break;
}
return 0;
}
// Synchronous resampling, all output samples are written to samplesOut
int Resampler::Push(const int16_t * samplesIn, size_t lengthIn,
int16_t* samplesOut, size_t maxLen, size_t &outLen)
{
if (num_channels_ == 2)
{
// Split up the signal and call the slave object for each channel
int16_t* left = (int16_t*)malloc(lengthIn * sizeof(int16_t) / 2);
int16_t* right = (int16_t*)malloc(lengthIn * sizeof(int16_t) / 2);
int16_t* out_left = (int16_t*)malloc(maxLen / 2 * sizeof(int16_t));
int16_t* out_right =
(int16_t*)malloc(maxLen / 2 * sizeof(int16_t));
int res = 0;
for (size_t i = 0; i < lengthIn; i += 2)
{
left[i >> 1] = samplesIn[i];
right[i >> 1] = samplesIn[i + 1];
}
// It's OK to overwrite the local parameter, since it's just a copy
lengthIn = lengthIn / 2;
size_t actualOutLen_left = 0;
size_t actualOutLen_right = 0;
// Do resampling for each channel
res |= slave_left_->Push(left, lengthIn, out_left, maxLen / 2, actualOutLen_left);
res |= slave_right_->Push(right, lengthIn, out_right, maxLen / 2, actualOutLen_right);
if (res || (actualOutLen_left != actualOutLen_right))
{
free(left);
free(right);
free(out_left);
free(out_right);
return -1;
}
// Reassemble the signal
for (size_t i = 0; i < actualOutLen_left; i++)
{
samplesOut[i * 2] = out_left[i];
samplesOut[i * 2 + 1] = out_right[i];
}
outLen = 2 * actualOutLen_left;
free(left);
free(right);
free(out_left);
free(out_right);
return 0;
}
// Containers for temp samples
int16_t* tmp;
int16_t* tmp_2;
// tmp data for resampling routines
int32_t* tmp_mem;
switch (my_mode_)
{
case kResamplerMode1To1:
memcpy(samplesOut, samplesIn, lengthIn * sizeof(int16_t));
outLen = lengthIn;
break;
case kResamplerMode1To2:
if (maxLen < (lengthIn * 2))
{
return -1;
}
WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
outLen = lengthIn * 2;
return 0;
case kResamplerMode1To3:
// We can only handle blocks of 160 samples
// Can be fixed, but I don't think it's needed
if ((lengthIn % 160) != 0)
{
return -1;
}
if (maxLen < (lengthIn * 3))
{
return -1;
}
tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
for (size_t i = 0; i < lengthIn; i += 160)
{
WebRtcSpl_Resample16khzTo48khz(samplesIn + i, samplesOut + i * 3,
(WebRtcSpl_State16khzTo48khz *)state1_,
tmp_mem);
}
outLen = lengthIn * 3;
free(tmp_mem);
return 0;
case kResamplerMode1To4:
if (maxLen < (lengthIn * 4))
{
return -1;
}
tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
// 1:2
WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
// 2:4
WebRtcSpl_UpsampleBy2(tmp, lengthIn * 2, samplesOut, (int32_t*)state2_);
outLen = lengthIn * 4;
free(tmp);
return 0;
case kResamplerMode1To6:
// We can only handle blocks of 80 samples
// Can be fixed, but I don't think it's needed
if ((lengthIn % 80) != 0)
{
return -1;
}
if (maxLen < (lengthIn * 6))
{
return -1;
}
//1:2
tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
outLen = lengthIn * 2;
for (size_t i = 0; i < outLen; i += 160)
{
WebRtcSpl_Resample16khzTo48khz(tmp + i, samplesOut + i * 3,
(WebRtcSpl_State16khzTo48khz *)state2_,
tmp_mem);
}
outLen = outLen * 3;
free(tmp_mem);
free(tmp);
return 0;
case kResamplerMode1To12:
// We can only handle blocks of 40 samples
// Can be fixed, but I don't think it's needed
if ((lengthIn % 40) != 0) {
return -1;
}
if (maxLen < (lengthIn * 12)) {
return -1;
}
tmp_mem = (int32_t*) malloc(336 * sizeof(int32_t));
tmp = (int16_t*) malloc(sizeof(int16_t) * 4 * lengthIn);
//1:2
WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut,
(int32_t*) state1_);
outLen = lengthIn * 2;
//2:4
WebRtcSpl_UpsampleBy2(samplesOut, outLen, tmp, (int32_t*) state2_);
outLen = outLen * 2;
// 4:12
for (size_t i = 0; i < outLen; i += 160) {
// WebRtcSpl_Resample16khzTo48khz() takes a block of 160 samples
// as input and outputs a resampled block of 480 samples. The
// data is now actually in 32 kHz sampling rate, despite the
// function name, and with a resampling factor of three becomes
// 96 kHz.
WebRtcSpl_Resample16khzTo48khz(tmp + i, samplesOut + i * 3,
(WebRtcSpl_State16khzTo48khz*) state3_,
tmp_mem);
}
outLen = outLen * 3;
free(tmp_mem);
free(tmp);
return 0;
case kResamplerMode2To3:
if (maxLen < (lengthIn * 3 / 2))
{
return -1;
}
// 2:6
// We can only handle blocks of 160 samples
// Can be fixed, but I don't think it's needed
if ((lengthIn % 160) != 0)
{
return -1;
}
tmp = static_cast<int16_t*> (malloc(sizeof(int16_t) * lengthIn * 3));
tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
for (size_t i = 0; i < lengthIn; i += 160)
{
WebRtcSpl_Resample16khzTo48khz(samplesIn + i, tmp + i * 3,
(WebRtcSpl_State16khzTo48khz *)state1_,
tmp_mem);
}
lengthIn = lengthIn * 3;
// 6:3
WebRtcSpl_DownsampleBy2(tmp, lengthIn, samplesOut, (int32_t*)state2_);
outLen = lengthIn / 2;
free(tmp);
free(tmp_mem);
return 0;
case kResamplerMode2To11:
// We can only handle blocks of 80 samples
// Can be fixed, but I don't think it's needed
if ((lengthIn % 80) != 0)
{
return -1;
}
if (maxLen < ((lengthIn * 11) / 2))
{
return -1;
}
tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
// 1:2
WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
lengthIn *= 2;
tmp_mem = (int32_t*)malloc(98 * sizeof(int32_t));
for (size_t i = 0; i < lengthIn; i += 80)
{
WebRtcSpl_Resample8khzTo22khz(tmp + i, samplesOut + (i * 11) / 4,
(WebRtcSpl_State8khzTo22khz *)state2_,
tmp_mem);
}
outLen = (lengthIn * 11) / 4;
free(tmp_mem);
free(tmp);
return 0;
case kResamplerMode4To11:
// We can only handle blocks of 80 samples
// Can be fixed, but I don't think it's needed
if ((lengthIn % 80) != 0)
{
return -1;
}
if (maxLen < ((lengthIn * 11) / 4))
{
return -1;
}
tmp_mem = (int32_t*)malloc(98 * sizeof(int32_t));
for (size_t i = 0; i < lengthIn; i += 80)
{
WebRtcSpl_Resample8khzTo22khz(samplesIn + i, samplesOut + (i * 11) / 4,
(WebRtcSpl_State8khzTo22khz *)state1_,
tmp_mem);
}
outLen = (lengthIn * 11) / 4;
free(tmp_mem);
return 0;
case kResamplerMode8To11:
// We can only handle blocks of 160 samples
// Can be fixed, but I don't think it's needed
if ((lengthIn % 160) != 0)
{
return -1;
}
if (maxLen < ((lengthIn * 11) / 8))
{
return -1;
}
tmp_mem = (int32_t*)malloc(88 * sizeof(int32_t));
for (size_t i = 0; i < lengthIn; i += 160)
{
WebRtcSpl_Resample16khzTo22khz(samplesIn + i, samplesOut + (i * 11) / 8,
(WebRtcSpl_State16khzTo22khz *)state1_,
tmp_mem);
}
outLen = (lengthIn * 11) / 8;
free(tmp_mem);
return 0;
case kResamplerMode11To16:
// We can only handle blocks of 110 samples
if ((lengthIn % 110) != 0)
{
return -1;
}
if (maxLen < ((lengthIn * 16) / 11))
{
return -1;
}
tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn * 2));
WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
for (size_t i = 0; i < (lengthIn * 2); i += 220)
{
WebRtcSpl_Resample22khzTo16khz(tmp + i, samplesOut + (i / 220) * 160,
(WebRtcSpl_State22khzTo16khz *)state2_,
tmp_mem);
}
outLen = (lengthIn * 16) / 11;
free(tmp_mem);
free(tmp);
return 0;
case kResamplerMode11To32:
// We can only handle blocks of 110 samples
if ((lengthIn % 110) != 0)
{
return -1;
}
if (maxLen < ((lengthIn * 32) / 11))
{
return -1;
}
tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn * 2));
// 11 -> 22 kHz in samplesOut
WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
// 22 -> 16 in tmp
for (size_t i = 0; i < (lengthIn * 2); i += 220)
{
WebRtcSpl_Resample22khzTo16khz(samplesOut + i, tmp + (i / 220) * 160,
(WebRtcSpl_State22khzTo16khz *)state2_,
tmp_mem);
}
// 16 -> 32 in samplesOut
WebRtcSpl_UpsampleBy2(tmp, (lengthIn * 16) / 11, samplesOut,
(int32_t*)state3_);
outLen = (lengthIn * 32) / 11;
free(tmp_mem);
free(tmp);
return 0;
case kResamplerMode2To1:
if (maxLen < (lengthIn / 2))
{
return -1;
}
WebRtcSpl_DownsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
outLen = lengthIn / 2;
return 0;
case kResamplerMode3To1:
// We can only handle blocks of 480 samples
// Can be fixed, but I don't think it's needed
if ((lengthIn % 480) != 0)
{
return -1;
}
if (maxLen < (lengthIn / 3))
{
return -1;
}
tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
for (size_t i = 0; i < lengthIn; i += 480)
{
WebRtcSpl_Resample48khzTo16khz(samplesIn + i, samplesOut + i / 3,
(WebRtcSpl_State48khzTo16khz *)state1_,
tmp_mem);
}
outLen = lengthIn / 3;
free(tmp_mem);
return 0;
case kResamplerMode4To1:
if (maxLen < (lengthIn / 4))
{
return -1;
}
tmp = (int16_t*)malloc(sizeof(int16_t) * lengthIn / 2);
// 4:2
WebRtcSpl_DownsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
// 2:1
WebRtcSpl_DownsampleBy2(tmp, lengthIn / 2, samplesOut, (int32_t*)state2_);
outLen = lengthIn / 4;
free(tmp);
return 0;
case kResamplerMode6To1:
// We can only handle blocks of 480 samples
// Can be fixed, but I don't think it's needed
if ((lengthIn % 480) != 0)
{
return -1;
}
if (maxLen < (lengthIn / 6))
{
return -1;
}
tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn) / 3);
for (size_t i = 0; i < lengthIn; i += 480)
{
WebRtcSpl_Resample48khzTo16khz(samplesIn + i, tmp + i / 3,
(WebRtcSpl_State48khzTo16khz *)state1_,
tmp_mem);
}
outLen = lengthIn / 3;
free(tmp_mem);
WebRtcSpl_DownsampleBy2(tmp, outLen, samplesOut, (int32_t*)state2_);
free(tmp);
outLen = outLen / 2;
return 0;
case kResamplerMode12To1:
// We can only handle blocks of 480 samples
// Can be fixed, but I don't think it's needed
if ((lengthIn % 480) != 0) {
return -1;
}
if (maxLen < (lengthIn / 12)) {
return -1;
}
tmp_mem = (int32_t*) malloc(496 * sizeof(int32_t));
tmp = (int16_t*) malloc((sizeof(int16_t) * lengthIn) / 3);
tmp_2 = (int16_t*) malloc((sizeof(int16_t) * lengthIn) / 6);
// 12:4
for (size_t i = 0; i < lengthIn; i += 480) {
// WebRtcSpl_Resample48khzTo16khz() takes a block of 480 samples
// as input and outputs a resampled block of 160 samples. The
// data is now actually in 96 kHz sampling rate, despite the
// function name, and with a resampling factor of 1/3 becomes
// 32 kHz.
WebRtcSpl_Resample48khzTo16khz(samplesIn + i, tmp + i / 3,
(WebRtcSpl_State48khzTo16khz*) state1_,
tmp_mem);
}
outLen = lengthIn / 3;
free(tmp_mem);
// 4:2
WebRtcSpl_DownsampleBy2(tmp, outLen, tmp_2, (int32_t*) state2_);
outLen = outLen / 2;
free(tmp);
// 2:1
WebRtcSpl_DownsampleBy2(tmp_2, outLen, samplesOut,
(int32_t*) state3_);
free(tmp_2);
outLen = outLen / 2;
return 0;
case kResamplerMode3To2:
if (maxLen < (lengthIn * 2 / 3))
{
return -1;
}
// 3:6
tmp = static_cast<int16_t*> (malloc(sizeof(int16_t) * lengthIn * 2));
WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
lengthIn *= 2;
// 6:2
// We can only handle blocks of 480 samples
// Can be fixed, but I don't think it's needed
if ((lengthIn % 480) != 0)
{
free(tmp);
return -1;
}
tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
for (size_t i = 0; i < lengthIn; i += 480)
{
WebRtcSpl_Resample48khzTo16khz(tmp + i, samplesOut + i / 3,
(WebRtcSpl_State48khzTo16khz *)state2_,
tmp_mem);
}
outLen = lengthIn / 3;
free(tmp);
free(tmp_mem);
return 0;
case kResamplerMode11To2:
// We can only handle blocks of 220 samples
// Can be fixed, but I don't think it's needed
if ((lengthIn % 220) != 0)
{
return -1;
}
if (maxLen < ((lengthIn * 2) / 11))
{
return -1;
}
tmp_mem = (int32_t*)malloc(126 * sizeof(int32_t));
tmp = (int16_t*)malloc((lengthIn * 4) / 11 * sizeof(int16_t));
for (size_t i = 0; i < lengthIn; i += 220)
{
WebRtcSpl_Resample22khzTo8khz(samplesIn + i, tmp + (i * 4) / 11,
(WebRtcSpl_State22khzTo8khz *)state1_,
tmp_mem);
}
lengthIn = (lengthIn * 4) / 11;
WebRtcSpl_DownsampleBy2(tmp, lengthIn, samplesOut,
(int32_t*)state2_);
outLen = lengthIn / 2;
free(tmp_mem);
free(tmp);
return 0;
case kResamplerMode11To4:
// We can only handle blocks of 220 samples
// Can be fixed, but I don't think it's needed
if ((lengthIn % 220) != 0)
{
return -1;
}
if (maxLen < ((lengthIn * 4) / 11))
{
return -1;
}
tmp_mem = (int32_t*)malloc(126 * sizeof(int32_t));
for (size_t i = 0; i < lengthIn; i += 220)
{
WebRtcSpl_Resample22khzTo8khz(samplesIn + i, samplesOut + (i * 4) / 11,
(WebRtcSpl_State22khzTo8khz *)state1_,
tmp_mem);
}
outLen = (lengthIn * 4) / 11;
free(tmp_mem);
return 0;
case kResamplerMode11To8:
// We can only handle blocks of 220 samples
// Can be fixed, but I don't think it's needed
if ((lengthIn % 220) != 0)
{
return -1;
}
if (maxLen < ((lengthIn * 8) / 11))
{
return -1;
}
tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
for (size_t i = 0; i < lengthIn; i += 220)
{
WebRtcSpl_Resample22khzTo16khz(samplesIn + i, samplesOut + (i * 8) / 11,
(WebRtcSpl_State22khzTo16khz *)state1_,
tmp_mem);
}
outLen = (lengthIn * 8) / 11;
free(tmp_mem);
return 0;
}
return 0;
}
} // namespace webrtc
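
As a concrete trace of Reset() above (editor's note): for inFreq = 48000 and outFreq = 16000, Euclid's algorithm yields a gcd of 16000, the scaled ratio is 3:1, so my_mode_ becomes kResamplerMode3To1 and a single WebRtcSpl_State48khzTo16khz state is allocated; Push() then requires the input length to be a multiple of 480 samples and produces lengthIn / 3 output samples. A combination such as 44100 to 48000 scales to 147:160, matches none of the supported modes, and makes Reset() return -1.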

View File

@ -0,0 +1,378 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Modified from the Chromium original:
// src/media/base/sinc_resampler.cc
// Initial input buffer layout, dividing into regions r0_ to r4_ (note: r0_, r3_
// and r4_ will move after the first load):
//
// |----------------|-----------------------------------------|----------------|
//
// request_frames_
// <--------------------------------------------------------->
// r0_ (during first load)
//
// kKernelSize / 2 kKernelSize / 2 kKernelSize / 2 kKernelSize / 2
// <---------------> <---------------> <---------------> <--------------->
// r1_ r2_ r3_ r4_
//
// block_size_ == r4_ - r2_
// <--------------------------------------->
//
// request_frames_
// <------------------ ... ----------------->
// r0_ (during second load)
//
// On the second request r0_ slides to the right by kKernelSize / 2 and r3_, r4_
// and block_size_ are reinitialized via step (3) in the algorithm below.
//
// These new regions remain constant until a Flush() occurs. While complicated,
// this allows us to reduce jitter by always requesting the same amount from the
// provided callback.
//
// The algorithm:
//
// 1) Allocate input_buffer of size: request_frames_ + kKernelSize; this ensures
// there's enough room to read request_frames_ from the callback into region
// r0_ (which will move between the first and subsequent passes).
//
// 2) Let r1_, r2_ each represent half the kernel centered around r0_:
//
// r0_ = input_buffer_ + kKernelSize / 2
// r1_ = input_buffer_
// r2_ = r0_
//
// r0_ is always request_frames_ in size. r1_, r2_ are kKernelSize / 2 in
// size. r1_ must be zero initialized to avoid convolution with garbage (see
// step (5) for why).
//
// 3) Let r3_, r4_ each represent half the kernel right aligned with the end of
// r0_ and choose block_size_ as the distance in frames between r4_ and r2_:
//
// r3_ = r0_ + request_frames_ - kKernelSize
// r4_ = r0_ + request_frames_ - kKernelSize / 2
// block_size_ = r4_ - r2_ = request_frames_ - kKernelSize / 2
//
// 4) Consume request_frames_ frames into r0_.
//
// 5) Position kernel centered at start of r2_ and generate output frames until
// the kernel is centered at the start of r4_ or we've finished generating
// all the output frames.
//
// 6) Wrap left over data from the r3_ to r1_ and r4_ to r2_.
//
// 7) If we're on the second load, in order to avoid overwriting the frames we
// just wrapped from r4_ we need to slide r0_ to the right by the size of
// r4_, which is kKernelSize / 2:
//
// r0_ = r0_ + kKernelSize / 2 = input_buffer_ + kKernelSize
//
// r3_, r4_, and block_size_ then need to be reinitialized, so goto (3).
//
// 8) Else, if we're not on the second load, goto (4).
//
// Note: we're glossing over how the sub-sample handling works with
// |virtual_source_idx_|, etc.
// MSVC++ requires this to be set before any other includes to get M_PI.
#define _USE_MATH_DEFINES
#include "webrtc/common_audio/resampler/sinc_resampler.h"
#include <assert.h>
#include <math.h>
#include <string.h>
#include <limits>
#include "webrtc/system_wrappers/interface/cpu_features_wrapper.h"
#include "webrtc/typedefs.h"
namespace webrtc {
namespace {
double SincScaleFactor(double io_ratio) {
// |sinc_scale_factor| is basically the normalized cutoff frequency of the
// low-pass filter.
double sinc_scale_factor = io_ratio > 1.0 ? 1.0 / io_ratio : 1.0;
// The sinc function is an idealized brick-wall filter, but since we're
// windowing it the transition from pass to stop does not happen right away.
// So we should adjust the low pass filter cutoff slightly downward to avoid
// some aliasing at the very high-end.
// TODO(crogers): this value is empirical and to be more exact should vary
// depending on kKernelSize.
sinc_scale_factor *= 0.9;
return sinc_scale_factor;
}
} // namespace
// If we know the minimum architecture at compile time, avoid CPU detection.
#if defined(WEBRTC_ARCH_X86_FAMILY)
#if defined(__SSE2__)
#define CONVOLVE_FUNC Convolve_SSE
void SincResampler::InitializeCPUSpecificFeatures() {}
#else
// x86 CPU detection required. Function will be set by
// InitializeCPUSpecificFeatures().
// TODO(dalecurtis): Once Chrome moves to an SSE baseline this can be removed.
#define CONVOLVE_FUNC convolve_proc_
void SincResampler::InitializeCPUSpecificFeatures() {
convolve_proc_ = WebRtc_GetCPUInfo(kSSE2) ? Convolve_SSE : Convolve_C;
}
#endif
#elif defined(WEBRTC_HAS_NEON)
#define CONVOLVE_FUNC Convolve_NEON
void SincResampler::InitializeCPUSpecificFeatures() {}
#elif defined(WEBRTC_DETECT_NEON)
#define CONVOLVE_FUNC convolve_proc_
void SincResampler::InitializeCPUSpecificFeatures() {
convolve_proc_ = WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON ?
Convolve_NEON : Convolve_C;
}
#else
// Unknown architecture.
#define CONVOLVE_FUNC Convolve_C
void SincResampler::InitializeCPUSpecificFeatures() {}
#endif
SincResampler::SincResampler(double io_sample_rate_ratio,
size_t request_frames,
SincResamplerCallback* read_cb)
: io_sample_rate_ratio_(io_sample_rate_ratio),
read_cb_(read_cb),
request_frames_(request_frames),
input_buffer_size_(request_frames_ + kKernelSize),
// Create input buffers with a 16-byte alignment for SSE optimizations.
kernel_storage_(static_cast<float*>(
AlignedMalloc(sizeof(float) * kKernelStorageSize, 16))),
kernel_pre_sinc_storage_(static_cast<float*>(
AlignedMalloc(sizeof(float) * kKernelStorageSize, 16))),
kernel_window_storage_(static_cast<float*>(
AlignedMalloc(sizeof(float) * kKernelStorageSize, 16))),
input_buffer_(static_cast<float*>(
AlignedMalloc(sizeof(float) * input_buffer_size_, 16))),
#if defined(WEBRTC_CPU_DETECTION)
convolve_proc_(NULL),
#endif
r1_(input_buffer_.get()),
r2_(input_buffer_.get() + kKernelSize / 2) {
#if defined(WEBRTC_CPU_DETECTION)
InitializeCPUSpecificFeatures();
assert(convolve_proc_);
#endif
assert(request_frames_ > 0);
Flush();
assert(block_size_ > kKernelSize);
memset(kernel_storage_.get(), 0,
sizeof(*kernel_storage_.get()) * kKernelStorageSize);
memset(kernel_pre_sinc_storage_.get(), 0,
sizeof(*kernel_pre_sinc_storage_.get()) * kKernelStorageSize);
memset(kernel_window_storage_.get(), 0,
sizeof(*kernel_window_storage_.get()) * kKernelStorageSize);
InitializeKernel();
}
SincResampler::~SincResampler() {}
void SincResampler::UpdateRegions(bool second_load) {
// Setup various region pointers in the buffer (see diagram above). If we're
// on the second load we need to slide r0_ to the right by kKernelSize / 2.
r0_ = input_buffer_.get() + (second_load ? kKernelSize : kKernelSize / 2);
r3_ = r0_ + request_frames_ - kKernelSize;
r4_ = r0_ + request_frames_ - kKernelSize / 2;
block_size_ = r4_ - r2_;
// r1_ at the beginning of the buffer.
assert(r1_ == input_buffer_.get());
// r1_ left of r2_, r4_ left of r3_ and size correct.
assert(r2_ - r1_ == r4_ - r3_);
// r2_ left of r3.
assert(r2_ < r3_);
}
void SincResampler::InitializeKernel() {
// Blackman window parameters.
static const double kAlpha = 0.16;
static const double kA0 = 0.5 * (1.0 - kAlpha);
static const double kA1 = 0.5;
static const double kA2 = 0.5 * kAlpha;
// Generates a set of windowed sinc() kernels.
// We generate a range of sub-sample offsets from 0.0 to 1.0.
const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
for (size_t offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
const float subsample_offset =
static_cast<float>(offset_idx) / kKernelOffsetCount;
for (size_t i = 0; i < kKernelSize; ++i) {
const size_t idx = i + offset_idx * kKernelSize;
const float pre_sinc = static_cast<float>(M_PI *
(static_cast<int>(i) - static_cast<int>(kKernelSize / 2) -
subsample_offset));
kernel_pre_sinc_storage_[idx] = pre_sinc;
// Compute Blackman window, matching the offset of the sinc().
const float x = (i - subsample_offset) / kKernelSize;
const float window = static_cast<float>(kA0 - kA1 * cos(2.0 * M_PI * x) +
kA2 * cos(4.0 * M_PI * x));
kernel_window_storage_[idx] = window;
// Compute the sinc with offset, then window the sinc() function and store
// at the correct offset.
kernel_storage_[idx] = static_cast<float>(window *
((pre_sinc == 0) ?
sinc_scale_factor :
(sin(sinc_scale_factor * pre_sinc) / pre_sinc)));
}
}
}
void SincResampler::SetRatio(double io_sample_rate_ratio) {
if (fabs(io_sample_rate_ratio_ - io_sample_rate_ratio) <
std::numeric_limits<double>::epsilon()) {
return;
}
io_sample_rate_ratio_ = io_sample_rate_ratio;
// Optimize reinitialization by reusing values which are independent of
// |sinc_scale_factor|. Provides a 3x speedup.
const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
for (size_t offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
for (size_t i = 0; i < kKernelSize; ++i) {
const size_t idx = i + offset_idx * kKernelSize;
const float window = kernel_window_storage_[idx];
const float pre_sinc = kernel_pre_sinc_storage_[idx];
kernel_storage_[idx] = static_cast<float>(window *
((pre_sinc == 0) ?
sinc_scale_factor :
(sin(sinc_scale_factor * pre_sinc) / pre_sinc)));
}
}
}
void SincResampler::Resample(size_t frames, float* destination) {
size_t remaining_frames = frames;
// Step (1) -- Prime the input buffer at the start of the input stream.
if (!buffer_primed_ && remaining_frames) {
read_cb_->Run(request_frames_, r0_);
buffer_primed_ = true;
}
// Step (2) -- Resample! Hoist what we can into consts outside of the loop for
// speed; it actually has an impact on ARM performance. See the inner loop
// comment below.
const double current_io_ratio = io_sample_rate_ratio_;
const float* const kernel_ptr = kernel_storage_.get();
while (remaining_frames) {
// |i| may be negative if the last Resample() call ended on an iteration
// that put |virtual_source_idx_| over the limit.
//
// Note: The loop construct here can severely impact performance on ARM
// or when built with clang. See https://codereview.chromium.org/18566009/
for (int i = static_cast<int>(
ceil((block_size_ - virtual_source_idx_) / current_io_ratio));
i > 0; --i) {
assert(virtual_source_idx_ < block_size_);
// |virtual_source_idx_| lies in between two kernel offsets so figure out
// what they are.
const int source_idx = static_cast<int>(virtual_source_idx_);
const double subsample_remainder = virtual_source_idx_ - source_idx;
const double virtual_offset_idx =
subsample_remainder * kKernelOffsetCount;
const int offset_idx = static_cast<int>(virtual_offset_idx);
// We'll compute "convolutions" for the two kernels which straddle
// |virtual_source_idx_|.
const float* const k1 = kernel_ptr + offset_idx * kKernelSize;
const float* const k2 = k1 + kKernelSize;
// Ensure |k1|, |k2| are 16-byte aligned for SIMD usage. Should always be
// true so long as kKernelSize is a multiple of 16.
assert(0u == (reinterpret_cast<uintptr_t>(k1) & 0x0F));
assert(0u == (reinterpret_cast<uintptr_t>(k2) & 0x0F));
// Initialize input pointer based on quantized |virtual_source_idx_|.
const float* const input_ptr = r1_ + source_idx;
// Figure out how much to weight each kernel's "convolution".
const double kernel_interpolation_factor =
virtual_offset_idx - offset_idx;
*destination++ = CONVOLVE_FUNC(
input_ptr, k1, k2, kernel_interpolation_factor);
// Advance the virtual index.
virtual_source_idx_ += current_io_ratio;
if (!--remaining_frames)
return;
}
// Wrap back around to the start.
virtual_source_idx_ -= block_size_;
// Step (3) -- Copy r3_, r4_ to r1_, r2_.
// This wraps the last input frames back to the start of the buffer.
memcpy(r1_, r3_, sizeof(*input_buffer_.get()) * kKernelSize);
// Step (4) -- Reinitialize regions if necessary.
if (r0_ == r2_)
UpdateRegions(true);
// Step (5) -- Refresh the buffer with more input.
read_cb_->Run(request_frames_, r0_);
}
}
#undef CONVOLVE_FUNC
size_t SincResampler::ChunkSize() const {
return static_cast<size_t>(block_size_ / io_sample_rate_ratio_);
}
void SincResampler::Flush() {
virtual_source_idx_ = 0;
buffer_primed_ = false;
memset(input_buffer_.get(), 0,
sizeof(*input_buffer_.get()) * input_buffer_size_);
UpdateRegions(false);
}
float SincResampler::Convolve_C(const float* input_ptr, const float* k1,
const float* k2,
double kernel_interpolation_factor) {
float sum1 = 0;
float sum2 = 0;
// Generate a single output sample. Unrolling this loop hurt performance in
// local testing.
size_t n = kKernelSize;
while (n--) {
sum1 += *input_ptr * *k1++;
sum2 += *input_ptr++ * *k2++;
}
// Linearly interpolate the two "convolutions".
return static_cast<float>((1.0 - kernel_interpolation_factor) * sum1 +
kernel_interpolation_factor * sum2);
}
} // namespace webrtc
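
To make the region bookkeeping concrete (editor's note, following UpdateRegions() above): with kKernelSize = 32 and request_frames_ = 512, the input buffer holds 512 + 32 = 544 frames. On the first load r1_ = buffer, r2_ = r0_ = buffer + 16, r3_ = buffer + 496, r4_ = buffer + 512 and block_size_ = 496; after the second load r0_ slides to buffer + 32, r3_ and r4_ move to buffer + 512 and buffer + 528, and block_size_ settles at 512, where it stays until Flush().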

View File

@ -0,0 +1,170 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Modified from the Chromium original here:
// src/media/base/sinc_resampler.h
#ifndef WEBRTC_COMMON_AUDIO_RESAMPLER_SINC_RESAMPLER_H_
#define WEBRTC_COMMON_AUDIO_RESAMPLER_SINC_RESAMPLER_H_
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/system_wrappers/interface/aligned_malloc.h"
#include "webrtc/test/testsupport/gtest_prod_util.h"
#include "webrtc/typedefs.h"
namespace webrtc {
// Callback class for providing more data into the resampler. Expects |frames|
// of data to be rendered into |destination|; zero padded if not enough frames
// are available to satisfy the request.
class SincResamplerCallback {
public:
virtual ~SincResamplerCallback() {}
virtual void Run(size_t frames, float* destination) = 0;
};
// SincResampler is a high-quality single-channel sample-rate converter.
class SincResampler {
public:
// The kernel size can be adjusted for quality (higher is better) at the
// expense of performance. Must be a multiple of 32.
// TODO(dalecurtis): Test performance to see if we can jack this up to 64+.
static const size_t kKernelSize = 32;
// Default request size. Affects how often and for how much SincResampler
// calls back for input. Must be greater than kKernelSize.
static const size_t kDefaultRequestSize = 512;
// The kernel offset count is used for interpolation and is the number of
// sub-sample kernel shifts. Can be adjusted for quality (higher is better)
// at the expense of allocating more memory.
static const size_t kKernelOffsetCount = 32;
static const size_t kKernelStorageSize =
kKernelSize * (kKernelOffsetCount + 1);
// Constructs a SincResampler with the specified |read_cb|, which is used to
// acquire audio data for resampling. |io_sample_rate_ratio| is the ratio
// of input / output sample rates. |request_frames| controls the size in
// frames of the buffer requested by each |read_cb| call. The value must be
// greater than kKernelSize. Specify kDefaultRequestSize if there are no
// request size constraints.
SincResampler(double io_sample_rate_ratio,
size_t request_frames,
SincResamplerCallback* read_cb);
virtual ~SincResampler();
// Resample |frames| of data from |read_cb_| into |destination|.
void Resample(size_t frames, float* destination);
// The maximum size in frames that guarantees Resample() will only make a
// single call to |read_cb_| for more data.
size_t ChunkSize() const;
size_t request_frames() const { return request_frames_; }
// Flush all buffered data and reset internal indices. Not thread safe, do
// not call while Resample() is in progress.
void Flush();
// Update |io_sample_rate_ratio_|. SetRatio() will cause a reconstruction of
// the kernels used for resampling. Not thread safe, do not call while
// Resample() is in progress.
//
// TODO(ajm): Use this in PushSincResampler rather than reconstructing
// SincResampler. We would also need a way to update |request_frames_|.
void SetRatio(double io_sample_rate_ratio);
float* get_kernel_for_testing() { return kernel_storage_.get(); }
private:
FRIEND_TEST_ALL_PREFIXES(SincResamplerTest, Convolve);
FRIEND_TEST_ALL_PREFIXES(SincResamplerTest, ConvolveBenchmark);
void InitializeKernel();
void UpdateRegions(bool second_load);
// Selects runtime specific CPU features like SSE. Must be called before
// using SincResampler.
// TODO(ajm): Currently managed by the class internally. See the note with
// |convolve_proc_| below.
void InitializeCPUSpecificFeatures();
// Compute convolution of |k1| and |k2| over |input_ptr|, resultant sums are
// linearly interpolated using |kernel_interpolation_factor|. On x86 and ARM
// the underlying implementation is chosen at run time.
static float Convolve_C(const float* input_ptr, const float* k1,
const float* k2, double kernel_interpolation_factor);
#if defined(WEBRTC_ARCH_X86_FAMILY)
static float Convolve_SSE(const float* input_ptr, const float* k1,
const float* k2,
double kernel_interpolation_factor);
#elif defined(WEBRTC_DETECT_NEON) || defined(WEBRTC_HAS_NEON)
static float Convolve_NEON(const float* input_ptr, const float* k1,
const float* k2,
double kernel_interpolation_factor);
#endif
// The ratio of input / output sample rates.
double io_sample_rate_ratio_;
// An index on the source input buffer with sub-sample precision. It must be
// double precision to avoid drift.
double virtual_source_idx_;
// The buffer is primed once at the very beginning of processing.
bool buffer_primed_;
// Source of data for resampling.
SincResamplerCallback* read_cb_;
// The size (in samples) to request from each |read_cb_| execution.
const size_t request_frames_;
// The number of source frames processed per pass.
size_t block_size_;
// The size (in samples) of the internal buffer used by the resampler.
const size_t input_buffer_size_;
// Contains kKernelOffsetCount kernels back-to-back, each of size kKernelSize.
// The kernel offsets are sub-sample shifts of a windowed sinc shifted from
// 0.0 to 1.0 sample.
rtc::scoped_ptr<float[], AlignedFreeDeleter> kernel_storage_;
rtc::scoped_ptr<float[], AlignedFreeDeleter> kernel_pre_sinc_storage_;
rtc::scoped_ptr<float[], AlignedFreeDeleter> kernel_window_storage_;
// Data from the source is copied into this buffer for each processing pass.
rtc::scoped_ptr<float[], AlignedFreeDeleter> input_buffer_;
// Stores the runtime selection of which Convolve function to use.
// TODO(ajm): Move to using a global static which must only be initialized
// once by the user. We're not doing this initially, because we don't have
// e.g. a LazyInstance helper in webrtc.
#if defined(WEBRTC_CPU_DETECTION)
typedef float (*ConvolveProc)(const float*, const float*, const float*,
double);
ConvolveProc convolve_proc_;
#endif
// Pointers to the various regions inside |input_buffer_|. See the diagram at
// the top of the .cc file for more information.
float* r0_;
float* const r1_;
float* const r2_;
float* r3_;
float* r4_;
RTC_DISALLOW_COPY_AND_ASSIGN(SincResampler);
};
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_RESAMPLER_SINC_RESAMPLER_H_
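
A pull-based usage sketch of the interface above (editor's illustration, not part of the upstream diff); the callback simply supplies a constant signal:

#include <cstddef>
#include "webrtc/common_audio/resampler/sinc_resampler.h"

namespace {

// Supplies a constant signal; a real client would zero-pad when it runs out
// of data, as the SincResamplerCallback comment above requires.
class ConstantSource : public webrtc::SincResamplerCallback {
 public:
  void Run(size_t frames, float* destination) override {
    for (size_t i = 0; i < frames; ++i)
      destination[i] = 1.0f;
  }
};

}  // namespace

void SincResamplerPullExample() {
  ConstantSource source;
  // The ratio is input rate / output rate; here 48 kHz down to 44.1 kHz.
  webrtc::SincResampler resampler(48000.0 / 44100.0,
                                  webrtc::SincResampler::kDefaultRequestSize,
                                  &source);
  float out[441];  // 10 ms at 44.1 kHz.
  resampler.Resample(441, out);  // Pulls input through ConstantSource::Run().
}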

View File

@ -0,0 +1,47 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Modified from the Chromium original:
// src/media/base/sinc_resampler.cc
#include "webrtc/common_audio/resampler/sinc_resampler.h"
#include <arm_neon.h>
namespace webrtc {
float SincResampler::Convolve_NEON(const float* input_ptr, const float* k1,
const float* k2,
double kernel_interpolation_factor) {
float32x4_t m_input;
float32x4_t m_sums1 = vmovq_n_f32(0);
float32x4_t m_sums2 = vmovq_n_f32(0);
const float* upper = input_ptr + kKernelSize;
for (; input_ptr < upper; ) {
m_input = vld1q_f32(input_ptr);
input_ptr += 4;
m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32(k1));
k1 += 4;
m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32(k2));
k2 += 4;
}
// Linearly interpolate the two "convolutions".
m_sums1 = vmlaq_f32(
vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)),
m_sums2, vmovq_n_f32(kernel_interpolation_factor));
// Sum components together.
float32x2_t m_half = vadd_f32(vget_high_f32(m_sums1), vget_low_f32(m_sums1));
return vget_lane_f32(vpadd_f32(m_half, m_half), 0);
}
} // namespace webrtc

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Modified from the Chromium original:
// src/media/base/simd/sinc_resampler_sse.cc
#include "webrtc/common_audio/resampler/sinc_resampler.h"
#include <xmmintrin.h>
namespace webrtc {
float SincResampler::Convolve_SSE(const float* input_ptr, const float* k1,
const float* k2,
double kernel_interpolation_factor) {
__m128 m_input;
__m128 m_sums1 = _mm_setzero_ps();
__m128 m_sums2 = _mm_setzero_ps();
// Based on |input_ptr| alignment, we need to use loadu or load. Unrolling
// these loops hurt performance in local testing.
if (reinterpret_cast<uintptr_t>(input_ptr) & 0x0F) {
for (size_t i = 0; i < kKernelSize; i += 4) {
m_input = _mm_loadu_ps(input_ptr + i);
m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
}
} else {
for (size_t i = 0; i < kKernelSize; i += 4) {
m_input = _mm_load_ps(input_ptr + i);
m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
}
}
// Linearly interpolate the two "convolutions".
m_sums1 = _mm_mul_ps(m_sums1, _mm_set_ps1(
static_cast<float>(1.0 - kernel_interpolation_factor)));
m_sums2 = _mm_mul_ps(m_sums2, _mm_set_ps1(
static_cast<float>(kernel_interpolation_factor)));
m_sums1 = _mm_add_ps(m_sums1, m_sums2);
// Sum components together.
float result;
m_sums2 = _mm_add_ps(_mm_movehl_ps(m_sums1, m_sums1), m_sums1);
_mm_store_ss(&result, _mm_add_ss(m_sums2, _mm_shuffle_ps(
m_sums2, m_sums2, 1)));
return result;
}
} // namespace webrtc

View File

@ -0,0 +1,58 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// MSVC++ requires this to be set before any other includes to get M_PI.
#define _USE_MATH_DEFINES
#include "webrtc/common_audio/resampler/sinusoidal_linear_chirp_source.h"
#include <math.h>
namespace webrtc {
SinusoidalLinearChirpSource::SinusoidalLinearChirpSource(int sample_rate,
size_t samples,
double max_frequency,
double delay_samples)
: sample_rate_(sample_rate),
total_samples_(samples),
max_frequency_(max_frequency),
current_index_(0),
delay_samples_(delay_samples) {
// Chirp rate.
double duration = static_cast<double>(total_samples_) / sample_rate_;
k_ = (max_frequency_ - kMinFrequency) / duration;
}
void SinusoidalLinearChirpSource::Run(size_t frames, float* destination) {
for (size_t i = 0; i < frames; ++i, ++current_index_) {
// Filter out frequencies higher than Nyquist.
if (Frequency(current_index_) > 0.5 * sample_rate_) {
destination[i] = 0;
} else {
// Calculate time in seconds.
if (current_index_ < delay_samples_) {
destination[i] = 0;
} else {
// Sinusoidal linear chirp.
double t = (current_index_ - delay_samples_) / sample_rate_;
destination[i] =
sin(2 * M_PI * (kMinFrequency * t + (k_ / 2) * t * t));
}
}
}
}
double SinusoidalLinearChirpSource::Frequency(size_t position) {
return kMinFrequency + (position - delay_samples_) *
(max_frequency_ - kMinFrequency) / total_samples_;
}
} // namespace webrtc

View File

@ -0,0 +1,55 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Modified from the Chromium original here:
// src/media/base/sinc_resampler_unittest.cc
#ifndef WEBRTC_COMMON_AUDIO_RESAMPLER_SINUSOIDAL_LINEAR_CHIRP_SOURCE_H_
#define WEBRTC_COMMON_AUDIO_RESAMPLER_SINUSOIDAL_LINEAR_CHIRP_SOURCE_H_
#include "webrtc/base/constructormagic.h"
#include "webrtc/common_audio/resampler/sinc_resampler.h"
namespace webrtc {
// Fake audio source for testing the resampler. Generates a sinusoidal linear
// chirp (http://en.wikipedia.org/wiki/Chirp) which can be tuned to stress the
// resampler for the specific sample rate conversion being used.
class SinusoidalLinearChirpSource : public SincResamplerCallback {
public:
// |delay_samples| can be used to insert a fractional sample delay into the
// source. It will produce zeros until non-negative time is reached.
SinusoidalLinearChirpSource(int sample_rate, size_t samples,
double max_frequency, double delay_samples);
virtual ~SinusoidalLinearChirpSource() {}
void Run(size_t frames, float* destination) override;
double Frequency(size_t position);
private:
enum {
kMinFrequency = 5
};
int sample_rate_;
size_t total_samples_;
double max_frequency_;
double k_;
size_t current_index_;
double delay_samples_;
RTC_DISALLOW_COPY_AND_ASSIGN(SinusoidalLinearChirpSource);
};
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_RESAMPLER_SINUSOIDAL_LINEAR_CHIRP_SOURCE_H_
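As a rough usage sketch of the chirp source above (sample rate, length and
maximum frequency are illustrative values, not taken from the upstream tests):

#include <vector>
#include "webrtc/common_audio/resampler/sinusoidal_linear_chirp_source.h"

// Sketch: fill one second of a chirp sweeping from kMinFrequency up towards
// 0.4 * sample rate, with no fractional delay.
void FillChirpSketch(std::vector<float>* out) {
  const int kSampleRate = 48000;
  const size_t kFrames = static_cast<size_t>(kSampleRate);
  webrtc::SinusoidalLinearChirpSource source(kSampleRate, kFrames,
                                             0.4 * kSampleRate, 0.0);
  out->resize(kFrames);
  source.Run(kFrames, out->data());
}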

View File

@ -0,0 +1,247 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// A ring buffer to hold arbitrary data. Provides no thread safety. Unless
// otherwise specified, functions return 0 on success and -1 on error.
#include "webrtc/common_audio/ring_buffer.h"
#include <stddef.h> // size_t
#include <stdlib.h>
#include <string.h>
enum Wrap {
SAME_WRAP,
DIFF_WRAP
};
struct RingBuffer {
size_t read_pos;
size_t write_pos;
size_t element_count;
size_t element_size;
enum Wrap rw_wrap;
char* data;
};
// Get address of region(s) from which we can read data.
// If the region is contiguous, |data_ptr_bytes_2| will be zero.
// If non-contiguous, |data_ptr_bytes_2| will be the size in bytes of the second
// region. Returns room available to be read or |element_count|, whichever is
// smaller.
static size_t GetBufferReadRegions(RingBuffer* buf,
size_t element_count,
void** data_ptr_1,
size_t* data_ptr_bytes_1,
void** data_ptr_2,
size_t* data_ptr_bytes_2) {
const size_t readable_elements = WebRtc_available_read(buf);
const size_t read_elements = (readable_elements < element_count ?
readable_elements : element_count);
const size_t margin = buf->element_count - buf->read_pos;
// Check to see if read is not contiguous.
if (read_elements > margin) {
// Write data in two blocks that wrap the buffer.
*data_ptr_1 = buf->data + buf->read_pos * buf->element_size;
*data_ptr_bytes_1 = margin * buf->element_size;
*data_ptr_2 = buf->data;
*data_ptr_bytes_2 = (read_elements - margin) * buf->element_size;
} else {
*data_ptr_1 = buf->data + buf->read_pos * buf->element_size;
*data_ptr_bytes_1 = read_elements * buf->element_size;
*data_ptr_2 = NULL;
*data_ptr_bytes_2 = 0;
}
return read_elements;
}
RingBuffer* WebRtc_CreateBuffer(size_t element_count, size_t element_size) {
RingBuffer* self = NULL;
if (element_count == 0 || element_size == 0) {
return NULL;
}
self = malloc(sizeof(RingBuffer));
if (!self) {
return NULL;
}
self->data = malloc(element_count * element_size);
if (!self->data) {
free(self);
self = NULL;
return NULL;
}
self->element_count = element_count;
self->element_size = element_size;
WebRtc_InitBuffer(self);
return self;
}
void WebRtc_InitBuffer(RingBuffer* self) {
self->read_pos = 0;
self->write_pos = 0;
self->rw_wrap = SAME_WRAP;
// Initialize buffer to zeros
memset(self->data, 0, self->element_count * self->element_size);
}
void WebRtc_FreeBuffer(void* handle) {
RingBuffer* self = (RingBuffer*)handle;
if (!self) {
return;
}
free(self->data);
free(self);
}
size_t WebRtc_ReadBuffer(RingBuffer* self,
void** data_ptr,
void* data,
size_t element_count) {
if (self == NULL) {
return 0;
}
if (data == NULL) {
return 0;
}
{
void* buf_ptr_1 = NULL;
void* buf_ptr_2 = NULL;
size_t buf_ptr_bytes_1 = 0;
size_t buf_ptr_bytes_2 = 0;
const size_t read_count = GetBufferReadRegions(self,
element_count,
&buf_ptr_1,
&buf_ptr_bytes_1,
&buf_ptr_2,
&buf_ptr_bytes_2);
if (buf_ptr_bytes_2 > 0) {
// We have a wrap around when reading the buffer. Copy the buffer data to
// |data| and point to it.
memcpy(data, buf_ptr_1, buf_ptr_bytes_1);
memcpy(((char*) data) + buf_ptr_bytes_1, buf_ptr_2, buf_ptr_bytes_2);
buf_ptr_1 = data;
} else if (!data_ptr) {
// No wrap, but a memcpy was requested.
memcpy(data, buf_ptr_1, buf_ptr_bytes_1);
}
if (data_ptr) {
// |buf_ptr_1| == |data| in the case of a wrap.
*data_ptr = buf_ptr_1;
}
// Update read position
WebRtc_MoveReadPtr(self, (int) read_count);
return read_count;
}
}
size_t WebRtc_WriteBuffer(RingBuffer* self,
const void* data,
size_t element_count) {
if (!self) {
return 0;
}
if (!data) {
return 0;
}
{
const size_t free_elements = WebRtc_available_write(self);
const size_t write_elements = (free_elements < element_count ? free_elements
: element_count);
size_t n = write_elements;
const size_t margin = self->element_count - self->write_pos;
if (write_elements > margin) {
// Buffer wrap around when writing.
memcpy(self->data + self->write_pos * self->element_size,
data, margin * self->element_size);
self->write_pos = 0;
n -= margin;
self->rw_wrap = DIFF_WRAP;
}
memcpy(self->data + self->write_pos * self->element_size,
((const char*) data) + ((write_elements - n) * self->element_size),
n * self->element_size);
self->write_pos += n;
return write_elements;
}
}
int WebRtc_MoveReadPtr(RingBuffer* self, int element_count) {
if (!self) {
return 0;
}
{
// We need to be able to take care of negative changes, hence use "int"
// instead of "size_t".
const int free_elements = (int) WebRtc_available_write(self);
const int readable_elements = (int) WebRtc_available_read(self);
int read_pos = (int) self->read_pos;
if (element_count > readable_elements) {
element_count = readable_elements;
}
if (element_count < -free_elements) {
element_count = -free_elements;
}
read_pos += element_count;
if (read_pos > (int) self->element_count) {
// Buffer wrap around. Restart read position and wrap indicator.
read_pos -= (int) self->element_count;
self->rw_wrap = SAME_WRAP;
}
if (read_pos < 0) {
// Buffer wrap around. Restart read position and wrap indicator.
read_pos += (int) self->element_count;
self->rw_wrap = DIFF_WRAP;
}
self->read_pos = (size_t) read_pos;
return element_count;
}
}
size_t WebRtc_available_read(const RingBuffer* self) {
if (!self) {
return 0;
}
if (self->rw_wrap == SAME_WRAP) {
return self->write_pos - self->read_pos;
} else {
return self->element_count - self->read_pos + self->write_pos;
}
}
size_t WebRtc_available_write(const RingBuffer* self) {
if (!self) {
return 0;
}
return self->element_count - WebRtc_available_read(self);
}

View File

@ -0,0 +1,66 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// A ring buffer to hold arbitrary data. Provides no thread safety. Unless
// otherwise specified, functions return 0 on success and -1 on error.
#ifndef WEBRTC_COMMON_AUDIO_RING_BUFFER_H_
#define WEBRTC_COMMON_AUDIO_RING_BUFFER_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <stddef.h> // size_t
typedef struct RingBuffer RingBuffer;
// Creates and initializes the buffer. Returns NULL on failure.
RingBuffer* WebRtc_CreateBuffer(size_t element_count, size_t element_size);
void WebRtc_InitBuffer(RingBuffer* handle);
void WebRtc_FreeBuffer(void* handle);
// Reads data from the buffer. |data_ptr| will point to the address where the
// data is located. If all |element_count| elements can be read without a
// buffer wrap-around, |data_ptr| will point to the location in the buffer.
// Otherwise, the data will be copied to |data| (memory allocation done by the
// user) and |data_ptr| will point to the address of |data|. |data_ptr| is only
// guaranteed to be valid until the next call to WebRtc_WriteBuffer().
//
// To force a copying to |data|, pass a NULL |data_ptr|.
//
// Returns number of elements read.
size_t WebRtc_ReadBuffer(RingBuffer* handle,
void** data_ptr,
void* data,
size_t element_count);
// Writes |data| to buffer and returns the number of elements written.
size_t WebRtc_WriteBuffer(RingBuffer* handle, const void* data,
size_t element_count);
// Moves the buffer read position and returns the number of elements moved.
// Positive |element_count| moves the read position towards the write position,
// that is, flushing the buffer. Negative |element_count| moves the read
// position away from the write position, that is, stuffing the buffer.
// Returns number of elements moved.
int WebRtc_MoveReadPtr(RingBuffer* handle, int element_count);
// Returns number of available elements to read.
size_t WebRtc_available_read(const RingBuffer* handle);
// Returns number of available elements for write.
size_t WebRtc_available_write(const RingBuffer* handle);
#ifdef __cplusplus
}
#endif
#endif // WEBRTC_COMMON_AUDIO_RING_BUFFER_H_
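A minimal sketch of the intended call pattern for this C API (buffer size and
element type are illustrative):

#include <stdint.h>
#include "webrtc/common_audio/ring_buffer.h"

// Sketch: write |n| int16_t elements and read them back. |data_ptr| points
// into the ring buffer when the read is contiguous, otherwise into |out|.
static void RingBufferRoundTripSketch(const int16_t* in, int16_t* out,
                                      size_t n) {
  RingBuffer* rb = WebRtc_CreateBuffer(1024, sizeof(int16_t));
  const size_t written = WebRtc_WriteBuffer(rb, in, n);
  void* data_ptr = NULL;
  const size_t read = WebRtc_ReadBuffer(rb, &data_ptr, out, written);
  (void) read;
  WebRtc_FreeBuffer(rb);
}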

View File

@ -0,0 +1,60 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_audio/sparse_fir_filter.h"
#include "webrtc/base/checks.h"
namespace webrtc {
SparseFIRFilter::SparseFIRFilter(const float* nonzero_coeffs,
size_t num_nonzero_coeffs,
size_t sparsity,
size_t offset)
: sparsity_(sparsity),
offset_(offset),
nonzero_coeffs_(nonzero_coeffs, nonzero_coeffs + num_nonzero_coeffs),
state_(sparsity_ * (num_nonzero_coeffs - 1) + offset_, 0.f) {
RTC_CHECK_GE(num_nonzero_coeffs, 1u);
RTC_CHECK_GE(sparsity, 1u);
}
void SparseFIRFilter::Filter(const float* in, size_t length, float* out) {
// Convolves the input signal |in| with the filter kernel |nonzero_coeffs_|
// taking into account the previous state.
for (size_t i = 0; i < length; ++i) {
out[i] = 0.f;
size_t j;
for (j = 0; i >= j * sparsity_ + offset_ &&
j < nonzero_coeffs_.size(); ++j) {
out[i] += in[i - j * sparsity_ - offset_] * nonzero_coeffs_[j];
}
for (; j < nonzero_coeffs_.size(); ++j) {
out[i] += state_[i + (nonzero_coeffs_.size() - j - 1) * sparsity_] *
nonzero_coeffs_[j];
}
}
// Update current state.
if (state_.size() > 0u) {
if (length >= state_.size()) {
std::memcpy(&state_[0],
&in[length - state_.size()],
state_.size() * sizeof(*in));
} else {
std::memmove(&state_[0],
&state_[length],
(state_.size() - length) * sizeof(state_[0]));
std::memcpy(&state_[state_.size() - length], in, length * sizeof(*in));
}
}
}
} // namespace webrtc

View File

@ -0,0 +1,52 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_SPARSE_FIR_FILTER_H_
#define WEBRTC_COMMON_AUDIO_SPARSE_FIR_FILTER_H_
#include <cstring>
#include <vector>
#include "webrtc/base/constructormagic.h"
namespace webrtc {
// A Finite Impulse Response filter implementation which takes advantage of a
// sparse structure with uniformly distributed non-zero coefficients.
class SparseFIRFilter final {
public:
// |num_nonzero_coeffs| is the number of non-zero coefficients,
// |nonzero_coeffs|. They are assumed to be uniformly distributed every
// |sparsity| samples and with an initial |offset|. The rest of the filter
// coefficients will be assumed zeros. For example, with sparsity = 3, and
// offset = 1 the filter coefficients will be:
// B = [0 coeffs[0] 0 0 coeffs[1] 0 0 coeffs[2] ... ]
// All initial state values will be zeros.
SparseFIRFilter(const float* nonzero_coeffs,
size_t num_nonzero_coeffs,
size_t sparsity,
size_t offset);
// Filters the supplied |in| data.
// |out| must be previously allocated and must hold at least |length| samples.
void Filter(const float* in, size_t length, float* out);
private:
const size_t sparsity_;
const size_t offset_;
const std::vector<float> nonzero_coeffs_;
std::vector<float> state_;
RTC_DISALLOW_COPY_AND_ASSIGN(SparseFIRFilter);
};
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_SPARSE_FIR_FILTER_H_
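A short usage sketch matching the coefficient layout described above
(sparsity = 3, offset = 1, so B = [0 c0 0 0 c1 0 0 c2 ...]); the tap values
are illustrative:

#include <stddef.h>
#include "webrtc/common_audio/sparse_fir_filter.h"

// Sketch: run the sparse FIR filter over a block of |length| samples.
static void SparseFirSketch(const float* in, float* out, size_t length) {
  const float kCoeffs[] = {0.5f, 0.25f, 0.125f};
  webrtc::SparseFIRFilter filter(kCoeffs, 3 /* num_nonzero_coeffs */,
                                 3 /* sparsity */, 1 /* offset */);
  filter.Filter(in, length, out);  // |out| must hold at least |length| floats.
}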

View File

@ -0,0 +1,174 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_audio/wav_file.h"
#include <algorithm>
#include <cstdio>
#include <limits>
#include "webrtc/base/checks.h"
#include "webrtc/base/safe_conversions.h"
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/wav_header.h"
namespace webrtc {
// We write 16-bit PCM WAV files.
static const WavFormat kWavFormat = kWavFormatPcm;
static const int kBytesPerSample = 2;
// Doesn't take ownership of the file handle and won't close it.
class ReadableWavFile : public ReadableWav {
public:
explicit ReadableWavFile(FILE* file) : file_(file) {}
virtual size_t Read(void* buf, size_t num_bytes) {
return fread(buf, 1, num_bytes, file_);
}
private:
FILE* file_;
};
WavReader::WavReader(const std::string& filename)
: file_handle_(fopen(filename.c_str(), "rb")) {
RTC_CHECK(file_handle_ && "Could not open wav file for reading.");
ReadableWavFile readable(file_handle_);
WavFormat format;
int bytes_per_sample;
RTC_CHECK(ReadWavHeader(&readable, &num_channels_, &sample_rate_, &format,
&bytes_per_sample, &num_samples_));
num_samples_remaining_ = num_samples_;
RTC_CHECK_EQ(kWavFormat, format);
RTC_CHECK_EQ(kBytesPerSample, bytes_per_sample);
}
WavReader::~WavReader() {
Close();
}
size_t WavReader::ReadSamples(size_t num_samples, int16_t* samples) {
#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
#error "Need to convert samples to big-endian when reading from WAV file"
#endif
// There could be metadata after the audio; ensure we don't read it.
num_samples = std::min(rtc::checked_cast<uint32_t>(num_samples),
num_samples_remaining_);
const size_t read =
fread(samples, sizeof(*samples), num_samples, file_handle_);
// If we didn't read what was requested, ensure we've reached the EOF.
RTC_CHECK(read == num_samples || feof(file_handle_));
RTC_CHECK_LE(read, num_samples_remaining_);
num_samples_remaining_ -= rtc::checked_cast<uint32_t>(read);
return read;
}
size_t WavReader::ReadSamples(size_t num_samples, float* samples) {
static const size_t kChunksize = 4096 / sizeof(uint16_t);
size_t read = 0;
for (size_t i = 0; i < num_samples; i += kChunksize) {
int16_t isamples[kChunksize];
size_t chunk = std::min(kChunksize, num_samples - i);
chunk = ReadSamples(chunk, isamples);
for (size_t j = 0; j < chunk; ++j)
samples[i + j] = isamples[j];
read += chunk;
}
return read;
}
void WavReader::Close() {
RTC_CHECK_EQ(0, fclose(file_handle_));
file_handle_ = NULL;
}
WavWriter::WavWriter(const std::string& filename, int sample_rate,
int num_channels)
: sample_rate_(sample_rate),
num_channels_(num_channels),
num_samples_(0),
file_handle_(fopen(filename.c_str(), "wb")) {
RTC_CHECK(file_handle_ && "Could not open wav file for writing.");
RTC_CHECK(CheckWavParameters(num_channels_, sample_rate_, kWavFormat,
kBytesPerSample, num_samples_));
// Write a blank placeholder header, since we need to know the total number
// of samples before we can fill in the real data.
static const uint8_t blank_header[kWavHeaderSize] = {0};
RTC_CHECK_EQ(1u, fwrite(blank_header, kWavHeaderSize, 1, file_handle_));
}
WavWriter::~WavWriter() {
Close();
}
void WavWriter::WriteSamples(const int16_t* samples, size_t num_samples) {
#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
#error "Need to convert samples to little-endian when writing to WAV file"
#endif
const size_t written =
fwrite(samples, sizeof(*samples), num_samples, file_handle_);
RTC_CHECK_EQ(num_samples, written);
num_samples_ += static_cast<uint32_t>(written);
RTC_CHECK(written <= std::numeric_limits<uint32_t>::max() ||
num_samples_ >= written); // detect uint32_t overflow
}
void WavWriter::WriteSamples(const float* samples, size_t num_samples) {
static const size_t kChunksize = 4096 / sizeof(uint16_t);
for (size_t i = 0; i < num_samples; i += kChunksize) {
int16_t isamples[kChunksize];
const size_t chunk = std::min(kChunksize, num_samples - i);
FloatS16ToS16(samples + i, chunk, isamples);
WriteSamples(isamples, chunk);
}
}
void WavWriter::Close() {
RTC_CHECK_EQ(0, fseek(file_handle_, 0, SEEK_SET));
uint8_t header[kWavHeaderSize];
WriteWavHeader(header, num_channels_, sample_rate_, kWavFormat,
kBytesPerSample, num_samples_);
RTC_CHECK_EQ(1u, fwrite(header, kWavHeaderSize, 1, file_handle_));
RTC_CHECK_EQ(0, fclose(file_handle_));
file_handle_ = NULL;
}
} // namespace webrtc
rtc_WavWriter* rtc_WavOpen(const char* filename,
int sample_rate,
int num_channels) {
return reinterpret_cast<rtc_WavWriter*>(
new webrtc::WavWriter(filename, sample_rate, num_channels));
}
void rtc_WavClose(rtc_WavWriter* wf) {
delete reinterpret_cast<webrtc::WavWriter*>(wf);
}
void rtc_WavWriteSamples(rtc_WavWriter* wf,
const float* samples,
size_t num_samples) {
reinterpret_cast<webrtc::WavWriter*>(wf)->WriteSamples(samples, num_samples);
}
int rtc_WavSampleRate(const rtc_WavWriter* wf) {
return reinterpret_cast<const webrtc::WavWriter*>(wf)->sample_rate();
}
int rtc_WavNumChannels(const rtc_WavWriter* wf) {
return reinterpret_cast<const webrtc::WavWriter*>(wf)->num_channels();
}
uint32_t rtc_WavNumSamples(const rtc_WavWriter* wf) {
return reinterpret_cast<const webrtc::WavWriter*>(wf)->num_samples();
}

View File

@ -0,0 +1,115 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_WAV_FILE_H_
#define WEBRTC_COMMON_AUDIO_WAV_FILE_H_
#ifdef __cplusplus
#include <stdint.h>
#include <cstddef>
#include <string>
#include "webrtc/base/constructormagic.h"
namespace webrtc {
// Interface to provide access to WAV file parameters.
class WavFile {
public:
virtual ~WavFile() {}
virtual int sample_rate() const = 0;
virtual int num_channels() const = 0;
virtual uint32_t num_samples() const = 0;
};
// Simple C++ class for writing 16-bit PCM WAV files. All error handling is
// by calls to RTC_CHECK(), making it unsuitable for anything but debug code.
class WavWriter final : public WavFile {
public:
// Open a new WAV file for writing.
WavWriter(const std::string& filename, int sample_rate, int num_channels);
// Close the WAV file, after writing its header.
~WavWriter();
// Write additional samples to the file. Each sample is in the range
// [-32768,32767], and there must be the previously specified number of
// interleaved channels.
void WriteSamples(const float* samples, size_t num_samples);
void WriteSamples(const int16_t* samples, size_t num_samples);
int sample_rate() const override { return sample_rate_; }
int num_channels() const override { return num_channels_; }
uint32_t num_samples() const override { return num_samples_; }
private:
void Close();
const int sample_rate_;
const int num_channels_;
uint32_t num_samples_; // Total number of samples written to file.
FILE* file_handle_; // Output file, owned by this class
RTC_DISALLOW_COPY_AND_ASSIGN(WavWriter);
};
// Follows the conventions of WavWriter.
class WavReader final : public WavFile {
public:
// Opens an existing WAV file for reading.
explicit WavReader(const std::string& filename);
// Close the WAV file.
~WavReader();
// Returns the number of samples read. If this is less than requested,
// verifies that the end of the file was reached.
size_t ReadSamples(size_t num_samples, float* samples);
size_t ReadSamples(size_t num_samples, int16_t* samples);
int sample_rate() const override { return sample_rate_; }
int num_channels() const override { return num_channels_; }
uint32_t num_samples() const override { return num_samples_; }
private:
void Close();
int sample_rate_;
int num_channels_;
uint32_t num_samples_; // Total number of samples in the file.
uint32_t num_samples_remaining_;
FILE* file_handle_; // Input file, owned by this class.
RTC_DISALLOW_COPY_AND_ASSIGN(WavReader);
};
} // namespace webrtc
extern "C" {
#endif // __cplusplus
// C wrappers for the WavWriter class.
typedef struct rtc_WavWriter rtc_WavWriter;
rtc_WavWriter* rtc_WavOpen(const char* filename,
int sample_rate,
int num_channels);
void rtc_WavClose(rtc_WavWriter* wf);
void rtc_WavWriteSamples(rtc_WavWriter* wf,
const float* samples,
size_t num_samples);
int rtc_WavSampleRate(const rtc_WavWriter* wf);
int rtc_WavNumChannels(const rtc_WavWriter* wf);
uint32_t rtc_WavNumSamples(const rtc_WavWriter* wf);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBRTC_COMMON_AUDIO_WAV_FILE_H_
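A minimal round-trip sketch using the classes above (path, rate and sample
count are illustrative):

#include <vector>
#include "webrtc/common_audio/wav_file.h"

// Sketch: write one second of 16 kHz mono silence, then read it back.
static void WavRoundTripSketch() {
  const std::string path = "/tmp/example.wav";
  {
    webrtc::WavWriter writer(path, 16000 /* sample_rate */, 1 /* channels */);
    std::vector<int16_t> silence(16000, 0);
    writer.WriteSamples(silence.data(), silence.size());
  }  // The destructor calls Close(), which rewrites the header with the
     // final sample count.
  webrtc::WavReader reader(path);
  std::vector<int16_t> samples(reader.num_samples());
  reader.ReadSamples(samples.size(), samples.data());
}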

View File

@ -0,0 +1,242 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Based on the WAV file format documentation at
// https://ccrma.stanford.edu/courses/422/projects/WaveFormat/ and
// http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html
#include "webrtc/common_audio/wav_header.h"
#include <algorithm>
#include <cstring>
#include <limits>
#include <string>
#include "webrtc/base/checks.h"
#include "webrtc/common_audio/include/audio_util.h"
namespace webrtc {
namespace {
struct ChunkHeader {
uint32_t ID;
uint32_t Size;
};
static_assert(sizeof(ChunkHeader) == 8, "ChunkHeader size");
// We can't nest this definition in WavHeader, because VS2013 gives an error
// on sizeof(WavHeader::fmt): "error C2070: 'unknown': illegal sizeof operand".
struct FmtSubchunk {
ChunkHeader header;
uint16_t AudioFormat;
uint16_t NumChannels;
uint32_t SampleRate;
uint32_t ByteRate;
uint16_t BlockAlign;
uint16_t BitsPerSample;
};
static_assert(sizeof(FmtSubchunk) == 24, "FmtSubchunk size");
const uint32_t kFmtSubchunkSize = sizeof(FmtSubchunk) - sizeof(ChunkHeader);
struct WavHeader {
struct {
ChunkHeader header;
uint32_t Format;
} riff;
FmtSubchunk fmt;
struct {
ChunkHeader header;
} data;
};
static_assert(sizeof(WavHeader) == kWavHeaderSize, "no padding in header");
} // namespace
bool CheckWavParameters(int num_channels,
int sample_rate,
WavFormat format,
int bytes_per_sample,
uint32_t num_samples) {
// num_channels, sample_rate, and bytes_per_sample must be positive, must fit
// in their respective fields, and their product must fit in the 32-bit
// ByteRate field.
if (num_channels <= 0 || sample_rate <= 0 || bytes_per_sample <= 0)
return false;
if (static_cast<uint64_t>(sample_rate) > std::numeric_limits<uint32_t>::max())
return false;
if (static_cast<uint64_t>(num_channels) >
std::numeric_limits<uint16_t>::max())
return false;
if (static_cast<uint64_t>(bytes_per_sample) * 8 >
std::numeric_limits<uint16_t>::max())
return false;
if (static_cast<uint64_t>(sample_rate) * num_channels * bytes_per_sample >
std::numeric_limits<uint32_t>::max())
return false;
// format and bytes_per_sample must agree.
switch (format) {
case kWavFormatPcm:
// Other values may be OK, but for now we're conservative:
if (bytes_per_sample != 1 && bytes_per_sample != 2)
return false;
break;
case kWavFormatALaw:
case kWavFormatMuLaw:
if (bytes_per_sample != 1)
return false;
break;
default:
return false;
}
// The number of bytes in the file, not counting the first ChunkHeader, must
// be less than 2^32; otherwise, the ChunkSize field overflows.
const uint32_t max_samples =
(std::numeric_limits<uint32_t>::max()
- (kWavHeaderSize - sizeof(ChunkHeader))) /
bytes_per_sample;
if (num_samples > max_samples)
return false;
// Each channel must have the same number of samples.
if (num_samples % num_channels != 0)
return false;
return true;
}
#ifdef WEBRTC_ARCH_LITTLE_ENDIAN
static inline void WriteLE16(uint16_t* f, uint16_t x) { *f = x; }
static inline void WriteLE32(uint32_t* f, uint32_t x) { *f = x; }
static inline void WriteFourCC(uint32_t* f, char a, char b, char c, char d) {
*f = static_cast<uint32_t>(a)
| static_cast<uint32_t>(b) << 8
| static_cast<uint32_t>(c) << 16
| static_cast<uint32_t>(d) << 24;
}
static inline uint16_t ReadLE16(uint16_t x) { return x; }
static inline uint32_t ReadLE32(uint32_t x) { return x; }
static inline std::string ReadFourCC(uint32_t x) {
return std::string(reinterpret_cast<char*>(&x), 4);
}
#else
#error "Write be-to-le conversion functions"
#endif
static inline uint32_t RiffChunkSize(uint32_t bytes_in_payload) {
return bytes_in_payload + kWavHeaderSize - sizeof(ChunkHeader);
}
static inline uint32_t ByteRate(int num_channels, int sample_rate,
int bytes_per_sample) {
return static_cast<uint32_t>(num_channels) * sample_rate * bytes_per_sample;
}
static inline uint16_t BlockAlign(int num_channels, int bytes_per_sample) {
return num_channels * bytes_per_sample;
}
void WriteWavHeader(uint8_t* buf,
int num_channels,
int sample_rate,
WavFormat format,
int bytes_per_sample,
uint32_t num_samples) {
RTC_CHECK(CheckWavParameters(num_channels, sample_rate, format,
bytes_per_sample, num_samples));
WavHeader header;
const uint32_t bytes_in_payload = bytes_per_sample * num_samples;
WriteFourCC(&header.riff.header.ID, 'R', 'I', 'F', 'F');
WriteLE32(&header.riff.header.Size, RiffChunkSize(bytes_in_payload));
WriteFourCC(&header.riff.Format, 'W', 'A', 'V', 'E');
WriteFourCC(&header.fmt.header.ID, 'f', 'm', 't', ' ');
WriteLE32(&header.fmt.header.Size, kFmtSubchunkSize);
WriteLE16(&header.fmt.AudioFormat, format);
WriteLE16(&header.fmt.NumChannels, num_channels);
WriteLE32(&header.fmt.SampleRate, sample_rate);
WriteLE32(&header.fmt.ByteRate, ByteRate(num_channels, sample_rate,
bytes_per_sample));
WriteLE16(&header.fmt.BlockAlign, BlockAlign(num_channels, bytes_per_sample));
WriteLE16(&header.fmt.BitsPerSample, 8 * bytes_per_sample);
WriteFourCC(&header.data.header.ID, 'd', 'a', 't', 'a');
WriteLE32(&header.data.header.Size, bytes_in_payload);
// Do an extra copy rather than writing everything to buf directly, since buf
// might not be correctly aligned.
memcpy(buf, &header, kWavHeaderSize);
}
bool ReadWavHeader(ReadableWav* readable,
int* num_channels,
int* sample_rate,
WavFormat* format,
int* bytes_per_sample,
uint32_t* num_samples) {
WavHeader header;
if (readable->Read(&header, kWavHeaderSize - sizeof(header.data)) !=
kWavHeaderSize - sizeof(header.data))
return false;
const uint32_t fmt_size = ReadLE32(header.fmt.header.Size);
if (fmt_size != kFmtSubchunkSize) {
// An optional two-byte extension field is permitted to be present with PCM,
// but it must be zero.
int16_t ext_size;
if (kFmtSubchunkSize + sizeof(ext_size) != fmt_size)
return false;
if (readable->Read(&ext_size, sizeof(ext_size)) != sizeof(ext_size))
return false;
if (ext_size != 0)
return false;
}
if (readable->Read(&header.data, sizeof(header.data)) != sizeof(header.data))
return false;
// Parse needed fields.
*format = static_cast<WavFormat>(ReadLE16(header.fmt.AudioFormat));
*num_channels = ReadLE16(header.fmt.NumChannels);
*sample_rate = ReadLE32(header.fmt.SampleRate);
*bytes_per_sample = ReadLE16(header.fmt.BitsPerSample) / 8;
const uint32_t bytes_in_payload = ReadLE32(header.data.header.Size);
if (*bytes_per_sample <= 0)
return false;
*num_samples = bytes_in_payload / *bytes_per_sample;
// Sanity check remaining fields.
if (ReadFourCC(header.riff.header.ID) != "RIFF")
return false;
if (ReadFourCC(header.riff.Format) != "WAVE")
return false;
if (ReadFourCC(header.fmt.header.ID) != "fmt ")
return false;
if (ReadFourCC(header.data.header.ID) != "data")
return false;
if (ReadLE32(header.riff.header.Size) < RiffChunkSize(bytes_in_payload))
return false;
if (ReadLE32(header.fmt.ByteRate) !=
ByteRate(*num_channels, *sample_rate, *bytes_per_sample))
return false;
if (ReadLE16(header.fmt.BlockAlign) !=
BlockAlign(*num_channels, *bytes_per_sample))
return false;
return CheckWavParameters(*num_channels, *sample_rate, *format,
*bytes_per_sample, *num_samples);
}
} // namespace webrtc

View File

@ -0,0 +1,64 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_WAV_HEADER_H_
#define WEBRTC_COMMON_AUDIO_WAV_HEADER_H_
#include <stddef.h>
#include <stdint.h>
namespace webrtc {
static const size_t kWavHeaderSize = 44;
class ReadableWav {
public:
// Returns the number of bytes read.
size_t virtual Read(void* buf, size_t num_bytes) = 0;
virtual ~ReadableWav() {}
};
enum WavFormat {
kWavFormatPcm = 1, // PCM, each sample of size bytes_per_sample
kWavFormatALaw = 6, // 8-bit ITU-T G.711 A-law
kWavFormatMuLaw = 7, // 8-bit ITU-T G.711 mu-law
};
// Return true if the given parameters will make a well-formed WAV header.
bool CheckWavParameters(int num_channels,
int sample_rate,
WavFormat format,
int bytes_per_sample,
uint32_t num_samples);
// Write a kWavHeaderSize bytes long WAV header to buf. The payload that
// follows the header is supposed to have the specified number of interleaved
// channels and contain the specified total number of samples of the specified
// type. CHECKs the input parameters for validity.
void WriteWavHeader(uint8_t* buf,
int num_channels,
int sample_rate,
WavFormat format,
int bytes_per_sample,
uint32_t num_samples);
// Read a WAV header from an implemented ReadableWav and parse the values into
// the provided output parameters. ReadableWav is used because the header can
// be variably sized. Returns false if the header is invalid.
bool ReadWavHeader(ReadableWav* readable,
int* num_channels,
int* sample_rate,
WavFormat* format,
int* bytes_per_sample,
uint32_t* num_samples);
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_WAV_HEADER_H_
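A small sketch of building a header with the helpers above (all parameter
values are illustrative; WriteWavHeader() itself RTC_CHECKs the same
CheckWavParameters() condition):

#include <stdint.h>
#include "webrtc/common_audio/wav_header.h"

// Sketch: 16-bit PCM, mono, 16 kHz, 16000 samples, written into a
// caller-provided buffer of at least kWavHeaderSize bytes.
static void BuildWavHeaderSketch(uint8_t* buf) {
  if (webrtc::CheckWavParameters(1 /* channels */, 16000 /* sample_rate */,
                                 webrtc::kWavFormatPcm,
                                 2 /* bytes_per_sample */,
                                 16000 /* num_samples */)) {
    webrtc::WriteWavHeader(buf, 1, 16000, webrtc::kWavFormatPcm, 2, 16000);
  }
}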

View File

@ -0,0 +1,72 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#define _USE_MATH_DEFINES
#include "webrtc/common_audio/window_generator.h"
#include <cmath>
#include <complex>
#include "webrtc/base/checks.h"
using std::complex;
namespace {
// Modified Bessel function of order 0 for complex inputs.
complex<float> I0(complex<float> x) {
complex<float> y = x / 3.75f;
y *= y;
return 1.0f + y * (
3.5156229f + y * (
3.0899424f + y * (
1.2067492f + y * (
0.2659732f + y * (
0.360768e-1f + y * 0.45813e-2f)))));
}
} // namespace
namespace webrtc {
void WindowGenerator::Hanning(int length, float* window) {
RTC_CHECK_GT(length, 1);
RTC_CHECK(window != nullptr);
for (int i = 0; i < length; ++i) {
window[i] = 0.5f * (1 - cosf(2 * static_cast<float>(M_PI) * i /
(length - 1)));
}
}
void WindowGenerator::KaiserBesselDerived(float alpha, size_t length,
float* window) {
RTC_CHECK_GT(length, 1U);
RTC_CHECK(window != nullptr);
const size_t half = (length + 1) / 2;
float sum = 0.0f;
for (size_t i = 0; i <= half; ++i) {
complex<float> r = (4.0f * i) / length - 1.0f;
sum += I0(static_cast<float>(M_PI) * alpha * sqrt(1.0f - r * r)).real();
window[i] = sum;
}
for (size_t i = length - 1; i >= half; --i) {
window[length - i - 1] = sqrtf(window[length - i - 1] / sum);
window[i] = window[length - i - 1];
}
if (length % 2 == 1) {
window[half - 1] = sqrtf(window[half - 1] / sum);
}
}
} // namespace webrtc

View File

@ -0,0 +1,33 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_WINDOW_GENERATOR_H_
#define WEBRTC_COMMON_AUDIO_WINDOW_GENERATOR_H_
#include <stddef.h>
#include "webrtc/base/constructormagic.h"
namespace webrtc {
// Helper class with generators for various signal transform windows.
class WindowGenerator {
public:
static void Hanning(int length, float* window);
static void KaiserBesselDerived(float alpha, size_t length, float* window);
private:
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(WindowGenerator);
};
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_WINDOW_GENERATOR_H_
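A hedged sketch of the static helpers above (window length and alpha are
illustrative):

#include <vector>
#include "webrtc/common_audio/window_generator.h"

// Sketch: generate a 512-point Hanning window and a Kaiser-Bessel derived
// window of the same length.
static void MakeWindowsSketch() {
  std::vector<float> hanning(512);
  webrtc::WindowGenerator::Hanning(512, hanning.data());

  std::vector<float> kbd(512);
  webrtc::WindowGenerator::KaiserBesselDerived(1.5f /* alpha */, kbd.size(),
                                               kbd.data());
}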

View File

@ -1 +1 @@
SUBDIRS = audio_processing SUBDIRS = audio_coding audio_processing

View File

@ -0,0 +1,835 @@
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("//build/config/arm.gni")
import("../../build/webrtc.gni")
config("audio_coding_config") {
include_dirs = [
"main/interface",
"../interface",
]
}
source_set("audio_coding") {
sources = [
"main/acm2/acm_codec_database.cc",
"main/acm2/acm_codec_database.h",
"main/acm2/acm_common_defs.h",
"main/acm2/acm_receiver.cc",
"main/acm2/acm_receiver.h",
"main/acm2/acm_resampler.cc",
"main/acm2/acm_resampler.h",
"main/acm2/audio_coding_module.cc",
"main/acm2/audio_coding_module_impl.cc",
"main/acm2/audio_coding_module_impl.h",
"main/acm2/call_statistics.cc",
"main/acm2/call_statistics.h",
"main/acm2/codec_manager.cc",
"main/acm2/codec_manager.h",
"main/acm2/codec_owner.cc",
"main/acm2/codec_owner.h",
"main/acm2/initial_delay_manager.cc",
"main/acm2/initial_delay_manager.h",
"main/acm2/nack.cc",
"main/acm2/nack.h",
"main/interface/audio_coding_module.h",
"main/interface/audio_coding_module_typedefs.h",
]
defines = []
configs += [ "../..:common_config" ]
public_configs = [
"../..:common_inherited_config",
":audio_coding_config",
]
if (is_win) {
cflags = [
# TODO(kjellander): Bug 261: fix this warning.
"/wd4373", # virtual function override.
]
}
if (is_clang) {
# Suppress warnings from Chrome's Clang plugins.
# See http://code.google.com/p/webrtc/issues/detail?id=163 for details.
configs -= [ "//build/config/clang:find_bad_constructs" ]
}
deps = [
":cng",
":g711",
":neteq",
":pcm16b",
"../..:rtc_event_log",
"../..:webrtc_common",
"../../common_audio",
"../../system_wrappers",
]
if (rtc_include_opus) {
defines += [ "WEBRTC_CODEC_OPUS" ]
deps += [ ":webrtc_opus" ]
}
if (!build_with_mozilla) {
if (current_cpu == "arm") {
defines += [ "WEBRTC_CODEC_ISACFX" ]
deps += [ ":isac_fix" ]
} else {
defines += [ "WEBRTC_CODEC_ISAC" ]
deps += [ ":isac" ]
}
defines += [ "WEBRTC_CODEC_G722" ]
deps += [ ":g722" ]
}
if (!build_with_mozilla && !build_with_chromium) {
defines += [
"WEBRTC_CODEC_ILBC",
"WEBRTC_CODEC_RED",
]
deps += [
":ilbc",
":red",
]
}
}
source_set("audio_decoder_interface") {
sources = [
"codecs/audio_decoder.cc",
"codecs/audio_decoder.h",
]
configs += [ "../..:common_config" ]
public_configs = [ "../..:common_inherited_config" ]
deps = [
"../..:webrtc_common",
]
}
source_set("audio_encoder_interface") {
sources = [
"codecs/audio_encoder.cc",
"codecs/audio_encoder.h",
]
configs += [ "../..:common_config" ]
public_configs = [ "../..:common_inherited_config" ]
deps = [
"../..:webrtc_common",
]
}
config("cng_config") {
include_dirs = [
"../../..",
"codecs/cng/include",
]
}
source_set("cng") {
sources = [
"codecs/cng/audio_encoder_cng.cc",
"codecs/cng/cng_helpfuns.c",
"codecs/cng/cng_helpfuns.h",
"codecs/cng/include/audio_encoder_cng.h",
"codecs/cng/include/webrtc_cng.h",
"codecs/cng/webrtc_cng.c",
]
configs += [ "../..:common_config" ]
public_configs = [
"../..:common_inherited_config",
":cng_config",
]
deps = [
"../../common_audio",
":audio_encoder_interface",
]
}
config("red_config") {
include_dirs = [ "codecs/red" ]
}
source_set("red") {
sources = [
"codecs/red/audio_encoder_copy_red.cc",
"codecs/red/audio_encoder_copy_red.h",
]
configs += [ "../..:common_config" ]
public_configs = [
"../..:common_inherited_config",
":red_config",
]
deps = [
"../../common_audio",
":audio_encoder_interface",
]
}
config("g711_config") {
include_dirs = [
"../../..",
"codecs/g711/include",
]
}
source_set("g711") {
sources = [
"codecs/g711/audio_decoder_pcm.cc",
"codecs/g711/audio_encoder_pcm.cc",
"codecs/g711/g711.c",
"codecs/g711/g711.h",
"codecs/g711/g711_interface.c",
"codecs/g711/include/audio_decoder_pcm.h",
"codecs/g711/include/audio_encoder_pcm.h",
"codecs/g711/include/g711_interface.h",
]
configs += [ "../..:common_config" ]
public_configs = [
"../..:common_inherited_config",
":g711_config",
]
deps = [
":audio_encoder_interface",
]
}
config("g722_config") {
include_dirs = [
"../../..",
"codecs/g722/include",
]
}
source_set("g722") {
sources = [
"codecs/g722/audio_decoder_g722.cc",
"codecs/g722/audio_encoder_g722.cc",
"codecs/g722/g722_decode.c",
"codecs/g722/g722_enc_dec.h",
"codecs/g722/g722_encode.c",
"codecs/g722/g722_interface.c",
"codecs/g722/include/audio_decoder_g722.h",
"codecs/g722/include/audio_encoder_g722.h",
"codecs/g722/include/g722_interface.h",
]
configs += [ "../..:common_config" ]
public_configs = [
"../..:common_inherited_config",
":g722_config",
]
deps = [
":audio_encoder_interface",
]
}
config("ilbc_config") {
include_dirs = [
"../../..",
"codecs/ilbc/interface",
]
}
source_set("ilbc") {
sources = [
"codecs/ilbc/abs_quant.c",
"codecs/ilbc/abs_quant.h",
"codecs/ilbc/abs_quant_loop.c",
"codecs/ilbc/abs_quant_loop.h",
"codecs/ilbc/audio_decoder_ilbc.cc",
"codecs/ilbc/audio_encoder_ilbc.cc",
"codecs/ilbc/augmented_cb_corr.c",
"codecs/ilbc/augmented_cb_corr.h",
"codecs/ilbc/bw_expand.c",
"codecs/ilbc/bw_expand.h",
"codecs/ilbc/cb_construct.c",
"codecs/ilbc/cb_construct.h",
"codecs/ilbc/cb_mem_energy.c",
"codecs/ilbc/cb_mem_energy.h",
"codecs/ilbc/cb_mem_energy_augmentation.c",
"codecs/ilbc/cb_mem_energy_augmentation.h",
"codecs/ilbc/cb_mem_energy_calc.c",
"codecs/ilbc/cb_mem_energy_calc.h",
"codecs/ilbc/cb_search.c",
"codecs/ilbc/cb_search.h",
"codecs/ilbc/cb_search_core.c",
"codecs/ilbc/cb_search_core.h",
"codecs/ilbc/cb_update_best_index.c",
"codecs/ilbc/cb_update_best_index.h",
"codecs/ilbc/chebyshev.c",
"codecs/ilbc/chebyshev.h",
"codecs/ilbc/comp_corr.c",
"codecs/ilbc/comp_corr.h",
"codecs/ilbc/constants.c",
"codecs/ilbc/constants.h",
"codecs/ilbc/create_augmented_vec.c",
"codecs/ilbc/create_augmented_vec.h",
"codecs/ilbc/decode.c",
"codecs/ilbc/decode.h",
"codecs/ilbc/decode_residual.c",
"codecs/ilbc/decode_residual.h",
"codecs/ilbc/decoder_interpolate_lsf.c",
"codecs/ilbc/decoder_interpolate_lsf.h",
"codecs/ilbc/defines.h",
"codecs/ilbc/do_plc.c",
"codecs/ilbc/do_plc.h",
"codecs/ilbc/encode.c",
"codecs/ilbc/encode.h",
"codecs/ilbc/energy_inverse.c",
"codecs/ilbc/energy_inverse.h",
"codecs/ilbc/enh_upsample.c",
"codecs/ilbc/enh_upsample.h",
"codecs/ilbc/enhancer.c",
"codecs/ilbc/enhancer.h",
"codecs/ilbc/enhancer_interface.c",
"codecs/ilbc/enhancer_interface.h",
"codecs/ilbc/filtered_cb_vecs.c",
"codecs/ilbc/filtered_cb_vecs.h",
"codecs/ilbc/frame_classify.c",
"codecs/ilbc/frame_classify.h",
"codecs/ilbc/gain_dequant.c",
"codecs/ilbc/gain_dequant.h",
"codecs/ilbc/gain_quant.c",
"codecs/ilbc/gain_quant.h",
"codecs/ilbc/get_cd_vec.c",
"codecs/ilbc/get_cd_vec.h",
"codecs/ilbc/get_lsp_poly.c",
"codecs/ilbc/get_lsp_poly.h",
"codecs/ilbc/get_sync_seq.c",
"codecs/ilbc/get_sync_seq.h",
"codecs/ilbc/hp_input.c",
"codecs/ilbc/hp_input.h",
"codecs/ilbc/hp_output.c",
"codecs/ilbc/hp_output.h",
"codecs/ilbc/ilbc.c",
"codecs/ilbc/include/audio_decoder_ilbc.h",
"codecs/ilbc/include/audio_encoder_ilbc.h",
"codecs/ilbc/index_conv_dec.c",
"codecs/ilbc/index_conv_dec.h",
"codecs/ilbc/index_conv_enc.c",
"codecs/ilbc/index_conv_enc.h",
"codecs/ilbc/init_decode.c",
"codecs/ilbc/init_decode.h",
"codecs/ilbc/init_encode.c",
"codecs/ilbc/init_encode.h",
"codecs/ilbc/interface/ilbc.h",
"codecs/ilbc/interpolate.c",
"codecs/ilbc/interpolate.h",
"codecs/ilbc/interpolate_samples.c",
"codecs/ilbc/interpolate_samples.h",
"codecs/ilbc/lpc_encode.c",
"codecs/ilbc/lpc_encode.h",
"codecs/ilbc/lsf_check.c",
"codecs/ilbc/lsf_check.h",
"codecs/ilbc/lsf_interpolate_to_poly_dec.c",
"codecs/ilbc/lsf_interpolate_to_poly_dec.h",
"codecs/ilbc/lsf_interpolate_to_poly_enc.c",
"codecs/ilbc/lsf_interpolate_to_poly_enc.h",
"codecs/ilbc/lsf_to_lsp.c",
"codecs/ilbc/lsf_to_lsp.h",
"codecs/ilbc/lsf_to_poly.c",
"codecs/ilbc/lsf_to_poly.h",
"codecs/ilbc/lsp_to_lsf.c",
"codecs/ilbc/lsp_to_lsf.h",
"codecs/ilbc/my_corr.c",
"codecs/ilbc/my_corr.h",
"codecs/ilbc/nearest_neighbor.c",
"codecs/ilbc/nearest_neighbor.h",
"codecs/ilbc/pack_bits.c",
"codecs/ilbc/pack_bits.h",
"codecs/ilbc/poly_to_lsf.c",
"codecs/ilbc/poly_to_lsf.h",
"codecs/ilbc/poly_to_lsp.c",
"codecs/ilbc/poly_to_lsp.h",
"codecs/ilbc/refiner.c",
"codecs/ilbc/refiner.h",
"codecs/ilbc/simple_interpolate_lsf.c",
"codecs/ilbc/simple_interpolate_lsf.h",
"codecs/ilbc/simple_lpc_analysis.c",
"codecs/ilbc/simple_lpc_analysis.h",
"codecs/ilbc/simple_lsf_dequant.c",
"codecs/ilbc/simple_lsf_dequant.h",
"codecs/ilbc/simple_lsf_quant.c",
"codecs/ilbc/simple_lsf_quant.h",
"codecs/ilbc/smooth.c",
"codecs/ilbc/smooth.h",
"codecs/ilbc/smooth_out_data.c",
"codecs/ilbc/smooth_out_data.h",
"codecs/ilbc/sort_sq.c",
"codecs/ilbc/sort_sq.h",
"codecs/ilbc/split_vq.c",
"codecs/ilbc/split_vq.h",
"codecs/ilbc/state_construct.c",
"codecs/ilbc/state_construct.h",
"codecs/ilbc/state_search.c",
"codecs/ilbc/state_search.h",
"codecs/ilbc/swap_bytes.c",
"codecs/ilbc/swap_bytes.h",
"codecs/ilbc/unpack_bits.c",
"codecs/ilbc/unpack_bits.h",
"codecs/ilbc/vq3.c",
"codecs/ilbc/vq3.h",
"codecs/ilbc/vq4.c",
"codecs/ilbc/vq4.h",
"codecs/ilbc/window32_w32.c",
"codecs/ilbc/window32_w32.h",
"codecs/ilbc/xcorr_coef.c",
"codecs/ilbc/xcorr_coef.h",
]
configs += [ "../..:common_config" ]
public_configs = [
"../..:common_inherited_config",
":ilbc_config",
]
deps = [
"../../common_audio",
":audio_encoder_interface",
]
}
source_set("isac_common") {
sources = [
"codecs/isac/audio_encoder_isac_t.h",
"codecs/isac/audio_encoder_isac_t_impl.h",
"codecs/isac/locked_bandwidth_info.cc",
"codecs/isac/locked_bandwidth_info.h",
]
public_configs = [ "../..:common_inherited_config" ]
}
config("isac_config") {
include_dirs = [
"../../..",
"codecs/isac/main/interface",
]
}
source_set("isac") {
sources = [
"codecs/isac/main/interface/audio_decoder_isac.h",
"codecs/isac/main/interface/audio_encoder_isac.h",
"codecs/isac/main/interface/isac.h",
"codecs/isac/main/source/arith_routines.c",
"codecs/isac/main/source/arith_routines.h",
"codecs/isac/main/source/arith_routines_hist.c",
"codecs/isac/main/source/arith_routines_logist.c",
"codecs/isac/main/source/audio_decoder_isac.cc",
"codecs/isac/main/source/audio_encoder_isac.cc",
"codecs/isac/main/source/bandwidth_estimator.c",
"codecs/isac/main/source/bandwidth_estimator.h",
"codecs/isac/main/source/codec.h",
"codecs/isac/main/source/crc.c",
"codecs/isac/main/source/crc.h",
"codecs/isac/main/source/decode.c",
"codecs/isac/main/source/decode_bwe.c",
"codecs/isac/main/source/encode.c",
"codecs/isac/main/source/encode_lpc_swb.c",
"codecs/isac/main/source/encode_lpc_swb.h",
"codecs/isac/main/source/entropy_coding.c",
"codecs/isac/main/source/entropy_coding.h",
"codecs/isac/main/source/fft.c",
"codecs/isac/main/source/fft.h",
"codecs/isac/main/source/filter_functions.c",
"codecs/isac/main/source/filterbank_tables.c",
"codecs/isac/main/source/filterbank_tables.h",
"codecs/isac/main/source/filterbanks.c",
"codecs/isac/main/source/intialize.c",
"codecs/isac/main/source/isac.c",
"codecs/isac/main/source/isac_float_type.h",
"codecs/isac/main/source/lattice.c",
"codecs/isac/main/source/lpc_analysis.c",
"codecs/isac/main/source/lpc_analysis.h",
"codecs/isac/main/source/lpc_gain_swb_tables.c",
"codecs/isac/main/source/lpc_gain_swb_tables.h",
"codecs/isac/main/source/lpc_shape_swb12_tables.c",
"codecs/isac/main/source/lpc_shape_swb12_tables.h",
"codecs/isac/main/source/lpc_shape_swb16_tables.c",
"codecs/isac/main/source/lpc_shape_swb16_tables.h",
"codecs/isac/main/source/lpc_tables.c",
"codecs/isac/main/source/lpc_tables.h",
"codecs/isac/main/source/os_specific_inline.h",
"codecs/isac/main/source/pitch_estimator.c",
"codecs/isac/main/source/pitch_estimator.h",
"codecs/isac/main/source/pitch_filter.c",
"codecs/isac/main/source/pitch_gain_tables.c",
"codecs/isac/main/source/pitch_gain_tables.h",
"codecs/isac/main/source/pitch_lag_tables.c",
"codecs/isac/main/source/pitch_lag_tables.h",
"codecs/isac/main/source/settings.h",
"codecs/isac/main/source/spectrum_ar_model_tables.c",
"codecs/isac/main/source/spectrum_ar_model_tables.h",
"codecs/isac/main/source/structs.h",
"codecs/isac/main/source/transform.c",
]
if (is_linux) {
libs = [ "m" ]
}
configs += [ "../..:common_config" ]
public_configs = [
"../..:common_inherited_config",
":isac_config",
]
deps = [
":audio_decoder_interface",
":audio_encoder_interface",
":isac_common",
"../../common_audio",
]
}
config("isac_fix_config") {
include_dirs = [
"../../..",
"codecs/isac/fix/interface",
]
}
source_set("isac_fix") {
sources = [
"codecs/isac/fix/interface/audio_decoder_isacfix.h",
"codecs/isac/fix/interface/audio_encoder_isacfix.h",
"codecs/isac/fix/interface/isacfix.h",
"codecs/isac/fix/source/arith_routines.c",
"codecs/isac/fix/source/arith_routines_hist.c",
"codecs/isac/fix/source/arith_routines_logist.c",
"codecs/isac/fix/source/arith_routins.h",
"codecs/isac/fix/source/audio_decoder_isacfix.cc",
"codecs/isac/fix/source/audio_encoder_isacfix.cc",
"codecs/isac/fix/source/bandwidth_estimator.c",
"codecs/isac/fix/source/bandwidth_estimator.h",
"codecs/isac/fix/source/codec.h",
"codecs/isac/fix/source/decode.c",
"codecs/isac/fix/source/decode_bwe.c",
"codecs/isac/fix/source/decode_plc.c",
"codecs/isac/fix/source/encode.c",
"codecs/isac/fix/source/entropy_coding.c",
"codecs/isac/fix/source/entropy_coding.h",
"codecs/isac/fix/source/fft.c",
"codecs/isac/fix/source/fft.h",
"codecs/isac/fix/source/filterbank_tables.c",
"codecs/isac/fix/source/filterbank_tables.h",
"codecs/isac/fix/source/filterbanks.c",
"codecs/isac/fix/source/filters.c",
"codecs/isac/fix/source/initialize.c",
"codecs/isac/fix/source/isac_fix_type.h",
"codecs/isac/fix/source/isacfix.c",
"codecs/isac/fix/source/lattice.c",
"codecs/isac/fix/source/lattice_c.c",
"codecs/isac/fix/source/lpc_masking_model.c",
"codecs/isac/fix/source/lpc_masking_model.h",
"codecs/isac/fix/source/lpc_tables.c",
"codecs/isac/fix/source/lpc_tables.h",
"codecs/isac/fix/source/pitch_estimator.c",
"codecs/isac/fix/source/pitch_estimator.h",
"codecs/isac/fix/source/pitch_estimator_c.c",
"codecs/isac/fix/source/pitch_filter.c",
"codecs/isac/fix/source/pitch_filter_c.c",
"codecs/isac/fix/source/pitch_gain_tables.c",
"codecs/isac/fix/source/pitch_gain_tables.h",
"codecs/isac/fix/source/pitch_lag_tables.c",
"codecs/isac/fix/source/pitch_lag_tables.h",
"codecs/isac/fix/source/settings.h",
"codecs/isac/fix/source/spectrum_ar_model_tables.c",
"codecs/isac/fix/source/spectrum_ar_model_tables.h",
"codecs/isac/fix/source/structs.h",
"codecs/isac/fix/source/transform.c",
"codecs/isac/fix/source/transform_tables.c",
]
if (!is_win) {
defines = [ "WEBRTC_LINUX" ]
}
configs += [ "../..:common_config" ]
public_configs = [
"../..:common_inherited_config",
":isac_fix_config",
]
deps = [
":audio_encoder_interface",
":isac_common",
"../../common_audio",
"../../system_wrappers",
]
if (rtc_build_with_neon) {
deps += [ ":isac_neon" ]
}
if (current_cpu == "arm" && arm_version >= 7) {
sources += [
"codecs/isac/fix/source/lattice_armv7.S",
"codecs/isac/fix/source/pitch_filter_armv6.S",
]
sources -= [
"codecs/isac/fix/source/lattice_c.c",
"codecs/isac/fix/source/pitch_filter_c.c",
]
}
if (current_cpu == "mipsel") {
sources += [
"codecs/isac/fix/source/entropy_coding_mips.c",
"codecs/isac/fix/source/filters_mips.c",
"codecs/isac/fix/source/lattice_mips.c",
"codecs/isac/fix/source/pitch_estimator_mips.c",
"codecs/isac/fix/source/transform_mips.c",
]
sources -= [
"codecs/isac/fix/source/lattice_c.c",
"codecs/isac/fix/source/pitch_estimator_c.c",
]
if (mips_dsp_rev > 0) {
sources += [ "codecs/isac/fix/source/filterbanks_mips.c" ]
}
if (mips_dsp_rev > 1) {
sources += [
"codecs/isac/fix/source/lpc_masking_model_mips.c",
"codecs/isac/fix/source/pitch_filter_mips.c",
]
sources -= [ "codecs/isac/fix/source/pitch_filter_c.c" ]
}
}
}
if (rtc_build_with_neon) {
source_set("isac_neon") {
sources = [
"codecs/isac/fix/source/entropy_coding_neon.c",
"codecs/isac/fix/source/filterbanks_neon.c",
"codecs/isac/fix/source/filters_neon.c",
"codecs/isac/fix/source/lattice_neon.c",
"codecs/isac/fix/source/transform_neon.c",
]
if (current_cpu != "arm64") {
# Enable compilation for the NEON instruction set. This is needed
# since //build/config/arm.gni only enables NEON for iOS, not Android.
# This provides the same functionality as webrtc/build/arm_neon.gypi.
configs -= [ "//build/config/compiler:compiler_arm_fpu" ]
cflags = [ "-mfpu=neon" ]
}
# Disable LTO on NEON targets due to compiler bug.
# TODO(fdegans): Enable this. See crbug.com/408997.
if (rtc_use_lto) {
cflags -= [
"-flto",
"-ffat-lto-objects",
]
}
configs += [ "../..:common_config" ]
public_configs = [ "../..:common_inherited_config" ]
deps = [
"../../common_audio",
]
}
}
config("pcm16b_config") {
include_dirs = [
"../../..",
"codecs/pcm16b/include",
]
}
source_set("pcm16b") {
sources = [
"codecs/pcm16b/audio_decoder_pcm16b.cc",
"codecs/pcm16b/audio_encoder_pcm16b.cc",
"codecs/pcm16b/include/audio_decoder_pcm16b.h",
"codecs/pcm16b/include/audio_encoder_pcm16b.h",
"codecs/pcm16b/include/pcm16b.h",
"codecs/pcm16b/pcm16b.c",
]
deps = [
":audio_encoder_interface",
":g711",
]
configs += [ "../..:common_config" ]
public_configs = [
"../..:common_inherited_config",
":pcm16b_config",
]
}
config("opus_config") {
include_dirs = [ "../../.." ]
}
source_set("webrtc_opus") {
sources = [
"codecs/opus/audio_decoder_opus.cc",
"codecs/opus/audio_encoder_opus.cc",
"codecs/opus/interface/audio_decoder_opus.h",
"codecs/opus/interface/audio_encoder_opus.h",
"codecs/opus/interface/opus_interface.h",
"codecs/opus/opus_inst.h",
"codecs/opus/opus_interface.c",
]
deps = [
":audio_encoder_interface",
]
if (rtc_build_opus) {
configs += [ "../..:common_config" ]
public_configs = [ "../..:common_inherited_config" ]
public_deps = [
rtc_opus_dir,
]
} else if (build_with_mozilla) {
include_dirs = [ getenv("DIST") + "/include/opus" ]
}
}
config("neteq_config") {
include_dirs = [
# Need Opus header files for the audio classifier.
"//third_party/opus/src/celt",
"//third_party/opus/src/src",
]
}
source_set("neteq") {
sources = [
"neteq/accelerate.cc",
"neteq/accelerate.h",
"neteq/audio_classifier.cc",
"neteq/audio_classifier.h",
"neteq/audio_decoder_impl.cc",
"neteq/audio_decoder_impl.h",
"neteq/audio_multi_vector.cc",
"neteq/audio_multi_vector.h",
"neteq/audio_vector.cc",
"neteq/audio_vector.h",
"neteq/background_noise.cc",
"neteq/background_noise.h",
"neteq/buffer_level_filter.cc",
"neteq/buffer_level_filter.h",
"neteq/comfort_noise.cc",
"neteq/comfort_noise.h",
"neteq/decision_logic.cc",
"neteq/decision_logic.h",
"neteq/decision_logic_fax.cc",
"neteq/decision_logic_fax.h",
"neteq/decision_logic_normal.cc",
"neteq/decision_logic_normal.h",
"neteq/decoder_database.cc",
"neteq/decoder_database.h",
"neteq/defines.h",
"neteq/delay_manager.cc",
"neteq/delay_manager.h",
"neteq/delay_peak_detector.cc",
"neteq/delay_peak_detector.h",
"neteq/dsp_helper.cc",
"neteq/dsp_helper.h",
"neteq/dtmf_buffer.cc",
"neteq/dtmf_buffer.h",
"neteq/dtmf_tone_generator.cc",
"neteq/dtmf_tone_generator.h",
"neteq/expand.cc",
"neteq/expand.h",
"neteq/interface/neteq.h",
"neteq/merge.cc",
"neteq/merge.h",
"neteq/neteq.cc",
"neteq/neteq_impl.cc",
"neteq/neteq_impl.h",
"neteq/normal.cc",
"neteq/normal.h",
"neteq/packet_buffer.cc",
"neteq/packet_buffer.h",
"neteq/payload_splitter.cc",
"neteq/payload_splitter.h",
"neteq/post_decode_vad.cc",
"neteq/post_decode_vad.h",
"neteq/preemptive_expand.cc",
"neteq/preemptive_expand.h",
"neteq/random_vector.cc",
"neteq/random_vector.h",
"neteq/rtcp.cc",
"neteq/rtcp.h",
"neteq/statistics_calculator.cc",
"neteq/statistics_calculator.h",
"neteq/sync_buffer.cc",
"neteq/sync_buffer.h",
"neteq/time_stretch.cc",
"neteq/time_stretch.h",
"neteq/timestamp_scaler.cc",
"neteq/timestamp_scaler.h",
]
configs += [ "../..:common_config" ]
public_configs = [
"../..:common_inherited_config",
":neteq_config",
]
deps = [
":audio_decoder_interface",
":cng",
":g711",
":pcm16b",
"../..:webrtc_common",
"../../common_audio",
"../../system_wrappers",
]
defines = []
if (rtc_include_opus) {
defines += [ "WEBRTC_CODEC_OPUS" ]
deps += [ ":webrtc_opus" ]
}
if (!build_with_mozilla) {
if (current_cpu == "arm") {
defines += [ "WEBRTC_CODEC_ISACFX" ]
deps += [ ":isac_fix" ]
} else {
defines += [ "WEBRTC_CODEC_ISAC" ]
deps += [ ":isac" ]
}
defines += [ "WEBRTC_CODEC_G722" ]
deps += [ ":g722" ]
}
if (!build_with_mozilla && !build_with_chromium) {
defines += [ "WEBRTC_CODEC_ILBC" ]
deps += [ ":ilbc" ]
}
}
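# --- Illustrative sketch only (not in upstream): a hypothetical target that
# consumes the codec targets defined above. It shows the wiring convention
# used throughout this file: pull in the shared configs and list the needed
# source_sets in deps. The source file name is a placeholder.
source_set("example_neteq_client") {
  sources = [ "examples/neteq_client.cc" ]  # placeholder, assumption
  configs += [ "../..:common_config" ]
  public_configs = [ "../..:common_inherited_config" ]
  deps = [
    ":neteq",
    ":pcm16b",
  ]
}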


@ -0,0 +1,35 @@
noinst_LTLIBRARIES = libaudio_coding.la
libaudio_coding_la_SOURCES = codecs/isac/main/interface/isac.h \
codecs/isac/main/source/arith_routines.c \
codecs/isac/main/source/arith_routines.h \
codecs/isac/main/source/codec.h \
codecs/isac/main/source/encode_lpc_swb.c \
codecs/isac/main/source/encode_lpc_swb.h \
codecs/isac/main/source/entropy_coding.c \
codecs/isac/main/source/entropy_coding.h \
codecs/isac/main/source/lpc_analysis.c \
codecs/isac/main/source/lpc_analysis.h \
codecs/isac/main/source/lpc_gain_swb_tables.c \
codecs/isac/main/source/lpc_gain_swb_tables.h \
codecs/isac/main/source/lpc_shape_swb12_tables.c \
codecs/isac/main/source/lpc_shape_swb12_tables.h \
codecs/isac/main/source/lpc_shape_swb16_tables.c \
codecs/isac/main/source/lpc_shape_swb16_tables.h \
codecs/isac/main/source/lpc_tables.c \
codecs/isac/main/source/lpc_tables.h \
codecs/isac/main/source/os_specific_inline.h \
codecs/isac/main/source/pitch_estimator.c \
codecs/isac/main/source/pitch_estimator.h \
codecs/isac/main/source/pitch_gain_tables.c \
codecs/isac/main/source/pitch_gain_tables.h \
codecs/isac/main/source/pitch_lag_tables.c \
codecs/isac/main/source/pitch_lag_tables.h \
codecs/isac/main/source/settings.h \
codecs/isac/main/source/spectrum_ar_model_tables.c \
codecs/isac/main/source/spectrum_ar_model_tables.h \
codecs/isac/main/source/structs.h \
codecs/isac/bandwidth_info.h
libaudio_coding_la_CFLAGS = $(AM_CFLAGS) $(COMMON_CFLAGS)
libaudio_coding_la_CXXFLAGS = $(AM_CXXFLAGS) $(COMMON_CXXFLAGS)
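# Illustrative sketch only (not in upstream): a test program built against the
# convenience library above could be declared in this Makefile.am roughly as
# follows; the program name and source file are placeholders.
# noinst_PROGRAMS = isac_api_example
# isac_api_example_SOURCES = isac_api_example.c
# isac_api_example_LDADD = libaudio_coding.la
# isac_api_example_CFLAGS = $(AM_CFLAGS) $(COMMON_CFLAGS)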


@ -0,0 +1,24 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_BANDWIDTH_INFO_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_BANDWIDTH_INFO_H_
#include "webrtc/typedefs.h"
typedef struct {
int in_use;
int32_t send_bw_avg;
int32_t send_max_delay_avg;
int16_t bottleneck_idx;
int16_t jitter_info;
} IsacBandwidthInfo;
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_BANDWIDTH_INFO_H_
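/* Illustrative sketch only (not part of bandwidth_info.h): the intended flow of
 * IsacBandwidthInfo is that a receive-side decoder instance fills it in and a
 * send-side encoder instance consumes it. Both calls are declared in
 * codecs/isac/main/interface/isac.h (included in this commit). */
#include "webrtc/modules/audio_coding/codecs/isac/bandwidth_info.h"
#include "webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h"
static void ForwardBandwidthInfo(ISACStruct* decoder, ISACStruct* encoder) {
  IsacBandwidthInfo bwinfo;
  WebRtcIsac_GetBandwidthInfo(decoder, &bwinfo);   /* snapshot decoder-side BWE */
  if (bwinfo.in_use)
    WebRtcIsac_SetBandwidthInfo(encoder, &bwinfo); /* feed it to the encoder */
}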


@ -0,0 +1,724 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INTERFACE_ISAC_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INTERFACE_ISAC_H_
#include <stddef.h>
#include "webrtc/modules/audio_coding/codecs/isac/bandwidth_info.h"
#include "webrtc/typedefs.h"
typedef struct WebRtcISACStruct ISACStruct;
#if defined(__cplusplus)
extern "C" {
#endif
/******************************************************************************
* WebRtcIsac_AssignSize(...)
*
* This function returns the size of the ISAC instance, so that the instance
* can be created outside iSAC.
*
* Input:
* - samplingRate : sampling rate of the input/output audio.
*
* Output:
* - sizeinbytes : number of bytes needed to allocate for the
* instance.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsac_AssignSize(
int* sizeinbytes);
/******************************************************************************
* WebRtcIsac_Assign(...)
*
 * This function assigns the memory already created to the ISAC instance.
*
* Input:
* - *ISAC_main_inst : a pointer to the coder instance.
* - samplingRate : sampling rate of the input/output audio.
* - ISAC_inst_Addr : the already allocated memory, where we put the
* iSAC structure.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsac_Assign(
ISACStruct** ISAC_main_inst,
void* ISAC_inst_Addr);
/******************************************************************************
* WebRtcIsac_Create(...)
*
* This function creates an ISAC instance, which will contain the state
* information for one coding/decoding channel.
*
* Input:
* - *ISAC_main_inst : a pointer to the coder instance.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsac_Create(
ISACStruct** ISAC_main_inst);
/******************************************************************************
* WebRtcIsac_Free(...)
*
* This function frees the ISAC instance created at the beginning.
*
* Input:
* - ISAC_main_inst : an ISAC instance.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsac_Free(
ISACStruct* ISAC_main_inst);
/******************************************************************************
* WebRtcIsac_EncoderInit(...)
*
* This function initializes an ISAC instance prior to the encoder calls.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - CodingMode : 0 -> Bit rate and frame length are
* automatically adjusted to available bandwidth
* on transmission channel, just valid if codec
* is created to work in wideband mode.
* 1 -> User sets a frame length and a target bit
* rate which is taken as the maximum
* short-term average bit rate.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsac_EncoderInit(
ISACStruct* ISAC_main_inst,
int16_t CodingMode);
/******************************************************************************
* WebRtcIsac_Encode(...)
*
 * This function encodes 10 ms audio blocks and inserts them into a packet.
 * The input speech length is 160 samples when operating at a 16 kHz sampling
 * rate, or 320 samples at 32 kHz. The encoder buffers the input audio until
 * a whole frame has been collected and then proceeds with encoding.
*
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - speechIn : input speech vector.
*
* Output:
* - encoded : the encoded data vector
*
* Return value:
* : >0 - Length (in bytes) of coded data
* : 0 - The buffer didn't reach the chosen
* frame-size so it keeps buffering speech
* samples.
* : -1 - Error
*/
int WebRtcIsac_Encode(
ISACStruct* ISAC_main_inst,
const int16_t* speechIn,
uint8_t* encoded);
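/* Illustrative sketch only (not part of the API): a typical send-side loop at
 * 16 kHz using the calls declared in this header. GetNext10MsBlock() is a
 * placeholder for the caller's audio source; the 400-byte payload bound is an
 * assumption taken from the WebRtcIsac_SetMaxPayloadSize() notes below. */
#if 0
static int EncodeOnePacket(ISACStruct* inst, uint8_t payload[400]) {
  int16_t block[160];                 /* 10 ms of speech at 16 kHz */
  int bytes = 0;
  while (bytes == 0) {                /* 0 => encoder is still buffering */
    GetNext10MsBlock(block);          /* placeholder audio callback */
    bytes = WebRtcIsac_Encode(inst, block, payload);
  }
  return bytes;                       /* >0: packet length in bytes, -1: error */
}
#endif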
/******************************************************************************
* WebRtcIsac_DecoderInit(...)
*
* This function initializes an ISAC instance prior to the decoder calls.
*
* Input:
* - ISAC_main_inst : ISAC instance.
*/
void WebRtcIsac_DecoderInit(ISACStruct* ISAC_main_inst);
/******************************************************************************
* WebRtcIsac_UpdateBwEstimate(...)
*
* This function updates the estimate of the bandwidth.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - encoded : encoded ISAC frame(s).
* - packet_size : size of the packet.
 * - rtp_seq_number : the RTP sequence number of the packet.
* - send_ts : the RTP send timestamp, given in samples
* - arr_ts : the arrival time of the packet (from NetEq)
* in samples.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsac_UpdateBwEstimate(
ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts);
/******************************************************************************
* WebRtcIsac_Decode(...)
*
* This function decodes an ISAC frame. At 16 kHz sampling rate, the length
* of the output audio could be either 480 or 960 samples, equivalent to
* 30 or 60 ms respectively. At 32 kHz sampling rate, the length of the
* output audio is 960 samples, which is 30 ms.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - encoded : encoded ISAC frame(s).
* - len : bytes in encoded vector.
*
* Output:
* - decoded : The decoded vector.
*
* Return value : >0 - number of samples in decoded vector.
* -1 - Error.
*/
int WebRtcIsac_Decode(
ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
size_t len,
int16_t* decoded,
int16_t* speechType);
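/* Illustrative receive-side sketch only (not part of the API): update the
 * bandwidth estimate with the packet's RTP metadata, then decode. The
 * 960-sample output bound (60 ms at 16 kHz) is an assumption. */
#if 0
static int DecodeOnePacket(ISACStruct* inst, const uint8_t* payload, size_t len,
                           uint16_t rtp_seq, uint32_t send_ts, uint32_t arr_ts,
                           int16_t decoded[960]) {
  int16_t speech_type;
  if (WebRtcIsac_UpdateBwEstimate(inst, payload, len, rtp_seq, send_ts,
                                  arr_ts) != 0)
    return -1;
  return WebRtcIsac_Decode(inst, payload, len, decoded, &speech_type);
}
#endif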
/******************************************************************************
* WebRtcIsac_DecodePlc(...)
*
* This function conducts PLC for ISAC frame(s). Output speech length
 * will be a whole number of frames, i.e. multiples of 30 ms of audio. Therefore,
 * the output is a multiple of 480 samples if operating at 16 kHz and a multiple
 * of 960 if operating at 32 kHz.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - noOfLostFrames : Number of PLC frames to produce.
*
* Output:
* - decoded : The decoded vector.
*
* Return value : Number of samples in decoded PLC vector
*/
size_t WebRtcIsac_DecodePlc(
ISACStruct* ISAC_main_inst,
int16_t* decoded,
size_t noOfLostFrames);
/******************************************************************************
* WebRtcIsac_Control(...)
*
* This function sets the limit on the short-term average bit-rate and the
* frame length. Should be used only in Instantaneous mode. At 16 kHz sampling
 * rate, an average bit-rate between 10000 and 32000 bps is valid and a
 * frame-size of 30 or 60 ms is acceptable. At 32 kHz, an average bit-rate
 * between 10000 and 56000 bps is acceptable, and the valid frame-size is 30 ms.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - rate : limit on the short-term average bit rate,
* in bits/second.
* - framesize : frame-size in millisecond.
*
* Return value : 0 - ok
* -1 - Error
*/
int16_t WebRtcIsac_Control(
ISACStruct* ISAC_main_inst,
int32_t rate,
int framesize);
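/* Illustrative sketch only (not part of the API): set up a wideband instance in
 * instantaneous mode with a 32 kbit/s short-term average limit and 30 ms
 * frames, staying inside the ranges documented above. */
#if 0
static int ConfigureInstantaneousMode(ISACStruct* inst) {
  if (WebRtcIsac_EncoderInit(inst, 1) != 0)    /* 1 = instantaneous mode */
    return -1;
  return WebRtcIsac_Control(inst, 32000, 30);  /* 32 kbit/s, 30 ms frames */
}
#endif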
void WebRtcIsac_SetInitialBweBottleneck(ISACStruct* ISAC_main_inst,
int bottleneck_bits_per_second);
/******************************************************************************
* WebRtcIsac_ControlBwe(...)
*
* This function sets the initial values of bottleneck and frame-size if
* iSAC is used in channel-adaptive mode. Therefore, this API is not
* applicable if the codec is created to operate in super-wideband mode.
*
* Through this API, users can enforce a frame-size for all values of
* bottleneck. Then iSAC will not automatically change the frame-size.
*
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - rateBPS : initial value of bottleneck in bits/second
* 10000 <= rateBPS <= 56000 is accepted
* For default bottleneck set rateBPS = 0
* - frameSizeMs : number of milliseconds per frame (30 or 60)
 * - enforceFrameSize : 1 to enforce the given frame-size throughout
 * the adaptation process, 0 to let iSAC
* change the frame-size if required.
*
* Return value : 0 - ok
* -1 - Error
*/
int16_t WebRtcIsac_ControlBwe(
ISACStruct* ISAC_main_inst,
int32_t rateBPS,
int frameSizeMs,
int16_t enforceFrameSize);
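/* Illustrative sketch only (not part of the API): channel-adaptive start-up
 * with the default bottleneck and an enforced 30 ms frame-size. */
#if 0
static int ConfigureAdaptiveMode(ISACStruct* inst) {
  if (WebRtcIsac_EncoderInit(inst, 0) != 0)      /* 0 = channel-adaptive */
    return -1;
  return WebRtcIsac_ControlBwe(inst, 0, 30, 1);  /* default bottleneck, 30 ms, enforced */
}
#endif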
/******************************************************************************
* WebRtcIsac_ReadFrameLen(...)
*
* This function returns the length of the frame represented in the packet.
*
* Input:
* - encoded : Encoded bit-stream
*
* Output:
* - frameLength : Length of frame in packet (in samples)
*
*/
int16_t WebRtcIsac_ReadFrameLen(
ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
int16_t* frameLength);
/******************************************************************************
* WebRtcIsac_version(...)
*
* This function returns the version number.
*
* Output:
* - version : Pointer to character string
*
*/
void WebRtcIsac_version(
char *version);
/******************************************************************************
* WebRtcIsac_GetErrorCode(...)
*
* This function can be used to check the error code of an iSAC instance. When
 * a function returns -1, an error code will be set for that instance. The
 * function below extracts the code of the last error that occurred in the
* specified instance.
*
* Input:
* - ISAC_main_inst : ISAC instance
*
* Return value : Error code
*/
int16_t WebRtcIsac_GetErrorCode(
ISACStruct* ISAC_main_inst);
/****************************************************************************
* WebRtcIsac_GetUplinkBw(...)
*
* This function outputs the target bottleneck of the codec. In
* channel-adaptive mode, the target bottleneck is specified through in-band
 * signalling retrieved by the bandwidth estimator.
* In channel-independent, also called instantaneous mode, the target
* bottleneck is provided to the encoder by calling xxx_control(...). If
 * xxx_control is never called, the default value is returned. The default
* value for bottleneck at 16 kHz encoder sampling rate is 32000 bits/sec,
* and it is 56000 bits/sec for 32 kHz sampling rate.
* Note that the output is the iSAC internal operating bottleneck which might
* differ slightly from the one provided through xxx_control().
*
* Input:
* - ISAC_main_inst : iSAC instance
*
* Output:
* - *bottleneck : bottleneck in bits/sec
*
* Return value : -1 if error happens
* 0 bit-rates computed correctly.
*/
int16_t WebRtcIsac_GetUplinkBw(
ISACStruct* ISAC_main_inst,
int32_t* bottleneck);
/******************************************************************************
* WebRtcIsac_SetMaxPayloadSize(...)
*
* This function sets a limit for the maximum payload size of iSAC. The same
* value is used both for 30 and 60 ms packets. If the encoder sampling rate
* is 16 kHz the maximum payload size is between 120 and 400 bytes. If the
* encoder sampling rate is 32 kHz the maximum payload size is between 120
* and 600 bytes.
*
* If an out of range limit is used, the function returns -1, but the closest
* valid value will be applied.
*
* ---------------
* IMPORTANT NOTES
* ---------------
* The size of a packet is limited to the minimum of 'max-payload-size' and
* 'max-rate.' For instance, let's assume the max-payload-size is set to
* 170 bytes, and max-rate is set to 40 kbps. Note that a limit of 40 kbps
* translates to 150 bytes for 30ms frame-size & 300 bytes for 60ms
* frame-size. Then a packet with a frame-size of 30 ms is limited to 150,
* i.e. min(170, 150), and a packet with 60 ms frame-size is limited to
* 170 bytes, i.e. min(170, 300).
*
* Input:
* - ISAC_main_inst : iSAC instance
* - maxPayloadBytes : maximum size of the payload in bytes
* valid values are between 120 and 400 bytes
* if encoder sampling rate is 16 kHz. For
* 32 kHz encoder sampling rate valid values
* are between 120 and 600 bytes.
*
* Return value : 0 if successful
* -1 if error happens
*/
int16_t WebRtcIsac_SetMaxPayloadSize(
ISACStruct* ISAC_main_inst,
int16_t maxPayloadBytes);
/******************************************************************************
* WebRtcIsac_SetMaxRate(...)
*
* This function sets the maximum rate which the codec may not exceed for
* any signal packet. The maximum rate is defined and payload-size per
* frame-size in bits per second.
*
* The codec has a maximum rate of 53400 bits per second (200 bytes per 30
* ms) if the encoder sampling rate is 16kHz, and 160 kbps (600 bytes/30 ms)
* if the encoder sampling rate is 32 kHz.
*
* It is possible to set a maximum rate between 32000 and 53400 bits/sec
* in wideband mode, and 32000 to 160000 bits/sec in super-wideband mode.
*
* If an out of range limit is used, the function returns -1, but the closest
* valid value will be applied.
*
* ---------------
* IMPORTANT NOTES
* ---------------
* The size of a packet is limited to the minimum of 'max-payload-size' and
* 'max-rate.' For instance, let's assume the max-payload-size is set to
* 170 bytes, and max-rate is set to 40 kbps. Note that a limit of 40 kbps
* translates to 150 bytes for 30ms frame-size & 300 bytes for 60ms
* frame-size. Then a packet with a frame-size of 30 ms is limited to 150,
* i.e. min(170, 150), and a packet with 60 ms frame-size is limited to
* 170 bytes, min(170, 300).
*
* Input:
* - ISAC_main_inst : iSAC instance
* - maxRate : maximum rate in bits per second,
* valid values are 32000 to 53400 bits/sec in
* wideband mode, and 32000 to 160000 bits/sec in
* super-wideband mode.
*
* Return value : 0 if successful
* -1 if error happens
*/
int16_t WebRtcIsac_SetMaxRate(
ISACStruct* ISAC_main_inst,
int32_t maxRate);
/******************************************************************************
* WebRtcIsac_DecSampRate()
* Return the sampling rate of the decoded audio.
*
* Input:
* - ISAC_main_inst : iSAC instance
*
* Return value : sampling frequency in Hertz.
*
*/
uint16_t WebRtcIsac_DecSampRate(ISACStruct* ISAC_main_inst);
/******************************************************************************
* WebRtcIsac_EncSampRate()
*
* Input:
* - ISAC_main_inst : iSAC instance
*
* Return value : sampling rate in Hertz.
*
*/
uint16_t WebRtcIsac_EncSampRate(ISACStruct* ISAC_main_inst);
/******************************************************************************
* WebRtcIsac_SetDecSampRate()
* Set the sampling rate of the decoder. Initialization of the decoder WILL
* NOT overwrite the sampling rate of the encoder. The default value is 16 kHz
* which is set when the instance is created.
*
* Input:
* - ISAC_main_inst : iSAC instance
* - sampRate : sampling rate in Hertz.
*
* Return value : 0 if successful
* -1 if failed.
*/
int16_t WebRtcIsac_SetDecSampRate(ISACStruct* ISAC_main_inst,
uint16_t samp_rate_hz);
/******************************************************************************
* WebRtcIsac_SetEncSampRate()
* Set the sampling rate of the encoder. Initialization of the encoder WILL
* NOT overwrite the sampling rate of the encoder. The default value is 16 kHz
* which is set when the instance is created. The encoding-mode and the
* bottleneck remain unchanged by this call, however, the maximum rate and
* maximum payload-size will reset to their default value.
*
* Input:
* - ISAC_main_inst : iSAC instance
* - sampRate : sampling rate in Hertz.
*
* Return value : 0 if successful
* -1 if failed.
*/
int16_t WebRtcIsac_SetEncSampRate(ISACStruct* ISAC_main_inst,
uint16_t sample_rate_hz);
/******************************************************************************
* WebRtcIsac_GetNewBitStream(...)
*
 * This function returns encoded data, with the received bwe-index in the
 * stream. If the rate is set to a value less than the bottleneck of the codec,
 * the new bitstream will be re-encoded with the given target rate.
 * It should always return a complete packet, i.e. it is only called once
 * even for 60 ms frames.
*
* NOTE 1! This function does not write in the ISACStruct, it is not allowed.
* NOTE 2! Currently not implemented for SWB mode.
* NOTE 3! Rates larger than the bottleneck of the codec will be limited
* to the current bottleneck.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - bweIndex : Index of bandwidth estimate to put in new
* bitstream
 * - rate : target rate of the transcoder in bits/sec.
* Valid values are the accepted rate in iSAC,
* i.e. 10000 to 56000.
* - isRCU : if the new bit-stream is an RCU stream.
* Note that the rate parameter always indicates
* the target rate of the main payload, regardless
* of 'isRCU' value.
*
* Output:
* - encoded : The encoded data vector
*
* Return value : >0 - Length (in bytes) of coded data
* -1 - Error or called in SWB mode
* NOTE! No error code is written to
* the struct since it is only allowed to read
* the struct.
*/
int16_t WebRtcIsac_GetNewBitStream(
ISACStruct* ISAC_main_inst,
int16_t bweIndex,
int16_t jitterInfo,
int32_t rate,
uint8_t* encoded,
int16_t isRCU);
/****************************************************************************
* WebRtcIsac_GetDownLinkBwIndex(...)
*
 * This function returns an index representing the Bandwidth estimate from
 * the other side to this side.
*
* Input:
* - ISAC_main_inst : iSAC struct
*
* Output:
* - bweIndex : Bandwidth estimate to transmit to other side.
*
*/
int16_t WebRtcIsac_GetDownLinkBwIndex(
ISACStruct* ISAC_main_inst,
int16_t* bweIndex,
int16_t* jitterInfo);
/****************************************************************************
* WebRtcIsac_UpdateUplinkBw(...)
*
* This function takes an index representing the Bandwidth estimate from
 * this side to the other side and updates the BWE.
*
* Input:
* - ISAC_main_inst : iSAC struct
* - bweIndex : Bandwidth estimate from other side.
*
*/
int16_t WebRtcIsac_UpdateUplinkBw(
ISACStruct* ISAC_main_inst,
int16_t bweIndex);
/****************************************************************************
* WebRtcIsac_ReadBwIndex(...)
*
* This function returns the index of the Bandwidth estimate from the bitstream.
*
* Input:
* - encoded : Encoded bitstream
*
* Output:
* - frameLength : Length of frame in packet (in samples)
* - bweIndex : Bandwidth estimate in bitstream
*
*/
int16_t WebRtcIsac_ReadBwIndex(
const uint8_t* encoded,
int16_t* bweIndex);
/*******************************************************************************
* WebRtcIsac_GetNewFrameLen(...)
*
 * returns the frame length (in samples) of the next packet. In channel-adaptive
 * mode, iSAC decides on its frame length based on the estimated bottleneck;
 * this allows a user to prepare for the next packet (at the encoder).
 *
 * The primary usage is in CE to make iSAC work in channel-adaptive mode
*
* Input:
* - ISAC_main_inst : iSAC struct
*
 * Return Value : frame length in samples
*
*/
int16_t WebRtcIsac_GetNewFrameLen(
ISACStruct* ISAC_main_inst);
/****************************************************************************
* WebRtcIsac_GetRedPayload(...)
*
* Populates "encoded" with the redundant payload of the recently encoded
 * frame. This function has to be called once WebRtcIsac_Encode(...)
 * has returned a positive value. Regardless of the frame-size, this function
 * will be called only once after encoding is completed.
*
* Input:
* - ISAC_main_inst : iSAC struct
*
* Output:
* - encoded : the encoded data vector
*
*
* Return value:
* : >0 - Length (in bytes) of coded data
* : -1 - Error
*
*
*/
int16_t WebRtcIsac_GetRedPayload(
ISACStruct* ISAC_main_inst,
uint8_t* encoded);
/****************************************************************************
* WebRtcIsac_DecodeRcu(...)
*
 * This function decodes a redundant (RCU) iSAC frame. The function is called
 * in NetEq with a stored RCU payload in case of packet loss. The output speech
 * length will be a multiple of 480 samples: 480 or 960 samples,
 * depending on the frame size (30 or 60 ms).
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - encoded : encoded ISAC RCU frame(s)
* - len : bytes in encoded vector
*
* Output:
* - decoded : The decoded vector
*
* Return value : >0 - number of samples in decoded vector
* -1 - Error
*/
int WebRtcIsac_DecodeRcu(
ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
size_t len,
int16_t* decoded,
int16_t* speechType);
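/* Illustrative sketch only (not part of the API): the sender fetches a
 * redundant (RCU) copy right after WebRtcIsac_Encode() returns a positive
 * length; on loss, the receiver decodes the stored copy instead of running
 * PLC. Buffer bounds are assumptions. */
#if 0
static int FetchRedundantCopy(ISACStruct* enc, uint8_t rcu[400]) {
  return WebRtcIsac_GetRedPayload(enc, rcu);        /* >0: bytes, -1: error */
}
static int DecodeRedundantCopy(ISACStruct* dec, const uint8_t* rcu, size_t len,
                               int16_t decoded[960]) {
  int16_t speech_type;
  return WebRtcIsac_DecodeRcu(dec, rcu, len, decoded, &speech_type);
}
#endif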
/* Fills in an IsacBandwidthInfo struct. |inst| should be a decoder. */
void WebRtcIsac_GetBandwidthInfo(ISACStruct* inst, IsacBandwidthInfo* bwinfo);
/* Uses the values from an IsacBandwidthInfo struct. |inst| should be an
encoder. */
void WebRtcIsac_SetBandwidthInfo(ISACStruct* inst,
const IsacBandwidthInfo* bwinfo);
/* If |inst| is a decoder but not an encoder: tell it what sample rate the
encoder is using, for bandwidth estimation purposes. */
void WebRtcIsac_SetEncSampRateInDecoder(ISACStruct* inst, int sample_rate_hz);
#if defined(__cplusplus)
}
#endif
#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INTERFACE_ISAC_H_ */


@ -0,0 +1,60 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "arith_routines.h"
#include "settings.h"
/*
* terminate and return byte stream;
* returns the number of bytes in the stream
*/
int WebRtcIsac_EncTerminate(Bitstr *streamdata) /* in-/output struct containing bitstream */
{
uint8_t *stream_ptr;
/* point to the right place in the stream buffer */
stream_ptr = streamdata->stream + streamdata->stream_index;
/* find minimum length (determined by current interval width) */
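/* Two cases follow: if the interval width W_upper still exceeds 0x01FFFFFF,
 flushing the single most significant byte of streamval terminates the stream;
 otherwise two bytes are flushed. In both branches a pending carry is first
 propagated backwards through the bytes already written. */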
if ( streamdata->W_upper > 0x01FFFFFF )
{
streamdata->streamval += 0x01000000;
/* add carry to buffer */
if (streamdata->streamval < 0x01000000)
{
/* propagate carry */
while ( !(++(*--stream_ptr)) );
/* put pointer back to the old value */
stream_ptr = streamdata->stream + streamdata->stream_index;
}
/* write remaining data to bitstream */
*stream_ptr++ = (uint8_t) (streamdata->streamval >> 24);
}
else
{
streamdata->streamval += 0x00010000;
/* add carry to buffer */
if (streamdata->streamval < 0x00010000)
{
/* propagate carry */
while ( !(++(*--stream_ptr)) );
/* put pointer back to the old value */
stream_ptr = streamdata->stream + streamdata->stream_index;
}
/* write remaining data to bitstream */
*stream_ptr++ = (uint8_t) (streamdata->streamval >> 24);
*stream_ptr++ = (uint8_t) ((streamdata->streamval >> 16) & 0x00FF);
}
/* calculate stream length */
return (int)(stream_ptr - streamdata->stream);
}


@ -0,0 +1,63 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* arith_routines.h
*
* Functions for arithmetic coding.
*
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ARITH_ROUTINES_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ARITH_ROUTINES_H_
#include "structs.h"
int WebRtcIsac_EncLogisticMulti2(
Bitstr *streamdata, /* in-/output struct containing bitstream */
int16_t *dataQ7, /* input: data vector */
const uint16_t *env, /* input: side info vector defining the width of the pdf */
const int N, /* input: data vector length */
const int16_t isSWB12kHz); /* if the codec is working in 12kHz bandwidth */
/* returns the number of bytes in the stream */
int WebRtcIsac_EncTerminate(Bitstr *streamdata); /* in-/output struct containing bitstream */
/* returns the number of bytes in the stream so far */
int WebRtcIsac_DecLogisticMulti2(
int16_t *data, /* output: data vector */
Bitstr *streamdata, /* in-/output struct containing bitstream */
const uint16_t *env, /* input: side info vector defining the width of the pdf */
const int16_t *dither, /* input: dither vector */
const int N, /* input: data vector length */
const int16_t isSWB12kHz); /* if the codec is working in 12kHz bandwidth */
void WebRtcIsac_EncHistMulti(
Bitstr *streamdata, /* in-/output struct containing bitstream */
const int *data, /* input: data vector */
const uint16_t **cdf, /* input: array of cdf arrays */
const int N); /* input: data vector length */
int WebRtcIsac_DecHistBisectMulti(
int *data, /* output: data vector */
Bitstr *streamdata, /* in-/output struct containing bitstream */
const uint16_t **cdf, /* input: array of cdf arrays */
const uint16_t *cdf_size, /* input: array of cdf table sizes+1 (power of two: 2^k) */
const int N); /* input: data vector length */
int WebRtcIsac_DecHistOneStepMulti(
int *data, /* output: data vector */
Bitstr *streamdata, /* in-/output struct containing bitstream */
const uint16_t **cdf, /* input: array of cdf arrays */
const uint16_t *init_index,/* input: vector of initial cdf table search entries */
const int N); /* input: data vector length */
#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ARITH_ROUTINES_H_ */


@ -0,0 +1,233 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* codec.h
*
* This header file contains the calls to the internal encoder
* and decoder functions.
*
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CODEC_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CODEC_H_
#include "structs.h"
void WebRtcIsac_ResetBitstream(Bitstr* bit_stream);
int WebRtcIsac_EstimateBandwidth(BwEstimatorstr* bwest_str, Bitstr* streamdata,
size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts, uint32_t arr_ts,
enum IsacSamplingRate encoderSampRate,
enum IsacSamplingRate decoderSampRate);
int WebRtcIsac_DecodeLb(const TransformTables* transform_tables,
float* signal_out,
ISACLBDecStruct* ISACdec_obj,
int16_t* current_framesamples,
int16_t isRCUPayload);
int WebRtcIsac_DecodeRcuLb(float* signal_out, ISACLBDecStruct* ISACdec_obj,
int16_t* current_framesamples);
int WebRtcIsac_EncodeLb(const TransformTables* transform_tables,
float* in,
ISACLBEncStruct* ISACencLB_obj,
int16_t codingMode,
int16_t bottleneckIndex);
int WebRtcIsac_EncodeStoredDataLb(const IsacSaveEncoderData* ISACSavedEnc_obj,
Bitstr* ISACBitStr_obj, int BWnumber,
float scale);
int WebRtcIsac_EncodeStoredDataUb(
const ISACUBSaveEncDataStruct* ISACSavedEnc_obj, Bitstr* bitStream,
int32_t jitterInfo, float scale, enum ISACBandwidth bandwidth);
int16_t WebRtcIsac_GetRedPayloadUb(
const ISACUBSaveEncDataStruct* ISACSavedEncObj, Bitstr* bitStreamObj,
enum ISACBandwidth bandwidth);
/******************************************************************************
* WebRtcIsac_RateAllocation()
* Internal function to perform a rate-allocation for upper and lower-band,
* given a total rate.
*
* Input:
* - inRateBitPerSec : a total bit-rate in bits/sec.
*
* Output:
* - rateLBBitPerSec : a bit-rate allocated to the lower-band
* in bits/sec.
* - rateUBBitPerSec : a bit-rate allocated to the upper-band
* in bits/sec.
*
* Return value : 0 if rate allocation has been successful.
* -1 if failed to allocate rates.
*/
int16_t WebRtcIsac_RateAllocation(int32_t inRateBitPerSec,
double* rateLBBitPerSec,
double* rateUBBitPerSec,
enum ISACBandwidth* bandwidthKHz);
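/* Illustrative sketch only (not part of this header): split a total budget of
 * 48 kbit/s between the lower and upper band. The variable names are
 * placeholders; the actual split and the selected upper-band bandwidth are
 * decided inside WebRtcIsac_RateAllocation(). */
#if 0
static void ExampleRateSplit(void) {
  double lb_bps = 0.0, ub_bps = 0.0;
  enum ISACBandwidth bw;
  if (WebRtcIsac_RateAllocation(48000, &lb_bps, &ub_bps, &bw) == 0) {
    /* lb_bps and ub_bps now hold the per-band budgets in bits/sec;
     * bw reports the chosen upper-band bandwidth. */
  }
}
#endif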
/******************************************************************************
* WebRtcIsac_DecodeUb16()
*
* Decode the upper-band if the codec is in 0-16 kHz mode.
*
* Input/Output:
* -ISACdec_obj : pointer to the upper-band decoder object. The
* bit-stream is stored inside the decoder object.
*
* Output:
* -signal_out : decoded audio, 480 samples 30 ms.
*
* Return value : >0 number of decoded bytes.
* <0 if an error occurred.
*/
int WebRtcIsac_DecodeUb16(const TransformTables* transform_tables,
float* signal_out,
ISACUBDecStruct* ISACdec_obj,
int16_t isRCUPayload);
/******************************************************************************
* WebRtcIsac_DecodeUb12()
*
* Decode the upper-band if the codec is in 0-12 kHz mode.
*
* Input/Output:
* -ISACdec_obj : pointer to the upper-band decoder object. The
* bit-stream is stored inside the decoder object.
*
* Output:
* -signal_out : decoded audio, 480 samples 30 ms.
*
* Return value : >0 number of decoded bytes.
* <0 if an error occurred.
*/
int WebRtcIsac_DecodeUb12(const TransformTables* transform_tables,
float* signal_out,
ISACUBDecStruct* ISACdec_obj,
int16_t isRCUPayload);
/******************************************************************************
* WebRtcIsac_EncodeUb16()
*
* Encode the upper-band if the codec is in 0-16 kHz mode.
*
* Input:
* -in : upper-band audio, 160 samples (10 ms).
*
* Input/Output:
* -ISACdec_obj : pointer to the upper-band encoder object. The
* bit-stream is stored inside the encoder object.
*
* Return value : >0 number of encoded bytes.
* <0 if an error occurred.
*/
int WebRtcIsac_EncodeUb16(const TransformTables* transform_tables,
float* in,
ISACUBEncStruct* ISACenc_obj,
int32_t jitterInfo);
/******************************************************************************
* WebRtcIsac_EncodeUb12()
*
* Encode the upper-band if the codec is in 0-12 kHz mode.
*
* Input:
* -in : upper-band audio, 160 samples (10 ms).
*
* Input/Output:
* -ISACdec_obj : pointer to the upper-band encoder object. The
* bit-stream is stored inside the encoder object.
*
* Return value : >0 number of encoded bytes.
* <0 if an error occurred.
*/
int WebRtcIsac_EncodeUb12(const TransformTables* transform_tables,
float* in,
ISACUBEncStruct* ISACenc_obj,
int32_t jitterInfo);
/************************** initialization functions *************************/
void WebRtcIsac_InitMasking(MaskFiltstr* maskdata);
void WebRtcIsac_InitPreFilterbank(PreFiltBankstr* prefiltdata);
void WebRtcIsac_InitPostFilterbank(PostFiltBankstr* postfiltdata);
void WebRtcIsac_InitPitchFilter(PitchFiltstr* pitchfiltdata);
void WebRtcIsac_InitPitchAnalysis(PitchAnalysisStruct* State);
/**************************** transform functions ****************************/
void WebRtcIsac_InitTransform(TransformTables* tables);
void WebRtcIsac_Time2Spec(const TransformTables* tables,
double* inre1,
double* inre2,
int16_t* outre,
int16_t* outim,
FFTstr* fftstr_obj);
void WebRtcIsac_Spec2time(const TransformTables* tables,
double* inre,
double* inim,
double* outre1,
double* outre2,
FFTstr* fftstr_obj);
/******************************* filter functions ****************************/
void WebRtcIsac_AllPoleFilter(double* InOut, double* Coef, size_t lengthInOut,
int orderCoef);
void WebRtcIsac_AllZeroFilter(double* In, double* Coef, size_t lengthInOut,
int orderCoef, double* Out);
void WebRtcIsac_ZeroPoleFilter(double* In, double* ZeroCoef, double* PoleCoef,
size_t lengthInOut, int orderCoef, double* Out);
/***************************** filterbank functions **************************/
void WebRtcIsac_SplitAndFilterFloat(float* in, float* LP, float* HP,
double* LP_la, double* HP_la,
PreFiltBankstr* prefiltdata);
void WebRtcIsac_FilterAndCombineFloat(float* InLP, float* InHP, float* Out,
PostFiltBankstr* postfiltdata);
/************************* normalized lattice filters ************************/
void WebRtcIsac_NormLatticeFilterMa(int orderCoef, float* stateF, float* stateG,
float* lat_in, double* filtcoeflo,
double* lat_out);
void WebRtcIsac_NormLatticeFilterAr(int orderCoef, float* stateF, float* stateG,
double* lat_in, double* lo_filt_coef,
float* lat_out);
void WebRtcIsac_Dir2Lat(double* a, int orderCoef, float* sth, float* cth);
void WebRtcIsac_AutoCorr(double* r, const double* x, size_t N, size_t order);
#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CODEC_H_ */


@ -0,0 +1,708 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* code_LPC_UB.c
*
* This file contains definition of functions used to
* encode LPC parameters (Shape & gain) of the upper band.
*
*/
#include "encode_lpc_swb.h"
#include <math.h>
#include <stdio.h>
#include <string.h>
#include "lpc_gain_swb_tables.h"
#include "lpc_shape_swb12_tables.h"
#include "lpc_shape_swb16_tables.h"
#include "settings.h"
#include "webrtc/typedefs.h"
/******************************************************************************
* WebRtcIsac_RemoveLarMean()
*
* Remove the means from LAR coefficients.
*
* Input:
* -lar : pointer to lar vectors. LAR vectors are
* concatenated.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -lar : pointer to mean-removed LAR:s.
*
*
*/
int16_t
WebRtcIsac_RemoveLarMean(
double* lar,
int16_t bandwidth)
{
int16_t coeffCntr;
int16_t vecCntr;
int16_t numVec;
const double* meanLAR;
switch(bandwidth)
{
case isac12kHz:
{
numVec = UB_LPC_VEC_PER_FRAME;
meanLAR = WebRtcIsac_kMeanLarUb12;
break;
}
case isac16kHz:
{
numVec = UB16_LPC_VEC_PER_FRAME;
meanLAR = WebRtcIsac_kMeanLarUb16;
break;
}
default:
return -1;
}
for(vecCntr = 0; vecCntr < numVec; vecCntr++)
{
for(coeffCntr = 0; coeffCntr < UB_LPC_ORDER; coeffCntr++)
{
// REMOVE MEAN
*lar++ -= meanLAR[coeffCntr];
}
}
return 0;
}
/******************************************************************************
* WebRtcIsac_DecorrelateIntraVec()
*
 * Remove the correlation among the components of LAR vectors. If LAR vectors
 * of one frame are put in a matrix where each column is a LAR vector of a
 * sub-frame, then this is equivalent to multiplying the LAR matrix with
 * a decorrelating matrix from the left.
*
* Input:
 * -inLar : pointer to mean-removed LAR vectors.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -out : decorrelated LAR vectors.
*/
int16_t
WebRtcIsac_DecorrelateIntraVec(
const double* data,
double* out,
int16_t bandwidth)
{
const double* ptrData;
const double* ptrRow;
int16_t rowCntr;
int16_t colCntr;
int16_t larVecCntr;
int16_t numVec;
const double* decorrMat;
switch(bandwidth)
{
case isac12kHz:
{
decorrMat = &WebRtcIsac_kIntraVecDecorrMatUb12[0][0];
numVec = UB_LPC_VEC_PER_FRAME;
break;
}
case isac16kHz:
{
decorrMat = &WebRtcIsac_kIintraVecDecorrMatUb16[0][0];
numVec = UB16_LPC_VEC_PER_FRAME;
break;
}
default:
return -1;
}
//
// decorrMat * data
//
// data is assumed to contain 'numVec' of LAR
// vectors (mean removed) each of dimension 'UB_LPC_ORDER'
// concatenated one after the other.
//
ptrData = data;
for(larVecCntr = 0; larVecCntr < numVec; larVecCntr++)
{
for(rowCntr = 0; rowCntr < UB_LPC_ORDER; rowCntr++)
{
ptrRow = &decorrMat[rowCntr * UB_LPC_ORDER];
*out = 0;
for(colCntr = 0; colCntr < UB_LPC_ORDER; colCntr++)
{
*out += ptrData[colCntr] * ptrRow[colCntr];
}
out++;
}
ptrData += UB_LPC_ORDER;
}
return 0;
}
/******************************************************************************
* WebRtcIsac_DecorrelateInterVec()
*
 * Remove the correlation among mean-removed LAR vectors. If LAR vectors
 * of one frame are put in a matrix where each column is a LAR vector of a
 * sub-frame, then this is equivalent to multiplying the LAR matrix with
 * a decorrelating matrix from the right.
*
* Input:
* -data : pointer to matrix of LAR vectors. The matrix
* is stored column-wise.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -out : decorrelated LAR vectors.
*/
int16_t
WebRtcIsac_DecorrelateInterVec(
const double* data,
double* out,
int16_t bandwidth)
{
int16_t coeffCntr;
int16_t rowCntr;
int16_t colCntr;
const double* decorrMat;
int16_t interVecDim;
switch(bandwidth)
{
case isac12kHz:
{
decorrMat = &WebRtcIsac_kInterVecDecorrMatUb12[0][0];
interVecDim = UB_LPC_VEC_PER_FRAME;
break;
}
case isac16kHz:
{
decorrMat = &WebRtcIsac_kInterVecDecorrMatUb16[0][0];
interVecDim = UB16_LPC_VEC_PER_FRAME;
break;
}
default:
return -1;
}
//
// data * decorrMat
//
// data is of size 'interVecDim' * 'UB_LPC_ORDER'
// That is 'interVecDim' of LAR vectors (mean removed)
// in columns each of dimension 'UB_LPC_ORDER'.
// matrix is stored column-wise.
//
for(coeffCntr = 0; coeffCntr < UB_LPC_ORDER; coeffCntr++)
{
for(colCntr = 0; colCntr < interVecDim; colCntr++)
{
out[coeffCntr + colCntr * UB_LPC_ORDER] = 0;
for(rowCntr = 0; rowCntr < interVecDim; rowCntr++)
{
out[coeffCntr + colCntr * UB_LPC_ORDER] +=
data[coeffCntr + rowCntr * UB_LPC_ORDER] *
decorrMat[rowCntr * interVecDim + colCntr];
}
}
}
return 0;
}
/******************************************************************************
* WebRtcIsac_QuantizeUncorrLar()
*
* Quantize the uncorrelated parameters.
*
* Input:
* -data : uncorrelated LAR vectors.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -data : quantized version of the input.
* -idx : pointer to quantization indices.
*/
double
WebRtcIsac_QuantizeUncorrLar(
double* data,
int* recIdx,
int16_t bandwidth)
{
int16_t cntr;
int32_t idx;
int16_t interVecDim;
const double* leftRecPoint;
double quantizationStepSize;
const int16_t* numQuantCell;
switch(bandwidth)
{
case isac12kHz:
{
leftRecPoint = WebRtcIsac_kLpcShapeLeftRecPointUb12;
quantizationStepSize = WebRtcIsac_kLpcShapeQStepSizeUb12;
numQuantCell = WebRtcIsac_kLpcShapeNumRecPointUb12;
interVecDim = UB_LPC_VEC_PER_FRAME;
break;
}
case isac16kHz:
{
leftRecPoint = WebRtcIsac_kLpcShapeLeftRecPointUb16;
quantizationStepSize = WebRtcIsac_kLpcShapeQStepSizeUb16;
numQuantCell = WebRtcIsac_kLpcShapeNumRecPointUb16;
interVecDim = UB16_LPC_VEC_PER_FRAME;
break;
}
default:
return -1;
}
//
// Quantize the parameters.
//
for(cntr = 0; cntr < UB_LPC_ORDER * interVecDim; cntr++)
{
idx = (int32_t)floor((*data - leftRecPoint[cntr]) /
quantizationStepSize + 0.5);
if(idx < 0)
{
idx = 0;
}
else if(idx >= numQuantCell[cntr])
{
idx = numQuantCell[cntr] - 1;
}
*data++ = leftRecPoint[cntr] + idx * quantizationStepSize;
*recIdx++ = idx;
}
return 0;
}
/******************************************************************************
* WebRtcIsac_DequantizeLpcParam()
*
* Get the quantized value of uncorrelated LARs given the quantization indices.
*
* Input:
 * -idx : pointer to quantization indices.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -out : pointer to quantized values.
*/
int16_t
WebRtcIsac_DequantizeLpcParam(
const int* idx,
double* out,
int16_t bandwidth)
{
int16_t cntr;
int16_t interVecDim;
const double* leftRecPoint;
double quantizationStepSize;
switch(bandwidth)
{
case isac12kHz:
{
leftRecPoint = WebRtcIsac_kLpcShapeLeftRecPointUb12;
quantizationStepSize = WebRtcIsac_kLpcShapeQStepSizeUb12;
interVecDim = UB_LPC_VEC_PER_FRAME;
break;
}
case isac16kHz:
{
leftRecPoint = WebRtcIsac_kLpcShapeLeftRecPointUb16;
quantizationStepSize = WebRtcIsac_kLpcShapeQStepSizeUb16;
interVecDim = UB16_LPC_VEC_PER_FRAME;
break;
}
default:
return -1;
}
//
// Dequantize given the quantization indices
//
for(cntr = 0; cntr < UB_LPC_ORDER * interVecDim; cntr++)
{
*out++ = leftRecPoint[cntr] + *idx++ * quantizationStepSize;
}
return 0;
}
/******************************************************************************
* WebRtcIsac_CorrelateIntraVec()
*
* This is the inverse of WebRtcIsac_DecorrelateIntraVec().
*
* Input:
* -data : uncorrelated parameters.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
 * -out : correlated parameters.
*/
int16_t
WebRtcIsac_CorrelateIntraVec(
const double* data,
double* out,
int16_t bandwidth)
{
int16_t vecCntr;
int16_t rowCntr;
int16_t colCntr;
int16_t numVec;
const double* ptrData;
const double* intraVecDecorrMat;
switch(bandwidth)
{
case isac12kHz:
{
numVec = UB_LPC_VEC_PER_FRAME;
intraVecDecorrMat = &WebRtcIsac_kIntraVecDecorrMatUb12[0][0];
break;
}
case isac16kHz:
{
numVec = UB16_LPC_VEC_PER_FRAME;
intraVecDecorrMat = &WebRtcIsac_kIintraVecDecorrMatUb16[0][0];
break;
}
default:
return -1;
}
ptrData = data;
for(vecCntr = 0; vecCntr < numVec; vecCntr++)
{
for(colCntr = 0; colCntr < UB_LPC_ORDER; colCntr++)
{
*out = 0;
for(rowCntr = 0; rowCntr < UB_LPC_ORDER; rowCntr++)
{
*out += ptrData[rowCntr] *
intraVecDecorrMat[rowCntr * UB_LPC_ORDER + colCntr];
}
out++;
}
ptrData += UB_LPC_ORDER;
}
return 0;
}
/******************************************************************************
* WebRtcIsac_CorrelateInterVec()
*
* This is the inverse of WebRtcIsac_DecorrelateInterVec().
*
* Input:
* -data
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
 * -out : correlated parameters.
*/
int16_t
WebRtcIsac_CorrelateInterVec(
const double* data,
double* out,
int16_t bandwidth)
{
int16_t coeffCntr;
int16_t rowCntr;
int16_t colCntr;
int16_t interVecDim;
double myVec[UB16_LPC_VEC_PER_FRAME];
const double* interVecDecorrMat;
switch(bandwidth)
{
case isac12kHz:
{
interVecDim = UB_LPC_VEC_PER_FRAME;
interVecDecorrMat = &WebRtcIsac_kInterVecDecorrMatUb12[0][0];
break;
}
case isac16kHz:
{
interVecDim = UB16_LPC_VEC_PER_FRAME;
interVecDecorrMat = &WebRtcIsac_kInterVecDecorrMatUb16[0][0];
break;
}
default:
return -1;
}
for(coeffCntr = 0; coeffCntr < UB_LPC_ORDER; coeffCntr++)
{
for(rowCntr = 0; rowCntr < interVecDim; rowCntr++)
{
myVec[rowCntr] = 0;
for(colCntr = 0; colCntr < interVecDim; colCntr++)
{
myVec[rowCntr] += data[coeffCntr + colCntr * UB_LPC_ORDER] * //*ptrData *
interVecDecorrMat[rowCntr * interVecDim + colCntr];
//ptrData += UB_LPC_ORDER;
}
}
for(rowCntr = 0; rowCntr < interVecDim; rowCntr++)
{
out[coeffCntr + rowCntr * UB_LPC_ORDER] = myVec[rowCntr];
}
}
return 0;
}
/******************************************************************************
* WebRtcIsac_AddLarMean()
*
* This is the inverse of WebRtcIsac_RemoveLarMean()
*
* Input:
* -data : pointer to mean-removed LAR:s.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -data : pointer to LARs.
*/
int16_t
WebRtcIsac_AddLarMean(
double* data,
int16_t bandwidth)
{
int16_t coeffCntr;
int16_t vecCntr;
int16_t numVec;
const double* meanLAR;
switch(bandwidth)
{
case isac12kHz:
{
numVec = UB_LPC_VEC_PER_FRAME;
meanLAR = WebRtcIsac_kMeanLarUb12;
break;
}
case isac16kHz:
{
numVec = UB16_LPC_VEC_PER_FRAME;
meanLAR = WebRtcIsac_kMeanLarUb16;
break;
}
default:
return -1;
}
for(vecCntr = 0; vecCntr < numVec; vecCntr++)
{
for(coeffCntr = 0; coeffCntr < UB_LPC_ORDER; coeffCntr++)
{
*data++ += meanLAR[coeffCntr];
}
}
return 0;
}
/******************************************************************************
* WebRtcIsac_ToLogDomainRemoveMean()
*
* Transform the LPC gain to log domain then remove the mean value.
*
* Input:
* -lpcGain : pointer to LPC Gain, expecting 6 LPC gains
*
* Output:
* -lpcGain : mean-removed in log domain.
*/
int16_t
WebRtcIsac_ToLogDomainRemoveMean(
double* data)
{
int16_t coeffCntr;
for(coeffCntr = 0; coeffCntr < UB_LPC_GAIN_DIM; coeffCntr++)
{
data[coeffCntr] = log(data[coeffCntr]) - WebRtcIsac_kMeanLpcGain;
}
return 0;
}
/******************************************************************************
* WebRtcIsac_DecorrelateLPGain()
*
* Decorrelate LPC gains. There are 6 LPC Gains per frame. This is like
* multiplying gain vector with decorrelating matrix.
*
* Input:
* -data : LPC gain in log-domain with mean removed.
*
* Output:
* -out : decorrelated parameters.
*/
int16_t WebRtcIsac_DecorrelateLPGain(
const double* data,
double* out)
{
int16_t rowCntr;
int16_t colCntr;
for(colCntr = 0; colCntr < UB_LPC_GAIN_DIM; colCntr++)
{
*out = 0;
for(rowCntr = 0; rowCntr < UB_LPC_GAIN_DIM; rowCntr++)
{
*out += data[rowCntr] * WebRtcIsac_kLpcGainDecorrMat[rowCntr][colCntr];
}
out++;
}
return 0;
}
/******************************************************************************
* WebRtcIsac_QuantizeLpcGain()
*
* Quantize the decorrelated log-domain gains.
*
* Input:
* -lpcGain : uncorrelated LPC gains.
*
* Output:
* -idx : quantization indices
 * -lpcGain : quantized value of the input.
*/
double WebRtcIsac_QuantizeLpcGain(
double* data,
int* idx)
{
int16_t coeffCntr;
for(coeffCntr = 0; coeffCntr < UB_LPC_GAIN_DIM; coeffCntr++)
{
*idx = (int)floor((*data - WebRtcIsac_kLeftRecPointLpcGain[coeffCntr]) /
WebRtcIsac_kQSizeLpcGain + 0.5);
if(*idx < 0)
{
*idx = 0;
}
else if(*idx >= WebRtcIsac_kNumQCellLpcGain[coeffCntr])
{
*idx = WebRtcIsac_kNumQCellLpcGain[coeffCntr] - 1;
}
*data = WebRtcIsac_kLeftRecPointLpcGain[coeffCntr] + *idx *
WebRtcIsac_kQSizeLpcGain;
data++;
idx++;
}
return 0;
}
/******************************************************************************
* WebRtcIsac_DequantizeLpcGain()
*
* Get the quantized values given the quantization indices.
*
* Input:
* -idx : pointer to quantization indices.
*
* Output:
 * -lpcGains : quantized values of the given parameters.
*/
int16_t WebRtcIsac_DequantizeLpcGain(
const int* idx,
double* out)
{
int16_t coeffCntr;
for(coeffCntr = 0; coeffCntr < UB_LPC_GAIN_DIM; coeffCntr++)
{
*out = WebRtcIsac_kLeftRecPointLpcGain[coeffCntr] + *idx *
WebRtcIsac_kQSizeLpcGain;
out++;
idx++;
}
return 0;
}
/******************************************************************************
* WebRtcIsac_CorrelateLpcGain()
*
* This is the inverse of WebRtcIsac_DecorrelateLPGain().
*
* Input:
* -data : decorrelated parameters.
*
* Output:
* -out : correlated parameters.
*/
int16_t WebRtcIsac_CorrelateLpcGain(
const double* data,
double* out)
{
int16_t rowCntr;
int16_t colCntr;
for(rowCntr = 0; rowCntr < UB_LPC_GAIN_DIM; rowCntr++)
{
*out = 0;
for(colCntr = 0; colCntr < UB_LPC_GAIN_DIM; colCntr++)
{
*out += WebRtcIsac_kLpcGainDecorrMat[rowCntr][colCntr] * data[colCntr];
}
out++;
}
return 0;
}
/******************************************************************************
* WebRtcIsac_AddMeanToLinearDomain()
*
* This is the inverse of WebRtcIsac_ToLogDomainRemoveMean().
*
* Input:
* -lpcGain : LPC gain in log-domain & mean removed
*
* Output:
* -lpcGain : LPC gain in normal domain.
*/
int16_t WebRtcIsac_AddMeanToLinearDomain(
double* lpcGains)
{
int16_t coeffCntr;
for(coeffCntr = 0; coeffCntr < UB_LPC_GAIN_DIM; coeffCntr++)
{
lpcGains[coeffCntr] = exp(lpcGains[coeffCntr] + WebRtcIsac_kMeanLpcGain);
}
return 0;
}
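/* Illustrative sketch only (not part of this file): the intended encode/decode
 * round trip for the six per-frame LPC gains, chaining the functions above and
 * their documented inverses. The input array is a placeholder. */
#if 0
static void ExampleLpcGainRoundTrip(double gains[UB_LPC_GAIN_DIM]) {
  double decorrelated[UB_LPC_GAIN_DIM];
  int indices[UB_LPC_GAIN_DIM];
  /* Encoder side. */
  WebRtcIsac_ToLogDomainRemoveMean(gains);              /* log domain, mean removed */
  WebRtcIsac_DecorrelateLPGain(gains, decorrelated);    /* decorrelate */
  WebRtcIsac_QuantizeLpcGain(decorrelated, indices);    /* quantize -> indices */
  /* Decoder side: invert every step from the indices. */
  WebRtcIsac_DequantizeLpcGain(indices, decorrelated);
  WebRtcIsac_CorrelateLpcGain(decorrelated, gains);
  WebRtcIsac_AddMeanToLinearDomain(gains);              /* back to linear gains */
}
#endif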


@ -0,0 +1,282 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* encode_lpc_swb.h
*
* This file contains declaration of functions used to
* encode LPC parameters (Shape & gain) of the upper band.
*
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENCODE_LPC_SWB_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENCODE_LPC_SWB_H_
#include "settings.h"
#include "structs.h"
#include "webrtc/typedefs.h"
/******************************************************************************
* WebRtcIsac_RemoveLarMean()
*
* Remove the means from LAR coefficients.
*
* Input:
* -lar : pointer to lar vectors. LAR vectors are
* concatenated.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -lar : pointer to mean-removed LAR:s.
*
*
*/
int16_t WebRtcIsac_RemoveLarMean(
double* lar,
int16_t bandwidth);
/******************************************************************************
* WebRtcIsac_DecorrelateIntraVec()
*
 * Remove the correlation among the components of LAR vectors. If LAR vectors
 * of one frame are put in a matrix where each column is a LAR vector of a
 * sub-frame, then this is equivalent to multiplying the LAR matrix with
 * a decorrelating matrix from the left.
*
* Input:
 * -inLar : pointer to mean-removed LAR vectors.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -out : decorrelated LAR vectors.
*/
int16_t WebRtcIsac_DecorrelateIntraVec(
const double* inLAR,
double* out,
int16_t bandwidth);
/******************************************************************************
* WebRtcIsac_DecorrelateInterVec()
*
 * Remove the correlation among mean-removed LAR vectors. If LAR vectors
 * of one frame are put in a matrix where each column is a LAR vector of a
 * sub-frame, then this is equivalent to multiplying the LAR matrix with
 * a decorrelating matrix from the right.
*
* Input:
* -data : pointer to matrix of LAR vectors. The matrix
* is stored column-wise.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -out : decorrelated LAR vectors.
*/
int16_t WebRtcIsac_DecorrelateInterVec(
const double* data,
double* out,
int16_t bandwidth);
/******************************************************************************
* WebRtcIsac_QuantizeUncorrLar()
*
* Quantize the uncorrelated parameters.
*
* Input:
* -data : uncorrelated LAR vectors.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -data : quantized version of the input.
* -idx : pointer to quantization indices.
*/
double WebRtcIsac_QuantizeUncorrLar(
double* data,
int* idx,
int16_t bandwidth);
/******************************************************************************
* WebRtcIsac_CorrelateIntraVec()
*
* This is the inverse of WebRtcIsac_DecorrelateIntraVec().
*
* Input:
* -data : uncorrelated parameters.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
 * -out : correlated parameters.
*/
int16_t WebRtcIsac_CorrelateIntraVec(
const double* data,
double* out,
int16_t bandwidth);
/******************************************************************************
* WebRtcIsac_CorrelateInterVec()
*
* This is the inverse of WebRtcIsac_DecorrelateInterVec().
*
* Input:
* -data
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
 * -out : correlated parameters.
*/
int16_t WebRtcIsac_CorrelateInterVec(
const double* data,
double* out,
int16_t bandwidth);
/******************************************************************************
* WebRtcIsac_AddLarMean()
*
* This is the inverse of WebRtcIsac_RemoveLarMean()
*
* Input:
* -data : pointer to mean-removed LAR:s.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -data : pointer to LARs.
*/
int16_t WebRtcIsac_AddLarMean(
double* data,
int16_t bandwidth);
/******************************************************************************
* WebRtcIsac_DequantizeLpcParam()
*
* Get the quantized value of uncorrelated LARs given the quantization indices.
*
* Input:
 * -idx : pointer to quantization indices.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -out : pointer to quantized values.
*/
int16_t WebRtcIsac_DequantizeLpcParam(
const int* idx,
double* out,
int16_t bandwidth);
/******************************************************************************
* WebRtcIsac_ToLogDomainRemoveMean()
*
* Transform the LPC gain to log domain then remove the mean value.
*
* Input:
* -lpGains : pointer to LPC gains; 6 LPC gains per frame are expected
*
* Output:
* -lpGains : mean-removed LPC gains in the log domain.
*/
int16_t WebRtcIsac_ToLogDomainRemoveMean(
double* lpGains);
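/*
 * Illustration only: a minimal sketch of the operation described above,
 * assuming a natural-log domain and a caller-supplied mean value (the codec's
 * own constant is WebRtcIsac_kMeanLpcGain in lpc_gain_swb_tables.c; the exact
 * log base is not restated here). Requires <math.h>.
 */
static void ExampleToLogDomainRemoveMean(double* lpGains, int numGains,
                                         double meanLogGain) {
  int k;
  for (k = 0; k < numGains; k++) {
    lpGains[k] = log(lpGains[k]) - meanLogGain;
  }
}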
/******************************************************************************
* WebRtcIsac_DecorrelateLPGain()
*
* Decorrelate LPC gains. There are 6 LPC gains per frame. This is equivalent
* to multiplying the gain vector by a decorrelating matrix.
*
* Input:
* -data : LPC gain in log-domain with mean removed.
*
* Output:
* -out : decorrelated parameters.
*/
int16_t WebRtcIsac_DecorrelateLPGain(
const double* data,
double* out);
/******************************************************************************
* WebRtcIsac_QuantizeLpcGain()
*
* Quantize the decorrelated log-domain gains.
*
* Input:
* -lpcGain : uncorrelated LPC gains.
*
* Output:
* -idx : quantization indices
* -lpcGain : quantized value of the input.
*/
double WebRtcIsac_QuantizeLpcGain(
double* lpGains,
int* idx);
/******************************************************************************
* WebRtcIsac_DequantizeLpcGain()
*
* Get the quantized values given the quantization indices.
*
* Input:
* -idx : pointer to quantization indices.
*
* Output:
* -lpcGains : quantized values of the given parameters.
*/
int16_t WebRtcIsac_DequantizeLpcGain(
const int* idx,
double* lpGains);
/******************************************************************************
* WebRtcIsac_CorrelateLpcGain()
*
* This is the inverse of WebRtcIsac_DecorrelateLPGain().
*
* Input:
* -data : decorrelated parameters.
*
* Output:
* -out : correlated parameters.
*/
int16_t WebRtcIsac_CorrelateLpcGain(
const double* data,
double* out);
/******************************************************************************
* WebRtcIsac_AddMeanToLinearDomain()
*
* This is the inverse of WebRtcIsac_ToLogDomainRemoveMean().
*
* Input:
* -lpcGains : LPC gains in the log domain with the mean removed
*
* Output:
* -lpcGains : LPC gains in the linear (normal) domain.
*/
int16_t WebRtcIsac_AddMeanToLinearDomain(
double* lpcGains);
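/*
 * Illustration only: how the gain-related helpers above chain together, as
 * implied by the "inverse of" notes in their comments. Six gains per frame
 * are assumed (see WebRtcIsac_ToLogDomainRemoveMean() above); the real call
 * sites, error handling and entropy-coding steps are not shown, so treat this
 * as a sketch rather than the codec's actual encode path.
 */
static void ExampleLpcGainQuantRoundTrip(double* lpGains /* 6 values, in/out */) {
  double decorrelated[6];
  double reconstructed[6];
  int idx[6];

  /* Quantization direction. */
  WebRtcIsac_ToLogDomainRemoveMean(lpGains);
  WebRtcIsac_DecorrelateLPGain(lpGains, decorrelated);
  WebRtcIsac_QuantizeLpcGain(decorrelated, idx);

  /* Reconstruction direction. */
  WebRtcIsac_DequantizeLpcGain(idx, decorrelated);
  WebRtcIsac_CorrelateLpcGain(decorrelated, reconstructed);
  WebRtcIsac_AddMeanToLinearDomain(reconstructed);
  /* reconstructed[] now holds the quantized LPC gains in the linear domain. */
}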
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENCODE_LPC_SWB_H_

File diff suppressed because it is too large

View File

@ -0,0 +1,343 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* entropy_coding.h
*
* This header file declares all of the functions used to arithmetically
* encode the iSAC bitstream
*
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENTROPY_CODING_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENTROPY_CODING_H_
#include "settings.h"
#include "structs.h"
/******************************************************************************
* WebRtcIsac_DecodeSpec()
* Decode the real and imaginary parts of the DFT coefficients, given a bit-stream.
* The decoded DFT coefficients can be transformed to the time domain by
* WebRtcIsac_Time2Spec().
* WebRtcIsac_Time2Spec().
*
* Input:
* - streamdata : pointer to a structure containing the encoded
* data and the parameters needed for entropy
* coding.
* - AvgPitchGain_Q12 : average pitch-gain of the frame. This is only
* relevant for 0-4 kHz band, and the input value is
* not used in other bands.
* - band : specifies which band's DFT should be decoded.
*
* Output:
* - *fr : pointer to a buffer where the real parts of the DFT
* coefficients are written.
* - *fi : pointer to a buffer where the imaginary parts of the
* DFT coefficients are written.
*
* Return value : < 0 if an error occurs
* 0 if succeeded.
*/
int WebRtcIsac_DecodeSpec(Bitstr* streamdata, int16_t AvgPitchGain_Q12,
enum ISACBand band, double* fr, double* fi);
/******************************************************************************
* WebRtcIsac_EncodeSpec()
* Encode real and imaginary part of the DFT coefficients into the given
* bit-stream.
*
* Input:
* - *fr : pointer to a buffer containing the real part of the
* DFT coefficients.
* - *fi : pointer to a buffer containing the imaginary part of
* the DFT coefficients.
* - AvgPitchGain_Q12 : average pitch-gain of the frame. This is only
* relevant for 0-4 kHz band, and the input value is
* not used in other bands.
* - band : specifies which band's DFT should be encoded.
*
* Output:
* - streamdata : pointer to a structure containing the encoded
* data and the parameters needed for entropy
* coding.
*
* Return value : < 0 if an error occurs
* 0 if succeeded.
*/
int WebRtcIsac_EncodeSpec(const int16_t* fr, const int16_t* fi,
int16_t AvgPitchGain_Q12, enum ISACBand band,
Bitstr* streamdata);
/* decode & dequantize LPC Coef */
int WebRtcIsac_DecodeLpcCoef(Bitstr* streamdata, double* LPCCoef);
int WebRtcIsac_DecodeLpcCoefUB(Bitstr* streamdata, double* lpcVecs,
double* percepFilterGains,
int16_t bandwidth);
int WebRtcIsac_DecodeLpc(Bitstr* streamdata, double* LPCCoef_lo,
double* LPCCoef_hi);
/* quantize & code LPC Coef */
void WebRtcIsac_EncodeLpcLb(double* LPCCoef_lo, double* LPCCoef_hi,
Bitstr* streamdata, IsacSaveEncoderData* encData);
void WebRtcIsac_EncodeLpcGainLb(double* LPCCoef_lo, double* LPCCoef_hi,
Bitstr* streamdata,
IsacSaveEncoderData* encData);
/******************************************************************************
* WebRtcIsac_EncodeLpcUB()
* Encode the LPC parameters of the upper band, given as an A-polynomial. The
* encoding is performed in the LAR domain.
* For the upper band, we compute and encode the LPC of some sub-frames; the
* LPC of the other sub-frames is obtained by linear interpolation in the LAR
* domain. This function performs the interpolation and returns the LPC of all
* sub-frames.
*
* Inputs:
* - lpcCoef : a buffer containing A-polynomials of sub-frames
* (excluding first coefficient that is 1).
* - bandwidth : specifies if the codec is operating at 0-12 kHz
* or 0-16 kHz mode.
*
* Input/output:
* - streamdata : pointer to a structure containing the encoded
* data and the parameters needed for entropy
* coding.
*
* Output:
* - interpolLPCCoeff : Decoded and interpolated LPC (A-polynomial)
* of all sub-frames.
* If LP analysis is of order K, and there are N
* sub-frames then this is a buffer of size
* (K + 1) * N; each vector starts with the LPC gain
* of the corresponding sub-frame. The LPC gains
* are encoded and inserted after this function is
* called. The first A-coefficient which is 1 is not
* included.
*
* Return value : 0 if encoding is successful,
* <0 if failed to encode.
*/
int16_t WebRtcIsac_EncodeLpcUB(double* lpcCoeff, Bitstr* streamdata,
double* interpolLPCCoeff,
int16_t bandwidth,
ISACUBSaveEncDataStruct* encData);
/******************************************************************************
* WebRtcIsac_DecodeInterpolLpcUb()
* Decode LPC coefficients and interpolate to get the coefficients for all
* sub-frames.
*
* Inputs:
* - bandwidth : specifies if the codec is in 0-12 kHz or
* 0-16 kHz mode.
*
* Input/output:
* - streamdata : pointer to a structure containing the encoded
* data and the parameters needed for entropy
* coding.
*
* Output:
* - percepFilterParam : Decoded and interpolated LPC (A-polynomial) of
* all sub-frames.
* If LP analysis is of order K, and there are N
* sub-frames then this is a buffer of size
* (K + 1) * N; each vector starts with the LPC gain
* of the corresponding sub-frame. The LPC gains
* are encoded and inserted after this function is
* called. The first A-coefficient which is 1 is not
* included.
*
* Return value : 0 if decoding is successful,
* <0 if failed to decode.
*/
int16_t WebRtcIsac_DecodeInterpolLpcUb(Bitstr* streamdata,
double* percepFilterParam,
int16_t bandwidth);
/* Decode & dequantize RC */
int WebRtcIsac_DecodeRc(Bitstr* streamdata, int16_t* RCQ15);
/* Quantize & code RC */
void WebRtcIsac_EncodeRc(int16_t* RCQ15, Bitstr* streamdata);
/* Decode & dequantize squared Gain */
int WebRtcIsac_DecodeGain2(Bitstr* streamdata, int32_t* Gain2);
/* Quantize & code squared Gain (input is squared gain) */
int WebRtcIsac_EncodeGain2(int32_t* gain2, Bitstr* streamdata);
void WebRtcIsac_EncodePitchGain(int16_t* PitchGains_Q12,
Bitstr* streamdata,
IsacSaveEncoderData* encData);
void WebRtcIsac_EncodePitchLag(double* PitchLags,
int16_t* PitchGain_Q12,
Bitstr* streamdata,
IsacSaveEncoderData* encData);
int WebRtcIsac_DecodePitchGain(Bitstr* streamdata,
int16_t* PitchGain_Q12);
int WebRtcIsac_DecodePitchLag(Bitstr* streamdata, int16_t* PitchGain_Q12,
double* PitchLag);
int WebRtcIsac_DecodeFrameLen(Bitstr* streamdata, int16_t* framelength);
int WebRtcIsac_EncodeFrameLen(int16_t framelength, Bitstr* streamdata);
int WebRtcIsac_DecodeSendBW(Bitstr* streamdata, int16_t* BWno);
void WebRtcIsac_EncodeReceiveBw(int* BWno, Bitstr* streamdata);
/* Step-down */
void WebRtcIsac_Poly2Rc(double* a, int N, double* RC);
/* Step-up */
void WebRtcIsac_Rc2Poly(double* RC, int N, double* a);
void WebRtcIsac_TranscodeLPCCoef(double* LPCCoef_lo, double* LPCCoef_hi,
int* index_g);
/******************************************************************************
* WebRtcIsac_EncodeLpcGainUb()
* Encode LPC gains of sub-frames.
*
* Input/outputs:
* - lpGains : a buffer which contains 'SUBFRAMES' LP gains to
* be encoded. The input values are
* overwritten by the quantized values.
* - streamdata : pointer to a structure containing the encoded
* data and the parameters needed for entropy
* coding.
*
* Output:
* - lpcGainIndex : quantization indices for lpc gains, these will
* be stored to be used for FEC.
*/
void WebRtcIsac_EncodeLpcGainUb(double* lpGains, Bitstr* streamdata,
int* lpcGainIndex);
/******************************************************************************
* WebRtcIsac_StoreLpcGainUb()
* Store LPC gains of sub-Frames in 'streamdata'.
*
* Input:
* - lpGains : a buffer which contains 'SUBFRAMES' LP gains to
* be encoded.
* Input/outputs:
* - streamdata : pointer to a structure containing the encoded
* data and the parameters needed for entropy
* coding.
*
*/
void WebRtcIsac_StoreLpcGainUb(double* lpGains, Bitstr* streamdata);
/******************************************************************************
* WebRtcIsac_DecodeLpcGainUb()
* Decode the LPC gain of sub-frames.
*
* Input/output:
* - streamdata : pointer to a structure containing the encoded
* data and the parameters needed for entropy
* coding.
*
* Output:
* - lpGains : a buffer where decoded LPC gains will be stored.
*
* Return value : 0 if succeeded.
* <0 if failed.
*/
int16_t WebRtcIsac_DecodeLpcGainUb(double* lpGains, Bitstr* streamdata);
/******************************************************************************
* WebRtcIsac_EncodeBandwidth()
* Encode whether the bandwidth of the encoded audio is 0-12 kHz or 0-16 kHz.
*
* Input:
* - bandwidth : an enumerator specifying if the codec is in
* 0-12 kHz or 0-16 kHz mode.
*
* Input/output:
* - streamdata : pointer to a structure containing the encoded
* data and the parameters needed for entropy
* coding.
*
* Return value : 0 if succeeded.
* <0 if failed.
*/
int16_t WebRtcIsac_EncodeBandwidth(enum ISACBandwidth bandwidth,
Bitstr* streamData);
/******************************************************************************
* WebRtcIsac_DecodeBandwidth()
* Decode the bandwidth of the encoded audio, i.e. if the bandwidth is 0-12 kHz
* or 0-16 kHz.
*
* Input/output:
* - streamdata : pointer to a structure containing the encoded
* data and the parameters needed for entropy
* coding.
*
* Output:
* - bandwidth : an enumerator specifying if the codec is in
* 0-12 kHz or 0-16 kHz mode.
*
* Return value : 0 if succeeded.
* <0 if failed.
*/
int16_t WebRtcIsac_DecodeBandwidth(Bitstr* streamData,
enum ISACBandwidth* bandwidth);
/******************************************************************************
* WebRtcIsac_EncodeJitterInfo()
* Encode the jitter information.
*
* Input/output:
* - streamdata : pointer to a structure containing the encoded
* data and the parameters needed for entropy
* coding.
*
* Input:
* - jitterIndex : one bit of info specifying if the channel is
* in high/low jitter. Zero indicates low jitter
* and one indicates high jitter.
*
* Return value : 0 if succeeded.
* <0 if failed.
*/
int16_t WebRtcIsac_EncodeJitterInfo(int32_t jitterIndex,
Bitstr* streamData);
/******************************************************************************
* WebRtcIsac_DecodeJitterInfo()
* Decode the jitter information.
*
* Input/output:
* - streamdata : pointer to a structure containing the encoded
* data and the parameters needed for entropy
* coding.
*
* Output:
* - jitterInfo : one bit of info specifying if the channel is
* in high/low jitter. Zero indicates low jitter
* and one indicates high jitter.
*
* Return value : 0 if succeeded.
* <0 if failed.
*/
int16_t WebRtcIsac_DecodeJitterInfo(Bitstr* streamData,
int32_t* jitterInfo);
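/*
 * Illustration only: typical pairing of the encode/decode helpers declared in
 * this header. The encoder writes a field into its Bitstr and the decoder
 * reads the same field back from a Bitstr wrapping the received payload; both
 * report errors with a negative return value. Initialization of the two
 * Bitstr objects is handled elsewhere in the codec and is assumed here.
 */
static int ExampleFrameLenField(Bitstr* encStream, Bitstr* decStream,
                                int16_t frameLengthSamples) {
  int16_t decodedLength = 0;
  if (WebRtcIsac_EncodeFrameLen(frameLengthSamples, encStream) < 0) {
    return -1;  /* failed to write the frame-length field */
  }
  if (WebRtcIsac_DecodeFrameLen(decStream, &decodedLength) < 0) {
    return -1;  /* failed to read the frame-length field */
  }
  return decodedLength;
}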
#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENTROPY_CODING_H_ */

View File

@ -0,0 +1,535 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "lpc_analysis.h"
#include "settings.h"
#include "codec.h"
#include "entropy_coding.h"
#include <math.h>
#include <string.h>
#define LEVINSON_EPS 1.0e-10
/* window */
/* Matlab generation code:
* t = (1:256)/257; r = 1-(1-t).^.45; w = sin(r*pi).^3; w = w/sum(w); plot((1:256)/8, w); grid;
* for k=1:16, fprintf(1, '%.8f, ', w(k*16 + (-15:0))); fprintf(1, '\n'); end
*/
static const double kLpcCorrWindow[WINLEN] = {
0.00000000, 0.00000001, 0.00000004, 0.00000010, 0.00000020,
0.00000035, 0.00000055, 0.00000083, 0.00000118, 0.00000163,
0.00000218, 0.00000283, 0.00000361, 0.00000453, 0.00000558, 0.00000679,
0.00000817, 0.00000973, 0.00001147, 0.00001342, 0.00001558,
0.00001796, 0.00002058, 0.00002344, 0.00002657, 0.00002997,
0.00003365, 0.00003762, 0.00004190, 0.00004651, 0.00005144, 0.00005673,
0.00006236, 0.00006837, 0.00007476, 0.00008155, 0.00008875,
0.00009636, 0.00010441, 0.00011290, 0.00012186, 0.00013128,
0.00014119, 0.00015160, 0.00016252, 0.00017396, 0.00018594, 0.00019846,
0.00021155, 0.00022521, 0.00023946, 0.00025432, 0.00026978,
0.00028587, 0.00030260, 0.00031998, 0.00033802, 0.00035674,
0.00037615, 0.00039626, 0.00041708, 0.00043863, 0.00046092, 0.00048396,
0.00050775, 0.00053233, 0.00055768, 0.00058384, 0.00061080,
0.00063858, 0.00066720, 0.00069665, 0.00072696, 0.00075813,
0.00079017, 0.00082310, 0.00085692, 0.00089164, 0.00092728, 0.00096384,
0.00100133, 0.00103976, 0.00107914, 0.00111947, 0.00116077,
0.00120304, 0.00124630, 0.00129053, 0.00133577, 0.00138200,
0.00142924, 0.00147749, 0.00152676, 0.00157705, 0.00162836, 0.00168070,
0.00173408, 0.00178850, 0.00184395, 0.00190045, 0.00195799,
0.00201658, 0.00207621, 0.00213688, 0.00219860, 0.00226137,
0.00232518, 0.00239003, 0.00245591, 0.00252284, 0.00259079, 0.00265977,
0.00272977, 0.00280078, 0.00287280, 0.00294582, 0.00301984,
0.00309484, 0.00317081, 0.00324774, 0.00332563, 0.00340446,
0.00348421, 0.00356488, 0.00364644, 0.00372889, 0.00381220, 0.00389636,
0.00398135, 0.00406715, 0.00415374, 0.00424109, 0.00432920,
0.00441802, 0.00450754, 0.00459773, 0.00468857, 0.00478001,
0.00487205, 0.00496464, 0.00505775, 0.00515136, 0.00524542, 0.00533990,
0.00543476, 0.00552997, 0.00562548, 0.00572125, 0.00581725,
0.00591342, 0.00600973, 0.00610612, 0.00620254, 0.00629895,
0.00639530, 0.00649153, 0.00658758, 0.00668341, 0.00677894, 0.00687413,
0.00696891, 0.00706322, 0.00715699, 0.00725016, 0.00734266,
0.00743441, 0.00752535, 0.00761540, 0.00770449, 0.00779254,
0.00787947, 0.00796519, 0.00804963, 0.00813270, 0.00821431, 0.00829437,
0.00837280, 0.00844949, 0.00852436, 0.00859730, 0.00866822,
0.00873701, 0.00880358, 0.00886781, 0.00892960, 0.00898884,
0.00904542, 0.00909923, 0.00915014, 0.00919805, 0.00924283, 0.00928436,
0.00932252, 0.00935718, 0.00938821, 0.00941550, 0.00943890,
0.00945828, 0.00947351, 0.00948446, 0.00949098, 0.00949294,
0.00949020, 0.00948262, 0.00947005, 0.00945235, 0.00942938, 0.00940099,
0.00936704, 0.00932738, 0.00928186, 0.00923034, 0.00917268,
0.00910872, 0.00903832, 0.00896134, 0.00887763, 0.00878706,
0.00868949, 0.00858478, 0.00847280, 0.00835343, 0.00822653, 0.00809199,
0.00794970, 0.00779956, 0.00764145, 0.00747530, 0.00730103,
0.00711857, 0.00692787, 0.00672888, 0.00652158, 0.00630597,
0.00608208, 0.00584994, 0.00560962, 0.00536124, 0.00510493, 0.00484089,
0.00456935, 0.00429062, 0.00400505, 0.00371310, 0.00341532,
0.00311238, 0.00280511, 0.00249452, 0.00218184, 0.00186864,
0.00155690, 0.00124918, 0.00094895, 0.00066112, 0.00039320, 0.00015881
};
double WebRtcIsac_LevDurb(double *a, double *k, double *r, size_t order)
{
double sum, alpha;
size_t m, m_h, i;
alpha = 0; //warning -DH
a[0] = 1.0;
if (r[0] < LEVINSON_EPS) { /* if r[0] <= 0, set LPC coeff. to zero */
for (i = 0; i < order; i++) {
k[i] = 0;
a[i+1] = 0;
}
} else {
a[1] = k[0] = -r[1]/r[0];
alpha = r[0] + r[1] * k[0];
for (m = 1; m < order; m++){
sum = r[m + 1];
for (i = 0; i < m; i++){
sum += a[i+1] * r[m - i];
}
k[m] = -sum / alpha;
alpha += k[m] * sum;
m_h = (m + 1) >> 1;
for (i = 0; i < m_h; i++){
sum = a[i+1] + k[m] * a[m - i];
a[m - i] += k[m] * a[i+1];
a[i+1] = sum;
}
a[m+1] = k[m];
}
}
return alpha;
}
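/*
 * Illustration only: a toy call of the Levinson-Durbin routine above. r[] is
 * an autocorrelation sequence (r[0] must be positive), a[] receives the
 * order + 1 A-polynomial coefficients (a[0] == 1.0), k[] the reflection
 * coefficients, and the return value is the prediction-error energy. The
 * numbers below are made up for the sketch.
 */
static double ExampleLevDurb(void) {
  double r[3] = { 1.0, 0.5, 0.25 };  /* toy autocorrelation, order 2 */
  double a[3];
  double k[2];
  return WebRtcIsac_LevDurb(a, k, r, 2);
}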
//was static before, but didn't work with MEX file
void WebRtcIsac_GetVars(const double *input, const int16_t *pitchGains_Q12,
double *oldEnergy, double *varscale)
{
double nrg[4], chng, pg;
int k;
double pitchGains[4] = {0, 0, 0, 0};
/* Calculate the energy of each quarter of the frame */
nrg[0] = 0.0001;
for (k = QLOOKAHEAD/2; k < (FRAMESAMPLES_QUARTER + QLOOKAHEAD) / 2; k++) {
nrg[0] += input[k]*input[k];
}
nrg[1] = 0.0001;
for ( ; k < (FRAMESAMPLES_HALF + QLOOKAHEAD) / 2; k++) {
nrg[1] += input[k]*input[k];
}
nrg[2] = 0.0001;
for ( ; k < (FRAMESAMPLES*3/4 + QLOOKAHEAD) / 2; k++) {
nrg[2] += input[k]*input[k];
}
nrg[3] = 0.0001;
for ( ; k < (FRAMESAMPLES + QLOOKAHEAD) / 2; k++) {
nrg[3] += input[k]*input[k];
}
/* Calculate average level change */
chng = 0.25 * (fabs(10.0 * log10(nrg[3] / nrg[2])) +
fabs(10.0 * log10(nrg[2] / nrg[1])) +
fabs(10.0 * log10(nrg[1] / nrg[0])) +
fabs(10.0 * log10(nrg[0] / *oldEnergy)));
/* Find average pitch gain */
pg = 0.0;
for (k=0; k<4; k++)
{
pitchGains[k] = ((float)pitchGains_Q12[k])/4096;
pg += pitchGains[k];
}
pg *= 0.25;
/* If pitch gain is low and energy constant - increase noise level*/
/* Matlab code:
pg = 0:.01:.45; plot(pg, 0.0 + 1.0 * exp( -1.0 * exp(-200.0 * pg.*pg.*pg) / (1.0 + 0.4 * 0) ))
*/
*varscale = 0.0 + 1.0 * exp( -1.4 * exp(-200.0 * pg*pg*pg) / (1.0 + 0.4 * chng) );
*oldEnergy = nrg[3];
}
void
WebRtcIsac_GetVarsUB(
const double* input,
double* oldEnergy,
double* varscale)
{
double nrg[4], chng;
int k;
/* Calculate the energy of each quarter of the frame */
nrg[0] = 0.0001;
for (k = 0; k < (FRAMESAMPLES_QUARTER) / 2; k++) {
nrg[0] += input[k]*input[k];
}
nrg[1] = 0.0001;
for ( ; k < (FRAMESAMPLES_HALF) / 2; k++) {
nrg[1] += input[k]*input[k];
}
nrg[2] = 0.0001;
for ( ; k < (FRAMESAMPLES*3/4) / 2; k++) {
nrg[2] += input[k]*input[k];
}
nrg[3] = 0.0001;
for ( ; k < (FRAMESAMPLES) / 2; k++) {
nrg[3] += input[k]*input[k];
}
/* Calculate average level change */
chng = 0.25 * (fabs(10.0 * log10(nrg[3] / nrg[2])) +
fabs(10.0 * log10(nrg[2] / nrg[1])) +
fabs(10.0 * log10(nrg[1] / nrg[0])) +
fabs(10.0 * log10(nrg[0] / *oldEnergy)));
/* If pitch gain is low and energy constant - increase noise level*/
/* Matlab code:
pg = 0:.01:.45; plot(pg, 0.0 + 1.0 * exp( -1.0 * exp(-200.0 * pg.*pg.*pg) / (1.0 + 0.4 * 0) ))
*/
*varscale = exp( -1.4 / (1.0 + 0.4 * chng) );
*oldEnergy = nrg[3];
}
void WebRtcIsac_GetLpcCoefLb(double *inLo, double *inHi, MaskFiltstr *maskdata,
double signal_noise_ratio, const int16_t *pitchGains_Q12,
double *lo_coeff, double *hi_coeff)
{
int k, n, j, pos1, pos2;
double varscale;
double DataLo[WINLEN], DataHi[WINLEN];
double corrlo[ORDERLO+2], corrlo2[ORDERLO+1];
double corrhi[ORDERHI+1];
double k_veclo[ORDERLO], k_vechi[ORDERHI];
double a_LO[ORDERLO+1], a_HI[ORDERHI+1];
double tmp, res_nrg;
double FwdA, FwdB;
/* hearing threshold level in dB; higher value gives more noise */
const double HearThresOffset = -28.0;
/* bandwidth expansion factors for the low and high band */
const double gammaLo = 0.9;
const double gammaHi = 0.8;
/* less-noise-at-low-frequencies factor */
double aa;
/* convert from dB to signal level */
const double H_T_H = pow(10.0, 0.05 * HearThresOffset);
double S_N_R = pow(10.0, 0.05 * signal_noise_ratio) / 3.46; /* divide by sqrt(12) */
/* change quality level depending on pitch gains and level fluctuations */
WebRtcIsac_GetVars(inLo, pitchGains_Q12, &(maskdata->OldEnergy), &varscale);
/* less-noise-at-low-frequencies factor */
aa = 0.35 * (0.5 + 0.5 * varscale);
/* replace data in buffer by new look-ahead data */
for (pos1 = 0; pos1 < QLOOKAHEAD; pos1++)
maskdata->DataBufferLo[pos1 + WINLEN - QLOOKAHEAD] = inLo[pos1];
for (k = 0; k < SUBFRAMES; k++) {
/* Update input buffer and multiply signal with window */
for (pos1 = 0; pos1 < WINLEN - UPDATE/2; pos1++) {
maskdata->DataBufferLo[pos1] = maskdata->DataBufferLo[pos1 + UPDATE/2];
maskdata->DataBufferHi[pos1] = maskdata->DataBufferHi[pos1 + UPDATE/2];
DataLo[pos1] = maskdata->DataBufferLo[pos1] * kLpcCorrWindow[pos1];
DataHi[pos1] = maskdata->DataBufferHi[pos1] * kLpcCorrWindow[pos1];
}
pos2 = k * UPDATE/2;
for (n = 0; n < UPDATE/2; n++, pos1++) {
maskdata->DataBufferLo[pos1] = inLo[QLOOKAHEAD + pos2];
maskdata->DataBufferHi[pos1] = inHi[pos2++];
DataLo[pos1] = maskdata->DataBufferLo[pos1] * kLpcCorrWindow[pos1];
DataHi[pos1] = maskdata->DataBufferHi[pos1] * kLpcCorrWindow[pos1];
}
/* Get correlation coefficients */
WebRtcIsac_AutoCorr(corrlo, DataLo, WINLEN, ORDERLO+1); /* computing autocorrelation */
WebRtcIsac_AutoCorr(corrhi, DataHi, WINLEN, ORDERHI);
/* less noise for lower frequencies, by filtering/scaling autocorrelation sequences */
corrlo2[0] = (1.0+aa*aa) * corrlo[0] - 2.0*aa * corrlo[1];
tmp = (1.0 + aa*aa);
for (n = 1; n <= ORDERLO; n++) {
corrlo2[n] = tmp * corrlo[n] - aa * (corrlo[n-1] + corrlo[n+1]);
}
tmp = (1.0+aa) * (1.0+aa);
for (n = 0; n <= ORDERHI; n++) {
corrhi[n] = tmp * corrhi[n];
}
/* add white noise floor */
corrlo2[0] += 1e-6;
corrhi[0] += 1e-6;
FwdA = 0.01;
FwdB = 0.01;
/* recursive filtering of correlation over subframes */
for (n = 0; n <= ORDERLO; n++) {
maskdata->CorrBufLo[n] = FwdA * maskdata->CorrBufLo[n] + corrlo2[n];
corrlo2[n] = ((1.0-FwdA)*FwdB) * maskdata->CorrBufLo[n] + (1.0-FwdB) * corrlo2[n];
}
for (n = 0; n <= ORDERHI; n++) {
maskdata->CorrBufHi[n] = FwdA * maskdata->CorrBufHi[n] + corrhi[n];
corrhi[n] = ((1.0-FwdA)*FwdB) * maskdata->CorrBufHi[n] + (1.0-FwdB) * corrhi[n];
}
/* compute prediction coefficients */
WebRtcIsac_LevDurb(a_LO, k_veclo, corrlo2, ORDERLO);
WebRtcIsac_LevDurb(a_HI, k_vechi, corrhi, ORDERHI);
/* bandwidth expansion */
tmp = gammaLo;
for (n = 1; n <= ORDERLO; n++) {
a_LO[n] *= tmp;
tmp *= gammaLo;
}
/* residual energy */
res_nrg = 0.0;
for (j = 0; j <= ORDERLO; j++) {
for (n = 0; n <= j; n++) {
res_nrg += a_LO[j] * corrlo2[j-n] * a_LO[n];
}
for (n = j+1; n <= ORDERLO; n++) {
res_nrg += a_LO[j] * corrlo2[n-j] * a_LO[n];
}
}
/* add hearing threshold and compute the gain */
*lo_coeff++ = S_N_R / (sqrt(res_nrg) / varscale + H_T_H);
/* copy coefficients to output array */
for (n = 1; n <= ORDERLO; n++) {
*lo_coeff++ = a_LO[n];
}
/* bandwidth expansion */
tmp = gammaHi;
for (n = 1; n <= ORDERHI; n++) {
a_HI[n] *= tmp;
tmp *= gammaHi;
}
/* residual energy */
res_nrg = 0.0;
for (j = 0; j <= ORDERHI; j++) {
for (n = 0; n <= j; n++) {
res_nrg += a_HI[j] * corrhi[j-n] * a_HI[n];
}
for (n = j+1; n <= ORDERHI; n++) {
res_nrg += a_HI[j] * corrhi[n-j] * a_HI[n];
}
}
/* add hearing threshold and compute the gain */
*hi_coeff++ = S_N_R / (sqrt(res_nrg) / varscale + H_T_H);
/* copy coefficients to output array */
for (n = 1; n <= ORDERHI; n++) {
*hi_coeff++ = a_HI[n];
}
}
}
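/*
 * Illustration only: the residual-energy double loops above evaluate
 * res_nrg = a' * R * a, where R is the symmetric Toeplitz matrix built from
 * the (filtered) autocorrelation sequence. A standalone restatement of the
 * same quantity for an arbitrary order:
 */
static double ExampleResidualEnergy(const double* a, const double* corr,
                                    int order) {
  double resNrg = 0.0;
  int j, n;
  for (j = 0; j <= order; j++) {
    for (n = 0; n <= order; n++) {
      int lag = (j >= n) ? (j - n) : (n - j);
      resNrg += a[j] * a[n] * corr[lag];  /* corr[|j - n|] */
    }
  }
  return resNrg;
}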
/******************************************************************************
* WebRtcIsac_GetLpcCoefUb()
*
* Compute LP coefficients and correlation coefficients. At 12 kHz LP
* coefficients of the first and the last sub-frame are computed. At 16 kHz
* LP coefficients of 4th, 8th and 12th sub-frames are computed. We always
* compute correlation coefficients of all sub-frames.
*
* Inputs:
* -inSignal : Input signal
* -maskdata : a structure keeping signal from previous frame.
* -bandwidth : specifies if the codec is in 0-16 kHz mode or
* 0-12 kHz mode.
*
* Outputs:
* -lpCoeff : pointer to a buffer where A-polynomials are
* written to (first coeff is 1 and it is not
* written)
* -corrMat : a matrix where correlation coefficients of each
* sub-frame are written to one row.
* -varscale : a scale used to compute LPC gains.
*/
void
WebRtcIsac_GetLpcCoefUb(
double* inSignal,
MaskFiltstr* maskdata,
double* lpCoeff,
double corrMat[][UB_LPC_ORDER + 1],
double* varscale,
int16_t bandwidth)
{
int frameCntr, activeFrameCntr, n, pos1, pos2;
int16_t criterion1;
int16_t criterion2;
int16_t numSubFrames = SUBFRAMES * (1 + (bandwidth == isac16kHz));
double data[WINLEN];
double corrSubFrame[UB_LPC_ORDER+2];
double reflecCoeff[UB_LPC_ORDER];
double aPolynom[UB_LPC_ORDER+1];
double tmp;
/* bandwidth expansion factor */
const double gamma = 0.9;
/* change quality level depending on pitch gains and level fluctuations */
WebRtcIsac_GetVarsUB(inSignal, &(maskdata->OldEnergy), varscale);
/* replace data in buffer by new look-ahead data */
for(frameCntr = 0, activeFrameCntr = 0; frameCntr < numSubFrames;
frameCntr++)
{
if(frameCntr == SUBFRAMES)
{
// we are in 16 kHz
varscale++;
WebRtcIsac_GetVarsUB(&inSignal[FRAMESAMPLES_HALF],
&(maskdata->OldEnergy), varscale);
}
/* Update input buffer and multiply signal with window */
for(pos1 = 0; pos1 < WINLEN - UPDATE/2; pos1++)
{
maskdata->DataBufferLo[pos1] = maskdata->DataBufferLo[pos1 +
UPDATE/2];
data[pos1] = maskdata->DataBufferLo[pos1] * kLpcCorrWindow[pos1];
}
pos2 = frameCntr * UPDATE/2;
for(n = 0; n < UPDATE/2; n++, pos1++, pos2++)
{
maskdata->DataBufferLo[pos1] = inSignal[pos2];
data[pos1] = maskdata->DataBufferLo[pos1] * kLpcCorrWindow[pos1];
}
/* Get correlation coefficients */
/* computing autocorrelation */
WebRtcIsac_AutoCorr(corrSubFrame, data, WINLEN, UB_LPC_ORDER+1);
memcpy(corrMat[frameCntr], corrSubFrame,
(UB_LPC_ORDER+1)*sizeof(double));
criterion1 = ((frameCntr == 0) || (frameCntr == (SUBFRAMES - 1))) &&
(bandwidth == isac12kHz);
criterion2 = (((frameCntr+1) % 4) == 0) &&
(bandwidth == isac16kHz);
if(criterion1 || criterion2)
{
/* add noise */
corrSubFrame[0] += 1e-6;
/* compute prediction coefficients */
WebRtcIsac_LevDurb(aPolynom, reflecCoeff, corrSubFrame,
UB_LPC_ORDER);
/* bandwidth expansion */
tmp = gamma;
for (n = 1; n <= UB_LPC_ORDER; n++)
{
*lpCoeff++ = aPolynom[n] * tmp;
tmp *= gamma;
}
activeFrameCntr++;
}
}
}
/******************************************************************************
* WebRtcIsac_GetLpcGain()
*
* Compute the LPC gains for each sub-frame, given the LPC of each sub-frame
* and the corresponding correlation coefficients.
*
* Inputs:
* -signal_noise_ratio : the desired SNR in dB.
* -numVecs : number of sub-frames
* -corrMat : a matrix of correlation coefficients where
* each row is a set of correlation coefficients of
* one sub-frame.
* -varscale : a scale computed when WebRtcIsac_GetLpcCoefUb()
* is called.
*
* Outputs:
* -gain : pointer to a buffer where LP gains are written.
*
*/
void
WebRtcIsac_GetLpcGain(
double signal_noise_ratio,
const double* filtCoeffVecs,
int numVecs,
double* gain,
double corrMat[][UB_LPC_ORDER + 1],
const double* varscale)
{
int16_t j, n;
int16_t subFrameCntr;
double aPolynom[ORDERLO + 1];
double res_nrg;
const double HearThresOffset = -28.0;
const double H_T_H = pow(10.0, 0.05 * HearThresOffset);
/* divide by sqrt(12) = 3.46 */
const double S_N_R = pow(10.0, 0.05 * signal_noise_ratio) / 3.46;
aPolynom[0] = 1;
for(subFrameCntr = 0; subFrameCntr < numVecs; subFrameCntr++)
{
if(subFrameCntr == SUBFRAMES)
{
// we are in second half of a SWB frame. use new varscale
varscale++;
}
memcpy(&aPolynom[1], &filtCoeffVecs[(subFrameCntr * (UB_LPC_ORDER + 1)) +
1], sizeof(double) * UB_LPC_ORDER);
/* residual energy */
res_nrg = 0.0;
for(j = 0; j <= UB_LPC_ORDER; j++)
{
for(n = 0; n <= j; n++)
{
res_nrg += aPolynom[j] * corrMat[subFrameCntr][j-n] *
aPolynom[n];
}
for(n = j+1; n <= UB_LPC_ORDER; n++)
{
res_nrg += aPolynom[j] * corrMat[subFrameCntr][n-j] *
aPolynom[n];
}
}
/* add hearing threshold and compute the gain */
gain[subFrameCntr] = S_N_R / (sqrt(res_nrg) / *varscale + H_T_H);
}
}

View File

@ -0,0 +1,50 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* lpc_analysis.h
*
* LPC functions
*
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_ANALYSIS_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_ANALYSIS_H_
#include "settings.h"
#include "structs.h"
double WebRtcIsac_LevDurb(double *a, double *k, double *r, size_t order);
void WebRtcIsac_GetVars(const double *input, const int16_t *pitchGains_Q12,
double *oldEnergy, double *varscale);
void WebRtcIsac_GetLpcCoefLb(double *inLo, double *inHi, MaskFiltstr *maskdata,
double signal_noise_ratio, const int16_t *pitchGains_Q12,
double *lo_coeff, double *hi_coeff);
void WebRtcIsac_GetLpcGain(
double signal_noise_ratio,
const double* filtCoeffVecs,
int numVecs,
double* gain,
double corrLo[][UB_LPC_ORDER + 1],
const double* varscale);
void WebRtcIsac_GetLpcCoefUb(
double* inSignal,
MaskFiltstr* maskdata,
double* lpCoeff,
double corr[][UB_LPC_ORDER + 1],
double* varscale,
int16_t bandwidth);
#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_ANALYSIS_H_ */

View File

@ -0,0 +1,137 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* SWB_KLT_Tables_LPCGain.c
*
* This file defines tables used for entropy coding of LPC Gain
* of upper-band.
*
*/
#include "lpc_gain_swb_tables.h"
#include "settings.h"
#include "webrtc/typedefs.h"
const double WebRtcIsac_kQSizeLpcGain = 0.100000;
const double WebRtcIsac_kMeanLpcGain = -3.3822;
/*
* The smallest reconstruction points for quantization of
* LPC gains.
*/
const double WebRtcIsac_kLeftRecPointLpcGain[SUBFRAMES] =
{
-0.800000, -1.000000, -1.200000, -2.200000, -3.000000, -12.700000
};
/*
* Number of reconstruction points of quantizers for LPC Gains.
*/
const int16_t WebRtcIsac_kNumQCellLpcGain[SUBFRAMES] =
{
17, 20, 25, 45, 77, 170
};
/*
* Starting index for entropy decoder to search for the right interval,
* one entry per LPC gain
*/
const uint16_t WebRtcIsac_kLpcGainEntropySearch[SUBFRAMES] =
{
8, 10, 12, 22, 38, 85
};
/*
* The following 6 vectors define CDF of 6 decorrelated LPC
* gains.
*/
const uint16_t WebRtcIsac_kLpcGainCdfVec0[18] =
{
0, 10, 27, 83, 234, 568, 1601, 4683, 16830, 57534, 63437,
64767, 65229, 65408, 65483, 65514, 65527, 65535
};
const uint16_t WebRtcIsac_kLpcGainCdfVec1[21] =
{
0, 15, 33, 84, 185, 385, 807, 1619, 3529, 7850, 19488,
51365, 62437, 64548, 65088, 65304, 65409, 65484, 65507, 65522, 65535
};
const uint16_t WebRtcIsac_kLpcGainCdfVec2[26] =
{
0, 15, 29, 54, 89, 145, 228, 380, 652, 1493, 4260,
12359, 34133, 50749, 57224, 60814, 62927, 64078, 64742, 65103, 65311, 65418,
65473, 65509, 65521, 65535
};
const uint16_t WebRtcIsac_kLpcGainCdfVec3[46] =
{
0, 8, 12, 16, 26, 42, 56, 76, 111, 164, 247,
366, 508, 693, 1000, 1442, 2155, 3188, 4854, 7387, 11249, 17617,
30079, 46711, 56291, 60127, 62140, 63258, 63954, 64384, 64690, 64891, 65031,
65139, 65227, 65293, 65351, 65399, 65438, 65467, 65492, 65504, 65510, 65518,
65523, 65535
};
const uint16_t WebRtcIsac_kLpcGainCdfVec4[78] =
{
0, 17, 29, 39, 51, 70, 104, 154, 234, 324, 443,
590, 760, 971, 1202, 1494, 1845, 2274, 2797, 3366, 4088, 4905,
5899, 7142, 8683, 10625, 12983, 16095, 20637, 28216, 38859, 47237, 51537,
54150, 56066, 57583, 58756, 59685, 60458, 61103, 61659, 62144, 62550, 62886,
63186, 63480, 63743, 63954, 64148, 64320, 64467, 64600, 64719, 64837, 64939,
65014, 65098, 65160, 65211, 65250, 65290, 65325, 65344, 65366, 65391, 65410,
65430, 65447, 65460, 65474, 65487, 65494, 65501, 65509, 65513, 65518, 65520,
65535
};
const uint16_t WebRtcIsac_kLpcGainCdfVec5[171] =
{
0, 10, 12, 14, 16, 18, 23, 29, 35, 42, 51,
58, 65, 72, 78, 87, 96, 103, 111, 122, 134, 150,
167, 184, 202, 223, 244, 265, 289, 315, 346, 379, 414,
450, 491, 532, 572, 613, 656, 700, 751, 802, 853, 905,
957, 1021, 1098, 1174, 1250, 1331, 1413, 1490, 1565, 1647, 1730,
1821, 1913, 2004, 2100, 2207, 2314, 2420, 2532, 2652, 2783, 2921,
3056, 3189, 3327, 3468, 3640, 3817, 3993, 4171, 4362, 4554, 4751,
4948, 5142, 5346, 5566, 5799, 6044, 6301, 6565, 6852, 7150, 7470,
7797, 8143, 8492, 8835, 9181, 9547, 9919, 10315, 10718, 11136, 11566,
12015, 12482, 12967, 13458, 13953, 14432, 14903, 15416, 15936, 16452, 16967,
17492, 18024, 18600, 19173, 19736, 20311, 20911, 21490, 22041, 22597, 23157,
23768, 24405, 25034, 25660, 26280, 26899, 27614, 28331, 29015, 29702, 30403,
31107, 31817, 32566, 33381, 34224, 35099, 36112, 37222, 38375, 39549, 40801,
42074, 43350, 44626, 45982, 47354, 48860, 50361, 51845, 53312, 54739, 56026,
57116, 58104, 58996, 59842, 60658, 61488, 62324, 63057, 63769, 64285, 64779,
65076, 65344, 65430, 65500, 65517, 65535
};
/*
* An array of pointers to CDFs of decorrelated LPC Gains
*/
const uint16_t* WebRtcIsac_kLpcGainCdfMat[SUBFRAMES] =
{
WebRtcIsac_kLpcGainCdfVec0, WebRtcIsac_kLpcGainCdfVec1,
WebRtcIsac_kLpcGainCdfVec2, WebRtcIsac_kLpcGainCdfVec3,
WebRtcIsac_kLpcGainCdfVec4, WebRtcIsac_kLpcGainCdfVec5
};
/*
* A matrix to decorrelate LPC gains of sub-frames.
*/
const double WebRtcIsac_kLpcGainDecorrMat[SUBFRAMES][SUBFRAMES] =
{
{-0.150860, 0.327872, 0.367220, 0.504613, 0.559270, 0.409234},
{ 0.457128, -0.613591, -0.289283, -0.029734, 0.393760, 0.418240},
{-0.626043, 0.136489, -0.439118, -0.448323, 0.135987, 0.420869},
{ 0.526617, 0.480187, 0.242552, -0.488754, -0.158713, 0.411331},
{-0.302587, -0.494953, 0.588112, -0.063035, -0.404290, 0.387510},
{ 0.086378, 0.147714, -0.428875, 0.548300, -0.570121, 0.401391}
};
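/*
 * Illustration only: applying the decorrelation matrix above to the six
 * log-domain, mean-removed LPC gains of one frame. Whether the codec
 * multiplies with the rows or the columns of the matrix is an assumption
 * here; the sketch uses a plain matrix-vector product.
 */
static void ExampleDecorrelateLpcGains(const double* meanRemovedLogGains,
                                       double* decorrelated) {
  int i, j;
  for (i = 0; i < SUBFRAMES; i++) {
    double acc = 0.0;
    for (j = 0; j < SUBFRAMES; j++) {
      acc += WebRtcIsac_kLpcGainDecorrMat[i][j] * meanRemovedLogGains[j];
    }
    decorrelated[i] = acc;
  }
}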

View File

@ -0,0 +1,49 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* SWB_KLT_Tables_LPCGain.h
*
* This file declares tables used for entropy coding of LPC Gain
* of upper-band.
*
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_GAIN_SWB_TABLES_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_GAIN_SWB_TABLES_H_
#include "settings.h"
#include "webrtc/typedefs.h"
extern const double WebRtcIsac_kQSizeLpcGain;
extern const double WebRtcIsac_kLeftRecPointLpcGain[SUBFRAMES];
extern const int16_t WebRtcIsac_kNumQCellLpcGain[SUBFRAMES];
extern const uint16_t WebRtcIsac_kLpcGainEntropySearch[SUBFRAMES];
extern const uint16_t WebRtcIsac_kLpcGainCdfVec0[18];
extern const uint16_t WebRtcIsac_kLpcGainCdfVec1[21];
extern const uint16_t WebRtcIsac_kLpcGainCdfVec2[26];
extern const uint16_t WebRtcIsac_kLpcGainCdfVec3[46];
extern const uint16_t WebRtcIsac_kLpcGainCdfVec4[78];
extern const uint16_t WebRtcIsac_kLpcGainCdfVec5[171];
extern const uint16_t* WebRtcIsac_kLpcGainCdfMat[SUBFRAMES];
extern const double WebRtcIsac_kLpcGainDecorrMat[SUBFRAMES][SUBFRAMES];
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_GAIN_SWB_TABLES_H_

View File

@ -0,0 +1,159 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* SWB_KLT_Tables.c
*
* This file defines tables used for entropy coding of LPC shape of
* upper-band signal if the bandwidth is 12 kHz.
*
*/
#include "lpc_shape_swb12_tables.h"
#include "settings.h"
#include "webrtc/typedefs.h"
/*
* Mean value of LAR
*/
const double WebRtcIsac_kMeanLarUb12[UB_LPC_ORDER] =
{
0.03748928306641, 0.09453441192543, -0.01112522344398, 0.03800237516842
};
/*
* A rotation matrix to decorrelate intra-vector correlation,
* i.e. correlation among components of LAR vector.
*/
const double WebRtcIsac_kIntraVecDecorrMatUb12[UB_LPC_ORDER][UB_LPC_ORDER] =
{
{-0.00075365493856, -0.05809964887743, -0.23397966154116, 0.97050367376411},
{ 0.00625021257734, -0.17299965610679, 0.95977735920651, 0.22104179375008},
{ 0.20543384258374, -0.96202143495696, -0.15301870801552, -0.09432375099565},
{-0.97865075648479, -0.20300322280841, -0.02581111653779, -0.01913568980258}
};
/*
* A rotation matrix to remove correlation among LAR coefficients
* of different LAR vectors. One might expect the decorrelation matrix for the
* first component to differ from that of the second component, but we have not
* observed a significant benefit from using different decorrelation matrices
* for different components.
*/
const double WebRtcIsac_kInterVecDecorrMatUb12
[UB_LPC_VEC_PER_FRAME][UB_LPC_VEC_PER_FRAME] =
{
{ 0.70650597970460, -0.70770707262373},
{-0.70770707262373, -0.70650597970460}
};
/*
* LAR quantization step-size.
*/
const double WebRtcIsac_kLpcShapeQStepSizeUb12 = 0.150000;
/*
* The smallest reconstruction points for quantization of LAR coefficients.
*/
const double WebRtcIsac_kLpcShapeLeftRecPointUb12
[UB_LPC_ORDER*UB_LPC_VEC_PER_FRAME] =
{
-0.900000, -1.050000, -1.350000, -1.800000, -1.350000, -1.650000,
-2.250000, -3.450000
};
/*
* Number of reconstruction points of quantizers for LAR coefficients.
*/
const int16_t WebRtcIsac_kLpcShapeNumRecPointUb12
[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME] =
{
13, 15, 19, 27, 19, 24, 32, 48
};
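/*
 * Illustration only: a plausible reading of how the step size, leftmost
 * reconstruction points and reconstruction-point counts above describe a
 * uniform scalar quantizer for the k-th decorrelated LAR component. The exact
 * rounding and clamping done by the codec is an assumption of this sketch.
 */
static double ExampleQuantizeLarUb12(int k, double decorrelatedLar,
                                     int* index) {
  const double step = WebRtcIsac_kLpcShapeQStepSizeUb12;
  const double left = WebRtcIsac_kLpcShapeLeftRecPointUb12[k];
  const int numCells = WebRtcIsac_kLpcShapeNumRecPointUb12[k];
  int idx = (int)((decorrelatedLar - left) / step + 0.5);
  if (idx < 0) idx = 0;
  if (idx > numCells - 1) idx = numCells - 1;
  *index = idx;
  return left + idx * step;  /* reconstructed value */
}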
/*
* Starting index for entropy decoder to search for the right interval,
* one entry per LAR coefficient
*/
const uint16_t WebRtcIsac_kLpcShapeEntropySearchUb12
[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME] =
{
6, 7, 9, 13, 9, 12, 16, 24
};
/*
* The following 8 vectors define CDF of 8 decorrelated LAR
* coefficients.
*/
const uint16_t WebRtcIsac_kLpcShapeCdfVec0Ub12[14] =
{
0, 13, 95, 418, 1687, 6498, 21317, 44200, 59029, 63849, 65147,
65449, 65525, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec1Ub12[16] =
{
0, 10, 59, 255, 858, 2667, 8200, 22609, 42988, 57202, 62947,
64743, 65308, 65476, 65522, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec2Ub12[20] =
{
0, 18, 40, 118, 332, 857, 2017, 4822, 11321, 24330, 41279,
54342, 60637, 63394, 64659, 65184, 65398, 65482, 65518, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec3Ub12[28] =
{
0, 21, 38, 90, 196, 398, 770, 1400, 2589, 4650, 8211,
14933, 26044, 39592, 50814, 57452, 60971, 62884, 63995, 64621, 65019, 65273,
65410, 65480, 65514, 65522, 65531, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec4Ub12[20] =
{
0, 7, 46, 141, 403, 969, 2132, 4649, 10633, 24902, 43254,
54665, 59928, 62674, 64173, 64938, 65293, 65464, 65523, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec5Ub12[25] =
{
0, 7, 22, 72, 174, 411, 854, 1737, 3545, 6774, 13165,
25221, 40980, 52821, 58714, 61706, 63472, 64437, 64989, 65287, 65430, 65503,
65525, 65529, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec6Ub12[33] =
{
0, 11, 21, 36, 65, 128, 228, 401, 707, 1241, 2126,
3589, 6060, 10517, 18853, 31114, 42477, 49770, 54271, 57467, 59838, 61569,
62831, 63772, 64433, 64833, 65123, 65306, 65419, 65466, 65499, 65519, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec7Ub12[49] =
{
0, 14, 34, 67, 107, 167, 245, 326, 449, 645, 861,
1155, 1508, 2003, 2669, 3544, 4592, 5961, 7583, 9887, 13256, 18765,
26519, 34077, 40034, 44349, 47795, 50663, 53262, 55473, 57458, 59122, 60592,
61742, 62690, 63391, 63997, 64463, 64794, 65045, 65207, 65309, 65394, 65443,
65478, 65504, 65514, 65523, 65535
};
/*
* An array of pointers to CDFs of decorrelated LARs
*/
const uint16_t* WebRtcIsac_kLpcShapeCdfMatUb12
[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME] =
{
WebRtcIsac_kLpcShapeCdfVec0Ub12, WebRtcIsac_kLpcShapeCdfVec1Ub12,
WebRtcIsac_kLpcShapeCdfVec2Ub12, WebRtcIsac_kLpcShapeCdfVec3Ub12,
WebRtcIsac_kLpcShapeCdfVec4Ub12, WebRtcIsac_kLpcShapeCdfVec5Ub12,
WebRtcIsac_kLpcShapeCdfVec6Ub12, WebRtcIsac_kLpcShapeCdfVec7Ub12
};
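/*
 * Illustration only: each CDF vector above has one more entry than the
 * corresponding reconstruction-point count, starts at 0 and ends at 65535, so
 * the probability mass assigned to quantization index `symbol` of the k-th
 * decorrelated LAR component can be read off as the difference of adjacent
 * entries on the 16-bit scale. The arithmetic coder's internal scaling is not
 * reproduced here.
 */
static double ExampleLarSymbolProbUb12(int k, int symbol) {
  const uint16_t* cdf = WebRtcIsac_kLpcShapeCdfMatUb12[k];
  return (double)(cdf[symbol + 1] - cdf[symbol]) / 65535.0;
}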

View File

@ -0,0 +1,65 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* lpc_shape_swb12_tables.h
*
* This file declares tables used for entropy coding of LPC shape of
* upper-band signal if the bandwidth is 12 kHz.
*
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB12_TABLES_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB12_TABLES_H_
#include "settings.h"
#include "webrtc/typedefs.h"
extern const double WebRtcIsac_kMeanLarUb12[UB_LPC_ORDER];
extern const double WebRtcIsac_kMeanLpcGain;
extern const double WebRtcIsac_kIntraVecDecorrMatUb12[UB_LPC_ORDER][UB_LPC_ORDER];
extern const double WebRtcIsac_kInterVecDecorrMatUb12
[UB_LPC_VEC_PER_FRAME][UB_LPC_VEC_PER_FRAME];
extern const double WebRtcIsac_kLpcShapeQStepSizeUb12;
extern const double WebRtcIsac_kLpcShapeLeftRecPointUb12
[UB_LPC_ORDER*UB_LPC_VEC_PER_FRAME];
extern const int16_t WebRtcIsac_kLpcShapeNumRecPointUb12
[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME];
extern const uint16_t WebRtcIsac_kLpcShapeEntropySearchUb12
[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec0Ub12[14];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec1Ub12[16];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec2Ub12[20];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec3Ub12[28];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec4Ub12[20];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec5Ub12[25];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec6Ub12[33];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec7Ub12[49];
extern const uint16_t* WebRtcIsac_kLpcShapeCdfMatUb12
[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME];
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB12_TABLES_H_

View File

@ -0,0 +1,248 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* SWB16_KLT_Tables.c
*
* This file defines tables used for entropy coding of LPC shape of
* upper-band signal if the bandwidth is 16 kHz.
*
*/
#include "lpc_shape_swb16_tables.h"
#include "settings.h"
#include "webrtc/typedefs.h"
/*
* Mean value of LAR
*/
const double WebRtcIsac_kMeanLarUb16[UB_LPC_ORDER] =
{
0.454978, 0.364747, 0.102999, 0.104523
};
/*
* A rotation matrix to decorrelate intra-vector correlation,
* i.e. correlation among components of LAR vector.
*/
const double WebRtcIsac_kIintraVecDecorrMatUb16[UB_LPC_ORDER][UB_LPC_ORDER] =
{
{-0.020528, -0.085858, -0.002431, 0.996093},
{-0.033155, 0.036102, 0.998786, 0.004866},
{ 0.202627, 0.974853, -0.028940, 0.088132},
{-0.978479, 0.202454, -0.039785, -0.002811}
};
/*
* A rotation matrix to remove correlation among LAR coefficients
* of different LAR vectors. One might expect the decorrelation matrix for the
* first component to differ from that of the second component, but we have not
* observed a significant benefit from using different decorrelation matrices
* for different components.
*/
const double WebRtcIsac_kInterVecDecorrMatUb16
[UB16_LPC_VEC_PER_FRAME][UB16_LPC_VEC_PER_FRAME] =
{
{ 0.291675, -0.515786, 0.644927, 0.482658},
{-0.647220, 0.479712, 0.289556, 0.516856},
{ 0.643084, 0.485489, -0.289307, 0.516763},
{-0.287185, -0.517823, -0.645389, 0.482553}
};
/*
* The following 16 vectors define CDF of 16 decorrelated LAR
* coefficients.
*/
const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub16[14] =
{
0, 2, 20, 159, 1034, 5688, 20892, 44653,
59849, 64485, 65383, 65518, 65534, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec1Ub16[16] =
{
0, 1, 7, 43, 276, 1496, 6681, 21653,
43891, 58859, 64022, 65248, 65489, 65529, 65534, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec2Ub16[18] =
{
0, 1, 9, 54, 238, 933, 3192, 9461,
23226, 42146, 56138, 62413, 64623, 65300, 65473, 65521,
65533, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec3Ub16[30] =
{
0, 2, 4, 8, 17, 36, 75, 155,
329, 683, 1376, 2662, 5047, 9508, 17526, 29027,
40363, 48997, 55096, 59180, 61789, 63407, 64400, 64967,
65273, 65429, 65497, 65526, 65534, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec4Ub16[16] =
{
0, 1, 10, 63, 361, 1785, 7407, 22242,
43337, 58125, 63729, 65181, 65472, 65527, 65534, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec5Ub16[17] =
{
0, 1, 7, 29, 134, 599, 2443, 8590,
22962, 42635, 56911, 63060, 64940, 65408, 65513, 65531,
65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec6Ub16[21] =
{
0, 1, 5, 16, 57, 191, 611, 1808,
4847, 11755, 24612, 40910, 53789, 60698, 63729, 64924,
65346, 65486, 65523, 65532, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec7Ub16[36] =
{
0, 1, 4, 12, 25, 55, 104, 184,
314, 539, 926, 1550, 2479, 3861, 5892, 8845,
13281, 20018, 29019, 38029, 45581, 51557, 56057, 59284,
61517, 63047, 64030, 64648, 65031, 65261, 65402, 65480,
65518, 65530, 65534, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec8Ub16[21] =
{
0, 1, 2, 7, 26, 103, 351, 1149,
3583, 10204, 23846, 41711, 55361, 61917, 64382, 65186,
65433, 65506, 65528, 65534, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub160[21] =
{
0, 6, 19, 63, 205, 638, 1799, 4784,
11721, 24494, 40803, 53805, 60886, 63822, 64931, 65333,
65472, 65517, 65530, 65533, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub161[28] =
{
0, 1, 3, 11, 31, 86, 221, 506,
1101, 2296, 4486, 8477, 15356, 26079, 38941, 49952,
57165, 61257, 63426, 64549, 65097, 65351, 65463, 65510,
65526, 65532, 65534, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub162[55] =
{
0, 3, 12, 23, 42, 65, 89, 115,
150, 195, 248, 327, 430, 580, 784, 1099,
1586, 2358, 3651, 5899, 9568, 14312, 19158, 23776,
28267, 32663, 36991, 41153, 45098, 48680, 51870, 54729,
57141, 59158, 60772, 62029, 63000, 63761, 64322, 64728,
65000, 65192, 65321, 65411, 65463, 65496, 65514, 65523,
65527, 65529, 65531, 65532, 65533, 65534, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub163[26] =
{
0, 2, 4, 10, 21, 48, 114, 280,
701, 1765, 4555, 11270, 24267, 41213, 54285, 61003,
63767, 64840, 65254, 65421, 65489, 65514, 65526, 65532,
65534, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub164[28] =
{
0, 1, 3, 6, 15, 36, 82, 196,
453, 1087, 2557, 5923, 13016, 25366, 40449, 52582,
59539, 62896, 64389, 65033, 65316, 65442, 65494, 65519,
65529, 65533, 65534, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub165[34] =
{
0, 2, 4, 8, 18, 35, 73, 146,
279, 524, 980, 1789, 3235, 5784, 10040, 16998,
27070, 38543, 48499, 55421, 59712, 62257, 63748, 64591,
65041, 65278, 65410, 65474, 65508, 65522, 65530, 65533,
65534, 65535
};
const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub166[71] =
{
0, 1, 2, 6, 13, 26, 55, 92,
141, 191, 242, 296, 355, 429, 522, 636,
777, 947, 1162, 1428, 1753, 2137, 2605, 3140,
3743, 4409, 5164, 6016, 6982, 8118, 9451, 10993,
12754, 14810, 17130, 19780, 22864, 26424, 30547, 35222,
40140, 44716, 48698, 52056, 54850, 57162, 59068, 60643,
61877, 62827, 63561, 64113, 64519, 64807, 65019, 65167,
65272, 65343, 65399, 65440, 65471, 65487, 65500, 65509,
65518, 65524, 65527, 65531, 65533, 65534, 65535
};
/*
* An array of pointers to CDFs of decorrelated LARs
*/
const uint16_t* WebRtcIsac_kLpcShapeCdfMatUb16
[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME] = {
WebRtcIsac_kLpcShapeCdfVec01Ub16,
WebRtcIsac_kLpcShapeCdfVec1Ub16,
WebRtcIsac_kLpcShapeCdfVec2Ub16,
WebRtcIsac_kLpcShapeCdfVec3Ub16,
WebRtcIsac_kLpcShapeCdfVec4Ub16,
WebRtcIsac_kLpcShapeCdfVec5Ub16,
WebRtcIsac_kLpcShapeCdfVec6Ub16,
WebRtcIsac_kLpcShapeCdfVec7Ub16,
WebRtcIsac_kLpcShapeCdfVec8Ub16,
WebRtcIsac_kLpcShapeCdfVec01Ub160,
WebRtcIsac_kLpcShapeCdfVec01Ub161,
WebRtcIsac_kLpcShapeCdfVec01Ub162,
WebRtcIsac_kLpcShapeCdfVec01Ub163,
WebRtcIsac_kLpcShapeCdfVec01Ub164,
WebRtcIsac_kLpcShapeCdfVec01Ub165,
WebRtcIsac_kLpcShapeCdfVec01Ub166
};
/*
* The smallest reconstruction points for quantization of LAR coefficients.
*/
const double WebRtcIsac_kLpcShapeLeftRecPointUb16
[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME] =
{
-0.8250, -0.9750, -1.1250, -2.1750, -0.9750, -1.1250, -1.4250,
-2.6250, -1.4250, -1.2750, -1.8750, -3.6750, -1.7250, -1.8750,
-2.3250, -5.4750
};
/*
* Number of reconstruction points of quantizers for LAR coefficients.
*/
const int16_t WebRtcIsac_kLpcShapeNumRecPointUb16
[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME] =
{
13, 15, 17, 29, 15, 16, 20, 35, 20,
20, 27, 54, 25, 27, 33, 70
};
/*
* Starting index for entropy decoder to search for the right interval,
* one entry per LAR coefficient
*/
const uint16_t WebRtcIsac_kLpcShapeEntropySearchUb16
[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME] =
{
6, 7, 8, 14, 7, 8, 10, 17, 10,
10, 13, 27, 12, 13, 16, 35
};
/*
* LAR quantization step-size.
*/
const double WebRtcIsac_kLpcShapeQStepSizeUb16 = 0.150000;

View File

@ -0,0 +1,78 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* lpc_shape_swb16_tables.h
*
* This file declares tables used for entropy coding of LPC shape of
* upper-band signal if the bandwidth is 16 kHz.
*
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB16_TABLES_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB16_TABLES_H_
#include "settings.h"
#include "webrtc/typedefs.h"
extern const double WebRtcIsac_kMeanLarUb16[UB_LPC_ORDER];
extern const double WebRtcIsac_kIintraVecDecorrMatUb16[UB_LPC_ORDER][UB_LPC_ORDER];
extern const double WebRtcIsac_kInterVecDecorrMatUb16
[UB16_LPC_VEC_PER_FRAME][UB16_LPC_VEC_PER_FRAME];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub16[14];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec1Ub16[16];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec2Ub16[18];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec3Ub16[30];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec4Ub16[16];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec5Ub16[17];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec6Ub16[21];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec7Ub16[36];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec8Ub16[21];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub160[21];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub161[28];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub162[55];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub163[26];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub164[28];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub165[34];
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub166[71];
extern const uint16_t* WebRtcIsac_kLpcShapeCdfMatUb16
[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
extern const double WebRtcIsac_kLpcShapeLeftRecPointUb16
[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
extern const int16_t WebRtcIsac_kLpcShapeNumRecPointUb16
[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
extern const uint16_t WebRtcIsac_kLpcShapeEntropySearchUb16
[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
extern const double WebRtcIsac_kLpcShapeQStepSizeUb16;
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB16_TABLES_H_

View File

@ -0,0 +1,601 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/* coding tables for the KLT coefficients */
#include "lpc_tables.h"
#include "settings.h"
/* cdf array for model indicator */
const uint16_t WebRtcIsac_kQKltModelCdf[4] = {
0, 15434, 37548, 65535 };
/* pointer to cdf array for model indicator */
const uint16_t *WebRtcIsac_kQKltModelCdfPtr[1] = {
WebRtcIsac_kQKltModelCdf };
/* initial cdf index for decoder of model indicator */
const uint16_t WebRtcIsac_kQKltModelInitIndex[1] = { 1 };
/* offset to go from rounded value to quantization index */
const short WebRtcIsac_kQKltQuantMinGain[12] = {
3, 6, 4, 6, 6, 9, 5, 16, 11, 34, 32, 47 };
const short WebRtcIsac_kQKltQuantMinShape[108] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 2, 2, 2, 3, 0, 0,
0, 0, 1, 0, 0, 0, 0, 1, 1, 1,
1, 1, 1, 2, 2, 3, 0, 0, 0, 0,
1, 0, 1, 1, 1, 1, 1, 1, 1, 2,
2, 4, 3, 5, 0, 0, 0, 0, 1, 1,
1, 1, 1, 1, 2, 1, 2, 2, 3, 4,
4, 7, 0, 0, 1, 1, 1, 1, 1, 1,
1, 2, 3, 2, 3, 4, 4, 5, 7, 13,
0, 1, 1, 2, 3, 2, 2, 2, 4, 4,
5, 6, 7, 11, 9, 13, 12, 26 };
/* maximum quantization index */
const uint16_t WebRtcIsac_kQKltMaxIndGain[12] = {
6, 12, 8, 14, 10, 19, 12, 31, 22, 56, 52, 138 };
const uint16_t WebRtcIsac_kQKltMaxIndShape[108] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
2, 2, 2, 2, 4, 4, 5, 6, 0, 0,
0, 0, 1, 0, 0, 0, 0, 1, 2, 2,
2, 2, 3, 4, 5, 7, 0, 0, 0, 0,
2, 0, 2, 2, 2, 2, 3, 2, 2, 4,
4, 6, 6, 9, 0, 0, 0, 0, 2, 2,
2, 2, 2, 2, 3, 2, 4, 4, 7, 7,
9, 13, 0, 0, 2, 2, 2, 2, 2, 2,
3, 4, 5, 4, 6, 8, 8, 10, 16, 25,
0, 2, 2, 4, 5, 4, 4, 4, 7, 8,
9, 10, 13, 19, 17, 23, 25, 49 };
/* index offset */
const uint16_t WebRtcIsac_kQKltOffsetGain[12] = {
0, 7, 20, 29, 44, 55, 75, 88, 120, 143, 200, 253 };
const uint16_t WebRtcIsac_kQKltOffsetShape[108] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
11, 14, 17, 20, 23, 28, 33, 39, 46, 47,
48, 49, 50, 52, 53, 54, 55, 56, 58, 61,
64, 67, 70, 74, 79, 85, 93, 94, 95, 96,
97, 100, 101, 104, 107, 110, 113, 117, 120, 123,
128, 133, 140, 147, 157, 158, 159, 160, 161, 164,
167, 170, 173, 176, 179, 183, 186, 191, 196, 204,
212, 222, 236, 237, 238, 241, 244, 247, 250, 253,
256, 260, 265, 271, 276, 283, 292, 301, 312, 329,
355, 356, 359, 362, 367, 373, 378, 383, 388, 396,
405, 415, 426, 440, 460, 478, 502, 528 };
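/*
 * Illustration only: the offset tables above appear to index into the
 * flattened representation-level arrays defined below. For coefficient k the
 * valid quantization indices are 0..MaxInd[k], and each offset equals the
 * previous offset plus MaxInd + 1 (the gain offsets end at 253 + 138 + 1 =
 * 392, the size of WebRtcIsac_kQKltLevelsGain). Whether the decoder does
 * exactly this arithmetic is an inference from the table sizes.
 */
static int ExampleGainLevelPosition(int k, int quantIndex) {
  if (quantIndex < 0 || quantIndex > WebRtcIsac_kQKltMaxIndGain[k]) {
    return -1;  /* index out of range for this coefficient */
  }
  return WebRtcIsac_kQKltOffsetGain[k] + quantIndex;  /* position in the levels array */
}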
/* initial cdf index for KLT coefficients */
const uint16_t WebRtcIsac_kQKltInitIndexGain[12] = {
3, 6, 4, 7, 5, 10, 6, 16, 11, 28, 26, 69};
const uint16_t WebRtcIsac_kQKltInitIndexShape[108] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 2, 2, 3, 3, 0, 0,
0, 0, 1, 0, 0, 0, 0, 1, 1, 1,
1, 1, 2, 2, 3, 4, 0, 0, 0, 0,
1, 0, 1, 1, 1, 1, 2, 1, 1, 2,
2, 3, 3, 5, 0, 0, 0, 0, 1, 1,
1, 1, 1, 1, 2, 1, 2, 2, 4, 4,
5, 7, 0, 0, 1, 1, 1, 1, 1, 1,
2, 2, 3, 2, 3, 4, 4, 5, 8, 13,
0, 1, 1, 2, 3, 2, 2, 2, 4, 4,
5, 5, 7, 10, 9, 12, 13, 25 };
/* quantizer representation levels */
const double WebRtcIsac_kQKltLevelsGain[392] = {
-2.78127126, -1.76745590, -0.77913790, -0.00437329, 0.79961206,
1.81775776, 2.81389782, -5.78753143, -4.88384084, -3.89320940,
-2.88133610, -1.92859977, -0.86347396, 0.02003888, 0.86140400,
1.89667156, 2.97134967, 3.98781964, 4.91727277, 5.82865898,
-4.11195874, -2.80898424, -1.87547977, -0.80943825, -0.00679084,
0.79573851, 1.83953397, 2.67586037, 3.76274082, -6.10933968,
-4.93034581, -3.89281296, -2.91530625, -1.89684163, -0.85319130,
-0.02275767, 0.86862017, 1.91578276, 2.96107339, 3.96543056,
4.91369908, 5.91058154, 6.83848343, 8.07136925, -5.87470395,
-4.84703049, -3.84284597, -2.86168446, -1.89290192, -0.82798145,
-0.00080013, 0.82594974, 1.85754329, 2.88351798, 3.96172628,
-8.85684885, -7.87387461, -6.97811862, -5.93256270, -4.94301439,
-3.95513701, -2.96041544, -1.94031192, -0.87961478, -0.00456201,
0.89911505, 1.91723376, 2.94011511, 3.93302540, 4.97990967,
5.93133404, 7.02181199, 7.92407762, 8.80155440, 10.04665814,
-4.82396678, -3.85612158, -2.89482244, -1.89558408, -0.90036978,
-0.00677823, 0.90607989, 1.90937981, 2.91175777, 3.91637730,
4.97565723, 5.84771228, 7.11145863, -16.07879840, -15.03776309,
-13.93905670, -12.95671800, -11.89171202, -10.95820934, -9.95923714,
-8.94357334, -7.99068299, -6.97481009, -5.94826231, -4.96673988,
-3.97490466, -2.97846970, -1.95130435, -0.94215262, -0.01444043,
0.96770704, 1.95848598, 2.94107862, 3.95666119, 4.97253085,
5.97191122, 6.93277360, 7.96608727, 8.87958779, 10.00264269,
10.86560820, 12.07449071, 13.04491775, 13.97507061, 14.91845261,
-10.85696295, -9.83365357, -9.01245635, -7.95915145, -6.95625003,
-5.95362618, -4.93468444, -3.98760978, -2.95044407, -1.97041277,
-0.97701799, -0.00840234, 0.97834289, 1.98361415, 2.97802439,
3.96415871, 4.95369042, 5.94101770, 6.92756798, 7.94063998,
8.85951828, 9.97077022, 11.00068503, -33.92030406, -32.81426422,
-32.00000000, -31.13243639, -30.11886909, -29.06017570, -28.12598824,
-27.22045482, -25.81215858, -25.07849962, -23.93018013, -23.02097643,
-21.89529725, -20.99091085, -19.98889048, -18.94327044, -17.96562071,
-16.96126218, -15.95054062, -14.98516200, -13.97101012, -13.02106500,
-11.98438006, -11.03216748, -9.95930286, -8.97043946, -7.98085082,
-6.98360995, -5.98998802, -4.98668173, -4.00032906, -3.00420619,
-1.98701132, -0.99324682, -0.00609324, 0.98297834, 1.99483076,
3.00305044, 3.97142097, 4.97525759, 5.98612258, 6.97448236,
7.97575900, 9.01086211, 9.98665542, 11.00541438, 11.98078628,
12.92352471, 14.06849675, 14.99949430, 15.94904834, 16.97440321,
18.04040916, 18.88987609, 20.05312391, 21.00000000, 21.79443341,
-31.98578825, -31.00000000, -29.89060567, -28.98555686, -27.97114102,
-26.84935410, -26.02402230, -24.94195278, -23.92336849, -22.95552382,
-21.97932836, -20.96055470, -19.99649553, -19.03436122, -17.96706525,
-17.01139515, -16.01363516, -14.99154248, -14.00298333, -12.99630613,
-11.99955519, -10.99000421, -10.00819092, -8.99763648, -7.98431793,
-7.01769025, -5.99604690, -4.99980697, -3.99334671, -3.01748192,
-2.02051217, -1.00848371, -0.01942358, 1.00477757, 1.95477872,
2.98593031, 3.98779079, 4.96862849, 6.02694771, 6.93983733,
7.89874717, 8.99615862, 10.02367921, 10.96293452, 11.84351528,
12.92207187, 13.85122329, 15.05146877, 15.99371264, 17.00000000,
18.00000000, 19.00000000, 19.82763573, -47.00000000, -46.00000000,
-44.87138498, -44.00000000, -43.00000000, -42.00000000, -41.00000000,
-39.88966612, -38.98913239, -37.80306486, -37.23584325, -35.94200288,
-34.99881301, -34.11361858, -33.06507360, -32.13129135, -30.90891364,
-29.81511907, -28.99250380, -28.04535391, -26.99767800, -26.04418164,
-24.95687851, -24.04865595, -23.03392645, -21.89366707, -20.93517364,
-19.99388660, -18.91620943, -18.03749683, -16.99532379, -15.98683813,
-15.06421479, -13.99359211, -12.99714098, -11.97022520, -10.98500279,
-9.98834422, -8.95729330, -8.01232284, -7.00253661, -5.99681626,
-5.01207817, -3.95914904, -3.01232178, -1.96615919, -0.97687670,
0.01228030, 0.98412288, 2.01753544, 3.00580570, 3.97783510,
4.98846894, 6.01321400, 7.00867732, 8.00416375, 9.01771966,
9.98637729, 10.98255180, 11.99194163, 13.01807333, 14.00999545,
15.00118556, 16.00089224, 17.00584148, 17.98251763, 18.99942091,
19.96917690, 20.97839265, 21.98207297, 23.00171271, 23.99930737,
24.99746061, 26.00936304, 26.98240132, 28.01126868, 29.01395915,
29.98153507, 31.01376711, 31.99876818, 33.00475317, 33.99753994,
34.99493913, 35.98933585, 36.95620160, 37.98428461, 38.99317544,
40.01832073, 40.98048133, 41.95999283, 42.98232091, 43.96523612,
44.99574268, 45.99524194, 47.05464025, 48.03821548, 48.99354366,
49.96400411, 50.98017973, 51.95184408, 52.96291806, 54.00194392,
54.96603783, 55.95623778, 57.03076595, 58.05889901, 58.99081551,
59.97928121, 61.05071612, 62.03971580, 63.01286038, 64.01290338,
65.02074503, 65.99454594, 67.00399425, 67.96571257, 68.95305727,
69.92030664, 70.95594862, 71.98088567, 73.04764124, 74.00285480,
75.02696330, 75.89837673, 76.93459997, 78.16266309, 78.83317543,
80.00000000, 80.87251574, 82.09803524, 83.10671664, 84.00000000,
84.77023523, 86.00000000, 87.00000000, 87.92946897, 88.69159118,
90.00000000, 90.90535270 };
const double WebRtcIsac_kQKltLevelsShape[578] = {
0.00032397, 0.00008053, -0.00061202, -0.00012620, 0.00030437,
0.00054764, -0.00027902, 0.00069360, 0.00029449, -0.80219239,
0.00091089, -0.74514927, -0.00094283, 0.64030631, -0.60509119,
0.00035575, 0.61851665, -0.62129957, 0.00375219, 0.60054900,
-0.61554359, 0.00054977, 0.63362016, -1.73118727, -0.65422341,
0.00524568, 0.66165298, 1.76785515, -1.83182018, -0.65997434,
-0.00011887, 0.67524299, 1.79933938, -1.76344480, -0.72547708,
-0.00133017, 0.73104704, 1.75305377, 2.85164534, -2.80423916,
-1.71959639, -0.75419722, -0.00329945, 0.77196760, 1.72211069,
2.87339653, 0.00031089, -0.00015311, 0.00018201, -0.00035035,
-0.77357251, 0.00154647, -0.00047625, -0.00045299, 0.00086590,
0.00044762, -0.83383829, 0.00024787, -0.68526258, -0.00122472,
0.64643255, -0.60904942, -0.00448987, 0.62309184, -0.59626442,
-0.00574132, 0.62296546, -0.63222115, 0.00013441, 0.63609545,
-0.66911055, -0.00369971, 0.66346095, 2.07281301, -1.77184694,
-0.67640425, -0.00010145, 0.64818392, 1.74948973, -1.69420224,
-0.71943894, -0.00004680, 0.75303493, 1.81075983, 2.80610041,
-2.80005755, -1.79866753, -0.77409777, -0.00084220, 0.80141293,
1.78291081, 2.73954236, 3.82994169, 0.00015140, -0.00012766,
-0.00034241, -0.00119125, -0.76113497, 0.00069246, 0.76722027,
0.00132862, -0.69107530, 0.00010656, 0.77061578, -0.78012970,
0.00095947, 0.77828502, -0.64787758, 0.00217168, 0.63050167,
-0.58601125, 0.00306596, 0.59466308, -0.58603410, 0.00059779,
0.64257970, 1.76512766, -0.61193600, -0.00259517, 0.59767574,
-0.61026273, 0.00315811, 0.61725479, -1.69169719, -0.65816029,
0.00067575, 0.65576890, 2.00000000, -1.72689193, -0.69780808,
-0.00040990, 0.70668487, 1.74198458, -3.79028154, -3.00000000,
-1.73194459, -0.70179341, -0.00106695, 0.71302629, 1.76849782,
-2.89332364, -1.78585007, -0.78731491, -0.00132610, 0.79692976,
1.75247009, 2.97828682, -5.26238694, -3.69559829, -2.87286122,
-1.84908818, -0.84434577, -0.01167975, 0.84641753, 1.84087672,
2.87628156, 3.83556679, -0.00190204, 0.00092642, 0.00354385,
-0.00012982, -0.67742785, 0.00229509, 0.64935672, -0.58444751,
0.00470733, 0.57299534, -0.58456202, -0.00097715, 0.64593607,
-0.64060330, -0.00638534, 0.59680157, -0.59287537, 0.00490772,
0.58919707, -0.60306173, -0.00417464, 0.60562100, -1.75218757,
-0.63018569, -0.00225922, 0.63863300, -0.63949939, -0.00126421,
0.64268914, -1.75851182, -0.68318060, 0.00510418, 0.69049211,
1.88178506, -1.71136148, -0.72710534, -0.00815559, 0.73412917,
1.79996711, -2.77111145, -1.73940498, -0.78212945, 0.01074476,
0.77688916, 1.76873972, 2.87281379, 3.77554698, -3.75832725,
-2.95463235, -1.80451491, -0.80017226, 0.00149902, 0.80729206,
1.78265046, 2.89391793, -3.78236148, -2.83640598, -1.82532067,
-0.88844327, -0.00620952, 0.88208030, 1.85757631, 2.81712391,
3.88430176, 5.16179367, -7.00000000, -5.93805408, -4.87172597,
-3.87524433, -2.89399744, -1.92359563, -0.92136341, -0.00172725,
0.93087018, 1.90528280, 2.89809686, 3.88085708, 4.89147740,
5.89078692, -0.00239502, 0.00312564, -1.00000000, 0.00178325,
1.00000000, -0.62198029, 0.00143254, 0.65344051, -0.59851220,
-0.00676987, 0.61510140, -0.58894151, 0.00385055, 0.59794203,
-0.59808568, -0.00038214, 0.57625703, -0.63009713, -0.01107985,
0.61278758, -0.64206758, -0.00154369, 0.65480598, 1.80604162,
-1.80909286, -0.67810514, 0.00205762, 0.68571097, 1.79453891,
-3.22682422, -1.73808453, -0.71870305, -0.00738594, 0.71486172,
1.73005326, -1.66891897, -0.73689615, -0.00616203, 0.74262409,
1.73807899, -2.92417482, -1.73866741, -0.78133871, 0.00764425,
0.80027264, 1.78668732, 2.74992588, -4.00000000, -2.75578740,
-1.83697516, -0.83117035, -0.00355191, 0.83527172, 1.82814700,
2.77377675, 3.80718693, -3.81667698, -2.83575471, -1.83372350,
-0.86579471, 0.00547578, 0.87582281, 1.82858793, 2.87265007,
3.91405377, -4.87521600, -3.78999094, -2.86437014, -1.86964365,
-0.90618018, 0.00128243, 0.91497811, 1.87374952, 2.83199819,
3.91519130, 4.76632822, -6.68713448, -6.01252467, -4.94587936,
-3.88795368, -2.91299088, -1.92592211, -0.95504570, -0.00089980,
0.94565200, 1.93239633, 2.91832808, 3.91363475, 4.88920034,
5.96471415, 6.83905252, 7.86195009, 8.81571018,-12.96141759,
-11.73039516,-10.96459719, -9.97382433, -9.04414433, -7.89460619,
-6.96628608, -5.93236595, -4.93337924, -3.95479990, -2.96451499,
-1.96635876, -0.97271229, -0.00402238, 0.98343930, 1.98348291,
2.96641164, 3.95456471, 4.95517089, 5.98975714, 6.90322073,
7.90468849, 8.85639467, 9.97255498, 10.79006309, 11.81988596,
0.04950500, -1.00000000, -0.01226628, 1.00000000, -0.59479469,
-0.10438305, 0.59822144, -2.00000000, -0.67109149, -0.09256692,
0.65171621, 2.00000000, -3.00000000, -1.68391999, -0.76681039,
-0.03354151, 0.71509146, 1.77615472, -2.00000000, -0.68661511,
-0.02497881, 0.66478398, 2.00000000, -2.00000000, -0.67032784,
-0.00920582, 0.64892756, 2.00000000, -2.00000000, -0.68561894,
0.03641869, 0.73021611, 1.68293863, -4.00000000, -2.72024184,
-1.80096059, -0.81696185, 0.03604685, 0.79232033, 1.70070730,
3.00000000, -4.00000000, -2.71795670, -1.80482986, -0.86001162,
0.03764903, 0.87723968, 1.79970771, 2.72685932, 3.67589143,
-5.00000000, -4.00000000, -2.85492548, -1.78996365, -0.83250358,
-0.01376828, 0.84195506, 1.78161105, 2.76754458, 4.00000000,
-6.00000000, -5.00000000, -3.82268811, -2.77563624, -1.82608163,
-0.86486114, -0.02671886, 0.86693165, 1.88422879, 2.86248347,
3.95632216, -7.00000000, -6.00000000, -5.00000000, -3.77533988,
-2.86391432, -1.87052039, -0.90513658, 0.06271236, 0.91083620,
1.85734756, 2.86031688, 3.82019418, 4.94420394, 6.00000000,
-11.00000000,-10.00000000, -9.00000000, -8.00000000, -6.91952415,
-6.00000000, -4.92044374, -3.87845165, -2.87392362, -1.88413020,
-0.91915740, 0.00318517, 0.91602800, 1.89664838, 2.88925058,
3.84123856, 4.78988651, 5.94526812, 6.81953917, 8.00000000,
-9.00000000, -8.00000000, -7.03319143, -5.94530963, -4.86669720,
-3.92438007, -2.88620396, -1.92848070, -0.94365985, 0.01671855,
0.97349410, 1.93419878, 2.89740109, 3.89662823, 4.83235583,
5.88106535, 6.80328232, 8.00000000,-13.00000000,-12.00000000,
-11.00000000,-10.00000000, -9.00000000, -7.86033489, -6.83344055,
-5.89844215, -4.90811454, -3.94841298, -2.95820490, -1.98627966,
-0.99161468, -0.02286136, 0.96055651, 1.95052433, 2.93969396,
3.94304346, 4.88522624, 5.87434241, 6.78309433, 7.87244101,
9.00000000, 10.00000000,-12.09117356,-11.00000000,-10.00000000,
-8.84766108, -7.86934236, -6.98544896, -5.94233429, -4.95583292,
-3.95575986, -2.97085529, -1.98955811, -0.99359873, -0.00485413,
0.98298870, 1.98093258, 2.96430203, 3.95540216, 4.96915010,
5.96775124, 6.99236918, 7.96503302, 8.99864542, 9.85857723,
10.96541926, 11.91647197, 12.71060069,-26.00000000,-25.00000000,
-24.00585596,-23.11642573,-22.14271284,-20.89800711,-19.87815799,
-19.05036354,-17.88555651,-16.86471209,-15.97711073,-14.94012359,
-14.02661226,-12.98243228,-11.97489256,-10.97402777, -9.96425624,
-9.01085220, -7.97372506, -6.98795002, -5.97271328, -5.00191694,
-3.98055849, -2.98458048, -1.99470442, -0.99656768, -0.00825666,
1.00272004, 1.99922218, 2.99357669, 4.01407905, 5.01003897,
5.98115528, 7.00018958, 8.00338125, 8.98981046, 9.98990318,
10.96341479, 11.96866930, 12.99175139, 13.94580443, 14.95745083,
15.98992869, 16.97484646, 17.99630043, 18.93396897, 19.88347741,
20.96532482, 21.92191032, 23.22314702 };
/* cdf tables for quantizer indices */
const uint16_t WebRtcIsac_kQKltCdfGain[404] = {
0, 13, 301, 3730, 61784, 65167, 65489, 65535, 0, 17,
142, 314, 929, 2466, 7678, 56450, 63463, 64740, 65204, 65426,
65527, 65535, 0, 8, 100, 724, 6301, 60105, 65125, 65510,
65531, 65535, 0, 13, 117, 368, 1068, 3010, 11928, 53603,
61177, 63404, 64505, 65108, 65422, 65502, 65531, 65535, 0, 4,
17, 96, 410, 1859, 12125, 54361, 64103, 65305, 65497, 65535,
0, 4, 88, 230, 469, 950, 1746, 3228, 6092, 16592,
44756, 56848, 61256, 63308, 64325, 64920, 65309, 65460, 65502,
65522, 65535, 0, 88, 352, 1675, 6339, 20749, 46686, 59284, 63525,
64949, 65359, 65502, 65527, 65535, 0, 13, 38, 63, 117,
234, 381, 641, 929, 1407, 2043, 2809, 4032, 5753, 8792,
14407, 24308, 38941, 48947, 55403, 59293, 61411, 62688, 63630,
64329, 64840, 65188, 65376, 65472, 65506, 65527, 65531, 65535,
0, 8, 29, 75, 222, 615, 1327, 2801, 5623, 9931, 16094, 24966,
34419, 43458, 50676, 56186, 60055, 62500, 63936, 64765, 65225,
65435, 65514, 65535, 0, 8, 13, 15, 17, 21, 33, 59,
71, 92, 151, 243, 360, 456, 674, 934, 1223, 1583,
1989, 2504, 3031, 3617, 4354, 5154, 6163, 7411, 8780, 10747,
12874, 15591, 18974, 23027, 27436, 32020, 36948, 41830, 46205,
49797, 53042, 56094, 58418, 60360, 61763, 62818, 63559, 64103,
64509, 64798, 65045, 65162, 65288, 65363, 65447, 65506, 65522,
65531, 65533, 65535, 0, 4, 6, 25, 38, 71, 138, 264, 519, 808,
1227, 1825, 2516, 3408, 4279, 5560, 7092, 9197, 11420, 14108,
16947, 20300, 23926, 27459, 31164, 34827, 38575, 42178, 45540,
48747, 51444, 54090, 56426, 58460, 60080, 61595, 62734, 63668,
64275, 64673, 64936, 65112, 65217, 65334, 65426, 65464, 65477,
65489, 65518, 65527, 65529, 65531, 65533, 65535, 0, 2, 4, 8, 10,
12, 14, 16, 21, 33, 50, 71, 84, 92, 105, 138, 180, 255, 318,
377, 435, 473, 511, 590, 682, 758, 913, 1097, 1256, 1449, 1671,
1884, 2169, 2445, 2772, 3157, 3563, 3944, 4375, 4848, 5334, 5820,
6448, 7101, 7716, 8378, 9102, 9956, 10752, 11648, 12707, 13670,
14758, 15910, 17187, 18472, 19627, 20649, 21951, 23169, 24283,
25552, 26862, 28227, 29391, 30764, 31882, 33213, 34432, 35600,
36910, 38116, 39464, 40729, 41872, 43144, 44371, 45514, 46762,
47813, 48968, 50069, 51032, 51974, 52908, 53737, 54603, 55445,
56282, 56990, 57572, 58191, 58840, 59410, 59887, 60264, 60607,
60946, 61269, 61516, 61771, 61960, 62198, 62408, 62558, 62776,
62985, 63207, 63408, 63546, 63739, 63906, 64070, 64237, 64371,
64551, 64677, 64836, 64999, 65095, 65213, 65284, 65338, 65380,
65426, 65447, 65472, 65485, 65487, 65489, 65502, 65510, 65512,
65514, 65516, 65518, 65522, 65531, 65533, 65535 };
const uint16_t WebRtcIsac_kQKltCdfShape[686] = {
0, 65535, 0, 65535, 0, 65535, 0, 65535, 0, 65535,
0, 65535, 0, 65535, 0, 65535, 0, 65535, 0, 4,
65535, 0, 8, 65514, 65535, 0, 29, 65481, 65535, 0,
121, 65439, 65535, 0, 239, 65284, 65535, 0, 8, 779,
64999, 65527, 65535, 0, 8, 888, 64693, 65522, 65535, 0,
29, 2604, 62843, 65497, 65531, 65535, 0, 25, 176, 4576,
61164, 65275, 65527, 65535, 0, 65535, 0, 65535, 0, 65535,
0, 65535, 0, 4, 65535, 0, 65535, 0, 65535, 0,
65535, 0, 65535, 0, 4, 65535, 0, 33, 65502, 65535,
0, 54, 65481, 65535, 0, 251, 65309, 65535, 0, 611,
65074, 65535, 0, 1273, 64292, 65527, 65535, 0, 4, 1809,
63940, 65518, 65535, 0, 88, 4392, 60603, 65426, 65531, 65535,
0, 25, 419, 7046, 57756, 64961, 65514, 65531, 65535, 0,
65535, 0, 65535, 0, 65535, 0, 65535, 0, 4, 65531,
65535, 0, 65535, 0, 8, 65531, 65535, 0, 4, 65527,
65535, 0, 17, 65510, 65535, 0, 42, 65481, 65535, 0,
197, 65342, 65531, 65535, 0, 385, 65154, 65535, 0, 1005,
64522, 65535, 0, 8, 1985, 63469, 65533, 65535, 0, 38,
3119, 61884, 65514, 65535, 0, 4, 6, 67, 4961, 60804,
65472, 65535, 0, 17, 565, 9182, 56538, 65087, 65514, 65535,
0, 8, 63, 327, 2118, 14490, 52774, 63839, 65376, 65522,
65535, 0, 65535, 0, 65535, 0, 65535, 0, 65535, 0,
17, 65522, 65535, 0, 59, 65489, 65535, 0, 50, 65522,
65535, 0, 54, 65489, 65535, 0, 310, 65179, 65535, 0,
615, 64836, 65535, 0, 4, 1503, 63965, 65535, 0, 2780,
63383, 65535, 0, 21, 3919, 61051, 65527, 65535, 0, 84,
6674, 59929, 65435, 65535, 0, 4, 255, 7976, 55784, 65150,
65518, 65531, 65535, 0, 4, 8, 582, 10726, 53465, 64949,
65518, 65535, 0, 29, 339, 3006, 17555, 49517, 62956, 65200,
65497, 65531, 65535, 0, 2, 33, 138, 565, 2324, 7670,
22089, 45966, 58949, 63479, 64966, 65380, 65518, 65535, 0, 65535,
0, 65535, 0, 2, 65533, 65535, 0, 46, 65514, 65535,
0, 414, 65091, 65535, 0, 540, 64911, 65535, 0, 419,
65162, 65535, 0, 976, 64790, 65535, 0, 2977, 62495, 65531,
65535, 0, 4, 3852, 61034, 65527, 65535, 0, 4, 29,
6021, 60243, 65468, 65535, 0, 84, 6711, 58066, 65418, 65535,
0, 13, 281, 9550, 54917, 65125, 65506, 65535, 0, 2,
63, 984, 12108, 52644, 64342, 65435, 65527, 65535, 0, 29,
251, 2014, 14871, 47553, 62881, 65229, 65518, 65535, 0, 13,
142, 749, 4220, 18497, 45200, 60913, 64823, 65426, 65527, 65535,
0, 13, 71, 264, 1176, 3789, 10500, 24480, 43488, 56324,
62315, 64493, 65242, 65464, 65514, 65522, 65531, 65535, 0, 4,
13, 38, 109, 205, 448, 850, 1708, 3429, 6276, 11371,
19221, 29734, 40955, 49391, 55411, 59460, 62102, 63793, 64656,
65150, 65401, 65485, 65522, 65531, 65535, 0, 65535, 0, 2, 65533,
65535, 0, 1160, 65476, 65535, 0, 2, 6640, 64763, 65533,
65535, 0, 2, 38, 9923, 61009, 65527, 65535, 0, 2,
4949, 63092, 65533, 65535, 0, 2, 3090, 63398, 65533, 65535,
0, 2, 2520, 58744, 65510, 65535, 0, 2, 13, 544,
8784, 51403, 65148, 65533, 65535, 0, 2, 25, 1017, 10412,
43550, 63651, 65489, 65527, 65535, 0, 2, 4, 29, 783,
13377, 52462, 64524, 65495, 65533, 65535, 0, 2, 4, 6,
100, 1817, 18451, 52590, 63559, 65376, 65531, 65535, 0, 2,
4, 6, 46, 385, 2562, 11225, 37416, 60488, 65026, 65487,
65529, 65533, 65535, 0, 2, 4, 6, 8, 10, 12,
42, 222, 971, 5221, 19811, 45048, 60312, 64486, 65294, 65474,
65525, 65529, 65533, 65535, 0, 2, 4, 8, 71, 167,
666, 2533, 7875, 19622, 38082, 54359, 62108, 64633, 65290, 65495,
65529, 65533, 65535, 0, 2, 4, 6, 8, 10, 13,
109, 586, 1930, 4949, 11600, 22641, 36125, 48312, 56899, 61495,
63927, 64932, 65389, 65489, 65518, 65531, 65533, 65535, 0, 4,
6, 8, 67, 209, 712, 1838, 4195, 8432, 14432, 22834,
31723, 40523, 48139, 53929, 57865, 60657, 62403, 63584, 64363,
64907, 65167, 65372, 65472, 65514, 65535, 0, 2, 4, 13, 25,
42, 46, 50, 75, 113, 147, 281, 448, 657, 909,
1185, 1591, 1976, 2600, 3676, 5317, 7398, 9914, 12941, 16169,
19477, 22885, 26464, 29851, 33360, 37228, 41139, 44802, 48654,
52058, 55181, 57676, 59581, 61022, 62190, 63107, 63676, 64199,
64547, 64924, 65158, 65313, 65430, 65481, 65518, 65535 };
/* pointers to cdf tables for quantizer indices */
const uint16_t *WebRtcIsac_kQKltCdfPtrGain[12] = {
WebRtcIsac_kQKltCdfGain +0 +0, WebRtcIsac_kQKltCdfGain +0 +8,
WebRtcIsac_kQKltCdfGain +0 +22, WebRtcIsac_kQKltCdfGain +0 +32,
WebRtcIsac_kQKltCdfGain +0 +48, WebRtcIsac_kQKltCdfGain +0 +60,
WebRtcIsac_kQKltCdfGain +0 +81, WebRtcIsac_kQKltCdfGain +0 +95,
WebRtcIsac_kQKltCdfGain +0 +128, WebRtcIsac_kQKltCdfGain +0 +152,
WebRtcIsac_kQKltCdfGain +0 +210, WebRtcIsac_kQKltCdfGain +0 +264 };
const uint16_t *WebRtcIsac_kQKltCdfPtrShape[108] = {
WebRtcIsac_kQKltCdfShape +0 +0, WebRtcIsac_kQKltCdfShape +0 +2,
WebRtcIsac_kQKltCdfShape +0 +4, WebRtcIsac_kQKltCdfShape +0 +6,
WebRtcIsac_kQKltCdfShape +0 +8, WebRtcIsac_kQKltCdfShape +0 +10,
WebRtcIsac_kQKltCdfShape +0 +12, WebRtcIsac_kQKltCdfShape +0 +14,
WebRtcIsac_kQKltCdfShape +0 +16, WebRtcIsac_kQKltCdfShape +0 +18,
WebRtcIsac_kQKltCdfShape +0 +21, WebRtcIsac_kQKltCdfShape +0 +25,
WebRtcIsac_kQKltCdfShape +0 +29, WebRtcIsac_kQKltCdfShape +0 +33,
WebRtcIsac_kQKltCdfShape +0 +37, WebRtcIsac_kQKltCdfShape +0 +43,
WebRtcIsac_kQKltCdfShape +0 +49, WebRtcIsac_kQKltCdfShape +0 +56,
WebRtcIsac_kQKltCdfShape +0 +64, WebRtcIsac_kQKltCdfShape +0 +66,
WebRtcIsac_kQKltCdfShape +0 +68, WebRtcIsac_kQKltCdfShape +0 +70,
WebRtcIsac_kQKltCdfShape +0 +72, WebRtcIsac_kQKltCdfShape +0 +75,
WebRtcIsac_kQKltCdfShape +0 +77, WebRtcIsac_kQKltCdfShape +0 +79,
WebRtcIsac_kQKltCdfShape +0 +81, WebRtcIsac_kQKltCdfShape +0 +83,
WebRtcIsac_kQKltCdfShape +0 +86, WebRtcIsac_kQKltCdfShape +0 +90,
WebRtcIsac_kQKltCdfShape +0 +94, WebRtcIsac_kQKltCdfShape +0 +98,
WebRtcIsac_kQKltCdfShape +0 +102, WebRtcIsac_kQKltCdfShape +0 +107,
WebRtcIsac_kQKltCdfShape +0 +113, WebRtcIsac_kQKltCdfShape +0 +120,
WebRtcIsac_kQKltCdfShape +0 +129, WebRtcIsac_kQKltCdfShape +0 +131,
WebRtcIsac_kQKltCdfShape +0 +133, WebRtcIsac_kQKltCdfShape +0 +135,
WebRtcIsac_kQKltCdfShape +0 +137, WebRtcIsac_kQKltCdfShape +0 +141,
WebRtcIsac_kQKltCdfShape +0 +143, WebRtcIsac_kQKltCdfShape +0 +147,
WebRtcIsac_kQKltCdfShape +0 +151, WebRtcIsac_kQKltCdfShape +0 +155,
WebRtcIsac_kQKltCdfShape +0 +159, WebRtcIsac_kQKltCdfShape +0 +164,
WebRtcIsac_kQKltCdfShape +0 +168, WebRtcIsac_kQKltCdfShape +0 +172,
WebRtcIsac_kQKltCdfShape +0 +178, WebRtcIsac_kQKltCdfShape +0 +184,
WebRtcIsac_kQKltCdfShape +0 +192, WebRtcIsac_kQKltCdfShape +0 +200,
WebRtcIsac_kQKltCdfShape +0 +211, WebRtcIsac_kQKltCdfShape +0 +213,
WebRtcIsac_kQKltCdfShape +0 +215, WebRtcIsac_kQKltCdfShape +0 +217,
WebRtcIsac_kQKltCdfShape +0 +219, WebRtcIsac_kQKltCdfShape +0 +223,
WebRtcIsac_kQKltCdfShape +0 +227, WebRtcIsac_kQKltCdfShape +0 +231,
WebRtcIsac_kQKltCdfShape +0 +235, WebRtcIsac_kQKltCdfShape +0 +239,
WebRtcIsac_kQKltCdfShape +0 +243, WebRtcIsac_kQKltCdfShape +0 +248,
WebRtcIsac_kQKltCdfShape +0 +252, WebRtcIsac_kQKltCdfShape +0 +258,
WebRtcIsac_kQKltCdfShape +0 +264, WebRtcIsac_kQKltCdfShape +0 +273,
WebRtcIsac_kQKltCdfShape +0 +282, WebRtcIsac_kQKltCdfShape +0 +293,
WebRtcIsac_kQKltCdfShape +0 +308, WebRtcIsac_kQKltCdfShape +0 +310,
WebRtcIsac_kQKltCdfShape +0 +312, WebRtcIsac_kQKltCdfShape +0 +316,
WebRtcIsac_kQKltCdfShape +0 +320, WebRtcIsac_kQKltCdfShape +0 +324,
WebRtcIsac_kQKltCdfShape +0 +328, WebRtcIsac_kQKltCdfShape +0 +332,
WebRtcIsac_kQKltCdfShape +0 +336, WebRtcIsac_kQKltCdfShape +0 +341,
WebRtcIsac_kQKltCdfShape +0 +347, WebRtcIsac_kQKltCdfShape +0 +354,
WebRtcIsac_kQKltCdfShape +0 +360, WebRtcIsac_kQKltCdfShape +0 +368,
WebRtcIsac_kQKltCdfShape +0 +378, WebRtcIsac_kQKltCdfShape +0 +388,
WebRtcIsac_kQKltCdfShape +0 +400, WebRtcIsac_kQKltCdfShape +0 +418,
WebRtcIsac_kQKltCdfShape +0 +445, WebRtcIsac_kQKltCdfShape +0 +447,
WebRtcIsac_kQKltCdfShape +0 +451, WebRtcIsac_kQKltCdfShape +0 +455,
WebRtcIsac_kQKltCdfShape +0 +461, WebRtcIsac_kQKltCdfShape +0 +468,
WebRtcIsac_kQKltCdfShape +0 +474, WebRtcIsac_kQKltCdfShape +0 +480,
WebRtcIsac_kQKltCdfShape +0 +486, WebRtcIsac_kQKltCdfShape +0 +495,
WebRtcIsac_kQKltCdfShape +0 +505, WebRtcIsac_kQKltCdfShape +0 +516,
WebRtcIsac_kQKltCdfShape +0 +528, WebRtcIsac_kQKltCdfShape +0 +543,
WebRtcIsac_kQKltCdfShape +0 +564, WebRtcIsac_kQKltCdfShape +0 +583,
WebRtcIsac_kQKltCdfShape +0 +608, WebRtcIsac_kQKltCdfShape +0 +635 };
/* left KLT transforms */
const double WebRtcIsac_kKltT1Gain[4] = {
-0.79742827, 0.60341375, 0.60341375, 0.79742827 };
const double WebRtcIsac_kKltT1Shape[324] = {
0.00159597, 0.00049320, 0.00513821, 0.00021066, 0.01338581,
-0.00422367, -0.00272072, 0.00935107, 0.02047622, 0.02691189,
0.00478236, 0.03969702, 0.00886698, 0.04877604, -0.10898362,
-0.05930891, -0.03415047, 0.98889721, 0.00293558, -0.00035282,
0.01156321, -0.00195341, -0.00937631, 0.01052213, -0.02551163,
0.01644059, 0.03189927, 0.07754773, -0.08742313, -0.03026338,
0.05136248, -0.14395974, 0.17725040, 0.22664856, 0.93380230,
0.07076411, 0.00557890, -0.00222834, 0.01377569, 0.01466808,
0.02847361, -0.00603178, 0.02382480, -0.01210452, 0.03797267,
-0.02371480, 0.11260335, -0.07366682, 0.00453436, -0.04136941,
-0.07912843, -0.95031418, 0.25295337, -0.05302216, -0.00617554,
-0.00044040, -0.00653778, 0.01097838, 0.01529174, 0.01374431,
-0.00748512, -0.00020034, 0.02432713, 0.11101570, -0.08556891,
0.09282249, -0.01029446, 0.67556443, -0.67454300, 0.06910063,
0.20866865, -0.10318050, 0.00932175, 0.00524058, 0.00803610,
-0.00594676, -0.01082578, 0.01069906, 0.00546768, 0.01565291,
0.06816200, 0.10201227, 0.16812734, 0.22984074, 0.58213170,
-0.54138651, -0.51379962, 0.06847390, -0.01920037, -0.04592324,
-0.00467394, 0.00328858, 0.00377424, -0.00987448, 0.08222096,
-0.00377301, 0.04551941, -0.02592517, 0.16317082, 0.13077530,
0.22702921, -0.31215289, -0.69645962, -0.38047101, -0.39339411,
0.11124777, 0.02508035, -0.00708074, 0.00400344, 0.00040331,
0.01142402, 0.01725406, 0.01635170, 0.14285366, 0.03949233,
-0.05905676, 0.05877154, -0.17497577, -0.32479440, 0.80754464,
-0.38085603, -0.17055430, -0.03168622, -0.07531451, 0.02942002,
-0.02148095, -0.00754114, -0.00322372, 0.00567812, -0.01701521,
-0.12358320, 0.11473564, 0.09070136, 0.06533068, -0.22560802,
0.19209022, 0.81605094, 0.36592275, -0.09919829, 0.16667122,
0.16300725, 0.04803807, 0.06739263, -0.00156752, -0.01685302,
-0.00905240, -0.02297836, -0.00469939, 0.06310613, -0.16391930,
0.10919511, 0.12529293, 0.85581322, -0.32145522, 0.24539076,
0.07181839, 0.07289591, 0.14066759, 0.10406711, 0.05815518,
0.01072680, -0.00759339, 0.00053486, -0.00044865, 0.03407361,
0.01645348, 0.08758579, 0.27722240, 0.53665485, -0.74853376,
-0.01118192, -0.19805430, 0.06130619, -0.09675299, 0.08978480,
0.03405255, -0.00706867, 0.05102045, 0.03250746, 0.01849966,
-0.01216314, -0.01184187, -0.01579288, 0.00114807, 0.11376166,
0.88342114, -0.36425379, 0.13863190, 0.12524180, -0.13553892,
0.04715856, -0.12341103, 0.04531568, 0.01899360, -0.00206897,
0.00567768, -0.01444163, 0.00411946, -0.00855896, 0.00381663,
-0.01664861, -0.05534280, 0.21328278, 0.20161162, 0.72360394,
0.59130708, -0.08043791, 0.08757349, -0.13893918, -0.05147377,
0.02680690, -0.01144070, 0.00625162, -0.00634215, -0.01248947,
-0.00329455, -0.00609625, -0.00136305, -0.05097048, -0.01029851,
0.25065384, -0.16856837, -0.07123372, 0.15992623, -0.39487617,
-0.79972301, 0.18118185, -0.04826639, -0.01805578, -0.02927253,
-0.16400618, 0.07472763, 0.10376449, 0.01705406, 0.01065801,
-0.01500498, 0.02039914, 0.37776349, -0.84484186, 0.10434286,
0.15616990, 0.13474456, -0.00906238, -0.25238368, -0.03820885,
-0.10650905, -0.03880833, -0.03660028, -0.09640894, 0.00583314,
0.01922097, 0.01489911, -0.02431117, -0.09372217, 0.39404721,
-0.84786223, -0.31277121, 0.03193850, 0.01974060, 0.01887901,
0.00337911, -0.11359599, -0.02792521, -0.03220184, -0.01533311,
0.00015962, -0.04225043, -0.00933965, 0.00675311, 0.00206060,
0.15926771, 0.40199829, -0.80792558, -0.35591604, -0.17169764,
0.02830436, 0.02459982, -0.03438589, 0.00718705, -0.01798329,
-0.01594508, -0.00702430, -0.00952419, -0.00962701, -0.01307212,
-0.01749740, 0.01299602, 0.00587270, -0.36103108, -0.82039266,
-0.43092844, -0.08500097, -0.04361674, -0.00333482, 0.01250434,
-0.02538295, -0.00921797, 0.01645071, -0.01400872, 0.00317607,
0.00003277, -0.01617646, -0.00616863, -0.00882661, 0.00466157,
0.00353237, 0.91803104, -0.39503305, -0.02048964, 0.00060125,
0.01980634, 0.00300109, 0.00313880, 0.00657337, 0.00715163,
0.00000261, 0.00854276, -0.00154825, -0.00516128, 0.00909527,
0.00095609, 0.00701196, -0.00221867, -0.00156741 };
/* right KLT transforms */
const double WebRtcIsac_kKltT2Gain[36] = {
0.14572837, -0.45446306, 0.61990621, -0.52197033, 0.32145074,
-0.11026900, -0.20698282, 0.48962182, -0.27127933, -0.33627476,
0.65094037, -0.32715751, 0.40262573, -0.47844405, -0.33876075,
0.44130653, 0.37383966, -0.39964662, -0.51730480, 0.06611973,
0.49030187, 0.47512886, -0.02141226, -0.51129451, -0.58578569,
-0.39132064, -0.13187771, 0.15649421, 0.40735596, 0.54396897,
0.40381276, 0.40904942, 0.41179766, 0.41167576, 0.40840251,
0.40468132 };
const double WebRtcIsac_kKltT2Shape[36] = {
0.13427386, -0.35132558, 0.52506528, -0.59419077, 0.45075085,
-0.16312057, 0.29857439, -0.58660147, 0.34265431, 0.20879510,
-0.56063262, 0.30238345, 0.43308283, -0.41186999, -0.35288681,
0.42768996, 0.36094634, -0.45284910, -0.47116680, 0.02893449,
0.54326135, 0.45249040, -0.06264420, -0.52283830, 0.57137758,
0.44298139, 0.12617554, -0.20819946, -0.42324603, -0.48876443,
0.39597050, 0.40713935, 0.41389880, 0.41512486, 0.41130400,
0.40575001 };
/* means of log gains and LAR coefficients*/
const double WebRtcIsac_kLpcMeansGain[12] = {
-6.86881911, -5.35075273, -6.86792680, -5.36200897, -6.86401538,
-5.36921533, -6.86802969, -5.36893966, -6.86538097, -5.36315063,
-6.85535304, -5.35155315 };
const double WebRtcIsac_kLpcMeansShape[108] = {
-0.91232981, 0.26258634, -0.33716701, 0.08477430, -0.03378426,
0.14423909, 0.07036185, 0.06155019, 0.01490385, 0.04138740,
0.01427317, 0.01288970, 0.83872106, 0.25750199, 0.07988929,
-0.01957923, 0.00831390, 0.01770300, -0.90957164, 0.25732216,
-0.33385344, 0.08735740, -0.03715332, 0.14584917, 0.06998990,
0.06131968, 0.01504379, 0.04067339, 0.01428039, 0.01406460,
0.83846243, 0.26169862, 0.08109025, -0.01767055, 0.00970539,
0.01954310, -0.90490803, 0.24656405, -0.33578607, 0.08843286,
-0.03749139, 0.14443959, 0.07214669, 0.06170993, 0.01449947,
0.04134309, 0.01314762, 0.01413471, 0.83895203, 0.26748062,
0.08197507, -0.01781298, 0.00885967, 0.01922394, -0.90922472,
0.24495889, -0.33921540, 0.08877169, -0.03581332, 0.14199172,
0.07444032, 0.06185940, 0.01502054, 0.04185113, 0.01276579,
0.01355457, 0.83645358, 0.26631720, 0.08119697, -0.01835449,
0.00788512, 0.01846446, -0.90482253, 0.24658310, -0.34019734,
0.08281090, -0.03486038, 0.14359248, 0.07401336, 0.06001471,
0.01528421, 0.04254560, 0.01321472, 0.01240799, 0.83857127,
0.26281654, 0.08174380, -0.02099842, 0.00755176, 0.01699448,
-0.90132307, 0.25174308, -0.33838268, 0.07883863, -0.02877906,
0.14105407, 0.07220290, 0.06000352, 0.01684879, 0.04226844,
0.01331331, 0.01269244, 0.83832138, 0.25467485, 0.08118028,
-0.02120528, 0.00747832, 0.01567212 };

View File

@ -0,0 +1,100 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* lpc_tables.h
*
* header file for coding tables for the LPC coefficients
*
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_TABLES_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_TABLES_H_
#include "structs.h"
#include "settings.h"
#define KLT_STEPSIZE 1.00000000
#define KLT_NUM_AVG_GAIN 0
#define KLT_NUM_AVG_SHAPE 0
#define KLT_NUM_MODELS 3
#define LPC_GAIN_SCALE 4.000f
#define LPC_LOBAND_SCALE 2.100f
#define LPC_LOBAND_ORDER ORDERLO
#define LPC_HIBAND_SCALE 0.450f
#define LPC_HIBAND_ORDER ORDERHI
#define LPC_GAIN_ORDER 2
#define LPC_SHAPE_ORDER (LPC_LOBAND_ORDER + LPC_HIBAND_ORDER)
#define KLT_ORDER_GAIN (LPC_GAIN_ORDER * SUBFRAMES)
#define KLT_ORDER_SHAPE (LPC_SHAPE_ORDER * SUBFRAMES)
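/* With the values from settings.h, KLT_ORDER_GAIN and KLT_ORDER_SHAPE match the
   sizes of the 12-element gain and 108-element shape tables declared below. */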
/* cdf array for model indicator */
extern const uint16_t WebRtcIsac_kQKltModelCdf[KLT_NUM_MODELS+1];
/* pointer to cdf array for model indicator */
extern const uint16_t *WebRtcIsac_kQKltModelCdfPtr[1];
/* initial cdf index for decoder of model indicator */
extern const uint16_t WebRtcIsac_kQKltModelInitIndex[1];
/* offset to go from rounded value to quantization index */
extern const short WebRtcIsac_kQKltQuantMinGain[12];
extern const short WebRtcIsac_kQKltQuantMinShape[108];
/* maximum quantization index */
extern const uint16_t WebRtcIsac_kQKltMaxIndGain[12];
extern const uint16_t WebRtcIsac_kQKltMaxIndShape[108];
/* index offset */
extern const uint16_t WebRtcIsac_kQKltOffsetGain[12];
extern const uint16_t WebRtcIsac_kQKltOffsetShape[108];
/* initial cdf index for KLT coefficients */
extern const uint16_t WebRtcIsac_kQKltInitIndexGain[12];
extern const uint16_t WebRtcIsac_kQKltInitIndexShape[108];
/* quantizer representation levels */
extern const double WebRtcIsac_kQKltLevelsGain[392];
extern const double WebRtcIsac_kQKltLevelsShape[578];
/* cdf tables for quantizer indices */
extern const uint16_t WebRtcIsac_kQKltCdfGain[404];
extern const uint16_t WebRtcIsac_kQKltCdfShape[686];
/* pointers to cdf tables for quantizer indices */
extern const uint16_t *WebRtcIsac_kQKltCdfPtrGain[12];
extern const uint16_t *WebRtcIsac_kQKltCdfPtrShape[108];
/* left KLT transforms */
extern const double WebRtcIsac_kKltT1Gain[4];
extern const double WebRtcIsac_kKltT1Shape[324];
/* right KLT transforms */
extern const double WebRtcIsac_kKltT2Gain[36];
extern const double WebRtcIsac_kKltT2Shape[36];
/* means of log gains and LAR coefficients */
extern const double WebRtcIsac_kLpcMeansGain[12];
extern const double WebRtcIsac_kLpcMeansShape[108];
#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_TABLES_H_ */

View File

@ -0,0 +1,41 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_OS_SPECIFIC_INLINE_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_OS_SPECIFIC_INLINE_H_
#include <math.h>
#include "webrtc/typedefs.h"
#if defined(WEBRTC_POSIX)
#define WebRtcIsac_lrint lrint
#elif (defined(WEBRTC_ARCH_X86) && defined(WIN32))
static __inline long int WebRtcIsac_lrint(double x_dbl) {
long int x_int;
__asm {
fld x_dbl
fistp x_int
};
return x_int;
}
#else // Do a slow but correct implementation of lrint
static __inline long int WebRtcIsac_lrint(double x_dbl) {
long int x_int;
x_int = (long int)floor(x_dbl + 0.499999999999);
return x_int;
}
#endif
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_OS_SPECIFIC_INLINE_H_

View File

@ -0,0 +1,623 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "pitch_estimator.h"
#include <math.h>
#include <memory.h>
#include <string.h>
#ifdef WEBRTC_ANDROID
#include <stdlib.h>
#endif
static const double kInterpolWin[8] = {-0.00067556028640, 0.02184247643159, -0.12203175715679, 0.60086484101160,
0.60086484101160, -0.12203175715679, 0.02184247643159, -0.00067556028640};
/* interpolation filter */
__inline static void IntrepolFilter(double *data_ptr, double *intrp)
{
*intrp = kInterpolWin[0] * data_ptr[-3];
*intrp += kInterpolWin[1] * data_ptr[-2];
*intrp += kInterpolWin[2] * data_ptr[-1];
*intrp += kInterpolWin[3] * data_ptr[0];
*intrp += kInterpolWin[4] * data_ptr[1];
*intrp += kInterpolWin[5] * data_ptr[2];
*intrp += kInterpolWin[6] * data_ptr[3];
*intrp += kInterpolWin[7] * data_ptr[4];
}
/* 2D parabolic interpolation */
/* probably some 0.5 factors can be eliminated, and the square roots can be removed from the Cholesky factorization. */
__inline static void Intrpol2D(double T[3][3], double *x, double *y, double *peak_val)
{
double c, b[2], A[2][2];
double t1, t2, d;
double delta1, delta2;
// double T[3][3] = {{-1.25, -.25,-.25}, {-.25, .75, .75}, {-.25, .75, .75}};
// should result in: delta1 = 0.5; delta2 = 0.0; peak_val = 1.0
c = T[1][1];
b[0] = 0.5 * (T[1][2] + T[2][1] - T[0][1] - T[1][0]);
b[1] = 0.5 * (T[1][0] + T[2][1] - T[0][1] - T[1][2]);
A[0][1] = -0.5 * (T[0][1] + T[2][1] - T[1][0] - T[1][2]);
t1 = 0.5 * (T[0][0] + T[2][2]) - c;
t2 = 0.5 * (T[2][0] + T[0][2]) - c;
d = (T[0][1] + T[1][2] + T[1][0] + T[2][1]) - 4.0 * c - t1 - t2;
A[0][0] = -t1 - 0.5 * d;
A[1][1] = -t2 - 0.5 * d;
/* deal with singularities or ill-conditioned cases */
if ( (A[0][0] < 1e-7) || ((A[0][0] * A[1][1] - A[0][1] * A[0][1]) < 1e-7) ) {
*peak_val = T[1][1];
return;
}
/* Cholesky decomposition: replace A by upper-triangular factor */
A[0][0] = sqrt(A[0][0]);
A[0][1] = A[0][1] / A[0][0];
A[1][1] = sqrt(A[1][1] - A[0][1] * A[0][1]);
/* compute [x; y] = -0.5 * inv(A) * b */
t1 = b[0] / A[0][0];
t2 = (b[1] - t1 * A[0][1]) / A[1][1];
delta2 = t2 / A[1][1];
delta1 = 0.5 * (t1 - delta2 * A[0][1]) / A[0][0];
delta2 *= 0.5;
/* limit norm */
t1 = delta1 * delta1 + delta2 * delta2;
if (t1 > 1.0) {
delta1 /= t1;
delta2 /= t1;
}
*peak_val = 0.5 * (b[0] * delta1 + b[1] * delta2) + c;
*x += delta1;
*y += delta2;
}
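/* Normalized cross-correlation: a fixed, later segment of the input is correlated
   against progressively less-delayed earlier segments, each sum normalized by the
   energy of the earlier segment. Results are written from the end of outcorr
   towards the beginning, so short lags end up at low indices. */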
static void PCorr(const double *in, double *outcorr)
{
double sum, ysum, prod;
const double *x, *inptr;
int k, n;
//ysum = 1e-6; /* use this with float (instead of double)! */
ysum = 1e-13;
sum = 0.0;
x = in + PITCH_MAX_LAG/2 + 2;
for (n = 0; n < PITCH_CORR_LEN2; n++) {
ysum += in[n] * in[n];
sum += x[n] * in[n];
}
outcorr += PITCH_LAG_SPAN2 - 1; /* index of last element in array */
*outcorr = sum / sqrt(ysum);
for (k = 1; k < PITCH_LAG_SPAN2; k++) {
ysum -= in[k-1] * in[k-1];
ysum += in[PITCH_CORR_LEN2 + k - 1] * in[PITCH_CORR_LEN2 + k - 1];
sum = 0.0;
inptr = &in[k];
prod = x[0] * inptr[0];
for (n = 1; n < PITCH_CORR_LEN2; n++) {
sum += prod;
prod = x[n] * inptr[n];
}
sum += prod;
outcorr--;
*outcorr = sum / sqrt(ysum);
}
}
void WebRtcIsac_InitializePitch(const double *in,
const double old_lag,
const double old_gain,
PitchAnalysisStruct *State,
double *lags)
{
double buf_dec[PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2+2];
double ratio, log_lag, gain_bias;
double bias;
double corrvec1[PITCH_LAG_SPAN2];
double corrvec2[PITCH_LAG_SPAN2];
int m, k;
// Allocate 10 extra entries at the beginning of the CorrSurf buffer.
double corrSurfBuff[10 + (2*PITCH_BW+3)*(PITCH_LAG_SPAN2+4)];
double* CorrSurf[2*PITCH_BW+3];
double *CorrSurfPtr1, *CorrSurfPtr2;
double LagWin[3] = {0.2, 0.5, 0.98};
int ind1, ind2, peaks_ind, peak, max_ind;
int peaks[PITCH_MAX_NUM_PEAKS];
double adj, gain_tmp;
double corr, corr_max;
double intrp_a, intrp_b, intrp_c, intrp_d;
double peak_vals[PITCH_MAX_NUM_PEAKS];
double lags1[PITCH_MAX_NUM_PEAKS];
double lags2[PITCH_MAX_NUM_PEAKS];
double T[3][3];
int row;
for(k = 0; k < 2*PITCH_BW+3; k++)
{
CorrSurf[k] = &corrSurfBuff[10 + k * (PITCH_LAG_SPAN2+4)];
}
/* reset CorrSurf matrix */
memset(corrSurfBuff, 0, sizeof(double) * (10 + (2*PITCH_BW+3) * (PITCH_LAG_SPAN2+4)));
// initialized here only to silence compiler warnings -DH
max_ind = 0;
peak = 0;
/* copy old values from state buffer */
memcpy(buf_dec, State->dec_buffer, sizeof(double) * (PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2));
/* decimation; put result after the old values */
WebRtcIsac_DecimateAllpass(in, State->decimator_state, PITCH_FRAME_LEN,
&buf_dec[PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2]);
/* low-pass filtering */
for (k = PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2; k < PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2+2; k++)
buf_dec[k] += 0.75 * buf_dec[k-1] - 0.25 * buf_dec[k-2];
/* copy end part back into state buffer */
memcpy(State->dec_buffer, buf_dec+PITCH_FRAME_LEN/2, sizeof(double) * (PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2));
/* compute correlation for first and second half of the frame */
PCorr(buf_dec, corrvec1);
PCorr(buf_dec + PITCH_CORR_STEP2, corrvec2);
/* bias towards pitch lag of previous frame */
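/* The bias is a bump in the log-lag domain, centred at half the previous lag,
   with height proportional to the squared previous gain (capped below). */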
log_lag = log(0.5 * old_lag);
gain_bias = 4.0 * old_gain * old_gain;
if (gain_bias > 0.8) gain_bias = 0.8;
for (k = 0; k < PITCH_LAG_SPAN2; k++)
{
ratio = log((double) (k + (PITCH_MIN_LAG/2-2))) - log_lag;
bias = 1.0 + gain_bias * exp(-5.0 * ratio * ratio);
corrvec1[k] *= bias;
}
/* taper correlation functions */
for (k = 0; k < 3; k++) {
gain_tmp = LagWin[k];
corrvec1[k] *= gain_tmp;
corrvec2[k] *= gain_tmp;
corrvec1[PITCH_LAG_SPAN2-1-k] *= gain_tmp;
corrvec2[PITCH_LAG_SPAN2-1-k] *= gain_tmp;
}
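/* Each row of the correlation surface corresponds to a different lag change
   between the two half-frames; the middle row (index PITCH_BW) corresponds to a
   constant lag across the frame. */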
corr_max = 0.0;
/* fill middle row of correlation surface */
ind1 = 0;
ind2 = 0;
CorrSurfPtr1 = &CorrSurf[PITCH_BW][2];
for (k = 0; k < PITCH_LAG_SPAN2; k++) {
corr = corrvec1[ind1++] + corrvec2[ind2++];
CorrSurfPtr1[k] = corr;
if (corr > corr_max) {
corr_max = corr; /* update maximum */
max_ind = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
}
}
/* fill first and last rows of correlation surface */
ind1 = 0;
ind2 = PITCH_BW;
CorrSurfPtr1 = &CorrSurf[0][2];
CorrSurfPtr2 = &CorrSurf[2*PITCH_BW][PITCH_BW+2];
for (k = 0; k < PITCH_LAG_SPAN2-PITCH_BW; k++) {
ratio = ((double) (ind1 + 12)) / ((double) (ind2 + 12));
adj = 0.2 * ratio * (2.0 - ratio); /* adjustment factor; inverse parabola as a function of ratio */
corr = adj * (corrvec1[ind1] + corrvec2[ind2]);
CorrSurfPtr1[k] = corr;
if (corr > corr_max) {
corr_max = corr; /* update maximum */
max_ind = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
}
corr = adj * (corrvec1[ind2++] + corrvec2[ind1++]);
CorrSurfPtr2[k] = corr;
if (corr > corr_max) {
corr_max = corr; /* update maximum */
max_ind = (int)(&CorrSurfPtr2[k] - &CorrSurf[0][0]);
}
}
/* fill second and next to last rows of correlation surface */
ind1 = 0;
ind2 = PITCH_BW-1;
CorrSurfPtr1 = &CorrSurf[1][2];
CorrSurfPtr2 = &CorrSurf[2*PITCH_BW-1][PITCH_BW+1];
for (k = 0; k < PITCH_LAG_SPAN2-PITCH_BW+1; k++) {
ratio = ((double) (ind1 + 12)) / ((double) (ind2 + 12));
adj = 0.9 * ratio * (2.0 - ratio); /* adjustment factor; inverse parabola as a function of ratio */
corr = adj * (corrvec1[ind1] + corrvec2[ind2]);
CorrSurfPtr1[k] = corr;
if (corr > corr_max) {
corr_max = corr; /* update maximum */
max_ind = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
}
corr = adj * (corrvec1[ind2++] + corrvec2[ind1++]);
CorrSurfPtr2[k] = corr;
if (corr > corr_max) {
corr_max = corr; /* update maximum */
max_ind = (int)(&CorrSurfPtr2[k] - &CorrSurf[0][0]);
}
}
/* fill remainder of correlation surface */
for (m = 2; m < PITCH_BW; m++) {
ind1 = 0;
ind2 = PITCH_BW - m; /* always larger than ind1 */
CorrSurfPtr1 = &CorrSurf[m][2];
CorrSurfPtr2 = &CorrSurf[2*PITCH_BW-m][PITCH_BW+2-m];
for (k = 0; k < PITCH_LAG_SPAN2-PITCH_BW+m; k++) {
ratio = ((double) (ind1 + 12)) / ((double) (ind2 + 12));
adj = ratio * (2.0 - ratio); /* adjustment factor; inverse parabola as a function of ratio */
corr = adj * (corrvec1[ind1] + corrvec2[ind2]);
CorrSurfPtr1[k] = corr;
if (corr > corr_max) {
corr_max = corr; /* update maximum */
max_ind = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
}
corr = adj * (corrvec1[ind2++] + corrvec2[ind1++]);
CorrSurfPtr2[k] = corr;
if (corr > corr_max) {
corr_max = corr; /* update maximum */
max_ind = (int)(&CorrSurfPtr2[k] - &CorrSurf[0][0]);
}
}
}
/* threshold value to qualify as a peak */
corr_max *= 0.6;
peaks_ind = 0;
/* find peaks */
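/* A candidate qualifies as a peak if it exceeds the threshold and its four
   neighbours in the rows directly above and below it on the surface. */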
for (m = 1; m < PITCH_BW+1; m++) {
if (peaks_ind == PITCH_MAX_NUM_PEAKS) break;
CorrSurfPtr1 = &CorrSurf[m][2];
for (k = 2; k < PITCH_LAG_SPAN2-PITCH_BW-2+m; k++) {
corr = CorrSurfPtr1[k];
if (corr > corr_max) {
if ( (corr > CorrSurfPtr1[k - (PITCH_LAG_SPAN2+5)]) && (corr > CorrSurfPtr1[k - (PITCH_LAG_SPAN2+4)]) ) {
if ( (corr > CorrSurfPtr1[k + (PITCH_LAG_SPAN2+4)]) && (corr > CorrSurfPtr1[k + (PITCH_LAG_SPAN2+5)]) ) {
/* found a peak; store index into matrix */
peaks[peaks_ind++] = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
if (peaks_ind == PITCH_MAX_NUM_PEAKS) break;
}
}
}
}
}
for (m = PITCH_BW+1; m < 2*PITCH_BW; m++) {
if (peaks_ind == PITCH_MAX_NUM_PEAKS) break;
CorrSurfPtr1 = &CorrSurf[m][2];
for (k = 2+m-PITCH_BW; k < PITCH_LAG_SPAN2-2; k++) {
corr = CorrSurfPtr1[k];
if (corr > corr_max) {
if ( (corr > CorrSurfPtr1[k - (PITCH_LAG_SPAN2+5)]) && (corr > CorrSurfPtr1[k - (PITCH_LAG_SPAN2+4)]) ) {
if ( (corr > CorrSurfPtr1[k + (PITCH_LAG_SPAN2+4)]) && (corr > CorrSurfPtr1[k + (PITCH_LAG_SPAN2+5)]) ) {
/* found a peak; store index into matrix */
peaks[peaks_ind++] = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
if (peaks_ind == PITCH_MAX_NUM_PEAKS) break;
}
}
}
}
}
if (peaks_ind > 0) {
/* examine each peak */
CorrSurfPtr1 = &CorrSurf[0][0];
for (k = 0; k < peaks_ind; k++) {
peak = peaks[k];
/* compute four interpolated values around current peak */
IntrepolFilter(&CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+5)], &intrp_a);
IntrepolFilter(&CorrSurfPtr1[peak - 1 ], &intrp_b);
IntrepolFilter(&CorrSurfPtr1[peak ], &intrp_c);
IntrepolFilter(&CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+4)], &intrp_d);
/* determine maximum of the interpolated values */
corr = CorrSurfPtr1[peak];
corr_max = intrp_a;
if (intrp_b > corr_max) corr_max = intrp_b;
if (intrp_c > corr_max) corr_max = intrp_c;
if (intrp_d > corr_max) corr_max = intrp_d;
/* determine where the peak sits and fill a 3x3 matrix around it */
row = peak / (PITCH_LAG_SPAN2+4);
lags1[k] = (double) ((peak - row * (PITCH_LAG_SPAN2+4)) + PITCH_MIN_LAG/2 - 4);
lags2[k] = (double) (lags1[k] + PITCH_BW - row);
if ( corr > corr_max ) {
T[0][0] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+5)];
T[2][0] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+4)];
T[1][1] = corr;
T[0][2] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+4)];
T[2][2] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+5)];
T[1][0] = intrp_a;
T[0][1] = intrp_b;
T[2][1] = intrp_c;
T[1][2] = intrp_d;
} else {
if (intrp_a == corr_max) {
lags1[k] -= 0.5;
lags2[k] += 0.5;
IntrepolFilter(&CorrSurfPtr1[peak - 2*(PITCH_LAG_SPAN2+5)], &T[0][0]);
IntrepolFilter(&CorrSurfPtr1[peak - (2*PITCH_LAG_SPAN2+9)], &T[2][0]);
T[1][1] = intrp_a;
T[0][2] = intrp_b;
T[2][2] = intrp_c;
T[1][0] = CorrSurfPtr1[peak - (2*PITCH_LAG_SPAN2+9)];
T[0][1] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+5)];
T[2][1] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+4)];
T[1][2] = corr;
} else if (intrp_b == corr_max) {
lags1[k] -= 0.5;
lags2[k] -= 0.5;
IntrepolFilter(&CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+6)], &T[0][0]);
T[2][0] = intrp_a;
T[1][1] = intrp_b;
IntrepolFilter(&CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+3)], &T[0][2]);
T[2][2] = intrp_d;
T[1][0] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+5)];
T[0][1] = CorrSurfPtr1[peak - 1];
T[2][1] = corr;
T[1][2] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+4)];
} else if (intrp_c == corr_max) {
lags1[k] += 0.5;
lags2[k] += 0.5;
T[0][0] = intrp_a;
IntrepolFilter(&CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+4)], &T[2][0]);
T[1][1] = intrp_c;
T[0][2] = intrp_d;
IntrepolFilter(&CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+5)], &T[2][2]);
T[1][0] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+4)];
T[0][1] = corr;
T[2][1] = CorrSurfPtr1[peak + 1];
T[1][2] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+5)];
} else {
lags1[k] += 0.5;
lags2[k] -= 0.5;
T[0][0] = intrp_b;
T[2][0] = intrp_c;
T[1][1] = intrp_d;
IntrepolFilter(&CorrSurfPtr1[peak + 2*(PITCH_LAG_SPAN2+4)], &T[0][2]);
IntrepolFilter(&CorrSurfPtr1[peak + (2*PITCH_LAG_SPAN2+9)], &T[2][2]);
T[1][0] = corr;
T[0][1] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+4)];
T[2][1] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+5)];
T[1][2] = CorrSurfPtr1[peak + (2*PITCH_LAG_SPAN2+9)];
}
}
/* 2D parabolic interpolation gives more accurate lags and peak value */
Intrpol2D(T, &lags1[k], &lags2[k], &peak_vals[k]);
}
/* determine the highest peak, after applying a bias towards short lags */
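/* The bias multiplies each peak value by PITCH_PEAK_DECAY raised to the log of
   the summed lags, i.e. a power law in the total lag that favours shorter lags. */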
corr_max = 0.0;
for (k = 0; k < peaks_ind; k++) {
corr = peak_vals[k] * pow(PITCH_PEAK_DECAY, log(lags1[k] + lags2[k]));
if (corr > corr_max) {
corr_max = corr;
peak = k;
}
}
lags1[peak] *= 2.0;
lags2[peak] *= 2.0;
if (lags1[peak] < (double) PITCH_MIN_LAG) lags1[peak] = (double) PITCH_MIN_LAG;
if (lags2[peak] < (double) PITCH_MIN_LAG) lags2[peak] = (double) PITCH_MIN_LAG;
if (lags1[peak] > (double) PITCH_MAX_LAG) lags1[peak] = (double) PITCH_MAX_LAG;
if (lags2[peak] > (double) PITCH_MAX_LAG) lags2[peak] = (double) PITCH_MAX_LAG;
/* store lags of highest peak in output array */
lags[0] = lags1[peak];
lags[1] = lags1[peak];
lags[2] = lags2[peak];
lags[3] = lags2[peak];
}
else
{
row = max_ind / (PITCH_LAG_SPAN2+4);
lags1[0] = (double) ((max_ind - row * (PITCH_LAG_SPAN2+4)) + PITCH_MIN_LAG/2 - 4);
lags2[0] = (double) (lags1[0] + PITCH_BW - row);
if (lags1[0] < (double) PITCH_MIN_LAG) lags1[0] = (double) PITCH_MIN_LAG;
if (lags2[0] < (double) PITCH_MIN_LAG) lags2[0] = (double) PITCH_MIN_LAG;
if (lags1[0] > (double) PITCH_MAX_LAG) lags1[0] = (double) PITCH_MAX_LAG;
if (lags2[0] > (double) PITCH_MAX_LAG) lags2[0] = (double) PITCH_MAX_LAG;
/* store lags of highest peak in output array */
lags[0] = lags1[0];
lags[1] = lags1[0];
lags[2] = lags2[0];
lags[3] = lags2[0];
}
}
/* create weighting matrix by orthogonalizing a basis of polynomials of increasing order
* t = (0:4)';
* A = [t.^0, t.^1, t.^2, t.^3, t.^4];
* [Q, dummy] = qr(A);
* P.Weight = Q * diag([0, .1, .5, 1, 1]) * Q'; */
static const double kWeight[5][5] = {
{ 0.29714285714286, -0.30857142857143, -0.05714285714286, 0.05142857142857, 0.01714285714286},
{-0.30857142857143, 0.67428571428571, -0.27142857142857, -0.14571428571429, 0.05142857142857},
{-0.05714285714286, -0.27142857142857, 0.65714285714286, -0.27142857142857, -0.05714285714286},
{ 0.05142857142857, -0.14571428571429, -0.27142857142857, 0.67428571428571, -0.30857142857143},
{ 0.01714285714286, 0.05142857142857, -0.05714285714286, -0.30857142857143, 0.29714285714286}
};
void WebRtcIsac_PitchAnalysis(const double *in, /* PITCH_FRAME_LEN samples */
double *out, /* PITCH_FRAME_LEN+QLOOKAHEAD samples */
PitchAnalysisStruct *State,
double *lags,
double *gains)
{
double HPin[PITCH_FRAME_LEN];
double Weighted[PITCH_FRAME_LEN];
double Whitened[PITCH_FRAME_LEN + QLOOKAHEAD];
double inbuf[PITCH_FRAME_LEN + QLOOKAHEAD];
double out_G[PITCH_FRAME_LEN + QLOOKAHEAD]; // could be removed by using out instead
double out_dG[4][PITCH_FRAME_LEN + QLOOKAHEAD];
double old_lag, old_gain;
double nrg_wht, tmp;
double Wnrg, Wfluct, Wgain;
double H[4][4];
double grad[4];
double dG[4];
int k, m, n, iter;
/* high pass filtering using second order pole-zero filter */
WebRtcIsac_Highpass(in, HPin, State->hp_state, PITCH_FRAME_LEN);
/* copy from state into buffer */
memcpy(Whitened, State->whitened_buf, sizeof(double) * QLOOKAHEAD);
/* compute weighted and whitened signals */
WebRtcIsac_WeightingFilter(HPin, &Weighted[0], &Whitened[QLOOKAHEAD], &(State->Wghtstr));
/* copy from buffer into state */
memcpy(State->whitened_buf, Whitened+PITCH_FRAME_LEN, sizeof(double) * QLOOKAHEAD);
old_lag = State->PFstr_wght.oldlagp[0];
old_gain = State->PFstr_wght.oldgainp[0];
/* initial pitch estimate */
WebRtcIsac_InitializePitch(Weighted, old_lag, old_gain, State, lags);
/* Iterative optimization of lags - to be done */
/* compute energy of whitened signal */
nrg_wht = 0.0;
for (k = 0; k < PITCH_FRAME_LEN + QLOOKAHEAD; k++)
nrg_wht += Whitened[k] * Whitened[k];
/* Iterative optimization of gains */
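/* The gains are refined with a few Newton steps on a cost that combines the
   pitch-prediction error energy, a penalty on gain changes between subframes,
   and a penalty on large gains, followed by clipping to the allowed range. */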
/* set weights for the energy, gain fluctuation, and spectral gain penalty functions */
Wnrg = 1.0 / nrg_wht;
Wgain = 0.005;
Wfluct = 3.0;
/* set initial gains */
for (k = 0; k < 4; k++)
gains[k] = PITCH_MAX_GAIN_06;
/* two iterations should be enough */
for (iter = 0; iter < 2; iter++) {
/* compute Jacobian of pre-filter output towards gains */
WebRtcIsac_PitchfilterPre_gains(Whitened, out_G, out_dG, &(State->PFstr_wght), lags, gains);
/* gradient and approximate Hessian (lower triangle) for minimizing the filter's output power */
for (k = 0; k < 4; k++) {
tmp = 0.0;
for (n = 0; n < PITCH_FRAME_LEN + QLOOKAHEAD; n++)
tmp += out_G[n] * out_dG[k][n];
grad[k] = tmp * Wnrg;
}
for (k = 0; k < 4; k++) {
for (m = 0; m <= k; m++) {
tmp = 0.0;
for (n = 0; n < PITCH_FRAME_LEN + QLOOKAHEAD; n++)
tmp += out_dG[m][n] * out_dG[k][n];
H[k][m] = tmp * Wnrg;
}
}
/* add gradient and Hessian (lower triangle) for dampening fast gain changes */
for (k = 0; k < 4; k++) {
tmp = kWeight[k+1][0] * old_gain;
for (m = 0; m < 4; m++)
tmp += kWeight[k+1][m+1] * gains[m];
grad[k] += tmp * Wfluct;
}
for (k = 0; k < 4; k++) {
for (m = 0; m <= k; m++) {
H[k][m] += kWeight[k+1][m+1] * Wfluct;
}
}
/* add gradient and Hessian for dampening gain */
for (k = 0; k < 3; k++) {
tmp = 1.0 / (1 - gains[k]);
grad[k] += tmp * tmp * Wgain;
H[k][k] += 2.0 * tmp * (tmp * tmp * Wgain);
}
tmp = 1.0 / (1 - gains[3]);
grad[3] += 1.33 * (tmp * tmp * Wgain);
H[3][3] += 2.66 * tmp * (tmp * tmp * Wgain);
/* compute Cholesky factorization of the Hessian
* by overwriting the upper triangle; scale factors on the diagonal
* (for non-PC platforms, store the inverses of the diagonals separately to minimize divisions) */
H[0][1] = H[1][0] / H[0][0];
H[0][2] = H[2][0] / H[0][0];
H[0][3] = H[3][0] / H[0][0];
H[1][1] -= H[0][0] * H[0][1] * H[0][1];
H[1][2] = (H[2][1] - H[0][1] * H[2][0]) / H[1][1];
H[1][3] = (H[3][1] - H[0][1] * H[3][0]) / H[1][1];
H[2][2] -= H[0][0] * H[0][2] * H[0][2] + H[1][1] * H[1][2] * H[1][2];
H[2][3] = (H[3][2] - H[0][2] * H[3][0] - H[1][2] * H[1][1] * H[1][3]) / H[2][2];
H[3][3] -= H[0][0] * H[0][3] * H[0][3] + H[1][1] * H[1][3] * H[1][3] + H[2][2] * H[2][3] * H[2][3];
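/* H now holds an LDL^T-style factorization: the unit off-diagonal factors are
   stored in the upper triangle and the pivots on the diagonal. */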
/* Compute update as delta_gains = -inv(H) * grad */
/* copy and negate */
for (k = 0; k < 4; k++)
dG[k] = -grad[k];
/* forward substitution */
dG[1] -= dG[0] * H[0][1];
dG[2] -= dG[0] * H[0][2] + dG[1] * H[1][2];
dG[3] -= dG[0] * H[0][3] + dG[1] * H[1][3] + dG[2] * H[2][3];
/* scale */
for (k = 0; k < 4; k++)
dG[k] /= H[k][k];
/* back substitution */
dG[2] -= dG[3] * H[2][3];
dG[1] -= dG[3] * H[1][3] + dG[2] * H[1][2];
dG[0] -= dG[3] * H[0][3] + dG[2] * H[0][2] + dG[1] * H[0][1];
/* update gains and check range */
for (k = 0; k < 4; k++) {
gains[k] += dG[k];
if (gains[k] > PITCH_MAX_GAIN)
gains[k] = PITCH_MAX_GAIN;
else if (gains[k] < 0.0)
gains[k] = 0.0;
}
}
/* update state for next frame */
WebRtcIsac_PitchfilterPre(Whitened, out, &(State->PFstr_wght), lags, gains);
/* concatenate previous input's end and current input */
memcpy(inbuf, State->inbuf, sizeof(double) * QLOOKAHEAD);
memcpy(inbuf+QLOOKAHEAD, in, sizeof(double) * PITCH_FRAME_LEN);
/* lookahead pitch filtering for masking analysis */
WebRtcIsac_PitchfilterPre_la(inbuf, out, &(State->PFstr), lags, gains);
/* store last part of input */
for (k = 0; k < QLOOKAHEAD; k++)
State->inbuf[k] = inbuf[k + PITCH_FRAME_LEN];
}

View File

@ -0,0 +1,75 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* pitch_estimator.h
*
* Pitch functions
*
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_ESTIMATOR_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_ESTIMATOR_H_
#include "structs.h"
void WebRtcIsac_PitchAnalysis(const double *in, /* PITCH_FRAME_LEN samples */
double *out, /* PITCH_FRAME_LEN+QLOOKAHEAD samples */
PitchAnalysisStruct *State,
double *lags,
double *gains);
void WebRtcIsac_InitializePitch(const double *in,
const double old_lag,
const double old_gain,
PitchAnalysisStruct *State,
double *lags);
void WebRtcIsac_PitchfilterPre(double *indat,
double *outdat,
PitchFiltstr *pfp,
double *lags,
double *gains);
void WebRtcIsac_PitchfilterPost(double *indat,
double *outdat,
PitchFiltstr *pfp,
double *lags,
double *gains);
void WebRtcIsac_PitchfilterPre_la(double *indat,
double *outdat,
PitchFiltstr *pfp,
double *lags,
double *gains);
void WebRtcIsac_PitchfilterPre_gains(double *indat,
double *outdat,
double out_dG[][PITCH_FRAME_LEN + QLOOKAHEAD],
PitchFiltstr *pfp,
double *lags,
double *gains);
void WebRtcIsac_WeightingFilter(const double *in, double *weiout, double *whiout, WeightFiltstr *wfdata);
void WebRtcIsac_Highpass(const double *in,
double *out,
double *state,
size_t N);
void WebRtcIsac_DecimateAllpass(const double *in,
double *state_in, /* array of size:
* 2*ALLPASSSECTIONS+1 */
size_t N, /* number of input samples */
double *out); /* array of size N/2 */
#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_ESTIMATOR_H_ */

View File

@ -0,0 +1,105 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "pitch_gain_tables.h"
#include "settings.h"
/* header file for coding tables for the pitch filter side-info in the entropy coder */
/********************* Pitch Filter Gain Coefficient Tables ************************/
/* cdf for quantized pitch filter gains */
const uint16_t WebRtcIsac_kQPitchGainCdf[255] = {
0, 2, 4, 6, 64, 901, 903, 905, 16954, 16956,
16961, 17360, 17362, 17364, 17366, 17368, 17370, 17372, 17374, 17411,
17514, 17516, 17583, 18790, 18796, 18802, 20760, 20777, 20782, 21722,
21724, 21728, 21738, 21740, 21742, 21744, 21746, 21748, 22224, 22227,
22230, 23214, 23229, 23239, 25086, 25108, 25120, 26088, 26094, 26098,
26175, 26177, 26179, 26181, 26183, 26185, 26484, 26507, 26522, 27705,
27731, 27750, 29767, 29799, 29817, 30866, 30883, 30885, 31025, 31029,
31031, 31033, 31035, 31037, 31114, 31126, 31134, 32687, 32722, 32767,
35718, 35742, 35757, 36943, 36952, 36954, 37115, 37128, 37130, 37132,
37134, 37136, 37143, 37145, 37152, 38843, 38863, 38897, 47458, 47467,
47474, 49040, 49061, 49063, 49145, 49157, 49159, 49161, 49163, 49165,
49167, 49169, 49171, 49757, 49770, 49782, 61333, 61344, 61346, 62860,
62883, 62885, 62887, 62889, 62891, 62893, 62895, 62897, 62899, 62901,
62903, 62905, 62907, 62909, 65496, 65498, 65500, 65521, 65523, 65525,
65527, 65529, 65531, 65533, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535};
/* index limits and ranges */
const int16_t WebRtcIsac_kIndexLowerLimitGain[3] = {
-7, -2, -1};
const int16_t WebRtcIsac_kIndexUpperLimitGain[3] = {
0, 3, 1};
const uint16_t WebRtcIsac_kIndexMultsGain[2] = {
18, 3};
/* size of cdf table */
const uint16_t WebRtcIsac_kQCdfTableSizeGain[1] = {
256};
///////////////////////////FIXED POINT
/* mean values of pitch filter gains in FIXED point */
const int16_t WebRtcIsac_kQMeanGain1Q12[144] = {
843, 1092, 1336, 1222, 1405, 1656, 1500, 1815, 1843, 1838, 1839, 1843, 1843, 1843, 1843, 1843,
1843, 1843, 814, 846, 1092, 1013, 1174, 1383, 1391, 1511, 1584, 1734, 1753, 1843, 1843, 1843,
1843, 1843, 1843, 1843, 524, 689, 777, 845, 947, 1069, 1090, 1263, 1380, 1447, 1559, 1676,
1645, 1749, 1843, 1843, 1843, 1843, 81, 477, 563, 611, 706, 806, 849, 1012, 1192, 1128,
1330, 1489, 1425, 1576, 1826, 1741, 1843, 1843, 0, 290, 305, 356, 488, 575, 602, 741,
890, 835, 1079, 1196, 1182, 1376, 1519, 1506, 1680, 1843, 0, 47, 97, 69, 289, 381,
385, 474, 617, 664, 803, 1079, 935, 1160, 1269, 1265, 1506, 1741, 0, 0, 0, 0,
112, 120, 190, 283, 442, 343, 526, 809, 684, 935, 1134, 1020, 1265, 1506, 0, 0,
0, 0, 0, 0, 0, 111, 256, 87, 373, 597, 430, 684, 935, 770, 1020, 1265};
const int16_t WebRtcIsac_kQMeanGain2Q12[144] = {
1760, 1525, 1285, 1747, 1671, 1393, 1843, 1826, 1555, 1843, 1784, 1606, 1843, 1843, 1711, 1843,
1843, 1814, 1389, 1275, 1040, 1564, 1414, 1252, 1610, 1495, 1343, 1753, 1592, 1405, 1804, 1720,
1475, 1843, 1814, 1581, 1208, 1061, 856, 1349, 1148, 994, 1390, 1253, 1111, 1495, 1343, 1178,
1770, 1465, 1234, 1814, 1581, 1342, 1040, 793, 713, 1053, 895, 737, 1128, 1003, 861, 1277,
1094, 981, 1475, 1192, 1019, 1581, 1342, 1098, 855, 570, 483, 833, 648, 540, 948, 744,
572, 1009, 844, 636, 1234, 934, 685, 1342, 1217, 984, 537, 318, 124, 603, 423, 350,
687, 479, 322, 791, 581, 430, 987, 671, 488, 1098, 849, 597, 283, 27, 0, 397,
222, 38, 513, 271, 124, 624, 325, 157, 737, 484, 233, 849, 597, 343, 27, 0,
0, 141, 0, 0, 256, 69, 0, 370, 87, 0, 484, 229, 0, 597, 343, 87};
const int16_t WebRtcIsac_kQMeanGain3Q12[144] = {
1843, 1843, 1711, 1843, 1818, 1606, 1843, 1827, 1511, 1814, 1639, 1393, 1760, 1525, 1285, 1656,
1419, 1176, 1835, 1718, 1475, 1841, 1650, 1387, 1648, 1498, 1287, 1600, 1411, 1176, 1522, 1299,
1040, 1419, 1176, 928, 1773, 1461, 1128, 1532, 1355, 1202, 1429, 1260, 1115, 1398, 1151, 1025,
1172, 1080, 790, 1176, 928, 677, 1475, 1147, 1019, 1276, 1096, 922, 1214, 1010, 901, 1057,
893, 800, 1040, 796, 734, 928, 677, 424, 1137, 897, 753, 1120, 830, 710, 875, 751,
601, 795, 642, 583, 790, 544, 475, 677, 474, 140, 987, 750, 482, 697, 573, 450,
691, 487, 303, 661, 394, 332, 537, 303, 220, 424, 168, 0, 737, 484, 229, 624,
348, 153, 441, 261, 136, 397, 166, 51, 283, 27, 0, 168, 0, 0, 484, 229,
0, 370, 57, 0, 256, 43, 0, 141, 0, 0, 27, 0, 0, 0, 0, 0};
const int16_t WebRtcIsac_kQMeanGain4Q12[144] = {
1843, 1843, 1843, 1843, 1841, 1843, 1500, 1821, 1843, 1222, 1434, 1656, 843, 1092, 1336, 504,
757, 1007, 1843, 1843, 1843, 1838, 1791, 1843, 1265, 1505, 1599, 965, 1219, 1425, 730, 821,
1092, 249, 504, 757, 1783, 1819, 1843, 1351, 1567, 1727, 1096, 1268, 1409, 805, 961, 1131,
444, 670, 843, 0, 249, 504, 1425, 1655, 1743, 1096, 1324, 1448, 822, 1019, 1199, 490,
704, 867, 81, 450, 555, 0, 0, 249, 1247, 1428, 1530, 881, 1073, 1283, 610, 759,
939, 278, 464, 645, 0, 200, 270, 0, 0, 0, 935, 1163, 1410, 528, 790, 1068,
377, 499, 717, 173, 240, 274, 0, 43, 62, 0, 0, 0, 684, 935, 1182, 343,
551, 735, 161, 262, 423, 0, 55, 27, 0, 0, 0, 0, 0, 0, 430, 684,
935, 87, 377, 597, 0, 46, 256, 0, 0, 0, 0, 0, 0, 0, 0, 0};
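The 255-entry table above is a monotone cumulative distribution over the quantized pitch-gain symbols, scaled to the 16-bit range used by the entropy coder. The actual decoding happens inside the iSAC arithmetic-coding routines; the generic lookup below, with the hypothetical name cdf_symbol_lookup, only illustrates how such a table maps a scaled decoder value back to a symbol index.

#include <stddef.h>
#include <stdint.h>

/* Illustration only (not part of the codec): return the largest index i in
 * [0, len - 2] with cdf[i] <= target, i.e. the interval of a monotone CDF
 * such as WebRtcIsac_kQPitchGainCdf that contains `target`. Assumes
 * cdf[0] == 0, which holds for the table above. */
static size_t cdf_symbol_lookup(const uint16_t *cdf, size_t len,
                                uint16_t target) {
  size_t lo = 0;        /* invariant: cdf[lo] <= target */
  size_t hi = len - 1;  /* upper search bound */
  while (hi - lo > 1) {
    const size_t mid = lo + (hi - lo) / 2;
    if (cdf[mid] <= target)
      lo = mid;
    else
      hi = mid;
  }
  return lo;
}

For example, cdf_symbol_lookup(WebRtcIsac_kQPitchGainCdf, 255, v) returns the index i with WebRtcIsac_kQPitchGainCdf[i] <= v < WebRtcIsac_kQPitchGainCdf[i + 1] for any v strictly below the table's top value.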

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* pitch_gain_tables.h
*
* This file contains tables for the pitch filter side-info in the entropy coder.
*
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_GAIN_TABLES_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_GAIN_TABLES_H_
#include "webrtc/typedefs.h"
/* header file for coding tables for the pitch filter side-info in the entropy coder */
/********************* Pitch Filter Gain Coefficient Tables ************************/
/* cdf for quantized pitch filter gains */
extern const uint16_t WebRtcIsac_kQPitchGainCdf[255];
/* index limits and ranges */
extern const int16_t WebRtcIsac_kIndexLowerLimitGain[3];
extern const int16_t WebRtcIsac_kIndexUpperLimitGain[3];
extern const uint16_t WebRtcIsac_kIndexMultsGain[2];
/* mean values of pitch filter gains */
//(Y)
extern const int16_t WebRtcIsac_kQMeanGain1Q12[144];
extern const int16_t WebRtcIsac_kQMeanGain2Q12[144];
extern const int16_t WebRtcIsac_kQMeanGain3Q12[144];
extern const int16_t WebRtcIsac_kQMeanGain4Q12[144];
//(Y)
/* size of cdf table */
extern const uint16_t WebRtcIsac_kQCdfTableSizeGain[1];
#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_GAIN_TABLES_H_ */

Some files were not shown because too many files have changed in this diff.