Bump to WebRTC M131 release

Ongoing fixes and improvements, transient suppressor is gone. Also,
dropping isac because it doesn't seem to be useful, and is just build
system deadweight now.

Upstream references:

  Version: 131.0.6778.200
  WebRTC: 79aff54b0fa9238ce3518dd9eaf9610cd6f22e82
  Chromium: 2a19506ad24af755f2a215a4c61f775393e0db42
This commit is contained in:
Arun Raghavan 2024-12-24 19:32:07 -05:00
parent 8bdb53d91c
commit b5c48b97f6
263 changed files with 4628 additions and 20416 deletions

View File

@ -99,7 +99,6 @@ build-container-aarch64:
- DESTDIR=$PWD/_install ninja install -C builddir
# Test that the pc files are usable
- PKG_CONFIG_PATH=$PWD/_install/usr/lib/pkgconfig pkg-config --cflags --libs webrtc-audio-processing-1
- PKG_CONFIG_PATH=$PWD/_install/usr/lib/pkgconfig pkg-config --cflags --libs webrtc-audio-coding-1
artifacts:
expire_in: '5 days'
when: 'always'
@ -116,7 +115,6 @@ build-container-aarch64:
- DESTDIR=$PWD/_install ninja install -C builddir
# Test that the pc files are usable
- PKG_CONFIG_LIBDIR=$PWD/_install/usr/lib/pkgconfig pkg-config --cflags --libs webrtc-audio-processing-1
- PKG_CONFIG_LIBDIR=$PWD/_install/usr/lib/pkgconfig pkg-config --cflags --libs webrtc-audio-coding-1
artifacts:
expire_in: '5 days'
when: 'always'

View File

@ -21,10 +21,6 @@ apm_major_version = major_version
apm_minor_version = minor_version
apm_project_name = 'webrtc-audio-processing-' + apm_major_version
ac_major_version = major_version
ac_minor_version = minor_version
ac_project_name = 'webrtc-audio-coding-' + ac_major_version
include_subdir = apm_project_name
cc = meson.get_compiler('c')
@ -203,20 +199,4 @@ audio_processing_dep = declare_dependency(
meson.override_dependency(apm_project_name, audio_processing_dep)
pkgconfig.generate(
libwebrtc_audio_coding,
description: 'WebRTC Audio Coding library',
subdirs: include_subdir,
extra_cflags: [
'-DWEBRTC_LIBRARY_IMPL',
] + platform_cflags,
)
audio_coding_dep = declare_dependency(
link_with: libwebrtc_audio_coding,
include_directories: [webrtc_inc]
)
meson.override_dependency(ac_project_name, audio_coding_dep)
subdir('examples')

View File

@ -79,6 +79,9 @@ if (!build_with_chromium) {
"video:sv_loopback",
"video:video_loopback",
]
if (use_libfuzzer) {
deps += [ "test/fuzzers" ]
}
if (!is_asan) {
# Do not build :webrtc_lib_link_test because lld complains on some OS
# (e.g. when target_os = "mac") when is_asan=true. For more details,
@ -139,10 +142,6 @@ config("common_inherited_config") {
cflags = []
ldflags = []
if (rtc_jni_generator_legacy_symbols) {
defines += [ "RTC_JNI_GENERATOR_LEGACY_SYMBOLS" ]
}
if (rtc_objc_prefix != "") {
defines += [ "RTC_OBJC_TYPE_PREFIX=${rtc_objc_prefix}" ]
}
@ -174,6 +173,7 @@ config("common_inherited_config") {
defines += [ "RTC_ENABLE_WIN_WGC" ]
}
if (!rtc_use_perfetto) {
# Some tests need to declare their own trace event handlers. If this define is
# not set, the first time TRACE_EVENT_* is called it will store the return
# value for the current handler in an static variable, so that subsequent
@ -185,6 +185,7 @@ config("common_inherited_config") {
} else {
defines += [ "WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS=0" ]
}
}
if (build_with_chromium) {
defines += [ "WEBRTC_CHROMIUM_BUILD" ]
include_dirs = [
@ -269,6 +270,33 @@ config("rtc_prod_config") {
}
}
group("tracing") {
all_dependent_configs = [ "//third_party/perfetto/gn:public_config" ]
if (rtc_use_perfetto) {
if (build_with_chromium) {
public_deps = # no-presubmit-check TODO(webrtc:8603)
[ "//third_party/perfetto:libperfetto" ]
} else {
public_deps = [ # no-presubmit-check TODO(webrtc:8603)
":webrtc_libperfetto",
"//third_party/perfetto/include/perfetto/tracing",
]
}
} else {
public_deps = # no-presubmit-check TODO(webrtc:8603)
[ "//third_party/perfetto/include/perfetto/tracing" ]
}
}
if (rtc_use_perfetto) {
rtc_library("webrtc_libperfetto") {
deps = [
"//third_party/perfetto/src/tracing:client_api_without_backends",
"//third_party/perfetto/src/tracing:platform_impl",
]
}
}
config("common_config") {
cflags = []
cflags_c = []
@ -342,10 +370,6 @@ config("common_config") {
defines += [ "RTC_DISABLE_METRICS" ]
}
if (rtc_exclude_transient_suppressor) {
defines += [ "WEBRTC_EXCLUDE_TRANSIENT_SUPPRESSOR" ]
}
if (rtc_exclude_audio_processing_module) {
defines += [ "WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE" ]
}
@ -395,19 +419,11 @@ config("common_config") {
}
if (is_clang) {
cflags += [ "-Wc++11-narrowing" ]
if (!is_fuchsia) {
# Compiling with the Fuchsia SDK results in Wundef errors
# TODO(bugs.fuchsia.dev/100722): Remove from (!is_fuchsia) branch when
# Fuchsia build errors are fixed.
cflags += [ "-Wundef" ]
}
if (!is_nacl) {
# Flags NaCl (Clang 3.7) do not recognize.
cflags += [ "-Wunused-lambda-capture" ]
}
cflags += [
"-Wc++11-narrowing",
"-Wundef",
"-Wunused-lambda-capture",
]
}
if (is_win && !is_clang) {
@ -478,12 +494,28 @@ config("common_config") {
"/U_UNICODE",
]
}
if (rtc_use_perfetto) {
defines += [ "RTC_USE_PERFETTO" ]
}
}
config("common_objc") {
frameworks = [ "Foundation.framework" ]
}
if (!rtc_build_ssl) {
config("external_ssl_library") {
if (rtc_ssl_root != "") {
include_dirs = [ rtc_ssl_root ]
}
libs = [
"crypto",
"ssl",
]
}
}
if (!build_with_chromium) {
# Target to build all the WebRTC production code.
rtc_static_library("webrtc") {
@ -500,9 +532,11 @@ if (!build_with_chromium) {
deps = [
"api:create_peerconnection_factory",
"api:enable_media",
"api:libjingle_peerconnection_api",
"api:rtc_error",
"api:transport_api",
"api/audio_codecs:opus_audio_decoder_factory",
"api/crypto",
"api/rtc_event_log:rtc_event_log_factory",
"api/task_queue",
@ -588,6 +622,16 @@ if (use_libfuzzer || use_afl) {
}
if (rtc_include_tests && !build_with_chromium) {
rtc_unittests_resources = [ "resources/reference_video_640x360_30fps.y4m" ]
if (is_ios) {
bundle_data("rtc_unittests_bundle_data") {
testonly = true
sources = rtc_unittests_resources
outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ]
}
}
rtc_test("rtc_unittests") {
testonly = true
@ -601,12 +645,15 @@ if (rtc_include_tests && !build_with_chromium) {
"api/test/metrics:metrics_unittests",
"api/transport:stun_unittest",
"api/video/test:rtc_api_video_unittests",
"api/video_codecs:libaom_av1_encoder_factory_test",
"api/video_codecs:simple_encoder_wrapper_unittests",
"api/video_codecs/test:video_codecs_api_unittests",
"api/voip:compile_all_headers",
"call:fake_network_pipe_unittests",
"p2p:libstunprober_unittests",
"p2p:rtc_p2p_unittests",
"rtc_base:async_dns_resolver_unittests",
"rtc_base:async_packet_socket_unittest",
"rtc_base:callback_list_unittests",
"rtc_base:rtc_base_approved_unittests",
"rtc_base:rtc_base_unittests",
@ -628,8 +675,17 @@ if (rtc_include_tests && !build_with_chromium) {
"test/network:network_emulation_unittests",
]
data = rtc_unittests_resources
if (rtc_enable_protobuf) {
deps += [ "logging:rtc_event_log_tests" ]
deps += [
"api/test/network_emulation:network_config_schedule_proto",
"logging:rtc_event_log_tests",
]
}
if (is_ios) {
deps += [ ":rtc_unittests_bundle_data" ]
}
if (is_android) {
@ -800,10 +856,10 @@ rtc_static_library("dcsctp") {
group("poison_audio_codecs") {
}
group("poison_default_task_queue") {
group("poison_default_echo_detector") {
}
group("poison_default_echo_detector") {
group("poison_environment_construction") {
}
group("poison_software_video_codecs") {

View File

@ -13,6 +13,7 @@
#include <algorithm>
#include <array>
#include <cstddef>
#include <iterator>
#include <type_traits>

View File

@ -12,6 +12,13 @@
#include <string.h>
#include <cstdint>
#include <optional>
#include "api/array_view.h"
#include "api/audio/audio_view.h"
#include "api/audio/channel_layout.h"
#include "api/rtp_packet_infos.h"
#include "rtc_base/checks.h"
#include "rtc_base/time_utils.h"
@ -22,6 +29,20 @@ AudioFrame::AudioFrame() {
static_assert(sizeof(data_) == kMaxDataSizeBytes, "kMaxDataSizeBytes");
}
AudioFrame::AudioFrame(int sample_rate_hz,
size_t num_channels,
ChannelLayout layout /*= CHANNEL_LAYOUT_UNSUPPORTED*/)
: samples_per_channel_(SampleRateToDefaultChannelSize(sample_rate_hz)),
sample_rate_hz_(sample_rate_hz),
num_channels_(num_channels),
channel_layout_(layout == CHANNEL_LAYOUT_UNSUPPORTED
? GuessChannelLayout(num_channels)
: layout) {
RTC_DCHECK_LE(num_channels_, kMaxConcurrentChannels);
RTC_DCHECK_GT(sample_rate_hz_, 0);
RTC_DCHECK_GT(samples_per_channel_, 0u);
}
void AudioFrame::Reset() {
ResetWithoutMuting();
muted_ = true;
@ -41,7 +62,7 @@ void AudioFrame::ResetWithoutMuting() {
vad_activity_ = kVadUnknown;
profile_timestamp_ms_ = 0;
packet_infos_ = RtpPacketInfos();
absolute_capture_timestamp_ms_ = absl::nullopt;
absolute_capture_timestamp_ms_ = std::nullopt;
}
void AudioFrame::UpdateFrame(uint32_t timestamp,
@ -51,6 +72,7 @@ void AudioFrame::UpdateFrame(uint32_t timestamp,
SpeechType speech_type,
VADActivity vad_activity,
size_t num_channels) {
RTC_CHECK_LE(num_channels, kMaxConcurrentChannels);
timestamp_ = timestamp;
samples_per_channel_ = samples_per_channel;
sample_rate_hz_ = sample_rate_hz;
@ -63,9 +85,9 @@ void AudioFrame::UpdateFrame(uint32_t timestamp,
}
const size_t length = samples_per_channel * num_channels;
RTC_CHECK_LE(length, kMaxDataSizeSamples);
RTC_CHECK_LE(length, data_.size());
if (data != nullptr) {
memcpy(data_, data, sizeof(int16_t) * length);
memcpy(data_.data(), data, sizeof(int16_t) * length);
muted_ = false;
} else {
muted_ = true;
@ -76,6 +98,16 @@ void AudioFrame::CopyFrom(const AudioFrame& src) {
if (this == &src)
return;
if (muted_ && !src.muted()) {
// TODO: bugs.webrtc.org/5647 - Since the default value for `muted_` is
// false and `data_` may still be uninitialized (because we don't initialize
// data_ as part of construction), we clear the full buffer here before
// copying over new values. If we don't, msan might complain in some tests.
// Consider locking down construction, avoiding the default constructor and
// preferring construction that initializes all state.
ClearSamples(data_);
}
timestamp_ = src.timestamp_;
elapsed_time_ms_ = src.elapsed_time_ms_;
ntp_time_ms_ = src.ntp_time_ms_;
@ -89,11 +121,10 @@ void AudioFrame::CopyFrom(const AudioFrame& src) {
channel_layout_ = src.channel_layout_;
absolute_capture_timestamp_ms_ = src.absolute_capture_timestamp_ms();
const size_t length = samples_per_channel_ * num_channels_;
RTC_CHECK_LE(length, kMaxDataSizeSamples);
if (!src.muted()) {
memcpy(data_, src.data(), sizeof(int16_t) * length);
muted_ = false;
auto data = src.data_view();
RTC_CHECK_LE(data.size(), data_.size());
if (!muted_ && !data.empty()) {
memcpy(&data_[0], &data[0], sizeof(int16_t) * data.size());
}
}
@ -110,17 +141,56 @@ int64_t AudioFrame::ElapsedProfileTimeMs() const {
}
const int16_t* AudioFrame::data() const {
return muted_ ? empty_data() : data_;
return muted_ ? zeroed_data().begin() : data_.data();
}
InterleavedView<const int16_t> AudioFrame::data_view() const {
// If you get a nullptr from `data_view()`, it's likely because the
// samples_per_channel_ and/or num_channels_ members haven't been properly
// set. Since `data_view()` returns an InterleavedView<> (which internally
// uses rtc::ArrayView<>), we inherit the behavior in InterleavedView when the
// view size is 0 that ArrayView<>::data() returns nullptr. So, even when an
// AudioFrame is muted and we want to return `zeroed_data()`, if
// samples_per_channel_ or num_channels_ is 0, the view will point to
// nullptr.
return InterleavedView<const int16_t>(muted_ ? &zeroed_data()[0] : &data_[0],
samples_per_channel_, num_channels_);
}
// TODO(henrik.lundin) Can we skip zeroing the buffer?
// See https://bugs.chromium.org/p/webrtc/issues/detail?id=5647.
int16_t* AudioFrame::mutable_data() {
// TODO: bugs.webrtc.org/5647 - Can we skip zeroing the buffer?
// Consider instead if we should rather zero the buffer when `muted_` is set
// to `true`.
if (muted_) {
memset(data_, 0, kMaxDataSizeBytes);
ClearSamples(data_);
muted_ = false;
}
return data_;
return &data_[0];
}
InterleavedView<int16_t> AudioFrame::mutable_data(size_t samples_per_channel,
size_t num_channels) {
const size_t total_samples = samples_per_channel * num_channels;
RTC_CHECK_LE(total_samples, data_.size());
RTC_CHECK_LE(num_channels, kMaxConcurrentChannels);
// Sanity check for valid argument values during development.
// If `samples_per_channel` is < `num_channels` but larger than 0,
// then chances are the order of arguments is incorrect.
RTC_DCHECK((samples_per_channel == 0 && num_channels == 0) ||
num_channels <= samples_per_channel)
<< "samples_per_channel=" << samples_per_channel
<< "num_channels=" << num_channels;
// TODO: bugs.webrtc.org/5647 - Can we skip zeroing the buffer?
// Consider instead if we should rather zero the whole buffer when `muted_` is
// set to `true`.
if (muted_) {
ClearSamples(data_, total_samples);
muted_ = false;
}
samples_per_channel_ = samples_per_channel;
num_channels_ = num_channels;
return InterleavedView<int16_t>(&data_[0], samples_per_channel, num_channels);
}
void AudioFrame::Mute() {
@ -131,10 +201,35 @@ bool AudioFrame::muted() const {
return muted_;
}
void AudioFrame::SetLayoutAndNumChannels(ChannelLayout layout,
size_t num_channels) {
channel_layout_ = layout;
num_channels_ = num_channels;
#if RTC_DCHECK_IS_ON
// Do a sanity check that the layout and num_channels match.
// If this lookup yields 0u, then the layout is likely CHANNEL_LAYOUT_DISCRETE.
auto expected_num_channels = ChannelLayoutToChannelCount(layout);
if (expected_num_channels) { // If expected_num_channels is 0
RTC_DCHECK_EQ(expected_num_channels, num_channels_);
}
#endif
RTC_CHECK_LE(samples_per_channel_ * num_channels_, data_.size());
}
void AudioFrame::SetSampleRateAndChannelSize(int sample_rate) {
sample_rate_hz_ = sample_rate;
// We could call `AudioProcessing::GetFrameSize()` here, but that requires
// adding a dependency on the ":audio_processing" build target, which can
// complicate the dependency tree. Some refactoring is probably in order to
// get some consistency around this since there are many places across the
// code that assume this default buffer size.
samples_per_channel_ = SampleRateToDefaultChannelSize(sample_rate_hz_);
}
// static
const int16_t* AudioFrame::empty_data() {
rtc::ArrayView<const int16_t> AudioFrame::zeroed_data() {
static int16_t* null_data = new int16_t[kMaxDataSizeSamples]();
return &null_data[0];
return rtc::ArrayView<const int16_t>(null_data, kMaxDataSizeSamples);
}
} // namespace webrtc

View File

@ -14,11 +14,34 @@
#include <stddef.h>
#include <stdint.h>
#include <array>
#include <optional>
#include "api/array_view.h"
#include "api/audio/audio_view.h"
#include "api/audio/channel_layout.h"
#include "api/rtp_packet_infos.h"
#include "rtc_base/checks.h"
namespace webrtc {
// Default webrtc buffer size in milliseconds.
constexpr size_t kDefaultAudioBufferLengthMs = 10u;
// Default total number of audio buffers per second based on the default length.
constexpr size_t kDefaultAudioBuffersPerSec =
1000u / kDefaultAudioBufferLengthMs;
// Returns the number of samples a buffer needs to hold for ~10ms of a single
// audio channel at a given sample rate.
// See also `AudioProcessing::GetFrameSize()`.
inline size_t SampleRateToDefaultChannelSize(size_t sample_rate) {
// Basic sanity check. 192kHz is the highest supported input sample rate.
RTC_DCHECK_LE(sample_rate, 192000);
return sample_rate / kDefaultAudioBuffersPerSec;
}
/////////////////////////////////////////////////////////////////////
/* This class holds up to 120 ms of super-wideband (32 kHz) stereo audio. It
* allows for adding and subtracting frames while keeping track of the resulting
* states.
@ -57,6 +80,15 @@ class AudioFrame {
AudioFrame();
// Construct an audio frame with frame length properties and channel
// information. `samples_per_channel()` will be initialized to a 10ms buffer
// size and if `layout` is not specified (default value of
// CHANNEL_LAYOUT_UNSUPPORTED is set), then the channel layout is derived
// (guessed) from `num_channels`.
AudioFrame(int sample_rate_hz,
size_t num_channels,
ChannelLayout layout = CHANNEL_LAYOUT_UNSUPPORTED);
AudioFrame(const AudioFrame&) = delete;
AudioFrame& operator=(const AudioFrame&) = delete;
@ -68,6 +100,7 @@ class AudioFrame {
// ResetWithoutMuting() to skip this wasteful zeroing.
void ResetWithoutMuting();
// TODO: b/335805780 - Accept InterleavedView.
void UpdateFrame(uint32_t timestamp,
const int16_t* data,
size_t samples_per_channel,
@ -90,20 +123,40 @@ class AudioFrame {
int64_t ElapsedProfileTimeMs() const;
// data() returns a zeroed static buffer if the frame is muted.
// mutable_frame() always returns a non-static buffer; the first call to
// mutable_frame() zeros the non-static buffer and marks the frame unmuted.
// TODO: b/335805780 - Return InterleavedView.
const int16_t* data() const;
// Returns a read-only view of all the valid samples held by the AudioFrame.
// For a muted AudioFrame, the samples will all be 0.
InterleavedView<const int16_t> data_view() const;
// mutable_data() always returns a non-static buffer; the first call to
// mutable_data() zeros the buffer and marks the frame as unmuted.
// TODO: b/335805780 - Return an InterleavedView.
int16_t* mutable_data();
// Grants write access to the audio buffer. The size of the returned writable
// view is determined by the `samples_per_channel` and `num_channels`
// dimensions which the function checks for correctness and stores in the
// internal member variables; `samples_per_channel()` and `num_channels()`
// respectively.
// If the state is currently muted, the returned view will be zeroed out.
InterleavedView<int16_t> mutable_data(size_t samples_per_channel,
size_t num_channels);
// Prefer to mute frames using AudioFrameOperations::Mute.
void Mute();
// Frame is muted by default.
bool muted() const;
size_t max_16bit_samples() const { return kMaxDataSizeSamples; }
size_t max_16bit_samples() const { return data_.size(); }
size_t samples_per_channel() const { return samples_per_channel_; }
size_t num_channels() const { return num_channels_; }
ChannelLayout channel_layout() const { return channel_layout_; }
// Sets the `channel_layout` property as well as `num_channels`.
void SetLayoutAndNumChannels(ChannelLayout layout, size_t num_channels);
int sample_rate_hz() const { return sample_rate_hz_; }
void set_absolute_capture_timestamp_ms(
@ -111,10 +164,14 @@ class AudioFrame {
absolute_capture_timestamp_ms_ = absolute_capture_time_stamp_ms;
}
absl::optional<int64_t> absolute_capture_timestamp_ms() const {
std::optional<int64_t> absolute_capture_timestamp_ms() const {
return absolute_capture_timestamp_ms_;
}
// Sets the sample_rate_hz and samples_per_channel properties based on a
// given sample rate and calculates a default 10ms samples_per_channel value.
void SetSampleRateAndChannelSize(int sample_rate);
// RTP timestamp of the first sample in the AudioFrame.
uint32_t timestamp_ = 0;
// Time since the first frame in milliseconds.
@ -126,14 +183,13 @@ class AudioFrame {
size_t samples_per_channel_ = 0;
int sample_rate_hz_ = 0;
size_t num_channels_ = 0;
ChannelLayout channel_layout_ = CHANNEL_LAYOUT_NONE;
SpeechType speech_type_ = kUndefined;
VADActivity vad_activity_ = kVadUnknown;
// Monotonically increasing timestamp intended for profiling of audio frames.
// Typically used for measuring elapsed time between two different points in
// the audio path. No lock is used to save resources and we are thread safe
// by design.
// TODO(nisse@webrtc.org): consider using absl::optional.
// TODO(nisse@webrtc.org): consider using std::optional.
int64_t profile_timestamp_ms_ = 0;
// Information about packets used to assemble this audio frame. This is needed
@ -154,18 +210,19 @@ class AudioFrame {
private:
// A permanently zeroed out buffer to represent muted frames. This is a
// header-only class, so the only way to avoid creating a separate empty
// header-only class, so the only way to avoid creating a separate zeroed
// buffer per translation unit is to wrap a static in an inline function.
static const int16_t* empty_data();
static rtc::ArrayView<const int16_t> zeroed_data();
int16_t data_[kMaxDataSizeSamples];
std::array<int16_t, kMaxDataSizeSamples> data_;
bool muted_ = true;
ChannelLayout channel_layout_ = CHANNEL_LAYOUT_NONE;
// Absolute capture timestamp when this audio frame was originally captured.
// This is only valid for audio frames captured on this machine. The absolute
// capture timestamp of a received frame is found in `packet_infos_`.
// This timestamp MUST be based on the same clock as rtc::TimeMillis().
absl::optional<int64_t> absolute_capture_timestamp_ms_;
std::optional<int64_t> absolute_capture_timestamp_ms_;
};
} // namespace webrtc

View File

@ -8,10 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_processing/include/audio_processing.h"
#include "api/audio/audio_processing.h"
#include <string>
#include "rtc_base/checks.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/system/arch.h"
namespace webrtc {
namespace {

View File

@ -0,0 +1,944 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_AUDIO_PROCESSING_H_
#define API_AUDIO_AUDIO_PROCESSING_H_
// MSVC++ requires this to be set before any other includes to get M_PI.
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#include <stddef.h> // size_t
#include <stdio.h> // FILE
#include <string.h>
#include <array>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/base/nullability.h"
#include "absl/strings/string_view.h"
#include "api/array_view.h"
#include "api/audio/audio_processing_statistics.h"
#include "api/audio/echo_control.h"
#include "api/ref_count.h"
#include "api/scoped_refptr.h"
#include "api/task_queue/task_queue_base.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
class AecDump;
class AudioBuffer;
class StreamConfig;
class ProcessingConfig;
class EchoDetector;
// The Audio Processing Module (APM) provides a collection of voice processing
// components designed for real-time communications software.
//
// APM operates on two audio streams on a frame-by-frame basis. Frames of the
// primary stream, on which all processing is applied, are passed to
// `ProcessStream()`. Frames of the reverse direction stream are passed to
// `ProcessReverseStream()`. On the client-side, this will typically be the
// near-end (capture) and far-end (render) streams, respectively. APM should be
// placed in the signal chain as close to the audio hardware abstraction layer
// (HAL) as possible.
//
// On the server-side, the reverse stream will normally not be used, with
// processing occurring on each incoming stream.
//
// Component interfaces follow a similar pattern and are accessed through
// corresponding getters in APM. All components are disabled at create-time,
// with default settings that are recommended for most situations. New settings
// can be applied without enabling a component. Enabling a component triggers
// memory allocation and initialization to allow it to start processing the
// streams.
//
// Thread safety is provided with the following assumptions to reduce locking
// overhead:
// 1. The stream getters and setters are called from the same thread as
// ProcessStream(). More precisely, stream functions are never called
// concurrently with ProcessStream().
// 2. Parameter getters are never called concurrently with the corresponding
// setter.
//
// APM accepts only linear PCM audio data in chunks of ~10 ms (see
// AudioProcessing::GetFrameSize() for details) and sample rates ranging from
// 8000 Hz to 384000 Hz. The int16 interfaces use interleaved data, while the
// float interfaces use deinterleaved data.
//
// Usage example, omitting error checking:
// rtc::scoped_refptr<AudioProcessing> apm = AudioProcessingBuilder().Create();
//
// AudioProcessing::Config config;
// config.echo_canceller.enabled = true;
// config.echo_canceller.mobile_mode = false;
//
// config.gain_controller1.enabled = true;
// config.gain_controller1.mode =
// AudioProcessing::Config::GainController1::kAdaptiveAnalog;
// config.gain_controller1.analog_level_minimum = 0;
// config.gain_controller1.analog_level_maximum = 255;
//
// config.gain_controller2.enabled = true;
//
// config.high_pass_filter.enabled = true;
//
// apm->ApplyConfig(config)
//
// // Start a voice call...
//
// // ... Render frame arrives bound for the audio HAL ...
// apm->ProcessReverseStream(render_frame);
//
// // ... Capture frame arrives from the audio HAL ...
// // Call required set_stream_ functions.
// apm->set_stream_delay_ms(delay_ms);
// apm->set_stream_analog_level(analog_level);
//
// apm->ProcessStream(capture_frame);
//
// // Call required stream_ functions.
// analog_level = apm->recommended_stream_analog_level();
// has_voice = apm->stream_has_voice();
//
// // Repeat render and capture processing for the duration of the call...
// // Start a new call...
// apm->Initialize();
//
// // Close the application...
// apm.reset();
//
class RTC_EXPORT AudioProcessing : public RefCountInterface {
public:
// The struct below constitutes the new parameter scheme for the audio
// processing. It is being introduced gradually and until it is fully
// introduced, it is prone to change.
// TODO(peah): Remove this comment once the new config scheme is fully rolled
// out.
//
// The parameters and behavior of the audio processing module are controlled
// by changing the default values in the AudioProcessing::Config struct.
// The config is applied by passing the struct to the ApplyConfig method.
//
// This config is intended to be used during setup, and to enable/disable
// top-level processing effects. Use during processing may cause undesired
// submodule resets, affecting the audio quality. Use the RuntimeSetting
// construct for runtime configuration.
struct RTC_EXPORT Config {
// Sets the properties of the audio processing pipeline.
struct RTC_EXPORT Pipeline {
// Ways to downmix a multi-channel track to mono.
enum class DownmixMethod {
kAverageChannels, // Average across channels.
kUseFirstChannel // Use the first channel.
};
// Maximum allowed processing rate used internally. May only be set to
// 32000 or 48000 and any differing values will be treated as 48000.
int maximum_internal_processing_rate = 48000;
// Allow multi-channel processing of render audio.
bool multi_channel_render = false;
// Allow multi-channel processing of capture audio when AEC3 is active
// or a custom AEC is injected.
bool multi_channel_capture = false;
// Indicates how to downmix multi-channel capture audio to mono (when
// needed).
DownmixMethod capture_downmix_method = DownmixMethod::kAverageChannels;
} pipeline;
// Enabled the pre-amplifier. It amplifies the capture signal
// before any other processing is done.
// TODO(webrtc:5298): Deprecate and use the pre-gain functionality in
// capture_level_adjustment instead.
struct PreAmplifier {
bool enabled = false;
float fixed_gain_factor = 1.0f;
} pre_amplifier;
// Functionality for general level adjustment in the capture pipeline. This
// should not be used together with the legacy PreAmplifier functionality.
struct CaptureLevelAdjustment {
bool operator==(const CaptureLevelAdjustment& rhs) const;
bool operator!=(const CaptureLevelAdjustment& rhs) const {
return !(*this == rhs);
}
bool enabled = false;
// The `pre_gain_factor` scales the signal before any processing is done.
float pre_gain_factor = 1.0f;
// The `post_gain_factor` scales the signal after all processing is done.
float post_gain_factor = 1.0f;
struct AnalogMicGainEmulation {
bool operator==(const AnalogMicGainEmulation& rhs) const;
bool operator!=(const AnalogMicGainEmulation& rhs) const {
return !(*this == rhs);
}
bool enabled = false;
// Initial analog gain level to use for the emulated analog gain. Must
// be in the range [0...255].
int initial_level = 255;
} analog_mic_gain_emulation;
} capture_level_adjustment;
struct HighPassFilter {
bool enabled = false;
bool apply_in_full_band = true;
} high_pass_filter;
struct EchoCanceller {
bool enabled = false;
bool mobile_mode = false;
bool export_linear_aec_output = false;
// Enforce the highpass filter to be on (has no effect for the mobile
// mode).
bool enforce_high_pass_filtering = true;
} echo_canceller;
// Enables background noise suppression.
struct NoiseSuppression {
bool enabled = false;
enum Level { kLow, kModerate, kHigh, kVeryHigh };
Level level = kModerate;
bool analyze_linear_aec_output_when_available = false;
} noise_suppression;
// TODO(bugs.webrtc.org/357281131): Deprecated. Stop using and remove.
// Enables transient suppression.
struct TransientSuppression {
bool enabled = false;
} transient_suppression;
// Enables automatic gain control (AGC) functionality.
// The automatic gain control (AGC) component brings the signal to an
// appropriate range. This is done by applying a digital gain directly and,
// in the analog mode, prescribing an analog gain to be applied at the audio
// HAL.
// Recommended to be enabled on the client-side.
struct RTC_EXPORT GainController1 {
bool operator==(const GainController1& rhs) const;
bool operator!=(const GainController1& rhs) const {
return !(*this == rhs);
}
bool enabled = false;
enum Mode {
// Adaptive mode intended for use if an analog volume control is
// available on the capture device. It will require the user to provide
// coupling between the OS mixer controls and AGC through the
// stream_analog_level() functions.
// It consists of an analog gain prescription for the audio device and a
// digital compression stage.
kAdaptiveAnalog,
// Adaptive mode intended for situations in which an analog volume
// control is unavailable. It operates in a similar fashion to the
// adaptive analog mode, but with scaling instead applied in the digital
// domain. As with the analog mode, it additionally uses a digital
// compression stage.
kAdaptiveDigital,
// Fixed mode which enables only the digital compression stage also used
// by the two adaptive modes.
// It is distinguished from the adaptive modes by considering only a
// short time-window of the input signal. It applies a fixed gain
// through most of the input level range, and compresses (gradually
// reduces gain with increasing level) the input signal at higher
// levels. This mode is preferred on embedded devices where the capture
// signal level is predictable, so that a known gain can be applied.
kFixedDigital
};
// Gain control mode in use; see the `Mode` enum above for the semantics of
// each value.
Mode mode = kAdaptiveAnalog;
// Sets the target peak level (or envelope) of the AGC in dBFs (decibels
// from digital full-scale). The convention is to use positive values. For
// instance, passing in a value of 3 corresponds to -3 dBFs, or a target
// level 3 dB below full-scale. Limited to [0, 31].
int target_level_dbfs = 3;
// Sets the maximum gain the digital compression stage may apply, in dB. A
// higher number corresponds to greater compression, while a value of 0
// will leave the signal uncompressed. Limited to [0, 90].
// For updates after APM setup, use a RuntimeSetting instead.
int compression_gain_db = 9;
// When enabled, the compression stage will hard limit the signal to the
// target level. Otherwise, the signal will be compressed but not limited
// above the target level.
bool enable_limiter = true;

// Enables the analog gain controller functionality.
struct AnalogGainController {
  bool enabled = true;
  // TODO(bugs.webrtc.org/7494): Deprecated. Stop using and remove.
  int startup_min_volume = 0;
  // Lowest analog microphone level that will be applied in response to
  // clipping.
  int clipped_level_min = 70;
  // If true, an adaptive digital gain is applied.
  bool enable_digital_adaptive = true;
  // Amount the microphone level is lowered with every clipping event.
  // Limited to (0, 255].
  int clipped_level_step = 15;
  // Proportion of clipped samples required to declare a clipping event.
  // Limited to (0.f, 1.f).
  float clipped_ratio_threshold = 0.1f;
  // Time in frames to wait after a clipping event before checking again.
  // Limited to values higher than 0.
  int clipped_wait_frames = 300;

  // Enables clipping prediction functionality, which can lower the analog
  // gain preemptively before clipping actually occurs.
  struct ClippingPredictor {
    bool enabled = false;
    enum Mode {
      // Clipping event prediction mode with fixed step estimation.
      kClippingEventPrediction,
      // Clipped peak estimation mode with adaptive step estimation.
      kAdaptiveStepClippingPeakPrediction,
      // Clipped peak estimation mode with fixed step estimation.
      kFixedStepClippingPeakPrediction,
    };
    Mode mode = kClippingEventPrediction;
    // Number of frames in the sliding analysis window.
    int window_length = 5;
    // Number of frames in the sliding reference window.
    int reference_window_length = 5;
    // Reference window delay (unit: number of frames).
    int reference_window_delay = 5;
    // Clipping prediction threshold (dBFS).
    float clipping_threshold = -1.0f;
    // Crest factor drop threshold (dB).
    float crest_factor_margin = 3.0f;
    // If true, the recommended clipped level step is used to modify the
    // analog gain. Otherwise, the predictor runs without affecting the
    // analog gain.
    bool use_predicted_step = true;
  } clipping_predictor;
} analog_gain_controller;
} gain_controller1;
// Parameters for AGC2, an Automatic Gain Control (AGC) sub-module which
// replaces the AGC sub-module parametrized by `gain_controller1`.
// AGC2 brings the captured audio signal to the desired level by combining
// three different controllers (namely, input volume controller, adaptive
// digital controller and fixed digital controller) and a limiter.
// TODO(bugs.webrtc.org:7494): Name `GainController` when AGC1 removed.
struct RTC_EXPORT GainController2 {
  bool operator==(const GainController2& rhs) const;
  bool operator!=(const GainController2& rhs) const {
    return !(*this == rhs);
  }

  // AGC2 must be created if and only if `enabled` is true.
  bool enabled = false;

  // Parameters for the input volume controller, which adjusts the input
  // volume applied when the audio is captured (e.g., microphone volume on
  // a soundcard, input volume on HAL).
  struct InputVolumeController {
    bool operator==(const InputVolumeController& rhs) const;
    bool operator!=(const InputVolumeController& rhs) const {
      return !(*this == rhs);
    }
    bool enabled = false;
  } input_volume_controller;

  // Parameters for the adaptive digital controller, which adjusts and
  // applies a digital gain after echo cancellation and after noise
  // suppression.
  struct RTC_EXPORT AdaptiveDigital {
    bool operator==(const AdaptiveDigital& rhs) const;
    bool operator!=(const AdaptiveDigital& rhs) const {
      return !(*this == rhs);
    }
    bool enabled = false;
    // Tuning constants for the adaptive digital gain. Units are encoded in
    // the names: dB, dB per second, and dBFS respectively.
    float headroom_db = 5.0f;
    float max_gain_db = 50.0f;
    float initial_gain_db = 15.0f;
    float max_gain_change_db_per_second = 6.0f;
    float max_output_noise_level_dbfs = -50.0f;
  } adaptive_digital;

  // Parameters for the fixed digital controller, which applies a fixed
  // digital gain after the adaptive digital controller and before the
  // limiter.
  struct FixedDigital {
    // By setting `gain_db` to a value greater than zero, the limiter can be
    // turned into a compressor that first applies a fixed gain.
    float gain_db = 0.0f;
  } fixed_digital;
} gain_controller2;
std::string ToString() const;
};
// Specifies the properties of a setting to be passed to AudioProcessing at
// runtime. A RuntimeSetting is a tagged value: `type()` identifies which of
// the typed getters below is valid for the instance.
class RuntimeSetting {
 public:
  enum class Type {
    kNotSpecified,
    kCapturePreGain,
    kCaptureCompressionGain,
    kCaptureFixedPostGain,
    kPlayoutVolumeChange,
    kCustomRenderProcessingRuntimeSetting,
    kPlayoutAudioDeviceChange,
    kCapturePostGain,
    kCaptureOutputUsed
  };

  // Play-out audio device properties.
  struct PlayoutAudioDeviceInfo {
    int id;          // Identifies the audio device.
    int max_volume;  // Maximum play-out volume.
  };

  RuntimeSetting() : type_(Type::kNotSpecified), value_(0.0f) {}
  ~RuntimeSetting() = default;

  static RuntimeSetting CreateCapturePreGain(float gain) {
    return {Type::kCapturePreGain, gain};
  }
  static RuntimeSetting CreateCapturePostGain(float gain) {
    return {Type::kCapturePostGain, gain};
  }

  // Corresponds to Config::GainController1::compression_gain_db, but for
  // runtime configuration. `gain_db` is range-checked ([0, 90]) in debug
  // builds only.
  static RuntimeSetting CreateCompressionGainDb(int gain_db) {
    RTC_DCHECK_GE(gain_db, 0);
    RTC_DCHECK_LE(gain_db, 90);
    return {Type::kCaptureCompressionGain, static_cast<float>(gain_db)};
  }

  // Corresponds to Config::GainController2::fixed_digital::gain_db, but for
  // runtime configuration. `gain_db` is range-checked ([0, 90]) in debug
  // builds only.
  static RuntimeSetting CreateCaptureFixedPostGain(float gain_db) {
    RTC_DCHECK_GE(gain_db, 0.0f);
    RTC_DCHECK_LE(gain_db, 90.0f);
    return {Type::kCaptureFixedPostGain, gain_db};
  }

  // Creates a runtime setting to notify play-out (aka render) audio device
  // changes.
  static RuntimeSetting CreatePlayoutAudioDeviceChange(
      PlayoutAudioDeviceInfo audio_device) {
    return {Type::kPlayoutAudioDeviceChange, audio_device};
  }

  // Creates a runtime setting to notify play-out (aka render) volume changes.
  // `volume` is the unnormalized volume. NOTE(review): the original comment
  // was cut short ("the maximum of which"); presumably the maximum is
  // device-dependent (cf. PlayoutAudioDeviceInfo::max_volume) — confirm.
  static RuntimeSetting CreatePlayoutVolumeChange(int volume) {
    return {Type::kPlayoutVolumeChange, volume};
  }

  static RuntimeSetting CreateCustomRenderSetting(float payload) {
    return {Type::kCustomRenderProcessingRuntimeSetting, payload};
  }

  static RuntimeSetting CreateCaptureOutputUsedSetting(
      bool capture_output_used) {
    return {Type::kCaptureOutputUsed, capture_output_used};
  }

  Type type() const { return type_; }

  // Getters do not return a value but instead modify the argument to protect
  // from implicit casting.
  void GetFloat(float* value) const {
    RTC_DCHECK(value);
    *value = value_.float_value;
  }
  void GetInt(int* value) const {
    RTC_DCHECK(value);
    *value = value_.int_value;
  }
  // NOTE(review): CreateCaptureOutputUsedSetting() stores its bool through
  // U(int value) (bool promotes to int, so no U(bool) ctor is needed), while
  // this reads `bool_value` — an inactive-union-member read. Works on common
  // ABIs but is technically UB in C++ — confirm upstream intent.
  void GetBool(bool* value) const {
    RTC_DCHECK(value);
    *value = value_.bool_value;
  }
  void GetPlayoutAudioDeviceInfo(PlayoutAudioDeviceInfo* value) const {
    RTC_DCHECK(value);
    *value = value_.playout_audio_device_info;
  }

 private:
  RuntimeSetting(Type id, float value) : type_(id), value_(value) {}
  RuntimeSetting(Type id, int value) : type_(id), value_(value) {}
  RuntimeSetting(Type id, PlayoutAudioDeviceInfo value)
      : type_(id), value_(value) {}
  Type type_;
  // Tagged-union payload; the active member is implied by `type_`.
  union U {
    U() {}
    U(int value) : int_value(value) {}
    U(float value) : float_value(value) {}
    U(PlayoutAudioDeviceInfo value) : playout_audio_device_info(value) {}
    float float_value;
    int int_value;
    bool bool_value;
    PlayoutAudioDeviceInfo playout_audio_device_info;
  } value_;
};
~AudioProcessing() override {}

// Initializes internal states, while retaining all user settings. This
// should be called before beginning to process a new audio stream. However,
// it is not necessary to call before processing the first stream after
// creation.
//
// It is also not necessary to call if the audio parameters (sample
// rate and number of channels) have changed. Passing updated parameters
// directly to `ProcessStream()` and `ProcessReverseStream()` is permissible.
// If the parameters are known at init-time though, they may be provided.
// TODO(webrtc:5298): Change to return void.
virtual int Initialize() = 0;

// The int16 interfaces require:
// - only `NativeRate`s be used
// - that the input, output and reverse rates must match
// - that `processing_config.output_stream()` matches
//   `processing_config.input_stream()`.
//
// The float interfaces accept arbitrary rates and support differing input and
// output layouts, but the output must have either one channel or the same
// number of channels as the input.
virtual int Initialize(const ProcessingConfig& processing_config) = 0;

// TODO(peah): This method is a temporary solution used to take control
// over the parameters in the audio processing module and is likely to change.
virtual void ApplyConfig(const Config& config) = 0;

// TODO(ajm): Only intended for internal use. Make private and friend the
// necessary classes?
virtual int proc_sample_rate_hz() const = 0;
virtual int proc_split_sample_rate_hz() const = 0;
virtual size_t num_input_channels() const = 0;
virtual size_t num_proc_channels() const = 0;
virtual size_t num_output_channels() const = 0;
virtual size_t num_reverse_channels() const = 0;

// Set to true when the output of AudioProcessing will be muted or in some
// other way not used. Ideally, the captured audio would still be processed,
// but some components may change behavior based on this information.
// Default false. This method takes a lock. To achieve this in a lock-less
// manner the PostRuntimeSetting can instead be used.
virtual void set_output_will_be_muted(bool muted) = 0;

// Enqueues a runtime setting.
virtual void SetRuntimeSetting(RuntimeSetting setting) = 0;

// Enqueues a runtime setting. Returns a bool indicating whether the
// enqueueing was successful.
virtual bool PostRuntimeSetting(RuntimeSetting setting) = 0;

// Accepts and produces a ~10 ms frame of interleaved 16 bit integer audio as
// specified in `input_config` and `output_config`. `src` and `dest` may use
// the same memory, if desired.
virtual int ProcessStream(const int16_t* const src,
                          const StreamConfig& input_config,
                          const StreamConfig& output_config,
                          int16_t* const dest) = 0;

// Accepts deinterleaved float audio with the range [-1, 1]. Each element of
// `src` points to a channel buffer, arranged according to `input_config`. At
// output, the channels will be arranged according to `output_config` in
// `dest`.
//
// The output must have one channel or as many channels as the input. `src`
// and `dest` may use the same memory, if desired.
virtual int ProcessStream(const float* const* src,
                          const StreamConfig& input_config,
                          const StreamConfig& output_config,
                          float* const* dest) = 0;

// Accepts and produces a ~10 ms frame of interleaved 16 bit integer audio for
// the reverse direction audio stream as specified in `input_config` and
// `output_config`. `src` and `dest` may use the same memory, if desired.
virtual int ProcessReverseStream(const int16_t* const src,
                                 const StreamConfig& input_config,
                                 const StreamConfig& output_config,
                                 int16_t* const dest) = 0;

// Accepts deinterleaved float audio with the range [-1, 1]. Each element of
// `src` points to a channel buffer, arranged according to `input_config`.
virtual int ProcessReverseStream(const float* const* src,
                                 const StreamConfig& input_config,
                                 const StreamConfig& output_config,
                                 float* const* dest) = 0;

// Accepts deinterleaved float audio with the range [-1, 1]. Each element
// of `data` points to a channel buffer, arranged according to
// `reverse_config`.
virtual int AnalyzeReverseStream(const float* const* data,
                                 const StreamConfig& reverse_config) = 0;

// Returns the most recently produced ~10 ms of the linear AEC output at a
// rate of 16 kHz. If there is more than one capture channel, a mono
// representation of the input is returned. Returns true/false to indicate
// whether an output returned.
virtual bool GetLinearAecOutput(
    rtc::ArrayView<std::array<float, 160>> linear_output) const = 0;
// This must be called prior to ProcessStream() if and only if adaptive analog
// gain control is enabled, to pass the current analog level from the audio
// HAL. Must be within the range [0, 255].
virtual void set_stream_analog_level(int level) = 0;

// When an analog mode is set, this should be called after
// `set_stream_analog_level()` and `ProcessStream()` to obtain the recommended
// new analog level for the audio HAL. It is the user's responsibility to
// apply this level.
virtual int recommended_stream_analog_level() const = 0;

// This must be called if and only if echo processing is enabled.
//
// Sets the `delay` in ms between ProcessReverseStream() receiving a far-end
// frame and ProcessStream() receiving a near-end frame containing the
// corresponding echo. On the client-side this can be expressed as
//   delay = (t_render - t_analyze) + (t_process - t_capture)
// where,
//   - t_analyze is the time a frame is passed to ProcessReverseStream() and
//     t_render is the time the first sample of the same frame is rendered by
//     the audio hardware.
//   - t_capture is the time the first sample of a frame is captured by the
//     audio hardware and t_process is the time the same frame is passed to
//     ProcessStream().
virtual int set_stream_delay_ms(int delay) = 0;
virtual int stream_delay_ms() const = 0;

// Call to signal that a key press occurred (true) or did not occur (false)
// with this chunk of audio.
virtual void set_stream_key_pressed(bool key_pressed) = 0;

// Creates and attaches a webrtc::AecDump for recording debugging
// information.
// The `worker_queue` may not be null and must outlive the created
// AecDump instance. |max_log_size_bytes == -1| means the log size
// will be unlimited. `handle` may not be null. The AecDump takes
// responsibility for `handle` and closes it in the destructor. A
// return value of true indicates that the file has been
// successfully opened, while a value of false indicates that
// opening the file failed.
virtual bool CreateAndAttachAecDump(
    absl::string_view file_name,
    int64_t max_log_size_bytes,
    absl::Nonnull<TaskQueueBase*> worker_queue) = 0;
virtual bool CreateAndAttachAecDump(
    absl::Nonnull<FILE*> handle,
    int64_t max_log_size_bytes,
    absl::Nonnull<TaskQueueBase*> worker_queue) = 0;

// TODO(webrtc:5298) Deprecated variant.
// Attaches provided webrtc::AecDump for recording debugging
// information. Log file and maximum file size logic is supposed to
// be handled by implementing instance of AecDump. Calling this
// method when another AecDump is attached resets the active AecDump
// with a new one. This causes the d-tor of the earlier AecDump to
// be called. The d-tor call may block until all pending logging
// tasks are completed.
virtual void AttachAecDump(std::unique_ptr<AecDump> aec_dump) = 0;

// If no AecDump is attached, this has no effect. If an AecDump is
// attached, its destructor is called. The d-tor may block until
// all pending logging tasks are completed.
virtual void DetachAecDump() = 0;

// Get audio processing statistics.
virtual AudioProcessingStats GetStatistics() = 0;

// TODO(webrtc:5298) Deprecated variant. The `has_remote_tracks` argument
// should be set if there are active remote tracks (this would usually be true
// during a call). If there are no remote tracks some of the stats will not be
// set by AudioProcessing, because they only make sense if there is at least
// one remote track.
virtual AudioProcessingStats GetStatistics(bool has_remote_tracks) = 0;

// Returns the last applied configuration.
virtual AudioProcessing::Config GetConfig() const = 0;
// Error codes returned by the APM methods above. Zero is success; negative
// values are errors, except for the non-fatal warning at the end.
enum Error {
  // Fatal errors.
  kNoError = 0,
  kUnspecifiedError = -1,
  kCreationFailedError = -2,
  kUnsupportedComponentError = -3,
  kUnsupportedFunctionError = -4,
  kNullPointerError = -5,
  kBadParameterError = -6,
  kBadSampleRateError = -7,
  kBadDataLengthError = -8,
  kBadNumberChannelsError = -9,
  kFileError = -10,
  kStreamParameterNotSetError = -11,
  kNotEnabledError = -12,
  // Warnings are non-fatal.
  // This results when a set_stream_ parameter is out of range. Processing
  // will continue, but the parameter may have been truncated.
  kBadStreamParameterWarning = -13
};

// Native rates supported by the integer interfaces.
enum NativeRate {
  kSampleRate8kHz = 8000,
  kSampleRate16kHz = 16000,
  kSampleRate32kHz = 32000,
  kSampleRate48kHz = 48000
};

// TODO(kwiberg): We currently need to support a compiler (Visual C++) that
// complains if we don't explicitly state the size of the array here. Remove
// the size when that's no longer the case.
static constexpr int kNativeSampleRatesHz[4] = {
    kSampleRate8kHz, kSampleRate16kHz, kSampleRate32kHz, kSampleRate48kHz};
static constexpr size_t kNumNativeSampleRates =
    arraysize(kNativeSampleRatesHz);
// Relies on kNativeSampleRatesHz being sorted in ascending order.
static constexpr int kMaxNativeSampleRateHz =
    kNativeSampleRatesHz[kNumNativeSampleRates - 1];

// APM processes audio in chunks of about 10 ms. See GetFrameSize() for
// details.
static constexpr int kChunkSizeMs = 10;

// Returns floor(sample_rate_hz/100): the number of samples per channel used
// as input and output to the audio processing module in calls to
// ProcessStream, ProcessReverseStream, AnalyzeReverseStream, and
// GetLinearAecOutput.
//
// This is exactly 10 ms for sample rates divisible by 100. For example:
// - 48000 Hz (480 samples per channel),
// - 44100 Hz (441 samples per channel),
// - 16000 Hz (160 samples per channel).
//
// Sample rates not divisible by 100 are received/produced in frames of
// approximately 10 ms. For example:
// - 22050 Hz (220 samples per channel, or ~9.98 ms per frame),
// - 11025 Hz (110 samples per channel, or ~9.98 ms per frame).
// These nondivisible sample rates yield lower audio quality compared to
// multiples of 100. Internal resampling to 10 ms frames causes a simulated
// clock drift effect which impacts the performance of (for example) echo
// cancellation.
static int GetFrameSize(int sample_rate_hz) { return sample_rate_hz / 100; }
};
// Experimental interface for a custom analysis submodule. Implementations
// observe the audio but do not modify it (note the const parameter).
class CustomAudioAnalyzer {
 public:
  // (Re-) Initializes the submodule.
  virtual void Initialize(int sample_rate_hz, int num_channels) = 0;
  // Analyzes the given capture or render signal. Read-only access.
  virtual void Analyze(const AudioBuffer* audio) = 0;
  // Returns a string representation of the module state.
  virtual std::string ToString() const = 0;

  virtual ~CustomAudioAnalyzer() {}
};
// Interface for a custom processing submodule, injectable via
// AudioProcessingBuilder as capture post-processing or render pre-processing.
class CustomProcessing {
 public:
  // (Re-)Initializes the submodule.
  virtual void Initialize(int sample_rate_hz, int num_channels) = 0;
  // Processes the given capture or render signal, in place.
  virtual void Process(AudioBuffer* audio) = 0;
  // Returns a string representation of the module state.
  virtual std::string ToString() const = 0;
  // Handles RuntimeSettings. TODO(webrtc:9262): make pure virtual
  // after updating dependencies.
  virtual void SetRuntimeSetting(AudioProcessing::RuntimeSetting setting);

  virtual ~CustomProcessing() {}
};
// Builder for AudioProcessing instances. Setters transfer ownership of the
// injected sub-modules into the builder; Create() moves them into the new
// APM instance. The setters return *this to allow chaining.
class RTC_EXPORT AudioProcessingBuilder {
 public:
  AudioProcessingBuilder();
  AudioProcessingBuilder(const AudioProcessingBuilder&) = delete;
  AudioProcessingBuilder& operator=(const AudioProcessingBuilder&) = delete;
  ~AudioProcessingBuilder();

  // Sets the APM configuration.
  AudioProcessingBuilder& SetConfig(const AudioProcessing::Config& config) {
    config_ = config;
    return *this;
  }

  // Sets the echo controller factory to inject when APM is created.
  AudioProcessingBuilder& SetEchoControlFactory(
      std::unique_ptr<EchoControlFactory> echo_control_factory) {
    echo_control_factory_ = std::move(echo_control_factory);
    return *this;
  }

  // Sets the capture post-processing sub-module to inject when APM is created.
  AudioProcessingBuilder& SetCapturePostProcessing(
      std::unique_ptr<CustomProcessing> capture_post_processing) {
    capture_post_processing_ = std::move(capture_post_processing);
    return *this;
  }

  // Sets the render pre-processing sub-module to inject when APM is created.
  AudioProcessingBuilder& SetRenderPreProcessing(
      std::unique_ptr<CustomProcessing> render_pre_processing) {
    render_pre_processing_ = std::move(render_pre_processing);
    return *this;
  }

  // Sets the echo detector to inject when APM is created.
  AudioProcessingBuilder& SetEchoDetector(
      rtc::scoped_refptr<EchoDetector> echo_detector) {
    echo_detector_ = std::move(echo_detector);
    return *this;
  }

  // Sets the capture analyzer sub-module to inject when APM is created.
  AudioProcessingBuilder& SetCaptureAnalyzer(
      std::unique_ptr<CustomAudioAnalyzer> capture_analyzer) {
    capture_analyzer_ = std::move(capture_analyzer);
    return *this;
  }

  // Creates an APM instance with the specified config or the default one if
  // unspecified. Injects the specified components transferring the ownership
  // to the newly created APM instance - i.e., except for the config, the
  // builder is reset to its initial state.
  rtc::scoped_refptr<AudioProcessing> Create();

 private:
  AudioProcessing::Config config_;
  std::unique_ptr<EchoControlFactory> echo_control_factory_;
  std::unique_ptr<CustomProcessing> capture_post_processing_;
  std::unique_ptr<CustomProcessing> render_pre_processing_;
  rtc::scoped_refptr<EchoDetector> echo_detector_;
  std::unique_ptr<CustomAudioAnalyzer> capture_analyzer_;
};
// Describes one audio stream endpoint: its sample rate and channel count.
// The per-channel frame count is derived from the rate via
// AudioProcessing::GetFrameSize() (~10 ms frames) and kept cached.
class StreamConfig {
 public:
  // sample_rate_hz: The sampling rate of the stream.
  // num_channels: The number of audio channels in the stream.
  StreamConfig(int sample_rate_hz = 0,
               size_t num_channels = 0)  // NOLINT(runtime/explicit)
      : sample_rate_hz_(sample_rate_hz),
        num_channels_(num_channels),
        num_frames_(calculate_frames(sample_rate_hz)) {}

  void set_sample_rate_hz(int value) {
    sample_rate_hz_ = value;
    // Keep the cached frame count in sync with the new rate.
    num_frames_ = calculate_frames(value);
  }
  void set_num_channels(size_t value) { num_channels_ = value; }

  int sample_rate_hz() const { return sample_rate_hz_; }

  // The number of channels in the stream.
  size_t num_channels() const { return num_channels_; }

  // Samples per channel in one ~10 ms frame.
  size_t num_frames() const { return num_frames_; }
  // Total samples across all channels in one frame.
  size_t num_samples() const { return num_channels_ * num_frames_; }

  // `num_frames_` is derived from the rate, so comparing rate and channel
  // count is sufficient here.
  bool operator==(const StreamConfig& other) const {
    return sample_rate_hz_ == other.sample_rate_hz_ &&
           num_channels_ == other.num_channels_;
  }
  bool operator!=(const StreamConfig& other) const { return !(*this == other); }

 private:
  static size_t calculate_frames(int sample_rate_hz) {
    return static_cast<size_t>(AudioProcessing::GetFrameSize(sample_rate_hz));
  }

  int sample_rate_hz_;
  size_t num_channels_;
  size_t num_frames_;
};
class ProcessingConfig {
public:
enum StreamName {
kInputStream,
kOutputStream,
kReverseInputStream,
kReverseOutputStream,
kNumStreamNames,
};
const StreamConfig& input_stream() const {
return streams[StreamName::kInputStream];
}
const StreamConfig& output_stream() const {
return streams[StreamName::kOutputStream];
}
const StreamConfig& reverse_input_stream() const {
return streams[StreamName::kReverseInputStream];
}
const StreamConfig& reverse_output_stream() const {
return streams[StreamName::kReverseOutputStream];
}
StreamConfig& input_stream() { return streams[StreamName::kInputStream]; }
StreamConfig& output_stream() { return streams[StreamName::kOutputStream]; }
StreamConfig& reverse_input_stream() {
return streams[StreamName::kReverseInputStream];
}
StreamConfig& reverse_output_stream() {
return streams[StreamName::kReverseOutputStream];
}
bool operator==(const ProcessingConfig& other) const {
for (int i = 0; i < StreamName::kNumStreamNames; ++i) {
if (this->streams[i] != other.streams[i]) {
return false;
}
}
return true;
}
bool operator!=(const ProcessingConfig& other) const {
return !(*this == other);
}
StreamConfig streams[StreamName::kNumStreamNames];
};
// Interface for an echo detector submodule. Reference-counted so it can be
// shared with the APM instance it is injected into.
class EchoDetector : public RefCountInterface {
 public:
  // (Re-)Initializes the submodule.
  virtual void Initialize(int capture_sample_rate_hz,
                          int num_capture_channels,
                          int render_sample_rate_hz,
                          int num_render_channels) = 0;

  // Analysis (not changing) of the first channel of the render signal.
  virtual void AnalyzeRenderAudio(rtc::ArrayView<const float> render_audio) = 0;

  // Analysis (not changing) of the capture signal.
  virtual void AnalyzeCaptureAudio(
      rtc::ArrayView<const float> capture_audio) = 0;

  // Detection results. NOTE(review): presumably an unset optional means the
  // metric has not been computed yet — confirm with implementations.
  struct Metrics {
    std::optional<double> echo_likelihood;
    std::optional<double> echo_likelihood_recent_max;
  };

  // Collect current metrics from the echo detector.
  virtual Metrics GetMetrics() const = 0;
};
} // namespace webrtc
#endif // API_AUDIO_AUDIO_PROCESSING_H_

View File

@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_processing/include/audio_processing_statistics.h"
#include "api/audio/audio_processing_statistics.h"
namespace webrtc {

View File

@ -0,0 +1,68 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_AUDIO_PROCESSING_STATISTICS_H_
#define API_AUDIO_AUDIO_PROCESSING_STATISTICS_H_
#include <stdint.h>
#include <optional>
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// This version of the stats uses Optionals, it will replace the regular
// AudioProcessingStatistics struct. An unset optional indicates the metric
// was not produced for the period being reported.
struct RTC_EXPORT AudioProcessingStats {
  AudioProcessingStats();
  AudioProcessingStats(const AudioProcessingStats& other);
  ~AudioProcessingStats();

  // Deprecated.
  // TODO(bugs.webrtc.org/11226): Remove.
  // True if voice is detected in the last capture frame, after processing.
  // It is conservative in flagging audio as speech, with low likelihood of
  // incorrectly flagging a frame as voice.
  // Only reported if voice detection is enabled in AudioProcessing::Config.
  std::optional<bool> voice_detected;

  // AEC Statistics.
  // ERL = 10log_10(P_far / P_echo)
  std::optional<double> echo_return_loss;
  // ERLE = 10log_10(P_echo / P_out)
  std::optional<double> echo_return_loss_enhancement;
  // Fraction of time that the AEC linear filter is divergent, in a 1-second
  // non-overlapped aggregation window.
  std::optional<double> divergent_filter_fraction;

  // The delay metrics consists of the delay median and standard deviation. It
  // also consists of the fraction of delay estimates that can make the echo
  // cancellation perform poorly. The values are aggregated until the first
  // call to `GetStatistics()` and afterwards aggregated and updated every
  // second. Note that if there are several clients pulling metrics from
  // `GetStatistics()` during a session the first call from any of them will
  // change to one second aggregation window for all.
  std::optional<int32_t> delay_median_ms;
  std::optional<int32_t> delay_standard_deviation_ms;

  // Residual echo detector likelihood.
  std::optional<double> residual_echo_likelihood;
  // Maximum residual echo likelihood from the last time period.
  std::optional<double> residual_echo_likelihood_recent_max;

  // The instantaneous delay estimate produced in the AEC. The unit is in
  // milliseconds and the value is the instantaneous value at the time of the
  // call to `GetStatistics()`.
  std::optional<int32_t> delay_ms;
};
} // namespace webrtc
#endif // API_AUDIO_AUDIO_PROCESSING_STATISTICS_H_

View File

@ -0,0 +1,269 @@
/*
* Copyright (c) 2024 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_AUDIO_VIEW_H_
#define API_AUDIO_AUDIO_VIEW_H_
#include "api/array_view.h"
#include "api/audio/channel_layout.h"
#include "rtc_base/checks.h"
namespace webrtc {
// This file contains 3 types of view classes:
//
// * MonoView<>: A single channel contiguous buffer of samples.
//
// * InterleavedView<>: Channel samples are interleaved (side-by-side) in
// the buffer. A single channel InterleavedView<> is the same thing as a
// MonoView<>
//
// * DeinterleavedView<>: Each channel's samples are contiguous within the
// buffer. Channels can be enumerated and accessing the individual channel
// data is done via MonoView<>.
//
// The views are comparable to and built on rtc::ArrayView<> but add
// audio specific properties for the dimensions of the buffer and the above
// specialized [de]interleaved support.
//
// There are also a few generic utility functions that can simplify
// generic code for supporting more than one type of view.
// MonoView<> represents a view over a single contiguous, audio buffer. This
// can be either a single channel (mono) interleaved buffer (e.g. AudioFrame),
// or a de-interleaved channel (e.g. from AudioBuffer).
template <typename T>
using MonoView = rtc::ArrayView<T>;
// InterleavedView<> is a view over an interleaved audio buffer (e.g. from
// AudioFrame). The view does not own the underlying memory.
template <typename T>
class InterleavedView {
 public:
  using value_type = T;

  InterleavedView() = default;

  // Wraps `data`, which must hold `num_channels * samples_per_channel`
  // elements. Channel count and the non-empty invariant are checked in debug
  // builds only.
  template <typename U>
  InterleavedView(U* data, size_t samples_per_channel, size_t num_channels)
      : num_channels_(num_channels),
        samples_per_channel_(samples_per_channel),
        data_(data, num_channels * samples_per_channel) {
    RTC_DCHECK_LE(num_channels_, kMaxConcurrentChannels);
    RTC_DCHECK(num_channels_ == 0u || samples_per_channel_ != 0u);
  }

  // Construct an InterleavedView from a C-style array. Samples per channels
  // is calculated based on the array size / num_channels.
  template <typename U, size_t N>
  InterleavedView(U (&array)[N],  // NOLINT
                  size_t num_channels)
      : InterleavedView(array, N / num_channels, num_channels) {
    RTC_DCHECK_EQ(N % num_channels, 0u);
  }

  // Converting copy constructor; allows whatever element-type conversions the
  // underlying rtc::ArrayView supports (e.g. T -> const T).
  template <typename U>
  InterleavedView(const InterleavedView<U>& other)
      : num_channels_(other.num_channels()),
        samples_per_channel_(other.samples_per_channel()),
        data_(other.data()) {}

  size_t num_channels() const { return num_channels_; }
  size_t samples_per_channel() const { return samples_per_channel_; }
  rtc::ArrayView<T> data() const { return data_; }
  bool empty() const { return data_.empty(); }
  size_t size() const { return data_.size(); }

  // Returns the buffer as a MonoView<>. Only valid for single-channel views
  // (enforced in debug builds).
  MonoView<T> AsMono() const {
    RTC_DCHECK_EQ(num_channels(), 1u);
    RTC_DCHECK_EQ(data_.size(), samples_per_channel_);
    return data_;
  }

  // A simple wrapper around memcpy that includes checks for properties.
  // NOTE(review): `&data_[0]`/`&data[0]` index into the views, so copying
  // between empty views would hit ArrayView's element access on index 0 —
  // presumably callers never copy empty views; confirm.
  // TODO(tommi): Consider if this can be utility function for both interleaved
  // and deinterleaved views.
  template <typename U>
  void CopyFrom(const InterleavedView<U>& source) {
    static_assert(sizeof(T) == sizeof(U), "");
    RTC_DCHECK_EQ(num_channels(), source.num_channels());
    RTC_DCHECK_EQ(samples_per_channel(), source.samples_per_channel());
    RTC_DCHECK_GE(data_.size(), source.data().size());
    const auto data = source.data();
    memcpy(&data_[0], &data[0], data.size() * sizeof(U));
  }

  // Element access and iteration, all delegated to the underlying ArrayView.
  T& operator[](size_t idx) const { return data_[idx]; }
  T* begin() const { return data_.begin(); }
  T* end() const { return data_.end(); }
  const T* cbegin() const { return data_.cbegin(); }
  const T* cend() const { return data_.cend(); }
  std::reverse_iterator<T*> rbegin() const { return data_.rbegin(); }
  std::reverse_iterator<T*> rend() const { return data_.rend(); }
  std::reverse_iterator<const T*> crbegin() const { return data_.crbegin(); }
  std::reverse_iterator<const T*> crend() const { return data_.crend(); }

 private:
  // TODO(tommi): Consider having these both be stored as uint16_t to
  // save a few bytes per view. Use `dchecked_cast` to support size_t during
  // construction.
  size_t num_channels_ = 0u;
  size_t samples_per_channel_ = 0u;
  rtc::ArrayView<T> data_;
};
// DeinterleavedView<> is a view over a planar buffer: each channel's samples
// are contiguous, and channels are exposed as MonoView<>s. The view does not
// own the underlying memory.
template <typename T>
class DeinterleavedView {
 public:
  using value_type = T;

  DeinterleavedView() = default;

  // Wraps `data`, which must hold `num_channels * samples_per_channel`
  // elements laid out channel-by-channel.
  // NOTE(review): unlike InterleavedView's equivalent constructor, there are
  // no RTC_DCHECKs here — confirm whether that asymmetry is intentional.
  template <typename U>
  DeinterleavedView(U* data, size_t samples_per_channel, size_t num_channels)
      : num_channels_(num_channels),
        samples_per_channel_(samples_per_channel),
        data_(data, num_channels * samples_per_channel_) {}

  // Converting copy constructor; allows whatever element-type conversions the
  // underlying rtc::ArrayView supports (e.g. T -> const T).
  template <typename U>
  DeinterleavedView(const DeinterleavedView<U>& other)
      : num_channels_(other.num_channels()),
        samples_per_channel_(other.samples_per_channel()),
        data_(other.data()) {}

  // Returns a deinterleaved channel where `idx` is the zero based index,
  // in the range [0 .. num_channels()-1].
  MonoView<T> operator[](size_t idx) const {
    RTC_DCHECK_LT(idx, num_channels_);
    return MonoView<T>(&data_[idx * samples_per_channel_],
                       samples_per_channel_);
  }

  size_t num_channels() const { return num_channels_; }
  size_t samples_per_channel() const { return samples_per_channel_; }
  rtc::ArrayView<T> data() const { return data_; }
  bool empty() const { return data_.empty(); }
  size_t size() const { return data_.size(); }

  // Returns the first (and possibly only) channel.
  MonoView<T> AsMono() const {
    RTC_DCHECK_GE(num_channels(), 1u);
    return (*this)[0];
  }

 private:
  // TODO(tommi): Consider having these be stored as uint16_t to save a few
  // bytes per view. Use `dchecked_cast` to support size_t during construction.
  size_t num_channels_ = 0u;
  size_t samples_per_channel_ = 0u;
  rtc::ArrayView<T> data_;
};
// Returns the number of channels a view represents. A MonoView is by
// definition single-channel; the multi-channel views report their stored
// channel count.
template <typename T>
constexpr size_t NumChannels(const MonoView<T>&) {
  return 1u;
}
template <typename T>
size_t NumChannels(const InterleavedView<T>& v) {
  return v.num_channels();
}
template <typename T>
size_t NumChannels(const DeinterleavedView<T>& v) {
  return v.num_channels();
}
// A MonoView always represents exactly one channel.
template <typename T>
constexpr bool IsMono(const MonoView<T>&) {
  return true;
}
// Compile-time classification of view layout: MonoView and InterleavedView
// carry interleaved samples, DeinterleavedView carries planar samples.
template <typename T>
constexpr bool IsInterleavedView(const MonoView<T>&) {
  return true;
}
template <typename T>
constexpr bool IsInterleavedView(const InterleavedView<T>&) {
  return true;
}
// NOTE(review): this overload only matches DeinterleavedView<const T>
// (mirrors upstream); a non-const DeinterleavedView<T> does not bind here.
template <typename T>
constexpr bool IsInterleavedView(const DeinterleavedView<const T>&) {
  return false;
}
template <typename T>
bool IsMono(const InterleavedView<T>& view) {
return NumChannels(view) == 1u;
}
template <typename T>
bool IsMono(const DeinterleavedView<T>& view) {
return NumChannels(view) == 1u;
}
// Returns the per-channel sample count of a view. For a MonoView that is
// simply its total size.
template <typename T>
size_t SamplesPerChannel(const MonoView<T>& v) {
  return v.size();
}
template <typename T>
size_t SamplesPerChannel(const InterleavedView<T>& v) {
  return v.samples_per_channel();
}
template <typename T>
size_t SamplesPerChannel(const DeinterleavedView<T>& v) {
  return v.samples_per_channel();
}
// A simple wrapper around memcpy that includes checks for properties.
// The parameter order is the same as for memcpy(), first destination then
// source.
template <typename D, typename S>
void CopySamples(D& destination, const S& source) {
  // Both views must use samples of the same width; raw bytes are copied.
  static_assert(
      sizeof(typename D::value_type) == sizeof(typename S::value_type), "");
  // Here we'd really like to do
  // static_assert(IsInterleavedView(destination) == IsInterleavedView(source),
  //               "");
  // but the compiler doesn't like it inside this template function for
  // some reason. The following check is an approximation but unfortunately
  // means that copying between a MonoView and single channel interleaved or
  // deinterleaved views wouldn't work.
  //   static_assert(sizeof(destination) == sizeof(source),
  //                 "Incompatible view types");
  RTC_DCHECK_EQ(NumChannels(destination), NumChannels(source));
  RTC_DCHECK_EQ(SamplesPerChannel(destination), SamplesPerChannel(source));
  // The destination may be larger than the source; only source.size()
  // samples are written.
  RTC_DCHECK_GE(destination.size(), source.size());
  memcpy(&destination[0], &source[0],
         source.size() * sizeof(typename S::value_type));
}
// Zeroes every sample in `view`. Equivalent to a plain memset() over the
// whole buffer, with the byte count derived automatically from the element
// count and the sample type.
template <typename T>
void ClearSamples(T& view) {
  const size_t num_bytes = view.size() * sizeof(typename T::value_type);
  memset(&view[0], 0, num_bytes);
}
// Zeroes only the first `sample_count` samples of `view`. `sample_count`
// must not exceed the view's size.
template <typename T>
void ClearSamples(T& view, size_t sample_count) {
  RTC_DCHECK_LE(sample_count, view.size());
  const size_t num_bytes = sample_count * sizeof(typename T::value_type);
  memset(&view[0], 0, num_bytes);
}
} // namespace webrtc
#endif // API_AUDIO_AUDIO_VIEW_H_

View File

@ -10,10 +10,15 @@
#include "api/audio_codecs/audio_decoder.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "api/array_view.h"
#include "rtc_base/buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/sanitizer.h"
#include "rtc_base/trace_event.h"
@ -32,14 +37,14 @@ class OldStyleEncodedFrame final : public AudioDecoder::EncodedAudioFrame {
return ret < 0 ? 0 : static_cast<size_t>(ret);
}
absl::optional<DecodeResult> Decode(
std::optional<DecodeResult> Decode(
rtc::ArrayView<int16_t> decoded) const override {
auto speech_type = AudioDecoder::kSpeech;
const int ret = decoder_->Decode(
payload_.data(), payload_.size(), decoder_->SampleRateHz(),
decoded.size() * sizeof(int16_t), decoded.data(), &speech_type);
return ret < 0 ? absl::nullopt
: absl::optional<DecodeResult>(
return ret < 0 ? std::nullopt
: std::optional<DecodeResult>(
{static_cast<size_t>(ret), speech_type});
}

View File

@ -15,9 +15,9 @@
#include <stdint.h>
#include <memory>
#include <optional>
#include <vector>
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "rtc_base/buffer.h"
@ -57,11 +57,11 @@ class AudioDecoder {
// Decodes this frame of audio and writes the result in `decoded`.
// `decoded` must be large enough to store as many samples as indicated by a
// call to Duration() . On success, returns an absl::optional containing the
// call to Duration() . On success, returns an std::optional containing the
// total number of samples across all channels, as well as whether the
// decoder produced comfort noise or speech. On failure, returns an empty
// absl::optional. Decode may be called at most once per frame object.
virtual absl::optional<DecodeResult> Decode(
// std::optional. Decode may be called at most once per frame object.
virtual std::optional<DecodeResult> Decode(
rtc::ArrayView<int16_t> decoded) const = 0;
};

View File

@ -10,6 +10,15 @@
#include "api/audio_codecs/audio_encoder.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include "api/array_view.h"
#include "api/call/bitrate_allocation.h"
#include "rtc_base/buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/trace_event.h"
@ -87,12 +96,12 @@ void AudioEncoder::OnReceivedUplinkRecoverablePacketLossFraction(
}
void AudioEncoder::OnReceivedTargetAudioBitrate(int target_audio_bitrate_bps) {
OnReceivedUplinkBandwidth(target_audio_bitrate_bps, absl::nullopt);
OnReceivedUplinkBandwidth(target_audio_bitrate_bps, std::nullopt);
}
void AudioEncoder::OnReceivedUplinkBandwidth(
int target_audio_bitrate_bps,
absl::optional<int64_t> bwe_period_ms) {}
std::optional<int64_t> bwe_period_ms) {}
void AudioEncoder::OnReceivedUplinkAllocation(BitrateAllocationUpdate update) {
OnReceivedUplinkBandwidth(update.target_bitrate.bps(),

View File

@ -11,15 +11,19 @@
#ifndef API_AUDIO_CODECS_AUDIO_ENCODER_H_
#define API_AUDIO_CODECS_AUDIO_ENCODER_H_
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/call/bitrate_allocation.h"
#include "api/units/data_rate.h"
#include "api/units/time_delta.h"
#include "rtc_base/buffer.h"
@ -35,30 +39,30 @@ struct ANAStats {
// Number of actions taken by the ANA bitrate controller since the start of
// the call. If this value is not set, it indicates that the bitrate
// controller is disabled.
absl::optional<uint32_t> bitrate_action_counter;
std::optional<uint32_t> bitrate_action_counter;
// Number of actions taken by the ANA channel controller since the start of
// the call. If this value is not set, it indicates that the channel
// controller is disabled.
absl::optional<uint32_t> channel_action_counter;
std::optional<uint32_t> channel_action_counter;
// Number of actions taken by the ANA DTX controller since the start of the
// call. If this value is not set, it indicates that the DTX controller is
// disabled.
absl::optional<uint32_t> dtx_action_counter;
std::optional<uint32_t> dtx_action_counter;
// Number of actions taken by the ANA FEC controller since the start of the
// call. If this value is not set, it indicates that the FEC controller is
// disabled.
absl::optional<uint32_t> fec_action_counter;
std::optional<uint32_t> fec_action_counter;
// Number of times the ANA frame length controller decided to increase the
// frame length since the start of the call. If this value is not set, it
// indicates that the frame length controller is disabled.
absl::optional<uint32_t> frame_length_increase_counter;
std::optional<uint32_t> frame_length_increase_counter;
// Number of times the ANA frame length controller decided to decrease the
// frame length since the start of the call. If this value is not set, it
// indicates that the frame length controller is disabled.
absl::optional<uint32_t> frame_length_decrease_counter;
std::optional<uint32_t> frame_length_decrease_counter;
// The uplink packet loss fractions as set by the ANA FEC controller. If this
// value is not set, it indicates that the ANA FEC controller is not active.
absl::optional<float> uplink_packet_loss_fraction;
std::optional<float> uplink_packet_loss_fraction;
};
// This is the interface class for encoders in AudioCoding module. Each codec
@ -219,7 +223,7 @@ class AudioEncoder {
// Provides target audio bitrate and corresponding probing interval of
// the bandwidth estimator to this encoder to allow it to adapt.
virtual void OnReceivedUplinkBandwidth(int target_audio_bitrate_bps,
absl::optional<int64_t> bwe_period_ms);
std::optional<int64_t> bwe_period_ms);
// Provides target audio bitrate and corresponding probing interval of
// the bandwidth estimator to this encoder to allow it to adapt.
@ -240,12 +244,19 @@ class AudioEncoder {
// Get statistics related to audio network adaptation.
virtual ANAStats GetANAStats() const;
// The range of frame lengths that are supported or nullopt if there's no sch
// information. This is used to calculated the full bitrate range, including
// overhead.
virtual absl::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
// The range of frame lengths that are supported or nullopt if there's no such
// information. This is used together with the bitrate range to calculate the
// full bitrate range, including overhead.
virtual std::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
const = 0;
// The range of payload bitrates that are supported. This is used together
// with the frame length range to calculate the full bitrate range, including
// overhead.
virtual std::optional<std::pair<DataRate, DataRate>> GetBitrateRange() const {
return std::nullopt;
}
// The maximum number of audio channels supported by WebRTC encoders.
static constexpr int kMaxNumberOfChannels = 24;

View File

@ -11,6 +11,7 @@
#ifndef API_FUNCTION_VIEW_H_
#define API_FUNCTION_VIEW_H_
#include <cstddef>
#include <type_traits>
#include <utility>

View File

@ -13,9 +13,12 @@
#include <type_traits>
#include <utility>
#include "absl/base/nullability.h"
#include "api/ref_count.h"
#include "api/scoped_refptr.h"
#include "rtc_base/ref_counted_object.h"
namespace rtc {
namespace webrtc {
namespace webrtc_make_ref_counted_internal {
// Determines if the given class has AddRef and Release methods.
@ -83,7 +86,7 @@ template <
typename std::enable_if<std::is_convertible_v<T*, RefCountInterface*> &&
std::is_abstract_v<T>,
T>::type* = nullptr>
scoped_refptr<T> make_ref_counted(Args&&... args) {
absl::Nonnull<scoped_refptr<T>> make_ref_counted(Args&&... args) {
return scoped_refptr<T>(new RefCountedObject<T>(std::forward<Args>(args)...));
}
@ -96,7 +99,7 @@ template <
!std::is_convertible_v<T*, RefCountInterface*> &&
webrtc_make_ref_counted_internal::HasAddRefAndRelease<T>::value,
T>::type* = nullptr>
scoped_refptr<T> make_ref_counted(Args&&... args) {
absl::Nonnull<scoped_refptr<T>> make_ref_counted(Args&&... args) {
return scoped_refptr<T>(new T(std::forward<Args>(args)...));
}
@ -110,11 +113,18 @@ template <
!webrtc_make_ref_counted_internal::HasAddRefAndRelease<T>::value,
T>::type* = nullptr>
scoped_refptr<FinalRefCountedObject<T>> make_ref_counted(Args&&... args) {
absl::Nonnull<scoped_refptr<FinalRefCountedObject<T>>> make_ref_counted(
Args&&... args) {
return scoped_refptr<FinalRefCountedObject<T>>(
new FinalRefCountedObject<T>(std::forward<Args>(args)...));
}
} // namespace webrtc
namespace rtc {
// Backwards compatible alias.
// TODO: bugs.webrtc.org/42225969 - deprecate and remove.
using ::webrtc::make_ref_counted;
} // namespace rtc
#endif // API_MAKE_REF_COUNTED_H_

View File

@ -1,5 +1,7 @@
api_sources = [
'audio/audio_frame.cc',
'audio/audio_processing.cc',
'audio/audio_processing_statistics.cc',
'audio/channel_layout.cc',
'audio/echo_canceller3_config.cc',
'audio_codecs/audio_decoder.cc',

67
webrtc/api/ref_count.h Normal file
View File

@ -0,0 +1,67 @@
/*
* Copyright 2011 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_REF_COUNT_H_
#define API_REF_COUNT_H_
namespace webrtc {
// Refcounted objects should implement the following informal interface:
//
// void AddRef() const ;
// RefCountReleaseStatus Release() const;
//
// You may access members of a reference-counted object, including the AddRef()
// and Release() methods, only if you already own a reference to it, or if
// you're borrowing someone else's reference. (A newly created object is a
// special case: the reference count is zero on construction, and the code that
// creates the object should immediately call AddRef(), bringing the reference
// count from zero to one, e.g., by constructing an rtc::scoped_refptr).
//
// AddRef() creates a new reference to the object.
//
// Release() releases a reference to the object; the caller now has one less
// reference than before the call. Returns kDroppedLastRef if the number of
// references dropped to zero because of this (in which case the object destroys
// itself). Otherwise, returns kOtherRefsRemained, to signal that at the precise
// time the caller's reference was dropped, other references still remained (but
// if other threads own references, this may of course have changed by the time
// Release() returns).
//
// The caller of Release() must treat it in the same way as a delete operation:
// Regardless of the return value from Release(), the caller mustn't access the
// object. The object might still be alive, due to references held by other
// users of the object, but the object can go away at any time, e.g., as the
// result of another thread calling Release().
//
// Calling AddRef() and Release() manually is discouraged. It's recommended to
// use rtc::scoped_refptr to manage all pointers to reference counted objects.
// Note that rtc::scoped_refptr depends on compile-time duck-typing; formally
// implementing the below RefCountInterface is not required.
enum class RefCountReleaseStatus { kDroppedLastRef, kOtherRefsRemained };
// Interfaces where refcounting is part of the public api should
// inherit this abstract interface. The implementation of these
// methods is usually provided by the RefCountedObject template class,
// applied as a leaf in the inheritance tree.
class RefCountInterface {
 public:
  // Increments the reference count.
  virtual void AddRef() const = 0;
  // Decrements the reference count; returns kDroppedLastRef when this call
  // released the final reference (in which case the object destroys itself).
  virtual RefCountReleaseStatus Release() const = 0;
  // Non-public destructor, because Release() has exclusive responsibility for
  // destroying the object.
 protected:
  virtual ~RefCountInterface() {}
};
} // namespace webrtc
#endif // API_REF_COUNT_H_

View File

@ -12,9 +12,10 @@
#include <type_traits>
#include "api/ref_count.h"
#include "rtc_base/ref_counter.h"
namespace rtc {
namespace webrtc {
class RefCountedBase {
public:
@ -93,6 +94,14 @@ class RefCountedNonVirtual {
mutable webrtc::webrtc_impl::RefCounter ref_count_{0};
};
} // namespace webrtc
// Backwards compatible aliases.
// TODO: https://issues.webrtc.org/42225969 - deprecate and remove.
namespace rtc {
using RefCountedBase = webrtc::RefCountedBase;
template <typename T>
using RefCountedNonVirtual = webrtc::RefCountedNonVirtual<T>;
} // namespace rtc
#endif // API_REF_COUNTED_BASE_H_

View File

@ -10,8 +10,20 @@
#include "api/rtp_headers.h"
#include "api/video/video_content_type.h"
#include "api/video/video_rotation.h"
#include "rtc_base/checks.h"
namespace webrtc {
AudioLevel::AudioLevel() : voice_activity_(false), audio_level_(0) {}
AudioLevel::AudioLevel(bool voice_activity, int audio_level)
: voice_activity_(voice_activity), audio_level_(audio_level) {
RTC_CHECK_GE(audio_level, 0);
RTC_CHECK_LE(audio_level, 127);
}
RTPHeaderExtension::RTPHeaderExtension()
: hasTransmissionTimeOffset(false),
transmissionTimeOffset(0),
@ -19,9 +31,6 @@ RTPHeaderExtension::RTPHeaderExtension()
absoluteSendTime(0),
hasTransportSequenceNumber(false),
transportSequenceNumber(0),
hasAudioLevel(false),
voiceActivity(false),
audioLevel(0),
hasVideoRotation(false),
videoRotation(kVideoRotation_0),
hasVideoContentType(false),

View File

@ -14,15 +14,16 @@
#include <stddef.h>
#include <stdint.h>
#include <optional>
#include <string>
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/units/timestamp.h"
#include "api/video/color_space.h"
#include "api/video/video_content_type.h"
#include "api/video/video_rotation.h"
#include "api/video/video_timing.h"
#include "rtc_base/checks.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
@ -74,7 +75,30 @@ struct AbsoluteCaptureTime {
// systems NTP clock:
//
// Capture NTP Clock = Sender NTP Clock + Capture Clock Offset
absl::optional<int64_t> estimated_capture_clock_offset;
std::optional<int64_t> estimated_capture_clock_offset;
};
// The audio level extension is used to indicate the voice activity and the
// audio level of the payload in the RTP stream. See:
// https://tools.ietf.org/html/rfc6464#section-3.
class AudioLevel {
public:
AudioLevel();
AudioLevel(bool voice_activity, int audio_level);
AudioLevel(const AudioLevel& other) = default;
AudioLevel& operator=(const AudioLevel& other) = default;
// Flag indicating whether the encoder believes the audio packet contains
// voice activity.
bool voice_activity() const { return voice_activity_; }
// Audio level in -dBov. Values range from 0 to 127, representing 0 to -127
// dBov. 127 represents digital silence.
int level() const { return audio_level_; }
private:
bool voice_activity_;
int audio_level_;
};
inline bool operator==(const AbsoluteCaptureTime& lhs,
@ -107,16 +131,18 @@ struct RTPHeaderExtension {
int32_t transmissionTimeOffset;
bool hasAbsoluteSendTime;
uint32_t absoluteSendTime;
absl::optional<AbsoluteCaptureTime> absolute_capture_time;
std::optional<AbsoluteCaptureTime> absolute_capture_time;
bool hasTransportSequenceNumber;
uint16_t transportSequenceNumber;
absl::optional<FeedbackRequest> feedback_request;
std::optional<FeedbackRequest> feedback_request;
// Audio Level includes both level in dBov and voiced/unvoiced bit. See:
// https://tools.ietf.org/html/rfc6464#section-3
bool hasAudioLevel;
bool voiceActivity;
uint8_t audioLevel;
std::optional<AudioLevel> audio_level() const { return audio_level_; }
void set_audio_level(std::optional<AudioLevel> audio_level) {
audio_level_ = audio_level;
}
// For Coordination of Video Orientation. See
// http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
@ -124,7 +150,7 @@ struct RTPHeaderExtension {
bool hasVideoRotation;
VideoRotation videoRotation;
// TODO(ilnik): Refactor this and one above to be absl::optional() and remove
// TODO(ilnik): Refactor this and one above to be std::optional() and remove
// a corresponding bool flag.
bool hasVideoContentType;
VideoContentType videoContentType;
@ -143,7 +169,10 @@ struct RTPHeaderExtension {
// https://tools.ietf.org/html/rfc8843
std::string mid;
absl::optional<ColorSpace> color_space;
std::optional<ColorSpace> color_space;
private:
std::optional<AudioLevel> audio_level_;
};
enum { kRtpCsrcSize = 15 }; // RFC 3550 page 13

View File

@ -10,8 +10,15 @@
#include "api/rtp_packet_info.h"
#include <stddef.h>
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>
#include "api/rtp_headers.h"
#include "api/units/timestamp.h"
namespace webrtc {
@ -37,8 +44,8 @@ RtpPacketInfo::RtpPacketInfo(const RTPHeader& rtp_header,
csrcs_.assign(&rtp_header.arrOfCSRCs[0], &rtp_header.arrOfCSRCs[csrcs_count]);
if (extension.hasAudioLevel) {
audio_level_ = extension.audioLevel;
if (extension.audio_level()) {
audio_level_ = extension.audio_level()->level();
}
absolute_capture_time_ = extension.absolute_capture_time;

View File

@ -12,10 +12,10 @@
#define API_RTP_PACKET_INFO_H_
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/types/optional.h"
#include "api/rtp_headers.h"
#include "api/units/time_delta.h"
#include "api/units/timestamp.h"
@ -56,26 +56,26 @@ class RTC_EXPORT RtpPacketInfo {
Timestamp receive_time() const { return receive_time_; }
void set_receive_time(Timestamp value) { receive_time_ = value; }
absl::optional<uint8_t> audio_level() const { return audio_level_; }
RtpPacketInfo& set_audio_level(absl::optional<uint8_t> value) {
std::optional<uint8_t> audio_level() const { return audio_level_; }
RtpPacketInfo& set_audio_level(std::optional<uint8_t> value) {
audio_level_ = value;
return *this;
}
const absl::optional<AbsoluteCaptureTime>& absolute_capture_time() const {
const std::optional<AbsoluteCaptureTime>& absolute_capture_time() const {
return absolute_capture_time_;
}
RtpPacketInfo& set_absolute_capture_time(
const absl::optional<AbsoluteCaptureTime>& value) {
const std::optional<AbsoluteCaptureTime>& value) {
absolute_capture_time_ = value;
return *this;
}
const absl::optional<TimeDelta>& local_capture_clock_offset() const {
const std::optional<TimeDelta>& local_capture_clock_offset() const {
return local_capture_clock_offset_;
}
RtpPacketInfo& set_local_capture_clock_offset(
absl::optional<TimeDelta> value) {
std::optional<TimeDelta> value) {
local_capture_clock_offset_ = value;
return *this;
}
@ -92,18 +92,18 @@ class RTC_EXPORT RtpPacketInfo {
// Fields from the Audio Level header extension:
// https://tools.ietf.org/html/rfc6464#section-3
absl::optional<uint8_t> audio_level_;
std::optional<uint8_t> audio_level_;
// Fields from the Absolute Capture Time header extension:
// http://www.webrtc.org/experiments/rtp-hdrext/abs-capture-time
absl::optional<AbsoluteCaptureTime> absolute_capture_time_;
std::optional<AbsoluteCaptureTime> absolute_capture_time_;
// Clock offset between the local clock and the capturer's clock.
// Do not confuse with `AbsoluteCaptureTime::estimated_capture_clock_offset`
// which instead represents the clock offset between a remote sender and the
// capturer. The following holds:
// Capture's NTP Clock = Local NTP Clock + Local-Capture Clock Offset
absl::optional<TimeDelta> local_capture_clock_offset_;
std::optional<TimeDelta> local_capture_clock_offset_;
};
bool operator==(const RtpPacketInfo& lhs, const RtpPacketInfo& rhs);

View File

@ -11,7 +11,6 @@
#ifndef API_RTP_PACKET_INFOS_H_
#define API_RTP_PACKET_INFOS_H_
#include <cstdint>
#include <utility>
#include <vector>

View File

@ -22,13 +22,13 @@
// };
//
// void some_function() {
// scoped_refptr<MyFoo> foo = new MyFoo();
// scoped_refptr<MyFoo> foo = make_ref_counted<MyFoo>();
// foo->Method(param);
// // `foo` is released when this function returns
// }
//
// void some_other_function() {
// scoped_refptr<MyFoo> foo = new MyFoo();
// scoped_refptr<MyFoo> foo = make_ref_counted<MyFoo>();
// ...
// foo = nullptr; // explicitly releases `foo`
// ...
@ -41,7 +41,7 @@
// references between the two objects, like so:
//
// {
// scoped_refptr<MyFoo> a = new MyFoo();
// scoped_refptr<MyFoo> a = make_ref_counted<MyFoo>();
// scoped_refptr<MyFoo> b;
//
// b.swap(a);
@ -52,7 +52,7 @@
// object, simply use the assignment operator:
//
// {
// scoped_refptr<MyFoo> a = new MyFoo();
// scoped_refptr<MyFoo> a = make_ref_counted<MyFoo>();
// scoped_refptr<MyFoo> b;
//
// b = a;
@ -63,20 +63,23 @@
#ifndef API_SCOPED_REFPTR_H_
#define API_SCOPED_REFPTR_H_
#include <memory>
#include <cstddef>
#include <utility>
namespace rtc {
#include "absl/base/nullability.h"
namespace webrtc {
template <class T>
class scoped_refptr {
class ABSL_NULLABILITY_COMPATIBLE scoped_refptr {
public:
typedef T element_type;
using absl_nullability_compatible = void;
using element_type = T;
scoped_refptr() : ptr_(nullptr) {}
scoped_refptr(std::nullptr_t) : ptr_(nullptr) {} // NOLINT(runtime/explicit)
explicit scoped_refptr(T* p) : ptr_(p) {
explicit scoped_refptr(absl::Nullable<T*> p) : ptr_(p) {
if (ptr_)
ptr_->AddRef();
}
@ -119,7 +122,7 @@ class scoped_refptr {
return retVal;
}
scoped_refptr<T>& operator=(T* p) {
scoped_refptr<T>& operator=(absl::Nullable<T*> p) {
// AddRef first so that self assignment should work
if (p)
p->AddRef();
@ -149,7 +152,7 @@ class scoped_refptr {
return *this;
}
void swap(T** pp) noexcept {
void swap(absl::Nonnull<T**> pp) noexcept {
T* p = ptr_;
ptr_ = *pp;
*pp = p;
@ -162,61 +165,65 @@ class scoped_refptr {
};
template <typename T, typename U>
bool operator==(const rtc::scoped_refptr<T>& a,
const rtc::scoped_refptr<U>& b) {
bool operator==(const scoped_refptr<T>& a, const scoped_refptr<U>& b) {
return a.get() == b.get();
}
template <typename T, typename U>
bool operator!=(const rtc::scoped_refptr<T>& a,
const rtc::scoped_refptr<U>& b) {
bool operator!=(const scoped_refptr<T>& a, const scoped_refptr<U>& b) {
return !(a == b);
}
template <typename T>
bool operator==(const rtc::scoped_refptr<T>& a, std::nullptr_t) {
bool operator==(const scoped_refptr<T>& a, std::nullptr_t) {
return a.get() == nullptr;
}
template <typename T>
bool operator!=(const rtc::scoped_refptr<T>& a, std::nullptr_t) {
bool operator!=(const scoped_refptr<T>& a, std::nullptr_t) {
return !(a == nullptr);
}
template <typename T>
bool operator==(std::nullptr_t, const rtc::scoped_refptr<T>& a) {
bool operator==(std::nullptr_t, const scoped_refptr<T>& a) {
return a.get() == nullptr;
}
template <typename T>
bool operator!=(std::nullptr_t, const rtc::scoped_refptr<T>& a) {
bool operator!=(std::nullptr_t, const scoped_refptr<T>& a) {
return !(a == nullptr);
}
// Comparison with raw pointer.
template <typename T, typename U>
bool operator==(const rtc::scoped_refptr<T>& a, const U* b) {
bool operator==(const scoped_refptr<T>& a, const U* b) {
return a.get() == b;
}
template <typename T, typename U>
bool operator!=(const rtc::scoped_refptr<T>& a, const U* b) {
bool operator!=(const scoped_refptr<T>& a, const U* b) {
return !(a == b);
}
template <typename T, typename U>
bool operator==(const T* a, const rtc::scoped_refptr<U>& b) {
bool operator==(const T* a, const scoped_refptr<U>& b) {
return a == b.get();
}
template <typename T, typename U>
bool operator!=(const T* a, const rtc::scoped_refptr<U>& b) {
bool operator!=(const T* a, const scoped_refptr<U>& b) {
return !(a == b);
}
// Ordered comparison, needed for use as a std::map key.
template <typename T, typename U>
bool operator<(const rtc::scoped_refptr<T>& a, const rtc::scoped_refptr<U>& b) {
bool operator<(const scoped_refptr<T>& a, const scoped_refptr<U>& b) {
return a.get() < b.get();
}
} // namespace webrtc
namespace rtc {
// Backwards compatible alias.
// TODO: bugs.webrtc.org/42225969 - Deprecate and remove.
using ::webrtc::scoped_refptr;
} // namespace rtc
#endif // API_SCOPED_REFPTR_H_

View File

@ -10,6 +10,7 @@
#ifndef API_SEQUENCE_CHECKER_H_
#define API_SEQUENCE_CHECKER_H_
#include "api/task_queue/task_queue_base.h"
#include "rtc_base/checks.h"
#include "rtc_base/synchronization/sequence_checker_internal.h"
#include "rtc_base/thread_annotations.h"

View File

@ -1,32 +0,0 @@
/*
* Copyright 2018 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_TASK_QUEUE_QUEUED_TASK_H_
#define API_TASK_QUEUE_QUEUED_TASK_H_
namespace webrtc {
// Base interface for asynchronously executed tasks.
// The interface basically consists of a single function, Run(), that executes
// on the target queue. For more details see the Run() method and TaskQueue.
class QueuedTask {
public:
virtual ~QueuedTask() = default;
// Main routine that will run when the task is executed on the desired queue.
// The task should return |true| to indicate that it should be deleted or
// |false| to indicate that the queue should consider ownership of the task
// having been transferred. Returning |false| can be useful if a task has
// re-posted itself to a different queue or is otherwise being re-used.
virtual bool Run() = 0;
};
} // namespace webrtc
#endif // API_TASK_QUEUE_QUEUED_TASK_H_

View File

@ -11,9 +11,6 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/functional/any_invocable.h"
#include "api/units/time_delta.h"
#include "rtc_base/checks.h"
#if defined(ABSL_HAVE_THREAD_LOCAL)

View File

@ -10,7 +10,6 @@
#ifndef API_TASK_QUEUE_TASK_QUEUE_BASE_H_
#define API_TASK_QUEUE_TASK_QUEUE_BASE_H_
#include <memory>
#include <utility>
#include "absl/functional/any_invocable.h"

View File

@ -10,6 +10,8 @@
#include "api/units/data_rate.h"
#include <string>
#include "api/array_view.h"
#include "rtc_base/strings/string_builder.h"

View File

@ -11,10 +11,7 @@
#ifndef API_UNITS_DATA_RATE_H_
#define API_UNITS_DATA_RATE_H_
#ifdef WEBRTC_UNIT_TEST
#include <ostream> // no-presubmit-check TODO(webrtc:8982)
#endif // WEBRTC_UNIT_TEST
#include <cstdint>
#include <limits>
#include <string>
#include <type_traits>
@ -23,6 +20,7 @@
#include "api/units/frequency.h"
#include "api/units/time_delta.h"
#include "rtc_base/checks.h"
#include "rtc_base/system/rtc_export.h"
#include "rtc_base/units/unit_base.h" // IWYU pragma: export
namespace webrtc {
@ -50,6 +48,9 @@ class DataRate final : public rtc_units_impl::RelativeUnit<DataRate> {
DataRate() = delete;
template <typename Sink>
friend void AbslStringify(Sink& sink, DataRate value);
template <typename T = int64_t>
constexpr T bps() const {
return ToValue<T>();
@ -137,18 +138,15 @@ inline constexpr DataRate operator*(const Frequency frequency,
return size * frequency;
}
std::string ToString(DataRate value);
RTC_EXPORT std::string ToString(DataRate value);
inline std::string ToLogString(DataRate value) {
return ToString(value);
}
#ifdef WEBRTC_UNIT_TEST
inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982)
std::ostream& stream, // no-presubmit-check TODO(webrtc:8982)
DataRate value) {
return stream << ToString(value);
template <typename Sink>
void AbslStringify(Sink& sink, DataRate value) {
sink.Append(ToString(value));
}
#endif // WEBRTC_UNIT_TEST
} // namespace webrtc

View File

@ -10,6 +10,8 @@
#include "api/units/data_size.h"
#include <string>
#include "api/array_view.h"
#include "rtc_base/strings/string_builder.h"

View File

@ -11,13 +11,11 @@
#ifndef API_UNITS_DATA_SIZE_H_
#define API_UNITS_DATA_SIZE_H_
#ifdef WEBRTC_UNIT_TEST
#include <ostream> // no-presubmit-check TODO(webrtc:8982)
#endif // WEBRTC_UNIT_TEST
#include <cstdint>
#include <string>
#include <type_traits>
#include "rtc_base/system/rtc_export.h"
#include "rtc_base/units/unit_base.h" // IWYU pragma: export
namespace webrtc {
@ -33,6 +31,9 @@ class DataSize final : public rtc_units_impl::RelativeUnit<DataSize> {
DataSize() = delete;
template <typename Sink>
friend void AbslStringify(Sink& sink, DataSize value);
template <typename T = int64_t>
constexpr T bytes() const {
return ToValue<T>();
@ -48,18 +49,15 @@ class DataSize final : public rtc_units_impl::RelativeUnit<DataSize> {
static constexpr bool one_sided = true;
};
std::string ToString(DataSize value);
RTC_EXPORT std::string ToString(DataSize value);
inline std::string ToLogString(DataSize value) {
return ToString(value);
}
#ifdef WEBRTC_UNIT_TEST
inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982)
std::ostream& stream, // no-presubmit-check TODO(webrtc:8982)
DataSize value) {
return stream << ToString(value);
template <typename Sink>
void AbslStringify(Sink& sink, DataSize value) {
sink.Append(ToString(value));
}
#endif // WEBRTC_UNIT_TEST
} // namespace webrtc

View File

@ -9,6 +9,9 @@
*/
#include "api/units/frequency.h"
#include <cstdint>
#include <string>
#include "rtc_base/strings/string_builder.h"
namespace webrtc {

View File

@ -10,16 +10,15 @@
#ifndef API_UNITS_FREQUENCY_H_
#define API_UNITS_FREQUENCY_H_
#ifdef WEBRTC_UNIT_TEST
#include <ostream> // no-presubmit-check TODO(webrtc:8982)
#endif // WEBRTC_UNIT_TEST
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <string>
#include <type_traits>
#include "api/units/time_delta.h"
#include "rtc_base/checks.h"
#include "rtc_base/system/rtc_export.h"
#include "rtc_base/units/unit_base.h" // IWYU pragma: export
namespace webrtc {
@ -44,6 +43,9 @@ class Frequency final : public rtc_units_impl::RelativeUnit<Frequency> {
Frequency() = delete;
template <typename Sink>
friend void AbslStringify(Sink& sink, Frequency value);
template <typename T = int64_t>
constexpr T hertz() const {
return ToFraction<1000, T>();
@ -84,18 +86,15 @@ inline constexpr double operator*(TimeDelta time_delta, Frequency frequency) {
return frequency * time_delta;
}
std::string ToString(Frequency value);
RTC_EXPORT std::string ToString(Frequency value);
inline std::string ToLogString(Frequency value) {
return ToString(value);
}
#ifdef WEBRTC_UNIT_TEST
inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982)
std::ostream& stream, // no-presubmit-check TODO(webrtc:8982)
Frequency value) {
return stream << ToString(value);
template <typename Sink>
void AbslStringify(Sink& sink, Frequency value) {
sink.Append(ToString(value));
}
#endif // WEBRTC_UNIT_TEST
} // namespace webrtc
#endif // API_UNITS_FREQUENCY_H_

View File

@ -10,6 +10,8 @@
#include "api/units/time_delta.h"
#include <string>
#include "api/array_view.h"
#include "rtc_base/strings/string_builder.h"

View File

@ -11,14 +11,12 @@
#ifndef API_UNITS_TIME_DELTA_H_
#define API_UNITS_TIME_DELTA_H_
#ifdef WEBRTC_UNIT_TEST
#include <ostream> // no-presubmit-check TODO(webrtc:8982)
#endif // WEBRTC_UNIT_TEST
#include <cstdint>
#include <cstdlib>
#include <string>
#include <type_traits>
#include "rtc_base/system/rtc_export.h"
#include "rtc_base/units/unit_base.h" // IWYU pragma: export
namespace webrtc {
@ -55,6 +53,9 @@ class TimeDelta final : public rtc_units_impl::RelativeUnit<TimeDelta> {
TimeDelta() = delete;
template <typename Sink>
friend void AbslStringify(Sink& sink, TimeDelta value);
template <typename T = int64_t>
constexpr T seconds() const {
return ToFraction<1000000, T>();
@ -92,18 +93,15 @@ class TimeDelta final : public rtc_units_impl::RelativeUnit<TimeDelta> {
static constexpr bool one_sided = false;
};
std::string ToString(TimeDelta value);
RTC_EXPORT std::string ToString(TimeDelta value);
inline std::string ToLogString(TimeDelta value) {
return ToString(value);
}
#ifdef WEBRTC_UNIT_TEST
inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982)
std::ostream& stream, // no-presubmit-check TODO(webrtc:8982)
TimeDelta value) {
return stream << ToString(value);
template <typename Sink>
void AbslStringify(Sink& sink, TimeDelta value) {
sink.Append(ToString(value));
}
#endif // WEBRTC_UNIT_TEST
} // namespace webrtc

View File

@ -10,6 +10,8 @@
#include "api/units/timestamp.h"
#include <string>
#include "api/array_view.h"
#include "rtc_base/strings/string_builder.h"

View File

@ -11,15 +11,13 @@
#ifndef API_UNITS_TIMESTAMP_H_
#define API_UNITS_TIMESTAMP_H_
#ifdef WEBRTC_UNIT_TEST
#include <ostream> // no-presubmit-check TODO(webrtc:8982)
#endif // WEBRTC_UNIT_TEST
#include <cstdint>
#include <string>
#include <type_traits>
#include "api/units/time_delta.h"
#include "rtc_base/checks.h"
#include "rtc_base/system/rtc_export.h"
#include "rtc_base/units/unit_base.h" // IWYU pragma: export
namespace webrtc {
@ -47,6 +45,9 @@ class Timestamp final : public rtc_units_impl::UnitBase<Timestamp> {
Timestamp() = delete;
template <typename Sink>
friend void AbslStringify(Sink& sink, Timestamp value);
template <typename T = int64_t>
constexpr T seconds() const {
return ToFraction<1000000, T>();
@ -121,18 +122,15 @@ class Timestamp final : public rtc_units_impl::UnitBase<Timestamp> {
static constexpr bool one_sided = true;
};
std::string ToString(Timestamp value);
RTC_EXPORT std::string ToString(Timestamp value);
inline std::string ToLogString(Timestamp value) {
return ToString(value);
}
#ifdef WEBRTC_UNIT_TEST
inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982)
std::ostream& stream, // no-presubmit-check TODO(webrtc:8982)
Timestamp value) {
return stream << ToString(value);
template <typename Sink>
void AbslStringify(Sink& sink, Timestamp value) {
sink.Append(ToString(value));
}
#endif // WEBRTC_UNIT_TEST
} // namespace webrtc

View File

@ -10,6 +10,12 @@
#include "api/video/color_space.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include "api/video/hdr_metadata.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
@ -95,8 +101,8 @@ ColorSpace::ColorSpace(PrimaryID primaries,
range_(range),
chroma_siting_horizontal_(chroma_siting_horz),
chroma_siting_vertical_(chroma_siting_vert),
hdr_metadata_(hdr_metadata ? absl::make_optional(*hdr_metadata)
: absl::nullopt) {}
hdr_metadata_(hdr_metadata ? std::make_optional(*hdr_metadata)
: std::nullopt) {}
ColorSpace::PrimaryID ColorSpace::primaries() const {
return primaries_;
@ -257,7 +263,7 @@ bool ColorSpace::set_chroma_siting_vertical_from_uint8(uint8_t enum_value) {
void ColorSpace::set_hdr_metadata(const HdrMetadata* hdr_metadata) {
hdr_metadata_ =
hdr_metadata ? absl::make_optional(*hdr_metadata) : absl::nullopt;
hdr_metadata ? std::make_optional(*hdr_metadata) : std::nullopt;
}
} // namespace webrtc

View File

@ -13,9 +13,9 @@
#include <stdint.h>
#include <optional>
#include <string>
#include "absl/types/optional.h"
#include "api/video/hdr_metadata.h"
#include "rtc_base/system/rtc_export.h"
@ -174,7 +174,7 @@ class RTC_EXPORT ColorSpace {
RangeID range_ = RangeID::kInvalid;
ChromaSiting chroma_siting_horizontal_ = ChromaSiting::kUnspecified;
ChromaSiting chroma_siting_vertical_ = ChromaSiting::kUnspecified;
absl::optional<HdrMetadata> hdr_metadata_;
std::optional<HdrMetadata> hdr_metadata_;
};
} // namespace webrtc

View File

@ -10,6 +10,8 @@
#include "api/video/video_content_type.h"
#include <cstdint>
#include "rtc_base/checks.h"
namespace webrtc {

View File

@ -11,6 +11,8 @@
#include "api/video/video_timing.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include "api/array_view.h"
#include "api/units/time_delta.h"

View File

@ -23,14 +23,14 @@ rtc_library("audio_frame_operations") {
]
deps = [
"../../api:array_view",
"../../api/audio:audio_frame_api",
"../../common_audio",
"../../rtc_base:checks",
"../../rtc_base:logging",
"../../rtc_base:safe_conversions",
"../../system_wrappers:field_trial",
"//third_party/abseil-cpp/absl/base:core_headers",
]
absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers" ]
}
if (rtc_include_tests) {
@ -48,7 +48,6 @@ if (rtc_include_tests) {
"../../rtc_base:logging",
"../../rtc_base:macromagic",
"../../rtc_base:stringutils",
"../../test:field_trial",
"../../test:test_support",
"//testing/gtest",
]

View File

@ -29,72 +29,17 @@ const float kMuteFadeInc = 1.0f / kMuteFadeFrames;
} // namespace
void AudioFrameOperations::Add(const AudioFrame& frame_to_add,
AudioFrame* result_frame) {
// Sanity check.
RTC_DCHECK(result_frame);
RTC_DCHECK_GT(result_frame->num_channels_, 0);
RTC_DCHECK_EQ(result_frame->num_channels_, frame_to_add.num_channels_);
bool no_previous_data = result_frame->muted();
if (result_frame->samples_per_channel_ != frame_to_add.samples_per_channel_) {
// Special case we have no data to start with.
RTC_DCHECK_EQ(result_frame->samples_per_channel_, 0);
result_frame->samples_per_channel_ = frame_to_add.samples_per_channel_;
no_previous_data = true;
}
if (result_frame->vad_activity_ == AudioFrame::kVadActive ||
frame_to_add.vad_activity_ == AudioFrame::kVadActive) {
result_frame->vad_activity_ = AudioFrame::kVadActive;
} else if (result_frame->vad_activity_ == AudioFrame::kVadUnknown ||
frame_to_add.vad_activity_ == AudioFrame::kVadUnknown) {
result_frame->vad_activity_ = AudioFrame::kVadUnknown;
}
if (result_frame->speech_type_ != frame_to_add.speech_type_)
result_frame->speech_type_ = AudioFrame::kUndefined;
if (!frame_to_add.muted()) {
const int16_t* in_data = frame_to_add.data();
int16_t* out_data = result_frame->mutable_data();
size_t length =
frame_to_add.samples_per_channel_ * frame_to_add.num_channels_;
if (no_previous_data) {
std::copy(in_data, in_data + length, out_data);
} else {
for (size_t i = 0; i < length; i++) {
const int32_t wrap_guard = static_cast<int32_t>(out_data[i]) +
static_cast<int32_t>(in_data[i]);
out_data[i] = rtc::saturated_cast<int16_t>(wrap_guard);
}
}
}
}
int AudioFrameOperations::MonoToStereo(AudioFrame* frame) {
if (frame->num_channels_ != 1) {
return -1;
}
UpmixChannels(2, frame);
return 0;
}
int AudioFrameOperations::StereoToMono(AudioFrame* frame) {
if (frame->num_channels_ != 2) {
return -1;
}
DownmixChannels(1, frame);
return frame->num_channels_ == 1 ? 0 : -1;
}
void AudioFrameOperations::QuadToStereo(const int16_t* src_audio,
size_t samples_per_channel,
int16_t* dst_audio) {
for (size_t i = 0; i < samples_per_channel; i++) {
dst_audio[i * 2] =
void AudioFrameOperations::QuadToStereo(
InterleavedView<const int16_t> src_audio,
InterleavedView<int16_t> dst_audio) {
RTC_DCHECK_EQ(NumChannels(src_audio), 4);
RTC_DCHECK_EQ(NumChannels(dst_audio), 2);
RTC_DCHECK_EQ(SamplesPerChannel(src_audio), SamplesPerChannel(dst_audio));
for (size_t i = 0; i < SamplesPerChannel(src_audio); ++i) {
auto dst_frame = i * 2;
dst_audio[dst_frame] =
(static_cast<int32_t>(src_audio[4 * i]) + src_audio[4 * i + 1]) >> 1;
dst_audio[i * 2 + 1] =
dst_audio[dst_frame + 1] =
(static_cast<int32_t>(src_audio[4 * i + 2]) + src_audio[4 * i + 3]) >>
1;
}
@ -109,30 +54,34 @@ int AudioFrameOperations::QuadToStereo(AudioFrame* frame) {
AudioFrame::kMaxDataSizeSamples);
if (!frame->muted()) {
QuadToStereo(frame->data(), frame->samples_per_channel_,
frame->mutable_data());
}
// Note that `src` and `dst` will map in to the same buffer, but the call
// to `mutable_data()` changes the layout of `frame`, so `src` and `dst`
// will have different dimensions (important to call `data_view()` first).
auto src = frame->data_view();
auto dst = frame->mutable_data(frame->samples_per_channel_, 2);
QuadToStereo(src, dst);
} else {
frame->num_channels_ = 2;
}
return 0;
}
void AudioFrameOperations::DownmixChannels(const int16_t* src_audio,
size_t src_channels,
size_t samples_per_channel,
size_t dst_channels,
int16_t* dst_audio) {
if (src_channels > 1 && dst_channels == 1) {
DownmixInterleavedToMono(src_audio, samples_per_channel, src_channels,
dst_audio);
return;
} else if (src_channels == 4 && dst_channels == 2) {
QuadToStereo(src_audio, samples_per_channel, dst_audio);
return;
void AudioFrameOperations::DownmixChannels(
InterleavedView<const int16_t> src_audio,
InterleavedView<int16_t> dst_audio) {
RTC_DCHECK_EQ(SamplesPerChannel(src_audio), SamplesPerChannel(dst_audio));
if (NumChannels(src_audio) > 1 && IsMono(dst_audio)) {
// TODO(tommi): change DownmixInterleavedToMono to support InterleavedView
// and MonoView.
DownmixInterleavedToMono(&src_audio.data()[0], SamplesPerChannel(src_audio),
NumChannels(src_audio), &dst_audio.data()[0]);
} else if (NumChannels(src_audio) == 4 && NumChannels(dst_audio) == 2) {
QuadToStereo(src_audio, dst_audio);
} else {
RTC_DCHECK_NOTREACHED() << "src_channels: " << NumChannels(src_audio)
<< ", dst_channels: " << NumChannels(dst_audio);
}
RTC_DCHECK_NOTREACHED() << "src_channels: " << src_channels
<< ", dst_channels: " << dst_channels;
}
void AudioFrameOperations::DownmixChannels(size_t dst_channels,
@ -169,14 +118,16 @@ void AudioFrameOperations::UpmixChannels(size_t target_number_of_channels,
if (!frame->muted()) {
// Up-mixing done in place. Going backwards through the frame ensure nothing
// is irrevocably overwritten.
int16_t* frame_data = frame->mutable_data();
for (int i = frame->samples_per_channel_ - 1; i >= 0; i--) {
auto frame_data = frame->mutable_data(frame->samples_per_channel_,
target_number_of_channels);
for (int i = frame->samples_per_channel_ - 1; i >= 0; --i) {
for (size_t j = 0; j < target_number_of_channels; ++j) {
frame_data[target_number_of_channels * i + j] = frame_data[i];
}
}
}
} else {
frame->num_channels_ = target_number_of_channels;
}
}
void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
@ -250,35 +201,6 @@ void AudioFrameOperations::Mute(AudioFrame* frame) {
Mute(frame, true, true);
}
void AudioFrameOperations::ApplyHalfGain(AudioFrame* frame) {
RTC_DCHECK(frame);
RTC_DCHECK_GT(frame->num_channels_, 0);
if (frame->num_channels_ < 1 || frame->muted()) {
return;
}
int16_t* frame_data = frame->mutable_data();
for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
i++) {
frame_data[i] = frame_data[i] >> 1;
}
}
int AudioFrameOperations::Scale(float left, float right, AudioFrame* frame) {
if (frame->num_channels_ != 2) {
return -1;
} else if (frame->muted()) {
return 0;
}
int16_t* frame_data = frame->mutable_data();
for (size_t i = 0; i < frame->samples_per_channel_; i++) {
frame_data[2 * i] = static_cast<int16_t>(left * frame_data[2 * i]);
frame_data[2 * i + 1] = static_cast<int16_t>(right * frame_data[2 * i + 1]);
}
return 0;
}
int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame* frame) {
if (frame->muted()) {
return 0;

View File

@ -15,6 +15,7 @@
#include <stdint.h>
#include "absl/base/attributes.h"
#include "api/array_view.h"
#include "api/audio/audio_frame.h"
namespace webrtc {
@ -24,33 +25,11 @@ namespace webrtc {
// than a class.
class AudioFrameOperations {
public:
// Add samples in `frame_to_add` with samples in `result_frame`
// putting the results in `results_frame`. The fields
// `vad_activity_` and `speech_type_` of the result frame are
// updated. If `result_frame` is empty (`samples_per_channel_`==0),
// the samples in `frame_to_add` are added to it. The number of
// channels and number of samples per channel must match except when
// `result_frame` is empty.
static void Add(const AudioFrame& frame_to_add, AudioFrame* result_frame);
// `frame.num_channels_` will be updated. This version checks for sufficient
// buffer size and that `num_channels_` is mono. Use UpmixChannels
// instead. TODO(bugs.webrtc.org/8649): remove.
ABSL_DEPRECATED("bugs.webrtc.org/8649")
static int MonoToStereo(AudioFrame* frame);
// `frame.num_channels_` will be updated. This version checks that
// `num_channels_` is stereo. Use DownmixChannels
// instead. TODO(bugs.webrtc.org/8649): remove.
ABSL_DEPRECATED("bugs.webrtc.org/8649")
static int StereoToMono(AudioFrame* frame);
// Downmixes 4 channels `src_audio` to stereo `dst_audio`. This is an in-place
// operation, meaning `src_audio` and `dst_audio` may point to the same
// buffer.
static void QuadToStereo(const int16_t* src_audio,
size_t samples_per_channel,
int16_t* dst_audio);
static void QuadToStereo(InterleavedView<const int16_t> src_audio,
InterleavedView<int16_t> dst_audio);
// `frame.num_channels_` will be updated. This version checks that
// `num_channels_` is 4 channels.
@ -60,11 +39,8 @@ class AudioFrameOperations {
// This is an in-place operation, meaning `src_audio` and `dst_audio`
// may point to the same buffer. Supported channel combinations are
// Stereo to Mono, Quad to Mono, and Quad to Stereo.
static void DownmixChannels(const int16_t* src_audio,
size_t src_channels,
size_t samples_per_channel,
size_t dst_channels,
int16_t* dst_audio);
static void DownmixChannels(InterleavedView<const int16_t> src_audio,
InterleavedView<int16_t> dst_audio);
// `frame.num_channels_` will be updated. This version checks that
// `num_channels_` and `dst_channels` are valid and performs relevant downmix.
@ -94,11 +70,6 @@ class AudioFrameOperations {
// Zero out contents of frame.
static void Mute(AudioFrame* frame);
// Halve samples in `frame`.
static void ApplyHalfGain(AudioFrame* frame);
static int Scale(float left, float right, AudioFrame* frame);
static int ScaleWithSat(float scale, AudioFrame* frame);
};

View File

@ -46,6 +46,7 @@ rtc_library("common_audio") {
":common_audio_c",
":sinc_resampler",
"../api:array_view",
"../api/audio:audio_frame_api",
"../rtc_base:checks",
"../rtc_base:gtest_prod",
"../rtc_base:logging",
@ -58,7 +59,6 @@ rtc_library("common_audio") {
"../system_wrappers",
"third_party/ooura:fft_size_256",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
defines = []

View File

@ -23,6 +23,44 @@
namespace webrtc {
// TODO: b/335805780 - Remove this method. Instead, use Deinterleave() from
// audio_util.h which requires size checked buffer views.
template <typename T>
void Deinterleave(const T* interleaved,
size_t samples_per_channel,
size_t num_channels,
T* const* deinterleaved) {
for (size_t i = 0; i < num_channels; ++i) {
T* channel = deinterleaved[i];
size_t interleaved_idx = i;
for (size_t j = 0; j < samples_per_channel; ++j) {
channel[j] = interleaved[interleaved_idx];
interleaved_idx += num_channels;
}
}
}
// `Interleave()` variant for cases where the deinterleaved channels aren't
// represented by a `DeinterleavedView`.
// TODO: b/335805780 - Remove this method. Instead, use Deinterleave() from
// audio_util.h which requires size checked buffer views.
template <typename T>
void Interleave(const T* const* deinterleaved,
size_t samples_per_channel,
size_t num_channels,
InterleavedView<T>& interleaved) {
RTC_DCHECK_EQ(NumChannels(interleaved), num_channels);
RTC_DCHECK_EQ(SamplesPerChannel(interleaved), samples_per_channel);
for (size_t i = 0; i < num_channels; ++i) {
const T* channel = deinterleaved[i];
size_t interleaved_idx = i;
for (size_t j = 0; j < samples_per_channel; ++j) {
interleaved[interleaved_idx] = channel[j];
interleaved_idx += num_channels;
}
}
}
// Helper to encapsulate a contiguous data buffer, full or split into frequency
// bands, with access to a pointer arrays of the deinterleaved channels and
// bands. The buffer is zero initialized at creation.

View File

@ -18,12 +18,24 @@
#include <cstring>
#include <limits>
#include "api/audio/audio_view.h"
#include "rtc_base/checks.h"
namespace webrtc {
typedef std::numeric_limits<int16_t> limits_int16;
// TODO(tommi, peah): Move these constants to their own header, e.g.
// `audio_constants.h`. Also consider if they should be in api/.
// Absolute highest acceptable sample rate supported for audio processing,
// capture and codecs. Note that for some components some cases a lower limit
// applies which typically is 48000 but in some cases is lower.
constexpr int kMaxSampleRateHz = 384000;
// Number of samples per channel for 10ms of audio at the highest sample rate.
constexpr size_t kMaxSamplesPerChannel10ms = kMaxSampleRateHz / 100u;
// The conversion functions use the following naming convention:
// S16: int16_t [-32768, 32767]
// Float: float [-1.0, 1.0]
@ -94,6 +106,7 @@ inline float FloatS16ToDbfs(float v) {
// Copy audio from `src` channels to `dest` channels unless `src` and `dest`
// point to the same address. `src` and `dest` must have the same number of
// channels, and there must be sufficient space allocated in `dest`.
// TODO: b/335805780 - Accept ArrayView.
template <typename T>
void CopyAudioIfNeeded(const T* const* src,
int num_frames,
@ -111,12 +124,15 @@ void CopyAudioIfNeeded(const T* const* src,
// `deinterleaved` buffers (`num_channel` buffers with `samples_per_channel`
// per buffer).
template <typename T>
void Deinterleave(const T* interleaved,
size_t samples_per_channel,
size_t num_channels,
T* const* deinterleaved) {
void Deinterleave(const InterleavedView<const T>& interleaved,
const DeinterleavedView<T>& deinterleaved) {
RTC_DCHECK_EQ(NumChannels(interleaved), NumChannels(deinterleaved));
RTC_DCHECK_EQ(SamplesPerChannel(interleaved),
SamplesPerChannel(deinterleaved));
const auto num_channels = NumChannels(interleaved);
const auto samples_per_channel = SamplesPerChannel(interleaved);
for (size_t i = 0; i < num_channels; ++i) {
T* channel = deinterleaved[i];
MonoView<T> channel = deinterleaved[i];
size_t interleaved_idx = i;
for (size_t j = 0; j < samples_per_channel; ++j) {
channel[j] = interleaved[interleaved_idx];
@ -129,52 +145,24 @@ void Deinterleave(const T* interleaved,
// `interleaved`. There must be sufficient space allocated in `interleaved`
// (`samples_per_channel` * `num_channels`).
template <typename T>
void Interleave(const T* const* deinterleaved,
size_t samples_per_channel,
size_t num_channels,
T* interleaved) {
for (size_t i = 0; i < num_channels; ++i) {
const T* channel = deinterleaved[i];
void Interleave(const DeinterleavedView<const T>& deinterleaved,
const InterleavedView<T>& interleaved) {
RTC_DCHECK_EQ(NumChannels(interleaved), NumChannels(deinterleaved));
RTC_DCHECK_EQ(SamplesPerChannel(interleaved),
SamplesPerChannel(deinterleaved));
for (size_t i = 0; i < deinterleaved.num_channels(); ++i) {
const auto channel = deinterleaved[i];
size_t interleaved_idx = i;
for (size_t j = 0; j < samples_per_channel; ++j) {
for (size_t j = 0; j < deinterleaved.samples_per_channel(); ++j) {
interleaved[interleaved_idx] = channel[j];
interleaved_idx += num_channels;
interleaved_idx += deinterleaved.num_channels();
}
}
}
// Copies audio from a single channel buffer pointed to by `mono` to each
// channel of `interleaved`. There must be sufficient space allocated in
// `interleaved` (`samples_per_channel` * `num_channels`).
template <typename T>
void UpmixMonoToInterleaved(const T* mono,
int num_frames,
int num_channels,
T* interleaved) {
int interleaved_idx = 0;
for (int i = 0; i < num_frames; ++i) {
for (int j = 0; j < num_channels; ++j) {
interleaved[interleaved_idx++] = mono[i];
}
}
}
template <typename T, typename Intermediate>
void DownmixToMono(const T* const* input_channels,
size_t num_frames,
int num_channels,
T* out) {
for (size_t i = 0; i < num_frames; ++i) {
Intermediate value = input_channels[0][i];
for (int j = 1; j < num_channels; ++j) {
value += input_channels[j][i];
}
out[i] = value / num_channels;
}
}
// Downmixes an interleaved multichannel signal to a single channel by averaging
// all channels.
// TODO: b/335805780 - Accept InterleavedView and DeinterleavedView.
template <typename T, typename Intermediate>
void DownmixInterleavedToMonoImpl(const T* interleaved,
size_t num_frames,
@ -197,12 +185,14 @@ void DownmixInterleavedToMonoImpl(const T* interleaved,
}
}
// TODO: b/335805780 - Accept InterleavedView and DeinterleavedView.
template <typename T>
void DownmixInterleavedToMono(const T* interleaved,
size_t num_frames,
int num_channels,
T* deinterleaved);
// TODO: b/335805780 - Accept InterleavedView and DeinterleavedView.
template <>
void DownmixInterleavedToMono<int16_t>(const int16_t* interleaved,
size_t num_frames,

View File

@ -14,45 +14,44 @@
#include <memory>
#include <vector>
#include "api/audio/audio_view.h"
namespace webrtc {
class PushSincResampler;
// Wraps PushSincResampler to provide stereo support.
// TODO(ajm): add support for an arbitrary number of channels.
// Note: This implementation assumes 10ms buffer sizes throughout.
template <typename T>
class PushResampler {
class PushResampler final {
public:
PushResampler();
virtual ~PushResampler();
// Must be called whenever the parameters change. Free to be called at any
// time as it is a no-op if parameters have not changed since the last call.
int InitializeIfNeeded(int src_sample_rate_hz,
int dst_sample_rate_hz,
PushResampler(size_t src_samples_per_channel,
size_t dst_samples_per_channel,
size_t num_channels);
~PushResampler();
// Returns the total number of samples provided in destination (e.g. 32 kHz,
// 2 channel audio gives 640 samples).
int Resample(const T* src, size_t src_length, T* dst, size_t dst_capacity);
int Resample(InterleavedView<const T> src, InterleavedView<T> dst);
// For when a deinterleaved/mono channel already exists and we can skip the
// deinterleaved operation.
int Resample(MonoView<const T> src, MonoView<T> dst);
private:
int src_sample_rate_hz_;
int dst_sample_rate_hz_;
size_t num_channels_;
// Vector that is needed to provide the proper inputs and outputs to the
// interleave/de-interleave methods used in Resample. This needs to be
// heap-allocated on the state to support an arbitrary number of channels
// without doing run-time heap-allocations in the Resample method.
std::vector<T*> channel_data_array_;
// Ensures that source and destination buffers for deinterleaving are
// correctly configured prior to resampling that requires deinterleaving.
void EnsureInitialized(size_t src_samples_per_channel,
size_t dst_samples_per_channel,
size_t num_channels);
struct ChannelResampler {
std::unique_ptr<PushSincResampler> resampler;
std::vector<T> source;
std::vector<T> destination;
};
// Buffers used for when a deinterleaving step is necessary.
std::unique_ptr<T[]> source_;
std::unique_ptr<T[]> destination_;
DeinterleavedView<T> source_view_;
DeinterleavedView<T> destination_view_;
std::vector<ChannelResampler> channel_resamplers_;
std::vector<std::unique_ptr<PushSincResampler>> resamplers_;
};
} // namespace webrtc

View File

@ -15,105 +15,109 @@
#include <memory>
#include "api/audio/audio_frame.h"
#include "common_audio/include/audio_util.h"
#include "common_audio/resampler/push_sinc_resampler.h"
#include "rtc_base/checks.h"
namespace webrtc {
template <typename T>
PushResampler<T>::PushResampler()
: src_sample_rate_hz_(0), dst_sample_rate_hz_(0), num_channels_(0) {}
namespace {
// Maximum concurrent number of channels for `PushResampler<>`.
// Note that this may be different from what the maximum is for audio codecs.
constexpr int kMaxNumberOfChannels = 8;
} // namespace
template <typename T>
PushResampler<T>::~PushResampler() {}
PushResampler<T>::PushResampler() = default;
template <typename T>
int PushResampler<T>::InitializeIfNeeded(int src_sample_rate_hz,
int dst_sample_rate_hz,
PushResampler<T>::PushResampler(size_t src_samples_per_channel,
size_t dst_samples_per_channel,
size_t num_channels) {
// These checks used to be factored out of this template function due to
// Windows debug build issues with clang. http://crbug.com/615050
RTC_DCHECK_GT(src_sample_rate_hz, 0);
RTC_DCHECK_GT(dst_sample_rate_hz, 0);
RTC_DCHECK_GT(num_channels, 0);
if (src_sample_rate_hz == src_sample_rate_hz_ &&
dst_sample_rate_hz == dst_sample_rate_hz_ &&
num_channels == num_channels_) {
// No-op if settings haven't changed.
return 0;
}
if (src_sample_rate_hz <= 0 || dst_sample_rate_hz <= 0 || num_channels <= 0) {
return -1;
}
src_sample_rate_hz_ = src_sample_rate_hz;
dst_sample_rate_hz_ = dst_sample_rate_hz;
num_channels_ = num_channels;
const size_t src_size_10ms_mono =
static_cast<size_t>(src_sample_rate_hz / 100);
const size_t dst_size_10ms_mono =
static_cast<size_t>(dst_sample_rate_hz / 100);
channel_resamplers_.clear();
for (size_t i = 0; i < num_channels; ++i) {
channel_resamplers_.push_back(ChannelResampler());
auto channel_resampler = channel_resamplers_.rbegin();
channel_resampler->resampler = std::make_unique<PushSincResampler>(
src_size_10ms_mono, dst_size_10ms_mono);
channel_resampler->source.resize(src_size_10ms_mono);
channel_resampler->destination.resize(dst_size_10ms_mono);
}
channel_data_array_.resize(num_channels_);
return 0;
EnsureInitialized(src_samples_per_channel, dst_samples_per_channel,
num_channels);
}
template <typename T>
int PushResampler<T>::Resample(const T* src,
size_t src_length,
T* dst,
size_t dst_capacity) {
// These checks used to be factored out of this template function due to
// Windows debug build issues with clang. http://crbug.com/615050
const size_t src_size_10ms = (src_sample_rate_hz_ / 100) * num_channels_;
const size_t dst_size_10ms = (dst_sample_rate_hz_ / 100) * num_channels_;
RTC_DCHECK_EQ(src_length, src_size_10ms);
RTC_DCHECK_GE(dst_capacity, dst_size_10ms);
PushResampler<T>::~PushResampler() = default;
if (src_sample_rate_hz_ == dst_sample_rate_hz_) {
template <typename T>
void PushResampler<T>::EnsureInitialized(size_t src_samples_per_channel,
size_t dst_samples_per_channel,
size_t num_channels) {
RTC_DCHECK_GT(src_samples_per_channel, 0);
RTC_DCHECK_GT(dst_samples_per_channel, 0);
RTC_DCHECK_GT(num_channels, 0);
RTC_DCHECK_LE(src_samples_per_channel, kMaxSamplesPerChannel10ms);
RTC_DCHECK_LE(dst_samples_per_channel, kMaxSamplesPerChannel10ms);
RTC_DCHECK_LE(num_channels, kMaxNumberOfChannels);
if (src_samples_per_channel == SamplesPerChannel(source_view_) &&
dst_samples_per_channel == SamplesPerChannel(destination_view_) &&
num_channels == NumChannels(source_view_)) {
// No-op if settings haven't changed.
return;
}
// Allocate two buffers for all source and destination channels.
// Then organize source and destination views together with an array of
// resamplers for each channel in the deinterlaved buffers.
source_.reset(new T[src_samples_per_channel * num_channels]);
destination_.reset(new T[dst_samples_per_channel * num_channels]);
source_view_ = DeinterleavedView<T>(source_.get(), src_samples_per_channel,
num_channels);
destination_view_ = DeinterleavedView<T>(
destination_.get(), dst_samples_per_channel, num_channels);
resamplers_.resize(num_channels);
for (size_t i = 0; i < num_channels; ++i) {
resamplers_[i] = std::make_unique<PushSincResampler>(
src_samples_per_channel, dst_samples_per_channel);
}
}
template <typename T>
int PushResampler<T>::Resample(InterleavedView<const T> src,
InterleavedView<T> dst) {
EnsureInitialized(SamplesPerChannel(src), SamplesPerChannel(dst),
NumChannels(src));
RTC_DCHECK_EQ(NumChannels(src), NumChannels(source_view_));
RTC_DCHECK_EQ(NumChannels(dst), NumChannels(destination_view_));
RTC_DCHECK_EQ(SamplesPerChannel(src), SamplesPerChannel(source_view_));
RTC_DCHECK_EQ(SamplesPerChannel(dst), SamplesPerChannel(destination_view_));
if (SamplesPerChannel(src) == SamplesPerChannel(dst)) {
// The old resampler provides this memcpy facility in the case of matching
// sample rates, so reproduce it here for the sinc resampler.
memcpy(dst, src, src_length * sizeof(T));
return static_cast<int>(src_length);
CopySamples(dst, src);
return static_cast<int>(src.data().size());
}
const size_t src_length_mono = src_length / num_channels_;
const size_t dst_capacity_mono = dst_capacity / num_channels_;
Deinterleave(src, source_view_);
for (size_t ch = 0; ch < num_channels_; ++ch) {
channel_data_array_[ch] = channel_resamplers_[ch].source.data();
for (size_t i = 0; i < resamplers_.size(); ++i) {
size_t dst_length_mono =
resamplers_[i]->Resample(source_view_[i], destination_view_[i]);
RTC_DCHECK_EQ(dst_length_mono, SamplesPerChannel(dst));
}
Deinterleave(src, src_length_mono, num_channels_, channel_data_array_.data());
Interleave<T>(destination_view_, dst);
return static_cast<int>(dst.size());
}
size_t dst_length_mono = 0;
template <typename T>
int PushResampler<T>::Resample(MonoView<const T> src, MonoView<T> dst) {
RTC_DCHECK_EQ(resamplers_.size(), 1);
RTC_DCHECK_EQ(SamplesPerChannel(src), SamplesPerChannel(source_view_));
RTC_DCHECK_EQ(SamplesPerChannel(dst), SamplesPerChannel(destination_view_));
for (auto& resampler : channel_resamplers_) {
dst_length_mono = resampler.resampler->Resample(
resampler.source.data(), src_length_mono, resampler.destination.data(),
dst_capacity_mono);
if (SamplesPerChannel(src) == SamplesPerChannel(dst)) {
CopySamples(dst, src);
return static_cast<int>(src.size());
}
for (size_t ch = 0; ch < num_channels_; ++ch) {
channel_data_array_[ch] = channel_resamplers_[ch].destination.data();
}
Interleave(channel_data_array_.data(), dst_length_mono, num_channels_, dst);
return static_cast<int>(dst_length_mono * num_channels_);
return resamplers_[0]->Resample(src, dst);
}
// Explictly generate required instantiations.

View File

@ -16,6 +16,7 @@
#include <memory>
#include "api/audio/audio_view.h"
#include "common_audio/resampler/sinc_resampler.h"
namespace webrtc {
@ -40,6 +41,12 @@ class PushSincResampler : public SincResamplerCallback {
// at least as large as `destination_frames`. Returns the number of samples
// provided in destination (for convenience, since this will always be equal
// to `destination_frames`).
template <typename S, typename D>
size_t Resample(const MonoView<S>& source, const MonoView<D>& destination) {
return Resample(&source[0], SamplesPerChannel(source), &destination[0],
SamplesPerChannel(destination));
}
size_t Resample(const int16_t* source,
size_t source_frames,
int16_t* destination,

View File

@ -26,10 +26,8 @@ size_t WebRtcSpl_FilterAR(const int16_t* a,
int16_t* state,
size_t state_length,
int16_t* state_low,
size_t state_low_length,
int16_t* filtered,
int16_t* filtered_low,
size_t filtered_low_length)
int16_t* filtered_low)
{
int64_t o;
int32_t oLOW;

View File

@ -621,10 +621,8 @@ size_t WebRtcSpl_FilterAR(const int16_t* ar_coef,
int16_t* filter_state,
size_t filter_state_length,
int16_t* filter_state_low,
size_t filter_state_low_length,
int16_t* out_vector,
int16_t* out_vector_low,
size_t out_vector_low_length);
int16_t* out_vector_low);
// WebRtcSpl_FilterMAFastQ12(...)
//
@ -1464,9 +1462,6 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
// - filter_state : Current state (higher part) of the filter.
// - filter_state_length : Length (in samples) of `filter_state`.
// - filter_state_low : Current state (lower part) of the filter.
// - filter_state_low_length : Length (in samples) of `filter_state_low`.
// - out_vector_low_length : Maximum length (in samples) of
// `out_vector_low`.
//
// Output:
// - filter_state : Updated state (upper part) vector.

View File

@ -55,10 +55,10 @@ void SmoothingFilterImpl::AddSample(float sample) {
last_sample_ = sample;
}
absl::optional<float> SmoothingFilterImpl::GetAverage() {
std::optional<float> SmoothingFilterImpl::GetAverage() {
if (!init_end_time_ms_) {
// `init_end_time_ms_` undefined since we have not received any sample.
return absl::nullopt;
return std::nullopt;
}
ExtrapolateLastSample(rtc::TimeMillis());
return state_;

View File

@ -13,7 +13,7 @@
#include <stdint.h>
#include "absl/types/optional.h"
#include <optional>
namespace webrtc {
@ -21,7 +21,7 @@ class SmoothingFilter {
public:
virtual ~SmoothingFilter() = default;
virtual void AddSample(float sample) = 0;
virtual absl::optional<float> GetAverage() = 0;
virtual std::optional<float> GetAverage() = 0;
virtual bool SetTimeConstantMs(int time_constant_ms) = 0;
};
@ -49,7 +49,7 @@ class SmoothingFilterImpl final : public SmoothingFilter {
~SmoothingFilterImpl() override;
void AddSample(float sample) override;
absl::optional<float> GetAverage() override;
std::optional<float> GetAverage() override;
bool SetTimeConstantMs(int time_constant_ms) override;
// Methods used for unittests.
@ -63,7 +63,7 @@ class SmoothingFilterImpl final : public SmoothingFilter {
const float init_factor_;
const float init_const_;
absl::optional<int64_t> init_end_time_ms_;
std::optional<int64_t> init_end_time_ms_;
float last_sample_;
float alpha_;
float state_;

View File

@ -0,0 +1,26 @@
# Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../webrtc.gni")
action("registered_field_trials_header") {
visibility = [ ":*" ]
script = "field_trials.py"
args = [
"header",
"--output",
rebase_path(target_gen_dir, root_build_dir) + "/registered_field_trials.h",
]
outputs = [ "$target_gen_dir/registered_field_trials.h" ]
}
rtc_library("registered_field_trials") {
visibility = [ "*" ]
sources = get_target_outputs(":registered_field_trials_header")
deps = [ ":registered_field_trials_header" ]
}

1129
webrtc/experiments/field_trials.py Executable file

File diff suppressed because it is too large Load Diff

View File

@ -8,13 +8,14 @@
namespace webrtc {
inline constexpr absl::string_view kRegisteredFieldTrials[] = {
"UseTwccPlrForAna",
"WebRTC-AV1-OverridePriorityBitrate",
"WebRTC-AddNetworkCostToVpn",
"WebRTC-AddPacingToCongestionWindowPushback",
"WebRTC-AdjustOpusBandwidth",
"WebRTC-Aec3AecStateFullResetKillSwitch",
"WebRTC-Aec3AecStateSubtractorAnalyzerResetKillSwitch",
"WebRTC-Aec3AntiHowlingMinimizationKillSwitch",
"WebRTC-Aec3BufferingMaxAllowedExcessRenderBlocksOverride",
"WebRTC-Aec3ClampInstQualityToOneKillSwitch",
"WebRTC-Aec3ClampInstQualityToZeroKillSwitch",
"WebRTC-Aec3CoarseFilterResetHangoverKillSwitch",
@ -22,7 +23,6 @@ inline constexpr absl::string_view kRegisteredFieldTrials[] = {
"WebRTC-Aec3DeactivateInitialStateResetKillSwitch",
"WebRTC-Aec3DelayEstimateSmoothingDelayFoundOverride",
"WebRTC-Aec3DelayEstimateSmoothingOverride",
"WebRTC-Aec3DelayEstimatorDetectPreEcho",
"WebRTC-Aec3EchoSaturationDetectionKillSwitch",
"WebRTC-Aec3EnforceCaptureDelayEstimationDownmixing",
"WebRTC-Aec3EnforceCaptureDelayEstimationLeftRightPrioritization",
@ -44,8 +44,6 @@ inline constexpr absl::string_view kRegisteredFieldTrials[] = {
"WebRTC-Aec3MinErleDuringOnsetsKillSwitch",
"WebRTC-Aec3NonlinearModeReverbKillSwitch",
"WebRTC-Aec3OnsetDetectionKillSwitch",
"WebRTC-Aec3PenalyzeHighDelaysInitialPhase",
"WebRTC-Aec3PreEchoConfiguration",
"WebRTC-Aec3RenderDelayEstimationLeftRightPrioritizationKillSwitch",
"WebRTC-Aec3SensitiveDominantNearendActivation",
"WebRTC-Aec3SetupSpecificDefaultConfigDefaultsKillSwitch",
@ -109,20 +107,16 @@ inline constexpr absl::string_view kRegisteredFieldTrials[] = {
"WebRTC-Audio-NetEqDelayManagerConfig",
"WebRTC-Audio-NetEqNackTrackerConfig",
"WebRTC-Audio-NetEqSmartFlushing",
"WebRTC-Audio-OpusAvoidNoisePumpingDuringDtx",
"WebRTC-Audio-OpusBitrateMultipliers",
"WebRTC-Audio-OpusPlcUsePrevDecodedSamples",
"WebRTC-Audio-OpusSetSignalVoiceWithDtx",
"WebRTC-Audio-OpusGeneratePlc",
"WebRTC-Audio-PriorityBitrate",
"WebRTC-Audio-Red-For-Opus",
"WebRTC-Audio-StableTargetAdaptation",
"WebRTC-Audio-iOS-Holding",
"WebRTC-AudioDevicePlayoutBufferSizeFactor",
"WebRTC-AutomaticAnimationDetectionScreenshare",
"WebRTC-Av1-GetEncoderInfoOverride",
"WebRTC-Avx2SupportKillSwitch",
"WebRTC-BindUsingInterfaceName",
"WebRTC-BoostedScreenshareQp",
"WebRTC-BurstyPacer",
"WebRTC-BitrateAdjusterUseNewfangledHeadroomAdjustment",
"WebRTC-Bwe-AllocationProbing",
"WebRTC-Bwe-AlrProbing",
"WebRTC-Bwe-EstimateBoundedIncrease",
@ -130,8 +124,8 @@ inline constexpr absl::string_view kRegisteredFieldTrials[] = {
"WebRTC-Bwe-IgnoreProbesLowerThanNetworkStateEstimate",
"WebRTC-Bwe-InitialProbing",
"WebRTC-Bwe-InjectedCongestionController",
"WebRTC-Bwe-LimitPacingFactorByUpperLinkCapacityEstimate",
"WebRTC-Bwe-LimitProbesLowerThanThroughputEstimate",
"WebRTC-Bwe-LinkCapacity",
"WebRTC-Bwe-LossBasedBweV2",
"WebRTC-Bwe-LossBasedControl",
"WebRTC-Bwe-MaxRttLimit",
@ -143,10 +137,10 @@ inline constexpr absl::string_view kRegisteredFieldTrials[] = {
"WebRTC-Bwe-ProbingConfiguration",
"WebRTC-Bwe-ReceiveTimeFix",
"WebRTC-Bwe-ReceiverLimitCapsOnly",
"WebRTC-Bwe-ResetOnAdapterIdChange",
"WebRTC-Bwe-RobustThroughputEstimatorSettings",
"WebRTC-Bwe-SafeResetOnRouteChange",
"WebRTC-Bwe-SeparateAudioPackets",
"WebRTC-Bwe-SubtractAdditionalBackoffTerm",
"WebRTC-Bwe-TrendlineEstimatorSettings",
"WebRTC-BweBackOffFactor",
"WebRTC-BweLossExperiment",
@ -155,16 +149,19 @@ inline constexpr absl::string_view kRegisteredFieldTrials[] = {
"WebRTC-BweWindowSizeInPackets",
"WebRTC-CongestionWindow",
"WebRTC-CpuLoadEstimator",
"WebRTC-DataChannelMessageInterleaving",
"WebRTC-Debugging-RtpDump",
"WebRTC-DecoderDataDumpDirectory",
"WebRTC-DefaultBitrateLimitsKillSwitch",
"WebRTC-DependencyDescriptorAdvertised",
"WebRTC-DisablePacerEmergencyStop",
"WebRTC-DisableRtxRateLimiter",
"WebRTC-DisableTlsSessionTicketKillswitch",
"WebRTC-DisableUlpFecExperiment",
"WebRTC-DontIncreaseDelayBasedBweInAlr",
"WebRTC-DscpFieldTrial",
"WebRTC-ElasticBitrateAllocation",
"WebRTC-EncoderDataDumpDirectory",
"WebRTC-EncryptedRtpHeaderExtensions",
"WebRTC-ExtraICEPing",
"WebRTC-FakeNetworkReceiveConfig",
"WebRTC-FakeNetworkSendConfig",
@ -175,6 +172,7 @@ inline constexpr absl::string_view kRegisteredFieldTrials[] = {
"WebRTC-ForcePlayoutDelay",
"WebRTC-ForceSendPlayoutDelay",
"WebRTC-ForceSimulatedOveruseIntervalMs",
"WebRTC-FrameCadenceAdapter-UseVideoFrameTimestamp",
"WebRTC-FrameDropper",
"WebRTC-FullBandHpfKillSwitch",
"WebRTC-GenericCodecDependencyDescriptor",
@ -194,8 +192,10 @@ inline constexpr absl::string_view kRegisteredFieldTrials[] = {
"WebRTC-LegacyFrameIdJumpBehavior",
"WebRTC-LegacySimulcastLayerLimit",
"WebRTC-LegacyTlsProtocols",
"WebRTC-LibaomAv1Encoder-DisableFrameDropping",
"WebRTC-LibaomAv1Encoder-AdaptiveMaxConsecDrops",
"WebRTC-LibvpxVp8Encoder-AndroidSpecificThreadingSettings",
"WebRTC-LowresSimulcastBitrateInterpolation",
"WebRTC-MixedCodecSimulcast",
"WebRTC-MutedStateKillSwitch",
"WebRTC-Network-UseNWPathMonitor",
"WebRTC-NetworkMonitorAutoDetect",
@ -207,33 +207,37 @@ inline constexpr absl::string_view kRegisteredFieldTrials[] = {
"WebRTC-Pacer-KeyframeFlushing",
"WebRTC-Pacer-PadInSilence",
"WebRTC-PacketBufferMaxSize",
"WebRTC-PaddingMode-RecentLargePacket",
"WebRTC-PayloadTypesInTransport",
"WebRTC-PcFactoryDefaultBitrates",
"WebRTC-PermuteTlsClientHello",
"WebRTC-PiggybackIceCheckAcknowledgement",
"WebRTC-PixelLimitResource",
"WebRTC-PreventSsrcGroupsWithUnexpectedSize",
"WebRTC-ProbingScreenshareBwe",
"WebRTC-ProtectionOverheadRateThreshold",
"WebRTC-QCM-Dynamic-AV1",
"WebRTC-QCM-Dynamic-VP8",
"WebRTC-QCM-Dynamic-VP9",
"WebRTC-QCM-Static-AV1",
"WebRTC-QCM-Static-VP8",
"WebRTC-QCM-Static-VP9",
"WebRTC-QpParsingKillSwitch",
"WebRTC-RFC8888CongestionControlFeedback",
"WebRTC-ReceiveBufferSize",
"WebRTC-RtcEventLogEncodeDependencyDescriptor",
"WebRTC-RtcEventLogEncodeNetEqSetMinimumDelayKillSwitch",
"WebRTC-RtcEventLogKillSwitch",
"WebRTC-RtcEventLogNewFormat",
"WebRTC-RtcpLossNotification",
"WebRTC-RttMult",
"WebRTC-SCM-Timestamp",
"WebRTC-SendBufferSizeBytes",
"WebRTC-SendNackDelayMs",
"WebRTC-SendPacketsOnWorkerThread",
"WebRTC-SetCodecPreferences-ReceiveOnlyFilterInsteadOfThrow",
"WebRTC-SetReadyToSendFalseIfSendFail",
"WebRTC-SetSocketReceiveBuffer",
"WebRTC-SignalNetworkPreferenceChange",
"WebRTC-SimulcastEncoderAdapter-GetEncoderInfoOverride",
"WebRTC-SimulcastLayerLimitRoundUp",
"WebRTC-SpsPpsIdrIsH264Keyframe",
"WebRTC-SrtpRemoveReceiveStream",
"WebRTC-StableTargetRate",
"WebRTC-Stats-RtxReceiveStats",
"WebRTC-StrictPacingAndProbing",
"WebRTC-StunInterPacketDelay",
"WebRTC-SurfaceCellularTypes",
@ -243,52 +247,47 @@ inline constexpr absl::string_view kRegisteredFieldTrials[] = {
"WebRTC-TransientSuppressorForcedOff",
"WebRTC-UseBaseHeavyVP8TL3RateAllocation",
"WebRTC-UseDifferentiatedCellularCosts",
"WebRTC-UseShortVP8TL2Pattern",
"WebRTC-UseShortVP8TL3Pattern",
"WebRTC-UseNtpTimeAbsoluteSendTime",
"WebRTC-UseStandardBytesStats",
"WebRTC-UseTurnServerAsStunServer",
"WebRTC-VP8-CpuSpeed-Arm",
"WebRTC-VP8-ForcePartitionResilience",
"WebRTC-VP8-Forced-Fallback-Encoder-v2",
"WebRTC-VP8-GetEncoderInfoOverride",
"WebRTC-VP8-MaxFrameInterval",
"WebRTC-VP8-Postproc-Config",
"WebRTC-VP8-Postproc-Config-Arm",
"WebRTC-VP8ConferenceTemporalLayers",
"WebRTC-VP8IosMaxNumberOfThread",
"WebRTC-VP8VariableFramerateScreenshare",
"WebRTC-VP9-GetEncoderInfoOverride",
"WebRTC-VP9-LowTierOptimizations",
"WebRTC-VP9-PerformanceFlags",
"WebRTC-VP9-SvcForSimulcast",
"WebRTC-VP9QualityScaler",
"WebRTC-VP9VariableFramerateScreenshare",
"WebRTC-Video-AV1EvenPayloadSizes",
"WebRTC-Video-BalancedDegradation",
"WebRTC-Video-BalancedDegradationSettings",
"WebRTC-Video-BandwidthQualityScalerSettings",
"WebRTC-Video-DisableAutomaticResize",
"WebRTC-Video-DiscardPacketsWithUnknownSsrc",
"WebRTC-Video-EnableRetransmitAllLayers",
"WebRTC-Video-EncoderFallbackSettings",
"WebRTC-Video-ForcedSwDecoderFallback",
"WebRTC-Video-H26xPacketBuffer",
"WebRTC-Video-InitialDecoderResolution",
"WebRTC-Video-MinVideoBitrate",
"WebRTC-Video-Pacing",
"WebRTC-Video-PreferTemporalSupportOnBaseLayer",
"WebRTC-Video-QualityRampupSettings",
"WebRTC-Video-QualityScalerSettings",
"WebRTC-Video-QualityScaling",
"WebRTC-Video-RequestedResolutionOverrideOutputFormatRequest",
"WebRTC-Video-SimulcastIndependentFrameIds",
"WebRTC-Video-UseFrameRateForOverhead",
"WebRTC-Video-VariableStartScaleFactor",
"WebRTC-Video-Vp9FlexibleMode",
"WebRTC-VideoEncoderSettings",
"WebRTC-VideoFrameTrackingIdAdvertised",
"WebRTC-VideoLayersAllocationAdvertised",
"WebRTC-VideoRateControl",
"WebRTC-VoIPChannelRemixingAdjustmentKillSwitch",
"WebRTC-Vp9ExternalRefCtrl",
"WebRTC-Vp9InterLayerPred",
"WebRTC-Vp9IssueKeyFrameOnLayerDeactivation",
"WebRTC-ZeroHertzScreenshare",
"WebRTC-ZeroHertzQueueOverload",
"WebRTC-ZeroPlayoutDelay",
};

View File

@ -30,7 +30,6 @@ group("modules") {
rtc_source_set("module_api_public") {
sources = [ "include/module_common_types_public.h" ]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
rtc_source_set("module_api") {
@ -143,7 +142,6 @@ if (rtc_include_tests && !build_with_chromium) {
"../resources/audio_processing/transient/wpd5.dat",
"../resources/audio_processing/transient/wpd6.dat",
"../resources/audio_processing/transient/wpd7.dat",
"../resources/deflicker_before_cif_short.yuv",
"../resources/far16_stereo.pcm",
"../resources/far176_stereo.pcm",
"../resources/far192_stereo.pcm",
@ -154,7 +152,6 @@ if (rtc_include_tests && !build_with_chromium) {
"../resources/far88_stereo.pcm",
"../resources/far8_stereo.pcm",
"../resources/far96_stereo.pcm",
"../resources/foremanColorEnhanced_cif_short.yuv",
"../resources/foreman_cif.yuv",
"../resources/foreman_cif_short.yuv",
"../resources/near16_stereo.pcm",

View File

@ -22,8 +22,6 @@ rtc_source_set("audio_coding_module_typedefs") {
rtc_library("audio_coding") {
visibility += [ "*" ]
sources = [
"acm2/acm_receiver.cc",
"acm2/acm_receiver.h",
"acm2/acm_remixing.cc",
"acm2/acm_remixing.h",
"acm2/acm_resampler.cc",
@ -38,7 +36,6 @@ rtc_library("audio_coding") {
deps = [
":audio_coding_module_typedefs",
":default_neteq_factory",
":neteq",
"..:module_api",
"..:module_api_public",
@ -46,7 +43,10 @@ rtc_library("audio_coding") {
"../../api:function_view",
"../../api/audio:audio_frame_api",
"../../api/audio_codecs:audio_codecs_api",
"../../api/environment",
"../../api/neteq:default_neteq_factory",
"../../api/neteq:neteq_api",
"../../api/units:timestamp",
"../../common_audio",
"../../common_audio:common_audio_c",
"../../rtc_base:audio_format_to_string",
@ -58,10 +58,8 @@ rtc_library("audio_coding") {
"../../rtc_base/synchronization:mutex",
"../../system_wrappers",
"../../system_wrappers:metrics",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
"//third_party/abseil-cpp/absl/strings:string_view",
]
}
@ -76,7 +74,6 @@ rtc_library("legacy_encoded_audio_frame") {
"../../rtc_base:buffer",
"../../rtc_base:checks",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
rtc_library("webrtc_cng") {
@ -109,7 +106,6 @@ rtc_library("audio_encoder_cng") {
"../../common_audio",
"../../rtc_base:checks",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
rtc_library("red") {
@ -129,10 +125,7 @@ rtc_library("red") {
"../../rtc_base:byte_order",
"../../rtc_base:checks",
"../../rtc_base:logging",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
"//third_party/abseil-cpp/absl/strings:string_view",
]
}
@ -154,7 +147,6 @@ rtc_library("g711") {
"../../rtc_base:buffer",
"../../rtc_base:checks",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
public_deps += [ ":g711_c" ] # no-presubmit-check TODO(webrtc:8603)
}
@ -187,7 +179,6 @@ rtc_library("g722") {
"../../rtc_base:checks",
"../../rtc_base:safe_conversions",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
public_deps += [ ":g722_c" ] # no-presubmit-check TODO(webrtc:8603)
}
@ -222,7 +213,6 @@ rtc_library("ilbc") {
"../../rtc_base:logging",
"../../rtc_base:safe_conversions",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
public_deps += [ ":ilbc_c" ] # no-presubmit-check TODO(webrtc:8603)
}
@ -377,8 +367,8 @@ rtc_library("ilbc_c") {
"../../rtc_base:checks",
"../../rtc_base:sanitizer",
"../../rtc_base/system:arch",
"//third_party/abseil-cpp/absl/base:core_headers",
]
absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers" ]
}
rtc_library("isac_vad") {
@ -452,10 +442,7 @@ rtc_library("audio_coding_opus_common") {
"../../api/audio_codecs:audio_codecs_api",
"../../rtc_base:checks",
"../../rtc_base:stringutils",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
"//third_party/abseil-cpp/absl/strings:string_view",
]
}
@ -473,8 +460,10 @@ rtc_library("webrtc_opus") {
":audio_coding_opus_common",
":audio_network_adaptor",
"../../api:array_view",
"../../api:field_trials_view",
"../../api/audio_codecs:audio_codecs_api",
"../../api/audio_codecs/opus:audio_encoder_opus_config",
"../../api/environment",
"../../common_audio",
"../../rtc_base:buffer",
"../../rtc_base:checks",
@ -486,11 +475,9 @@ rtc_library("webrtc_opus") {
"../../rtc_base:safe_minmax",
"../../rtc_base:stringutils",
"../../rtc_base:timeutils",
"../../system_wrappers:field_trial",
]
absl_deps = [
"//third_party/abseil-cpp/absl/memory",
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
"//third_party/abseil-cpp/absl/strings:string_view",
]
public_deps += # no-presubmit-check TODO(webrtc:8603)
[ ":webrtc_opus_wrapper" ]
@ -510,6 +497,7 @@ rtc_library("webrtc_multiopus") {
deps = [
":audio_coding_opus_common",
"../../api:array_view",
"../../api/audio_codecs:audio_codecs_api",
"../../api/audio_codecs/opus:audio_decoder_opus_config",
"../../api/audio_codecs/opus:audio_encoder_opus_config",
@ -518,13 +506,11 @@ rtc_library("webrtc_multiopus") {
"../../rtc_base:checks",
"../../rtc_base:logging",
"../../rtc_base:macromagic",
"../../rtc_base:safe_conversions",
"../../rtc_base:safe_minmax",
"../../rtc_base:stringutils",
]
absl_deps = [
"//third_party/abseil-cpp/absl/memory",
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
public_deps += # no-presubmit-check TODO(webrtc:8603)
[ ":webrtc_opus_wrapper" ]
@ -546,7 +532,6 @@ rtc_library("webrtc_opus_wrapper") {
"../../api:array_view",
"../../rtc_base:checks",
"../../rtc_base:ignore_wundef",
"../../system_wrappers:field_trial",
]
if (rtc_build_opus) {
@ -563,6 +548,7 @@ if (rtc_enable_protobuf) {
sources = [ "audio_network_adaptor/debug_dump.proto" ]
link_deps = [ ":ana_config_proto" ]
proto_out_dir = "modules/audio_coding/audio_network_adaptor"
proto_data_sources = [ "audio_network_adaptor/config.proto" ]
}
proto_library("ana_config_proto") {
visibility += [ "*" ]
@ -577,7 +563,6 @@ rtc_library("audio_network_adaptor_config") {
"audio_network_adaptor/audio_network_adaptor_config.cc",
"audio_network_adaptor/include/audio_network_adaptor_config.h",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
rtc_library("audio_network_adaptor") {
@ -618,7 +603,6 @@ rtc_library("audio_network_adaptor") {
"../../common_audio",
"../../logging:rtc_event_audio",
"../../rtc_base:checks",
"../../rtc_base:ignore_wundef",
"../../rtc_base:logging",
"../../rtc_base:protobuf_utils",
"../../rtc_base:safe_conversions",
@ -626,11 +610,8 @@ rtc_library("audio_network_adaptor") {
"../../rtc_base/system:file_wrapper",
"../../system_wrappers",
"../../system_wrappers:field_trial",
]
absl_deps = [
"//third_party/abseil-cpp/absl/algorithm:container",
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
"//third_party/abseil-cpp/absl/strings:string_view",
]
if (rtc_enable_protobuf) {
@ -662,6 +643,8 @@ rtc_library("neteq") {
"neteq/decision_logic.h",
"neteq/decoder_database.cc",
"neteq/decoder_database.h",
"neteq/delay_constraints.cc",
"neteq/delay_constraints.h",
"neteq/delay_manager.cc",
"neteq/delay_manager.h",
"neteq/dsp_helper.cc",
@ -690,8 +673,6 @@ rtc_library("neteq") {
"neteq/packet_arrival_history.h",
"neteq/packet_buffer.cc",
"neteq/packet_buffer.h",
"neteq/post_decode_vad.cc",
"neteq/post_decode_vad.h",
"neteq/preemptive_expand.cc",
"neteq/preemptive_expand.h",
"neteq/random_vector.cc",
@ -717,14 +698,17 @@ rtc_library("neteq") {
":webrtc_cng",
"..:module_api_public",
"../../api:array_view",
"../../api:field_trials_view",
"../../api:rtp_headers",
"../../api:rtp_packet_info",
"../../api:scoped_refptr",
"../../api/audio:audio_frame_api",
"../../api/audio_codecs:audio_codecs_api",
"../../api/environment",
"../../api/neteq:neteq_api",
"../../api/neteq:neteq_controller_api",
"../../api/neteq:tick_timer",
"../../api/units:timestamp",
"../../common_audio",
"../../common_audio:common_audio_c",
"../../rtc_base:audio_format_to_string",
@ -741,28 +725,9 @@ rtc_library("neteq") {
"../../rtc_base/experiments:field_trial_parser",
"../../rtc_base/synchronization:mutex",
"../../system_wrappers",
"../../system_wrappers:field_trial",
"../../system_wrappers:metrics",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_source_set("default_neteq_factory") {
visibility += webrtc_default_visibility
sources = [
"neteq/default_neteq_factory.cc",
"neteq/default_neteq_factory.h",
]
deps = [
":neteq",
"../../api:scoped_refptr",
"../../api/audio_codecs:audio_codecs_api",
"../../api/neteq:default_neteq_controller_factory",
"../../api/neteq:neteq_api",
"../../system_wrappers:system_wrappers",
"//third_party/abseil-cpp/absl/strings:string_view",
]
}
@ -788,16 +753,20 @@ rtc_library("neteq_tools_minimal") {
]
deps = [
":default_neteq_factory",
":neteq",
"../../api:array_view",
"../../api:field_trials",
"../../api:neteq_simulator_api",
"../../api:rtp_headers",
"../../api/audio:audio_frame_api",
"../../api/audio_codecs:audio_codecs_api",
"../../api/environment",
"../../api/environment:environment_factory",
"../../api/neteq:custom_neteq_factory",
"../../api/neteq:default_neteq_controller_factory",
"../../api/neteq:default_neteq_factory",
"../../api/neteq:neteq_api",
"../../api/units:timestamp",
"../../rtc_base:buffer",
"../../rtc_base:checks",
"../../rtc_base:copy_on_write_buffer",
@ -806,7 +775,6 @@ rtc_library("neteq_tools_minimal") {
"../../system_wrappers",
"../rtp_rtcp:rtp_rtcp_format",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
defines = audio_codec_defines
}
@ -840,15 +808,13 @@ rtc_library("neteq_test_tools") {
"../../common_audio",
"../../rtc_base:buffer",
"../../rtc_base:checks",
"../../rtc_base:ssl",
"../../rtc_base:copy_on_write_buffer",
"../../rtc_base:digest",
"../../rtc_base:stringutils",
"../../rtc_base/system:arch",
"../../test:rtp_test_utils",
"../rtp_rtcp:rtp_rtcp_format",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
"//third_party/abseil-cpp/absl/strings:string_view",
]
}
@ -880,10 +846,7 @@ rtc_library("neteq_tools") {
"../../rtc_base:timeutils",
"../rtp_rtcp",
"../rtp_rtcp:rtp_rtcp_format",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
"//third_party/abseil-cpp/absl/strings:string_view",
]
if (rtc_enable_protobuf) {
@ -909,8 +872,8 @@ rtc_library("neteq_input_audio_tools") {
deps = [
"../../common_audio",
"../../rtc_base:checks",
"//third_party/abseil-cpp/absl/strings:string_view",
]
absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
}
if (rtc_enable_protobuf) {
@ -946,7 +909,6 @@ rtc_library("audio_coding_modules_tests_shared") {
deps = [
":audio_coding",
":audio_coding_module_typedefs",
":default_neteq_factory",
":neteq_test_tools",
":neteq_tools_minimal",
":webrtc_opus_wrapper",
@ -955,19 +917,20 @@ rtc_library("audio_coding_modules_tests_shared") {
"../../api/audio:audio_frame_api",
"../../api/audio_codecs:builtin_audio_decoder_factory",
"../../api/audio_codecs:builtin_audio_encoder_factory",
"../../api/environment",
"../../api/environment:environment_factory",
"../../api/neteq:default_neteq_factory",
"../../api/neteq:neteq_api",
"../../api/units:timestamp",
"../../rtc_base:checks",
"../../rtc_base:ignore_wundef",
"../../rtc_base:ssl",
"../../rtc_base:digest",
"../../rtc_base:stringutils",
"../../system_wrappers",
"../../test:fileutils",
"../../test:test_support",
"//testing/gtest",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
"//third_party/abseil-cpp/absl/strings:string_view",
]
defines = audio_coding_defines
@ -1087,6 +1050,11 @@ if (rtc_include_tests) {
"../../api/audio_codecs/ilbc:audio_encoder_ilbc",
"../../api/audio_codecs/opus:audio_decoder_opus",
"../../api/audio_codecs/opus:audio_encoder_opus",
"../../api/environment",
"../../api/environment:environment_factory",
"../../api/neteq:default_neteq_factory",
"../../api/neteq:neteq_api",
"../../api/units:timestamp",
"../../common_audio",
"../../rtc_base:checks",
"../../rtc_base:logging",
@ -1097,10 +1065,8 @@ if (rtc_include_tests) {
"../../test:fileutils",
"../../test:scoped_key_value_config",
"../../test:test_support",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
"//third_party/abseil-cpp/absl/strings:string_view",
]
defines = audio_coding_defines
}
@ -1117,6 +1083,7 @@ if (rtc_include_tests) {
":neteq_test_support",
":neteq_test_tools",
"../../api/audio_codecs/opus:audio_encoder_opus",
"../../api/environment:environment_factory",
"../../api/test/metrics:global_metrics_logger_and_exporter",
"../../api/test/metrics:metric",
"../../rtc_base:macromagic",
@ -1125,8 +1092,8 @@ if (rtc_include_tests) {
"../../test:fileutils",
"../../test:test_flags",
"../../test:test_support",
"//third_party/abseil-cpp/absl/flags:flag",
]
absl_deps = [ "//third_party/abseil-cpp/absl/flags:flag" ]
}
rtc_library("acm_receive_test") {
@ -1145,12 +1112,14 @@ if (rtc_include_tests) {
"../../api:scoped_refptr",
"../../api/audio_codecs:audio_codecs_api",
"../../api/audio_codecs:builtin_audio_decoder_factory",
"../../api/environment:environment_factory",
"../../api/neteq:default_neteq_factory",
"../../api/neteq:neteq_api",
"../../test:test_support",
"//testing/gtest",
]
deps += audio_coding_deps
absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
}
rtc_library("acm_send_test") {
@ -1171,13 +1140,16 @@ if (rtc_include_tests) {
"../../api/audio_codecs:audio_codecs_api",
"../../api/audio_codecs:builtin_audio_decoder_factory",
"../../api/audio_codecs:builtin_audio_encoder_factory",
"../../api/environment",
"../../api/environment:environment_factory",
"../../rtc_base:checks",
"../../rtc_base:stringutils",
"../../test:test_support",
"//testing/gtest",
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/strings:string_view",
]
deps += audio_coding_deps
absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
}
if (!build_with_chromium) {
@ -1205,8 +1177,10 @@ if (rtc_include_tests) {
":neteq_tools",
"../../api/audio_codecs:audio_codecs_api",
"../../api/audio_codecs/opus:audio_encoder_opus",
"../../api/environment:environment_factory",
"../../common_audio",
"../../rtc_base/system:arch",
"../../test:explicit_key_value_config",
"../../test:fileutils",
"../../test:test_main",
"../../test:test_support",
@ -1242,10 +1216,7 @@ if (rtc_include_tests) {
"../../rtc_base:checks",
"../../rtc_base:refcount",
"../../test:fileutils",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
"//third_party/abseil-cpp/absl/strings:string_view",
]
sources = [
"neteq/tools/neteq_test_factory.cc",
@ -1279,7 +1250,6 @@ if (rtc_include_tests) {
"//third_party/abseil-cpp/absl/flags:flag",
"//third_party/abseil-cpp/absl/flags:parse",
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
sources = [ "neteq/tools/neteq_rtpplay.cc" ]
}
@ -1348,13 +1318,15 @@ if (rtc_include_tests) {
]
deps = [
":default_neteq_factory",
":neteq",
":neteq_test_tools",
":pcm16b",
"../../api/audio:audio_frame_api",
"../../api/audio_codecs:audio_codecs_api",
"../../api/audio_codecs:builtin_audio_decoder_factory",
"../../api/environment",
"../../api/environment:environment_factory",
"../../api/neteq:default_neteq_factory",
"../../api/neteq:neteq_api",
"../../rtc_base:checks",
"../../system_wrappers",
@ -1373,23 +1345,23 @@ if (rtc_include_tests) {
]
deps = [
":default_neteq_factory",
":neteq",
":neteq_input_audio_tools",
":neteq_test_tools",
":neteq_tools_minimal",
"../../api/audio_codecs:builtin_audio_decoder_factory",
"../../api/environment:environment_factory",
"../../api/neteq:default_neteq_factory",
"../../api/neteq:neteq_api",
"../../api/units:timestamp",
"../../rtc_base:checks",
"../../rtc_base:stringutils",
"../../system_wrappers",
"../../test:fileutils",
"../../test:test_support",
"//testing/gtest",
]
absl_deps = [
"//third_party/abseil-cpp/absl/flags:flag",
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/strings:string_view",
]
}
@ -1398,6 +1370,7 @@ if (rtc_include_tests) {
deps = [
":audio_coding",
":audio_coding_module_typedefs",
":audio_encoder_cng",
":neteq_input_audio_tools",
"../../api/audio:audio_frame_api",
@ -1406,10 +1379,12 @@ if (rtc_include_tests) {
"../../api/audio_codecs/g722:audio_encoder_g722",
"../../api/audio_codecs/ilbc:audio_encoder_ilbc",
"../../api/audio_codecs/opus:audio_encoder_opus",
"../../api/environment:environment_factory",
"../../rtc_base:checks",
"../../rtc_base:ip_address",
"../../rtc_base:safe_conversions",
"//third_party/abseil-cpp/absl/flags:flag",
"//third_party/abseil-cpp/absl/flags:parse",
"//third_party/abseil-cpp/absl/memory",
]
deps += audio_coding_deps
@ -1596,7 +1571,6 @@ if (rtc_include_tests) {
visibility += webrtc_default_visibility
sources = [
"acm2/acm_receiver_unittest.cc",
"acm2/acm_remixing_unittest.cc",
"acm2/audio_coding_module_unittest.cc",
"acm2/call_statistics_unittest.cc",
@ -1629,6 +1603,7 @@ if (rtc_include_tests) {
"neteq/comfort_noise_unittest.cc",
"neteq/decision_logic_unittest.cc",
"neteq/decoder_database_unittest.cc",
"neteq/delay_constraints_unittest.cc",
"neteq/delay_manager_unittest.cc",
"neteq/dsp_helper_unittest.cc",
"neteq/dtmf_buffer_unittest.cc",
@ -1644,6 +1619,7 @@ if (rtc_include_tests) {
"neteq/mock/mock_expand.h",
"neteq/mock/mock_histogram.h",
"neteq/mock/mock_neteq_controller.h",
"neteq/mock/mock_packet_arrival_history.h",
"neteq/mock/mock_packet_buffer.h",
"neteq/mock/mock_red_payload_splitter.h",
"neteq/mock/mock_statistics_calculator.h",
@ -1656,7 +1632,6 @@ if (rtc_include_tests) {
"neteq/normal_unittest.cc",
"neteq/packet_arrival_history_unittest.cc",
"neteq/packet_buffer_unittest.cc",
"neteq/post_decode_vad_unittest.cc",
"neteq/random_vector_unittest.cc",
"neteq/red_payload_splitter_unittest.cc",
"neteq/reorder_optimizer_unittest.cc",
@ -1678,7 +1653,6 @@ if (rtc_include_tests) {
":audio_coding_opus_common",
":audio_encoder_cng",
":audio_network_adaptor",
":default_neteq_factory",
":g711",
":ilbc",
":legacy_encoded_audio_frame",
@ -1704,12 +1678,16 @@ if (rtc_include_tests) {
"../../api/audio_codecs/opus:audio_decoder_opus",
"../../api/audio_codecs/opus:audio_encoder_multiopus",
"../../api/audio_codecs/opus:audio_encoder_opus",
"../../api/environment",
"../../api/environment:environment_factory",
"../../api/neteq:default_neteq_controller_factory",
"../../api/neteq:default_neteq_factory",
"../../api/neteq:neteq_api",
"../../api/neteq:neteq_controller_api",
"../../api/neteq:tick_timer",
"../../api/neteq:tick_timer_unittest",
"../../api/rtc_event_log",
"../../api/units:timestamp",
"../../common_audio",
"../../common_audio:common_audio_c",
"../../common_audio:mock_common_audio",
@ -1717,7 +1695,7 @@ if (rtc_include_tests) {
"../../logging:rtc_event_audio",
"../../modules/rtp_rtcp:rtp_rtcp_format",
"../../rtc_base:checks",
"../../rtc_base:ignore_wundef",
"../../rtc_base:digest",
"../../rtc_base:macromagic",
"../../rtc_base:platform_thread",
"../../rtc_base:refcount",
@ -1725,7 +1703,6 @@ if (rtc_include_tests) {
"../../rtc_base:rtc_event",
"../../rtc_base:safe_conversions",
"../../rtc_base:sanitizer",
"../../rtc_base:ssl",
"../../rtc_base:stringutils",
"../../rtc_base:timeutils",
"../../rtc_base/synchronization:mutex",
@ -1733,6 +1710,7 @@ if (rtc_include_tests) {
"../../system_wrappers",
"../../test:audio_codec_mocks",
"../../test:audio_test_common",
"../../test:explicit_key_value_config",
"../../test:field_trial",
"../../test:fileutils",
"../../test:rtc_expect_death",
@ -1743,12 +1721,9 @@ if (rtc_include_tests) {
"codecs/opus/test",
"codecs/opus/test:test_unittest",
"//testing/gtest",
]
absl_deps = [
"//third_party/abseil-cpp/absl/flags:flag",
"//third_party/abseil-cpp/absl/memory",
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
"//third_party/abseil-cpp/absl/strings:string_view",
]
defines = audio_coding_defines

View File

@ -1,22 +0,0 @@
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_DECODER_ISAC_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_DECODER_ISAC_H_
#include "modules/audio_coding/codecs/isac/audio_decoder_isac_t.h"
#include "modules/audio_coding/codecs/isac/main/source/isac_float_type.h"
namespace webrtc {
/* Floating-point iSAC decoder: the generic AudioDecoderIsacT template
 * instantiated with the float (main) implementation. */
using AudioDecoderIsacFloatImpl = AudioDecoderIsacT<IsacFloat>;
} // namespace webrtc
/* NOTE(review): guard comment corrected to match the actual macro above;
 * the original said ..._AUDIO_ENCODER_ISAC_H_ (copied from the encoder
 * header). */
#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_DECODER_ISAC_H_

View File

@ -1,22 +0,0 @@
/*
 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_ENCODER_ISAC_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_ENCODER_ISAC_H_
#include "modules/audio_coding/codecs/isac/audio_encoder_isac_t.h"
#include "modules/audio_coding/codecs/isac/main/source/isac_float_type.h"
namespace webrtc {
/* Floating-point iSAC encoder: the generic AudioEncoderIsacT template
 * instantiated with the float (main) implementation. */
using AudioEncoderIsacFloatImpl = AudioEncoderIsacT<IsacFloat>;
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_ENCODER_ISAC_H_

View File

@ -1,617 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_ISAC_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_ISAC_H_
#include <stddef.h>
#include "modules/audio_coding/codecs/isac/bandwidth_info.h"
typedef struct WebRtcISACStruct ISACStruct;
#if defined(__cplusplus)
extern "C" {
#endif
/******************************************************************************
* WebRtcIsac_Create(...)
*
* This function creates an ISAC instance, which will contain the state
* information for one coding/decoding channel.
*
* Input:
* - *ISAC_main_inst : a pointer to the coder instance.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsac_Create(ISACStruct** ISAC_main_inst);
/******************************************************************************
* WebRtcIsac_Free(...)
*
* This function frees the ISAC instance created at the beginning.
*
* Input:
* - ISAC_main_inst : an ISAC instance.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsac_Free(ISACStruct* ISAC_main_inst);
/******************************************************************************
* WebRtcIsac_EncoderInit(...)
*
* This function initializes an ISAC instance prior to the encoder calls.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - CodingMode : 0 -> Bit rate and frame length are
* automatically adjusted to available bandwidth
* on transmission channel, just valid if codec
* is created to work in wideband mode.
* 1 -> User sets a frame length and a target bit
* rate which is taken as the maximum
* short-term average bit rate.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsac_EncoderInit(ISACStruct* ISAC_main_inst, int16_t CodingMode);
/******************************************************************************
* WebRtcIsac_Encode(...)
*
* This function encodes 10ms audio blocks and inserts it into a package.
* Input speech length has 160 samples if operating at 16 kHz sampling
* rate, or 320 if operating at 32 kHz sampling rate. The encoder buffers the
* input audio until the whole frame is buffered then proceeds with encoding.
*
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - speechIn : input speech vector.
*
* Output:
* - encoded : the encoded data vector
*
* Return value:
* : >0 - Length (in bytes) of coded data
* : 0 - The buffer didn't reach the chosen
* frame-size so it keeps buffering speech
* samples.
* : -1 - Error
*/
int WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
const int16_t* speechIn,
uint8_t* encoded);
/******************************************************************************
* WebRtcIsac_DecoderInit(...)
*
* This function initializes an ISAC instance prior to the decoder calls.
*
* Input:
* - ISAC_main_inst : ISAC instance.
*/
void WebRtcIsac_DecoderInit(ISACStruct* ISAC_main_inst);
/******************************************************************************
* WebRtcIsac_UpdateBwEstimate(...)
*
* This function updates the estimate of the bandwidth.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - encoded : encoded ISAC frame(s).
* - packet_size : size of the packet.
* - rtp_seq_number : the RTP number of the packet.
* - send_ts : the RTP send timestamp, given in samples
* - arr_ts : the arrival time of the packet (from NetEq)
* in samples.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsac_UpdateBwEstimate(ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts);
/******************************************************************************
* WebRtcIsac_Decode(...)
*
* This function decodes an ISAC frame. At 16 kHz sampling rate, the length
* of the output audio could be either 480 or 960 samples, equivalent to
* 30 or 60 ms respectively. At 32 kHz sampling rate, the length of the
* output audio is 960 samples, which is 30 ms.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - encoded : encoded ISAC frame(s).
* - len : bytes in encoded vector.
*
* Output:
* - decoded : The decoded vector.
*
* Return value : >0 - number of samples in decoded vector.
* -1 - Error.
*/
int WebRtcIsac_Decode(ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
size_t len,
int16_t* decoded,
int16_t* speechType);
/******************************************************************************
* WebRtcIsac_DecodePlc(...)
*
* This function conducts PLC for ISAC frame(s). Output speech length
* will be a multiple of frames, i.e. multiples of 30 ms audio. Therefore,
* the output is multiple of 480 samples if operating at 16 kHz and multiple
* of 960 if operating at 32 kHz.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - noOfLostFrames : Number of PLC frames to produce.
*
* Output:
* - decoded : The decoded vector.
*
* Return value : Number of samples in decoded PLC vector
*/
size_t WebRtcIsac_DecodePlc(ISACStruct* ISAC_main_inst,
int16_t* decoded,
size_t noOfLostFrames);
/******************************************************************************
* WebRtcIsac_Control(...)
*
* This function sets the limit on the short-term average bit-rate and the
* frame length. Should be used only in Instantaneous mode. At 16 kHz sampling
* rate, an average bit-rate between 10000 to 32000 bps is valid and a
* frame-size of 30 or 60 ms is acceptable. At 32 kHz, an average bit-rate
* between 10000 to 56000 is acceptable, and the valid frame-size is 30 ms.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - rate : limit on the short-term average bit rate,
* in bits/second.
* - framesize : frame-size in millisecond.
*
* Return value : 0 - ok
* -1 - Error
*/
int16_t WebRtcIsac_Control(ISACStruct* ISAC_main_inst,
int32_t rate,
int framesize);
void WebRtcIsac_SetInitialBweBottleneck(ISACStruct* ISAC_main_inst,
int bottleneck_bits_per_second);
/******************************************************************************
* WebRtcIsac_ControlBwe(...)
*
* This function sets the initial values of bottleneck and frame-size if
* iSAC is used in channel-adaptive mode. Therefore, this API is not
* applicable if the codec is created to operate in super-wideband mode.
*
* Through this API, users can enforce a frame-size for all values of
* bottleneck. Then iSAC will not automatically change the frame-size.
*
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - rateBPS : initial value of bottleneck in bits/second
* 10000 <= rateBPS <= 56000 is accepted
* For default bottleneck set rateBPS = 0
* - frameSizeMs : number of milliseconds per frame (30 or 60)
* - enforceFrameSize : 1 to enforce the given frame-size through
* out the adaptation process, 0 to let iSAC
* change the frame-size if required.
*
* Return value : 0 - ok
* -1 - Error
*/
int16_t WebRtcIsac_ControlBwe(ISACStruct* ISAC_main_inst,
int32_t rateBPS,
int frameSizeMs,
int16_t enforceFrameSize);
/******************************************************************************
* WebRtcIsac_ReadFrameLen(...)
*
* This function returns the length of the frame represented in the packet.
*
* Input:
* - encoded : Encoded bit-stream
*
* Output:
* - frameLength : Length of frame in packet (in samples)
*
*/
int16_t WebRtcIsac_ReadFrameLen(const ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
int16_t* frameLength);
/******************************************************************************
* WebRtcIsac_version(...)
*
* This function returns the version number.
*
* Output:
* - version : Pointer to character string
*
*/
void WebRtcIsac_version(char* version);
/******************************************************************************
* WebRtcIsac_GetErrorCode(...)
*
* This function can be used to check the error code of an iSAC instance. When
* a function returns -1 a error code will be set for that instance. The
* function below extract the code of the last error that occurred in the
* specified instance.
*
* Input:
* - ISAC_main_inst : ISAC instance
*
* Return value : Error code
*/
int16_t WebRtcIsac_GetErrorCode(ISACStruct* ISAC_main_inst);
/****************************************************************************
* WebRtcIsac_GetUplinkBw(...)
*
* This function outputs the target bottleneck of the codec. In
* channel-adaptive mode, the target bottleneck is specified through in-band
* signalling retrieved by the bandwidth estimator.
* In channel-independent, also called instantaneous mode, the target
* bottleneck is provided to the encoder by calling xxx_control(...). If
* xxx_control is never called the default values is returned. The default
* value for bottleneck at 16 kHz encoder sampling rate is 32000 bits/sec,
* and it is 56000 bits/sec for 32 kHz sampling rate.
* Note that the output is the iSAC internal operating bottleneck which might
* differ slightly from the one provided through xxx_control().
*
* Input:
* - ISAC_main_inst : iSAC instance
*
* Output:
* - *bottleneck : bottleneck in bits/sec
*
* Return value : -1 if error happens
* 0 bit-rates computed correctly.
*/
int16_t WebRtcIsac_GetUplinkBw(ISACStruct* ISAC_main_inst, int32_t* bottleneck);
/******************************************************************************
* WebRtcIsac_SetMaxPayloadSize(...)
*
* This function sets a limit for the maximum payload size of iSAC. The same
* value is used both for 30 and 60 ms packets. If the encoder sampling rate
* is 16 kHz the maximum payload size is between 120 and 400 bytes. If the
* encoder sampling rate is 32 kHz the maximum payload size is between 120
* and 600 bytes.
*
* If an out of range limit is used, the function returns -1, but the closest
* valid value will be applied.
*
* ---------------
* IMPORTANT NOTES
* ---------------
* The size of a packet is limited to the minimum of 'max-payload-size' and
* 'max-rate.' For instance, let's assume the max-payload-size is set to
* 170 bytes, and max-rate is set to 40 kbps. Note that a limit of 40 kbps
* translates to 150 bytes for 30ms frame-size & 300 bytes for 60ms
* frame-size. Then a packet with a frame-size of 30 ms is limited to 150,
* i.e. min(170, 150), and a packet with 60 ms frame-size is limited to
* 170 bytes, i.e. min(170, 300).
*
* Input:
* - ISAC_main_inst : iSAC instance
* - maxPayloadBytes : maximum size of the payload in bytes
* valid values are between 120 and 400 bytes
* if encoder sampling rate is 16 kHz. For
* 32 kHz encoder sampling rate valid values
* are between 120 and 600 bytes.
*
* Return value : 0 if successful
* -1 if error happens
*/
int16_t WebRtcIsac_SetMaxPayloadSize(ISACStruct* ISAC_main_inst,
int16_t maxPayloadBytes);
/******************************************************************************
* WebRtcIsac_SetMaxRate(...)
*
* This function sets the maximum rate which the codec may not exceed for
* any signal packet. The maximum rate is defined and payload-size per
* frame-size in bits per second.
*
* The codec has a maximum rate of 53400 bits per second (200 bytes per 30
* ms) if the encoder sampling rate is 16kHz, and 160 kbps (600 bytes/30 ms)
* if the encoder sampling rate is 32 kHz.
*
* It is possible to set a maximum rate between 32000 and 53400 bits/sec
* in wideband mode, and 32000 to 160000 bits/sec in super-wideband mode.
*
* If an out of range limit is used, the function returns -1, but the closest
* valid value will be applied.
*
* ---------------
* IMPORTANT NOTES
* ---------------
* The size of a packet is limited to the minimum of 'max-payload-size' and
* 'max-rate.' For instance, let's assume the max-payload-size is set to
* 170 bytes, and max-rate is set to 40 kbps. Note that a limit of 40 kbps
* translates to 150 bytes for 30ms frame-size & 300 bytes for 60ms
* frame-size. Then a packet with a frame-size of 30 ms is limited to 150,
* i.e. min(170, 150), and a packet with 60 ms frame-size is limited to
* 170 bytes, i.e. min(170, 300).
*
* Input:
* - ISAC_main_inst : iSAC instance
* - maxRate : maximum rate in bits per second,
* valid values are 32000 to 53400 bits/sec in
* wideband mode, and 32000 to 160000 bits/sec in
* super-wideband mode.
*
* Return value : 0 if successful
* -1 if error happens
*/
int16_t WebRtcIsac_SetMaxRate(ISACStruct* ISAC_main_inst, int32_t maxRate);
/******************************************************************************
* WebRtcIsac_DecSampRate()
* Return the sampling rate of the decoded audio.
*
* Input:
* - ISAC_main_inst : iSAC instance
*
* Return value : sampling frequency in Hertz.
*
*/
uint16_t WebRtcIsac_DecSampRate(ISACStruct* ISAC_main_inst);
/******************************************************************************
* WebRtcIsac_EncSampRate()
*
* Input:
* - ISAC_main_inst : iSAC instance
*
* Return value : sampling rate in Hertz.
*
*/
uint16_t WebRtcIsac_EncSampRate(ISACStruct* ISAC_main_inst);
/******************************************************************************
* WebRtcIsac_SetDecSampRate()
* Set the sampling rate of the decoder. Initialization of the decoder WILL
* NOT overwrite the sampling rate of the encoder. The default value is 16 kHz
* which is set when the instance is created.
*
* Input:
* - ISAC_main_inst : iSAC instance
* - sampRate : sampling rate in Hertz.
*
* Return value : 0 if successful
* -1 if failed.
*/
int16_t WebRtcIsac_SetDecSampRate(ISACStruct* ISAC_main_inst,
uint16_t samp_rate_hz);
/******************************************************************************
* WebRtcIsac_SetEncSampRate()
* Set the sampling rate of the encoder. Initialization of the encoder WILL
* NOT overwrite the sampling rate of the encoder. The default value is 16 kHz
* which is set when the instance is created. The encoding-mode and the
* bottleneck remain unchanged by this call, however, the maximum rate and
* maximum payload-size will reset to their default value.
*
* Input:
* - ISAC_main_inst : iSAC instance
* - sampRate : sampling rate in Hertz.
*
* Return value : 0 if successful
* -1 if failed.
*/
int16_t WebRtcIsac_SetEncSampRate(ISACStruct* ISAC_main_inst,
uint16_t sample_rate_hz);
/******************************************************************************
* WebRtcIsac_GetNewBitStream(...)
*
* This function returns encoded data, with the received bwe-index in the
* stream. If the rate is set to a value less than the bottleneck of the codec,
* the new bitstream will be re-encoded with the given target rate.
* It should always return a complete packet, i.e. only called once
* even for 60 msec frames.
*
* NOTE 1! This function does not write in the ISACStruct, it is not allowed.
* NOTE 2! Currently not implemented for SWB mode.
* NOTE 3! Rates larger than the bottleneck of the codec will be limited
* to the current bottleneck.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - bweIndex : Index of bandwidth estimate to put in new
* bitstream
* - rate : target rate of the transcoder is bits/sec.
* Valid values are the accepted rate in iSAC,
* i.e. 10000 to 56000.
* - isRCU : if the new bit-stream is an RCU
* stream. Note that the rate parameter always indicates the target rate of the
* main payload, regardless of 'isRCU' value.
*
* Output:
* - encoded : The encoded data vector
*
* Return value : >0 - Length (in bytes) of coded data
* -1 - Error or called in SWB mode
* NOTE! No error code is written to
* the struct since it is only allowed to read
* the struct.
*/
int16_t WebRtcIsac_GetNewBitStream(ISACStruct* ISAC_main_inst,
int16_t bweIndex,
int16_t jitterInfo,
int32_t rate,
uint8_t* encoded,
int16_t isRCU);
/****************************************************************************
* WebRtcIsac_GetDownLinkBwIndex(...)
*
* This function returns index representing the Bandwidth estimate from
* other side to this side.
*
* Input:
* - ISAC_main_inst : iSAC struct
*
* Output:
* - bweIndex : Bandwidth estimate to transmit to other side.
*
*/
int16_t WebRtcIsac_GetDownLinkBwIndex(ISACStruct* ISAC_main_inst,
int16_t* bweIndex,
int16_t* jitterInfo);
/****************************************************************************
* WebRtcIsac_UpdateUplinkBw(...)
*
* This function takes an index representing the Bandwidth estimate from
* this side to other side and updates BWE.
*
* Input:
* - ISAC_main_inst : iSAC struct
* - bweIndex : Bandwidth estimate from other side.
*
*/
int16_t WebRtcIsac_UpdateUplinkBw(ISACStruct* ISAC_main_inst, int16_t bweIndex);
/****************************************************************************
* WebRtcIsac_ReadBwIndex(...)
*
* This function returns the index of the Bandwidth estimate from the bitstream.
*
* Input:
* - encoded : Encoded bitstream
*
* Output:
* - frameLength : Length of frame in packet (in samples)
* - bweIndex : Bandwidth estimate in bitstream
*
*/
int16_t WebRtcIsac_ReadBwIndex(const uint8_t* encoded, int16_t* bweIndex);
/*******************************************************************************
* WebRtcIsac_GetNewFrameLen(...)
*
* returns the frame length (in samples) of the next packet. In the case of
* channel-adaptive mode, iSAC decides on its frame length based on the
* estimated bottleneck; this allows a user to prepare for the next packet (at
* the encoder).
*
* The primary usage is in CE to make the iSAC works in channel-adaptive mode
*
* Input:
* - ISAC_main_inst : iSAC struct
*
* Return Value : frame length in samples
*
*/
int16_t WebRtcIsac_GetNewFrameLen(ISACStruct* ISAC_main_inst);
/****************************************************************************
* WebRtcIsac_GetRedPayload(...)
*
* Populates "encoded" with the redundant payload of the recently encoded
* frame. This function has to be called once that WebRtcIsac_Encode(...)
* returns a positive value. Regardless of the frame-size this function will
* be called only once after encoding is completed.
*
* Input:
* - ISAC_main_inst : iSAC struct
*
* Output:
* - encoded : the encoded data vector
*
*
* Return value:
* : >0 - Length (in bytes) of coded data
* : -1 - Error
*
*
*/
int16_t WebRtcIsac_GetRedPayload(ISACStruct* ISAC_main_inst, uint8_t* encoded);
/****************************************************************************
* WebRtcIsac_DecodeRcu(...)
*
* This function decodes a redundant (RCU) iSAC frame. Function is called in
* NetEq with a stored RCU payload in case of packet loss. Output speech length
* will be a multiple of 480 samples: 480 or 960 samples,
* depending on the framesize (30 or 60 ms).
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - encoded : encoded ISAC RCU frame(s)
* - len : bytes in encoded vector
*
* Output:
* - decoded : The decoded vector
*
* Return value : >0 - number of samples in decoded vector
* -1 - Error
*/
int WebRtcIsac_DecodeRcu(ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
size_t len,
int16_t* decoded,
int16_t* speechType);
/* If |inst| is a decoder but not an encoder: tell it what sample rate the
encoder is using, for bandwidth estimation purposes. */
void WebRtcIsac_SetEncSampRateInDecoder(ISACStruct* inst, int sample_rate_hz);
#if defined(__cplusplus)
}
#endif
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_ISAC_H_ */

View File

@ -1,60 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/main/source/arith_routines.h"
#include "modules/audio_coding/codecs/isac/main/source/settings.h"
/*
 * terminate and return byte stream;
 * returns the number of bytes in the stream
 *
 * Flushes the arithmetic coder's remaining code value (streamval) into the
 * byte buffer. When the current coding interval is still wide
 * (W_upper > 0x01FFFFFF) a single extra byte is enough to pin down the
 * codeword; otherwise two bytes are written. Rounding the code value up may
 * wrap the 32-bit accumulator, in which case the carry is propagated into
 * the bytes already stored in the buffer.
 */
int WebRtcIsac_EncTerminate(Bitstr *streamdata) /* in-/output struct containing bitstream */
{
uint8_t *stream_ptr;
/* point to the right place in the stream buffer */
stream_ptr = streamdata->stream + streamdata->stream_index;
/* find minimum length (determined by current interval width) */
if ( streamdata->W_upper > 0x01FFFFFF )
{
/* wide interval: round up to the next multiple of 2^24; one byte follows */
streamdata->streamval += 0x01000000;
/* add carry to buffer */
if (streamdata->streamval < 0x01000000)
{
/* propagate carry: bump preceding bytes, stopping at the first one that
 * does not wrap from 0xFF to 0x00 */
while ( !(++(*--stream_ptr)) );
/* put pointer back to the old value */
stream_ptr = streamdata->stream + streamdata->stream_index;
}
/* write remaining data to bitstream */
*stream_ptr++ = (uint8_t) (streamdata->streamval >> 24);
}
else
{
/* narrow interval: round up to the next multiple of 2^16; two bytes follow */
streamdata->streamval += 0x00010000;
/* add carry to buffer */
if (streamdata->streamval < 0x00010000)
{
/* propagate carry */
while ( !(++(*--stream_ptr)) );
/* put pointer back to the old value */
stream_ptr = streamdata->stream + streamdata->stream_index;
}
/* write remaining data to bitstream */
*stream_ptr++ = (uint8_t) (streamdata->streamval >> 24);
*stream_ptr++ = (uint8_t) ((streamdata->streamval >> 16) & 0x00FF);
}
/* calculate stream length */
return (int)(stream_ptr - streamdata->stream);
}

View File

@ -1,67 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* arith_routines.h
*
* Functions for arithmetic coding.
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ARITH_ROUTINES_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ARITH_ROUTINES_H_
#include "modules/audio_coding/codecs/isac/main/source/structs.h"
int WebRtcIsac_EncLogisticMulti2(
Bitstr* streamdata, /* in-/output struct containing bitstream */
int16_t* dataQ7, /* input: data vector */
const uint16_t*
env, /* input: side info vector defining the width of the pdf */
const int N, /* input: data vector length */
const int16_t isSWB12kHz); /* if the codec is working in 12kHz bandwidth */
/* returns the number of bytes in the stream */
int WebRtcIsac_EncTerminate(
Bitstr* streamdata); /* in-/output struct containing bitstream */
/* returns the number of bytes in the stream so far */
int WebRtcIsac_DecLogisticMulti2(
int16_t* data, /* output: data vector */
Bitstr* streamdata, /* in-/output struct containing bitstream */
const uint16_t*
env, /* input: side info vector defining the width of the pdf */
const int16_t* dither, /* input: dither vector */
const int N, /* input: data vector length */
const int16_t isSWB12kHz); /* if the codec is working in 12kHz bandwidth */
void WebRtcIsac_EncHistMulti(
Bitstr* streamdata, /* in-/output struct containing bitstream */
const int* data, /* input: data vector */
const uint16_t* const* cdf, /* input: array of cdf arrays */
const int N); /* input: data vector length */
int WebRtcIsac_DecHistBisectMulti(
int* data, /* output: data vector */
Bitstr* streamdata, /* in-/output struct containing bitstream */
const uint16_t* const* cdf, /* input: array of cdf arrays */
const uint16_t*
cdf_size, /* input: array of cdf table sizes+1 (power of two: 2^k) */
const int N); /* input: data vector length */
int WebRtcIsac_DecHistOneStepMulti(
int* data, /* output: data vector */
Bitstr* streamdata, /* in-/output struct containing bitstream */
const uint16_t* const* cdf, /* input: array of cdf arrays */
const uint16_t*
init_index, /* input: vector of initial cdf table search entries */
const int N); /* input: data vector length */
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ARITH_ROUTINES_H_ */

View File

@ -1,291 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/main/source/settings.h"
#include "modules/audio_coding/codecs/isac/main/source/arith_routines.h"
/*
 * code symbols into arithmetic bytestream
 *
 * Encodes the N symbols in `data` using one cdf table per symbol (cdf[k] is
 * the table for data[k]). For each symbol the coding interval [W_lower,
 * W_upper] is narrowed by a 32x16-bit fixed-point multiply, performed in two
 * 16-bit halves, and the interval is renormalized a byte at a time, emitting
 * the most significant byte of the running code value. Updates
 * streamdata->stream_index, ->W_upper and ->streamval so that encoding can
 * continue in a later call.
 */
void WebRtcIsac_EncHistMulti(Bitstr *streamdata, /* in-/output struct containing bitstream */
const int *data, /* input: data vector */
const uint16_t *const *cdf, /* input: array of cdf arrays */
const int N) /* input: data vector length */
{
uint32_t W_lower, W_upper;
uint32_t W_upper_LSB, W_upper_MSB;
uint8_t *stream_ptr;
uint8_t *stream_ptr_carry;
uint32_t cdf_lo, cdf_hi;
int k;
/* point to beginning of stream buffer */
stream_ptr = streamdata->stream + streamdata->stream_index;
W_upper = streamdata->W_upper;
for (k=N; k>0; k--)
{
/* fetch cdf_lower and cdf_upper from cdf tables */
cdf_lo = (uint32_t) *(*cdf + *data);
cdf_hi = (uint32_t) *(*cdf++ + *data++ + 1);
/* update interval (32x16-bit multiply split into two 16-bit halves) */
W_upper_LSB = W_upper & 0x0000FFFF;
W_upper_MSB = W_upper >> 16;
W_lower = W_upper_MSB * cdf_lo;
W_lower += (W_upper_LSB * cdf_lo) >> 16;
W_upper = W_upper_MSB * cdf_hi;
W_upper += (W_upper_LSB * cdf_hi) >> 16;
/* shift interval such that it begins at zero */
W_upper -= ++W_lower;
/* add integer to bitstream */
streamdata->streamval += W_lower;
/* handle carry (the addition wrapped the 32-bit accumulator) */
if (streamdata->streamval < W_lower)
{
/* propagate carry into already-emitted bytes */
stream_ptr_carry = stream_ptr;
while (!(++(*--stream_ptr_carry)));
}
/* renormalize interval, store most significant byte of streamval and update streamval */
while ( !(W_upper & 0xFF000000) ) /* W_upper < 2^24 */
{
W_upper <<= 8;
*stream_ptr++ = (uint8_t) (streamdata->streamval >> 24);
streamdata->streamval <<= 8;
}
}
/* calculate new stream_index */
streamdata->stream_index = (int)(stream_ptr - streamdata->stream);
streamdata->W_upper = W_upper;
return;
}
/*
 * function to decode more symbols from the arithmetic bytestream, using method of bisection
 * cdf tables should be of size 2^k-1 (which corresponds to an alphabet size of 2^k-2)
 *
 * Decodes N symbols into `data`, using one cdf table per symbol. Each symbol
 * is located with a binary search over its cdf table, then the interval is
 * renormalized byte-by-byte as in the encoder. Returns the number of bytes
 * consumed from the stream so far (stream_index minus 1 or 2, depending on
 * the final interval width), or -2 if the interval collapsed to zero, which
 * indicates a corrupted stream.
 */
int WebRtcIsac_DecHistBisectMulti(int *data, /* output: data vector */
Bitstr *streamdata, /* in-/output struct containing bitstream */
const uint16_t *const *cdf, /* input: array of cdf arrays */
const uint16_t *cdf_size, /* input: array of cdf table sizes+1 (power of two: 2^k) */
const int N) /* input: data vector length */
{
uint32_t W_lower, W_upper;
uint32_t W_tmp;
uint32_t W_upper_LSB, W_upper_MSB;
uint32_t streamval;
const uint8_t *stream_ptr;
const uint16_t *cdf_ptr;
int size_tmp;
int k;
W_lower = 0; //to remove warning -DH
stream_ptr = streamdata->stream + streamdata->stream_index;
W_upper = streamdata->W_upper;
if (W_upper == 0)
/* Should not be possible in normal operation */
return -2;
if (streamdata->stream_index == 0) /* first time decoder is called for this stream */
{
/* read first word from bytestream */
streamval = *stream_ptr << 24;
streamval |= *++stream_ptr << 16;
streamval |= *++stream_ptr << 8;
streamval |= *++stream_ptr;
} else {
streamval = streamdata->streamval;
}
for (k=N; k>0; k--)
{
/* find the integer *data for which streamval lies in [W_lower+1, W_upper] */
W_upper_LSB = W_upper & 0x0000FFFF;
W_upper_MSB = W_upper >> 16;
/* start halfway the cdf range */
size_tmp = *cdf_size++ >> 1;
cdf_ptr = *cdf + (size_tmp - 1);
/* method of bisection: halve the search span until it reaches one entry */
for ( ;; )
{
W_tmp = W_upper_MSB * *cdf_ptr;
W_tmp += (W_upper_LSB * *cdf_ptr) >> 16;
size_tmp >>= 1;
if (size_tmp == 0) break;
if (streamval > W_tmp)
{
W_lower = W_tmp;
cdf_ptr += size_tmp;
} else {
W_upper = W_tmp;
cdf_ptr -= size_tmp;
}
}
/* final comparison decides which side of the last cdf entry the symbol is on */
if (streamval > W_tmp)
{
W_lower = W_tmp;
*data++ = (int)(cdf_ptr - *cdf++);
} else {
W_upper = W_tmp;
*data++ = (int)(cdf_ptr - *cdf++ - 1);
}
/* shift interval to start at zero */
W_upper -= ++W_lower;
/* add integer to bitstream */
streamval -= W_lower;
/* renormalize interval and update streamval */
while ( !(W_upper & 0xFF000000) ) /* W_upper < 2^24 */
{
/* read next byte from stream */
streamval = (streamval << 8) | *++stream_ptr;
W_upper <<= 8;
}
if (W_upper == 0)
/* Should not be possible in normal operation */
return -2;
}
streamdata->stream_index = (int)(stream_ptr - streamdata->stream);
streamdata->W_upper = W_upper;
streamdata->streamval = streamval;
/* find number of bytes in original stream (determined by current interval width) */
if ( W_upper > 0x01FFFFFF )
return streamdata->stream_index - 2;
else
return streamdata->stream_index - 1;
}
/*
 * Decode more symbols from the arithmetic bytestream, taking a single step up
 * or down the cdf table at a time (linear search seeded by init_index), as
 * opposed to bisection.  cdf tables can be of arbitrary size, but large
 * tables may take a lot of iterations.
 *
 * Returns the number of bytes consumed so far from the original stream,
 * -2 if the coder state is invalid (W_upper == 0), or
 * -3 if a cdf range check fails (corrupt or mismatched bitstream).
 */
int WebRtcIsac_DecHistOneStepMulti(int *data,        /* output: data vector */
                                   Bitstr *streamdata, /* in-/output struct containing bitstream */
                                   const uint16_t *const *cdf, /* input: array of cdf arrays */
                                   const uint16_t *init_index, /* input: vector of initial cdf table search entries */
                                   const int N)      /* input: data vector length */
{
  uint32_t W_lower, W_upper;
  uint32_t W_tmp;
  uint32_t W_upper_LSB, W_upper_MSB;
  uint32_t streamval;
  const uint8_t *stream_ptr;
  const uint16_t *cdf_ptr;
  int k;

  /* Resume from the persisted coder state. */
  stream_ptr = streamdata->stream + streamdata->stream_index;
  W_upper = streamdata->W_upper;
  if (W_upper == 0)
    /* Should not be possible in normal operation */
    return -2;

  if (streamdata->stream_index == 0) /* first time decoder is called for this stream */
  {
    /* read first word from bytestream */
    streamval = (uint32_t)(*stream_ptr) << 24;
    streamval |= (uint32_t)(*++stream_ptr) << 16;
    streamval |= (uint32_t)(*++stream_ptr) << 8;
    streamval |= (uint32_t)(*++stream_ptr);
  } else {
    streamval = streamdata->streamval;
  }

  for (k = N; k > 0; k--)
  {
    /* find the integer *data for which streamval lies in [W_lower+1, W_upper] */
    W_upper_LSB = W_upper & 0x0000FFFF;
    W_upper_MSB = W_upper >> 16;

    /* start at the specified table entry */
    cdf_ptr = *cdf + (*init_index++);
    /* 32x16 -> 32-bit scaled product: W_upper * cdf / 2^16, done in halves. */
    W_tmp = W_upper_MSB * *cdf_ptr;
    W_tmp += (W_upper_LSB * *cdf_ptr) >> 16;

    if (streamval > W_tmp)
    {
      /* walk upward through the cdf until streamval falls below it */
      for ( ;; )
      {
        W_lower = W_tmp;
        if (cdf_ptr[0] == 65535)
          /* range check: 65535 marks the top of the cdf table */
          return -3;
        W_tmp = W_upper_MSB * *++cdf_ptr;
        W_tmp += (W_upper_LSB * *cdf_ptr) >> 16;
        if (streamval <= W_tmp) break;
      }
      W_upper = W_tmp;
      /* decoded symbol is the offset into this cdf table */
      *data++ = (int)(cdf_ptr - *cdf++ - 1);
    } else {
      /* walk downward through the cdf until streamval rises above it */
      for ( ;; )
      {
        W_upper = W_tmp;
        --cdf_ptr;
        if (cdf_ptr < *cdf) {
          /* range check: fell off the bottom of the cdf table */
          return -3;
        }
        W_tmp = W_upper_MSB * *cdf_ptr;
        W_tmp += (W_upper_LSB * *cdf_ptr) >> 16;
        if (streamval > W_tmp) break;
      }
      W_lower = W_tmp;
      *data++ = (int)(cdf_ptr - *cdf++);
    }

    /* shift interval to start at zero */
    W_upper -= ++W_lower;

    /* add integer to bitstream */
    streamval -= W_lower;

    /* renormalize interval and update streamval */
    while ( !(W_upper & 0xFF000000) ) /* W_upper < 2^24 */
    {
      /* read next byte from stream */
      /* NOTE(review): unlike WebRtcIsac_DecLogisticMulti2, this loop has no
         stream_end bounds check — presumably callers guarantee enough bytes;
         confirm against call sites. */
      streamval = (streamval << 8) | *++stream_ptr;
      W_upper <<= 8;
    }
  }

  /* Persist coder state for the next call on this stream. */
  streamdata->stream_index = (int)(stream_ptr - streamdata->stream);
  streamdata->W_upper = W_upper;
  streamdata->streamval = streamval;

  /* find number of bytes in original stream (determined by current interval width) */
  if ( W_upper > 0x01FFFFFF )
    return streamdata->stream_index - 2;
  else
    return streamdata->stream_index - 1;
}

View File

@ -1,303 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* arith_routines.h
*
* This file contains functions for arithmatically encoding and
* decoding DFT coefficients.
*
*/
#include "modules/audio_coding/codecs/isac/main/source/arith_routines.h"
/* Histogram bin edges for the piecewise-linear logistic cdf, Q15.
   51 edges spanning [-10, 10] in steps of 0.4 (i.e. 0.4 * 2^15 = 13107.2). */
static const int32_t kHistEdgesQ15[51] = {
  -327680, -314573, -301466, -288359, -275252, -262144, -249037, -235930, -222823, -209716,
  -196608, -183501, -170394, -157287, -144180, -131072, -117965, -104858, -91751, -78644,
  -65536, -52429, -39322, -26215, -13108, 0, 13107, 26214, 39321, 52428,
  65536, 78643, 91750, 104857, 117964, 131072, 144179, 157286, 170393, 183500,
  196608, 209715, 222822, 235929, 249036, 262144, 275251, 288358, 301465, 314572,
  327680};

/* Slope of the cdf within each bin, Q0. */
static const int kCdfSlopeQ0[51] = { /* Q0 */
  5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 5, 13, 23, 47, 87, 154, 315, 700, 1088,
  2471, 6064, 14221, 21463, 36634, 36924, 19750, 13270, 5806, 2312,
  1095, 660, 316, 145, 86, 41, 32, 5, 5, 5,
  5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 0};

/* Cdf value at each bin edge, Q16. */
static const int kCdfQ16[51] = { /* Q16 */
  0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
  20, 22, 24, 29, 38, 57, 92, 153, 279, 559,
  994, 1983, 4408, 10097, 18682, 33336, 48105, 56005, 61313, 63636,
  64560, 64998, 65262, 65389, 65447, 65481, 65497, 65510, 65512, 65514,
  65516, 65518, 65520, 65522, 65524, 65526, 65528, 65530, 65532, 65534,
  65535};

/*
 * Evaluate the piecewise-linear approximation of the logistic cdf at a Q15
 * input.  The input is clamped to the table range; the result is the Q16 cdf
 * value, linearly interpolated within the selected bin.
 */
static __inline uint32_t piecewise(int32_t xinQ15) {
  int32_t x = xinQ15;

  /* Clamp to the supported input range. */
  if (x < kHistEdgesQ15[0]) {
    x = kHistEdgesQ15[0];
  } else if (x > kHistEdgesQ15[50]) {
    x = kHistEdgesQ15[50];
  }

  /* Bin index: bins are 0.4 wide in Q15, so scale by 5 and drop Q16
     (2^16 / 5 = 0.4 in Q15). */
  const int32_t offset = x - kHistEdgesQ15[0]; /* Q15 */
  const int32_t bin = (offset * 5) >> 16;

  /* Linear interpolation within the bin: slope (Q0) * delta (Q15) >> 15. */
  const int32_t delta = x - kHistEdgesQ15[bin]; /* Q15 */
  const int32_t interp = (kCdfSlopeQ0[bin] * delta) >> 15; /* Q0 */

  return (uint32_t)(kCdfQ16[bin] + interp);
}
/*
 * Arithmetic-encode a vector of Q7 data using a logistic pdf whose width is
 * controlled per-sample by envQ8.  Values whose probability interval would
 * collapse are clipped toward zero in steps of 1.0 (128 in Q7), mutating
 * *dataQ7 in place so the encoder and decoder stay in sync.
 *
 * Returns 0 on success, or -ISAC_DISALLOWED_BITSTREAM_LENGTH if the encoded
 * stream would exceed STREAM_SIZE_MAX_60 bytes.
 */
int WebRtcIsac_EncLogisticMulti2(
  Bitstr *streamdata,      /* in-/output struct containing bitstream */
  int16_t *dataQ7,         /* input: data vector */
  const uint16_t *envQ8,   /* input: side info vector defining the width of the pdf */
  const int N,             /* input: data vector length / 2 */
  const int16_t isSWB12kHz)
{
  uint32_t W_lower, W_upper;
  uint32_t W_upper_LSB, W_upper_MSB;
  uint8_t *stream_ptr;
  uint8_t *maxStreamPtr;
  uint8_t *stream_ptr_carry;
  uint32_t cdf_lo, cdf_hi;
  int k;

  /* point to beginning of stream buffer */
  stream_ptr = streamdata->stream + streamdata->stream_index;
  W_upper = streamdata->W_upper;

  /* Last writable byte of the output buffer. */
  maxStreamPtr = streamdata->stream + STREAM_SIZE_MAX_60 - 1;
  for (k = 0; k < N; k++)
  {
    /* compute cdf_lower and cdf_upper by evaluating the piecewise linear cdf
       at +/- half a quantization step (64 in Q7) around the data value */
    cdf_lo = piecewise((*dataQ7 - 64) * *envQ8);
    cdf_hi = piecewise((*dataQ7 + 64) * *envQ8);

    /* test and clip if probability gets too small */
    while (cdf_lo + 1 >= cdf_hi) {
      /* clip: move the sample one full step toward zero and retry */
      if (*dataQ7 > 0) {
        *dataQ7 -= 128;
        cdf_hi = cdf_lo;
        cdf_lo = piecewise((*dataQ7 - 64) * *envQ8);
      } else {
        *dataQ7 += 128;
        cdf_lo = cdf_hi;
        cdf_hi = piecewise((*dataQ7 + 64) * *envQ8);
      }
    }

    dataQ7++;
    // increment only once per 4 iterations for SWB-16kHz or WB
    // increment only once per 2 iterations for SWB-12kHz
    envQ8 += (isSWB12kHz) ? (k & 1) : ((k & 1) & (k >> 1));

    /* update interval (32x16-bit scaled products computed in halves) */
    W_upper_LSB = W_upper & 0x0000FFFF;
    W_upper_MSB = W_upper >> 16;
    W_lower = W_upper_MSB * cdf_lo;
    W_lower += (W_upper_LSB * cdf_lo) >> 16;
    W_upper = W_upper_MSB * cdf_hi;
    W_upper += (W_upper_LSB * cdf_hi) >> 16;

    /* shift interval such that it begins at zero */
    W_upper -= ++W_lower;

    /* add integer to bitstream */
    streamdata->streamval += W_lower;

    /* handle carry (unsigned wrap-around detected by the sum shrinking) */
    if (streamdata->streamval < W_lower)
    {
      /* propagate carry: walk backward incrementing 0xFF bytes to 0x00 until
         a byte stops overflowing.
         NOTE(review): assumes a carry never propagates past the start of the
         stream buffer — confirm callers initialize streamdata so this holds. */
      stream_ptr_carry = stream_ptr;
      while (!(++(*--stream_ptr_carry)));
    }

    /* renormalize interval, store most significant byte of streamval and update streamval */
    while ( !(W_upper & 0xFF000000) ) /* W_upper < 2^24 */
    {
      W_upper <<= 8;
      *stream_ptr++ = (uint8_t)(streamdata->streamval >> 24);
      if (stream_ptr > maxStreamPtr)
      {
        return -ISAC_DISALLOWED_BITSTREAM_LENGTH;
      }
      streamdata->streamval <<= 8;
    }
  }

  /* calculate new stream_index */
  streamdata->stream_index = (int)(stream_ptr - streamdata->stream);
  streamdata->W_upper = W_upper;

  return 0;
}
/*
 * Arithmetic-decode a vector of Q7 data encoded with
 * WebRtcIsac_EncLogisticMulti2.  For each sample, a first candidate is found
 * by inverting the logistic cdf at the dither value, then refined in steps of
 * 1.0 (128 in Q7) until the coded interval brackets streamval.
 *
 * Returns the number of bytes consumed so far from the original stream, or
 * -1 on malformed input (out-of-bounds read or non-converging search).
 */
int WebRtcIsac_DecLogisticMulti2(
  int16_t *dataQ7,         /* output: data vector */
  Bitstr *streamdata,      /* in-/output struct containing bitstream */
  const uint16_t *envQ8,   /* input: side info vector defining the width of the pdf */
  const int16_t *ditherQ7, /* input: dither vector */
  const int N,             /* input: data vector length */
  const int16_t isSWB12kHz)
{
  uint32_t W_lower, W_upper;
  uint32_t W_tmp;
  uint32_t W_upper_LSB, W_upper_MSB;
  uint32_t streamval;
  const uint8_t *stream_ptr;
  uint32_t cdf_tmp;
  int16_t candQ7;
  int k;

  // Position just past the end of the stream. STREAM_SIZE_MAX_60 instead of
  // STREAM_SIZE_MAX (which is the size of the allocated buffer) because that's
  // the limit to how much data is filled in.
  const uint8_t* const stream_end = streamdata->stream + STREAM_SIZE_MAX_60;

  stream_ptr = streamdata->stream + streamdata->stream_index;
  W_upper = streamdata->W_upper;
  if (streamdata->stream_index == 0) /* first time decoder is called for this stream */
  {
    /* read first word from bytestream */
    if (stream_ptr + 3 >= stream_end)
      return -1;  // Would read out of bounds. Malformed input?
    streamval = *stream_ptr << 24;
    streamval |= *++stream_ptr << 16;
    streamval |= *++stream_ptr << 8;
    streamval |= *++stream_ptr;
  } else {
    streamval = streamdata->streamval;
  }

  for (k = 0; k < N; k++)
  {
    /* find the integer *data for which streamval lies in [W_lower+1, W_upper] */
    W_upper_LSB = W_upper & 0x0000FFFF;
    W_upper_MSB = W_upper >> 16;

    /* find first candidate by inverting the logistic cdf at the dither value */
    candQ7 = - *ditherQ7 + 64;
    cdf_tmp = piecewise(candQ7 * *envQ8);

    /* 32x16-bit scaled product W_upper * cdf / 2^16, computed in halves */
    W_tmp = W_upper_MSB * cdf_tmp;
    W_tmp += (W_upper_LSB * cdf_tmp) >> 16;
    if (streamval > W_tmp)
    {
      /* candidate too low: step up one quantization level at a time */
      W_lower = W_tmp;
      candQ7 += 128;
      cdf_tmp = piecewise(candQ7 * *envQ8);

      W_tmp = W_upper_MSB * cdf_tmp;
      W_tmp += (W_upper_LSB * cdf_tmp) >> 16;
      while (streamval > W_tmp)
      {
        W_lower = W_tmp;
        candQ7 += 128;
        cdf_tmp = piecewise(candQ7 * *envQ8);

        W_tmp = W_upper_MSB * cdf_tmp;
        W_tmp += (W_upper_LSB * cdf_tmp) >> 16;

        /* error check: cdf saturated, search cannot converge */
        if (W_lower == W_tmp) return -1;
      }
      W_upper = W_tmp;

      /* another sample decoded */
      *dataQ7 = candQ7 - 64;
    }
    else
    {
      /* candidate too high: step down one quantization level at a time */
      W_upper = W_tmp;
      candQ7 -= 128;
      cdf_tmp = piecewise(candQ7 * *envQ8);

      W_tmp = W_upper_MSB * cdf_tmp;
      W_tmp += (W_upper_LSB * cdf_tmp) >> 16;
      while ( !(streamval > W_tmp) )
      {
        W_upper = W_tmp;
        candQ7 -= 128;
        cdf_tmp = piecewise(candQ7 * *envQ8);

        W_tmp = W_upper_MSB * cdf_tmp;
        W_tmp += (W_upper_LSB * cdf_tmp) >> 16;

        /* error check: cdf saturated, search cannot converge */
        if (W_upper == W_tmp) return -1;
      }
      W_lower = W_tmp;

      /* another sample decoded */
      *dataQ7 = candQ7 + 64;
    }
    ditherQ7++;
    dataQ7++;
    // increment only once per 4 iterations for SWB-16kHz or WB
    // increment only once per 2 iterations for SWB-12kHz
    envQ8 += (isSWB12kHz) ? (k & 1) : ((k & 1) & (k >> 1));

    /* shift interval to start at zero */
    W_upper -= ++W_lower;

    /* add integer to bitstream */
    streamval -= W_lower;

    /* renormalize interval and update streamval */
    while ( !(W_upper & 0xFF000000) ) /* W_upper < 2^24 */
    {
      /* read next byte from stream */
      if (stream_ptr + 1 >= stream_end)
        return -1;  // Would read out of bounds. Malformed input?
      streamval = (streamval << 8) | *++stream_ptr;
      W_upper <<= 8;
    }
  }

  /* Persist coder state for the next call on this stream. */
  streamdata->stream_index = (int)(stream_ptr - streamdata->stream);
  streamdata->W_upper = W_upper;
  streamdata->streamval = streamval;

  /* find number of bytes in original stream (determined by current interval width) */
  if ( W_upper > 0x01FFFFFF )
    return streamdata->stream_index - 2;
  else
    return streamdata->stream_index - 1;
}

View File

@ -1,20 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"
#include "modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h"
namespace webrtc {
// Explicit instantiation: forces the compiler to emit the complete
// AudioDecoderIsacT<IsacFloat> specialization (implementation included above
// from audio_decoder_isac_t_impl.h) into this translation unit, so other
// files can use the class without pulling in the _impl header.
template class AudioDecoderIsacT<IsacFloat>;
}  // namespace webrtc

View File

@ -1,20 +0,0 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
#include "modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h"
namespace webrtc {
// Explicit instantiation: forces the compiler to emit the complete
// AudioEncoderIsacT<IsacFloat> specialization (implementation included above
// from audio_encoder_isac_t_impl.h) into this translation unit, so other
// files can use the class without pulling in the _impl header.
template class AudioEncoderIsacT<IsacFloat>;
}  // namespace webrtc

View File

@ -1,165 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* bandwidth_estimator.h
*
* This header file contains the API for the Bandwidth Estimator
* designed for iSAC.
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_BANDWIDTH_ESTIMATOR_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_BANDWIDTH_ESTIMATOR_H_
#include <stddef.h>
#include "modules/audio_coding/codecs/isac/main/source/settings.h"
#include "modules/audio_coding/codecs/isac/main/source/structs.h"
#define MIN_ISAC_BW 10000
#define MIN_ISAC_BW_LB 10000
#define MIN_ISAC_BW_UB 25000
#define MAX_ISAC_BW 56000
#define MAX_ISAC_BW_UB 32000
#define MAX_ISAC_BW_LB 32000
#define MIN_ISAC_MD 5
#define MAX_ISAC_MD 25
// assumed header size, in bytes; we don't know the exact number
// (header compression may be used)
#define HEADER_SIZE 35
// Initial Frame-Size, in ms, for Wideband & Super-Wideband Mode
#define INIT_FRAME_LEN_WB 60
#define INIT_FRAME_LEN_SWB 30
// Initial Bottleneck Estimate, in bits/sec, for
// Wideband & Super-wideband mode
#define INIT_BN_EST_WB 20e3f
#define INIT_BN_EST_SWB 56e3f
// Initial Header rate (header rate depends on frame-size),
// in bits/sec, for Wideband & Super-Wideband mode.
#define INIT_HDR_RATE_WB \
((float)HEADER_SIZE * 8.0f * 1000.0f / (float)INIT_FRAME_LEN_WB)
#define INIT_HDR_RATE_SWB \
((float)HEADER_SIZE * 8.0f * 1000.0f / (float)INIT_FRAME_LEN_SWB)
// number of packets in a row for a high rate burst
#define BURST_LEN 3
// ms, max time between two full bursts
#define BURST_INTERVAL 500
// number of packets in a row for initial high rate burst
#define INIT_BURST_LEN 5
// bits/s, rate for the first BURST_LEN packets
#define INIT_RATE_WB INIT_BN_EST_WB
#define INIT_RATE_SWB INIT_BN_EST_SWB
#if defined(__cplusplus)
extern "C" {
#endif
/* This function initializes the struct */
/* to be called before using the struct for anything else */
/* returns 0 if everything went fine, -1 otherwise */
int32_t WebRtcIsac_InitBandwidthEstimator(
BwEstimatorstr* bwest_str,
enum IsacSamplingRate encoderSampRate,
enum IsacSamplingRate decoderSampRate);
/* This function updates the receiving estimate */
/* Parameters: */
/* rtp_number - value from RTP packet, from NetEq */
/* frame length - length of signal frame in ms, from iSAC decoder */
/* send_ts - value in RTP header giving send time in samples */
/* arr_ts - value given by timeGetTime() time of arrival in samples of
* packet from NetEq */
/* pksize - size of packet in bytes, from NetEq */
/* Index - integer (range 0...23) indicating bottle neck & jitter as
* estimated by other side */
/* returns 0 if everything went fine, -1 otherwise */
int16_t WebRtcIsac_UpdateBandwidthEstimator(BwEstimatorstr* bwest_str,
const uint16_t rtp_number,
const int32_t frame_length,
const uint32_t send_ts,
const uint32_t arr_ts,
const size_t pksize);
/* Update receiving estimates. Used when we only receive BWE index, no iSAC data
* packet. */
int16_t WebRtcIsac_UpdateUplinkBwImpl(
BwEstimatorstr* bwest_str,
int16_t Index,
enum IsacSamplingRate encoderSamplingFreq);
/* Returns the bandwidth/jitter estimation code (integer 0...23) to put in the
* sending iSAC payload */
void WebRtcIsac_GetDownlinkBwJitIndexImpl(
BwEstimatorstr* bwest_str,
int16_t* bottleneckIndex,
int16_t* jitterInfo,
enum IsacSamplingRate decoderSamplingFreq);
/* Returns the bandwidth estimation (in bps) */
int32_t WebRtcIsac_GetDownlinkBandwidth(const BwEstimatorstr* bwest_str);
/* Returns the max delay (in ms) */
int32_t WebRtcIsac_GetDownlinkMaxDelay(const BwEstimatorstr* bwest_str);
/* Returns the bandwidth that iSAC should send with in bps */
int32_t WebRtcIsac_GetUplinkBandwidth(const BwEstimatorstr* bwest_str);
/* Returns the max delay value from the other side in ms */
int32_t WebRtcIsac_GetUplinkMaxDelay(const BwEstimatorstr* bwest_str);
/*
* update amount of data in bottle neck buffer and burst handling
* returns minimum payload size (bytes)
*/
int WebRtcIsac_GetMinBytes(
RateModel* State,
int StreamSize, /* bytes in bitstream */
const int FrameLen, /* ms per frame */
const double BottleNeck, /* bottle neck rate; excl headers (bps) */
const double DelayBuildUp, /* max delay from bottleneck buffering (ms) */
enum ISACBandwidth bandwidth
/*,int16_t frequentLargePackets*/);
/*
* update long-term average bitrate and amount of data in buffer
*/
void WebRtcIsac_UpdateRateModel(
RateModel* State,
int StreamSize, /* bytes in bitstream */
const int FrameSamples, /* samples per frame */
const double BottleNeck); /* bottle neck rate; excl headers (bps) */
void WebRtcIsac_InitRateModel(RateModel* State);
/* Returns the new framelength value (input argument: bottle_neck) */
int WebRtcIsac_GetNewFrameLength(double bottle_neck, int current_framelength);
/* Returns the new SNR value (input argument: bottle_neck) */
double WebRtcIsac_GetSnr(double bottle_neck, int new_framelength);
int16_t WebRtcIsac_UpdateUplinkJitter(BwEstimatorstr* bwest_str, int32_t index);
#if defined(__cplusplus)
}
#endif
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_BANDWIDTH_ESTIMATOR_H_ \
*/

View File

@ -1,223 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* codec.h
*
* This header file contains the calls to the internal encoder
* and decoder functions.
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CODEC_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CODEC_H_
#include <stddef.h>
#include "modules/audio_coding/codecs/isac/main/source/structs.h"
#include "modules/third_party/fft/fft.h"
void WebRtcIsac_ResetBitstream(Bitstr* bit_stream);
int WebRtcIsac_EstimateBandwidth(BwEstimatorstr* bwest_str,
Bitstr* streamdata,
size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts,
enum IsacSamplingRate encoderSampRate,
enum IsacSamplingRate decoderSampRate);
int WebRtcIsac_DecodeLb(const TransformTables* transform_tables,
float* signal_out,
ISACLBDecStruct* ISACdec_obj,
int16_t* current_framesamples,
int16_t isRCUPayload);
int WebRtcIsac_DecodeRcuLb(float* signal_out,
ISACLBDecStruct* ISACdec_obj,
int16_t* current_framesamples);
int WebRtcIsac_EncodeLb(const TransformTables* transform_tables,
float* in,
ISACLBEncStruct* ISACencLB_obj,
int16_t codingMode,
int16_t bottleneckIndex);
int WebRtcIsac_EncodeStoredDataLb(const IsacSaveEncoderData* ISACSavedEnc_obj,
Bitstr* ISACBitStr_obj,
int BWnumber,
float scale);
int WebRtcIsac_EncodeStoredDataUb(
const ISACUBSaveEncDataStruct* ISACSavedEnc_obj,
Bitstr* bitStream,
int32_t jitterInfo,
float scale,
enum ISACBandwidth bandwidth);
int16_t WebRtcIsac_GetRedPayloadUb(
const ISACUBSaveEncDataStruct* ISACSavedEncObj,
Bitstr* bitStreamObj,
enum ISACBandwidth bandwidth);
/******************************************************************************
* WebRtcIsac_RateAllocation()
* Internal function to perform a rate-allocation for upper and lower-band,
* given a total rate.
*
* Input:
* - inRateBitPerSec : a total bit-rate in bits/sec.
*
* Output:
* - rateLBBitPerSec : a bit-rate allocated to the lower-band
* in bits/sec.
* - rateUBBitPerSec : a bit-rate allocated to the upper-band
* in bits/sec.
*
* Return value : 0 if rate allocation has been successful.
* -1 if failed to allocate rates.
*/
int16_t WebRtcIsac_RateAllocation(int32_t inRateBitPerSec,
double* rateLBBitPerSec,
double* rateUBBitPerSec,
enum ISACBandwidth* bandwidthKHz);
/******************************************************************************
* WebRtcIsac_DecodeUb16()
*
* Decode the upper-band if the codec is in 0-16 kHz mode.
*
* Input/Output:
* -ISACdec_obj : pointer to the upper-band decoder object. The
* bit-stream is stored inside the decoder object.
*
* Output:
* -signal_out : decoded audio, 480 samples 30 ms.
*
* Return value : >0 number of decoded bytes.
* <0 if an error occurred.
*/
int WebRtcIsac_DecodeUb16(const TransformTables* transform_tables,
float* signal_out,
ISACUBDecStruct* ISACdec_obj,
int16_t isRCUPayload);
/******************************************************************************
* WebRtcIsac_DecodeUb12()
*
* Decode the upper-band if the codec is in 0-12 kHz mode.
*
* Input/Output:
* -ISACdec_obj : pointer to the upper-band decoder object. The
* bit-stream is stored inside the decoder object.
*
* Output:
* -signal_out : decoded audio, 480 samples 30 ms.
*
* Return value : >0 number of decoded bytes.
* <0 if an error occurred.
*/
int WebRtcIsac_DecodeUb12(const TransformTables* transform_tables,
float* signal_out,
ISACUBDecStruct* ISACdec_obj,
int16_t isRCUPayload);
/******************************************************************************
* WebRtcIsac_EncodeUb16()
*
* Encode the upper-band if the codec is in 0-16 kHz mode.
*
* Input:
* -in : upper-band audio, 160 samples (10 ms).
*
* Input/Output:
* -ISACdec_obj : pointer to the upper-band encoder object. The
* bit-stream is stored inside the encoder object.
*
* Return value : >0 number of encoded bytes.
* <0 if an error occurred.
*/
int WebRtcIsac_EncodeUb16(const TransformTables* transform_tables,
float* in,
ISACUBEncStruct* ISACenc_obj,
int32_t jitterInfo);
/******************************************************************************
* WebRtcIsac_EncodeUb12()
*
* Encode the upper-band if the codec is in 0-12 kHz mode.
*
* Input:
* -in : upper-band audio, 160 samples (10 ms).
*
* Input/Output:
* -ISACdec_obj : pointer to the upper-band encoder object. The
* bit-stream is stored inside the encoder object.
*
* Return value : >0 number of encoded bytes.
* <0 if an error occurred.
*/
int WebRtcIsac_EncodeUb12(const TransformTables* transform_tables,
float* in,
ISACUBEncStruct* ISACenc_obj,
int32_t jitterInfo);
/************************** initialization functions *************************/
void WebRtcIsac_InitMasking(MaskFiltstr* maskdata);
void WebRtcIsac_InitPostFilterbank(PostFiltBankstr* postfiltdata);
/**************************** transform functions ****************************/
void WebRtcIsac_InitTransform(TransformTables* tables);
void WebRtcIsac_Time2Spec(const TransformTables* tables,
double* inre1,
double* inre2,
int16_t* outre,
int16_t* outim,
FFTstr* fftstr_obj);
void WebRtcIsac_Spec2time(const TransformTables* tables,
double* inre,
double* inim,
double* outre1,
double* outre2,
FFTstr* fftstr_obj);
/***************************** filterbank functions **************************/
void WebRtcIsac_FilterAndCombineFloat(float* InLP,
float* InHP,
float* Out,
PostFiltBankstr* postfiltdata);
/************************* normalized lattice filters ************************/
void WebRtcIsac_NormLatticeFilterMa(int orderCoef,
float* stateF,
float* stateG,
float* lat_in,
double* filtcoeflo,
double* lat_out);
void WebRtcIsac_NormLatticeFilterAr(int orderCoef,
float* stateF,
float* stateG,
double* lat_in,
double* lo_filt_coef,
float* lat_out);
void WebRtcIsac_Dir2Lat(double* a, int orderCoef, float* sth, float* cth);
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CODEC_H_ */

View File

@ -1,111 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdlib.h>
#include "modules/audio_coding/codecs/isac/main/source/crc.h"
#include "common_audio/signal_processing/include/signal_processing_library.h"
/* CRC-32 generator polynomial (MSB-first / non-reflected form). */
#define POLYNOMIAL 0x04c11db7L

/* Byte-indexed lookup table generated from POLYNOMIAL; kCrcTable[1] is the
   polynomial itself. */
static const uint32_t kCrcTable[256] = {
  0, 0x4c11db7, 0x9823b6e, 0xd4326d9, 0x130476dc, 0x17c56b6b,
  0x1a864db2, 0x1e475005, 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
  0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, 0x4c11db70, 0x48d0c6c7,
  0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75,
  0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011, 0x791d4014, 0x7ddc5da3,
  0x709f7b7a, 0x745e66cd, 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039,
  0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, 0xbe2b5b58, 0xbaea46ef,
  0xb7a96036, 0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
  0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c, 0xc3f706fb,
  0xceb42022, 0xca753d95, 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1,
  0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, 0x34867077, 0x30476dc0,
  0x3d044b19, 0x39c556ae, 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072,
  0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x18aeb13, 0x54bf6a4,
  0x808d07d, 0xcc9cdca, 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde,
  0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02, 0x5e9f46bf, 0x5a5e5b08,
  0x571d7dd1, 0x53dc6066, 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba,
  0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc,
  0xb6238b25, 0xb2e29692, 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
  0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, 0xe0b41de7, 0xe4750050,
  0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2,
  0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686, 0xd5b88683, 0xd1799b34,
  0xdc3abded, 0xd8fba05a, 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637,
  0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb, 0x4f040d56, 0x4bc510e1,
  0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53,
  0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47, 0x36194d42, 0x32d850f5,
  0x3f9b762c, 0x3b5a6b9b, 0x315d626, 0x7d4cb91, 0xa97ed48, 0xe56f0ff,
  0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, 0xf12f560e, 0xf5ee4bb9,
  0xf8ad6d60, 0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
  0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a, 0xc0e2d0dd,
  0xcda1f604, 0xc960ebb3, 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7,
  0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, 0x9b3660c6, 0x9ff77d71,
  0x92b45ba8, 0x9675461f, 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3,
  0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2,
  0x470cdd2b, 0x43cdc09c, 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8,
  0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24, 0x119b4be9, 0x155a565e,
  0x18197087, 0x1cd86d30, 0x29f3d35, 0x65e2082, 0xb1d065b, 0xfdc1bec,
  0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a,
  0x2d15ebe3, 0x29d4f654, 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
  0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, 0xe3a1cbc1, 0xe760d676,
  0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4,
  0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662,
  0x933eb0bb, 0x97ffad0c, 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668,
  0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4
};

/****************************************************************************
 * WebRtcIsac_GetCrc(...)
 *
 * Computes a 32-bit CRC checksum over a bitstream: table-driven, MSB-first,
 * initial state 0xFFFFFFFF, with the final state bitwise-inverted.  The
 * int16_t payload is processed byte-by-byte in memory order.
 *
 * Input:
 *      - bitstream              : payload bitstream (must not be NULL)
 *      - len_bitstream_in_bytes : number of 8-bit words in the bit stream
 *
 * Output:
 *      - crc                    : checksum
 *
 * Return value                  :  0 - Ok
 *                                 -1 - Error (NULL bitstream)
 */
int WebRtcIsac_GetCrc(const int16_t* bitstream,
                      int len_bitstream_in_bytes,
                      uint32_t* crc)
{
  /* Sanity check. */
  if (bitstream == NULL) {
    return -1;
  }

  /* Process the payload as raw bytes, in memory order. */
  const uint8_t* bytes = (const uint8_t*)bitstream;
  uint32_t state = 0xFFFFFFFF;

  for (int i = 0; i < len_bitstream_in_bytes; i++) {
    int table_index = (int)(((state >> 24) ^ bytes[i]) & 0xFF);
    state = (state << 8) ^ kCrcTable[table_index];
  }

  *crc = ~state;
  return 0;
}

View File

@ -1,41 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* crc.h
*
* Checksum functions
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CRC_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CRC_H_
#include <stdint.h>
/****************************************************************************
* WebRtcIsac_GetCrc(...)
*
* This function returns a 32 bit CRC checksum of a bit stream
*
* Input:
* - encoded : payload bit stream
* - no_of_word8s : number of 8-bit words in the bit stream
*
* Output:
* - crc : checksum
*
* Return value : 0 - Ok
* -1 - Error
*/
int WebRtcIsac_GetCrc(const int16_t* encoded, int no_of_word8s, uint32_t* crc);
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CRC_H_ */

View File

@ -1,303 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* decode_B.c
*
* This file contains definition of funtions for decoding.
* Decoding of lower-band, including normal-decoding and RCU decoding.
* Decoding of upper-band, including 8-12 kHz, when the bandwidth is
* 0-12 kHz, and 8-16 kHz, when the bandwidth is 0-16 kHz.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "modules/audio_coding/codecs/isac/main/source/codec.h"
#include "modules/audio_coding/codecs/isac/main/source/entropy_coding.h"
#include "modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
#include "modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h"
#include "modules/audio_coding/codecs/isac/main/source/structs.h"
#include "modules/audio_coding/codecs/isac/main/source/settings.h"
#include "modules/audio_coding/codecs/isac/main/source/pitch_filter.h"
/*
* function to decode the bitstream
* returns the total number of bytes in the stream
*/
/*
 * Decode the lower band (0-8 kHz) of an iSAC payload.
 *
 * Input:
 *   - transform_tables     : FFT tables used by the inverse spectral
 *                            transform (WebRtcIsac_Spec2time).
 *   - ISACdecLB_obj        : lower-band decoder instance; holds the
 *                            bit-stream and all filter/FFT state.
 *   - isRCUPayload         : non-zero for a redundant (RCU) payload; the
 *                            transcoding scale is then inverted before the
 *                            pitch filter and re-applied afterwards.
 *
 * Output:
 *   - signal_out           : decoded audio; FRAMESAMPLES samples are written
 *                            per decoded 30 ms sub-frame.
 *   - current_framesamples : decoded frame length in samples.
 *
 * Return value             : total number of bytes in the stream, or a
 *                            negative error code from a decode helper.
 */
int WebRtcIsac_DecodeLb(const TransformTables* transform_tables,
                        float* signal_out, ISACLBDecStruct* ISACdecLB_obj,
                        int16_t* current_framesamples,
                        int16_t isRCUPayload) {
  int k;
  int len, err;
  int16_t bandwidthInd;

  /* Band-split buffers: float for the synthesis filter bank, double for the
     internal processing chain. */
  float LP_dec_float[FRAMESAMPLES_HALF];
  float HP_dec_float[FRAMESAMPLES_HALF];
  double LPw[FRAMESAMPLES_HALF];
  double HPw[FRAMESAMPLES_HALF];
  double LPw_pf[FRAMESAMPLES_HALF];
  double lo_filt_coef[(ORDERLO + 1)*SUBFRAMES];
  double hi_filt_coef[(ORDERHI + 1)*SUBFRAMES];
  double real_f[FRAMESAMPLES_HALF];
  double imag_f[FRAMESAMPLES_HALF];
  double PitchLags[4];
  double PitchGains[4];
  double AvgPitchGain;
  int16_t PitchGains_Q12[4];
  int16_t AvgPitchGain_Q12;
  float gain;
  int frame_nb; /* counter */
  int frame_mode; /* 0 30ms, 1 for 60ms */
  /* Processed_samples: 480 (30, 60 ms). Cannot take other values. */

  WebRtcIsac_ResetBitstream(&(ISACdecLB_obj->bitstr_obj));
  len = 0;

  /* Decode framelength and BW estimation - not used,
     only for stream pointer*/
  err = WebRtcIsac_DecodeFrameLen(&ISACdecLB_obj->bitstr_obj,
                                  current_framesamples);
  if (err < 0) {
    return err;
  }

  /* Frame_mode:
   * 0: indicates 30 ms frame (480 samples)
   * 1: indicates 60 ms frame (960 samples) */
  frame_mode = *current_framesamples / MAX_FRAMESAMPLES;

  err = WebRtcIsac_DecodeSendBW(&ISACdecLB_obj->bitstr_obj, &bandwidthInd);
  if (err < 0) {
    return err;
  }

  /* One loop if it's one frame (20 or 30ms), 2 loops if 2 frames
     bundled together (60ms). */
  for (frame_nb = 0; frame_nb <= frame_mode; frame_nb++) {
    /* Decode & de-quantize pitch parameters */
    err = WebRtcIsac_DecodePitchGain(&ISACdecLB_obj->bitstr_obj,
                                     PitchGains_Q12);
    if (err < 0) {
      return err;
    }
    err = WebRtcIsac_DecodePitchLag(&ISACdecLB_obj->bitstr_obj, PitchGains_Q12,
                                    PitchLags);
    if (err < 0) {
      return err;
    }
    /* Average of the four sub-frame pitch gains, still in Q12. */
    AvgPitchGain_Q12 = (PitchGains_Q12[0] + PitchGains_Q12[1] +
        PitchGains_Q12[2] + PitchGains_Q12[3]) >> 2;

    /* Decode & de-quantize filter coefficients. */
    err = WebRtcIsac_DecodeLpc(&ISACdecLB_obj->bitstr_obj, lo_filt_coef,
                               hi_filt_coef);
    if (err < 0) {
      return err;
    }
    /* Decode & de-quantize spectrum. */
    len = WebRtcIsac_DecodeSpec(&ISACdecLB_obj->bitstr_obj, AvgPitchGain_Q12,
                                kIsacLowerBand, real_f, imag_f);
    if (len < 0) {
      return len;
    }

    /* Inverse transform. */
    WebRtcIsac_Spec2time(transform_tables, real_f, imag_f, LPw, HPw,
                         &ISACdecLB_obj->fftstr_obj);

    /* Convert PitchGains back to float for pitchfilter_post */
    for (k = 0; k < 4; k++) {
      PitchGains[k] = ((float)PitchGains_Q12[k]) / 4096;
    }
    if (isRCUPayload) {
      /* Undo the transcoding scale applied when the RCU payload was built.
         NOTE(review): loop bound 240 is presumably FRAMESAMPLES_HALF --
         confirm against settings.h. */
      for (k = 0; k < 240; k++) {
        LPw[k] *= RCU_TRANSCODING_SCALE_INVERSE;
        HPw[k] *= RCU_TRANSCODING_SCALE_INVERSE;
      }
    }

    /* Inverse pitch filter. */
    WebRtcIsac_PitchfilterPost(LPw, LPw_pf, &ISACdecLB_obj->pitchfiltstr_obj,
                               PitchLags, PitchGains);
    /* Convert AvgPitchGain back to float for computation of gain. */
    AvgPitchGain = ((float)AvgPitchGain_Q12) / 4096;
    gain = 1.0f - 0.45f * (float)AvgPitchGain;

    for (k = 0; k < FRAMESAMPLES_HALF; k++) {
      /* Reduce gain to compensate for pitch enhancer. */
      LPw_pf[k] *= gain;
    }

    if (isRCUPayload) {
      for (k = 0; k < FRAMESAMPLES_HALF; k++) {
        /* Compensation for transcoding gain changes. */
        LPw_pf[k] *= RCU_TRANSCODING_SCALE;
        HPw[k] *= RCU_TRANSCODING_SCALE;
      }
    }

    /* Perceptual post-filtering (using normalized lattice filter). */
    WebRtcIsac_NormLatticeFilterAr(
        ORDERLO, ISACdecLB_obj->maskfiltstr_obj.PostStateLoF,
        (ISACdecLB_obj->maskfiltstr_obj).PostStateLoG, LPw_pf, lo_filt_coef,
        LP_dec_float);
    WebRtcIsac_NormLatticeFilterAr(
        ORDERHI, ISACdecLB_obj->maskfiltstr_obj.PostStateHiF,
        (ISACdecLB_obj->maskfiltstr_obj).PostStateHiG, HPw, hi_filt_coef,
        HP_dec_float);

    /* Recombine the 2 bands. */
    WebRtcIsac_FilterAndCombineFloat(LP_dec_float, HP_dec_float,
                                     signal_out + frame_nb * FRAMESAMPLES,
                                     &ISACdecLB_obj->postfiltbankstr_obj);
  }
  return len;
}
/*
* This decode function is called when the codec is operating in 16 kHz
* bandwidth to decode the upperband, i.e. 8-16 kHz.
*
* Contrary to lower-band, the upper-band (8-16 kHz) is not split in
* frequency, but split to 12 sub-frames, i.e. twice as lower-band.
*/
/*
 * Decode the upper band (8-16 kHz) when the codec operates at 0-16 kHz
 * bandwidth. The band is split into two consecutive half-frames in time,
 * each shaped by its own set of interpolated LPC coefficients.
 *
 * Input:
 *   - transform_tables : FFT tables for WebRtcIsac_Spec2time.
 *   - ISACdecUB_obj    : upper-band decoder instance (bit-stream + state).
 *   - isRCUPayload     : non-zero for a redundant (RCU) payload; spectral
 *                        coefficients are rescaled before the inverse
 *                        transform.
 *
 * Output:
 *   - signal_out       : decoded upper-band audio, 2 * FRAMESAMPLES_HALF
 *                        samples.
 *
 * Return value         : number of bytes in the stream, or a negative
 *                        error code.
 */
int WebRtcIsac_DecodeUb16(const TransformTables* transform_tables,
                          float* signal_out, ISACUBDecStruct* ISACdecUB_obj,
                          int16_t isRCUPayload) {
  int len, err;
  double halfFrameFirst[FRAMESAMPLES_HALF];
  double halfFrameSecond[FRAMESAMPLES_HALF];
  /* LPC parameter sets for both half-frames (see the index arithmetic in the
     lattice-filter calls below). */
  double percepFilterParam[(UB_LPC_ORDER + 1) * (SUBFRAMES << 1) +
                           (UB_LPC_ORDER + 1)];
  double real_f[FRAMESAMPLES_HALF];
  double imag_f[FRAMESAMPLES_HALF];
  const int16_t kAveragePitchGain = 0; /* No pitch-gain for upper-band. */
  len = 0;

  /* Decode & de-quantize filter coefficients. */
  memset(percepFilterParam, 0, sizeof(percepFilterParam));
  err = WebRtcIsac_DecodeInterpolLpcUb(&ISACdecUB_obj->bitstr_obj,
                                       percepFilterParam, isac16kHz);
  if (err < 0) {
    return err;
  }

  /* Decode & de-quantize spectrum. */
  len = WebRtcIsac_DecodeSpec(&ISACdecUB_obj->bitstr_obj, kAveragePitchGain,
                              kIsacUpperBand16, real_f, imag_f);
  if (len < 0) {
    return len;
  }
  if (isRCUPayload) {
    int n;
    /* Undo RCU transcoding scaling in the frequency domain.
       NOTE(review): 240 is presumably FRAMESAMPLES_HALF -- confirm. */
    for (n = 0; n < 240; n++) {
      real_f[n] *= RCU_TRANSCODING_SCALE_UB_INVERSE;
      imag_f[n] *= RCU_TRANSCODING_SCALE_UB_INVERSE;
    }
  }

  /* Inverse transform. */
  WebRtcIsac_Spec2time(transform_tables,
                       real_f, imag_f, halfFrameFirst, halfFrameSecond,
                       &ISACdecUB_obj->fftstr_obj);

  /* Perceptual post-filtering (using normalized lattice filter). Each
     half-frame uses its own slice of percepFilterParam. */
  WebRtcIsac_NormLatticeFilterAr(
      UB_LPC_ORDER, ISACdecUB_obj->maskfiltstr_obj.PostStateLoF,
      (ISACdecUB_obj->maskfiltstr_obj).PostStateLoG, halfFrameFirst,
      &percepFilterParam[(UB_LPC_ORDER + 1)], signal_out);
  WebRtcIsac_NormLatticeFilterAr(
      UB_LPC_ORDER, ISACdecUB_obj->maskfiltstr_obj.PostStateLoF,
      (ISACdecUB_obj->maskfiltstr_obj).PostStateLoG, halfFrameSecond,
      &percepFilterParam[(UB_LPC_ORDER + 1) * SUBFRAMES + (UB_LPC_ORDER + 1)],
      &signal_out[FRAMESAMPLES_HALF]);
  return len;
}
/*
* This decode function is called when the codec operates at 0-12 kHz
* bandwidth to decode the upperband, i.e. 8-12 kHz.
*
* At the encoder the upper-band is split into two band, 8-12 kHz & 12-16
* kHz, and only 8-12 kHz is encoded. At the decoder, 8-12 kHz band is
* reconstructed and 12-16 kHz replaced with zeros. Then two bands
* are combined, to reconstruct the upperband 8-16 kHz.
*/
/*
 * Decode the upper band (8-12 kHz) when the codec operates at 0-12 kHz
 * bandwidth. Only 8-12 kHz was encoded; the 12-16 kHz band is replaced by
 * zeros before the two bands are recombined into the 8-16 kHz output.
 *
 * Input:
 *   - transform_tables : FFT tables for WebRtcIsac_Spec2time.
 *   - ISACdecUB_obj    : upper-band decoder instance (bit-stream + state).
 *   - isRCUPayload     : non-zero for a redundant (RCU) payload; spectral
 *                        coefficients are rescaled before the inverse
 *                        transform.
 *
 * Output:
 *   - signal_out       : decoded upper-band audio.
 *
 * Return value         : number of bytes in the stream, or a negative
 *                        error code.
 */
int WebRtcIsac_DecodeUb12(const TransformTables* transform_tables,
                          float* signal_out, ISACUBDecStruct* ISACdecUB_obj,
                          int16_t isRCUPayload) {
  int len, err;
  float LP_dec_float[FRAMESAMPLES_HALF];
  float HP_dec_float[FRAMESAMPLES_HALF];
  double LPw[FRAMESAMPLES_HALF];
  double HPw[FRAMESAMPLES_HALF];
  double percepFilterParam[(UB_LPC_ORDER + 1)*SUBFRAMES];
  double real_f[FRAMESAMPLES_HALF];
  double imag_f[FRAMESAMPLES_HALF];
  const int16_t kAveragePitchGain = 0; /* No pitch-gain for upper-band. */
  len = 0;

  /* Decode & dequantize filter coefficients. */
  err = WebRtcIsac_DecodeInterpolLpcUb(&ISACdecUB_obj->bitstr_obj,
                                       percepFilterParam, isac12kHz);
  if (err < 0) {
    return err;
  }

  /* Decode & de-quantize spectrum. */
  len = WebRtcIsac_DecodeSpec(&ISACdecUB_obj->bitstr_obj, kAveragePitchGain,
                              kIsacUpperBand12, real_f, imag_f);
  if (len < 0) {
    return len;
  }
  if (isRCUPayload) {
    int n;
    /* Undo RCU transcoding scaling in the frequency domain.
       NOTE(review): 240 is presumably FRAMESAMPLES_HALF -- confirm. */
    for (n = 0; n < 240; n++) {
      real_f[n] *= RCU_TRANSCODING_SCALE_UB_INVERSE;
      imag_f[n] *= RCU_TRANSCODING_SCALE_UB_INVERSE;
    }
  }

  /* Inverse transform. */
  WebRtcIsac_Spec2time(transform_tables,
                       real_f, imag_f, LPw, HPw, &ISACdecUB_obj->fftstr_obj);

  /* perceptual post-filtering (using normalized lattice filter) */
  WebRtcIsac_NormLatticeFilterAr(UB_LPC_ORDER,
                                 ISACdecUB_obj->maskfiltstr_obj.PostStateLoF,
                                 (ISACdecUB_obj->maskfiltstr_obj).PostStateLoG,
                                 LPw, percepFilterParam, LP_dec_float);
  /* Zero for 12-16 kHz. */
  memset(HP_dec_float, 0, sizeof(float) * (FRAMESAMPLES_HALF));

  /* Recombine the 2 bands. */
  WebRtcIsac_FilterAndCombineFloat(HP_dec_float, LP_dec_float, signal_out,
                                   &ISACdecUB_obj->postfiltbankstr_obj);
  return len;
}

View File

@ -1,89 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/main/source/structs.h"
#include "modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h"
#include "modules/audio_coding/codecs/isac/main/source/entropy_coding.h"
#include "modules/audio_coding/codecs/isac/main/source/codec.h"
/*
 * Parse the frame-length and send-bandwidth fields from the stream header
 * and feed this packet's timing information into the bandwidth estimator.
 * The estimator works in a 16 kHz timebase, so timestamp deltas are
 * translated accordingly (halved for super-wideband 32 kHz timestamps).
 *
 * Returns 0 on success, or a negative error code from a decode/update
 * helper.
 */
int WebRtcIsac_EstimateBandwidth(BwEstimatorstr* bwest_str,
                                 Bitstr* streamdata,
                                 size_t packet_size,
                                 uint16_t rtp_seq_number,
                                 uint32_t send_ts,
                                 uint32_t arr_ts,
                                 enum IsacSamplingRate encoderSampRate,
                                 enum IsacSamplingRate decoderSampRate) {
  int16_t index;
  int16_t frame_samples;
  uint32_t sendTimestampIn16kHz;
  uint32_t arrivalTimestampIn16kHz;
  uint32_t diffSendTime;
  uint32_t diffArrivalTime;
  int err;

  /* Decode framelength and BW estimation fields from the header. */
  err = WebRtcIsac_DecodeFrameLen(streamdata, &frame_samples);
  if (err < 0) {
    return err;
  }
  err = WebRtcIsac_DecodeSendBW(streamdata, &index);
  if (err < 0) {
    return err;
  }

  /* Update the estimate of the other side's uplink bandwidth. */
  err = WebRtcIsac_UpdateUplinkBwImpl(bwest_str, index, encoderSampRate);
  if (err < 0) {
    return err;
  }

  /* Unsigned subtraction keeps the deltas correct across timestamp
     wrap-around. */
  diffSendTime = send_ts - (uint32_t)bwest_str->senderTimestamp;
  bwest_str->senderTimestamp = send_ts;
  diffArrivalTime = arr_ts - (uint32_t)bwest_str->receiverTimestamp;
  bwest_str->receiverTimestamp = arr_ts;

  if (decoderSampRate == kIsacSuperWideband) {
    /* 32 kHz timestamps: halve the deltas to get 16 kHz ticks. */
    diffArrivalTime >>= 1;
    diffSendTime >>= 1;
  }

  /* Arrival and send timestamps translated to the 16 kHz timebase. */
  arrivalTimestampIn16kHz = (uint32_t)bwest_str->prev_rec_arr_ts +
                            diffArrivalTime;
  sendTimestampIn16kHz = (uint32_t)bwest_str->prev_rec_send_ts + diffSendTime;

  err = WebRtcIsac_UpdateBandwidthEstimator(
      bwest_str, rtp_seq_number, (frame_samples * 1000) / FS,
      sendTimestampIn16kHz, arrivalTimestampIn16kHz, packet_size);
  if (err < 0) {
    return err;
  }
  return 0;
}

File diff suppressed because it is too large Load Diff

View File

@ -1,706 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* code_LPC_UB.c
*
* This file contains definition of functions used to
* encode LPC parameters (Shape & gain) of the upper band.
*
*/
#include <math.h>
#include <stdio.h>
#include <string.h>
#include "modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.h"
#include "modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h"
#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h"
#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h"
#include "modules/audio_coding/codecs/isac/main/source/settings.h"
/******************************************************************************
* WebRtcIsac_RemoveLarMean()
*
* Remove the means from LAR coefficients.
*
* Input:
* -lar : pointer to lar vectors. LAR vectors are
* concatenated.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -lar : pointer to mean-removed LAR:s.
*
*
*/
/*
 * Subtract the per-coefficient mean from each concatenated LAR vector.
 * 'bandwidth' selects the SWB-12kHz or SWB-16kHz mean table; any other
 * value is an error. Returns 0 on success, -1 on unknown bandwidth.
 */
int16_t WebRtcIsac_RemoveLarMean(double* lar, int16_t bandwidth) {
  int16_t numVec;
  const double* meanLAR;

  if (bandwidth == isac12kHz) {
    numVec = UB_LPC_VEC_PER_FRAME;
    meanLAR = WebRtcIsac_kMeanLarUb12;
  } else if (bandwidth == isac16kHz) {
    numVec = UB16_LPC_VEC_PER_FRAME;
    meanLAR = WebRtcIsac_kMeanLarUb16;
  } else {
    return -1;
  }

  for (int16_t vec = 0; vec < numVec; vec++) {
    for (int16_t c = 0; c < UB_LPC_ORDER; c++) {
      lar[vec * UB_LPC_ORDER + c] -= meanLAR[c];
    }
  }
  return 0;
}
/******************************************************************************
* WebRtcIsac_DecorrelateIntraVec()
*
* Remove the correlation amonge the components of LAR vectors. If LAR vectors
* of one frame are put in a matrix where each column is a LAR vector of a
* sub-frame, then this is equivalent to multiplying the LAR matrix with
* a decorrelting mtrix from left.
*
* Input:
* -inLar : pointer to mean-removed LAR vecrtors.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -out : decorrelated LAR vectors.
*/
/*
 * Left-multiply each mean-removed LAR vector by the intra-vector
 * decorrelation matrix: out = decorrMat * data, applied independently to
 * each of the concatenated vectors (each of dimension UB_LPC_ORDER).
 * Returns 0 on success, -1 on unknown bandwidth.
 */
int16_t WebRtcIsac_DecorrelateIntraVec(const double* data,
                                       double* out,
                                       int16_t bandwidth) {
  const double* decorrMat;
  int16_t numVec;

  if (bandwidth == isac12kHz) {
    decorrMat = &WebRtcIsac_kIntraVecDecorrMatUb12[0][0];
    numVec = UB_LPC_VEC_PER_FRAME;
  } else if (bandwidth == isac16kHz) {
    decorrMat = &WebRtcIsac_kIintraVecDecorrMatUb16[0][0];
    numVec = UB16_LPC_VEC_PER_FRAME;
  } else {
    return -1;
  }

  for (int16_t vec = 0; vec < numVec; vec++) {
    const double* src = &data[vec * UB_LPC_ORDER];
    double* dst = &out[vec * UB_LPC_ORDER];
    for (int16_t row = 0; row < UB_LPC_ORDER; row++) {
      dst[row] = 0;
      for (int16_t col = 0; col < UB_LPC_ORDER; col++) {
        dst[row] += src[col] * decorrMat[row * UB_LPC_ORDER + col];
      }
    }
  }
  return 0;
}
/******************************************************************************
* WebRtcIsac_DecorrelateInterVec()
*
* Remover the correlation among mean-removed LAR vectors. If LAR vectors
* of one frame are put in a matrix where each column is a LAR vector of a
* sub-frame, then this is equivalent to multiplying the LAR matrix with
* a decorrelting mtrix from right.
*
* Input:
* -data : pointer to matrix of LAR vectors. The matrix
* is stored column-wise.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -out : decorrelated LAR vectors.
*/
/*
 * Right-multiply the frame's LAR matrix (stored column-wise, one vector of
 * dimension UB_LPC_ORDER per column) by the inter-vector decorrelation
 * matrix: out = data * decorrMat. Returns 0 on success, -1 on unknown
 * bandwidth.
 */
int16_t WebRtcIsac_DecorrelateInterVec(const double* data,
                                       double* out,
                                       int16_t bandwidth) {
  const double* decorrMat;
  int16_t interVecDim;

  if (bandwidth == isac12kHz) {
    decorrMat = &WebRtcIsac_kInterVecDecorrMatUb12[0][0];
    interVecDim = UB_LPC_VEC_PER_FRAME;
  } else if (bandwidth == isac16kHz) {
    decorrMat = &WebRtcIsac_kInterVecDecorrMatUb16[0][0];
    interVecDim = UB16_LPC_VEC_PER_FRAME;
  } else {
    return -1;
  }

  for (int16_t coeff = 0; coeff < UB_LPC_ORDER; coeff++) {
    for (int16_t col = 0; col < interVecDim; col++) {
      double* dst = &out[coeff + col * UB_LPC_ORDER];
      *dst = 0;
      for (int16_t row = 0; row < interVecDim; row++) {
        *dst += data[coeff + row * UB_LPC_ORDER] *
                decorrMat[row * interVecDim + col];
      }
    }
  }
  return 0;
}
/******************************************************************************
* WebRtcIsac_QuantizeUncorrLar()
*
* Quantize the uncorrelated parameters.
*
* Input:
* -data : uncorrelated LAR vectors.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -data : quantized version of the input.
* -idx : pointer to quantization indices.
*/
/*
 * Uniformly quantize the decorrelated LAR parameters, clamping each index
 * to the per-coefficient codebook range. On return 'data' holds the
 * reconstructed (quantized) values and 'recIdx' the quantization indices.
 * Returns 0 on success, -1 on unknown bandwidth.
 */
double WebRtcIsac_QuantizeUncorrLar(double* data,
                                    int* recIdx,
                                    int16_t bandwidth) {
  const double* leftRecPoint;
  double stepSize;
  const int16_t* numQuantCell;
  int16_t interVecDim;

  if (bandwidth == isac12kHz) {
    leftRecPoint = WebRtcIsac_kLpcShapeLeftRecPointUb12;
    stepSize = WebRtcIsac_kLpcShapeQStepSizeUb12;
    numQuantCell = WebRtcIsac_kLpcShapeNumRecPointUb12;
    interVecDim = UB_LPC_VEC_PER_FRAME;
  } else if (bandwidth == isac16kHz) {
    leftRecPoint = WebRtcIsac_kLpcShapeLeftRecPointUb16;
    stepSize = WebRtcIsac_kLpcShapeQStepSizeUb16;
    numQuantCell = WebRtcIsac_kLpcShapeNumRecPointUb16;
    interVecDim = UB16_LPC_VEC_PER_FRAME;
  } else {
    return -1;
  }

  for (int16_t cntr = 0; cntr < UB_LPC_ORDER * interVecDim; cntr++) {
    int32_t idx =
        (int32_t)floor((data[cntr] - leftRecPoint[cntr]) / stepSize + 0.5);
    if (idx < 0) {
      idx = 0;
    } else if (idx >= numQuantCell[cntr]) {
      idx = numQuantCell[cntr] - 1;
    }
    /* Reconstruct the quantized value and record its index. */
    data[cntr] = leftRecPoint[cntr] + idx * stepSize;
    recIdx[cntr] = idx;
  }
  return 0;
}
/******************************************************************************
* WebRtcIsac_DequantizeLpcParam()
*
* Get the quantized value of uncorrelated LARs given the quantization indices.
*
* Input:
* -idx : pointer to quantiztion indices.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -out : pointer to quantized values.
*/
/*
 * Reconstruct the quantized LAR values from their quantization indices
 * (inverse of WebRtcIsac_QuantizeUncorrLar). Returns 0 on success, -1 on
 * unknown bandwidth.
 */
int16_t WebRtcIsac_DequantizeLpcParam(const int* idx,
                                      double* out,
                                      int16_t bandwidth) {
  const double* leftRecPoint;
  double stepSize;
  int16_t interVecDim;

  if (bandwidth == isac12kHz) {
    leftRecPoint = WebRtcIsac_kLpcShapeLeftRecPointUb12;
    stepSize = WebRtcIsac_kLpcShapeQStepSizeUb12;
    interVecDim = UB_LPC_VEC_PER_FRAME;
  } else if (bandwidth == isac16kHz) {
    leftRecPoint = WebRtcIsac_kLpcShapeLeftRecPointUb16;
    stepSize = WebRtcIsac_kLpcShapeQStepSizeUb16;
    interVecDim = UB16_LPC_VEC_PER_FRAME;
  } else {
    return -1;
  }

  for (int16_t k = 0; k < UB_LPC_ORDER * interVecDim; k++) {
    out[k] = leftRecPoint[k] + idx[k] * stepSize;
  }
  return 0;
}
/******************************************************************************
* WebRtcIsac_CorrelateIntraVec()
*
* This is the inverse of WebRtcIsac_DecorrelateIntraVec().
*
* Input:
* -data : uncorrelated parameters.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -out : correlated parametrs.
*/
/*
 * Inverse of WebRtcIsac_DecorrelateIntraVec(): multiply each vector by the
 * transpose of the intra-vector decorrelation matrix. Returns 0 on success,
 * -1 on unknown bandwidth.
 */
int16_t WebRtcIsac_CorrelateIntraVec(const double* data,
                                     double* out,
                                     int16_t bandwidth) {
  const double* intraVecDecorrMat;
  int16_t numVec;

  if (bandwidth == isac12kHz) {
    numVec = UB_LPC_VEC_PER_FRAME;
    intraVecDecorrMat = &WebRtcIsac_kIntraVecDecorrMatUb12[0][0];
  } else if (bandwidth == isac16kHz) {
    numVec = UB16_LPC_VEC_PER_FRAME;
    intraVecDecorrMat = &WebRtcIsac_kIintraVecDecorrMatUb16[0][0];
  } else {
    return -1;
  }

  for (int16_t vec = 0; vec < numVec; vec++) {
    const double* src = &data[vec * UB_LPC_ORDER];
    double* dst = &out[vec * UB_LPC_ORDER];
    for (int16_t col = 0; col < UB_LPC_ORDER; col++) {
      dst[col] = 0;
      for (int16_t row = 0; row < UB_LPC_ORDER; row++) {
        dst[col] += src[row] * intraVecDecorrMat[row * UB_LPC_ORDER + col];
      }
    }
  }
  return 0;
}
/******************************************************************************
* WebRtcIsac_CorrelateInterVec()
*
* This is the inverse of WebRtcIsac_DecorrelateInterVec().
*
* Input:
* -data
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -out : correlated parametrs.
*/
/*
 * Inverse of WebRtcIsac_DecorrelateInterVec(): re-correlate the LAR matrix
 * (stored column-wise) across sub-frame vectors. A per-coefficient scratch
 * vector is used so 'data' may be fully read before 'out' is written for
 * that coefficient. Returns 0 on success, -1 on unknown bandwidth.
 */
int16_t WebRtcIsac_CorrelateInterVec(const double* data,
                                     double* out,
                                     int16_t bandwidth) {
  const double* interVecDecorrMat;
  int16_t interVecDim;
  double scratch[UB16_LPC_VEC_PER_FRAME] = {0.0};

  if (bandwidth == isac12kHz) {
    interVecDim = UB_LPC_VEC_PER_FRAME;
    interVecDecorrMat = &WebRtcIsac_kInterVecDecorrMatUb12[0][0];
  } else if (bandwidth == isac16kHz) {
    interVecDim = UB16_LPC_VEC_PER_FRAME;
    interVecDecorrMat = &WebRtcIsac_kInterVecDecorrMatUb16[0][0];
  } else {
    return -1;
  }

  for (int16_t coeff = 0; coeff < UB_LPC_ORDER; coeff++) {
    for (int16_t row = 0; row < interVecDim; row++) {
      scratch[row] = 0;
      for (int16_t col = 0; col < interVecDim; col++) {
        scratch[row] += data[coeff + col * UB_LPC_ORDER] *
                        interVecDecorrMat[row * interVecDim + col];
      }
    }
    for (int16_t row = 0; row < interVecDim; row++) {
      out[coeff + row * UB_LPC_ORDER] = scratch[row];
    }
  }
  return 0;
}
/******************************************************************************
* WebRtcIsac_AddLarMean()
*
* This is the inverse of WebRtcIsac_RemoveLarMean()
*
* Input:
* -data : pointer to mean-removed LAR:s.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -data : pointer to LARs.
*/
/*
 * Inverse of WebRtcIsac_RemoveLarMean(): add the per-coefficient mean back
 * to each concatenated LAR vector. Returns 0 on success, -1 on unknown
 * bandwidth.
 */
int16_t WebRtcIsac_AddLarMean(double* data, int16_t bandwidth) {
  int16_t numVec;
  const double* meanLAR;

  if (bandwidth == isac12kHz) {
    numVec = UB_LPC_VEC_PER_FRAME;
    meanLAR = WebRtcIsac_kMeanLarUb12;
  } else if (bandwidth == isac16kHz) {
    numVec = UB16_LPC_VEC_PER_FRAME;
    meanLAR = WebRtcIsac_kMeanLarUb16;
  } else {
    return -1;
  }

  for (int16_t vec = 0; vec < numVec; vec++) {
    for (int16_t c = 0; c < UB_LPC_ORDER; c++) {
      data[vec * UB_LPC_ORDER + c] += meanLAR[c];
    }
  }
  return 0;
}
/******************************************************************************
* WebRtcIsac_ToLogDomainRemoveMean()
*
* Transform the LPC gain to log domain then remove the mean value.
*
* Input:
* -lpcGain : pointer to LPC Gain, expecting 6 LPC gains
*
* Output:
* -lpcGain : mean-removed in log domain.
*/
/*
 * Map the UB_LPC_GAIN_DIM LPC gains to the log domain and subtract the
 * codec's mean log-gain. Always returns 0.
 */
int16_t WebRtcIsac_ToLogDomainRemoveMean(double* data) {
  for (int16_t k = 0; k < UB_LPC_GAIN_DIM; k++) {
    data[k] = log(data[k]) - WebRtcIsac_kMeanLpcGain;
  }
  return 0;
}
/******************************************************************************
* WebRtcIsac_DecorrelateLPGain()
*
* Decorrelate LPC gains. There are 6 LPC Gains per frame. This is like
* multiplying gain vector with decorrelating matrix.
*
* Input:
* -data : LPC gain in log-domain with mean removed.
*
* Output:
* -out : decorrelated parameters.
*/
/*
 * Decorrelate the log-domain gain vector: out = data * decorrMat
 * (one row-vector times the UB_LPC_GAIN_DIM x UB_LPC_GAIN_DIM matrix).
 * Always returns 0.
 */
int16_t WebRtcIsac_DecorrelateLPGain(const double* data, double* out) {
  for (int16_t col = 0; col < UB_LPC_GAIN_DIM; col++) {
    out[col] = 0;
    for (int16_t row = 0; row < UB_LPC_GAIN_DIM; row++) {
      out[col] += data[row] * WebRtcIsac_kLpcGainDecorrMat[row][col];
    }
  }
  return 0;
}
/******************************************************************************
* WebRtcIsac_QuantizeLpcGain()
*
* Quantize the decorrelated log-domain gains.
*
* Input:
* -lpcGain : uncorrelated LPC gains.
*
* Output:
* -idx : quantization indices
* -lpcGain : quantized value of the inpt.
*/
/*
 * Uniformly quantize the decorrelated log-domain gains, clamping each index
 * to its codebook range. On return 'data' holds the reconstructed values
 * and 'idx' the quantization indices. Always returns 0.
 */
double WebRtcIsac_QuantizeLpcGain(double* data, int* idx) {
  for (int16_t k = 0; k < UB_LPC_GAIN_DIM; k++) {
    int q = (int)floor((data[k] - WebRtcIsac_kLeftRecPointLpcGain[k]) /
                           WebRtcIsac_kQSizeLpcGain +
                       0.5);
    if (q < 0) {
      q = 0;
    } else if (q >= WebRtcIsac_kNumQCellLpcGain[k]) {
      q = WebRtcIsac_kNumQCellLpcGain[k] - 1;
    }
    data[k] = WebRtcIsac_kLeftRecPointLpcGain[k] + q * WebRtcIsac_kQSizeLpcGain;
    idx[k] = q;
  }
  return 0;
}
/******************************************************************************
* WebRtcIsac_DequantizeLpcGain()
*
* Get the quantized values given the quantization indices.
*
* Input:
* -idx : pointer to quantization indices.
*
* Output:
* -lpcGains : quantized values of the given parametes.
*/
/*
 * Reconstruct the quantized log-domain gains from their quantization
 * indices (inverse of WebRtcIsac_QuantizeLpcGain). Always returns 0.
 */
int16_t WebRtcIsac_DequantizeLpcGain(const int* idx, double* out) {
  for (int16_t k = 0; k < UB_LPC_GAIN_DIM; k++) {
    out[k] =
        WebRtcIsac_kLeftRecPointLpcGain[k] + idx[k] * WebRtcIsac_kQSizeLpcGain;
  }
  return 0;
}
/******************************************************************************
* WebRtcIsac_CorrelateLpcGain()
*
* This is the inverse of WebRtcIsac_DecorrelateLPGain().
*
* Input:
* -data : decorrelated parameters.
*
* Output:
* -out : correlated parameters.
*/
/*
 * Inverse of WebRtcIsac_DecorrelateLPGain(): out = decorrMat * data.
 * Always returns 0.
 */
int16_t WebRtcIsac_CorrelateLpcGain(const double* data, double* out) {
  for (int16_t row = 0; row < UB_LPC_GAIN_DIM; row++) {
    out[row] = 0;
    for (int16_t col = 0; col < UB_LPC_GAIN_DIM; col++) {
      out[row] += WebRtcIsac_kLpcGainDecorrMat[row][col] * data[col];
    }
  }
  return 0;
}
/******************************************************************************
* WebRtcIsac_AddMeanToLinearDomain()
*
* This is the inverse of WebRtcIsac_ToLogDomainRemoveMean().
*
* Input:
* -lpcGain : LPC gain in log-domain & mean removed
*
* Output:
* -lpcGain : LPC gain in normal domain.
*/
/*
 * Inverse of WebRtcIsac_ToLogDomainRemoveMean(): add the mean log-gain back
 * and map the gains from the log domain to the linear domain. Always
 * returns 0.
 */
int16_t WebRtcIsac_AddMeanToLinearDomain(double* lpcGains) {
  for (int16_t k = 0; k < UB_LPC_GAIN_DIM; k++) {
    lpcGains[k] = exp(lpcGains[k] + WebRtcIsac_kMeanLpcGain);
  }
  return 0;
}

View File

@ -1,246 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* encode_lpc_swb.h
*
* This file contains declaration of functions used to
* encode LPC parameters (Shape & gain) of the upper band.
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENCODE_LPC_SWB_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENCODE_LPC_SWB_H_
#include "modules/audio_coding/codecs/isac/main/source/settings.h"
#include "modules/audio_coding/codecs/isac/main/source/structs.h"
/******************************************************************************
* WebRtcIsac_RemoveLarMean()
*
* Remove the means from LAR coefficients.
*
* Input:
* -lar : pointer to lar vectors. LAR vectors are
* concatenated.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -lar : pointer to mean-removed LAR:s.
*
*
*/
int16_t WebRtcIsac_RemoveLarMean(double* lar, int16_t bandwidth);
/******************************************************************************
* WebRtcIsac_DecorrelateIntraVec()
*
* Remove the correlation amonge the components of LAR vectors. If LAR vectors
* of one frame are put in a matrix where each column is a LAR vector of a
* sub-frame, then this is equivalent to multiplying the LAR matrix with
* a decorrelting mtrix from left.
*
* Input:
* -inLar : pointer to mean-removed LAR vecrtors.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -out : decorrelated LAR vectors.
*/
int16_t WebRtcIsac_DecorrelateIntraVec(const double* inLAR,
double* out,
int16_t bandwidth);
/******************************************************************************
* WebRtcIsac_DecorrelateInterVec()
*
* Remover the correlation among mean-removed LAR vectors. If LAR vectors
* of one frame are put in a matrix where each column is a LAR vector of a
* sub-frame, then this is equivalent to multiplying the LAR matrix with
* a decorrelting mtrix from right.
*
* Input:
* -data : pointer to matrix of LAR vectors. The matrix
* is stored column-wise.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -out : decorrelated LAR vectors.
*/
int16_t WebRtcIsac_DecorrelateInterVec(const double* data,
double* out,
int16_t bandwidth);
/******************************************************************************
* WebRtcIsac_QuantizeUncorrLar()
*
* Quantize the uncorrelated parameters.
*
* Input:
* -data : uncorrelated LAR vectors.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -data : quantized version of the input.
* -idx : pointer to quantization indices.
*/
double WebRtcIsac_QuantizeUncorrLar(double* data, int* idx, int16_t bandwidth);
/******************************************************************************
* WebRtcIsac_CorrelateIntraVec()
*
* This is the inverse of WebRtcIsac_DecorrelateIntraVec().
*
* Input:
* -data : uncorrelated parameters.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -out : correlated parametrs.
*/
int16_t WebRtcIsac_CorrelateIntraVec(const double* data,
double* out,
int16_t bandwidth);
/******************************************************************************
* WebRtcIsac_CorrelateInterVec()
*
* This is the inverse of WebRtcIsac_DecorrelateInterVec().
*
* Input:
* -data
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -out : correlated parametrs.
*/
int16_t WebRtcIsac_CorrelateInterVec(const double* data,
double* out,
int16_t bandwidth);
/******************************************************************************
* WebRtcIsac_AddLarMean()
*
* This is the inverse of WebRtcIsac_RemoveLarMean()
*
* Input:
* -data : pointer to mean-removed LAR:s.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -data : pointer to LARs.
*/
int16_t WebRtcIsac_AddLarMean(double* data, int16_t bandwidth);
/******************************************************************************
* WebRtcIsac_DequantizeLpcParam()
*
* Get the quantized value of uncorrelated LARs given the quantization indices.
*
* Input:
* -idx : pointer to quantiztion indices.
* -bandwidth : indicates if the given LAR vectors belong
* to SWB-12kHz or SWB-16kHz.
*
* Output:
* -out : pointer to quantized values.
*/
int16_t WebRtcIsac_DequantizeLpcParam(const int* idx,
double* out,
int16_t bandwidth);
/******************************************************************************
* WebRtcIsac_ToLogDomainRemoveMean()
*
* Transform the LPC gain to log domain then remove the mean value.
*
* Input:
* -lpcGain : pointer to LPC Gain, expecting 6 LPC gains
*
* Output:
* -lpcGain : mean-removed in log domain.
*/
int16_t WebRtcIsac_ToLogDomainRemoveMean(double* lpGains);
/******************************************************************************
* WebRtcIsac_DecorrelateLPGain()
*
* Decorrelate LPC gains. There are 6 LPC Gains per frame. This is like
* multiplying gain vector with decorrelating matrix.
*
* Input:
* -data : LPC gain in log-domain with mean removed.
*
* Output:
* -out : decorrelated parameters.
*/
int16_t WebRtcIsac_DecorrelateLPGain(const double* data, double* out);
/******************************************************************************
* WebRtcIsac_QuantizeLpcGain()
*
* Quantize the decorrelated log-domain gains.
*
* Input:
* -lpcGain : uncorrelated LPC gains.
*
* Output:
* -idx : quantization indices
* -lpcGain : quantized value of the inpt.
*/
double WebRtcIsac_QuantizeLpcGain(double* lpGains, int* idx);
/******************************************************************************
* WebRtcIsac_DequantizeLpcGain()
*
* Get the quantized values given the quantization indices.
*
* Input:
* -idx : pointer to quantization indices.
*
* Output:
* -lpcGains : quantized values of the given parametes.
*/
int16_t WebRtcIsac_DequantizeLpcGain(const int* idx, double* lpGains);
/******************************************************************************
* WebRtcIsac_CorrelateLpcGain()
*
* This is the inverse of WebRtcIsac_DecorrelateLPGain().
*
* Input:
* -data : decorrelated parameters.
*
* Output:
* -out : correlated parameters.
*/
int16_t WebRtcIsac_CorrelateLpcGain(const double* data, double* out);
/******************************************************************************
* WebRtcIsac_AddMeanToLinearDomain()
*
* This is the inverse of WebRtcIsac_ToLogDomainRemoveMean().
*
* Input:
* -lpcGain : LPC gain in log-domain & mean removed
*
* Output:
* -lpcGain : LPC gain in normal domain.
*/
int16_t WebRtcIsac_AddMeanToLinearDomain(double* lpcGains);
#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENCODE_LPC_SWB_H_

View File

@ -1,347 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* entropy_coding.h
*
* This header file declares all of the functions used to arithmetically
* encode the iSAC bistream
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENTROPY_CODING_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENTROPY_CODING_H_
#include "modules/audio_coding/codecs/isac/main/source/settings.h"
#include "modules/audio_coding/codecs/isac/main/source/structs.h"
/******************************************************************************
 * WebRtcIsac_DecodeSpec()
 * Decode the real and imaginary parts of the DFT coefficients, given a
 * bit-stream. The decoded DFT coefficients can be transformed to the time
 * domain by WebRtcIsac_Time2Spec().
 *
 * Input:
 *  - streamdata        : pointer to a structure containing the encoded
 *                        data and the parameters needed for entropy
 *                        coding.
 *  - AvgPitchGain_Q12  : average pitch-gain of the frame. This is only
 *                        relevant for the 0-4 kHz band; the input value
 *                        is not used in other bands.
 *  - band              : specifies which band's DFT should be decoded.
 *
 * Output:
 *  - *fr               : buffer where the real part of the DFT
 *                        coefficients is written.
 *  - *fi               : buffer where the imaginary part of the DFT
 *                        coefficients is written.
 *
 * Return value         : < 0 if an error occurs
 *                          0 if succeeded.
 */
int WebRtcIsac_DecodeSpec(Bitstr* streamdata,
                          int16_t AvgPitchGain_Q12,
                          enum ISACBand band,
                          double* fr,
                          double* fi);

/******************************************************************************
 * WebRtcIsac_EncodeSpec()
 * Encode the real and imaginary parts of the DFT coefficients into the
 * given bit-stream.
 *
 * Input:
 *  - *fr               : buffer containing the real part of the DFT
 *                        coefficients to be encoded.
 *  - *fi               : buffer containing the imaginary part of the DFT
 *                        coefficients to be encoded.
 *  - AvgPitchGain_Q12  : average pitch-gain of the frame. This is only
 *                        relevant for the 0-4 kHz band; the input value
 *                        is not used in other bands.
 *  - band              : specifies which band's DFT should be encoded.
 *
 * Output:
 *  - streamdata        : pointer to a structure containing the encoded
 *                        data and the parameters needed for entropy
 *                        coding.
 *
 * Return value         : < 0 if an error occurs
 *                          0 if succeeded.
 */
int WebRtcIsac_EncodeSpec(const int16_t* fr,
                          const int16_t* fi,
                          int16_t AvgPitchGain_Q12,
                          enum ISACBand band,
                          Bitstr* streamdata);
/* Decode & dequantize LPC coefficients. */
int WebRtcIsac_DecodeLpcCoef(Bitstr* streamdata, double* LPCCoef);
int WebRtcIsac_DecodeLpcCoefUB(Bitstr* streamdata,
                               double* lpcVecs,
                               double* percepFilterGains,
                               int16_t bandwidth);
int WebRtcIsac_DecodeLpc(Bitstr* streamdata,
                         double* LPCCoef_lo,
                         double* LPCCoef_hi);

/* Quantize & encode LPC coefficients. */
void WebRtcIsac_EncodeLpcLb(double* LPCCoef_lo,
                            double* LPCCoef_hi,
                            Bitstr* streamdata,
                            IsacSaveEncoderData* encData);
void WebRtcIsac_EncodeLpcGainLb(double* LPCCoef_lo,
                                double* LPCCoef_hi,
                                Bitstr* streamdata,
                                IsacSaveEncoderData* encData);

/******************************************************************************
 * WebRtcIsac_EncodeLpcUB()
 * Encode LPC parameters, given as an A-polynomial, of the upper band. The
 * encoding is performed in the LAR domain.
 * For the upper band, the LPC of some sub-frames is computed and encoded;
 * the LPC of the remaining sub-frames is obtained by linear interpolation
 * in the LAR domain. This function performs the interpolation and returns
 * the LPC of all sub-frames.
 *
 * Inputs:
 *  - lpcCoeff          : a buffer containing the A-polynomials of the
 *                        sub-frames (excluding the first coefficient,
 *                        which is 1).
 *  - bandwidth         : specifies if the codec is operating in 0-12 kHz
 *                        or 0-16 kHz mode.
 *
 * Input/output:
 *  - streamdata        : pointer to a structure containing the encoded
 *                        data and the parameters needed for entropy
 *                        coding.
 *
 * Output:
 *  - interpolLPCCoeff  : interpolated LPC (A-polynomial) of all
 *                        sub-frames.
 *                        If LP analysis is of order K, and there are N
 *                        sub-frames, then this is a buffer of size
 *                        (K + 1) * N; each vector starts with the LPC
 *                        gain of the corresponding sub-frame. The LPC
 *                        gains are encoded and inserted after this
 *                        function is called. The first A-coefficient,
 *                        which is 1, is not included.
 *
 * Return value          : 0 if encoding is successful,
 *                        <0 if failed to encode.
 */
int16_t WebRtcIsac_EncodeLpcUB(double* lpcCoeff,
                               Bitstr* streamdata,
                               double* interpolLPCCoeff,
                               int16_t bandwidth,
                               ISACUBSaveEncDataStruct* encData);
/******************************************************************************
 * WebRtcIsac_DecodeInterpolLpcUb()
 * Decode LPC coefficients and interpolate to get the coefficients for all
 * sub-frames.
 *
 * Input:
 *  - bandwidth          : specifies if the codec is in 0-12 kHz or
 *                         0-16 kHz mode.
 *
 * Input/output:
 *  - streamdata         : pointer to a structure containing the encoded
 *                         data and the parameters needed for entropy
 *                         coding.
 *
 * Output:
 *  - percepFilterParam  : decoded and interpolated LPC (A-polynomial) of
 *                         all sub-frames.
 *                         If LP analysis is of order K, and there are N
 *                         sub-frames, then this is a buffer of size
 *                         (K + 1) * N; each vector starts with the LPC
 *                         gain of the corresponding sub-frame. The LPC
 *                         gains are handled separately, after this
 *                         function is called. The first A-coefficient,
 *                         which is 1, is not included.
 *
 * Return value           : 0 if successful,
 *                         <0 if failed.
 */
int16_t WebRtcIsac_DecodeInterpolLpcUb(Bitstr* streamdata,
                                       double* percepFilterParam,
                                       int16_t bandwidth);

/* Decode & dequantize reflection coefficients (RC). */
int WebRtcIsac_DecodeRc(Bitstr* streamdata, int16_t* RCQ15);

/* Quantize & encode reflection coefficients (RC). */
void WebRtcIsac_EncodeRc(int16_t* RCQ15, Bitstr* streamdata);

/* Decode & dequantize squared gain. */
int WebRtcIsac_DecodeGain2(Bitstr* streamdata, int32_t* Gain2);

/* Quantize & encode squared gain (input is the squared gain). */
int WebRtcIsac_EncodeGain2(int32_t* gain2, Bitstr* streamdata);

void WebRtcIsac_EncodePitchGain(int16_t* PitchGains_Q12,
                                Bitstr* streamdata,
                                IsacSaveEncoderData* encData);
void WebRtcIsac_EncodePitchLag(double* PitchLags,
                               int16_t* PitchGain_Q12,
                               Bitstr* streamdata,
                               IsacSaveEncoderData* encData);
int WebRtcIsac_DecodePitchGain(Bitstr* streamdata, int16_t* PitchGain_Q12);
int WebRtcIsac_DecodePitchLag(Bitstr* streamdata,
                              int16_t* PitchGain_Q12,
                              double* PitchLag);

int WebRtcIsac_DecodeFrameLen(Bitstr* streamdata, int16_t* framelength);
int WebRtcIsac_EncodeFrameLen(int16_t framelength, Bitstr* streamdata);
int WebRtcIsac_DecodeSendBW(Bitstr* streamdata, int16_t* BWno);
void WebRtcIsac_EncodeReceiveBw(int* BWno, Bitstr* streamdata);

/* Step-down: direct-form polynomial to reflection coefficients. */
void WebRtcIsac_Poly2Rc(double* a, int N, double* RC);

/* Step-up: reflection coefficients to direct-form polynomial. */
void WebRtcIsac_Rc2Poly(double* RC, int N, double* a);

void WebRtcIsac_TranscodeLPCCoef(double* LPCCoef_lo,
                                 double* LPCCoef_hi,
                                 int* index_g);
/******************************************************************************
 * WebRtcIsac_EncodeLpcGainUb()
 * Encode the LPC gains of the sub-frames.
 *
 * Input/output:
 *  - lpGains       : a buffer containing 'SUBFRAME' LP gains to be
 *                    encoded. The input values are overwritten by the
 *                    quantized values.
 *  - streamdata    : pointer to a structure containing the encoded data
 *                    and the parameters needed for entropy coding.
 *
 * Output:
 *  - lpcGainIndex  : quantization indices for the LPC gains; stored to
 *                    be used for FEC.
 */
void WebRtcIsac_EncodeLpcGainUb(double* lpGains,
                                Bitstr* streamdata,
                                int* lpcGainIndex);

/******************************************************************************
 * WebRtcIsac_StoreLpcGainUb()
 * Store the LPC gains of the sub-frames in 'streamdata'.
 *
 * Input:
 *  - lpGains       : a buffer containing 'SUBFRAME' LP gains to be
 *                    encoded.
 *
 * Input/output:
 *  - streamdata    : pointer to a structure containing the encoded data
 *                    and the parameters needed for entropy coding.
 */
void WebRtcIsac_StoreLpcGainUb(double* lpGains, Bitstr* streamdata);

/******************************************************************************
 * WebRtcIsac_DecodeLpcGainUb()
 * Decode the LPC gains of the sub-frames.
 *
 * Input/output:
 *  - streamdata    : pointer to a structure containing the encoded data
 *                    and the parameters needed for entropy coding.
 *
 * Output:
 *  - lpGains       : a buffer where the decoded LPC gains are stored.
 *
 * Return value      : 0 if succeeded.
 *                    <0 if failed.
 */
int16_t WebRtcIsac_DecodeLpcGainUb(double* lpGains, Bitstr* streamdata);
/******************************************************************************
 * WebRtcIsac_EncodeBandwidth()
 * Encode whether the bandwidth of the encoded audio is 0-12 kHz or
 * 0-16 kHz.
 *
 * Input:
 *  - bandwidth     : an enumerator specifying if the codec is in
 *                    0-12 kHz or 0-16 kHz mode.
 *
 * Input/output:
 *  - streamData    : pointer to a structure containing the encoded data
 *                    and the parameters needed for entropy coding.
 *
 * Return value      : 0 if succeeded.
 *                    <0 if failed.
 */
int16_t WebRtcIsac_EncodeBandwidth(enum ISACBandwidth bandwidth,
                                   Bitstr* streamData);

/******************************************************************************
 * WebRtcIsac_DecodeBandwidth()
 * Decode the bandwidth of the encoded audio, i.e. whether the bandwidth
 * is 0-12 kHz or 0-16 kHz.
 *
 * Input/output:
 *  - streamData    : pointer to a structure containing the encoded data
 *                    and the parameters needed for entropy coding.
 *
 * Output:
 *  - bandwidth     : an enumerator specifying if the codec is in
 *                    0-12 kHz or 0-16 kHz mode.
 *
 * Return value      : 0 if succeeded.
 *                    <0 if failed.
 */
int16_t WebRtcIsac_DecodeBandwidth(Bitstr* streamData,
                                   enum ISACBandwidth* bandwidth);

/******************************************************************************
 * WebRtcIsac_EncodeJitterInfo()
 * Encode the jitter information.
 *
 * Input/output:
 *  - streamData    : pointer to a structure containing the encoded data
 *                    and the parameters needed for entropy coding.
 *
 * Input:
 *  - jitterIndex   : one bit of info specifying if the channel is in
 *                    high/low jitter. Zero indicates low jitter and one
 *                    indicates high jitter.
 *
 * Return value      : 0 if succeeded.
 *                    <0 if failed.
 */
int16_t WebRtcIsac_EncodeJitterInfo(int32_t jitterIndex, Bitstr* streamData);

/******************************************************************************
 * WebRtcIsac_DecodeJitterInfo()
 * Decode the jitter information.
 *
 * Input/output:
 *  - streamData    : pointer to a structure containing the encoded data
 *                    and the parameters needed for entropy coding.
 *
 * Output:
 *  - jitterInfo    : one bit of info specifying if the channel is in
 *                    high/low jitter. Zero indicates low jitter and one
 *                    indicates high jitter.
 *
 * Return value      : 0 if succeeded.
 *                    <0 if failed.
 */
int16_t WebRtcIsac_DecodeJitterInfo(Bitstr* streamData, int32_t* jitterInfo);
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENTROPY_CODING_H_ */

View File

@ -1,114 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* filterbanks.c
*
* This file contains function WebRtcIsac_AllPassFilter2Float,
* WebRtcIsac_SplitAndFilter, and WebRtcIsac_FilterAndCombine
* which implement filterbanks that produce decimated lowpass and
* highpass versions of a signal, and performs reconstruction.
*
*/
#include "modules/audio_coding/codecs/isac/main/source/settings.h"
#include "modules/audio_coding/codecs/isac/main/source/codec.h"
#include "modules/audio_coding/codecs/isac/main/source/isac_vad.h"
/* Combining */

/* Coefficients of the two high-pass output sections applied after the
   channels are merged, stored as {a1, a2, b1 - b0*a1, b2 - b0*a2}. */

/* HPstcoeff_out_1 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
static const float kHpStCoefOut1Float[4] =
{-1.99701049409000f, 0.99714204490000f, 0.01701049409000f, -0.01704204490000f};

/* HPstcoeff_out_2 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
static const float kHpStCoefOut2Float[4] =
{-1.98645294509837f, 0.98672435560000f, 0.00645294509837f, -0.00662435560000f};
/* WebRtcIsac_FilterAndCombineFloat
 *
 * Decoder-side synthesis filterbank, the sibling of
 * WebRtcIsac_SplitAndFilter: takes the two decimated channels and
 * reconstructs a fullband output frame.
 *
 * InLP         : low-pass input, FRAMESAMPLES_HALF samples.
 * InHP         : high-pass input, FRAMESAMPLES_HALF samples.
 * Out          : output buffer; receives FRAMESAMPLES reconstructed
 *                samples.
 * postfiltdata : filterbank state carried over from the previous decoding
 *                iteration; updated in place for the next one.
 */
void WebRtcIsac_FilterAndCombineFloat(float *InLP,
                                      float *InHP,
                                      float *Out,
                                      PostFiltBankstr *postfiltdata)
{
  int i;
  float upper_ch[FRAMESAMPLES + MAX_AR_MODEL_ORDER];
  float lower_ch[FRAMESAMPLES + MAX_AR_MODEL_ORDER];

  /* Form the polyphase branches: sum feeds the upper channel, difference
     the lower channel. */
  for (i = 0; i < FRAMESAMPLES_HALF; i++) {
    upper_ch[i] = InLP[i] + InHP[i];
    lower_ch[i] = InLP[i] - InHP[i];
  }

  /* At the decoder the all-pass factor sets are swapped relative to the
     encoder: the upper channel is filtered with the factors that served
     as the lower channel's at the encoder, and vice versa. */
  WebRtcIsac_AllPassFilter2Float(upper_ch, WebRtcIsac_kLowerApFactorsFloat,
                                 FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS,
                                 postfiltdata->STATE_0_UPPER_float);
  WebRtcIsac_AllPassFilter2Float(lower_ch, WebRtcIsac_kUpperApFactorsFloat,
                                 FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS,
                                 postfiltdata->STATE_0_LOWER_float);

  /* Interleave the two branches into the full-length output. */
  for (i = 0; i < FRAMESAMPLES_HALF; i++) {
    Out[2 * i] = lower_ch[i];
    Out[2 * i + 1] = upper_ch[i];
  }

  /* First recursive high-pass section; `w` is the new filter state and
     `y` the filtered sample. */
  for (i = 0; i < FRAMESAMPLES; i++) {
    float y = Out[i] + kHpStCoefOut1Float[2] * postfiltdata->HPstates1_float[0] +
              kHpStCoefOut1Float[3] * postfiltdata->HPstates1_float[1];
    float w = Out[i] - kHpStCoefOut1Float[0] * postfiltdata->HPstates1_float[0] -
              kHpStCoefOut1Float[1] * postfiltdata->HPstates1_float[1];
    postfiltdata->HPstates1_float[1] = postfiltdata->HPstates1_float[0];
    postfiltdata->HPstates1_float[0] = w;
    Out[i] = y;
  }

  /* Second high-pass section, same structure with its own state. */
  for (i = 0; i < FRAMESAMPLES; i++) {
    float y = Out[i] + kHpStCoefOut2Float[2] * postfiltdata->HPstates2_float[0] +
              kHpStCoefOut2Float[3] * postfiltdata->HPstates2_float[1];
    float w = Out[i] - kHpStCoefOut2Float[0] * postfiltdata->HPstates2_float[0] -
              kHpStCoefOut2Float[1] * postfiltdata->HPstates2_float[1];
    postfiltdata->HPstates2_float[1] = postfiltdata->HPstates2_float[0];
    postfiltdata->HPstates2_float[0] = w;
    Out[i] = y;
  }
}

View File

@ -1,72 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/* encode.c - Encoding function for the iSAC coder */
#include <math.h>
#include "modules/audio_coding/codecs/isac/main/source/structs.h"
#include "modules/audio_coding/codecs/isac/main/source/codec.h"
#include "modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
/* Reset the masking-filter state: clear the low/high band data buffers,
 * correlation buffers and pre/post filter states, and seed OldEnergy so
 * the first frame's energy ratio is well-defined. */
void WebRtcIsac_InitMasking(MaskFiltstr *maskdata) {
  int i;

  for (i = 0; i < WINLEN; i++) {
    maskdata->DataBufferLo[i] = 0.0;
    maskdata->DataBufferHi[i] = 0.0;
  }

  for (i = 0; i < ORDERLO + 1; i++) {
    maskdata->CorrBufLo[i] = 0.0;
    maskdata->PreStateLoF[i] = 0.0;
    maskdata->PreStateLoG[i] = 0.0;
    maskdata->PostStateLoF[i] = 0.0;
    maskdata->PostStateLoG[i] = 0.0;
  }

  for (i = 0; i < ORDERHI + 1; i++) {
    maskdata->CorrBufHi[i] = 0.0;
    maskdata->PreStateHiF[i] = 0.0;
    maskdata->PreStateHiG[i] = 0.0;
    maskdata->PostStateHiF[i] = 0.0;
    maskdata->PostStateHiG[i] = 0.0;
  }

  maskdata->OldEnergy = 10.0;
}
/* Zero the post-filterbank state: the all-pass filter states (both
 * fixed- and floating-point copies) and the two high-pass sections. */
void WebRtcIsac_InitPostFilterbank(PostFiltBankstr *postfiltdata)
{
  int i;

  for (i = 0; i < 2 * POSTQORDER; i++) {
    postfiltdata->STATE_0_LOWER[i] = 0;
    postfiltdata->STATE_0_UPPER[i] = 0;
    postfiltdata->STATE_0_LOWER_float[i] = 0;
    postfiltdata->STATE_0_UPPER_float[i] = 0;
  }

  /* High pass filter states, two taps per section. */
  for (i = 0; i < 2; i++) {
    postfiltdata->HPstates1[i] = 0.0;
    postfiltdata->HPstates2[i] = 0.0;
    postfiltdata->HPstates1_float[i] = 0.0f;
    postfiltdata->HPstates2_float[i] = 0.0f;
  }
}

File diff suppressed because it is too large Load Diff

View File

@ -1,100 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_FLOAT_TYPE_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_FLOAT_TYPE_H_
#include "modules/audio_coding/codecs/isac/main/include/isac.h"
namespace webrtc {

// Traits-style adapter that maps a generic codec interface onto the
// floating-point iSAC C API (WebRtcIsac_*). Every member is a thin
// forwarding wrapper; see the underlying C functions for semantics.
struct IsacFloat {
  using instance_type = ISACStruct;
  // The float codec supports super-wideband operation.
  static const bool has_swb = true;

  static inline int16_t Control(instance_type* inst,
                                int32_t rate,
                                int framesize) {
    return WebRtcIsac_Control(inst, rate, framesize);
  }
  static inline int16_t ControlBwe(instance_type* inst,
                                   int32_t rate_bps,
                                   int frame_size_ms,
                                   int16_t enforce_frame_size) {
    return WebRtcIsac_ControlBwe(inst, rate_bps, frame_size_ms,
                                 enforce_frame_size);
  }
  static inline int16_t Create(instance_type** inst) {
    return WebRtcIsac_Create(inst);
  }
  static inline int DecodeInternal(instance_type* inst,
                                   const uint8_t* encoded,
                                   size_t len,
                                   int16_t* decoded,
                                   int16_t* speech_type) {
    return WebRtcIsac_Decode(inst, encoded, len, decoded, speech_type);
  }
  // Packet-loss concealment: synthesize `num_lost_frames` frames.
  static inline size_t DecodePlc(instance_type* inst,
                                 int16_t* decoded,
                                 size_t num_lost_frames) {
    return WebRtcIsac_DecodePlc(inst, decoded, num_lost_frames);
  }
  static inline void DecoderInit(instance_type* inst) {
    WebRtcIsac_DecoderInit(inst);
  }
  static inline int Encode(instance_type* inst,
                           const int16_t* speech_in,
                           uint8_t* encoded) {
    return WebRtcIsac_Encode(inst, speech_in, encoded);
  }
  static inline int16_t EncoderInit(instance_type* inst, int16_t coding_mode) {
    return WebRtcIsac_EncoderInit(inst, coding_mode);
  }
  static inline uint16_t EncSampRate(instance_type* inst) {
    return WebRtcIsac_EncSampRate(inst);
  }
  static inline int16_t Free(instance_type* inst) {
    return WebRtcIsac_Free(inst);
  }
  static inline int16_t GetErrorCode(instance_type* inst) {
    return WebRtcIsac_GetErrorCode(inst);
  }
  static inline int16_t GetNewFrameLen(instance_type* inst) {
    return WebRtcIsac_GetNewFrameLen(inst);
  }
  static inline int16_t SetDecSampRate(instance_type* inst,
                                       uint16_t sample_rate_hz) {
    return WebRtcIsac_SetDecSampRate(inst, sample_rate_hz);
  }
  static inline int16_t SetEncSampRate(instance_type* inst,
                                       uint16_t sample_rate_hz) {
    return WebRtcIsac_SetEncSampRate(inst, sample_rate_hz);
  }
  static inline void SetEncSampRateInDecoder(instance_type* inst,
                                             uint16_t sample_rate_hz) {
    WebRtcIsac_SetEncSampRateInDecoder(inst, sample_rate_hz);
  }
  static inline void SetInitialBweBottleneck(instance_type* inst,
                                             int bottleneck_bits_per_second) {
    WebRtcIsac_SetInitialBweBottleneck(inst, bottleneck_bits_per_second);
  }
  static inline int16_t SetMaxPayloadSize(instance_type* inst,
                                          int16_t max_payload_size_bytes) {
    return WebRtcIsac_SetMaxPayloadSize(inst, max_payload_size_bytes);
  }
  static inline int16_t SetMaxRate(instance_type* inst, int32_t max_bit_rate) {
    return WebRtcIsac_SetMaxRate(inst, max_bit_rate);
  }
};
}  // namespace webrtc
#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_FLOAT_TYPE_H_

View File

@ -1,219 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* lattice.c
*
* contains the normalized lattice filter routines (MA and AR) for iSAC codec
*
*/
#include <math.h>
#include <memory.h>
#include <string.h>
#ifdef WEBRTC_ANDROID
#include <stdlib.h>
#endif
#include "modules/audio_coding/codecs/isac/main/source/settings.h"
#include "modules/audio_coding/codecs/isac/main/source/codec.h"
/* filter the signal using normalized lattice filter */
/* MA filter */
/*
 * Normalized lattice MA (analysis) filter.
 *
 * orderCoef  : filter order.
 * stateF/G   : in/out; per-order lattice states carried between calls.
 * lat_in     : input signal, SUBFRAMES blocks of HALF_SUBFRAMELEN samples.
 * filtcoeflo : direct-form coefficient sets, one (orderCoef+1)-vector per
 *              sub-frame; element 0 of each vector is the gain.
 * lat_out    : filtered output, same layout as lat_in.
 */
void WebRtcIsac_NormLatticeFilterMa(int orderCoef,
                                    float *stateF,
                                    float *stateG,
                                    float *lat_in,
                                    double *filtcoeflo,
                                    double *lat_out)
{
  int n,k,i,u,temp1;
  int ord_1 = orderCoef+1;
  float sth[MAX_AR_MODEL_ORDER];
  float cth[MAX_AR_MODEL_ORDER];
  float inv_cth[MAX_AR_MODEL_ORDER];
  double a[MAX_AR_MODEL_ORDER+1];
  /* f/g hold the forward/backward lattice signals for every order. */
  float f[MAX_AR_MODEL_ORDER+1][HALF_SUBFRAMELEN], g[MAX_AR_MODEL_ORDER+1][HALF_SUBFRAMELEN];
  float gain1;

  for (u=0;u<SUBFRAMES;u++)
  {
    /* Set the Direct Form coefficients (a[0] = 1 is implicit in the
       stored vectors, whose element 0 carries the gain instead). */
    temp1 = u*ord_1;
    a[0] = 1;
    memcpy(a+1, filtcoeflo+temp1+1, sizeof(double) * (ord_1-1));

    /* compute lattice filter coefficients */
    WebRtcIsac_Dir2Lat(a,orderCoef,sth,cth);

    /* Fold the per-stage cosine terms into the overall gain and cache
       their reciprocals for the recursion below. */
    gain1 = (float)filtcoeflo[temp1];
    for (k=0;k<orderCoef;k++)
    {
      gain1 *= cth[k];
      inv_cth[k] = 1/cth[k];
    }

    /* normalized lattice filter */
    /*****************************/

    /* initial conditions: order-0 forward/backward signals are the input */
    for (i=0;i<HALF_SUBFRAMELEN;i++)
    {
      f[0][i] = lat_in[i + u * HALF_SUBFRAMELEN];
      g[0][i] = lat_in[i + u * HALF_SUBFRAMELEN];
    }

    /* Get the state of f & g for the first input sample, for all orders,
       using the states saved from the previous sub-frame. */
    for (i=1;i<ord_1;i++)
    {
      f[i][0] = inv_cth[i-1]*(f[i-1][0] + sth[i-1]*stateG[i-1]);
      g[i][0] = cth[i-1]*stateG[i-1] + sth[i-1]* f[i][0];
    }

    /* filtering: one lattice stage per reflection coefficient */
    for(k=0;k<orderCoef;k++)
    {
      for(n=0;n<(HALF_SUBFRAMELEN-1);n++)
      {
        f[k+1][n+1] = inv_cth[k]*(f[k][n+1] + sth[k]*g[k][n]);
        g[k+1][n+1] = cth[k]*g[k][n] + sth[k]* f[k+1][n+1];
      }
    }

    /* Scale the highest-order forward signal into the output. */
    for(n=0;n<HALF_SUBFRAMELEN;n++)
    {
      lat_out[n + u * HALF_SUBFRAMELEN] = gain1 * f[orderCoef][n];
    }

    /* save the states for the next sub-frame */
    for (i=0;i<ord_1;i++)
    {
      stateF[i] = f[i][HALF_SUBFRAMELEN-1];
      stateG[i] = g[i][HALF_SUBFRAMELEN-1];
    }
    /* process next sub-frame */
  }

  return;
}
/*/////////////////// AR filter ///////////////////////////////*/
/* filter the signal using normalized lattice filter */
/*
 * Normalized lattice AR (synthesis) filter — the inverse of the MA
 * filter above. Same parameter layout, except lat_in is double and
 * lat_out is float.
 */
void WebRtcIsac_NormLatticeFilterAr(int orderCoef,
                                    float *stateF,
                                    float *stateG,
                                    double *lat_in,
                                    double *lo_filt_coef,
                                    float *lat_out)
{
  int n,k,i,u,temp1;
  int ord_1 = orderCoef+1;
  float sth[MAX_AR_MODEL_ORDER];
  float cth[MAX_AR_MODEL_ORDER];
  double a[MAX_AR_MODEL_ORDER+1];
  /* ARf/ARg hold the forward/backward lattice signals for every order. */
  float ARf[MAX_AR_MODEL_ORDER+1][HALF_SUBFRAMELEN], ARg[MAX_AR_MODEL_ORDER+1][HALF_SUBFRAMELEN];
  float gain1,inv_gain1;

  for (u=0;u<SUBFRAMES;u++)
  {
    /* set the denominator and numerator of the Direct Form */
    temp1 = u*ord_1;
    a[0] = 1;
    memcpy(a+1, lo_filt_coef+temp1+1, sizeof(double) * (ord_1-1));

    WebRtcIsac_Dir2Lat(a,orderCoef,sth,cth);

    /* Fold the per-stage cosine terms into the overall gain. */
    gain1 = (float)lo_filt_coef[temp1];
    for (k=0;k<orderCoef;k++)
    {
      gain1 = cth[k]*gain1;
    }

    /* initial conditions: the input, scaled by 1/gain, seeds the
       highest-order forward signal */
    inv_gain1 = 1/gain1;
    for (i=0;i<HALF_SUBFRAMELEN;i++)
    {
      ARf[orderCoef][i] = (float)lat_in[i + u * HALF_SUBFRAMELEN]*inv_gain1;
    }

    /* get the state of f & g for the first input, for all orders */
    for (i=orderCoef-1;i>=0;i--)
    {
      ARf[i][0] = cth[i]*ARf[i+1][0] - sth[i]*stateG[i];
      ARg[i+1][0] = sth[i]*ARf[i+1][0] + cth[i]* stateG[i];
    }
    ARg[0][0] = ARf[0][0];

    /* filtering: run the stages top-down for each sample */
    for(n=0;n<(HALF_SUBFRAMELEN-1);n++)
    {
      for(k=orderCoef-1;k>=0;k--)
      {
        ARf[k][n+1] = cth[k]*ARf[k+1][n+1] - sth[k]*ARg[k][n];
        ARg[k+1][n+1] = sth[k]*ARf[k+1][n+1] + cth[k]* ARg[k][n];
      }
      ARg[0][n+1] = ARf[0][n+1];
    }

    /* The order-0 forward signal is the synthesized output. */
    memcpy(lat_out+u * HALF_SUBFRAMELEN, &(ARf[0][0]), sizeof(float) * HALF_SUBFRAMELEN);

    /* cannot use memcpy in the following: the source elements are not
       contiguous (one per row) */
    for (i=0;i<ord_1;i++)
    {
      stateF[i] = ARf[i][HALF_SUBFRAMELEN-1];
      stateG[i] = ARg[i][HALF_SUBFRAMELEN-1];
    }
  }
  return;
}
/* compute the reflection coefficients using the step-down procedure */
/* converts the direct form parameters to lattice form */
/* a is a vector containing the direct form coefficients, according to
   A(z) = a(1) + a(2)*z + a(3)*z^2 + ... + a(M+1)*z^M
   On return, sth holds the reflection coefficients (the "sine" terms)
   and cth the complementary terms, with cth[m]^2 + sth[m]^2 == 1.
   NOTE: `a` is modified in place by the step-down recursion. */
void WebRtcIsac_Dir2Lat(double *a,
                        int orderCoef,
                        float *sth,
                        float *cth)
{
  int m, k;
  float tmp[MAX_AR_MODEL_ORDER];
  float tmp_inv, cth2;

  /* The highest-order reflection coefficient is the last a-coefficient. */
  sth[orderCoef-1] = (float)a[orderCoef];
  cth2 = 1.0f - sth[orderCoef-1] * sth[orderCoef-1];
  cth[orderCoef-1] = (float)sqrt(cth2);
  for (m=orderCoef-1; m>0; m--)
  {
    /* Step down one order: recompute the lower-order polynomial... */
    tmp_inv = 1.0f / cth2;
    for (k=1; k<=m; k++)
    {
      tmp[k] = ((float)a[k] - sth[m] * (float)a[m-k+1]) * tmp_inv;
    }

    for (k=1; k<m; k++)
    {
      a[k] = tmp[k];
    }

    /* ...whose last coefficient is the next reflection coefficient. */
    sth[m-1] = tmp[m];
    cth2 = 1 - sth[m-1] * sth[m-1];
    cth[m-1] = (float)sqrt(cth2);
  }
}

View File

@ -1,496 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <math.h>
#include <string.h>
#include "modules/audio_coding/codecs/isac/main/source/lpc_analysis.h"
#include "modules/audio_coding/codecs/isac/main/source/settings.h"
#include "modules/audio_coding/codecs/isac/main/source/codec.h"
#include "modules/audio_coding/codecs/isac/main/source/entropy_coding.h"
#include "modules/audio_coding/codecs/isac/main/source/filter_functions.h"
#include "modules/audio_coding/codecs/isac/main/source/isac_vad.h"
/* window */
/* Correlation window applied to the analysis buffer before computing the
 * autocorrelation for LPC estimation; WINLEN taps, normalized to sum 1.
 * Matlab generation code:
 * t = (1:256)/257; r = 1-(1-t).^.45; w = sin(r*pi).^3; w = w/sum(w); plot((1:256)/8, w); grid;
 * for k=1:16, fprintf(1, '%.8f, ', w(k*16 + (-15:0))); fprintf(1, '\n'); end
 */
static const double kLpcCorrWindow[WINLEN] = {
  0.00000000, 0.00000001, 0.00000004, 0.00000010, 0.00000020,
  0.00000035, 0.00000055, 0.00000083, 0.00000118, 0.00000163,
  0.00000218, 0.00000283, 0.00000361, 0.00000453, 0.00000558, 0.00000679,
  0.00000817, 0.00000973, 0.00001147, 0.00001342, 0.00001558,
  0.00001796, 0.00002058, 0.00002344, 0.00002657, 0.00002997,
  0.00003365, 0.00003762, 0.00004190, 0.00004651, 0.00005144, 0.00005673,
  0.00006236, 0.00006837, 0.00007476, 0.00008155, 0.00008875,
  0.00009636, 0.00010441, 0.00011290, 0.00012186, 0.00013128,
  0.00014119, 0.00015160, 0.00016252, 0.00017396, 0.00018594, 0.00019846,
  0.00021155, 0.00022521, 0.00023946, 0.00025432, 0.00026978,
  0.00028587, 0.00030260, 0.00031998, 0.00033802, 0.00035674,
  0.00037615, 0.00039626, 0.00041708, 0.00043863, 0.00046092, 0.00048396,
  0.00050775, 0.00053233, 0.00055768, 0.00058384, 0.00061080,
  0.00063858, 0.00066720, 0.00069665, 0.00072696, 0.00075813,
  0.00079017, 0.00082310, 0.00085692, 0.00089164, 0.00092728, 0.00096384,
  0.00100133, 0.00103976, 0.00107914, 0.00111947, 0.00116077,
  0.00120304, 0.00124630, 0.00129053, 0.00133577, 0.00138200,
  0.00142924, 0.00147749, 0.00152676, 0.00157705, 0.00162836, 0.00168070,
  0.00173408, 0.00178850, 0.00184395, 0.00190045, 0.00195799,
  0.00201658, 0.00207621, 0.00213688, 0.00219860, 0.00226137,
  0.00232518, 0.00239003, 0.00245591, 0.00252284, 0.00259079, 0.00265977,
  0.00272977, 0.00280078, 0.00287280, 0.00294582, 0.00301984,
  0.00309484, 0.00317081, 0.00324774, 0.00332563, 0.00340446,
  0.00348421, 0.00356488, 0.00364644, 0.00372889, 0.00381220, 0.00389636,
  0.00398135, 0.00406715, 0.00415374, 0.00424109, 0.00432920,
  0.00441802, 0.00450754, 0.00459773, 0.00468857, 0.00478001,
  0.00487205, 0.00496464, 0.00505775, 0.00515136, 0.00524542, 0.00533990,
  0.00543476, 0.00552997, 0.00562548, 0.00572125, 0.00581725,
  0.00591342, 0.00600973, 0.00610612, 0.00620254, 0.00629895,
  0.00639530, 0.00649153, 0.00658758, 0.00668341, 0.00677894, 0.00687413,
  0.00696891, 0.00706322, 0.00715699, 0.00725016, 0.00734266,
  0.00743441, 0.00752535, 0.00761540, 0.00770449, 0.00779254,
  0.00787947, 0.00796519, 0.00804963, 0.00813270, 0.00821431, 0.00829437,
  0.00837280, 0.00844949, 0.00852436, 0.00859730, 0.00866822,
  0.00873701, 0.00880358, 0.00886781, 0.00892960, 0.00898884,
  0.00904542, 0.00909923, 0.00915014, 0.00919805, 0.00924283, 0.00928436,
  0.00932252, 0.00935718, 0.00938821, 0.00941550, 0.00943890,
  0.00945828, 0.00947351, 0.00948446, 0.00949098, 0.00949294,
  0.00949020, 0.00948262, 0.00947005, 0.00945235, 0.00942938, 0.00940099,
  0.00936704, 0.00932738, 0.00928186, 0.00923034, 0.00917268,
  0.00910872, 0.00903832, 0.00896134, 0.00887763, 0.00878706,
  0.00868949, 0.00858478, 0.00847280, 0.00835343, 0.00822653, 0.00809199,
  0.00794970, 0.00779956, 0.00764145, 0.00747530, 0.00730103,
  0.00711857, 0.00692787, 0.00672888, 0.00652158, 0.00630597,
  0.00608208, 0.00584994, 0.00560962, 0.00536124, 0.00510493, 0.00484089,
  0.00456935, 0.00429062, 0.00400505, 0.00371310, 0.00341532,
  0.00311238, 0.00280511, 0.00249452, 0.00218184, 0.00186864,
  0.00155690, 0.00124918, 0.00094895, 0.00066112, 0.00039320, 0.00015881
};
/* Derives the noise-scale factor `varscale` for the low band from the
 * frame's energy contour and average pitch gain, and updates the running
 * energy state.
 *
 * input          : low-band signal, read at half rate starting at the
 *                  lookahead offset (QLOOKAHEAD/2).
 * pitchGains_Q12 : pitch gains of the 4 sub-frames, in Q12.
 * oldEnergy      : in/out; energy of the previous frame's last quarter.
 * varscale       : out; in (0, 1] — low pitch gain combined with a flat
 *                  energy contour pushes it toward 1.
 *
 * Fixes vs. original: removed a stray empty statement (double ';;') and
 * the redundant '0.0 + 1.0 *' in the varscale expression; corrected the
 * comment that called the four frame quarters "halfs".
 */
static void WebRtcIsac_GetVars(const double* input,
                               const int16_t* pitchGains_Q12,
                               double* oldEnergy,
                               double* varscale) {
  double nrg[4], chng, pg;
  int k;
  double pitchGains[4] = {0, 0, 0, 0};

  /* Energies of the four quarters of the frame; the 0.0001 floor keeps
     the log10 ratios finite for silent input. */
  nrg[0] = 0.0001;
  for (k = QLOOKAHEAD / 2; k < (FRAMESAMPLES_QUARTER + QLOOKAHEAD) / 2; k++) {
    nrg[0] += input[k] * input[k];
  }
  nrg[1] = 0.0001;
  for (; k < (FRAMESAMPLES_HALF + QLOOKAHEAD) / 2; k++) {
    nrg[1] += input[k] * input[k];
  }
  nrg[2] = 0.0001;
  for (; k < (FRAMESAMPLES * 3 / 4 + QLOOKAHEAD) / 2; k++) {
    nrg[2] += input[k] * input[k];
  }
  nrg[3] = 0.0001;
  for (; k < (FRAMESAMPLES + QLOOKAHEAD) / 2; k++) {
    nrg[3] += input[k] * input[k];
  }

  /* Average level change between consecutive quarters, in dB. */
  chng = 0.25 * (fabs(10.0 * log10(nrg[3] / nrg[2])) +
                 fabs(10.0 * log10(nrg[2] / nrg[1])) +
                 fabs(10.0 * log10(nrg[1] / nrg[0])) +
                 fabs(10.0 * log10(nrg[0] / *oldEnergy)));

  /* Average pitch gain (Q12 -> linear; intermediate float cast kept to
     preserve the original rounding behavior). */
  pg = 0.0;
  for (k = 0; k < 4; k++) {
    pitchGains[k] = ((float)pitchGains_Q12[k]) / 4096;
    pg += pitchGains[k];
  }
  pg *= 0.25;

  /* If pitch gain is low and energy is constant, increase the noise
     level. NOTE(review): the Matlab sketch below uses -1.0 where the
     code uses -1.4; the code's constant is authoritative.
     Matlab: pg = 0:.01:.45;
     plot(pg, exp(-1.0 * exp(-200.0 * pg.*pg.*pg) / (1.0 + 0.4 * 0))) */
  *varscale = exp(-1.4 * exp(-200.0 * pg * pg * pg) / (1.0 + 0.4 * chng));

  *oldEnergy = nrg[3];
}
/* Upper-band counterpart of WebRtcIsac_GetVars(): derives `varscale`
 * from the energy contour alone (no pitch gains for this band) and
 * updates the running energy state. Reads FRAMESAMPLES/2 samples. */
static void WebRtcIsac_GetVarsUB(const double* input,
                                 double* oldEnergy,
                                 double* varscale) {
  double nrg[4], chng;
  int k;

  /* Energies of the four quarters of the frame; the 0.0001 floor keeps
     the log10 ratios finite for silent input. */
  nrg[0] = 0.0001;
  for (k = 0; k < (FRAMESAMPLES_QUARTER) / 2; k++) {
    nrg[0] += input[k]*input[k];
  }
  nrg[1] = 0.0001;
  for ( ; k < (FRAMESAMPLES_HALF) / 2; k++) {
    nrg[1] += input[k]*input[k];
  }
  nrg[2] = 0.0001;
  for ( ; k < (FRAMESAMPLES*3/4) / 2; k++) {
    nrg[2] += input[k]*input[k];
  }
  nrg[3] = 0.0001;
  for ( ; k < (FRAMESAMPLES) / 2; k++) {
    nrg[3] += input[k]*input[k];
  }

  /* Average level change between consecutive quarters, in dB. */
  chng = 0.25 * (fabs(10.0 * log10(nrg[3] / nrg[2])) +
                 fabs(10.0 * log10(nrg[2] / nrg[1])) +
                 fabs(10.0 * log10(nrg[1] / nrg[0])) +
                 fabs(10.0 * log10(nrg[0] / *oldEnergy)));

  /* A flat energy contour (small chng) raises the allowed noise level. */
  *varscale = exp( -1.4 / (1.0 + 0.4 * chng) );
  *oldEnergy = nrg[3];
}
/*
 * Masking-model LPC analysis for the lower band (0-8 kHz).
 * For each of the SUBFRAMES sub-frames, windowed autocorrelations of the
 * low (0-4 kHz) and high (4-8 kHz) split-band signals are smoothed across
 * sub-frames, converted to LPC coefficients with Levinson-Durbin, bandwidth
 * expanded, and written out preceded by a gain derived from the residual
 * energy, the target SNR and a hearing-threshold floor.
 *
 * Inputs:
 *   inLo/inHi          : low-/high-band input; inLo includes QLOOKAHEAD
 *                        look-ahead samples.
 *   maskdata           : per-instance state (analysis buffers, smoothed
 *                        correlations, previous-frame energy).
 *   signal_noise_ratio : target SNR in dB.
 *   pitchGains_Q12     : pitch gains of the 4 sub-frames, Q12.
 * Outputs:
 *   lo_coeff/hi_coeff  : per sub-frame, one gain followed by ORDERLO
 *                        (resp. ORDERHI) LPC coefficients; a[0] == 1 is
 *                        not stored.
 */
void WebRtcIsac_GetLpcCoefLb(double *inLo, double *inHi, MaskFiltstr *maskdata,
                             double signal_noise_ratio, const int16_t *pitchGains_Q12,
                             double *lo_coeff, double *hi_coeff)
{
  int k, n, j, pos1, pos2;
  double varscale;

  double DataLo[WINLEN], DataHi[WINLEN];
  double corrlo[ORDERLO+2], corrlo2[ORDERLO+1];
  double corrhi[ORDERHI+1];
  double k_veclo[ORDERLO], k_vechi[ORDERHI];  /* reflection coefficients */
  double a_LO[ORDERLO+1], a_HI[ORDERHI+1];
  double tmp, res_nrg;

  double FwdA, FwdB;

  /* hearing threshold level in dB; higher value gives more noise */
  const double HearThresOffset = -28.0;

  /* bandwidth expansion factors for low- and high band */
  const double gammaLo = 0.9;
  const double gammaHi = 0.8;

  /* less-noise-at-low-frequencies factor */
  double aa;

  /* convert from dB to signal level */
  const double H_T_H = pow(10.0, 0.05 * HearThresOffset);
  double S_N_R = pow(10.0, 0.05 * signal_noise_ratio) / 3.46; /* divide by sqrt(12) */

  /* change quallevel depending on pitch gains and level fluctuations */
  WebRtcIsac_GetVars(inLo, pitchGains_Q12, &(maskdata->OldEnergy), &varscale);

  /* less-noise-at-low-frequencies factor */
  aa = 0.35 * (0.5 + 0.5 * varscale);

  /* replace data in buffer by new look-ahead data */
  for (pos1 = 0; pos1 < QLOOKAHEAD; pos1++)
    maskdata->DataBufferLo[pos1 + WINLEN - QLOOKAHEAD] = inLo[pos1];

  for (k = 0; k < SUBFRAMES; k++) {

    /* Update input buffer and multiply signal with window */
    for (pos1 = 0; pos1 < WINLEN - UPDATE/2; pos1++) {
      maskdata->DataBufferLo[pos1] = maskdata->DataBufferLo[pos1 + UPDATE/2];
      maskdata->DataBufferHi[pos1] = maskdata->DataBufferHi[pos1 + UPDATE/2];
      DataLo[pos1] = maskdata->DataBufferLo[pos1] * kLpcCorrWindow[pos1];
      DataHi[pos1] = maskdata->DataBufferHi[pos1] * kLpcCorrWindow[pos1];
    }
    pos2 = k * UPDATE/2;
    for (n = 0; n < UPDATE/2; n++, pos1++) {
      /* Low band reads QLOOKAHEAD ahead; high band has no look-ahead. */
      maskdata->DataBufferLo[pos1] = inLo[QLOOKAHEAD + pos2];
      maskdata->DataBufferHi[pos1] = inHi[pos2++];
      DataLo[pos1] = maskdata->DataBufferLo[pos1] * kLpcCorrWindow[pos1];
      DataHi[pos1] = maskdata->DataBufferHi[pos1] * kLpcCorrWindow[pos1];
    }

    /* Get correlation coefficients */
    /* One extra lag for the low band: the HP correlation filter below
       needs corrlo[n+1]. */
    WebRtcIsac_AutoCorr(corrlo, DataLo, WINLEN, ORDERLO+1); /* computing autocorrelation */
    WebRtcIsac_AutoCorr(corrhi, DataHi, WINLEN, ORDERHI);

    /* less noise for lower frequencies, by filtering/scaling autocorrelation sequences */
    corrlo2[0] = (1.0+aa*aa) * corrlo[0] - 2.0*aa * corrlo[1];
    tmp = (1.0 + aa*aa);
    for (n = 1; n <= ORDERLO; n++) {
      corrlo2[n] = tmp * corrlo[n] - aa * (corrlo[n-1] + corrlo[n+1]);
    }
    tmp = (1.0+aa) * (1.0+aa);
    for (n = 0; n <= ORDERHI; n++) {
      corrhi[n] = tmp * corrhi[n];
    }

    /* add white noise floor */
    corrlo2[0] += 1e-6;
    corrhi[0] += 1e-6;

    FwdA = 0.01;
    FwdB = 0.01;
    /* recursive filtering of correlation over subframes */
    for (n = 0; n <= ORDERLO; n++) {
      maskdata->CorrBufLo[n] = FwdA * maskdata->CorrBufLo[n] + corrlo2[n];
      corrlo2[n] = ((1.0-FwdA)*FwdB) * maskdata->CorrBufLo[n] + (1.0-FwdB) * corrlo2[n];
    }
    for (n = 0; n <= ORDERHI; n++) {
      maskdata->CorrBufHi[n] = FwdA * maskdata->CorrBufHi[n] + corrhi[n];
      corrhi[n] = ((1.0-FwdA)*FwdB) * maskdata->CorrBufHi[n] + (1.0-FwdB) * corrhi[n];
    }

    /* compute prediction coefficients */
    WebRtcIsac_LevDurb(a_LO, k_veclo, corrlo2, ORDERLO);
    WebRtcIsac_LevDurb(a_HI, k_vechi, corrhi, ORDERHI);

    /* bandwidth expansion */
    tmp = gammaLo;
    for (n = 1; n <= ORDERLO; n++) {
      a_LO[n] *= tmp;
      tmp *= gammaLo;
    }

    /* residual energy */
    /* res_nrg = a' * R * a, using the Toeplitz symmetry of the
       correlation matrix: R[j][n] == corrlo2[|j - n|]. */
    res_nrg = 0.0;
    for (j = 0; j <= ORDERLO; j++) {
      for (n = 0; n <= j; n++) {
        res_nrg += a_LO[j] * corrlo2[j-n] * a_LO[n];
      }
      for (n = j+1; n <= ORDERLO; n++) {
        res_nrg += a_LO[j] * corrlo2[n-j] * a_LO[n];
      }
    }

    /* add hearing threshold and compute the gain */
    *lo_coeff++ = S_N_R / (sqrt(res_nrg) / varscale + H_T_H);

    /* copy coefficients to output array */
    for (n = 1; n <= ORDERLO; n++) {
      *lo_coeff++ = a_LO[n];
    }


    /* bandwidth expansion */
    tmp = gammaHi;
    for (n = 1; n <= ORDERHI; n++) {
      a_HI[n] *= tmp;
      tmp *= gammaHi;
    }

    /* residual energy */
    res_nrg = 0.0;
    for (j = 0; j <= ORDERHI; j++) {
      for (n = 0; n <= j; n++) {
        res_nrg += a_HI[j] * corrhi[j-n] * a_HI[n];
      }
      for (n = j+1; n <= ORDERHI; n++) {
        res_nrg += a_HI[j] * corrhi[n-j] * a_HI[n];
      }
    }

    /* add hearing threshold and compute the gain */
    *hi_coeff++ = S_N_R / (sqrt(res_nrg) / varscale + H_T_H);

    /* copy coefficients to output array */
    for (n = 1; n <= ORDERHI; n++) {
      *hi_coeff++ = a_HI[n];
    }
  }
}
/******************************************************************************
* WebRtcIsac_GetLpcCoefUb()
*
* Compute LP coefficients and correlation coefficients. At 12 kHz LP
 * coefficients of the first and the last sub-frame are computed. At 16 kHz
* LP coefficients of 4th, 8th and 12th sub-frames are computed. We always
* compute correlation coefficients of all sub-frames.
*
* Inputs:
* -inSignal : Input signal
* -maskdata : a structure keeping signal from previous frame.
* -bandwidth : specifies if the codec is in 0-16 kHz mode or
* 0-12 kHz mode.
*
* Outputs:
* -lpCoeff : pointer to a buffer where A-polynomials are
* written to (first coeff is 1 and it is not
* written)
* -corrMat : a matrix where correlation coefficients of each
* sub-frame are written to one row.
* -varscale : a scale used to compute LPC gains.
*/
/* Upper-band LPC analysis. Autocorrelations of every sub-frame are
 * exported in corrMat; bandwidth-expanded LPC coefficients (a[0] == 1 is
 * omitted) are written to lpCoeff only for the selected sub-frames:
 * first and last in 12 kHz mode, every 4th in 16 kHz mode. varscale
 * receives one noise-scale value (two in 16 kHz mode). */
void
WebRtcIsac_GetLpcCoefUb(
    double* inSignal,
    MaskFiltstr* maskdata,
    double* lpCoeff,
    double corrMat[][UB_LPC_ORDER + 1],
    double* varscale,
    int16_t bandwidth)
{
  int frameCntr, activeFrameCntr, n, pos1, pos2;
  int16_t criterion1;
  int16_t criterion2;
  /* Twice as many sub-frames when coding 0-16 kHz (two 8 kHz halves). */
  int16_t numSubFrames = SUBFRAMES * (1 + (bandwidth == isac16kHz));
  double data[WINLEN];
  double corrSubFrame[UB_LPC_ORDER+2];
  double reflecCoeff[UB_LPC_ORDER];

  double aPolynom[UB_LPC_ORDER+1];
  double tmp;

  /* bandwidth expansion factor */
  const double gamma = 0.9;

  /* change quallevel depending on pitch gains and level fluctuations */
  WebRtcIsac_GetVarsUB(inSignal, &(maskdata->OldEnergy), varscale);

  /* replace data in buffer by new look-ahead data */
  for(frameCntr = 0, activeFrameCntr = 0; frameCntr < numSubFrames;
      frameCntr++)
  {
    if(frameCntr == SUBFRAMES)
    {
      // we are in 16 kHz: the second half of the SWB frame gets its own
      // varscale, written to the next output slot.
      varscale++;
      WebRtcIsac_GetVarsUB(&inSignal[FRAMESAMPLES_HALF],
                           &(maskdata->OldEnergy), varscale);
    }

    /* Update input buffer and multiply signal with window */
    for(pos1 = 0; pos1 < WINLEN - UPDATE/2; pos1++)
    {
      maskdata->DataBufferLo[pos1] = maskdata->DataBufferLo[pos1 +
                                                            UPDATE/2];
      data[pos1] = maskdata->DataBufferLo[pos1] * kLpcCorrWindow[pos1];
    }
    pos2 = frameCntr * UPDATE/2;
    for(n = 0; n < UPDATE/2; n++, pos1++, pos2++)
    {
      maskdata->DataBufferLo[pos1] = inSignal[pos2];
      data[pos1] = maskdata->DataBufferLo[pos1] * kLpcCorrWindow[pos1];
    }

    /* Get correlation coefficients */
    /* computing autocorrelation */
    WebRtcIsac_AutoCorr(corrSubFrame, data, WINLEN, UB_LPC_ORDER+1);
    /* Export this sub-frame's correlations even when its LPC polynomial
       is not computed below. */
    memcpy(corrMat[frameCntr], corrSubFrame,
           (UB_LPC_ORDER+1)*sizeof(double));

    /* 12 kHz mode: LPC only for the first and the last sub-frame. */
    criterion1 = ((frameCntr == 0) || (frameCntr == (SUBFRAMES - 1))) &&
        (bandwidth == isac12kHz);
    /* 16 kHz mode: LPC for every 4th sub-frame. */
    criterion2 = (((frameCntr+1) % 4) == 0) &&
        (bandwidth == isac16kHz);
    if(criterion1 || criterion2)
    {
      /* add noise */
      corrSubFrame[0] += 1e-6;
      /* compute prediction coefficients */
      WebRtcIsac_LevDurb(aPolynom, reflecCoeff, corrSubFrame,
                         UB_LPC_ORDER);

      /* bandwidth expansion */
      tmp = gamma;
      for (n = 1; n <= UB_LPC_ORDER; n++)
      {
        *lpCoeff++ = aPolynom[n] * tmp;
        tmp *= gamma;
      }
      /* NOTE(review): activeFrameCntr is incremented but never read in
         this function - presumably leftover; confirm before removing. */
      activeFrameCntr++;
    }
  }
}
/******************************************************************************
* WebRtcIsac_GetLpcGain()
*
* Compute the LPC gains for each sub-frame, given the LPC of each sub-frame
* and the corresponding correlation coefficients.
*
* Inputs:
* -signal_noise_ratio : the desired SNR in dB.
* -numVecs : number of sub-frames
* -corrMat : a matrix of correlation coefficients where
* each row is a set of correlation coefficients of
* one sub-frame.
* -varscale : a scale computed when WebRtcIsac_GetLpcCoefUb()
* is called.
*
* Outputs:
* -gain : pointer to a buffer where LP gains are written.
*
*/
/*
 * Compute one LPC gain per sub-frame from the sub-frame's LPC polynomial
 * and correlation coefficients:
 *   gain = SNR / (sqrt(a' R a) / varscale + hearing_threshold)
 * where R is the Toeplitz correlation matrix of the sub-frame.
 *
 * signal_noise_ratio : target SNR in dB.
 * filtCoeffVecs      : per sub-frame, (UB_LPC_ORDER + 1) coefficients;
 *                      element 0 is skipped (a[0] is implicitly 1).
 * numVecs            : number of sub-frames.
 * gain               : out, one gain per sub-frame.
 * corrMat            : correlation coefficients, one row per sub-frame.
 * varscale           : scale(s) from WebRtcIsac_GetLpcCoefUb(); advanced
 *                      to the second value when entering the second half
 *                      of a SWB frame.
 */
void
WebRtcIsac_GetLpcGain(
    double signal_noise_ratio,
    const double* filtCoeffVecs,
    int numVecs,
    double* gain,
    double corrMat[][UB_LPC_ORDER + 1],
    const double* varscale)
{
  /* Hearing threshold: -28 dB offset converted to linear amplitude. */
  const double kHearThresOffset = -28.0;
  const double kHearThres = pow(10.0, 0.05 * kHearThresOffset);
  /* Target SNR as an amplitude ratio; 3.46 approximates sqrt(12). */
  const double snr = pow(10.0, 0.05 * signal_noise_ratio) / 3.46;
  /* Sized by ORDERLO (>= UB_LPC_ORDER here, matching the original code);
     only the first UB_LPC_ORDER + 1 entries are used. */
  double lpc[ORDERLO + 1];
  double resEnergy;
  int16_t vec, row, col;

  lpc[0] = 1;
  for (vec = 0; vec < numVecs; vec++) {
    if (vec == SUBFRAMES) {
      // Second half of a SWB frame: switch to its own varscale.
      varscale++;
    }

    /* Coefficients 1..UB_LPC_ORDER of this sub-frame (a[0] stays 1). */
    memcpy(&lpc[1],
           &filtCoeffVecs[(vec * (UB_LPC_ORDER + 1)) + 1],
           sizeof(double) * UB_LPC_ORDER);

    /* Residual energy a' * R * a using the Toeplitz symmetry of R:
       R[row][col] == corrMat[vec][|row - col|]. */
    resEnergy = 0.0;
    for (row = 0; row <= UB_LPC_ORDER; row++) {
      for (col = 0; col <= UB_LPC_ORDER; col++) {
        const int lag = (col <= row) ? (row - col) : (col - row);
        resEnergy += lpc[row] * corrMat[vec][lag] * lpc[col];
      }
    }

    /* add hearing threshold and compute the gain */
    gain[vec] = snr / (sqrt(resEnergy) / *varscale + kHearThres);
  }
}

View File

@ -1,46 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* lpc_analysis.h
*
* LPC functions
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_ANALYSIS_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_ANALYSIS_H_

#include "modules/audio_coding/codecs/isac/main/source/settings.h"
#include "modules/audio_coding/codecs/isac/main/source/structs.h"

/* Lower-band (0-8 kHz) masking-model LPC analysis. Per sub-frame, writes
   one gain followed by the LPC coefficients for each of the two split
   bands (a[0] == 1 is not written). */
void WebRtcIsac_GetLpcCoefLb(double* inLo,
                             double* inHi,
                             MaskFiltstr* maskdata,
                             double signal_noise_ratio,
                             const int16_t* pitchGains_Q12,
                             double* lo_coeff,
                             double* hi_coeff);

/* Per-sub-frame upper-band LPC gains from the LPC polynomials and the
   correlation coefficients returned by WebRtcIsac_GetLpcCoefUb(). */
void WebRtcIsac_GetLpcGain(double signal_noise_ratio,
                           const double* filtCoeffVecs,
                           int numVecs,
                           double* gain,
                           double corrLo[][UB_LPC_ORDER + 1],
                           const double* varscale);

/* Upper-band LPC analysis; correlations of all sub-frames plus LPC
   coefficients of the selected sub-frames (mode-dependent). */
void WebRtcIsac_GetLpcCoefUb(double* inSignal,
                             MaskFiltstr* maskdata,
                             double* lpCoeff,
                             double corr[][UB_LPC_ORDER + 1],
                             double* varscale,
                             int16_t bandwidth);

#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_ANALYSIS_H_ */

View File

@ -1,136 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* SWB_KLT_Tables_LPCGain.c
*
* This file defines tables used for entropy coding of LPC Gain
* of upper-band.
*
*/
#include "modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h"
#include "modules/audio_coding/codecs/isac/main/source/settings.h"
/* Uniform quantizer step size for the decorrelated upper-band LPC gains. */
const double WebRtcIsac_kQSizeLpcGain = 0.100000;

/* Mean LPC gain, removed before quantization (declared in
   lpc_shape_swb12_tables.h - presumably historical; see that header). */
const double WebRtcIsac_kMeanLpcGain = -3.3822;

/*
 * The smallest reconstruction points for quantization of
 * LPC gains.
 */
const double WebRtcIsac_kLeftRecPointLpcGain[SUBFRAMES] =
{
  -0.800000, -1.000000, -1.200000, -2.200000, -3.000000, -12.700000
};

/*
 * Number of reconstruction points of quantizers for LPC Gains.
 */
const int16_t WebRtcIsac_kNumQCellLpcGain[SUBFRAMES] =
{
  17, 20, 25, 45, 77, 170
};
/*
 * Starting index for entropy decoder to search for the right interval,
 * one entry per LAR coefficient
 */
const uint16_t WebRtcIsac_kLpcGainEntropySearch[SUBFRAMES] =
{
  8, 10, 12, 22, 38, 85
};

/*
 * The following 6 vectors define CDF of 6 decorrelated LPC
 * gains. Values are cumulative probabilities scaled to 0..65535.
 */
const uint16_t WebRtcIsac_kLpcGainCdfVec0[18] =
{
  0, 10, 27, 83, 234, 568, 1601, 4683, 16830, 57534, 63437,
  64767, 65229, 65408, 65483, 65514, 65527, 65535
};

const uint16_t WebRtcIsac_kLpcGainCdfVec1[21] =
{
  0, 15, 33, 84, 185, 385, 807, 1619, 3529, 7850, 19488,
  51365, 62437, 64548, 65088, 65304, 65409, 65484, 65507, 65522, 65535
};

const uint16_t WebRtcIsac_kLpcGainCdfVec2[26] =
{
  0, 15, 29, 54, 89, 145, 228, 380, 652, 1493, 4260,
  12359, 34133, 50749, 57224, 60814, 62927, 64078, 64742, 65103, 65311, 65418,
  65473, 65509, 65521, 65535
};

const uint16_t WebRtcIsac_kLpcGainCdfVec3[46] =
{
  0, 8, 12, 16, 26, 42, 56, 76, 111, 164, 247,
  366, 508, 693, 1000, 1442, 2155, 3188, 4854, 7387, 11249, 17617,
  30079, 46711, 56291, 60127, 62140, 63258, 63954, 64384, 64690, 64891, 65031,
  65139, 65227, 65293, 65351, 65399, 65438, 65467, 65492, 65504, 65510, 65518,
  65523, 65535
};

const uint16_t WebRtcIsac_kLpcGainCdfVec4[78] =
{
  0, 17, 29, 39, 51, 70, 104, 154, 234, 324, 443,
  590, 760, 971, 1202, 1494, 1845, 2274, 2797, 3366, 4088, 4905,
  5899, 7142, 8683, 10625, 12983, 16095, 20637, 28216, 38859, 47237, 51537,
  54150, 56066, 57583, 58756, 59685, 60458, 61103, 61659, 62144, 62550, 62886,
  63186, 63480, 63743, 63954, 64148, 64320, 64467, 64600, 64719, 64837, 64939,
  65014, 65098, 65160, 65211, 65250, 65290, 65325, 65344, 65366, 65391, 65410,
  65430, 65447, 65460, 65474, 65487, 65494, 65501, 65509, 65513, 65518, 65520,
  65535
};

const uint16_t WebRtcIsac_kLpcGainCdfVec5[171] =
{
  0, 10, 12, 14, 16, 18, 23, 29, 35, 42, 51,
  58, 65, 72, 78, 87, 96, 103, 111, 122, 134, 150,
  167, 184, 202, 223, 244, 265, 289, 315, 346, 379, 414,
  450, 491, 532, 572, 613, 656, 700, 751, 802, 853, 905,
  957, 1021, 1098, 1174, 1250, 1331, 1413, 1490, 1565, 1647, 1730,
  1821, 1913, 2004, 2100, 2207, 2314, 2420, 2532, 2652, 2783, 2921,
  3056, 3189, 3327, 3468, 3640, 3817, 3993, 4171, 4362, 4554, 4751,
  4948, 5142, 5346, 5566, 5799, 6044, 6301, 6565, 6852, 7150, 7470,
  7797, 8143, 8492, 8835, 9181, 9547, 9919, 10315, 10718, 11136, 11566,
  12015, 12482, 12967, 13458, 13953, 14432, 14903, 15416, 15936, 16452, 16967,
  17492, 18024, 18600, 19173, 19736, 20311, 20911, 21490, 22041, 22597, 23157,
  23768, 24405, 25034, 25660, 26280, 26899, 27614, 28331, 29015, 29702, 30403,
  31107, 31817, 32566, 33381, 34224, 35099, 36112, 37222, 38375, 39549, 40801,
  42074, 43350, 44626, 45982, 47354, 48860, 50361, 51845, 53312, 54739, 56026,
  57116, 58104, 58996, 59842, 60658, 61488, 62324, 63057, 63769, 64285, 64779,
  65076, 65344, 65430, 65500, 65517, 65535
};

/*
 * An array of pointers to CDFs of decorrelated LPC Gains
 */
const uint16_t* WebRtcIsac_kLpcGainCdfMat[SUBFRAMES] =
{
  WebRtcIsac_kLpcGainCdfVec0, WebRtcIsac_kLpcGainCdfVec1,
  WebRtcIsac_kLpcGainCdfVec2, WebRtcIsac_kLpcGainCdfVec3,
  WebRtcIsac_kLpcGainCdfVec4, WebRtcIsac_kLpcGainCdfVec5
};

/*
 * A matrix to decorrelate LPC gains of subframes.
 */
const double WebRtcIsac_kLpcGainDecorrMat[SUBFRAMES][SUBFRAMES] =
{
  {-0.150860,  0.327872,  0.367220,  0.504613,  0.559270,  0.409234},
  { 0.457128, -0.613591, -0.289283, -0.029734,  0.393760,  0.418240},
  {-0.626043,  0.136489, -0.439118, -0.448323,  0.135987,  0.420869},
  { 0.526617,  0.480187,  0.242552, -0.488754, -0.158713,  0.411331},
  {-0.302587, -0.494953,  0.588112, -0.063035, -0.404290,  0.387510},
  { 0.086378,  0.147714, -0.428875,  0.548300, -0.570121,  0.401391}
};

View File

@ -1,50 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* SWB_KLT_Tables_LPCGain.h
*
* This file declares tables used for entropy coding of LPC Gain
* of upper-band.
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_GAIN_SWB_TABLES_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_GAIN_SWB_TABLES_H_

#include <stdint.h>

#include "modules/audio_coding/codecs/isac/main/source/settings.h"

/* NOTE(review): WebRtcIsac_kMeanLpcGain is defined in the matching .c but
   declared in lpc_shape_swb12_tables.h - presumably historical; confirm. */

/* Quantizer step size for the decorrelated upper-band LPC gains. */
extern const double WebRtcIsac_kQSizeLpcGain;

/* Smallest reconstruction point of each sub-frame's gain quantizer. */
extern const double WebRtcIsac_kLeftRecPointLpcGain[SUBFRAMES];

/* Number of reconstruction points per sub-frame gain quantizer. */
extern const int16_t WebRtcIsac_kNumQCellLpcGain[SUBFRAMES];

/* Entropy-decoder starting search indices, one per sub-frame. */
extern const uint16_t WebRtcIsac_kLpcGainEntropySearch[SUBFRAMES];

/* CDFs of the six decorrelated LPC gains. */
extern const uint16_t WebRtcIsac_kLpcGainCdfVec0[18];

extern const uint16_t WebRtcIsac_kLpcGainCdfVec1[21];

extern const uint16_t WebRtcIsac_kLpcGainCdfVec2[26];

extern const uint16_t WebRtcIsac_kLpcGainCdfVec3[46];

extern const uint16_t WebRtcIsac_kLpcGainCdfVec4[78];

extern const uint16_t WebRtcIsac_kLpcGainCdfVec5[171];

/* Per-sub-frame pointers to the CDFs above. */
extern const uint16_t* WebRtcIsac_kLpcGainCdfMat[SUBFRAMES];

/* Decorrelation matrix for the six sub-frame gains. */
extern const double WebRtcIsac_kLpcGainDecorrMat[SUBFRAMES][SUBFRAMES];

#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_GAIN_SWB_TABLES_H_

View File

@ -1,158 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* SWB_KLT_Tables.c
*
* This file defines tables used for entropy coding of LPC shape of
* upper-band signal if the bandwidth is 12 kHz.
*
*/
#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h"
#include "modules/audio_coding/codecs/isac/main/source/settings.h"
/*
 * Mean value of LAR (log-area-ratio) coefficients, removed before
 * quantization.
 */
const double WebRtcIsac_kMeanLarUb12[UB_LPC_ORDER] =
{
  0.03748928306641, 0.09453441192543, -0.01112522344398, 0.03800237516842
};

/*
 * A rotation matrix to decorrelate intra-vector correlation,
 * i.e. correlation among components of LAR vector.
 */
const double WebRtcIsac_kIntraVecDecorrMatUb12[UB_LPC_ORDER][UB_LPC_ORDER] =
{
  {-0.00075365493856, -0.05809964887743, -0.23397966154116,  0.97050367376411},
  { 0.00625021257734, -0.17299965610679,  0.95977735920651,  0.22104179375008},
  { 0.20543384258374, -0.96202143495696, -0.15301870801552, -0.09432375099565},
  {-0.97865075648479, -0.20300322280841, -0.02581111653779, -0.01913568980258}
};

/*
 * A rotation matrix to remove correlation among LAR coefficients
 * of different LAR vectors. One might guess that decorrelation matrix
 * for the first component should differ from the second component
 * but we haven't observed a significant benefit of having different
 * decorrelation matrices for different components.
 */
const double WebRtcIsac_kInterVecDecorrMatUb12
[UB_LPC_VEC_PER_FRAME][UB_LPC_VEC_PER_FRAME] =
{
  { 0.70650597970460, -0.70770707262373},
  {-0.70770707262373, -0.70650597970460}
};

/*
 * LAR quantization step-size.
 */
const double WebRtcIsac_kLpcShapeQStepSizeUb12 = 0.150000;

/*
 * The smallest reconstruction points for quantization of LAR coefficients.
 */
const double WebRtcIsac_kLpcShapeLeftRecPointUb12
[UB_LPC_ORDER*UB_LPC_VEC_PER_FRAME] =
{
  -0.900000, -1.050000, -1.350000, -1.800000, -1.350000, -1.650000,
  -2.250000, -3.450000
};

/*
 * Number of reconstruction points of quantizers for LAR coefficients.
 */
const int16_t WebRtcIsac_kLpcShapeNumRecPointUb12
[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME] =
{
  13, 15, 19, 27, 19, 24, 32, 48
};

/*
 * Starting index for entropy decoder to search for the right interval,
 * one entry per LAR coefficient
 */
const uint16_t WebRtcIsac_kLpcShapeEntropySearchUb12
[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME] =
{
  6, 7, 9, 13, 9, 12, 16, 24
};

/*
 * The following 8 vectors define CDF of 8 decorrelated LAR
 * coefficients. Values are cumulative probabilities in 0..65535.
 */
const uint16_t WebRtcIsac_kLpcShapeCdfVec0Ub12[14] =
{
  0, 13, 95, 418, 1687, 6498, 21317, 44200, 59029, 63849, 65147,
  65449, 65525, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec1Ub12[16] =
{
  0, 10, 59, 255, 858, 2667, 8200, 22609, 42988, 57202, 62947,
  64743, 65308, 65476, 65522, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec2Ub12[20] =
{
  0, 18, 40, 118, 332, 857, 2017, 4822, 11321, 24330, 41279,
  54342, 60637, 63394, 64659, 65184, 65398, 65482, 65518, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec3Ub12[28] =
{
  0, 21, 38, 90, 196, 398, 770, 1400, 2589, 4650, 8211,
  14933, 26044, 39592, 50814, 57452, 60971, 62884, 63995, 64621, 65019, 65273,
  65410, 65480, 65514, 65522, 65531, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec4Ub12[20] =
{
  0, 7, 46, 141, 403, 969, 2132, 4649, 10633, 24902, 43254,
  54665, 59928, 62674, 64173, 64938, 65293, 65464, 65523, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec5Ub12[25] =
{
  0, 7, 22, 72, 174, 411, 854, 1737, 3545, 6774, 13165,
  25221, 40980, 52821, 58714, 61706, 63472, 64437, 64989, 65287, 65430, 65503,
  65525, 65529, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec6Ub12[33] =
{
  0, 11, 21, 36, 65, 128, 228, 401, 707, 1241, 2126,
  3589, 6060, 10517, 18853, 31114, 42477, 49770, 54271, 57467, 59838, 61569,
  62831, 63772, 64433, 64833, 65123, 65306, 65419, 65466, 65499, 65519, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec7Ub12[49] =
{
  0, 14, 34, 67, 107, 167, 245, 326, 449, 645, 861,
  1155, 1508, 2003, 2669, 3544, 4592, 5961, 7583, 9887, 13256, 18765,
  26519, 34077, 40034, 44349, 47795, 50663, 53262, 55473, 57458, 59122, 60592,
  61742, 62690, 63391, 63997, 64463, 64794, 65045, 65207, 65309, 65394, 65443,
  65478, 65504, 65514, 65523, 65535
};

/*
 * An array of pointers to CDFs of decorrelated LARs
 */
const uint16_t* WebRtcIsac_kLpcShapeCdfMatUb12
[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME] =
{
  WebRtcIsac_kLpcShapeCdfVec0Ub12, WebRtcIsac_kLpcShapeCdfVec1Ub12,
  WebRtcIsac_kLpcShapeCdfVec2Ub12, WebRtcIsac_kLpcShapeCdfVec3Ub12,
  WebRtcIsac_kLpcShapeCdfVec4Ub12, WebRtcIsac_kLpcShapeCdfVec5Ub12,
  WebRtcIsac_kLpcShapeCdfVec6Ub12, WebRtcIsac_kLpcShapeCdfVec7Ub12
};

View File

@ -1,66 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* lpc_shape_swb12_tables.h
*
* This file declares tables used for entropy coding of LPC shape of
* upper-band signal if the bandwidth is 12 kHz.
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB12_TABLES_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB12_TABLES_H_

#include <stdint.h>

#include "modules/audio_coding/codecs/isac/main/source/settings.h"

/* Mean LAR values, removed before quantization. */
extern const double WebRtcIsac_kMeanLarUb12[UB_LPC_ORDER];

/* NOTE(review): defined in lpc_gain_swb_tables.c, not in this file's .c -
   presumably historical placement; confirm before moving. */
extern const double WebRtcIsac_kMeanLpcGain;

/* Intra-vector decorrelation (rotation) matrix for LAR components. */
extern const double WebRtcIsac_kIntraVecDecorrMatUb12[UB_LPC_ORDER]
[UB_LPC_ORDER];

/* Inter-vector decorrelation matrix across the frame's LAR vectors. */
extern const double WebRtcIsac_kInterVecDecorrMatUb12[UB_LPC_VEC_PER_FRAME]
[UB_LPC_VEC_PER_FRAME];

/* LAR quantizer step size. */
extern const double WebRtcIsac_kLpcShapeQStepSizeUb12;

/* Smallest reconstruction point of each LAR quantizer. */
extern const double
    WebRtcIsac_kLpcShapeLeftRecPointUb12[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME];

/* Number of reconstruction points per LAR quantizer. */
extern const int16_t
    WebRtcIsac_kLpcShapeNumRecPointUb12[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME];

/* Entropy-decoder starting search indices, one per LAR coefficient. */
extern const uint16_t
    WebRtcIsac_kLpcShapeEntropySearchUb12[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME];

/* CDFs of the eight decorrelated LAR coefficients. */
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec0Ub12[14];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec1Ub12[16];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec2Ub12[20];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec3Ub12[28];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec4Ub12[20];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec5Ub12[25];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec6Ub12[33];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec7Ub12[49];

/* Per-coefficient pointers to the CDFs above. */
extern const uint16_t*
    WebRtcIsac_kLpcShapeCdfMatUb12[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME];

#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB12_TABLES_H_

View File

@ -1,247 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* SWB16_KLT_Tables.c
*
* This file defines tables used for entropy coding of LPC shape of
* upper-band signal if the bandwidth is 16 kHz.
*
*/
#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h"
#include "modules/audio_coding/codecs/isac/main/source/settings.h"
/*
 * Mean value of LAR (log-area-ratio) coefficients, removed before
 * quantization.
 */
const double WebRtcIsac_kMeanLarUb16[UB_LPC_ORDER] =
{
  0.454978, 0.364747, 0.102999, 0.104523
};

/*
 * A rotation matrix to decorrelate intra-vector correlation,
 * i.e. correlation among components of LAR vector.
 * NOTE(review): "Iintra" misspelling is part of the exported identifier
 * and therefore kept.
 */
const double WebRtcIsac_kIintraVecDecorrMatUb16[UB_LPC_ORDER][UB_LPC_ORDER] =
{
  {-0.020528, -0.085858, -0.002431,  0.996093},
  {-0.033155,  0.036102,  0.998786,  0.004866},
  { 0.202627,  0.974853, -0.028940,  0.088132},
  {-0.978479,  0.202454, -0.039785, -0.002811}
};

/*
 * A rotation matrix to remove correlation among LAR coefficients
 * of different LAR vectors. One might guess that decorrelation matrix
 * for the first component should differ from the second component
 * but we haven't observed a significant benefit of having different
 * decorrelation matrices for different components.
 */
const double WebRtcIsac_kInterVecDecorrMatUb16
[UB16_LPC_VEC_PER_FRAME][UB16_LPC_VEC_PER_FRAME] =
{
  { 0.291675, -0.515786,  0.644927,  0.482658},
  {-0.647220,  0.479712,  0.289556,  0.516856},
  { 0.643084,  0.485489, -0.289307,  0.516763},
  {-0.287185, -0.517823, -0.645389,  0.482553}
};

/*
 * The following 16 vectors define CDF of 16 decorrelated LAR
 * coefficients. Values are cumulative probabilities in 0..65535.
 * NOTE(review): the "Vec01Ub16" naming of the first vector (vs. "Vec0")
 * matches the exported symbol and is kept as-is.
 */
const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub16[14] =
{
  0, 2, 20, 159, 1034, 5688, 20892, 44653,
  59849, 64485, 65383, 65518, 65534, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec1Ub16[16] =
{
  0, 1, 7, 43, 276, 1496, 6681, 21653,
  43891, 58859, 64022, 65248, 65489, 65529, 65534, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec2Ub16[18] =
{
  0, 1, 9, 54, 238, 933, 3192, 9461,
  23226, 42146, 56138, 62413, 64623, 65300, 65473, 65521,
  65533, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec3Ub16[30] =
{
  0, 2, 4, 8, 17, 36, 75, 155,
  329, 683, 1376, 2662, 5047, 9508, 17526, 29027,
  40363, 48997, 55096, 59180, 61789, 63407, 64400, 64967,
  65273, 65429, 65497, 65526, 65534, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec4Ub16[16] =
{
  0, 1, 10, 63, 361, 1785, 7407, 22242,
  43337, 58125, 63729, 65181, 65472, 65527, 65534, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec5Ub16[17] =
{
  0, 1, 7, 29, 134, 599, 2443, 8590,
  22962, 42635, 56911, 63060, 64940, 65408, 65513, 65531,
  65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec6Ub16[21] =
{
  0, 1, 5, 16, 57, 191, 611, 1808,
  4847, 11755, 24612, 40910, 53789, 60698, 63729, 64924,
  65346, 65486, 65523, 65532, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec7Ub16[36] =
{
  0, 1, 4, 12, 25, 55, 104, 184,
  314, 539, 926, 1550, 2479, 3861, 5892, 8845,
  13281, 20018, 29019, 38029, 45581, 51557, 56057, 59284,
  61517, 63047, 64030, 64648, 65031, 65261, 65402, 65480,
  65518, 65530, 65534, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec8Ub16[21] =
{
  0, 1, 2, 7, 26, 103, 351, 1149,
  3583, 10204, 23846, 41711, 55361, 61917, 64382, 65186,
  65433, 65506, 65528, 65534, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub160[21] =
{
  0, 6, 19, 63, 205, 638, 1799, 4784,
  11721, 24494, 40803, 53805, 60886, 63822, 64931, 65333,
  65472, 65517, 65530, 65533, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub161[28] =
{
  0, 1, 3, 11, 31, 86, 221, 506,
  1101, 2296, 4486, 8477, 15356, 26079, 38941, 49952,
  57165, 61257, 63426, 64549, 65097, 65351, 65463, 65510,
  65526, 65532, 65534, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub162[55] =
{
  0, 3, 12, 23, 42, 65, 89, 115,
  150, 195, 248, 327, 430, 580, 784, 1099,
  1586, 2358, 3651, 5899, 9568, 14312, 19158, 23776,
  28267, 32663, 36991, 41153, 45098, 48680, 51870, 54729,
  57141, 59158, 60772, 62029, 63000, 63761, 64322, 64728,
  65000, 65192, 65321, 65411, 65463, 65496, 65514, 65523,
  65527, 65529, 65531, 65532, 65533, 65534, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub163[26] =
{
  0, 2, 4, 10, 21, 48, 114, 280,
  701, 1765, 4555, 11270, 24267, 41213, 54285, 61003,
  63767, 64840, 65254, 65421, 65489, 65514, 65526, 65532,
  65534, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub164[28] =
{
  0, 1, 3, 6, 15, 36, 82, 196,
  453, 1087, 2557, 5923, 13016, 25366, 40449, 52582,
  59539, 62896, 64389, 65033, 65316, 65442, 65494, 65519,
  65529, 65533, 65534, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub165[34] =
{
  0, 2, 4, 8, 18, 35, 73, 146,
  279, 524, 980, 1789, 3235, 5784, 10040, 16998,
  27070, 38543, 48499, 55421, 59712, 62257, 63748, 64591,
  65041, 65278, 65410, 65474, 65508, 65522, 65530, 65533,
  65534, 65535
};

const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub166[71] =
{
  0, 1, 2, 6, 13, 26, 55, 92,
  141, 191, 242, 296, 355, 429, 522, 636,
  777, 947, 1162, 1428, 1753, 2137, 2605, 3140,
  3743, 4409, 5164, 6016, 6982, 8118, 9451, 10993,
  12754, 14810, 17130, 19780, 22864, 26424, 30547, 35222,
  40140, 44716, 48698, 52056, 54850, 57162, 59068, 60643,
  61877, 62827, 63561, 64113, 64519, 64807, 65019, 65167,
  65272, 65343, 65399, 65440, 65471, 65487, 65500, 65509,
  65518, 65524, 65527, 65531, 65533, 65534, 65535
};

/*
 * An array of pointers to CDFs of decorrelated LARs
 */
const uint16_t* WebRtcIsac_kLpcShapeCdfMatUb16
[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME] = {
  WebRtcIsac_kLpcShapeCdfVec01Ub16,
  WebRtcIsac_kLpcShapeCdfVec1Ub16,
  WebRtcIsac_kLpcShapeCdfVec2Ub16,
  WebRtcIsac_kLpcShapeCdfVec3Ub16,
  WebRtcIsac_kLpcShapeCdfVec4Ub16,
  WebRtcIsac_kLpcShapeCdfVec5Ub16,
  WebRtcIsac_kLpcShapeCdfVec6Ub16,
  WebRtcIsac_kLpcShapeCdfVec7Ub16,
  WebRtcIsac_kLpcShapeCdfVec8Ub16,
  WebRtcIsac_kLpcShapeCdfVec01Ub160,
  WebRtcIsac_kLpcShapeCdfVec01Ub161,
  WebRtcIsac_kLpcShapeCdfVec01Ub162,
  WebRtcIsac_kLpcShapeCdfVec01Ub163,
  WebRtcIsac_kLpcShapeCdfVec01Ub164,
  WebRtcIsac_kLpcShapeCdfVec01Ub165,
  WebRtcIsac_kLpcShapeCdfVec01Ub166
};

/*
 * The smallest reconstruction points for quantization of LAR coefficients.
 */
const double WebRtcIsac_kLpcShapeLeftRecPointUb16
[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME] =
{
  -0.8250, -0.9750, -1.1250, -2.1750, -0.9750, -1.1250, -1.4250,
  -2.6250, -1.4250, -1.2750, -1.8750, -3.6750, -1.7250, -1.8750,
  -2.3250, -5.4750
};

/*
 * Number of reconstruction points of quantizers for LAR coefficients.
 */
const int16_t WebRtcIsac_kLpcShapeNumRecPointUb16
[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME] =
{
  13, 15, 17, 29, 15, 16, 20, 35, 20,
  20, 27, 54, 25, 27, 33, 70
};

/*
 * Starting index for entropy decoder to search for the right interval,
 * one entry per LAR coefficient
 */
const uint16_t WebRtcIsac_kLpcShapeEntropySearchUb16
[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME] =
{
  6, 7, 8, 14, 7, 8, 10, 17, 10,
  10, 13, 27, 12, 13, 16, 35
};

/*
 * LAR quantization step-size.
 */
const double WebRtcIsac_kLpcShapeQStepSizeUb16 = 0.150000;

View File

@ -1,81 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* lpc_shape_swb16_tables.h
*
* This file declares tables used for entropy coding of LPC shape of
* upper-band signal if the bandwidth is 16 kHz.
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB16_TABLES_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB16_TABLES_H_

#include <stdint.h>

#include "modules/audio_coding/codecs/isac/main/source/settings.h"

/* Mean LAR values, removed before quantization. */
extern const double WebRtcIsac_kMeanLarUb16[UB_LPC_ORDER];

/* Intra-vector decorrelation matrix ("Iintra" misspelling is part of the
   exported identifier). */
extern const double WebRtcIsac_kIintraVecDecorrMatUb16[UB_LPC_ORDER]
[UB_LPC_ORDER];

/* Inter-vector decorrelation matrix across the frame's LAR vectors. */
extern const double WebRtcIsac_kInterVecDecorrMatUb16[UB16_LPC_VEC_PER_FRAME]
[UB16_LPC_VEC_PER_FRAME];

/* CDFs of the 16 decorrelated LAR coefficients. */
extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub16[14];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec1Ub16[16];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec2Ub16[18];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec3Ub16[30];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec4Ub16[16];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec5Ub16[17];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec6Ub16[21];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec7Ub16[36];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec8Ub16[21];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub160[21];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub161[28];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub162[55];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub163[26];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub164[28];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub165[34];

extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub166[71];

/* Per-coefficient pointers to the CDFs above. */
extern const uint16_t*
    WebRtcIsac_kLpcShapeCdfMatUb16[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];

/* Smallest reconstruction point of each LAR quantizer. */
extern const double
    WebRtcIsac_kLpcShapeLeftRecPointUb16[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];

/* Number of reconstruction points per LAR quantizer. */
extern const int16_t
    WebRtcIsac_kLpcShapeNumRecPointUb16[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];

/* Entropy-decoder starting search indices, one per LAR coefficient. */
extern const uint16_t
    WebRtcIsac_kLpcShapeEntropySearchUb16[UB_LPC_ORDER *
                                          UB16_LPC_VEC_PER_FRAME];

/* LAR quantizer step size. */
extern const double WebRtcIsac_kLpcShapeQStepSizeUb16;

#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB16_TABLES_H_

View File

@ -1,601 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/* coding tables for the KLT coefficients */
#include "modules/audio_coding/codecs/isac/main/source/lpc_tables.h"
#include "modules/audio_coding/codecs/isac/main/source/settings.h"
/* cdf array for model indicator */
/* CDF convention used throughout this file: first entry 0, last 65535. */
const uint16_t WebRtcIsac_kQKltModelCdf[4] = {
0, 15434, 37548, 65535 };
/* pointer to cdf array for model indicator */
const uint16_t *WebRtcIsac_kQKltModelCdfPtr[1] = {
WebRtcIsac_kQKltModelCdf };
/* initial cdf index for decoder of model indicator */
const uint16_t WebRtcIsac_kQKltModelInitIndex[1] = { 1 };
/* offset to go from rounded value to quantization index */
/* NOTE(review): declared `short` unlike the surrounding uint16_t tables;
 * kept as-is since the type is part of the external interface. */
const short WebRtcIsac_kQKltQuantMinGain[12] = {
3, 6, 4, 6, 6, 9, 5, 16, 11, 34, 32, 47 };
/* Same offsets for the 108 shape parameters. */
const short WebRtcIsac_kQKltQuantMinShape[108] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 2, 2, 2, 3, 0, 0,
0, 0, 1, 0, 0, 0, 0, 1, 1, 1,
1, 1, 1, 2, 2, 3, 0, 0, 0, 0,
1, 0, 1, 1, 1, 1, 1, 1, 1, 2,
2, 4, 3, 5, 0, 0, 0, 0, 1, 1,
1, 1, 1, 1, 2, 1, 2, 2, 3, 4,
4, 7, 0, 0, 1, 1, 1, 1, 1, 1,
1, 2, 3, 2, 3, 4, 4, 5, 7, 13,
0, 1, 1, 2, 3, 2, 2, 2, 4, 4,
5, 6, 7, 11, 9, 13, 12, 26 };
/* maximum quantization index */
const uint16_t WebRtcIsac_kQKltMaxIndGain[12] = {
6, 12, 8, 14, 10, 19, 12, 31, 22, 56, 52, 138 };
const uint16_t WebRtcIsac_kQKltMaxIndShape[108] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
2, 2, 2, 2, 4, 4, 5, 6, 0, 0,
0, 0, 1, 0, 0, 0, 0, 1, 2, 2,
2, 2, 3, 4, 5, 7, 0, 0, 0, 0,
2, 0, 2, 2, 2, 2, 3, 2, 2, 4,
4, 6, 6, 9, 0, 0, 0, 0, 2, 2,
2, 2, 2, 2, 3, 2, 4, 4, 7, 7,
9, 13, 0, 0, 2, 2, 2, 2, 2, 2,
3, 4, 5, 4, 6, 8, 8, 10, 16, 25,
0, 2, 2, 4, 5, 4, 4, 4, 7, 8,
9, 10, 13, 19, 17, 23, 25, 49 };
/* index offset */
/* Start index of each parameter's levels inside WebRtcIsac_kQKltLevelsGain /
 * WebRtcIsac_kQKltLevelsShape (offset[i] + maxind[i] spans to the next one). */
const uint16_t WebRtcIsac_kQKltOffsetGain[12] = {
0, 7, 20, 29, 44, 55, 75, 88, 120, 143, 200, 253 };
const uint16_t WebRtcIsac_kQKltOffsetShape[108] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
11, 14, 17, 20, 23, 28, 33, 39, 46, 47,
48, 49, 50, 52, 53, 54, 55, 56, 58, 61,
64, 67, 70, 74, 79, 85, 93, 94, 95, 96,
97, 100, 101, 104, 107, 110, 113, 117, 120, 123,
128, 133, 140, 147, 157, 158, 159, 160, 161, 164,
167, 170, 173, 176, 179, 183, 186, 191, 196, 204,
212, 222, 236, 237, 238, 241, 244, 247, 250, 253,
256, 260, 265, 271, 276, 283, 292, 301, 312, 329,
355, 356, 359, 362, 367, 373, 378, 383, 388, 396,
405, 415, 426, 440, 460, 478, 502, 528 };
/* initial cdf index for KLT coefficients */
const uint16_t WebRtcIsac_kQKltInitIndexGain[12] = {
3, 6, 4, 7, 5, 10, 6, 16, 11, 28, 26, 69};
const uint16_t WebRtcIsac_kQKltInitIndexShape[108] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 2, 2, 3, 3, 0, 0,
0, 0, 1, 0, 0, 0, 0, 1, 1, 1,
1, 1, 2, 2, 3, 4, 0, 0, 0, 0,
1, 0, 1, 1, 1, 1, 2, 1, 1, 2,
2, 3, 3, 5, 0, 0, 0, 0, 1, 1,
1, 1, 1, 1, 2, 1, 2, 2, 4, 4,
5, 7, 0, 0, 1, 1, 1, 1, 1, 1,
2, 2, 3, 2, 3, 4, 4, 5, 8, 13,
0, 1, 1, 2, 3, 2, 2, 2, 4, 4,
5, 5, 7, 10, 9, 12, 13, 25 };
/* quantizer representation levels */
/* 392 = WebRtcIsac_kQKltOffsetGain[11] (253) + kQKltMaxIndGain[11] (138) + 1;
 * the levels of gain parameter i start at kQKltOffsetGain[i]. */
const double WebRtcIsac_kQKltLevelsGain[392] = {
-2.78127126, -1.76745590, -0.77913790, -0.00437329, 0.79961206,
1.81775776, 2.81389782, -5.78753143, -4.88384084, -3.89320940,
-2.88133610, -1.92859977, -0.86347396, 0.02003888, 0.86140400,
1.89667156, 2.97134967, 3.98781964, 4.91727277, 5.82865898,
-4.11195874, -2.80898424, -1.87547977, -0.80943825, -0.00679084,
0.79573851, 1.83953397, 2.67586037, 3.76274082, -6.10933968,
-4.93034581, -3.89281296, -2.91530625, -1.89684163, -0.85319130,
-0.02275767, 0.86862017, 1.91578276, 2.96107339, 3.96543056,
4.91369908, 5.91058154, 6.83848343, 8.07136925, -5.87470395,
-4.84703049, -3.84284597, -2.86168446, -1.89290192, -0.82798145,
-0.00080013, 0.82594974, 1.85754329, 2.88351798, 3.96172628,
-8.85684885, -7.87387461, -6.97811862, -5.93256270, -4.94301439,
-3.95513701, -2.96041544, -1.94031192, -0.87961478, -0.00456201,
0.89911505, 1.91723376, 2.94011511, 3.93302540, 4.97990967,
5.93133404, 7.02181199, 7.92407762, 8.80155440, 10.04665814,
-4.82396678, -3.85612158, -2.89482244, -1.89558408, -0.90036978,
-0.00677823, 0.90607989, 1.90937981, 2.91175777, 3.91637730,
4.97565723, 5.84771228, 7.11145863, -16.07879840, -15.03776309,
-13.93905670, -12.95671800, -11.89171202, -10.95820934, -9.95923714,
-8.94357334, -7.99068299, -6.97481009, -5.94826231, -4.96673988,
-3.97490466, -2.97846970, -1.95130435, -0.94215262, -0.01444043,
0.96770704, 1.95848598, 2.94107862, 3.95666119, 4.97253085,
5.97191122, 6.93277360, 7.96608727, 8.87958779, 10.00264269,
10.86560820, 12.07449071, 13.04491775, 13.97507061, 14.91845261,
-10.85696295, -9.83365357, -9.01245635, -7.95915145, -6.95625003,
-5.95362618, -4.93468444, -3.98760978, -2.95044407, -1.97041277,
-0.97701799, -0.00840234, 0.97834289, 1.98361415, 2.97802439,
3.96415871, 4.95369042, 5.94101770, 6.92756798, 7.94063998,
8.85951828, 9.97077022, 11.00068503, -33.92030406, -32.81426422,
-32.00000000, -31.13243639, -30.11886909, -29.06017570, -28.12598824,
-27.22045482, -25.81215858, -25.07849962, -23.93018013, -23.02097643,
-21.89529725, -20.99091085, -19.98889048, -18.94327044, -17.96562071,
-16.96126218, -15.95054062, -14.98516200, -13.97101012, -13.02106500,
-11.98438006, -11.03216748, -9.95930286, -8.97043946, -7.98085082,
-6.98360995, -5.98998802, -4.98668173, -4.00032906, -3.00420619,
-1.98701132, -0.99324682, -0.00609324, 0.98297834, 1.99483076,
3.00305044, 3.97142097, 4.97525759, 5.98612258, 6.97448236,
7.97575900, 9.01086211, 9.98665542, 11.00541438, 11.98078628,
12.92352471, 14.06849675, 14.99949430, 15.94904834, 16.97440321,
18.04040916, 18.88987609, 20.05312391, 21.00000000, 21.79443341,
-31.98578825, -31.00000000, -29.89060567, -28.98555686, -27.97114102,
-26.84935410, -26.02402230, -24.94195278, -23.92336849, -22.95552382,
-21.97932836, -20.96055470, -19.99649553, -19.03436122, -17.96706525,
-17.01139515, -16.01363516, -14.99154248, -14.00298333, -12.99630613,
-11.99955519, -10.99000421, -10.00819092, -8.99763648, -7.98431793,
-7.01769025, -5.99604690, -4.99980697, -3.99334671, -3.01748192,
-2.02051217, -1.00848371, -0.01942358, 1.00477757, 1.95477872,
2.98593031, 3.98779079, 4.96862849, 6.02694771, 6.93983733,
7.89874717, 8.99615862, 10.02367921, 10.96293452, 11.84351528,
12.92207187, 13.85122329, 15.05146877, 15.99371264, 17.00000000,
18.00000000, 19.00000000, 19.82763573, -47.00000000, -46.00000000,
-44.87138498, -44.00000000, -43.00000000, -42.00000000, -41.00000000,
-39.88966612, -38.98913239, -37.80306486, -37.23584325, -35.94200288,
-34.99881301, -34.11361858, -33.06507360, -32.13129135, -30.90891364,
-29.81511907, -28.99250380, -28.04535391, -26.99767800, -26.04418164,
-24.95687851, -24.04865595, -23.03392645, -21.89366707, -20.93517364,
-19.99388660, -18.91620943, -18.03749683, -16.99532379, -15.98683813,
-15.06421479, -13.99359211, -12.99714098, -11.97022520, -10.98500279,
-9.98834422, -8.95729330, -8.01232284, -7.00253661, -5.99681626,
-5.01207817, -3.95914904, -3.01232178, -1.96615919, -0.97687670,
0.01228030, 0.98412288, 2.01753544, 3.00580570, 3.97783510,
4.98846894, 6.01321400, 7.00867732, 8.00416375, 9.01771966,
9.98637729, 10.98255180, 11.99194163, 13.01807333, 14.00999545,
15.00118556, 16.00089224, 17.00584148, 17.98251763, 18.99942091,
19.96917690, 20.97839265, 21.98207297, 23.00171271, 23.99930737,
24.99746061, 26.00936304, 26.98240132, 28.01126868, 29.01395915,
29.98153507, 31.01376711, 31.99876818, 33.00475317, 33.99753994,
34.99493913, 35.98933585, 36.95620160, 37.98428461, 38.99317544,
40.01832073, 40.98048133, 41.95999283, 42.98232091, 43.96523612,
44.99574268, 45.99524194, 47.05464025, 48.03821548, 48.99354366,
49.96400411, 50.98017973, 51.95184408, 52.96291806, 54.00194392,
54.96603783, 55.95623778, 57.03076595, 58.05889901, 58.99081551,
59.97928121, 61.05071612, 62.03971580, 63.01286038, 64.01290338,
65.02074503, 65.99454594, 67.00399425, 67.96571257, 68.95305727,
69.92030664, 70.95594862, 71.98088567, 73.04764124, 74.00285480,
75.02696330, 75.89837673, 76.93459997, 78.16266309, 78.83317543,
80.00000000, 80.87251574, 82.09803524, 83.10671664, 84.00000000,
84.77023523, 86.00000000, 87.00000000, 87.92946897, 88.69159118,
90.00000000, 90.90535270 };
/* 578 = WebRtcIsac_kQKltOffsetShape[107] (528) + kQKltMaxIndShape[107] (49) + 1;
 * the levels of shape parameter i start at kQKltOffsetShape[i]. */
const double WebRtcIsac_kQKltLevelsShape[578] = {
0.00032397, 0.00008053, -0.00061202, -0.00012620, 0.00030437,
0.00054764, -0.00027902, 0.00069360, 0.00029449, -0.80219239,
0.00091089, -0.74514927, -0.00094283, 0.64030631, -0.60509119,
0.00035575, 0.61851665, -0.62129957, 0.00375219, 0.60054900,
-0.61554359, 0.00054977, 0.63362016, -1.73118727, -0.65422341,
0.00524568, 0.66165298, 1.76785515, -1.83182018, -0.65997434,
-0.00011887, 0.67524299, 1.79933938, -1.76344480, -0.72547708,
-0.00133017, 0.73104704, 1.75305377, 2.85164534, -2.80423916,
-1.71959639, -0.75419722, -0.00329945, 0.77196760, 1.72211069,
2.87339653, 0.00031089, -0.00015311, 0.00018201, -0.00035035,
-0.77357251, 0.00154647, -0.00047625, -0.00045299, 0.00086590,
0.00044762, -0.83383829, 0.00024787, -0.68526258, -0.00122472,
0.64643255, -0.60904942, -0.00448987, 0.62309184, -0.59626442,
-0.00574132, 0.62296546, -0.63222115, 0.00013441, 0.63609545,
-0.66911055, -0.00369971, 0.66346095, 2.07281301, -1.77184694,
-0.67640425, -0.00010145, 0.64818392, 1.74948973, -1.69420224,
-0.71943894, -0.00004680, 0.75303493, 1.81075983, 2.80610041,
-2.80005755, -1.79866753, -0.77409777, -0.00084220, 0.80141293,
1.78291081, 2.73954236, 3.82994169, 0.00015140, -0.00012766,
-0.00034241, -0.00119125, -0.76113497, 0.00069246, 0.76722027,
0.00132862, -0.69107530, 0.00010656, 0.77061578, -0.78012970,
0.00095947, 0.77828502, -0.64787758, 0.00217168, 0.63050167,
-0.58601125, 0.00306596, 0.59466308, -0.58603410, 0.00059779,
0.64257970, 1.76512766, -0.61193600, -0.00259517, 0.59767574,
-0.61026273, 0.00315811, 0.61725479, -1.69169719, -0.65816029,
0.00067575, 0.65576890, 2.00000000, -1.72689193, -0.69780808,
-0.00040990, 0.70668487, 1.74198458, -3.79028154, -3.00000000,
-1.73194459, -0.70179341, -0.00106695, 0.71302629, 1.76849782,
-2.89332364, -1.78585007, -0.78731491, -0.00132610, 0.79692976,
1.75247009, 2.97828682, -5.26238694, -3.69559829, -2.87286122,
-1.84908818, -0.84434577, -0.01167975, 0.84641753, 1.84087672,
2.87628156, 3.83556679, -0.00190204, 0.00092642, 0.00354385,
-0.00012982, -0.67742785, 0.00229509, 0.64935672, -0.58444751,
0.00470733, 0.57299534, -0.58456202, -0.00097715, 0.64593607,
-0.64060330, -0.00638534, 0.59680157, -0.59287537, 0.00490772,
0.58919707, -0.60306173, -0.00417464, 0.60562100, -1.75218757,
-0.63018569, -0.00225922, 0.63863300, -0.63949939, -0.00126421,
0.64268914, -1.75851182, -0.68318060, 0.00510418, 0.69049211,
1.88178506, -1.71136148, -0.72710534, -0.00815559, 0.73412917,
1.79996711, -2.77111145, -1.73940498, -0.78212945, 0.01074476,
0.77688916, 1.76873972, 2.87281379, 3.77554698, -3.75832725,
-2.95463235, -1.80451491, -0.80017226, 0.00149902, 0.80729206,
1.78265046, 2.89391793, -3.78236148, -2.83640598, -1.82532067,
-0.88844327, -0.00620952, 0.88208030, 1.85757631, 2.81712391,
3.88430176, 5.16179367, -7.00000000, -5.93805408, -4.87172597,
-3.87524433, -2.89399744, -1.92359563, -0.92136341, -0.00172725,
0.93087018, 1.90528280, 2.89809686, 3.88085708, 4.89147740,
5.89078692, -0.00239502, 0.00312564, -1.00000000, 0.00178325,
1.00000000, -0.62198029, 0.00143254, 0.65344051, -0.59851220,
-0.00676987, 0.61510140, -0.58894151, 0.00385055, 0.59794203,
-0.59808568, -0.00038214, 0.57625703, -0.63009713, -0.01107985,
0.61278758, -0.64206758, -0.00154369, 0.65480598, 1.80604162,
-1.80909286, -0.67810514, 0.00205762, 0.68571097, 1.79453891,
-3.22682422, -1.73808453, -0.71870305, -0.00738594, 0.71486172,
1.73005326, -1.66891897, -0.73689615, -0.00616203, 0.74262409,
1.73807899, -2.92417482, -1.73866741, -0.78133871, 0.00764425,
0.80027264, 1.78668732, 2.74992588, -4.00000000, -2.75578740,
-1.83697516, -0.83117035, -0.00355191, 0.83527172, 1.82814700,
2.77377675, 3.80718693, -3.81667698, -2.83575471, -1.83372350,
-0.86579471, 0.00547578, 0.87582281, 1.82858793, 2.87265007,
3.91405377, -4.87521600, -3.78999094, -2.86437014, -1.86964365,
-0.90618018, 0.00128243, 0.91497811, 1.87374952, 2.83199819,
3.91519130, 4.76632822, -6.68713448, -6.01252467, -4.94587936,
-3.88795368, -2.91299088, -1.92592211, -0.95504570, -0.00089980,
0.94565200, 1.93239633, 2.91832808, 3.91363475, 4.88920034,
5.96471415, 6.83905252, 7.86195009, 8.81571018,-12.96141759,
-11.73039516,-10.96459719, -9.97382433, -9.04414433, -7.89460619,
-6.96628608, -5.93236595, -4.93337924, -3.95479990, -2.96451499,
-1.96635876, -0.97271229, -0.00402238, 0.98343930, 1.98348291,
2.96641164, 3.95456471, 4.95517089, 5.98975714, 6.90322073,
7.90468849, 8.85639467, 9.97255498, 10.79006309, 11.81988596,
0.04950500, -1.00000000, -0.01226628, 1.00000000, -0.59479469,
-0.10438305, 0.59822144, -2.00000000, -0.67109149, -0.09256692,
0.65171621, 2.00000000, -3.00000000, -1.68391999, -0.76681039,
-0.03354151, 0.71509146, 1.77615472, -2.00000000, -0.68661511,
-0.02497881, 0.66478398, 2.00000000, -2.00000000, -0.67032784,
-0.00920582, 0.64892756, 2.00000000, -2.00000000, -0.68561894,
0.03641869, 0.73021611, 1.68293863, -4.00000000, -2.72024184,
-1.80096059, -0.81696185, 0.03604685, 0.79232033, 1.70070730,
3.00000000, -4.00000000, -2.71795670, -1.80482986, -0.86001162,
0.03764903, 0.87723968, 1.79970771, 2.72685932, 3.67589143,
-5.00000000, -4.00000000, -2.85492548, -1.78996365, -0.83250358,
-0.01376828, 0.84195506, 1.78161105, 2.76754458, 4.00000000,
-6.00000000, -5.00000000, -3.82268811, -2.77563624, -1.82608163,
-0.86486114, -0.02671886, 0.86693165, 1.88422879, 2.86248347,
3.95632216, -7.00000000, -6.00000000, -5.00000000, -3.77533988,
-2.86391432, -1.87052039, -0.90513658, 0.06271236, 0.91083620,
1.85734756, 2.86031688, 3.82019418, 4.94420394, 6.00000000,
-11.00000000,-10.00000000, -9.00000000, -8.00000000, -6.91952415,
-6.00000000, -4.92044374, -3.87845165, -2.87392362, -1.88413020,
-0.91915740, 0.00318517, 0.91602800, 1.89664838, 2.88925058,
3.84123856, 4.78988651, 5.94526812, 6.81953917, 8.00000000,
-9.00000000, -8.00000000, -7.03319143, -5.94530963, -4.86669720,
-3.92438007, -2.88620396, -1.92848070, -0.94365985, 0.01671855,
0.97349410, 1.93419878, 2.89740109, 3.89662823, 4.83235583,
5.88106535, 6.80328232, 8.00000000,-13.00000000,-12.00000000,
-11.00000000,-10.00000000, -9.00000000, -7.86033489, -6.83344055,
-5.89844215, -4.90811454, -3.94841298, -2.95820490, -1.98627966,
-0.99161468, -0.02286136, 0.96055651, 1.95052433, 2.93969396,
3.94304346, 4.88522624, 5.87434241, 6.78309433, 7.87244101,
9.00000000, 10.00000000,-12.09117356,-11.00000000,-10.00000000,
-8.84766108, -7.86934236, -6.98544896, -5.94233429, -4.95583292,
-3.95575986, -2.97085529, -1.98955811, -0.99359873, -0.00485413,
0.98298870, 1.98093258, 2.96430203, 3.95540216, 4.96915010,
5.96775124, 6.99236918, 7.96503302, 8.99864542, 9.85857723,
10.96541926, 11.91647197, 12.71060069,-26.00000000,-25.00000000,
-24.00585596,-23.11642573,-22.14271284,-20.89800711,-19.87815799,
-19.05036354,-17.88555651,-16.86471209,-15.97711073,-14.94012359,
-14.02661226,-12.98243228,-11.97489256,-10.97402777, -9.96425624,
-9.01085220, -7.97372506, -6.98795002, -5.97271328, -5.00191694,
-3.98055849, -2.98458048, -1.99470442, -0.99656768, -0.00825666,
1.00272004, 1.99922218, 2.99357669, 4.01407905, 5.01003897,
5.98115528, 7.00018958, 8.00338125, 8.98981046, 9.98990318,
10.96341479, 11.96866930, 12.99175139, 13.94580443, 14.95745083,
15.98992869, 16.97484646, 17.99630043, 18.93396897, 19.88347741,
20.96532482, 21.92191032, 23.22314702 };
/* cdf tables for quantizer indices */
/* Concatenation of 12 per-parameter CDF sub-tables.  Sub-table i has
 * WebRtcIsac_kQKltMaxIndGain[i] + 2 entries (starts at 0, ends at 65535);
 * its start offset within this array is given by
 * WebRtcIsac_kQKltCdfPtrGain below (last: 264 + 140 = 404). */
const uint16_t WebRtcIsac_kQKltCdfGain[404] = {
0, 13, 301, 3730, 61784, 65167, 65489, 65535, 0, 17,
142, 314, 929, 2466, 7678, 56450, 63463, 64740, 65204, 65426,
65527, 65535, 0, 8, 100, 724, 6301, 60105, 65125, 65510,
65531, 65535, 0, 13, 117, 368, 1068, 3010, 11928, 53603,
61177, 63404, 64505, 65108, 65422, 65502, 65531, 65535, 0, 4,
17, 96, 410, 1859, 12125, 54361, 64103, 65305, 65497, 65535,
0, 4, 88, 230, 469, 950, 1746, 3228, 6092, 16592,
44756, 56848, 61256, 63308, 64325, 64920, 65309, 65460, 65502,
65522, 65535, 0, 88, 352, 1675, 6339, 20749, 46686, 59284, 63525,
64949, 65359, 65502, 65527, 65535, 0, 13, 38, 63, 117,
234, 381, 641, 929, 1407, 2043, 2809, 4032, 5753, 8792,
14407, 24308, 38941, 48947, 55403, 59293, 61411, 62688, 63630,
64329, 64840, 65188, 65376, 65472, 65506, 65527, 65531, 65535,
0, 8, 29, 75, 222, 615, 1327, 2801, 5623, 9931, 16094, 24966,
34419, 43458, 50676, 56186, 60055, 62500, 63936, 64765, 65225,
65435, 65514, 65535, 0, 8, 13, 15, 17, 21, 33, 59,
71, 92, 151, 243, 360, 456, 674, 934, 1223, 1583,
1989, 2504, 3031, 3617, 4354, 5154, 6163, 7411, 8780, 10747,
12874, 15591, 18974, 23027, 27436, 32020, 36948, 41830, 46205,
49797, 53042, 56094, 58418, 60360, 61763, 62818, 63559, 64103,
64509, 64798, 65045, 65162, 65288, 65363, 65447, 65506, 65522,
65531, 65533, 65535, 0, 4, 6, 25, 38, 71, 138, 264, 519, 808,
1227, 1825, 2516, 3408, 4279, 5560, 7092, 9197, 11420, 14108,
16947, 20300, 23926, 27459, 31164, 34827, 38575, 42178, 45540,
48747, 51444, 54090, 56426, 58460, 60080, 61595, 62734, 63668,
64275, 64673, 64936, 65112, 65217, 65334, 65426, 65464, 65477,
65489, 65518, 65527, 65529, 65531, 65533, 65535, 0, 2, 4, 8, 10,
12, 14, 16, 21, 33, 50, 71, 84, 92, 105, 138, 180, 255, 318,
377, 435, 473, 511, 590, 682, 758, 913, 1097, 1256, 1449, 1671,
1884, 2169, 2445, 2772, 3157, 3563, 3944, 4375, 4848, 5334, 5820,
6448, 7101, 7716, 8378, 9102, 9956, 10752, 11648, 12707, 13670,
14758, 15910, 17187, 18472, 19627, 20649, 21951, 23169, 24283,
25552, 26862, 28227, 29391, 30764, 31882, 33213, 34432, 35600,
36910, 38116, 39464, 40729, 41872, 43144, 44371, 45514, 46762,
47813, 48968, 50069, 51032, 51974, 52908, 53737, 54603, 55445,
56282, 56990, 57572, 58191, 58840, 59410, 59887, 60264, 60607,
60946, 61269, 61516, 61771, 61960, 62198, 62408, 62558, 62776,
62985, 63207, 63408, 63546, 63739, 63906, 64070, 64237, 64371,
64551, 64677, 64836, 64999, 65095, 65213, 65284, 65338, 65380,
65426, 65447, 65472, 65485, 65487, 65489, 65502, 65510, 65512,
65514, 65516, 65518, 65522, 65531, 65533, 65535 };
/* Concatenation of 108 per-parameter CDF sub-tables for the shape indices;
 * each sub-table starts at 0 and ends at 65535, with start offsets given by
 * WebRtcIsac_kQKltCdfPtrShape below. */
const uint16_t WebRtcIsac_kQKltCdfShape[686] = {
0, 65535, 0, 65535, 0, 65535, 0, 65535, 0, 65535,
0, 65535, 0, 65535, 0, 65535, 0, 65535, 0, 4,
65535, 0, 8, 65514, 65535, 0, 29, 65481, 65535, 0,
121, 65439, 65535, 0, 239, 65284, 65535, 0, 8, 779,
64999, 65527, 65535, 0, 8, 888, 64693, 65522, 65535, 0,
29, 2604, 62843, 65497, 65531, 65535, 0, 25, 176, 4576,
61164, 65275, 65527, 65535, 0, 65535, 0, 65535, 0, 65535,
0, 65535, 0, 4, 65535, 0, 65535, 0, 65535, 0,
65535, 0, 65535, 0, 4, 65535, 0, 33, 65502, 65535,
0, 54, 65481, 65535, 0, 251, 65309, 65535, 0, 611,
65074, 65535, 0, 1273, 64292, 65527, 65535, 0, 4, 1809,
63940, 65518, 65535, 0, 88, 4392, 60603, 65426, 65531, 65535,
0, 25, 419, 7046, 57756, 64961, 65514, 65531, 65535, 0,
65535, 0, 65535, 0, 65535, 0, 65535, 0, 4, 65531,
65535, 0, 65535, 0, 8, 65531, 65535, 0, 4, 65527,
65535, 0, 17, 65510, 65535, 0, 42, 65481, 65535, 0,
197, 65342, 65531, 65535, 0, 385, 65154, 65535, 0, 1005,
64522, 65535, 0, 8, 1985, 63469, 65533, 65535, 0, 38,
3119, 61884, 65514, 65535, 0, 4, 6, 67, 4961, 60804,
65472, 65535, 0, 17, 565, 9182, 56538, 65087, 65514, 65535,
0, 8, 63, 327, 2118, 14490, 52774, 63839, 65376, 65522,
65535, 0, 65535, 0, 65535, 0, 65535, 0, 65535, 0,
17, 65522, 65535, 0, 59, 65489, 65535, 0, 50, 65522,
65535, 0, 54, 65489, 65535, 0, 310, 65179, 65535, 0,
615, 64836, 65535, 0, 4, 1503, 63965, 65535, 0, 2780,
63383, 65535, 0, 21, 3919, 61051, 65527, 65535, 0, 84,
6674, 59929, 65435, 65535, 0, 4, 255, 7976, 55784, 65150,
65518, 65531, 65535, 0, 4, 8, 582, 10726, 53465, 64949,
65518, 65535, 0, 29, 339, 3006, 17555, 49517, 62956, 65200,
65497, 65531, 65535, 0, 2, 33, 138, 565, 2324, 7670,
22089, 45966, 58949, 63479, 64966, 65380, 65518, 65535, 0, 65535,
0, 65535, 0, 2, 65533, 65535, 0, 46, 65514, 65535,
0, 414, 65091, 65535, 0, 540, 64911, 65535, 0, 419,
65162, 65535, 0, 976, 64790, 65535, 0, 2977, 62495, 65531,
65535, 0, 4, 3852, 61034, 65527, 65535, 0, 4, 29,
6021, 60243, 65468, 65535, 0, 84, 6711, 58066, 65418, 65535,
0, 13, 281, 9550, 54917, 65125, 65506, 65535, 0, 2,
63, 984, 12108, 52644, 64342, 65435, 65527, 65535, 0, 29,
251, 2014, 14871, 47553, 62881, 65229, 65518, 65535, 0, 13,
142, 749, 4220, 18497, 45200, 60913, 64823, 65426, 65527, 65535,
0, 13, 71, 264, 1176, 3789, 10500, 24480, 43488, 56324,
62315, 64493, 65242, 65464, 65514, 65522, 65531, 65535, 0, 4,
13, 38, 109, 205, 448, 850, 1708, 3429, 6276, 11371,
19221, 29734, 40955, 49391, 55411, 59460, 62102, 63793, 64656,
65150, 65401, 65485, 65522, 65531, 65535, 0, 65535, 0, 2, 65533,
65535, 0, 1160, 65476, 65535, 0, 2, 6640, 64763, 65533,
65535, 0, 2, 38, 9923, 61009, 65527, 65535, 0, 2,
4949, 63092, 65533, 65535, 0, 2, 3090, 63398, 65533, 65535,
0, 2, 2520, 58744, 65510, 65535, 0, 2, 13, 544,
8784, 51403, 65148, 65533, 65535, 0, 2, 25, 1017, 10412,
43550, 63651, 65489, 65527, 65535, 0, 2, 4, 29, 783,
13377, 52462, 64524, 65495, 65533, 65535, 0, 2, 4, 6,
100, 1817, 18451, 52590, 63559, 65376, 65531, 65535, 0, 2,
4, 6, 46, 385, 2562, 11225, 37416, 60488, 65026, 65487,
65529, 65533, 65535, 0, 2, 4, 6, 8, 10, 12,
42, 222, 971, 5221, 19811, 45048, 60312, 64486, 65294, 65474,
65525, 65529, 65533, 65535, 0, 2, 4, 8, 71, 167,
666, 2533, 7875, 19622, 38082, 54359, 62108, 64633, 65290, 65495,
65529, 65533, 65535, 0, 2, 4, 6, 8, 10, 13,
109, 586, 1930, 4949, 11600, 22641, 36125, 48312, 56899, 61495,
63927, 64932, 65389, 65489, 65518, 65531, 65533, 65535, 0, 4,
6, 8, 67, 209, 712, 1838, 4195, 8432, 14432, 22834,
31723, 40523, 48139, 53929, 57865, 60657, 62403, 63584, 64363,
64907, 65167, 65372, 65472, 65514, 65535, 0, 2, 4, 13, 25,
42, 46, 50, 75, 113, 147, 281, 448, 657, 909,
1185, 1591, 1976, 2600, 3676, 5317, 7398, 9914, 12941, 16169,
19477, 22885, 26464, 29851, 33360, 37228, 41139, 44802, 48654,
52058, 55181, 57676, 59581, 61022, 62190, 63107, 63676, 64199,
64547, 64924, 65158, 65313, 65430, 65481, 65518, 65535 };
/* pointers to cdf tables for quantizer indices */
/* Start address of each of the 12 gain CDF sub-tables above. */
const uint16_t *WebRtcIsac_kQKltCdfPtrGain[12] = {
WebRtcIsac_kQKltCdfGain +0 +0, WebRtcIsac_kQKltCdfGain +0 +8,
WebRtcIsac_kQKltCdfGain +0 +22, WebRtcIsac_kQKltCdfGain +0 +32,
WebRtcIsac_kQKltCdfGain +0 +48, WebRtcIsac_kQKltCdfGain +0 +60,
WebRtcIsac_kQKltCdfGain +0 +81, WebRtcIsac_kQKltCdfGain +0 +95,
WebRtcIsac_kQKltCdfGain +0 +128, WebRtcIsac_kQKltCdfGain +0 +152,
WebRtcIsac_kQKltCdfGain +0 +210, WebRtcIsac_kQKltCdfGain +0 +264 };
/* Start address of each of the 108 shape CDF sub-tables above. */
const uint16_t *WebRtcIsac_kQKltCdfPtrShape[108] = {
WebRtcIsac_kQKltCdfShape +0 +0, WebRtcIsac_kQKltCdfShape +0 +2,
WebRtcIsac_kQKltCdfShape +0 +4, WebRtcIsac_kQKltCdfShape +0 +6,
WebRtcIsac_kQKltCdfShape +0 +8, WebRtcIsac_kQKltCdfShape +0 +10,
WebRtcIsac_kQKltCdfShape +0 +12, WebRtcIsac_kQKltCdfShape +0 +14,
WebRtcIsac_kQKltCdfShape +0 +16, WebRtcIsac_kQKltCdfShape +0 +18,
WebRtcIsac_kQKltCdfShape +0 +21, WebRtcIsac_kQKltCdfShape +0 +25,
WebRtcIsac_kQKltCdfShape +0 +29, WebRtcIsac_kQKltCdfShape +0 +33,
WebRtcIsac_kQKltCdfShape +0 +37, WebRtcIsac_kQKltCdfShape +0 +43,
WebRtcIsac_kQKltCdfShape +0 +49, WebRtcIsac_kQKltCdfShape +0 +56,
WebRtcIsac_kQKltCdfShape +0 +64, WebRtcIsac_kQKltCdfShape +0 +66,
WebRtcIsac_kQKltCdfShape +0 +68, WebRtcIsac_kQKltCdfShape +0 +70,
WebRtcIsac_kQKltCdfShape +0 +72, WebRtcIsac_kQKltCdfShape +0 +75,
WebRtcIsac_kQKltCdfShape +0 +77, WebRtcIsac_kQKltCdfShape +0 +79,
WebRtcIsac_kQKltCdfShape +0 +81, WebRtcIsac_kQKltCdfShape +0 +83,
WebRtcIsac_kQKltCdfShape +0 +86, WebRtcIsac_kQKltCdfShape +0 +90,
WebRtcIsac_kQKltCdfShape +0 +94, WebRtcIsac_kQKltCdfShape +0 +98,
WebRtcIsac_kQKltCdfShape +0 +102, WebRtcIsac_kQKltCdfShape +0 +107,
WebRtcIsac_kQKltCdfShape +0 +113, WebRtcIsac_kQKltCdfShape +0 +120,
WebRtcIsac_kQKltCdfShape +0 +129, WebRtcIsac_kQKltCdfShape +0 +131,
WebRtcIsac_kQKltCdfShape +0 +133, WebRtcIsac_kQKltCdfShape +0 +135,
WebRtcIsac_kQKltCdfShape +0 +137, WebRtcIsac_kQKltCdfShape +0 +141,
WebRtcIsac_kQKltCdfShape +0 +143, WebRtcIsac_kQKltCdfShape +0 +147,
WebRtcIsac_kQKltCdfShape +0 +151, WebRtcIsac_kQKltCdfShape +0 +155,
WebRtcIsac_kQKltCdfShape +0 +159, WebRtcIsac_kQKltCdfShape +0 +164,
WebRtcIsac_kQKltCdfShape +0 +168, WebRtcIsac_kQKltCdfShape +0 +172,
WebRtcIsac_kQKltCdfShape +0 +178, WebRtcIsac_kQKltCdfShape +0 +184,
WebRtcIsac_kQKltCdfShape +0 +192, WebRtcIsac_kQKltCdfShape +0 +200,
WebRtcIsac_kQKltCdfShape +0 +211, WebRtcIsac_kQKltCdfShape +0 +213,
WebRtcIsac_kQKltCdfShape +0 +215, WebRtcIsac_kQKltCdfShape +0 +217,
WebRtcIsac_kQKltCdfShape +0 +219, WebRtcIsac_kQKltCdfShape +0 +223,
WebRtcIsac_kQKltCdfShape +0 +227, WebRtcIsac_kQKltCdfShape +0 +231,
WebRtcIsac_kQKltCdfShape +0 +235, WebRtcIsac_kQKltCdfShape +0 +239,
WebRtcIsac_kQKltCdfShape +0 +243, WebRtcIsac_kQKltCdfShape +0 +248,
WebRtcIsac_kQKltCdfShape +0 +252, WebRtcIsac_kQKltCdfShape +0 +258,
WebRtcIsac_kQKltCdfShape +0 +264, WebRtcIsac_kQKltCdfShape +0 +273,
WebRtcIsac_kQKltCdfShape +0 +282, WebRtcIsac_kQKltCdfShape +0 +293,
WebRtcIsac_kQKltCdfShape +0 +308, WebRtcIsac_kQKltCdfShape +0 +310,
WebRtcIsac_kQKltCdfShape +0 +312, WebRtcIsac_kQKltCdfShape +0 +316,
WebRtcIsac_kQKltCdfShape +0 +320, WebRtcIsac_kQKltCdfShape +0 +324,
WebRtcIsac_kQKltCdfShape +0 +328, WebRtcIsac_kQKltCdfShape +0 +332,
WebRtcIsac_kQKltCdfShape +0 +336, WebRtcIsac_kQKltCdfShape +0 +341,
WebRtcIsac_kQKltCdfShape +0 +347, WebRtcIsac_kQKltCdfShape +0 +354,
WebRtcIsac_kQKltCdfShape +0 +360, WebRtcIsac_kQKltCdfShape +0 +368,
WebRtcIsac_kQKltCdfShape +0 +378, WebRtcIsac_kQKltCdfShape +0 +388,
WebRtcIsac_kQKltCdfShape +0 +400, WebRtcIsac_kQKltCdfShape +0 +418,
WebRtcIsac_kQKltCdfShape +0 +445, WebRtcIsac_kQKltCdfShape +0 +447,
WebRtcIsac_kQKltCdfShape +0 +451, WebRtcIsac_kQKltCdfShape +0 +455,
WebRtcIsac_kQKltCdfShape +0 +461, WebRtcIsac_kQKltCdfShape +0 +468,
WebRtcIsac_kQKltCdfShape +0 +474, WebRtcIsac_kQKltCdfShape +0 +480,
WebRtcIsac_kQKltCdfShape +0 +486, WebRtcIsac_kQKltCdfShape +0 +495,
WebRtcIsac_kQKltCdfShape +0 +505, WebRtcIsac_kQKltCdfShape +0 +516,
WebRtcIsac_kQKltCdfShape +0 +528, WebRtcIsac_kQKltCdfShape +0 +543,
WebRtcIsac_kQKltCdfShape +0 +564, WebRtcIsac_kQKltCdfShape +0 +583,
WebRtcIsac_kQKltCdfShape +0 +608, WebRtcIsac_kQKltCdfShape +0 +635 };
/* left KLT transforms */
const double WebRtcIsac_kKltT1Gain[4] = {
-0.79742827, 0.60341375, 0.60341375, 0.79742827 };
const double WebRtcIsac_kKltT1Shape[324] = {
0.00159597, 0.00049320, 0.00513821, 0.00021066, 0.01338581,
-0.00422367, -0.00272072, 0.00935107, 0.02047622, 0.02691189,
0.00478236, 0.03969702, 0.00886698, 0.04877604, -0.10898362,
-0.05930891, -0.03415047, 0.98889721, 0.00293558, -0.00035282,
0.01156321, -0.00195341, -0.00937631, 0.01052213, -0.02551163,
0.01644059, 0.03189927, 0.07754773, -0.08742313, -0.03026338,
0.05136248, -0.14395974, 0.17725040, 0.22664856, 0.93380230,
0.07076411, 0.00557890, -0.00222834, 0.01377569, 0.01466808,
0.02847361, -0.00603178, 0.02382480, -0.01210452, 0.03797267,
-0.02371480, 0.11260335, -0.07366682, 0.00453436, -0.04136941,
-0.07912843, -0.95031418, 0.25295337, -0.05302216, -0.00617554,
-0.00044040, -0.00653778, 0.01097838, 0.01529174, 0.01374431,
-0.00748512, -0.00020034, 0.02432713, 0.11101570, -0.08556891,
0.09282249, -0.01029446, 0.67556443, -0.67454300, 0.06910063,
0.20866865, -0.10318050, 0.00932175, 0.00524058, 0.00803610,
-0.00594676, -0.01082578, 0.01069906, 0.00546768, 0.01565291,
0.06816200, 0.10201227, 0.16812734, 0.22984074, 0.58213170,
-0.54138651, -0.51379962, 0.06847390, -0.01920037, -0.04592324,
-0.00467394, 0.00328858, 0.00377424, -0.00987448, 0.08222096,
-0.00377301, 0.04551941, -0.02592517, 0.16317082, 0.13077530,
0.22702921, -0.31215289, -0.69645962, -0.38047101, -0.39339411,
0.11124777, 0.02508035, -0.00708074, 0.00400344, 0.00040331,
0.01142402, 0.01725406, 0.01635170, 0.14285366, 0.03949233,
-0.05905676, 0.05877154, -0.17497577, -0.32479440, 0.80754464,
-0.38085603, -0.17055430, -0.03168622, -0.07531451, 0.02942002,
-0.02148095, -0.00754114, -0.00322372, 0.00567812, -0.01701521,
-0.12358320, 0.11473564, 0.09070136, 0.06533068, -0.22560802,
0.19209022, 0.81605094, 0.36592275, -0.09919829, 0.16667122,
0.16300725, 0.04803807, 0.06739263, -0.00156752, -0.01685302,
-0.00905240, -0.02297836, -0.00469939, 0.06310613, -0.16391930,
0.10919511, 0.12529293, 0.85581322, -0.32145522, 0.24539076,
0.07181839, 0.07289591, 0.14066759, 0.10406711, 0.05815518,
0.01072680, -0.00759339, 0.00053486, -0.00044865, 0.03407361,
0.01645348, 0.08758579, 0.27722240, 0.53665485, -0.74853376,
-0.01118192, -0.19805430, 0.06130619, -0.09675299, 0.08978480,
0.03405255, -0.00706867, 0.05102045, 0.03250746, 0.01849966,
-0.01216314, -0.01184187, -0.01579288, 0.00114807, 0.11376166,
0.88342114, -0.36425379, 0.13863190, 0.12524180, -0.13553892,
0.04715856, -0.12341103, 0.04531568, 0.01899360, -0.00206897,
0.00567768, -0.01444163, 0.00411946, -0.00855896, 0.00381663,
-0.01664861, -0.05534280, 0.21328278, 0.20161162, 0.72360394,
0.59130708, -0.08043791, 0.08757349, -0.13893918, -0.05147377,
0.02680690, -0.01144070, 0.00625162, -0.00634215, -0.01248947,
-0.00329455, -0.00609625, -0.00136305, -0.05097048, -0.01029851,
0.25065384, -0.16856837, -0.07123372, 0.15992623, -0.39487617,
-0.79972301, 0.18118185, -0.04826639, -0.01805578, -0.02927253,
-0.16400618, 0.07472763, 0.10376449, 0.01705406, 0.01065801,
-0.01500498, 0.02039914, 0.37776349, -0.84484186, 0.10434286,
0.15616990, 0.13474456, -0.00906238, -0.25238368, -0.03820885,
-0.10650905, -0.03880833, -0.03660028, -0.09640894, 0.00583314,
0.01922097, 0.01489911, -0.02431117, -0.09372217, 0.39404721,
-0.84786223, -0.31277121, 0.03193850, 0.01974060, 0.01887901,
0.00337911, -0.11359599, -0.02792521, -0.03220184, -0.01533311,
0.00015962, -0.04225043, -0.00933965, 0.00675311, 0.00206060,
0.15926771, 0.40199829, -0.80792558, -0.35591604, -0.17169764,
0.02830436, 0.02459982, -0.03438589, 0.00718705, -0.01798329,
-0.01594508, -0.00702430, -0.00952419, -0.00962701, -0.01307212,
-0.01749740, 0.01299602, 0.00587270, -0.36103108, -0.82039266,
-0.43092844, -0.08500097, -0.04361674, -0.00333482, 0.01250434,
-0.02538295, -0.00921797, 0.01645071, -0.01400872, 0.00317607,
0.00003277, -0.01617646, -0.00616863, -0.00882661, 0.00466157,
0.00353237, 0.91803104, -0.39503305, -0.02048964, 0.00060125,
0.01980634, 0.00300109, 0.00313880, 0.00657337, 0.00715163,
0.00000261, 0.00854276, -0.00154825, -0.00516128, 0.00909527,
0.00095609, 0.00701196, -0.00221867, -0.00156741 };
/* right KLT transforms */
const double WebRtcIsac_kKltT2Gain[36] = {
0.14572837, -0.45446306, 0.61990621, -0.52197033, 0.32145074,
-0.11026900, -0.20698282, 0.48962182, -0.27127933, -0.33627476,
0.65094037, -0.32715751, 0.40262573, -0.47844405, -0.33876075,
0.44130653, 0.37383966, -0.39964662, -0.51730480, 0.06611973,
0.49030187, 0.47512886, -0.02141226, -0.51129451, -0.58578569,
-0.39132064, -0.13187771, 0.15649421, 0.40735596, 0.54396897,
0.40381276, 0.40904942, 0.41179766, 0.41167576, 0.40840251,
0.40468132 };
const double WebRtcIsac_kKltT2Shape[36] = {
0.13427386, -0.35132558, 0.52506528, -0.59419077, 0.45075085,
-0.16312057, 0.29857439, -0.58660147, 0.34265431, 0.20879510,
-0.56063262, 0.30238345, 0.43308283, -0.41186999, -0.35288681,
0.42768996, 0.36094634, -0.45284910, -0.47116680, 0.02893449,
0.54326135, 0.45249040, -0.06264420, -0.52283830, 0.57137758,
0.44298139, 0.12617554, -0.20819946, -0.42324603, -0.48876443,
0.39597050, 0.40713935, 0.41389880, 0.41512486, 0.41130400,
0.40575001 };
/* means of log gains and LAR coefficients*/
const double WebRtcIsac_kLpcMeansGain[12] = {
-6.86881911, -5.35075273, -6.86792680, -5.36200897, -6.86401538,
-5.36921533, -6.86802969, -5.36893966, -6.86538097, -5.36315063,
-6.85535304, -5.35155315 };
/* Mean values of the LAR (shape) coefficients, removed before KLT
 * quantization; offline-generated table, keep byte-exact. */
const double WebRtcIsac_kLpcMeansShape[108] = {
-0.91232981, 0.26258634, -0.33716701, 0.08477430, -0.03378426,
0.14423909, 0.07036185, 0.06155019, 0.01490385, 0.04138740,
0.01427317, 0.01288970, 0.83872106, 0.25750199, 0.07988929,
-0.01957923, 0.00831390, 0.01770300, -0.90957164, 0.25732216,
-0.33385344, 0.08735740, -0.03715332, 0.14584917, 0.06998990,
0.06131968, 0.01504379, 0.04067339, 0.01428039, 0.01406460,
0.83846243, 0.26169862, 0.08109025, -0.01767055, 0.00970539,
0.01954310, -0.90490803, 0.24656405, -0.33578607, 0.08843286,
-0.03749139, 0.14443959, 0.07214669, 0.06170993, 0.01449947,
0.04134309, 0.01314762, 0.01413471, 0.83895203, 0.26748062,
0.08197507, -0.01781298, 0.00885967, 0.01922394, -0.90922472,
0.24495889, -0.33921540, 0.08877169, -0.03581332, 0.14199172,
0.07444032, 0.06185940, 0.01502054, 0.04185113, 0.01276579,
0.01355457, 0.83645358, 0.26631720, 0.08119697, -0.01835449,
0.00788512, 0.01846446, -0.90482253, 0.24658310, -0.34019734,
0.08281090, -0.03486038, 0.14359248, 0.07401336, 0.06001471,
0.01528421, 0.04254560, 0.01321472, 0.01240799, 0.83857127,
0.26281654, 0.08174380, -0.02099842, 0.00755176, 0.01699448,
-0.90132307, 0.25174308, -0.33838268, 0.07883863, -0.02877906,
0.14105407, 0.07220290, 0.06000352, 0.01684879, 0.04226844,
0.01331331, 0.01269244, 0.83832138, 0.25467485, 0.08118028,
-0.02120528, 0.00747832, 0.01567212 };

View File

@ -1,99 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* lpc_tables.h
*
* header file for coding tables for the LPC coefficients
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_TABLES_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_TABLES_H_
#include "modules/audio_coding/codecs/isac/main/source/settings.h"
#include "modules/audio_coding/codecs/isac/main/source/structs.h"
/* Configuration constants for the KLT-based quantizer of the LPC
 * gain/shape parameters. */
#define KLT_STEPSIZE 1.00000000
#define KLT_NUM_AVG_GAIN 0
#define KLT_NUM_AVG_SHAPE 0
#define KLT_NUM_MODELS 3
#define LPC_GAIN_SCALE 4.000f
#define LPC_LOBAND_SCALE 2.100f
#define LPC_LOBAND_ORDER ORDERLO
#define LPC_HIBAND_SCALE 0.450f
#define LPC_HIBAND_ORDER ORDERHI
#define LPC_GAIN_ORDER 2
#define LPC_SHAPE_ORDER (LPC_LOBAND_ORDER + LPC_HIBAND_ORDER)
#define KLT_ORDER_GAIN (LPC_GAIN_ORDER * SUBFRAMES)
#define KLT_ORDER_SHAPE (LPC_SHAPE_ORDER * SUBFRAMES)
/* NOTE(review): the hard-coded sizes below (12, 108, ...) presumably equal
 * KLT_ORDER_GAIN / KLT_ORDER_SHAPE for the SUBFRAMES value in settings.h —
 * confirm before changing either. */
/* cdf array for model indicator */
extern const uint16_t WebRtcIsac_kQKltModelCdf[KLT_NUM_MODELS + 1];
/* pointer to cdf array for model indicator */
extern const uint16_t* WebRtcIsac_kQKltModelCdfPtr[1];
/* initial cdf index for decoder of model indicator */
extern const uint16_t WebRtcIsac_kQKltModelInitIndex[1];
/* offset to go from rounded value to quantization index */
extern const short WebRtcIsac_kQKltQuantMinGain[12];
extern const short WebRtcIsac_kQKltQuantMinShape[108];
/* maximum quantization index */
extern const uint16_t WebRtcIsac_kQKltMaxIndGain[12];
extern const uint16_t WebRtcIsac_kQKltMaxIndShape[108];
/* index offset */
extern const uint16_t WebRtcIsac_kQKltOffsetGain[12];
extern const uint16_t WebRtcIsac_kQKltOffsetShape[108];
/* initial cdf index for KLT coefficients */
extern const uint16_t WebRtcIsac_kQKltInitIndexGain[12];
extern const uint16_t WebRtcIsac_kQKltInitIndexShape[108];
/* quantizer representation levels */
extern const double WebRtcIsac_kQKltLevelsGain[392];
extern const double WebRtcIsac_kQKltLevelsShape[578];
/* cdf tables for quantizer indices */
extern const uint16_t WebRtcIsac_kQKltCdfGain[404];
extern const uint16_t WebRtcIsac_kQKltCdfShape[686];
/* pointers to cdf tables for quantizer indices */
extern const uint16_t* WebRtcIsac_kQKltCdfPtrGain[12];
extern const uint16_t* WebRtcIsac_kQKltCdfPtrShape[108];
/* left KLT transforms */
extern const double WebRtcIsac_kKltT1Gain[4];
extern const double WebRtcIsac_kKltT1Shape[324];
/* right KLT transforms */
extern const double WebRtcIsac_kKltT2Gain[36];
extern const double WebRtcIsac_kKltT2Shape[36];
/* means of log gains and LAR coefficients */
extern const double WebRtcIsac_kLpcMeansGain[12];
extern const double WebRtcIsac_kLpcMeansShape[108];
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_TABLES_H_ */

View File

@ -1,104 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.h"
#include "modules/audio_coding/codecs/isac/main/source/settings.h"
/* header file for coding tables for the pitch filter side-info in the entropy coder */
/********************* Pitch Filter Gain Coefficient Tables ************************/
/* cdf for quantized pitch filter gains */
/* 255-entry cumulative distribution (16-bit fixed point, max 65535) used
 * by the entropy coder for quantized pitch-filter gain indices.
 * NOTE(review): the long trailing run of 65535 pads symbols with zero
 * probability; table is offline-generated — keep byte-exact. */
const uint16_t WebRtcIsac_kQPitchGainCdf[255] = {
0, 2, 4, 6, 64, 901, 903, 905, 16954, 16956,
16961, 17360, 17362, 17364, 17366, 17368, 17370, 17372, 17374, 17411,
17514, 17516, 17583, 18790, 18796, 18802, 20760, 20777, 20782, 21722,
21724, 21728, 21738, 21740, 21742, 21744, 21746, 21748, 22224, 22227,
22230, 23214, 23229, 23239, 25086, 25108, 25120, 26088, 26094, 26098,
26175, 26177, 26179, 26181, 26183, 26185, 26484, 26507, 26522, 27705,
27731, 27750, 29767, 29799, 29817, 30866, 30883, 30885, 31025, 31029,
31031, 31033, 31035, 31037, 31114, 31126, 31134, 32687, 32722, 32767,
35718, 35742, 35757, 36943, 36952, 36954, 37115, 37128, 37130, 37132,
37134, 37136, 37143, 37145, 37152, 38843, 38863, 38897, 47458, 47467,
47474, 49040, 49061, 49063, 49145, 49157, 49159, 49161, 49163, 49165,
49167, 49169, 49171, 49757, 49770, 49782, 61333, 61344, 61346, 62860,
62883, 62885, 62887, 62889, 62891, 62893, 62895, 62897, 62899, 62901,
62903, 62905, 62907, 62909, 65496, 65498, 65500, 65521, 65523, 65525,
65527, 65529, 65531, 65533, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535};
/* index limits and ranges */
/* Smallest admissible quantization index for each of the three gain
 * dimensions (paired with WebRtcIsac_kIndexUpperLimitGain below). */
const int16_t WebRtcIsac_kIndexLowerLimitGain[3] = {
-7, -2, -1};
/* Largest admissible quantization index for each of the three gain
 * dimensions (paired with WebRtcIsac_kIndexLowerLimitGain above). */
const int16_t WebRtcIsac_kIndexUpperLimitGain[3] = {
0, 3, 1};
/* Multipliers used when combining the per-dimension gain indices into a
 * single table index — NOTE(review): inferred from the name; confirm
 * against the pitch-gain encode/decode code. */
const uint16_t WebRtcIsac_kIndexMultsGain[2] = {
18, 3};
/* size of cdf table */
/* size of cdf table (number of intervals = 255 cdf entries + 1) */
const uint16_t WebRtcIsac_kQCdfTableSizeGain[1] = {
256};
///////////////////////////FIXED POINT
/* mean values of pitch filter gains in FIXED point */
/* Mean pitch-filter gains, Q12 fixed point (first of four tables; the
 * "FIXED point" comment above applies). Offline-generated 144-entry
 * table — NOTE(review): presumably 9x16, confirm layout before editing. */
const int16_t WebRtcIsac_kQMeanGain1Q12[144] = {
843, 1092, 1336, 1222, 1405, 1656, 1500, 1815, 1843, 1838, 1839, 1843, 1843, 1843, 1843, 1843,
1843, 1843, 814, 846, 1092, 1013, 1174, 1383, 1391, 1511, 1584, 1734, 1753, 1843, 1843, 1843,
1843, 1843, 1843, 1843, 524, 689, 777, 845, 947, 1069, 1090, 1263, 1380, 1447, 1559, 1676,
1645, 1749, 1843, 1843, 1843, 1843, 81, 477, 563, 611, 706, 806, 849, 1012, 1192, 1128,
1330, 1489, 1425, 1576, 1826, 1741, 1843, 1843, 0, 290, 305, 356, 488, 575, 602, 741,
890, 835, 1079, 1196, 1182, 1376, 1519, 1506, 1680, 1843, 0, 47, 97, 69, 289, 381,
385, 474, 617, 664, 803, 1079, 935, 1160, 1269, 1265, 1506, 1741, 0, 0, 0, 0,
112, 120, 190, 283, 442, 343, 526, 809, 684, 935, 1134, 1020, 1265, 1506, 0, 0,
0, 0, 0, 0, 0, 111, 256, 87, 373, 597, 430, 684, 935, 770, 1020, 1265};
/* Mean pitch-filter gains, Q12 fixed point (second of four tables). */
const int16_t WebRtcIsac_kQMeanGain2Q12[144] = {
1760, 1525, 1285, 1747, 1671, 1393, 1843, 1826, 1555, 1843, 1784, 1606, 1843, 1843, 1711, 1843,
1843, 1814, 1389, 1275, 1040, 1564, 1414, 1252, 1610, 1495, 1343, 1753, 1592, 1405, 1804, 1720,
1475, 1843, 1814, 1581, 1208, 1061, 856, 1349, 1148, 994, 1390, 1253, 1111, 1495, 1343, 1178,
1770, 1465, 1234, 1814, 1581, 1342, 1040, 793, 713, 1053, 895, 737, 1128, 1003, 861, 1277,
1094, 981, 1475, 1192, 1019, 1581, 1342, 1098, 855, 570, 483, 833, 648, 540, 948, 744,
572, 1009, 844, 636, 1234, 934, 685, 1342, 1217, 984, 537, 318, 124, 603, 423, 350,
687, 479, 322, 791, 581, 430, 987, 671, 488, 1098, 849, 597, 283, 27, 0, 397,
222, 38, 513, 271, 124, 624, 325, 157, 737, 484, 233, 849, 597, 343, 27, 0,
0, 141, 0, 0, 256, 69, 0, 370, 87, 0, 484, 229, 0, 597, 343, 87};
/* Mean pitch-filter gains, Q12 fixed point (third of four tables). */
const int16_t WebRtcIsac_kQMeanGain3Q12[144] = {
1843, 1843, 1711, 1843, 1818, 1606, 1843, 1827, 1511, 1814, 1639, 1393, 1760, 1525, 1285, 1656,
1419, 1176, 1835, 1718, 1475, 1841, 1650, 1387, 1648, 1498, 1287, 1600, 1411, 1176, 1522, 1299,
1040, 1419, 1176, 928, 1773, 1461, 1128, 1532, 1355, 1202, 1429, 1260, 1115, 1398, 1151, 1025,
1172, 1080, 790, 1176, 928, 677, 1475, 1147, 1019, 1276, 1096, 922, 1214, 1010, 901, 1057,
893, 800, 1040, 796, 734, 928, 677, 424, 1137, 897, 753, 1120, 830, 710, 875, 751,
601, 795, 642, 583, 790, 544, 475, 677, 474, 140, 987, 750, 482, 697, 573, 450,
691, 487, 303, 661, 394, 332, 537, 303, 220, 424, 168, 0, 737, 484, 229, 624,
348, 153, 441, 261, 136, 397, 166, 51, 283, 27, 0, 168, 0, 0, 484, 229,
0, 370, 57, 0, 256, 43, 0, 141, 0, 0, 27, 0, 0, 0, 0, 0};
/* Mean pitch-filter gains, Q12 fixed point (fourth of four tables). */
const int16_t WebRtcIsac_kQMeanGain4Q12[144] = {
1843, 1843, 1843, 1843, 1841, 1843, 1500, 1821, 1843, 1222, 1434, 1656, 843, 1092, 1336, 504,
757, 1007, 1843, 1843, 1843, 1838, 1791, 1843, 1265, 1505, 1599, 965, 1219, 1425, 730, 821,
1092, 249, 504, 757, 1783, 1819, 1843, 1351, 1567, 1727, 1096, 1268, 1409, 805, 961, 1131,
444, 670, 843, 0, 249, 504, 1425, 1655, 1743, 1096, 1324, 1448, 822, 1019, 1199, 490,
704, 867, 81, 450, 555, 0, 0, 249, 1247, 1428, 1530, 881, 1073, 1283, 610, 759,
939, 278, 464, 645, 0, 200, 270, 0, 0, 0, 935, 1163, 1410, 528, 790, 1068,
377, 499, 717, 173, 240, 274, 0, 43, 62, 0, 0, 0, 684, 935, 1182, 343,
551, 735, 161, 262, 423, 0, 55, 27, 0, 0, 0, 0, 0, 0, 430, 684,
935, 87, 377, 597, 0, 46, 256, 0, 0, 0, 0, 0, 0, 0, 0, 0};

View File

@ -1,48 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* pitch_gain_tables.h
*
* This file contains tables for the pitch filter side-info in the entropy
* coder.
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_GAIN_TABLES_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_GAIN_TABLES_H_
#include <stdint.h>
/* header file for coding tables for the pitch filter side-info in the entropy
 * coder */
/********************* Pitch Filter Gain Coefficient Tables
 * ************************/
/* cdf for quantized pitch filter gains */
extern const uint16_t WebRtcIsac_kQPitchGainCdf[255];
/* index limits and ranges */
extern const int16_t WebRtcIsac_kIndexLowerLimitGain[3];
extern const int16_t WebRtcIsac_kIndexUpperLimitGain[3];
extern const uint16_t WebRtcIsac_kIndexMultsGain[2];
/* mean values of pitch filter gains, in Q12 fixed point */
extern const int16_t WebRtcIsac_kQMeanGain1Q12[144];
extern const int16_t WebRtcIsac_kQMeanGain2Q12[144];
extern const int16_t WebRtcIsac_kQMeanGain3Q12[144];
extern const int16_t WebRtcIsac_kQMeanGain4Q12[144];
/* size of cdf table */
extern const uint16_t WebRtcIsac_kQCdfTableSizeGain[1];
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_GAIN_TABLES_H_ */

Some files were not shown because too many files have changed in this diff Show More