Update to current webrtc library

This is from the upstream library commit id
3326535126e435f1ba647885ce43a8f0f3d317eb, corresponding to Chromium
88.0.4290.1.
Arun Raghavan
2020-10-12 18:08:02 -04:00
parent b1b02581d3
commit bcec8b0b21
859 changed files with 76187 additions and 49580 deletions

webrtc/api/array_view.h Normal file

@@ -0,0 +1,315 @@
/*
* Copyright 2015 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_ARRAY_VIEW_H_
#define API_ARRAY_VIEW_H_
#include <algorithm>
#include <array>
#include <type_traits>
#include "rtc_base/checks.h"
#include "rtc_base/type_traits.h"
namespace rtc {
// tl;dr: rtc::ArrayView is the same thing as gsl::span from the Guideline
// Support Library.
//
// Many functions read from or write to arrays. The obvious way to do this is
// to use two arguments, a pointer to the first element and an element count:
//
// bool Contains17(const int* arr, size_t size) {
// for (size_t i = 0; i < size; ++i) {
// if (arr[i] == 17)
// return true;
// }
// return false;
// }
//
// This is flexible, since it doesn't matter how the array is stored (C array,
// std::vector, rtc::Buffer, ...), but it's error-prone because the caller has
// to correctly specify the array length:
//
// Contains17(arr, arraysize(arr)); // C array
// Contains17(arr.data(), arr.size()); // std::vector
// Contains17(arr, size); // pointer + size
// ...
//
// It's also kind of messy to have two separate arguments for what is
// conceptually a single thing.
//
// Enter rtc::ArrayView<T>. It contains a T pointer (to an array it doesn't
// own) and a count, and supports the basic things you'd expect, such as
// indexing and iteration. It allows us to write our function like this:
//
// bool Contains17(rtc::ArrayView<const int> arr) {
// for (auto e : arr) {
// if (e == 17)
// return true;
// }
// return false;
// }
//
// And even better, because a bunch of things will implicitly convert to
// ArrayView, we can call it like this:
//
// Contains17(arr); // C array
// Contains17(arr); // std::vector
// Contains17(rtc::ArrayView<int>(arr, size)); // pointer + size
// Contains17(nullptr); // nullptr -> empty ArrayView
// ...
//
// ArrayView<T> stores both a pointer and a size, but you may also use
// ArrayView<T, N>, which has a size that's fixed at compile time (which means
// it only has to store the pointer).
//
// One important point is that ArrayView<T> and ArrayView<const T> are
// different types, which allow and don't allow mutation of the array elements,
// respectively. The implicit conversions work just like you'd hope, so that
// e.g. vector<int> will convert to either ArrayView<int> or ArrayView<const
// int>, but const vector<int> will convert only to ArrayView<const int>.
// (ArrayView itself can be the source type in such conversions, so
// ArrayView<int> will convert to ArrayView<const int>.)
//
// Note: ArrayView is tiny (just a pointer and a count if variable-sized, just
// a pointer if fixed-size) and trivially copyable, so it's probably cheaper to
// pass it by value than by const reference.
namespace impl {
// Magic constant for indicating that the size of an ArrayView is variable
// instead of fixed.
enum : std::ptrdiff_t { kArrayViewVarSize = -4711 };
// Base class for ArrayViews of fixed nonzero size.
template <typename T, std::ptrdiff_t Size>
class ArrayViewBase {
static_assert(Size > 0, "ArrayView size must be variable or non-negative");
public:
ArrayViewBase(T* data, size_t size) : data_(data) {}
static constexpr size_t size() { return Size; }
static constexpr bool empty() { return false; }
T* data() const { return data_; }
protected:
static constexpr bool fixed_size() { return true; }
private:
T* data_;
};
// Specialized base class for ArrayViews of fixed zero size.
template <typename T>
class ArrayViewBase<T, 0> {
public:
explicit ArrayViewBase(T* data, size_t size) {}
static constexpr size_t size() { return 0; }
static constexpr bool empty() { return true; }
T* data() const { return nullptr; }
protected:
static constexpr bool fixed_size() { return true; }
};
// Specialized base class for ArrayViews of variable size.
template <typename T>
class ArrayViewBase<T, impl::kArrayViewVarSize> {
public:
ArrayViewBase(T* data, size_t size)
: data_(size == 0 ? nullptr : data), size_(size) {}
size_t size() const { return size_; }
bool empty() const { return size_ == 0; }
T* data() const { return data_; }
protected:
static constexpr bool fixed_size() { return false; }
private:
T* data_;
size_t size_;
};
} // namespace impl
template <typename T, std::ptrdiff_t Size = impl::kArrayViewVarSize>
class ArrayView final : public impl::ArrayViewBase<T, Size> {
public:
using value_type = T;
using const_iterator = const T*;
// Construct an ArrayView from a pointer and a length.
template <typename U>
ArrayView(U* data, size_t size)
: impl::ArrayViewBase<T, Size>::ArrayViewBase(data, size) {
RTC_DCHECK_EQ(size == 0 ? nullptr : data, this->data());
RTC_DCHECK_EQ(size, this->size());
RTC_DCHECK_EQ(!this->data(),
this->size() == 0); // data is null iff size == 0.
}
// Construct an empty ArrayView. Note that fixed-size ArrayViews of size > 0
// cannot be empty.
ArrayView() : ArrayView(nullptr, 0) {}
ArrayView(std::nullptr_t) // NOLINT
: ArrayView() {}
ArrayView(std::nullptr_t, size_t size)
: ArrayView(static_cast<T*>(nullptr), size) {
static_assert(Size == 0 || Size == impl::kArrayViewVarSize, "");
RTC_DCHECK_EQ(0, size);
}
// Construct an ArrayView from a C-style array.
template <typename U, size_t N>
ArrayView(U (&array)[N]) // NOLINT
: ArrayView(array, N) {
static_assert(Size == N || Size == impl::kArrayViewVarSize,
"Array size must match ArrayView size");
}
// (Only if size is fixed.) Construct a fixed size ArrayView<T, N> from a
// non-const std::array instance. For an ArrayView with variable size, the
// used ctor is ArrayView(U& u) instead.
template <typename U,
size_t N,
typename std::enable_if<
Size == static_cast<std::ptrdiff_t>(N)>::type* = nullptr>
ArrayView(std::array<U, N>& u) // NOLINT
: ArrayView(u.data(), u.size()) {}
// (Only if size is fixed.) Construct a fixed size ArrayView<T, N> where T is
// const from a const(expr) std::array instance. For an ArrayView with
// variable size, the used ctor is ArrayView(U& u) instead.
template <typename U,
size_t N,
typename std::enable_if<
Size == static_cast<std::ptrdiff_t>(N)>::type* = nullptr>
ArrayView(const std::array<U, N>& u) // NOLINT
: ArrayView(u.data(), u.size()) {}
// (Only if size is fixed.) Construct an ArrayView from any type U that has a
// static constexpr size() method whose return value is equal to Size, and a
// data() method whose return value converts implicitly to T*. In particular,
// this means we allow conversion from ArrayView<T, N> to ArrayView<const T,
// N>, but not the other way around. We also don't allow conversion from
// ArrayView<T> to ArrayView<T, N>, or from ArrayView<T, M> to ArrayView<T,
// N> when M != N.
template <
typename U,
typename std::enable_if<Size != impl::kArrayViewVarSize &&
HasDataAndSize<U, T>::value>::type* = nullptr>
ArrayView(U& u) // NOLINT
: ArrayView(u.data(), u.size()) {
static_assert(U::size() == Size, "Sizes must match exactly");
}
template <
typename U,
typename std::enable_if<Size != impl::kArrayViewVarSize &&
HasDataAndSize<U, T>::value>::type* = nullptr>
ArrayView(const U& u) // NOLINT(runtime/explicit)
: ArrayView(u.data(), u.size()) {
static_assert(U::size() == Size, "Sizes must match exactly");
}
// (Only if size is variable.) Construct an ArrayView from any type U that
// has a size() method whose return value converts implicitly to size_t, and
// a data() method whose return value converts implicitly to T*. In
// particular, this means we allow conversion from ArrayView<T> to
// ArrayView<const T>, but not the other way around. Other allowed
// conversions include
// ArrayView<T, N> to ArrayView<T> or ArrayView<const T>,
// std::vector<T> to ArrayView<T> or ArrayView<const T>,
// const std::vector<T> to ArrayView<const T>,
// rtc::Buffer to ArrayView<uint8_t> or ArrayView<const uint8_t>, and
// const rtc::Buffer to ArrayView<const uint8_t>.
template <
typename U,
typename std::enable_if<Size == impl::kArrayViewVarSize &&
HasDataAndSize<U, T>::value>::type* = nullptr>
ArrayView(U& u) // NOLINT
: ArrayView(u.data(), u.size()) {}
template <
typename U,
typename std::enable_if<Size == impl::kArrayViewVarSize &&
HasDataAndSize<U, T>::value>::type* = nullptr>
ArrayView(const U& u) // NOLINT(runtime/explicit)
: ArrayView(u.data(), u.size()) {}
// Indexing and iteration. These allow mutation even if the ArrayView is
// const, because the ArrayView doesn't own the array. (To prevent mutation,
// use a const element type.)
T& operator[](size_t idx) const {
RTC_DCHECK_LT(idx, this->size());
RTC_DCHECK(this->data());
return this->data()[idx];
}
T* begin() const { return this->data(); }
T* end() const { return this->data() + this->size(); }
const T* cbegin() const { return this->data(); }
const T* cend() const { return this->data() + this->size(); }
ArrayView<T> subview(size_t offset, size_t size) const {
return offset < this->size()
? ArrayView<T>(this->data() + offset,
std::min(size, this->size() - offset))
: ArrayView<T>();
}
ArrayView<T> subview(size_t offset) const {
return subview(offset, this->size());
}
};
// Comparing two ArrayViews compares their (pointer,size) pairs; it does *not*
// dereference the pointers.
template <typename T, std::ptrdiff_t Size1, std::ptrdiff_t Size2>
bool operator==(const ArrayView<T, Size1>& a, const ArrayView<T, Size2>& b) {
return a.data() == b.data() && a.size() == b.size();
}
template <typename T, std::ptrdiff_t Size1, std::ptrdiff_t Size2>
bool operator!=(const ArrayView<T, Size1>& a, const ArrayView<T, Size2>& b) {
return !(a == b);
}
// Variable-size ArrayViews are the size of two pointers; fixed-size ArrayViews
// are the size of one pointer. (And as a special case, fixed-size ArrayViews
// of size 0 require no storage.)
static_assert(sizeof(ArrayView<int>) == 2 * sizeof(int*), "");
static_assert(sizeof(ArrayView<int, 17>) == sizeof(int*), "");
static_assert(std::is_empty<ArrayView<int, 0>>::value, "");
template <typename T>
inline ArrayView<T> MakeArrayView(T* data, size_t size) {
return ArrayView<T>(data, size);
}
// Only for primitive types that have the same size and alignment.
// Allow reinterpret cast of the array view to another primitive type of the
// same size.
// Template arguments order is (U, T, Size) to allow deduction of the template
// arguments in client calls: reinterpret_array_view<target_type>(array_view).
template <typename U, typename T, std::ptrdiff_t Size>
inline ArrayView<U, Size> reinterpret_array_view(ArrayView<T, Size> view) {
static_assert(sizeof(U) == sizeof(T) && alignof(U) == alignof(T),
"ArrayView reinterpret_cast is only supported for casting "
"between views that represent the same chunk of memory.");
static_assert(
std::is_fundamental<T>::value && std::is_fundamental<U>::value,
"ArrayView reinterpret_cast is only supported for casting between "
"fundamental types.");
return ArrayView<U, Size>(reinterpret_cast<U*>(view.data()), view.size());
}
} // namespace rtc
#endif // API_ARRAY_VIEW_H_
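
For reference, a minimal usage sketch of the conversions described in the header comment (hypothetical caller code, not part of this commit):

#include <vector>

#include "api/array_view.h"

// Works with any contiguous sequence of ints, regardless of storage.
bool Contains17(rtc::ArrayView<const int> arr) {
  for (int e : arr) {
    if (e == 17)
      return true;
  }
  return false;
}

void Demo() {
  int c_array[] = {1, 17, 3};
  std::vector<int> vec = {4, 5, 6};
  bool a = Contains17(c_array);  // true; the C array converts implicitly.
  bool b = Contains17(vec);      // false; so does std::vector.
  bool c = Contains17(rtc::MakeArrayView(c_array, 2));  // pointer + size.
  (void)a;
  (void)b;
  (void)c;
}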

webrtc/api/audio/audio_frame.cc Normal file

@@ -0,0 +1,164 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio/audio_frame.h"
#include <string.h>
#include <algorithm>
#include <utility>
#include "rtc_base/checks.h"
#include "rtc_base/time_utils.h"
namespace webrtc {
AudioFrame::AudioFrame() {
// Visual Studio doesn't like this in the class definition.
static_assert(sizeof(data_) == kMaxDataSizeBytes, "kMaxDataSizeBytes");
}
void swap(AudioFrame& a, AudioFrame& b) {
using std::swap;
swap(a.timestamp_, b.timestamp_);
swap(a.elapsed_time_ms_, b.elapsed_time_ms_);
swap(a.ntp_time_ms_, b.ntp_time_ms_);
swap(a.samples_per_channel_, b.samples_per_channel_);
swap(a.sample_rate_hz_, b.sample_rate_hz_);
swap(a.num_channels_, b.num_channels_);
swap(a.channel_layout_, b.channel_layout_);
swap(a.speech_type_, b.speech_type_);
swap(a.vad_activity_, b.vad_activity_);
swap(a.profile_timestamp_ms_, b.profile_timestamp_ms_);
swap(a.packet_infos_, b.packet_infos_);
const size_t length_a = a.samples_per_channel_ * a.num_channels_;
const size_t length_b = b.samples_per_channel_ * b.num_channels_;
RTC_DCHECK_LE(length_a, AudioFrame::kMaxDataSizeSamples);
RTC_DCHECK_LE(length_b, AudioFrame::kMaxDataSizeSamples);
std::swap_ranges(a.data_, a.data_ + std::max(length_a, length_b), b.data_);
swap(a.muted_, b.muted_);
swap(a.absolute_capture_timestamp_ms_, b.absolute_capture_timestamp_ms_);
}
void AudioFrame::Reset() {
ResetWithoutMuting();
muted_ = true;
}
void AudioFrame::ResetWithoutMuting() {
// TODO(wu): Zero is a valid value for |timestamp_|. We should initialize
// to an invalid value, or add a new member to indicate invalidity.
timestamp_ = 0;
elapsed_time_ms_ = -1;
ntp_time_ms_ = -1;
samples_per_channel_ = 0;
sample_rate_hz_ = 0;
num_channels_ = 0;
channel_layout_ = CHANNEL_LAYOUT_NONE;
speech_type_ = kUndefined;
vad_activity_ = kVadUnknown;
profile_timestamp_ms_ = 0;
packet_infos_ = RtpPacketInfos();
absolute_capture_timestamp_ms_ = absl::nullopt;
}
void AudioFrame::UpdateFrame(uint32_t timestamp,
const int16_t* data,
size_t samples_per_channel,
int sample_rate_hz,
SpeechType speech_type,
VADActivity vad_activity,
size_t num_channels) {
timestamp_ = timestamp;
samples_per_channel_ = samples_per_channel;
sample_rate_hz_ = sample_rate_hz;
speech_type_ = speech_type;
vad_activity_ = vad_activity;
num_channels_ = num_channels;
channel_layout_ = GuessChannelLayout(num_channels);
if (channel_layout_ != CHANNEL_LAYOUT_UNSUPPORTED) {
RTC_DCHECK_EQ(num_channels, ChannelLayoutToChannelCount(channel_layout_));
}
const size_t length = samples_per_channel * num_channels;
RTC_CHECK_LE(length, kMaxDataSizeSamples);
if (data != nullptr) {
memcpy(data_, data, sizeof(int16_t) * length);
muted_ = false;
} else {
muted_ = true;
}
}
void AudioFrame::CopyFrom(const AudioFrame& src) {
if (this == &src)
return;
timestamp_ = src.timestamp_;
elapsed_time_ms_ = src.elapsed_time_ms_;
ntp_time_ms_ = src.ntp_time_ms_;
packet_infos_ = src.packet_infos_;
muted_ = src.muted();
samples_per_channel_ = src.samples_per_channel_;
sample_rate_hz_ = src.sample_rate_hz_;
speech_type_ = src.speech_type_;
vad_activity_ = src.vad_activity_;
num_channels_ = src.num_channels_;
channel_layout_ = src.channel_layout_;
absolute_capture_timestamp_ms_ = src.absolute_capture_timestamp_ms();
const size_t length = samples_per_channel_ * num_channels_;
RTC_CHECK_LE(length, kMaxDataSizeSamples);
if (!src.muted()) {
memcpy(data_, src.data(), sizeof(int16_t) * length);
muted_ = false;
}
}
void AudioFrame::UpdateProfileTimeStamp() {
profile_timestamp_ms_ = rtc::TimeMillis();
}
int64_t AudioFrame::ElapsedProfileTimeMs() const {
if (profile_timestamp_ms_ == 0) {
// Profiling has not been activated.
return -1;
}
return rtc::TimeSince(profile_timestamp_ms_);
}
const int16_t* AudioFrame::data() const {
return muted_ ? empty_data() : data_;
}
// TODO(henrik.lundin) Can we skip zeroing the buffer?
// See https://bugs.chromium.org/p/webrtc/issues/detail?id=5647.
int16_t* AudioFrame::mutable_data() {
if (muted_) {
memset(data_, 0, kMaxDataSizeBytes);
muted_ = false;
}
return data_;
}
void AudioFrame::Mute() {
muted_ = true;
}
bool AudioFrame::muted() const {
return muted_;
}
// static
const int16_t* AudioFrame::empty_data() {
static int16_t* null_data = new int16_t[kMaxDataSizeSamples]();
return &null_data[0];
}
} // namespace webrtc
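
To make the mute/unmute semantics above concrete, a small sketch (hypothetical caller code, not part of this commit):

#include "api/audio/audio_frame.h"

void MuteDemo(webrtc::AudioFrame* frame) {
  frame->Mute();
  // While muted, data() returns a shared, permanently zeroed buffer; the
  // frame's own buffer is left untouched.
  const int16_t* silent = frame->data();
  // mutable_data() zeroes the frame's own buffer and clears the mute flag.
  int16_t* writable = frame->mutable_data();
  writable[0] = 100;  // The frame now carries real (unmuted) samples.
  (void)silent;
}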

webrtc/api/audio/audio_frame.h Normal file

@@ -0,0 +1,177 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_AUDIO_FRAME_H_
#define API_AUDIO_AUDIO_FRAME_H_
#include <stddef.h>
#include <stdint.h>
#include <utility>
#include "api/audio/channel_layout.h"
#include "api/rtp_packet_infos.h"
#include "rtc_base/constructor_magic.h"
namespace webrtc {
/* This class holds up to 120 ms of super-wideband (32 kHz) stereo audio. It
* allows for adding and subtracting frames while keeping track of the resulting
* states.
*
* Notes
* - This is a de-facto api, not designed for external use. The AudioFrame class
* is in need of overhaul or even replacement, and anyone depending on it
* should be prepared for that.
* - The total number of samples is samples_per_channel_ * num_channels_.
* - Stereo data is interleaved starting with the left channel.
*/
class AudioFrame {
public:
// Using constexpr here causes linker errors unless the variable also has an
// out-of-class definition, which is impractical in this header-only class.
// (This makes no sense because it compiles as an enum value, which we most
// certainly cannot take the address of, just fine.) C++17 introduces inline
// variables which should allow us to switch to constexpr and keep this a
// header-only class.
enum : size_t {
// Stereo, 32 kHz, 120 ms (2 * 32 * 120)
// Stereo, 192 kHz, 20 ms (2 * 192 * 20)
kMaxDataSizeSamples = 7680,
kMaxDataSizeBytes = kMaxDataSizeSamples * sizeof(int16_t),
};
enum VADActivity { kVadActive = 0, kVadPassive = 1, kVadUnknown = 2 };
enum SpeechType {
kNormalSpeech = 0,
kPLC = 1,
kCNG = 2,
kPLCCNG = 3,
kCodecPLC = 5,
kUndefined = 4
};
AudioFrame();
friend void swap(AudioFrame& a, AudioFrame& b);
// Resets all members to their default state.
void Reset();
// Same as Reset(), but leaves mute state unchanged. Muting a frame requires
// the buffer to be zeroed on the next call to mutable_data(). Callers
// intending to write to the buffer immediately after Reset() can instead use
// ResetWithoutMuting() to skip this wasteful zeroing.
void ResetWithoutMuting();
void UpdateFrame(uint32_t timestamp,
const int16_t* data,
size_t samples_per_channel,
int sample_rate_hz,
SpeechType speech_type,
VADActivity vad_activity,
size_t num_channels = 1);
void CopyFrom(const AudioFrame& src);
// Sets a wall-time clock timestamp in milliseconds to be used for profiling
// of time between two points in the audio chain.
// Example:
// t0: UpdateProfileTimeStamp()
// t1: ElapsedProfileTimeMs() => t1 - t0 [msec]
void UpdateProfileTimeStamp();
// Returns the time difference between now and when UpdateProfileTimeStamp()
// was last called. Returns -1 if UpdateProfileTimeStamp() has not yet been
// called.
int64_t ElapsedProfileTimeMs() const;
// data() returns a zeroed static buffer if the frame is muted.
// mutable_data() always returns a non-static buffer; the first call to
// mutable_data() zeros the non-static buffer and marks the frame unmuted.
const int16_t* data() const;
int16_t* mutable_data();
// Prefer to mute frames using AudioFrameOperations::Mute.
void Mute();
// Frame is muted by default.
bool muted() const;
size_t max_16bit_samples() const { return kMaxDataSizeSamples; }
size_t samples_per_channel() const { return samples_per_channel_; }
size_t num_channels() const { return num_channels_; }
ChannelLayout channel_layout() const { return channel_layout_; }
int sample_rate_hz() const { return sample_rate_hz_; }
void set_absolute_capture_timestamp_ms(
int64_t absolute_capture_time_stamp_ms) {
absolute_capture_timestamp_ms_ = absolute_capture_time_stamp_ms;
}
absl::optional<int64_t> absolute_capture_timestamp_ms() const {
return absolute_capture_timestamp_ms_;
}
// RTP timestamp of the first sample in the AudioFrame.
uint32_t timestamp_ = 0;
// Time since the first frame in milliseconds.
// -1 represents an uninitialized value.
int64_t elapsed_time_ms_ = -1;
// NTP time of the estimated capture time in local timebase in milliseconds.
// -1 represents an uninitialized value.
int64_t ntp_time_ms_ = -1;
size_t samples_per_channel_ = 0;
int sample_rate_hz_ = 0;
size_t num_channels_ = 0;
ChannelLayout channel_layout_ = CHANNEL_LAYOUT_NONE;
SpeechType speech_type_ = kUndefined;
VADActivity vad_activity_ = kVadUnknown;
// Monotonically increasing timestamp intended for profiling of audio frames.
// Typically used for measuring elapsed time between two different points in
// the audio path. No lock is used to save resources and we are thread safe
// by design.
// TODO(nisse@webrtc.org): consider using absl::optional.
int64_t profile_timestamp_ms_ = 0;
// Information about packets used to assemble this audio frame. This is needed
// by |SourceTracker| when the frame is delivered to the RTCRtpReceiver's
// MediaStreamTrack, in order to implement getContributingSources(). See:
// https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources
//
// TODO(bugs.webrtc.org/10757):
// Note that this information might not be fully accurate since we currently
// don't have a proper way to track it across the audio sync buffer. The
// sync buffer is the small sample-holding buffer located after the audio
// decoder and before where samples are assembled into output frames.
//
// |RtpPacketInfos| may also be empty if the audio samples did not come from
// RTP packets. E.g. if the audio were locally generated by packet loss
// concealment, comfort noise generation, etc.
RtpPacketInfos packet_infos_;
private:
// A permanently zeroed out buffer to represent muted frames. This is a
// header-only class, so the only way to avoid creating a separate empty
// buffer per translation unit is to wrap a static in an inline function.
static const int16_t* empty_data();
int16_t data_[kMaxDataSizeSamples];
bool muted_ = true;
// Absolute capture timestamp when this audio frame was originally captured.
// This is only valid for audio frames captured on this machine. The absolute
// capture timestamp of a received frame is found in |packet_infos_|.
// This timestamp MUST be based on the same clock as rtc::TimeMillis().
absl::optional<int64_t> absolute_capture_timestamp_ms_;
RTC_DISALLOW_COPY_AND_ASSIGN(AudioFrame);
};
} // namespace webrtc
#endif // API_AUDIO_AUDIO_FRAME_H_
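
The UpdateProfileTimeStamp()/ElapsedProfileTimeMs() pair documented above can be used like this (sketch only; OnFrameEnter/OnFrameExit are hypothetical pipeline hooks):

#include "api/audio/audio_frame.h"
#include "rtc_base/logging.h"

// t0: stamp the frame as it enters the section being profiled.
void OnFrameEnter(webrtc::AudioFrame* frame) {
  frame->UpdateProfileTimeStamp();
}

// t1: report how long the frame spent in that section.
void OnFrameExit(const webrtc::AudioFrame& frame) {
  int64_t elapsed_ms = frame.ElapsedProfileTimeMs();
  if (elapsed_ms >= 0) {  // -1 means UpdateProfileTimeStamp() was never called.
    RTC_LOG(LS_INFO) << "Audio path took " << elapsed_ms << " ms";
  }
}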

webrtc/api/audio/channel_layout.cc Normal file

@@ -0,0 +1,282 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio/channel_layout.h"
#include <stddef.h>
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
namespace webrtc {
static const int kLayoutToChannels[] = {
0, // CHANNEL_LAYOUT_NONE
0, // CHANNEL_LAYOUT_UNSUPPORTED
1, // CHANNEL_LAYOUT_MONO
2, // CHANNEL_LAYOUT_STEREO
3, // CHANNEL_LAYOUT_2_1
3, // CHANNEL_LAYOUT_SURROUND
4, // CHANNEL_LAYOUT_4_0
4, // CHANNEL_LAYOUT_2_2
4, // CHANNEL_LAYOUT_QUAD
5, // CHANNEL_LAYOUT_5_0
6, // CHANNEL_LAYOUT_5_1
5, // CHANNEL_LAYOUT_5_0_BACK
6, // CHANNEL_LAYOUT_5_1_BACK
7, // CHANNEL_LAYOUT_7_0
8, // CHANNEL_LAYOUT_7_1
8, // CHANNEL_LAYOUT_7_1_WIDE
2, // CHANNEL_LAYOUT_STEREO_DOWNMIX
3, // CHANNEL_LAYOUT_2POINT1
4, // CHANNEL_LAYOUT_3_1
5, // CHANNEL_LAYOUT_4_1
6, // CHANNEL_LAYOUT_6_0
6, // CHANNEL_LAYOUT_6_0_FRONT
6, // CHANNEL_LAYOUT_HEXAGONAL
7, // CHANNEL_LAYOUT_6_1
7, // CHANNEL_LAYOUT_6_1_BACK
7, // CHANNEL_LAYOUT_6_1_FRONT
7, // CHANNEL_LAYOUT_7_0_FRONT
8, // CHANNEL_LAYOUT_7_1_WIDE_BACK
8, // CHANNEL_LAYOUT_OCTAGONAL
0, // CHANNEL_LAYOUT_DISCRETE
3, // CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC
5, // CHANNEL_LAYOUT_4_1_QUAD_SIDE
0, // CHANNEL_LAYOUT_BITSTREAM
};
// The channel orderings for each layout as specified by FFmpeg. Each value
// represents the index of each channel in each layout. Values of -1 mean the
// channel at that index is not used for that layout. For example, the left side
// surround sound channel in FFmpeg's 5.1 layout is in the 5th position (because
// the order is L, R, C, LFE, LS, RS), so
// kChannelOrderings[CHANNEL_LAYOUT_5_1][SIDE_LEFT] = 4;
static const int kChannelOrderings[CHANNEL_LAYOUT_MAX + 1][CHANNELS_MAX + 1] = {
// FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
// CHANNEL_LAYOUT_NONE
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
// CHANNEL_LAYOUT_UNSUPPORTED
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
// CHANNEL_LAYOUT_MONO
{-1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1},
// CHANNEL_LAYOUT_STEREO
{0, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
// CHANNEL_LAYOUT_2_1
{0, 1, -1, -1, -1, -1, -1, -1, 2, -1, -1},
// CHANNEL_LAYOUT_SURROUND
{0, 1, 2, -1, -1, -1, -1, -1, -1, -1, -1},
// CHANNEL_LAYOUT_4_0
{0, 1, 2, -1, -1, -1, -1, -1, 3, -1, -1},
// CHANNEL_LAYOUT_2_2
{0, 1, -1, -1, -1, -1, -1, -1, -1, 2, 3},
// CHANNEL_LAYOUT_QUAD
{0, 1, -1, -1, 2, 3, -1, -1, -1, -1, -1},
// CHANNEL_LAYOUT_5_0
{0, 1, 2, -1, -1, -1, -1, -1, -1, 3, 4},
// CHANNEL_LAYOUT_5_1
{0, 1, 2, 3, -1, -1, -1, -1, -1, 4, 5},
// FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
// CHANNEL_LAYOUT_5_0_BACK
{0, 1, 2, -1, 3, 4, -1, -1, -1, -1, -1},
// CHANNEL_LAYOUT_5_1_BACK
{0, 1, 2, 3, 4, 5, -1, -1, -1, -1, -1},
// CHANNEL_LAYOUT_7_0
{0, 1, 2, -1, 5, 6, -1, -1, -1, 3, 4},
// CHANNEL_LAYOUT_7_1
{0, 1, 2, 3, 6, 7, -1, -1, -1, 4, 5},
// CHANNEL_LAYOUT_7_1_WIDE
{0, 1, 2, 3, -1, -1, 6, 7, -1, 4, 5},
// CHANNEL_LAYOUT_STEREO_DOWNMIX
{0, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
// CHANNEL_LAYOUT_2POINT1
{0, 1, -1, 2, -1, -1, -1, -1, -1, -1, -1},
// CHANNEL_LAYOUT_3_1
{0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1},
// CHANNEL_LAYOUT_4_1
{0, 1, 2, 4, -1, -1, -1, -1, 3, -1, -1},
// CHANNEL_LAYOUT_6_0
{0, 1, 2, -1, -1, -1, -1, -1, 5, 3, 4},
// CHANNEL_LAYOUT_6_0_FRONT
{0, 1, -1, -1, -1, -1, 4, 5, -1, 2, 3},
// FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
// CHANNEL_LAYOUT_HEXAGONAL
{0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1},
// CHANNEL_LAYOUT_6_1
{0, 1, 2, 3, -1, -1, -1, -1, 6, 4, 5},
// CHANNEL_LAYOUT_6_1_BACK
{0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1},
// CHANNEL_LAYOUT_6_1_FRONT
{0, 1, -1, 6, -1, -1, 4, 5, -1, 2, 3},
// CHANNEL_LAYOUT_7_0_FRONT
{0, 1, 2, -1, -1, -1, 5, 6, -1, 3, 4},
// CHANNEL_LAYOUT_7_1_WIDE_BACK
{0, 1, 2, 3, 4, 5, 6, 7, -1, -1, -1},
// CHANNEL_LAYOUT_OCTAGONAL
{0, 1, 2, -1, 5, 6, -1, -1, 7, 3, 4},
// CHANNEL_LAYOUT_DISCRETE
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
// CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC
{0, 1, 2, -1, -1, -1, -1, -1, -1, -1, -1},
// CHANNEL_LAYOUT_4_1_QUAD_SIDE
{0, 1, -1, 4, -1, -1, -1, -1, -1, 2, 3},
// CHANNEL_LAYOUT_BITSTREAM
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
// FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
};
int ChannelLayoutToChannelCount(ChannelLayout layout) {
RTC_DCHECK_LT(static_cast<size_t>(layout), arraysize(kLayoutToChannels));
RTC_DCHECK_LE(kLayoutToChannels[layout], kMaxConcurrentChannels);
return kLayoutToChannels[layout];
}
// Converts a channel count into a channel layout.
ChannelLayout GuessChannelLayout(int channels) {
switch (channels) {
case 1:
return CHANNEL_LAYOUT_MONO;
case 2:
return CHANNEL_LAYOUT_STEREO;
case 3:
return CHANNEL_LAYOUT_SURROUND;
case 4:
return CHANNEL_LAYOUT_QUAD;
case 5:
return CHANNEL_LAYOUT_5_0;
case 6:
return CHANNEL_LAYOUT_5_1;
case 7:
return CHANNEL_LAYOUT_6_1;
case 8:
return CHANNEL_LAYOUT_7_1;
default:
RTC_DLOG(LS_WARNING) << "Unsupported channel count: " << channels;
}
return CHANNEL_LAYOUT_UNSUPPORTED;
}
int ChannelOrder(ChannelLayout layout, Channels channel) {
RTC_DCHECK_LT(static_cast<size_t>(layout), arraysize(kChannelOrderings));
RTC_DCHECK_LT(static_cast<size_t>(channel), arraysize(kChannelOrderings[0]));
return kChannelOrderings[layout][channel];
}
const char* ChannelLayoutToString(ChannelLayout layout) {
switch (layout) {
case CHANNEL_LAYOUT_NONE:
return "NONE";
case CHANNEL_LAYOUT_UNSUPPORTED:
return "UNSUPPORTED";
case CHANNEL_LAYOUT_MONO:
return "MONO";
case CHANNEL_LAYOUT_STEREO:
return "STEREO";
case CHANNEL_LAYOUT_2_1:
return "2.1";
case CHANNEL_LAYOUT_SURROUND:
return "SURROUND";
case CHANNEL_LAYOUT_4_0:
return "4.0";
case CHANNEL_LAYOUT_2_2:
return "QUAD_SIDE";
case CHANNEL_LAYOUT_QUAD:
return "QUAD";
case CHANNEL_LAYOUT_5_0:
return "5.0";
case CHANNEL_LAYOUT_5_1:
return "5.1";
case CHANNEL_LAYOUT_5_0_BACK:
return "5.0_BACK";
case CHANNEL_LAYOUT_5_1_BACK:
return "5.1_BACK";
case CHANNEL_LAYOUT_7_0:
return "7.0";
case CHANNEL_LAYOUT_7_1:
return "7.1";
case CHANNEL_LAYOUT_7_1_WIDE:
return "7.1_WIDE";
case CHANNEL_LAYOUT_STEREO_DOWNMIX:
return "STEREO_DOWNMIX";
case CHANNEL_LAYOUT_2POINT1:
return "2POINT1";
case CHANNEL_LAYOUT_3_1:
return "3.1";
case CHANNEL_LAYOUT_4_1:
return "4.1";
case CHANNEL_LAYOUT_6_0:
return "6.0";
case CHANNEL_LAYOUT_6_0_FRONT:
return "6.0_FRONT";
case CHANNEL_LAYOUT_HEXAGONAL:
return "HEXAGONAL";
case CHANNEL_LAYOUT_6_1:
return "6.1";
case CHANNEL_LAYOUT_6_1_BACK:
return "6.1_BACK";
case CHANNEL_LAYOUT_6_1_FRONT:
return "6.1_FRONT";
case CHANNEL_LAYOUT_7_0_FRONT:
return "7.0_FRONT";
case CHANNEL_LAYOUT_7_1_WIDE_BACK:
return "7.1_WIDE_BACK";
case CHANNEL_LAYOUT_OCTAGONAL:
return "OCTAGONAL";
case CHANNEL_LAYOUT_DISCRETE:
return "DISCRETE";
case CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC:
return "STEREO_AND_KEYBOARD_MIC";
case CHANNEL_LAYOUT_4_1_QUAD_SIDE:
return "4.1_QUAD_SIDE";
case CHANNEL_LAYOUT_BITSTREAM:
return "BITSTREAM";
}
RTC_NOTREACHED() << "Invalid channel layout provided: " << layout;
return "";
}
} // namespace webrtc
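
As an illustration of the ordering tables above (hypothetical caller code, not part of this commit):

#include "api/audio/channel_layout.h"

void OrderDemo() {
  // In FFmpeg's 5.1 ordering (L, R, C, LFE, LS, RS), the left side-surround
  // channel sits at interleaved index 4.
  int side_left = webrtc::ChannelOrder(webrtc::CHANNEL_LAYOUT_5_1,
                                       webrtc::SIDE_LEFT);  // == 4
  // Channels absent from a layout are reported as -1.
  int lfe = webrtc::ChannelOrder(webrtc::CHANNEL_LAYOUT_MONO,
                                 webrtc::LFE);  // == -1
  (void)side_left;
  (void)lfe;
}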

webrtc/api/audio/channel_layout.h Normal file

@@ -0,0 +1,165 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CHANNEL_LAYOUT_H_
#define API_AUDIO_CHANNEL_LAYOUT_H_
namespace webrtc {
// This file is derived from Chromium's base/channel_layout.h.
// Enumerates the various representations of the ordering of audio channels.
// Logged to UMA, so never reuse a value, always add new/greater ones!
enum ChannelLayout {
CHANNEL_LAYOUT_NONE = 0,
CHANNEL_LAYOUT_UNSUPPORTED = 1,
// Front C
CHANNEL_LAYOUT_MONO = 2,
// Front L, Front R
CHANNEL_LAYOUT_STEREO = 3,
// Front L, Front R, Back C
CHANNEL_LAYOUT_2_1 = 4,
// Front L, Front R, Front C
CHANNEL_LAYOUT_SURROUND = 5,
// Front L, Front R, Front C, Back C
CHANNEL_LAYOUT_4_0 = 6,
// Front L, Front R, Side L, Side R
CHANNEL_LAYOUT_2_2 = 7,
// Front L, Front R, Back L, Back R
CHANNEL_LAYOUT_QUAD = 8,
// Front L, Front R, Front C, Side L, Side R
CHANNEL_LAYOUT_5_0 = 9,
// Front L, Front R, Front C, LFE, Side L, Side R
CHANNEL_LAYOUT_5_1 = 10,
// Front L, Front R, Front C, Back L, Back R
CHANNEL_LAYOUT_5_0_BACK = 11,
// Front L, Front R, Front C, LFE, Back L, Back R
CHANNEL_LAYOUT_5_1_BACK = 12,
// Front L, Front R, Front C, Side L, Side R, Back L, Back R
CHANNEL_LAYOUT_7_0 = 13,
// Front L, Front R, Front C, LFE, Side L, Side R, Back L, Back R
CHANNEL_LAYOUT_7_1 = 14,
// Front L, Front R, Front C, LFE, Side L, Side R, Front LofC, Front RofC
CHANNEL_LAYOUT_7_1_WIDE = 15,
// Stereo L, Stereo R
CHANNEL_LAYOUT_STEREO_DOWNMIX = 16,
// Stereo L, Stereo R, LFE
CHANNEL_LAYOUT_2POINT1 = 17,
// Stereo L, Stereo R, Front C, LFE
CHANNEL_LAYOUT_3_1 = 18,
// Stereo L, Stereo R, Front C, Rear C, LFE
CHANNEL_LAYOUT_4_1 = 19,
// Stereo L, Stereo R, Front C, Side L, Side R, Back C
CHANNEL_LAYOUT_6_0 = 20,
// Stereo L, Stereo R, Side L, Side R, Front LofC, Front RofC
CHANNEL_LAYOUT_6_0_FRONT = 21,
// Stereo L, Stereo R, Front C, Rear L, Rear R, Rear C
CHANNEL_LAYOUT_HEXAGONAL = 22,
// Stereo L, Stereo R, Front C, LFE, Side L, Side R, Rear Center
CHANNEL_LAYOUT_6_1 = 23,
// Stereo L, Stereo R, Front C, LFE, Back L, Back R, Rear Center
CHANNEL_LAYOUT_6_1_BACK = 24,
// Stereo L, Stereo R, Side L, Side R, Front LofC, Front RofC, LFE
CHANNEL_LAYOUT_6_1_FRONT = 25,
// Front L, Front R, Front C, Side L, Side R, Front LofC, Front RofC
CHANNEL_LAYOUT_7_0_FRONT = 26,
// Front L, Front R, Front C, LFE, Back L, Back R, Front LofC, Front RofC
CHANNEL_LAYOUT_7_1_WIDE_BACK = 27,
// Front L, Front R, Front C, Side L, Side R, Back L, Back R, Back C.
CHANNEL_LAYOUT_OCTAGONAL = 28,
// Channels are not explicitly mapped to speakers.
CHANNEL_LAYOUT_DISCRETE = 29,
// Front L, Front R, Front C. Front C contains the keyboard mic audio. This
// layout is only intended for input for WebRTC. The Front C channel
// is stripped away in the WebRTC audio input pipeline and never seen outside
// of that.
CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC = 30,
// Front L, Front R, Side L, Side R, LFE
CHANNEL_LAYOUT_4_1_QUAD_SIDE = 31,
// Actual channel layout is specified in the bitstream and the actual channel
// count is unknown at Chromium media pipeline level (useful for audio
// pass-through mode).
CHANNEL_LAYOUT_BITSTREAM = 32,
// Max value, must always equal the largest entry ever logged.
CHANNEL_LAYOUT_MAX = CHANNEL_LAYOUT_BITSTREAM
};
// Note: Do not reorder or reassign these values; other code depends on their
// ordering to operate correctly. E.g., CoreAudio channel layout computations.
enum Channels {
LEFT = 0,
RIGHT,
CENTER,
LFE,
BACK_LEFT,
BACK_RIGHT,
LEFT_OF_CENTER,
RIGHT_OF_CENTER,
BACK_CENTER,
SIDE_LEFT,
SIDE_RIGHT,
CHANNELS_MAX =
SIDE_RIGHT, // Must always equal the largest value ever logged.
};
// The maximum number of concurrently active channels for all possible layouts.
// ChannelLayoutToChannelCount() will never return a value higher than this.
constexpr int kMaxConcurrentChannels = 8;
// Returns the expected channel position in an interleaved stream. Values of -1
// mean the channel at that index is not used for that layout. Values range
// from 0 to ChannelLayoutToChannelCount(layout) - 1.
int ChannelOrder(ChannelLayout layout, Channels channel);
// Returns the number of channels in a given ChannelLayout.
int ChannelLayoutToChannelCount(ChannelLayout layout);
// Given the number of channels, return the best layout,
// or return CHANNEL_LAYOUT_UNSUPPORTED if there is no good match.
ChannelLayout GuessChannelLayout(int channels);
// Returns a string representation of the channel layout.
const char* ChannelLayoutToString(ChannelLayout layout);
} // namespace webrtc
#endif // API_AUDIO_CHANNEL_LAYOUT_H_
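
A short sketch of the helper functions declared above (hypothetical caller code):

#include "api/audio/channel_layout.h"

void LayoutDemo() {
  // Six channels are assumed to be 5.1 (L, R, C, LFE, Side L, Side R).
  webrtc::ChannelLayout layout = webrtc::GuessChannelLayout(6);
  int channels = webrtc::ChannelLayoutToChannelCount(layout);  // == 6
  const char* name = webrtc::ChannelLayoutToString(layout);    // "5.1"
  (void)channels;
  (void)name;
}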

webrtc/api/audio/echo_canceller3_config.cc Normal file

@@ -0,0 +1,270 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio/echo_canceller3_config.h"
#include <algorithm>
#include <cmath>
#include "rtc_base/checks.h"
#include "rtc_base/numerics/safe_minmax.h"
namespace webrtc {
namespace {
bool Limit(float* value, float min, float max) {
float clamped = rtc::SafeClamp(*value, min, max);
clamped = std::isfinite(clamped) ? clamped : min;
bool res = *value == clamped;
*value = clamped;
return res;
}
bool Limit(size_t* value, size_t min, size_t max) {
size_t clamped = rtc::SafeClamp(*value, min, max);
bool res = *value == clamped;
*value = clamped;
return res;
}
bool Limit(int* value, int min, int max) {
int clamped = rtc::SafeClamp(*value, min, max);
bool res = *value == clamped;
*value = clamped;
return res;
}
bool FloorLimit(size_t* value, size_t min) {
size_t clamped = *value >= min ? *value : min;
bool res = *value == clamped;
*value = clamped;
return res;
}
} // namespace
EchoCanceller3Config::EchoCanceller3Config() = default;
EchoCanceller3Config::EchoCanceller3Config(const EchoCanceller3Config& e) =
default;
EchoCanceller3Config& EchoCanceller3Config::operator=(
const EchoCanceller3Config& e) = default;
EchoCanceller3Config::Delay::Delay() = default;
EchoCanceller3Config::Delay::Delay(const EchoCanceller3Config::Delay& e) =
default;
EchoCanceller3Config::Delay& EchoCanceller3Config::Delay::operator=(
const Delay& e) = default;
EchoCanceller3Config::EchoModel::EchoModel() = default;
EchoCanceller3Config::EchoModel::EchoModel(
const EchoCanceller3Config::EchoModel& e) = default;
EchoCanceller3Config::EchoModel& EchoCanceller3Config::EchoModel::operator=(
const EchoModel& e) = default;
EchoCanceller3Config::Suppressor::Suppressor() = default;
EchoCanceller3Config::Suppressor::Suppressor(
const EchoCanceller3Config::Suppressor& e) = default;
EchoCanceller3Config::Suppressor& EchoCanceller3Config::Suppressor::operator=(
const Suppressor& e) = default;
EchoCanceller3Config::Suppressor::MaskingThresholds::MaskingThresholds(
float enr_transparent,
float enr_suppress,
float emr_transparent)
: enr_transparent(enr_transparent),
enr_suppress(enr_suppress),
emr_transparent(emr_transparent) {}
EchoCanceller3Config::Suppressor::MaskingThresholds::MaskingThresholds(
const EchoCanceller3Config::Suppressor::MaskingThresholds& e) = default;
EchoCanceller3Config::Suppressor::MaskingThresholds&
EchoCanceller3Config::Suppressor::MaskingThresholds::operator=(
const MaskingThresholds& e) = default;
EchoCanceller3Config::Suppressor::Tuning::Tuning(MaskingThresholds mask_lf,
MaskingThresholds mask_hf,
float max_inc_factor,
float max_dec_factor_lf)
: mask_lf(mask_lf),
mask_hf(mask_hf),
max_inc_factor(max_inc_factor),
max_dec_factor_lf(max_dec_factor_lf) {}
EchoCanceller3Config::Suppressor::Tuning::Tuning(
const EchoCanceller3Config::Suppressor::Tuning& e) = default;
EchoCanceller3Config::Suppressor::Tuning&
EchoCanceller3Config::Suppressor::Tuning::operator=(const Tuning& e) = default;
bool EchoCanceller3Config::Validate(EchoCanceller3Config* config) {
RTC_DCHECK(config);
EchoCanceller3Config* c = config;
bool res = true;
if (c->delay.down_sampling_factor != 4 &&
c->delay.down_sampling_factor != 8) {
c->delay.down_sampling_factor = 4;
res = false;
}
res = res & Limit(&c->delay.default_delay, 0, 5000);
res = res & Limit(&c->delay.num_filters, 0, 5000);
res = res & Limit(&c->delay.delay_headroom_samples, 0, 5000);
res = res & Limit(&c->delay.hysteresis_limit_blocks, 0, 5000);
res = res & Limit(&c->delay.fixed_capture_delay_samples, 0, 5000);
res = res & Limit(&c->delay.delay_estimate_smoothing, 0.f, 1.f);
res = res & Limit(&c->delay.delay_candidate_detection_threshold, 0.f, 1.f);
res = res & Limit(&c->delay.delay_selection_thresholds.initial, 1, 250);
res = res & Limit(&c->delay.delay_selection_thresholds.converged, 1, 250);
res = res & FloorLimit(&c->filter.refined.length_blocks, 1);
res = res & Limit(&c->filter.refined.leakage_converged, 0.f, 1000.f);
res = res & Limit(&c->filter.refined.leakage_diverged, 0.f, 1000.f);
res = res & Limit(&c->filter.refined.error_floor, 0.f, 1000.f);
res = res & Limit(&c->filter.refined.error_ceil, 0.f, 100000000.f);
res = res & Limit(&c->filter.refined.noise_gate, 0.f, 100000000.f);
res = res & FloorLimit(&c->filter.refined_initial.length_blocks, 1);
res = res & Limit(&c->filter.refined_initial.leakage_converged, 0.f, 1000.f);
res = res & Limit(&c->filter.refined_initial.leakage_diverged, 0.f, 1000.f);
res = res & Limit(&c->filter.refined_initial.error_floor, 0.f, 1000.f);
res = res & Limit(&c->filter.refined_initial.error_ceil, 0.f, 100000000.f);
res = res & Limit(&c->filter.refined_initial.noise_gate, 0.f, 100000000.f);
if (c->filter.refined.length_blocks <
c->filter.refined_initial.length_blocks) {
c->filter.refined_initial.length_blocks = c->filter.refined.length_blocks;
res = false;
}
res = res & FloorLimit(&c->filter.coarse.length_blocks, 1);
res = res & Limit(&c->filter.coarse.rate, 0.f, 1.f);
res = res & Limit(&c->filter.coarse.noise_gate, 0.f, 100000000.f);
res = res & FloorLimit(&c->filter.coarse_initial.length_blocks, 1);
res = res & Limit(&c->filter.coarse_initial.rate, 0.f, 1.f);
res = res & Limit(&c->filter.coarse_initial.noise_gate, 0.f, 100000000.f);
if (c->filter.coarse.length_blocks < c->filter.coarse_initial.length_blocks) {
c->filter.coarse_initial.length_blocks = c->filter.coarse.length_blocks;
res = false;
}
res = res & Limit(&c->filter.config_change_duration_blocks, 0, 100000);
res = res & Limit(&c->filter.initial_state_seconds, 0.f, 100.f);
res = res & Limit(&c->erle.min, 1.f, 100000.f);
res = res & Limit(&c->erle.max_l, 1.f, 100000.f);
res = res & Limit(&c->erle.max_h, 1.f, 100000.f);
if (c->erle.min > c->erle.max_l || c->erle.min > c->erle.max_h) {
c->erle.min = std::min(c->erle.max_l, c->erle.max_h);
res = false;
}
res = res & Limit(&c->erle.num_sections, 1, c->filter.refined.length_blocks);
res = res & Limit(&c->ep_strength.default_gain, 0.f, 1000000.f);
res = res & Limit(&c->ep_strength.default_len, -1.f, 1.f);
res =
res & Limit(&c->echo_audibility.low_render_limit, 0.f, 32768.f * 32768.f);
res = res &
Limit(&c->echo_audibility.normal_render_limit, 0.f, 32768.f * 32768.f);
res = res & Limit(&c->echo_audibility.floor_power, 0.f, 32768.f * 32768.f);
res = res & Limit(&c->echo_audibility.audibility_threshold_lf, 0.f,
32768.f * 32768.f);
res = res & Limit(&c->echo_audibility.audibility_threshold_mf, 0.f,
32768.f * 32768.f);
res = res & Limit(&c->echo_audibility.audibility_threshold_hf, 0.f,
32768.f * 32768.f);
res = res &
Limit(&c->render_levels.active_render_limit, 0.f, 32768.f * 32768.f);
res = res & Limit(&c->render_levels.poor_excitation_render_limit, 0.f,
32768.f * 32768.f);
res = res & Limit(&c->render_levels.poor_excitation_render_limit_ds8, 0.f,
32768.f * 32768.f);
res = res & Limit(&c->echo_model.noise_floor_hold, 0, 1000);
res = res & Limit(&c->echo_model.min_noise_floor_power, 0, 2000000.f);
res = res & Limit(&c->echo_model.stationary_gate_slope, 0, 1000000.f);
res = res & Limit(&c->echo_model.noise_gate_power, 0, 1000000.f);
res = res & Limit(&c->echo_model.noise_gate_slope, 0, 1000000.f);
res = res & Limit(&c->echo_model.render_pre_window_size, 0, 100);
res = res & Limit(&c->echo_model.render_post_window_size, 0, 100);
res = res & Limit(&c->comfort_noise.noise_floor_dbfs, -200.f, 0.f);
res = res & Limit(&c->suppressor.nearend_average_blocks, 1, 5000);
res = res &
Limit(&c->suppressor.normal_tuning.mask_lf.enr_transparent, 0.f, 100.f);
res = res &
Limit(&c->suppressor.normal_tuning.mask_lf.enr_suppress, 0.f, 100.f);
res = res &
Limit(&c->suppressor.normal_tuning.mask_lf.emr_transparent, 0.f, 100.f);
res = res &
Limit(&c->suppressor.normal_tuning.mask_hf.enr_transparent, 0.f, 100.f);
res = res &
Limit(&c->suppressor.normal_tuning.mask_hf.enr_suppress, 0.f, 100.f);
res = res &
Limit(&c->suppressor.normal_tuning.mask_hf.emr_transparent, 0.f, 100.f);
res = res & Limit(&c->suppressor.normal_tuning.max_inc_factor, 0.f, 100.f);
res = res & Limit(&c->suppressor.normal_tuning.max_dec_factor_lf, 0.f, 100.f);
res = res & Limit(&c->suppressor.nearend_tuning.mask_lf.enr_transparent, 0.f,
100.f);
res = res &
Limit(&c->suppressor.nearend_tuning.mask_lf.enr_suppress, 0.f, 100.f);
res = res & Limit(&c->suppressor.nearend_tuning.mask_lf.emr_transparent, 0.f,
100.f);
res = res & Limit(&c->suppressor.nearend_tuning.mask_hf.enr_transparent, 0.f,
100.f);
res = res &
Limit(&c->suppressor.nearend_tuning.mask_hf.enr_suppress, 0.f, 100.f);
res = res & Limit(&c->suppressor.nearend_tuning.mask_hf.emr_transparent, 0.f,
100.f);
res = res & Limit(&c->suppressor.nearend_tuning.max_inc_factor, 0.f, 100.f);
res =
res & Limit(&c->suppressor.nearend_tuning.max_dec_factor_lf, 0.f, 100.f);
res = res & Limit(&c->suppressor.dominant_nearend_detection.enr_threshold,
0.f, 1000000.f);
res = res & Limit(&c->suppressor.dominant_nearend_detection.snr_threshold,
0.f, 1000000.f);
res = res & Limit(&c->suppressor.dominant_nearend_detection.hold_duration, 0,
10000);
res = res & Limit(&c->suppressor.dominant_nearend_detection.trigger_threshold,
0, 10000);
res = res &
Limit(&c->suppressor.subband_nearend_detection.nearend_average_blocks,
1, 1024);
res =
res & Limit(&c->suppressor.subband_nearend_detection.subband1.low, 0, 65);
res = res & Limit(&c->suppressor.subband_nearend_detection.subband1.high,
c->suppressor.subband_nearend_detection.subband1.low, 65);
res =
res & Limit(&c->suppressor.subband_nearend_detection.subband2.low, 0, 65);
res = res & Limit(&c->suppressor.subband_nearend_detection.subband2.high,
c->suppressor.subband_nearend_detection.subband2.low, 65);
res = res & Limit(&c->suppressor.subband_nearend_detection.nearend_threshold,
0.f, 1.e24f);
res = res & Limit(&c->suppressor.subband_nearend_detection.snr_threshold, 0.f,
1.e24f);
res = res & Limit(&c->suppressor.high_bands_suppression.enr_threshold, 0.f,
1000000.f);
res = res & Limit(&c->suppressor.high_bands_suppression.max_gain_during_echo,
0.f, 1.f);
res = res & Limit(&c->suppressor.high_bands_suppression
.anti_howling_activation_threshold,
0.f, 32768.f * 32768.f);
res = res & Limit(&c->suppressor.high_bands_suppression.anti_howling_gain,
0.f, 1.f);
res = res & Limit(&c->suppressor.floor_first_increase, 0.f, 1000000.f);
return res;
}
} // namespace webrtc
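
The clamping behaviour of Validate() can be seen in a small sketch (hypothetical values, not part of this commit):

#include "api/audio/echo_canceller3_config.h"

void ValidateDemo() {
  webrtc::EchoCanceller3Config config;
  config.delay.down_sampling_factor = 3;  // Only 4 and 8 are allowed.
  config.erle.min = 500.f;                // Exceeds erle.max_l and erle.max_h.
  bool was_valid = webrtc::EchoCanceller3Config::Validate(&config);
  // was_valid == false; down_sampling_factor was reset to 4 and erle.min was
  // lowered to std::min(erle.max_l, erle.max_h).
  (void)was_valid;
}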

webrtc/api/audio/echo_canceller3_config.h Normal file

@@ -0,0 +1,228 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_ECHO_CANCELLER3_CONFIG_H_
#define API_AUDIO_ECHO_CANCELLER3_CONFIG_H_
#include <stddef.h> // size_t
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Configuration struct for EchoCanceller3
struct RTC_EXPORT EchoCanceller3Config {
// Checks and updates the config parameters to lie within (mostly) reasonable
// ranges. Returns true if and only if the config did not need to be changed.
static bool Validate(EchoCanceller3Config* config);
EchoCanceller3Config();
EchoCanceller3Config(const EchoCanceller3Config& e);
EchoCanceller3Config& operator=(const EchoCanceller3Config& other);
struct Buffering {
size_t excess_render_detection_interval_blocks = 250;
size_t max_allowed_excess_render_blocks = 8;
} buffering;
struct Delay {
Delay();
Delay(const Delay& e);
Delay& operator=(const Delay& e);
size_t default_delay = 5;
size_t down_sampling_factor = 4;
size_t num_filters = 5;
size_t delay_headroom_samples = 32;
size_t hysteresis_limit_blocks = 1;
size_t fixed_capture_delay_samples = 0;
float delay_estimate_smoothing = 0.7f;
float delay_candidate_detection_threshold = 0.2f;
struct DelaySelectionThresholds {
int initial;
int converged;
} delay_selection_thresholds = {5, 20};
bool use_external_delay_estimator = false;
bool log_warning_on_delay_changes = false;
struct AlignmentMixing {
bool downmix;
bool adaptive_selection;
float activity_power_threshold;
bool prefer_first_two_channels;
};
AlignmentMixing render_alignment_mixing = {false, true, 10000.f, true};
AlignmentMixing capture_alignment_mixing = {false, true, 10000.f, false};
} delay;
struct Filter {
struct RefinedConfiguration {
size_t length_blocks;
float leakage_converged;
float leakage_diverged;
float error_floor;
float error_ceil;
float noise_gate;
};
struct CoarseConfiguration {
size_t length_blocks;
float rate;
float noise_gate;
};
RefinedConfiguration refined = {13, 0.00005f, 0.05f,
0.001f, 2.f, 20075344.f};
CoarseConfiguration coarse = {13, 0.7f, 20075344.f};
RefinedConfiguration refined_initial = {12, 0.005f, 0.5f,
0.001f, 2.f, 20075344.f};
CoarseConfiguration coarse_initial = {12, 0.9f, 20075344.f};
size_t config_change_duration_blocks = 250;
float initial_state_seconds = 2.5f;
bool conservative_initial_phase = false;
bool enable_coarse_filter_output_usage = true;
bool use_linear_filter = true;
bool export_linear_aec_output = false;
} filter;
struct Erle {
float min = 1.f;
float max_l = 4.f;
float max_h = 1.5f;
bool onset_detection = true;
size_t num_sections = 1;
bool clamp_quality_estimate_to_zero = true;
bool clamp_quality_estimate_to_one = true;
} erle;
struct EpStrength {
float default_gain = 1.f;
float default_len = 0.83f;
bool echo_can_saturate = true;
bool bounded_erl = false;
} ep_strength;
struct EchoAudibility {
float low_render_limit = 4 * 64.f;
float normal_render_limit = 64.f;
float floor_power = 2 * 64.f;
float audibility_threshold_lf = 10;
float audibility_threshold_mf = 10;
float audibility_threshold_hf = 10;
bool use_stationarity_properties = false;
bool use_stationarity_properties_at_init = false;
} echo_audibility;
struct RenderLevels {
float active_render_limit = 100.f;
float poor_excitation_render_limit = 150.f;
float poor_excitation_render_limit_ds8 = 20.f;
float render_power_gain_db = 0.f;
} render_levels;
struct EchoRemovalControl {
bool has_clock_drift = false;
bool linear_and_stable_echo_path = false;
} echo_removal_control;
struct EchoModel {
EchoModel();
EchoModel(const EchoModel& e);
EchoModel& operator=(const EchoModel& e);
size_t noise_floor_hold = 50;
float min_noise_floor_power = 1638400.f;
float stationary_gate_slope = 10.f;
float noise_gate_power = 27509.42f;
float noise_gate_slope = 0.3f;
size_t render_pre_window_size = 1;
size_t render_post_window_size = 1;
bool model_reverb_in_nonlinear_mode = true;
} echo_model;
struct ComfortNoise {
float noise_floor_dbfs = -96.03406f;
} comfort_noise;
struct Suppressor {
Suppressor();
Suppressor(const Suppressor& e);
Suppressor& operator=(const Suppressor& e);
size_t nearend_average_blocks = 4;
struct MaskingThresholds {
MaskingThresholds(float enr_transparent,
float enr_suppress,
float emr_transparent);
MaskingThresholds(const MaskingThresholds& e);
MaskingThresholds& operator=(const MaskingThresholds& e);
float enr_transparent;
float enr_suppress;
float emr_transparent;
};
struct Tuning {
Tuning(MaskingThresholds mask_lf,
MaskingThresholds mask_hf,
float max_inc_factor,
float max_dec_factor_lf);
Tuning(const Tuning& e);
Tuning& operator=(const Tuning& e);
MaskingThresholds mask_lf;
MaskingThresholds mask_hf;
float max_inc_factor;
float max_dec_factor_lf;
};
Tuning normal_tuning = Tuning(MaskingThresholds(.3f, .4f, .3f),
MaskingThresholds(.07f, .1f, .3f),
2.0f,
0.25f);
Tuning nearend_tuning = Tuning(MaskingThresholds(1.09f, 1.1f, .3f),
MaskingThresholds(.1f, .3f, .3f),
2.0f,
0.25f);
struct DominantNearendDetection {
float enr_threshold = .25f;
float enr_exit_threshold = 10.f;
float snr_threshold = 30.f;
int hold_duration = 50;
int trigger_threshold = 12;
bool use_during_initial_phase = true;
} dominant_nearend_detection;
struct SubbandNearendDetection {
size_t nearend_average_blocks = 1;
struct SubbandRegion {
size_t low;
size_t high;
};
SubbandRegion subband1 = {1, 1};
SubbandRegion subband2 = {1, 1};
float nearend_threshold = 1.f;
float snr_threshold = 1.f;
} subband_nearend_detection;
bool use_subband_nearend_detection = false;
struct HighBandsSuppression {
float enr_threshold = 1.f;
float max_gain_during_echo = 1.f;
float anti_howling_activation_threshold = 400.f;
float anti_howling_gain = 1.f;
} high_bands_suppression;
float floor_first_increase = 0.00001f;
} suppressor;
};
} // namespace webrtc
#endif // API_AUDIO_ECHO_CANCELLER3_CONFIG_H_
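
Typical use is to start from the defaults and override individual fields, e.g. (a hypothetical debugging configuration, not part of this commit):

#include "api/audio/echo_canceller3_config.h"

webrtc::EchoCanceller3Config MakeDebugConfig() {
  webrtc::EchoCanceller3Config config;
  config.filter.export_linear_aec_output = true;   // Expose the linear stage.
  config.delay.log_warning_on_delay_changes = true;
  return config;
}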

webrtc/api/audio/echo_control.h Normal file

@@ -0,0 +1,68 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_ECHO_CONTROL_H_
#define API_AUDIO_ECHO_CONTROL_H_
#include <memory>
#include "rtc_base/checks.h"
namespace webrtc {
class AudioBuffer;
// Interface for an acoustic echo cancellation (AEC) submodule.
class EchoControl {
public:
// Analysis (not changing) of the render signal.
virtual void AnalyzeRender(AudioBuffer* render) = 0;
// Analysis (not changing) of the capture signal.
virtual void AnalyzeCapture(AudioBuffer* capture) = 0;
// Processes the capture signal in order to remove the echo.
virtual void ProcessCapture(AudioBuffer* capture, bool level_change) = 0;
// As above, but also returns the linear filter output.
virtual void ProcessCapture(AudioBuffer* capture,
AudioBuffer* linear_output,
bool level_change) = 0;
struct Metrics {
double echo_return_loss;
double echo_return_loss_enhancement;
int delay_ms;
};
// Collect current metrics from the echo controller.
virtual Metrics GetMetrics() const = 0;
// Provides an optional external estimate of the audio buffer delay.
virtual void SetAudioBufferDelay(int delay_ms) = 0;
// Returns whether the signal is altered.
virtual bool ActiveProcessing() const = 0;
virtual ~EchoControl() {}
};
// Interface for a factory that creates EchoControllers.
class EchoControlFactory {
public:
virtual std::unique_ptr<EchoControl> Create(int sample_rate_hz,
int num_render_channels,
int num_capture_channels) = 0;
virtual ~EchoControlFactory() = default;
};
} // namespace webrtc
#endif // API_AUDIO_ECHO_CONTROL_H_
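
A do-nothing implementation illustrates the shape of the interface (sketch only; NullEchoControl is hypothetical):

#include "api/audio/echo_control.h"

namespace {

// Satisfies the EchoControl interface without altering any audio.
class NullEchoControl : public webrtc::EchoControl {
 public:
  void AnalyzeRender(webrtc::AudioBuffer* render) override {}
  void AnalyzeCapture(webrtc::AudioBuffer* capture) override {}
  void ProcessCapture(webrtc::AudioBuffer* capture,
                      bool level_change) override {}
  void ProcessCapture(webrtc::AudioBuffer* capture,
                      webrtc::AudioBuffer* linear_output,
                      bool level_change) override {}
  Metrics GetMetrics() const override { return {0.0, 0.0, 0}; }
  void SetAudioBufferDelay(int delay_ms) override {}
  bool ActiveProcessing() const override { return false; }
};

}  // namespace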

webrtc/api/audio_codecs/audio_decoder.cc Normal file

@@ -0,0 +1,170 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/audio_decoder.h"
#include <assert.h>
#include <memory>
#include <utility>
#include "api/array_view.h"
#include "rtc_base/checks.h"
#include "rtc_base/sanitizer.h"
#include "rtc_base/trace_event.h"
namespace webrtc {
namespace {
class OldStyleEncodedFrame final : public AudioDecoder::EncodedAudioFrame {
public:
OldStyleEncodedFrame(AudioDecoder* decoder, rtc::Buffer&& payload)
: decoder_(decoder), payload_(std::move(payload)) {}
size_t Duration() const override {
const int ret = decoder_->PacketDuration(payload_.data(), payload_.size());
return ret < 0 ? 0 : static_cast<size_t>(ret);
}
absl::optional<DecodeResult> Decode(
rtc::ArrayView<int16_t> decoded) const override {
auto speech_type = AudioDecoder::kSpeech;
const int ret = decoder_->Decode(
payload_.data(), payload_.size(), decoder_->SampleRateHz(),
decoded.size() * sizeof(int16_t), decoded.data(), &speech_type);
return ret < 0 ? absl::nullopt
: absl::optional<DecodeResult>(
{static_cast<size_t>(ret), speech_type});
}
private:
AudioDecoder* const decoder_;
const rtc::Buffer payload_;
};
} // namespace
bool AudioDecoder::EncodedAudioFrame::IsDtxPacket() const {
return false;
}
AudioDecoder::ParseResult::ParseResult() = default;
AudioDecoder::ParseResult::ParseResult(ParseResult&& b) = default;
AudioDecoder::ParseResult::ParseResult(uint32_t timestamp,
int priority,
std::unique_ptr<EncodedAudioFrame> frame)
: timestamp(timestamp), priority(priority), frame(std::move(frame)) {
RTC_DCHECK_GE(priority, 0);
}
AudioDecoder::ParseResult::~ParseResult() = default;
AudioDecoder::ParseResult& AudioDecoder::ParseResult::operator=(
ParseResult&& b) = default;
std::vector<AudioDecoder::ParseResult> AudioDecoder::ParsePayload(
rtc::Buffer&& payload,
uint32_t timestamp) {
std::vector<ParseResult> results;
std::unique_ptr<EncodedAudioFrame> frame(
new OldStyleEncodedFrame(this, std::move(payload)));
results.emplace_back(timestamp, 0, std::move(frame));
return results;
}
int AudioDecoder::Decode(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
size_t max_decoded_bytes,
int16_t* decoded,
SpeechType* speech_type) {
TRACE_EVENT0("webrtc", "AudioDecoder::Decode");
rtc::MsanCheckInitialized(rtc::MakeArrayView(encoded, encoded_len));
int duration = PacketDuration(encoded, encoded_len);
if (duration >= 0 &&
duration * Channels() * sizeof(int16_t) > max_decoded_bytes) {
return -1;
}
return DecodeInternal(encoded, encoded_len, sample_rate_hz, decoded,
speech_type);
}
int AudioDecoder::DecodeRedundant(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
size_t max_decoded_bytes,
int16_t* decoded,
SpeechType* speech_type) {
TRACE_EVENT0("webrtc", "AudioDecoder::DecodeRedundant");
rtc::MsanCheckInitialized(rtc::MakeArrayView(encoded, encoded_len));
int duration = PacketDurationRedundant(encoded, encoded_len);
if (duration >= 0 &&
duration * Channels() * sizeof(int16_t) > max_decoded_bytes) {
return -1;
}
return DecodeRedundantInternal(encoded, encoded_len, sample_rate_hz, decoded,
speech_type);
}
int AudioDecoder::DecodeRedundantInternal(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
int16_t* decoded,
SpeechType* speech_type) {
return DecodeInternal(encoded, encoded_len, sample_rate_hz, decoded,
speech_type);
}
bool AudioDecoder::HasDecodePlc() const {
return false;
}
size_t AudioDecoder::DecodePlc(size_t num_frames, int16_t* decoded) {
return 0;
}
// TODO(bugs.webrtc.org/9676): Remove default implementation.
void AudioDecoder::GeneratePlc(size_t /*requested_samples_per_channel*/,
rtc::BufferT<int16_t>* /*concealment_audio*/) {}
int AudioDecoder::ErrorCode() {
return 0;
}
int AudioDecoder::PacketDuration(const uint8_t* encoded,
size_t encoded_len) const {
return kNotImplemented;
}
int AudioDecoder::PacketDurationRedundant(const uint8_t* encoded,
size_t encoded_len) const {
return kNotImplemented;
}
bool AudioDecoder::PacketHasFec(const uint8_t* encoded,
size_t encoded_len) const {
return false;
}
AudioDecoder::SpeechType AudioDecoder::ConvertSpeechType(int16_t type) {
switch (type) {
case 0: // TODO(hlundin): Both iSAC and Opus return 0 for speech.
case 1:
return kSpeech;
case 2:
return kComfortNoise;
default:
assert(false);
return kSpeech;
}
}
} // namespace webrtc
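A hedged usage sketch of the ParsePayload()/Decode() flow implemented above (DecodePacket and its arguments are illustrative, not part of this commit):

void DecodePacket(webrtc::AudioDecoder* decoder,
                  const uint8_t* data,
                  size_t size,
                  uint32_t rtp_timestamp) {
  rtc::Buffer payload(data, size);
  std::vector<webrtc::AudioDecoder::ParseResult> results =
      decoder->ParsePayload(std::move(payload), rtp_timestamp);
  for (auto& result : results) {
    // Duration() is in samples per channel; Decode() needs room for all
    // channels.
    std::vector<int16_t> pcm(result.frame->Duration() * decoder->Channels());
    auto decoded = result.frame->Decode(pcm);
    if (decoded) {
      // decoded->num_decoded_samples is the total across all channels.
    }
  }
}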

webrtc/api/audio_codecs/audio_decoder.h Normal file

@ -0,0 +1,193 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_AUDIO_DECODER_H_
#define API_AUDIO_CODECS_AUDIO_DECODER_H_
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "rtc_base/buffer.h"
#include "rtc_base/constructor_magic.h"
namespace webrtc {
class AudioDecoder {
public:
enum SpeechType {
kSpeech = 1,
kComfortNoise = 2,
};
// Used by PacketDuration below. The value -1 is reserved for errors.
enum { kNotImplemented = -2 };
AudioDecoder() = default;
virtual ~AudioDecoder() = default;
class EncodedAudioFrame {
public:
struct DecodeResult {
size_t num_decoded_samples;
SpeechType speech_type;
};
virtual ~EncodedAudioFrame() = default;
// Returns the duration in samples-per-channel of this audio frame.
// If no duration can be ascertained, returns zero.
virtual size_t Duration() const = 0;
// Returns true if this packet contains DTX.
virtual bool IsDtxPacket() const;
// Decodes this frame of audio and writes the result in |decoded|.
// |decoded| must be large enough to store as many samples as indicated by a
// call to Duration(). On success, returns an absl::optional containing the
// total number of samples across all channels, as well as whether the
// decoder produced comfort noise or speech. On failure, returns an empty
// absl::optional. Decode may be called at most once per frame object.
virtual absl::optional<DecodeResult> Decode(
rtc::ArrayView<int16_t> decoded) const = 0;
};
struct ParseResult {
ParseResult();
ParseResult(uint32_t timestamp,
int priority,
std::unique_ptr<EncodedAudioFrame> frame);
ParseResult(ParseResult&& b);
~ParseResult();
ParseResult& operator=(ParseResult&& b);
// The timestamp of the frame is in samples per channel.
uint32_t timestamp;
// The relative priority of the frame compared to other frames of the same
// payload and the same timeframe. A higher value means a lower priority.
// The highest priority is zero - negative values are not allowed.
int priority;
std::unique_ptr<EncodedAudioFrame> frame;
};
// Let the decoder parse this payload and prepare zero or more decodable
// frames. Each frame must be between 10 ms and 120 ms long. The caller must
// ensure that the AudioDecoder object outlives any frame objects returned by
// this call. The decoder is free to swap or move the data from the |payload|
// buffer. |timestamp| is the input timestamp, in samples, corresponding to
// the start of the payload.
virtual std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
uint32_t timestamp);
// TODO(bugs.webrtc.org/10098): The Decode and DecodeRedundant methods are
// obsolete; callers should call ParsePayload instead. For now, subclasses
// must still implement DecodeInternal.
// Decodes |encoded_len| bytes from |encoded| and writes the result in
// |decoded|. The maximum bytes allowed to be written into |decoded| is
// |max_decoded_bytes|. Returns the total number of samples across all
// channels. If the decoder produced comfort noise, |speech_type|
// is set to kComfortNoise, otherwise it is kSpeech. The desired output
// sample rate is provided in |sample_rate_hz|, which must be valid for the
// codec at hand.
int Decode(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
size_t max_decoded_bytes,
int16_t* decoded,
SpeechType* speech_type);
// Same as Decode(), but interfaces to the decoder's redundant decode
// function. The default implementation simply calls the regular Decode()
// method.
int DecodeRedundant(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
size_t max_decoded_bytes,
int16_t* decoded,
SpeechType* speech_type);
// Indicates if the decoder implements the DecodePlc method.
virtual bool HasDecodePlc() const;
// Calls the packet-loss concealment of the decoder to update the state after
// one or several lost packets. The caller must ensure that the memory
// allocated in |decoded| can accommodate |num_frames| frames.
virtual size_t DecodePlc(size_t num_frames, int16_t* decoded);
// Asks the decoder to generate packet-loss concealment and append it to the
// end of |concealment_audio|. The concealment audio should be in
// channel-interleaved format, with as many channels as the last decoded
// packet produced. The implementation must produce at least
// |requested_samples_per_channel| samples, or nothing at all; producing
// nothing signals the caller to conceal the loss by other means. If the
// implementation provides concealment samples, it is also responsible for
// "stitching" them together with the decoded audio on either side of the
// concealment.
// Note: The default implementation of GeneratePlc will be deleted soon. All
// implementations must provide their own, which can be as simple as a no-op.
// TODO(bugs.webrtc.org/9676): Remove default implementation.
virtual void GeneratePlc(size_t requested_samples_per_channel,
rtc::BufferT<int16_t>* concealment_audio);
// Resets the decoder state (empty buffers etc.).
virtual void Reset() = 0;
// Returns the last error code from the decoder.
virtual int ErrorCode();
// Returns the duration in samples-per-channel of the payload in |encoded|
// which is |encoded_len| bytes long. Returns kNotImplemented if no duration
// estimate is available, or -1 in case of an error.
virtual int PacketDuration(const uint8_t* encoded, size_t encoded_len) const;
// Returns the duration in samples-per-channel of the redundant payload in
// |encoded| which is |encoded_len| bytes long. Returns kNotImplemented if no
// duration estimate is available, or -1 in case of an error.
virtual int PacketDurationRedundant(const uint8_t* encoded,
size_t encoded_len) const;
// Detects whether a packet has forward error correction. The packet is
// comprised of the samples in |encoded| which is |encoded_len| bytes long.
// Returns true if the packet has FEC and false otherwise.
virtual bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const;
// Returns the actual sample rate of the decoder's output. This value may not
// change during the lifetime of the decoder.
virtual int SampleRateHz() const = 0;
// The number of channels in the decoder's output. This value may not change
// during the lifetime of the decoder.
virtual size_t Channels() const = 0;
protected:
static SpeechType ConvertSpeechType(int16_t type);
virtual int DecodeInternal(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
int16_t* decoded,
SpeechType* speech_type) = 0;
virtual int DecodeRedundantInternal(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
int16_t* decoded,
SpeechType* speech_type);
private:
RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_AUDIO_DECODER_H_
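For illustration, a minimal concrete decoder might look like this (a trivial little-endian PCM16 pass-through; hypothetical, not part of this commit):

class Pcm16Decoder : public webrtc::AudioDecoder {
 public:
  void Reset() override {}
  int SampleRateHz() const override { return 16000; }
  size_t Channels() const override { return 1; }

 protected:
  int DecodeInternal(const uint8_t* encoded,
                     size_t encoded_len,
                     int sample_rate_hz,
                     int16_t* decoded,
                     SpeechType* speech_type) override {
    // Two input bytes per 16-bit output sample.
    const size_t samples = encoded_len / 2;
    for (size_t i = 0; i < samples; ++i) {
      decoded[i] =
          static_cast<int16_t>(encoded[2 * i] | (encoded[2 * i + 1] << 8));
    }
    *speech_type = kSpeech;
    return static_cast<int>(samples);
  }
};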

webrtc/api/audio_codecs/audio_encoder.cc Normal file

@ -0,0 +1,113 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/audio_encoder.h"
#include "rtc_base/checks.h"
#include "rtc_base/trace_event.h"
namespace webrtc {
ANAStats::ANAStats() = default;
ANAStats::~ANAStats() = default;
ANAStats::ANAStats(const ANAStats&) = default;
AudioEncoder::EncodedInfo::EncodedInfo() = default;
AudioEncoder::EncodedInfo::EncodedInfo(const EncodedInfo&) = default;
AudioEncoder::EncodedInfo::EncodedInfo(EncodedInfo&&) = default;
AudioEncoder::EncodedInfo::~EncodedInfo() = default;
AudioEncoder::EncodedInfo& AudioEncoder::EncodedInfo::operator=(
const EncodedInfo&) = default;
AudioEncoder::EncodedInfo& AudioEncoder::EncodedInfo::operator=(EncodedInfo&&) =
default;
int AudioEncoder::RtpTimestampRateHz() const {
return SampleRateHz();
}
AudioEncoder::EncodedInfo AudioEncoder::Encode(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded) {
TRACE_EVENT0("webrtc", "AudioEncoder::Encode");
RTC_CHECK_EQ(audio.size(),
static_cast<size_t>(NumChannels() * SampleRateHz() / 100));
const size_t old_size = encoded->size();
EncodedInfo info = EncodeImpl(rtp_timestamp, audio, encoded);
RTC_CHECK_EQ(encoded->size() - old_size, info.encoded_bytes);
return info;
}
bool AudioEncoder::SetFec(bool enable) {
return !enable;
}
bool AudioEncoder::SetDtx(bool enable) {
return !enable;
}
bool AudioEncoder::GetDtx() const {
return false;
}
bool AudioEncoder::SetApplication(Application application) {
return false;
}
void AudioEncoder::SetMaxPlaybackRate(int frequency_hz) {}
void AudioEncoder::SetTargetBitrate(int target_bps) {}
rtc::ArrayView<std::unique_ptr<AudioEncoder>>
AudioEncoder::ReclaimContainedEncoders() {
return nullptr;
}
bool AudioEncoder::EnableAudioNetworkAdaptor(const std::string& config_string,
RtcEventLog* event_log) {
return false;
}
void AudioEncoder::DisableAudioNetworkAdaptor() {}
void AudioEncoder::OnReceivedUplinkPacketLossFraction(
float uplink_packet_loss_fraction) {}
void AudioEncoder::OnReceivedUplinkRecoverablePacketLossFraction(
float uplink_recoverable_packet_loss_fraction) {
RTC_NOTREACHED();
}
void AudioEncoder::OnReceivedTargetAudioBitrate(int target_audio_bitrate_bps) {
OnReceivedUplinkBandwidth(target_audio_bitrate_bps, absl::nullopt);
}
void AudioEncoder::OnReceivedUplinkBandwidth(
int target_audio_bitrate_bps,
absl::optional<int64_t> bwe_period_ms) {}
void AudioEncoder::OnReceivedUplinkAllocation(BitrateAllocationUpdate update) {
OnReceivedUplinkBandwidth(update.target_bitrate.bps(),
update.bwe_period.ms());
}
void AudioEncoder::OnReceivedRtt(int rtt_ms) {}
void AudioEncoder::OnReceivedOverhead(size_t overhead_bytes_per_packet) {}
void AudioEncoder::SetReceiverFrameLengthRange(int min_frame_length_ms,
int max_frame_length_ms) {}
ANAStats AudioEncoder::GetANAStats() const {
return ANAStats();
}
} // namespace webrtc

webrtc/api/audio_codecs/audio_encoder.h Normal file

@ -0,0 +1,257 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_AUDIO_ENCODER_H_
#define API_AUDIO_CODECS_AUDIO_ENCODER_H_
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/call/bitrate_allocation.h"
#include "api/units/time_delta.h"
#include "rtc_base/buffer.h"
#include "rtc_base/deprecation.h"
namespace webrtc {
class RtcEventLog;
// Statistics related to Audio Network Adaptation.
struct ANAStats {
ANAStats();
ANAStats(const ANAStats&);
~ANAStats();
// Number of actions taken by the ANA bitrate controller since the start of
// the call. If this value is not set, it indicates that the bitrate
// controller is disabled.
absl::optional<uint32_t> bitrate_action_counter;
// Number of actions taken by the ANA channel controller since the start of
// the call. If this value is not set, it indicates that the channel
// controller is disabled.
absl::optional<uint32_t> channel_action_counter;
// Number of actions taken by the ANA DTX controller since the start of the
// call. If this value is not set, it indicates that the DTX controller is
// disabled.
absl::optional<uint32_t> dtx_action_counter;
// Number of actions taken by the ANA FEC controller since the start of the
// call. If this value is not set, it indicates that the FEC controller is
// disabled.
absl::optional<uint32_t> fec_action_counter;
// Number of times the ANA frame length controller decided to increase the
// frame length since the start of the call. If this value is not set, it
// indicates that the frame length controller is disabled.
absl::optional<uint32_t> frame_length_increase_counter;
// Number of times the ANA frame length controller decided to decrease the
// frame length since the start of the call. If this value is not set, it
// indicates that the frame length controller is disabled.
absl::optional<uint32_t> frame_length_decrease_counter;
// The uplink packet loss fractions as set by the ANA FEC controller. If this
// value is not set, it indicates that the ANA FEC controller is not active.
absl::optional<float> uplink_packet_loss_fraction;
};
// This is the interface class for encoders in AudioCoding module. Each codec
// type must have an implementation of this class.
class AudioEncoder {
public:
// Used for UMA logging of codec usage. The same codecs, with the
// same values, must be listed in
// src/tools/metrics/histograms/histograms.xml in chromium to log
// correct values.
enum class CodecType {
kOther = 0, // Codec not specified, and/or not listed in this enum
kOpus = 1,
kIsac = 2,
kPcmA = 3,
kPcmU = 4,
kG722 = 5,
kIlbc = 6,
// Number of histogram bins in the UMA logging of codec types. The
// total number of different codecs that are logged cannot exceed this
// number.
kMaxLoggedAudioCodecTypes
};
struct EncodedInfoLeaf {
size_t encoded_bytes = 0;
uint32_t encoded_timestamp = 0;
int payload_type = 0;
bool send_even_if_empty = false;
bool speech = true;
CodecType encoder_type = CodecType::kOther;
};
// This is the main struct for auxiliary encoding information. Each encoded
// packet should be accompanied by one EncodedInfo struct, containing the
// total number of |encoded_bytes|, the |encoded_timestamp| and the
// |payload_type|. If the packet contains redundant encodings, the |redundant|
// vector will be populated with EncodedInfoLeaf structs. Each struct in the
// vector represents one encoding; the order of structs in the vector is the
// same as the order in which the actual payloads are written to the byte
// stream. When EncodedInfoLeaf structs are present in the vector, the main
// struct's |encoded_bytes| will be the sum of all the |encoded_bytes| in the
// vector.
struct EncodedInfo : public EncodedInfoLeaf {
EncodedInfo();
EncodedInfo(const EncodedInfo&);
EncodedInfo(EncodedInfo&&);
~EncodedInfo();
EncodedInfo& operator=(const EncodedInfo&);
EncodedInfo& operator=(EncodedInfo&&);
std::vector<EncodedInfoLeaf> redundant;
};
virtual ~AudioEncoder() = default;
// Returns the input sample rate in Hz and the number of input channels.
// These are constants set at instantiation time.
virtual int SampleRateHz() const = 0;
virtual size_t NumChannels() const = 0;
// Returns the rate at which the RTP timestamps are updated. The default
// implementation returns SampleRateHz().
virtual int RtpTimestampRateHz() const;
// Returns the number of 10 ms frames the encoder will put in the next
// packet. This value may only change when Encode() outputs a packet; i.e.,
// the encoder may vary the number of 10 ms frames from packet to packet, but
// it must decide the length of the next packet no later than when outputting
// the preceding packet.
virtual size_t Num10MsFramesInNextPacket() const = 0;
// Returns the maximum value that can be returned by
// Num10MsFramesInNextPacket().
virtual size_t Max10MsFramesInAPacket() const = 0;
// Returns the current target bitrate in bits/s. The value -1 means that the
// codec adapts the target automatically, and a current target cannot be
// provided.
virtual int GetTargetBitrate() const = 0;
// Accepts one 10 ms block of input audio (i.e., SampleRateHz() / 100 *
// NumChannels() samples). Multi-channel audio must be sample-interleaved.
// The encoder appends zero or more bytes of output to |encoded| and returns
// additional encoding information. Encode() checks some preconditions, calls
// EncodeImpl() which does the actual work, and then checks some
// postconditions.
EncodedInfo Encode(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded);
// Resets the encoder to its starting state, discarding any input that has
// been fed to the encoder but not yet emitted in a packet.
virtual void Reset() = 0;
// Enables or disables codec-internal FEC (forward error correction). Returns
// true if the codec was able to comply. The default implementation returns
// true when asked to disable FEC and false when asked to enable it (meaning
// that FEC isn't supported).
virtual bool SetFec(bool enable);
// Enables or disables codec-internal VAD/DTX. Returns true if the codec was
// able to comply. The default implementation returns true when asked to
// disable DTX and false when asked to enable it (meaning that DTX isn't
// supported).
virtual bool SetDtx(bool enable);
// Returns the status of codec-internal DTX. The default implementation always
// returns false.
virtual bool GetDtx() const;
// Sets the application mode. Returns true if the codec was able to comply.
// The default implementation just returns false.
enum class Application { kSpeech, kAudio };
virtual bool SetApplication(Application application);
// Tells the encoder about the highest sample rate the decoder is expected to
// use when decoding the bitstream. The encoder would typically use this
// information to adjust the quality of the encoding. The default
// implementation does nothing.
virtual void SetMaxPlaybackRate(int frequency_hz);
// This is to be deprecated. Please use |OnReceivedTargetAudioBitrate|
// instead.
// Tells the encoder what average bitrate we'd like it to produce. The
// encoder is free to adjust or disregard the given bitrate (the default
// implementation does the latter).
RTC_DEPRECATED virtual void SetTargetBitrate(int target_bps);
// Causes this encoder to let go of any other encoders it contains, and
// returns a pointer to an array where they are stored (which is required to
// live as long as this encoder). Unless the returned array is empty, you may
// not call any methods on this encoder afterwards, except for the
// destructor. The default implementation just returns an empty array.
// NOTE: This method is subject to change. Do not call or override it.
virtual rtc::ArrayView<std::unique_ptr<AudioEncoder>>
ReclaimContainedEncoders();
// Enables audio network adaptor. Returns true if successful.
virtual bool EnableAudioNetworkAdaptor(const std::string& config_string,
RtcEventLog* event_log);
// Disables audio network adaptor.
virtual void DisableAudioNetworkAdaptor();
// Provides uplink packet loss fraction to this encoder to allow it to adapt.
// |uplink_packet_loss_fraction| is in the range [0.0, 1.0].
virtual void OnReceivedUplinkPacketLossFraction(
float uplink_packet_loss_fraction);
RTC_DEPRECATED virtual void OnReceivedUplinkRecoverablePacketLossFraction(
float uplink_recoverable_packet_loss_fraction);
// Provides target audio bitrate to this encoder to allow it to adapt.
virtual void OnReceivedTargetAudioBitrate(int target_bps);
// Provides target audio bitrate and corresponding probing interval of
// the bandwidth estimator to this encoder to allow it to adapt.
virtual void OnReceivedUplinkBandwidth(int target_audio_bitrate_bps,
absl::optional<int64_t> bwe_period_ms);
// Provides target audio bitrate and corresponding probing interval of
// the bandwidth estimator to this encoder to allow it to adapt.
virtual void OnReceivedUplinkAllocation(BitrateAllocationUpdate update);
// Provides RTT to this encoder to allow it to adapt.
virtual void OnReceivedRtt(int rtt_ms);
// Provides overhead to this encoder to adapt. The overhead is the number of
// bytes that will be added to each packet the encoder generates.
virtual void OnReceivedOverhead(size_t overhead_bytes_per_packet);
// To allow encoder to adapt its frame length, it must be provided the frame
// length range that receivers can accept.
virtual void SetReceiverFrameLengthRange(int min_frame_length_ms,
int max_frame_length_ms);
// Get statistics related to audio network adaptation.
virtual ANAStats GetANAStats() const;
// The range of frame lengths that are supported, or nullopt if there's no
// such information. This is used to calculate the full bitrate range,
// including overhead.
virtual absl::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
const = 0;
protected:
// Subclasses implement this to perform the actual encoding. Called by
// Encode().
virtual EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded) = 0;
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_AUDIO_ENCODER_H_
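A hedged usage sketch of the 10 ms framing contract that Encode() documents above (EncodeOneFrame is illustrative, not part of this commit):

void EncodeOneFrame(webrtc::AudioEncoder* encoder,
                    rtc::ArrayView<const int16_t> frame_10ms,
                    uint32_t rtp_timestamp,
                    rtc::Buffer* packet) {
  // Encode() checks this precondition itself: exactly one 10 ms block.
  RTC_DCHECK_EQ(frame_10ms.size(),
                encoder->NumChannels() * encoder->SampleRateHz() / 100);
  webrtc::AudioEncoder::EncodedInfo info =
      encoder->Encode(rtp_timestamp, frame_10ms, packet);
  if (info.encoded_bytes > 0 || info.send_even_if_empty) {
    // Ship |packet| using info.payload_type and info.encoded_timestamp.
  }
}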

webrtc/api/call/bitrate_allocation.h Normal file

@ -0,0 +1,45 @@
/*
* Copyright 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_CALL_BITRATE_ALLOCATION_H_
#define API_CALL_BITRATE_ALLOCATION_H_
#include "api/units/data_rate.h"
#include "api/units/time_delta.h"
namespace webrtc {
// BitrateAllocationUpdate provides information to allocated streams about their
// bitrate allocation. It originates from the BitrateAllocator class and is
// propagated from there.
struct BitrateAllocationUpdate {
// The allocated target bitrate. Media streams should produce this amount of
// data. (Note that this may include packet overhead depending on
// configuration.)
DataRate target_bitrate = DataRate::Zero();
// The allocated part of the estimated link capacity. This is more stable than
// the target as it is based on the underlying link capacity estimate. This
// should be used to change encoder configuration when the cost of change is
// high.
DataRate stable_target_bitrate = DataRate::Zero();
// Predicted packet loss ratio.
double packet_loss_ratio = 0;
// Predicted round trip time.
TimeDelta round_trip_time = TimeDelta::PlusInfinity();
// |bwe_period| is deprecated, use |stable_target_bitrate| allocation instead.
TimeDelta bwe_period = TimeDelta::PlusInfinity();
// Congestion window pushback bitrate reduction fraction. Used in
// VideoStreamEncoder to reduce the bitrate by the given fraction
// by dropping frames.
double cwnd_reduce_ratio = 0;
};
} // namespace webrtc
#endif // API_CALL_BITRATE_ALLOCATION_H_
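An illustrative consumer of these fields, following the guidance in the comments above (OnAllocation is hypothetical, not part of this commit):

void OnAllocation(const webrtc::BitrateAllocationUpdate& update) {
  // Track the volatile target for moment-to-moment rate decisions...
  const int64_t target_bps = update.target_bitrate.bps();
  // ...but reconfigure the encoder from the more stable estimate, since
  // reconfiguration is costly.
  const int64_t stable_bps = update.stable_target_bitrate.bps();
  (void)target_bps;
  (void)stable_bps;
}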

webrtc/api/function_view.h Normal file

@ -0,0 +1,130 @@
/*
* Copyright 2016 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_FUNCTION_VIEW_H_
#define API_FUNCTION_VIEW_H_
#include <type_traits>
#include <utility>
#include "rtc_base/checks.h"
// Just like std::function, FunctionView will wrap any callable and hide its
// actual type, exposing only its signature. But unlike std::function,
// FunctionView doesn't own its callable---it just points to it. Thus, it's a
// good choice mainly as a function argument when the callable argument will
// not be called again once the function has returned.
//
// Its constructors are implicit, so that callers won't have to convert lambdas
// and other callables to FunctionView<Blah(Blah, Blah)> explicitly. This is
// safe because FunctionView is only a reference to the real callable.
//
// Example use:
//
// void SomeFunction(rtc::FunctionView<int(int)> index_transform);
// ...
// SomeFunction([](int i) { return 2 * i + 1; });
//
// Note: FunctionView is tiny (essentially just two pointers) and trivially
// copyable, so it's probably cheaper to pass it by value than by const
// reference.
namespace rtc {
template <typename T>
class FunctionView; // Undefined.
template <typename RetT, typename... ArgT>
class FunctionView<RetT(ArgT...)> final {
public:
// Constructor for lambdas and other callables; it accepts every type of
// argument except those noted in its enable_if call.
template <
typename F,
typename std::enable_if<
// Not for function pointers; we have another constructor for that
// below.
!std::is_function<typename std::remove_pointer<
typename std::remove_reference<F>::type>::type>::value &&
// Not for nullptr; we have another constructor for that below.
!std::is_same<std::nullptr_t,
typename std::remove_cv<F>::type>::value &&
// Not for FunctionView objects; we have another constructor for that
// (the implicitly declared copy constructor).
!std::is_same<FunctionView,
typename std::remove_cv<typename std::remove_reference<
F>::type>::type>::value>::type* = nullptr>
FunctionView(F&& f)
: call_(CallVoidPtr<typename std::remove_reference<F>::type>) {
f_.void_ptr = &f;
}
// Constructor that accepts function pointers. If the argument is null, the
// result is an empty FunctionView.
template <
typename F,
typename std::enable_if<std::is_function<typename std::remove_pointer<
typename std::remove_reference<F>::type>::type>::value>::type* =
nullptr>
FunctionView(F&& f)
: call_(f ? CallFunPtr<typename std::remove_pointer<F>::type> : nullptr) {
f_.fun_ptr = reinterpret_cast<void (*)()>(f);
}
// Constructor that accepts nullptr. It creates an empty FunctionView.
template <typename F,
typename std::enable_if<std::is_same<
std::nullptr_t,
typename std::remove_cv<F>::type>::value>::type* = nullptr>
FunctionView(F&& f) : call_(nullptr) {}
// Default constructor. Creates an empty FunctionView.
FunctionView() : call_(nullptr) {}
RetT operator()(ArgT... args) const {
RTC_DCHECK(call_);
return call_(f_, std::forward<ArgT>(args)...);
}
// Returns true if we have a function, false if we don't (i.e., we're null).
explicit operator bool() const { return !!call_; }
private:
union VoidUnion {
void* void_ptr;
void (*fun_ptr)();
};
template <typename F>
static RetT CallVoidPtr(VoidUnion vu, ArgT... args) {
return (*static_cast<F*>(vu.void_ptr))(std::forward<ArgT>(args)...);
}
template <typename F>
static RetT CallFunPtr(VoidUnion vu, ArgT... args) {
return (reinterpret_cast<typename std::add_pointer<F>::type>(vu.fun_ptr))(
std::forward<ArgT>(args)...);
}
// A pointer to the callable thing, with type information erased. It's a
// union because we have to use separate types depending on if the callable
// thing is a function pointer or something else.
VoidUnion f_;
// Pointer to a dispatch function that knows the type of the callable thing
// that's stored in f_, and how to call it. A FunctionView object is empty
// (null) iff call_ is null.
RetT (*call_)(VoidUnion, ArgT...);
};
} // namespace rtc
#endif // API_FUNCTION_VIEW_H_

webrtc/api/meson.build Normal file

@ -0,0 +1,46 @@
api_sources = [
'audio/audio_frame.cc',
'audio/channel_layout.cc',
'audio/echo_canceller3_config.cc',
'audio_codecs/audio_decoder.cc',
'audio_codecs/audio_encoder.cc',
'rtp_headers.cc',
'rtp_packet_info.cc',
'task_queue/task_queue_base.cc',
'units/data_rate.cc',
'units/data_size.cc',
'units/frequency.cc',
'units/time_delta.cc',
'units/timestamp.cc',
'video/color_space.cc',
'video/hdr_metadata.cc',
'video/video_content_type.cc',
'video/video_timing.cc',
]
api_headers = [
['', 'array_view.h'],
['', 'scoped_refptr.h'],
['audio', 'echo_canceller3_config.h'],
['audio', 'echo_control.h'],
]
foreach h : api_headers
install_headers(
join_paths(h[0], h[1]),
subdir: join_paths('webrtc_audio_processing', 'api', h[0])
)
endforeach
libapi = static_library('libapi',
api_sources,
dependencies: common_deps,
include_directories: webrtc_inc,
cpp_args : common_cxxflags
)
api_dep = declare_dependency(
link_with: libapi
)

webrtc/api/ref_counted_base.h Normal file

@ -0,0 +1,43 @@
/*
* Copyright 2017 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_REF_COUNTED_BASE_H_
#define API_REF_COUNTED_BASE_H_
#include "rtc_base/constructor_magic.h"
#include "rtc_base/ref_count.h"
#include "rtc_base/ref_counter.h"
namespace rtc {
class RefCountedBase {
public:
RefCountedBase() = default;
void AddRef() const { ref_count_.IncRef(); }
RefCountReleaseStatus Release() const {
const auto status = ref_count_.DecRef();
if (status == RefCountReleaseStatus::kDroppedLastRef) {
delete this;
}
return status;
}
protected:
virtual ~RefCountedBase() = default;
private:
mutable webrtc::webrtc_impl::RefCounter ref_count_{0};
RTC_DISALLOW_COPY_AND_ASSIGN(RefCountedBase);
};
} // namespace rtc
#endif // API_REF_COUNTED_BASE_H_
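A minimal sketch of the intended pattern, pairing RefCountedBase with the scoped_refptr defined later in this commit (Widget is hypothetical):

class Widget : public rtc::RefCountedBase {
 public:
  void Poke() {}
};

void UseWidget() {
  rtc::scoped_refptr<Widget> w(new Widget());
  w->Poke();
  // Widget's protected destructor runs when the last reference is released.
}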

webrtc/api/rtp_headers.cc Normal file

@ -0,0 +1,54 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/rtp_headers.h"
namespace webrtc {
RTPHeaderExtension::RTPHeaderExtension()
: hasTransmissionTimeOffset(false),
transmissionTimeOffset(0),
hasAbsoluteSendTime(false),
absoluteSendTime(0),
hasTransportSequenceNumber(false),
transportSequenceNumber(0),
hasAudioLevel(false),
voiceActivity(false),
audioLevel(0),
hasVideoRotation(false),
videoRotation(kVideoRotation_0),
hasVideoContentType(false),
videoContentType(VideoContentType::UNSPECIFIED),
has_video_timing(false) {}
RTPHeaderExtension::RTPHeaderExtension(const RTPHeaderExtension& other) =
default;
RTPHeaderExtension& RTPHeaderExtension::operator=(
const RTPHeaderExtension& other) = default;
RTPHeader::RTPHeader()
: markerBit(false),
payloadType(0),
sequenceNumber(0),
timestamp(0),
ssrc(0),
numCSRCs(0),
arrOfCSRCs(),
paddingLength(0),
headerLength(0),
payload_type_frequency(0),
extension() {}
RTPHeader::RTPHeader(const RTPHeader& other) = default;
RTPHeader& RTPHeader::operator=(const RTPHeader& other) = default;
} // namespace webrtc

webrtc/api/rtp_headers.h Normal file

@ -0,0 +1,190 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_RTP_HEADERS_H_
#define API_RTP_HEADERS_H_
#include <stddef.h>
#include <stdint.h>
#include <string>
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/units/timestamp.h"
#include "api/video/color_space.h"
#include "api/video/video_content_type.h"
#include "api/video/video_rotation.h"
#include "api/video/video_timing.h"
namespace webrtc {
struct FeedbackRequest {
// Determines whether the recv delta as specified in
// https://tools.ietf.org/html/draft-holmer-rmcat-transport-wide-cc-extensions-01
// should be included.
bool include_timestamps;
// Include feedback of received packets in the range [sequence_number -
// sequence_count + 1, sequence_number]. That is, no feedback will be sent if
// sequence_count is zero.
int sequence_count;
};
// The Absolute Capture Time extension is used to stamp RTP packets with an
// NTP timestamp showing when the first audio or video frame in a packet was
// originally captured. The intent of this extension is to provide a way to
// accomplish audio-to-video synchronization when RTCP-terminating intermediate
// systems (e.g. mixers) are involved. See:
// http://www.webrtc.org/experiments/rtp-hdrext/abs-capture-time
struct AbsoluteCaptureTime {
// Absolute capture timestamp is the NTP timestamp of when the first frame in
// a packet was originally captured. This timestamp MUST be based on the same
// clock as the clock used to generate NTP timestamps for RTCP sender reports
// on the capture system.
//
// It's not always possible to do an NTP clock readout at the exact moment
// when a media frame is captured. A capture system MAY postpone the readout
// until a more convenient time. A capture system SHOULD have known delays
// (e.g. from hardware buffers) subtracted from the readout to make the final
// timestamp as close to the actual capture time as possible.
//
// This field is encoded as a 64-bit unsigned fixed-point number with the high
// 32 bits for the timestamp in seconds and low 32 bits for the fractional
// part. This is also known as the UQ32.32 format and is what the RTP
// specification defines as the canonical format to represent NTP timestamps.
uint64_t absolute_capture_timestamp;
// Estimated capture clock offset is the sender's estimate of the offset
// between its own NTP clock and the capture system's NTP clock. The sender is
// here defined as the system that owns the NTP clock used to generate the NTP
// timestamps for the RTCP sender reports on this stream. The sender system is
// typically either the capture system or a mixer.
//
// This field is encoded as a 64-bit two's complement signed fixed-point
// number with the high 32 bits for the seconds and low 32 bits for the
// fractional part. It's intended to make it easy for a receiver that knows
// how to estimate the sender system's NTP clock to also estimate the capture
// system's NTP clock:
//
// Capture NTP Clock = Sender NTP Clock + Capture Clock Offset
absl::optional<int64_t> estimated_capture_clock_offset;
};
inline bool operator==(const AbsoluteCaptureTime& lhs,
const AbsoluteCaptureTime& rhs) {
return (lhs.absolute_capture_timestamp == rhs.absolute_capture_timestamp) &&
(lhs.estimated_capture_clock_offset ==
rhs.estimated_capture_clock_offset);
}
inline bool operator!=(const AbsoluteCaptureTime& lhs,
const AbsoluteCaptureTime& rhs) {
return !(lhs == rhs);
}
struct RTPHeaderExtension {
RTPHeaderExtension();
RTPHeaderExtension(const RTPHeaderExtension& other);
RTPHeaderExtension& operator=(const RTPHeaderExtension& other);
static constexpr int kAbsSendTimeFraction = 18;
Timestamp GetAbsoluteSendTimestamp() const {
RTC_DCHECK(hasAbsoluteSendTime);
RTC_DCHECK(absoluteSendTime < (1ul << 24));
return Timestamp::Micros((absoluteSendTime * 1000000ll) /
(1 << kAbsSendTimeFraction));
}
TimeDelta GetAbsoluteSendTimeDelta(uint32_t previous_sendtime) const {
RTC_DCHECK(hasAbsoluteSendTime);
RTC_DCHECK(absoluteSendTime < (1ul << 24));
RTC_DCHECK(previous_sendtime < (1ul << 24));
int32_t delta =
static_cast<int32_t>((absoluteSendTime - previous_sendtime) << 8) >> 8;
return TimeDelta::Micros((delta * 1000000ll) / (1 << kAbsSendTimeFraction));
}
bool hasTransmissionTimeOffset;
int32_t transmissionTimeOffset;
bool hasAbsoluteSendTime;
uint32_t absoluteSendTime;
absl::optional<AbsoluteCaptureTime> absolute_capture_time;
bool hasTransportSequenceNumber;
uint16_t transportSequenceNumber;
absl::optional<FeedbackRequest> feedback_request;
// Audio Level includes both level in dBov and voiced/unvoiced bit. See:
// https://tools.ietf.org/html/rfc6464#section-3
bool hasAudioLevel;
bool voiceActivity;
uint8_t audioLevel;
// For Coordination of Video Orientation. See
// http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
// ts_126114v120700p.pdf
bool hasVideoRotation;
VideoRotation videoRotation;
// TODO(ilnik): Refactor this and one above to be absl::optional() and remove
// a corresponding bool flag.
bool hasVideoContentType;
VideoContentType videoContentType;
bool has_video_timing;
VideoSendTiming video_timing;
VideoPlayoutDelay playout_delay;
// For identification of a stream when ssrc is not signaled. See
// https://tools.ietf.org/html/draft-ietf-avtext-rid-09
// TODO(danilchap): Update url from draft to release version.
std::string stream_id;
std::string repaired_stream_id;
// For identifying the media section used to interpret this RTP packet. See
// https://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-38
std::string mid;
absl::optional<ColorSpace> color_space;
};
enum { kRtpCsrcSize = 15 }; // RFC 3550 page 13
struct RTPHeader {
RTPHeader();
RTPHeader(const RTPHeader& other);
RTPHeader& operator=(const RTPHeader& other);
bool markerBit;
uint8_t payloadType;
uint16_t sequenceNumber;
uint32_t timestamp;
uint32_t ssrc;
uint8_t numCSRCs;
uint32_t arrOfCSRCs[kRtpCsrcSize];
size_t paddingLength;
size_t headerLength;
int payload_type_frequency;
RTPHeaderExtension extension;
};
// RTCP mode to use. Compound mode is described by RFC 4585 and reduced-size
// RTCP mode is described by RFC 5506.
enum class RtcpMode { kOff, kCompound, kReducedSize };
enum NetworkState {
kNetworkUp,
kNetworkDown,
};
} // namespace webrtc
#endif // API_RTP_HEADERS_H_
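A worked example of the UQ32.32 encoding described above (the helper is illustrative, not part of this commit; fraction must be in [0, 1)):

uint64_t NtpToQ32x32(uint32_t seconds, double fraction) {
  // High 32 bits hold whole seconds; low 32 bits hold fraction * 2^32.
  return (static_cast<uint64_t>(seconds) << 32) |
         static_cast<uint32_t>(fraction * 4294967296.0);
}
// E.g. 1.5 s -> NtpToQ32x32(1, 0.5) == 0x0000000180000000.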

webrtc/api/rtp_packet_info.cc Normal file

@ -0,0 +1,60 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/rtp_packet_info.h"
#include <algorithm>
#include <utility>
namespace webrtc {
RtpPacketInfo::RtpPacketInfo()
: ssrc_(0), rtp_timestamp_(0), receive_time_ms_(-1) {}
RtpPacketInfo::RtpPacketInfo(
uint32_t ssrc,
std::vector<uint32_t> csrcs,
uint32_t rtp_timestamp,
absl::optional<uint8_t> audio_level,
absl::optional<AbsoluteCaptureTime> absolute_capture_time,
int64_t receive_time_ms)
: ssrc_(ssrc),
csrcs_(std::move(csrcs)),
rtp_timestamp_(rtp_timestamp),
audio_level_(audio_level),
absolute_capture_time_(absolute_capture_time),
receive_time_ms_(receive_time_ms) {}
RtpPacketInfo::RtpPacketInfo(const RTPHeader& rtp_header,
int64_t receive_time_ms)
: ssrc_(rtp_header.ssrc),
rtp_timestamp_(rtp_header.timestamp),
receive_time_ms_(receive_time_ms) {
const auto& extension = rtp_header.extension;
const auto csrcs_count = std::min<size_t>(rtp_header.numCSRCs, kRtpCsrcSize);
csrcs_.assign(&rtp_header.arrOfCSRCs[0], &rtp_header.arrOfCSRCs[csrcs_count]);
if (extension.hasAudioLevel) {
audio_level_ = extension.audioLevel;
}
absolute_capture_time_ = extension.absolute_capture_time;
}
bool operator==(const RtpPacketInfo& lhs, const RtpPacketInfo& rhs) {
return (lhs.ssrc() == rhs.ssrc()) && (lhs.csrcs() == rhs.csrcs()) &&
(lhs.rtp_timestamp() == rhs.rtp_timestamp()) &&
(lhs.audio_level() == rhs.audio_level()) &&
(lhs.absolute_capture_time() == rhs.absolute_capture_time()) &&
(lhs.receive_time_ms() == rhs.receive_time_ms());
}
} // namespace webrtc

webrtc/api/rtp_packet_info.h Normal file

@ -0,0 +1,97 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_RTP_PACKET_INFO_H_
#define API_RTP_PACKET_INFO_H_
#include <cstdint>
#include <utility>
#include <vector>
#include "absl/types/optional.h"
#include "api/rtp_headers.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
//
// Structure to hold information about a received |RtpPacket|. It is primarily
// used to carry per-packet information from when a packet is received until
// the information is passed to |SourceTracker|.
//
class RTC_EXPORT RtpPacketInfo {
public:
RtpPacketInfo();
RtpPacketInfo(uint32_t ssrc,
std::vector<uint32_t> csrcs,
uint32_t rtp_timestamp,
absl::optional<uint8_t> audio_level,
absl::optional<AbsoluteCaptureTime> absolute_capture_time,
int64_t receive_time_ms);
RtpPacketInfo(const RTPHeader& rtp_header, int64_t receive_time_ms);
RtpPacketInfo(const RtpPacketInfo& other) = default;
RtpPacketInfo(RtpPacketInfo&& other) = default;
RtpPacketInfo& operator=(const RtpPacketInfo& other) = default;
RtpPacketInfo& operator=(RtpPacketInfo&& other) = default;
uint32_t ssrc() const { return ssrc_; }
void set_ssrc(uint32_t value) { ssrc_ = value; }
const std::vector<uint32_t>& csrcs() const { return csrcs_; }
void set_csrcs(std::vector<uint32_t> value) { csrcs_ = std::move(value); }
uint32_t rtp_timestamp() const { return rtp_timestamp_; }
void set_rtp_timestamp(uint32_t value) { rtp_timestamp_ = value; }
absl::optional<uint8_t> audio_level() const { return audio_level_; }
void set_audio_level(absl::optional<uint8_t> value) { audio_level_ = value; }
const absl::optional<AbsoluteCaptureTime>& absolute_capture_time() const {
return absolute_capture_time_;
}
void set_absolute_capture_time(
const absl::optional<AbsoluteCaptureTime>& value) {
absolute_capture_time_ = value;
}
int64_t receive_time_ms() const { return receive_time_ms_; }
void set_receive_time_ms(int64_t value) { receive_time_ms_ = value; }
private:
// Fields from the RTP header:
// https://tools.ietf.org/html/rfc3550#section-5.1
uint32_t ssrc_;
std::vector<uint32_t> csrcs_;
uint32_t rtp_timestamp_;
// Fields from the Audio Level header extension:
// https://tools.ietf.org/html/rfc6464#section-3
absl::optional<uint8_t> audio_level_;
// Fields from the Absolute Capture Time header extension:
// http://www.webrtc.org/experiments/rtp-hdrext/abs-capture-time
absl::optional<AbsoluteCaptureTime> absolute_capture_time_;
// Local |webrtc::Clock|-based timestamp of when the packet was received.
int64_t receive_time_ms_;
};
bool operator==(const RtpPacketInfo& lhs, const RtpPacketInfo& rhs);
inline bool operator!=(const RtpPacketInfo& lhs, const RtpPacketInfo& rhs) {
return !(lhs == rhs);
}
} // namespace webrtc
#endif // API_RTP_PACKET_INFO_H_

webrtc/api/rtp_packet_infos.h Normal file

@ -0,0 +1,130 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_RTP_PACKET_INFOS_H_
#define API_RTP_PACKET_INFOS_H_
#include <cstdint>
#include <utility>
#include <vector>
#include "api/ref_counted_base.h"
#include "api/rtp_packet_info.h"
#include "api/scoped_refptr.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Semi-immutable structure to hold information about packets used to assemble
// an audio or video frame. Uses internal reference counting to make it very
// cheap to copy.
//
// We should ideally just use |std::vector<RtpPacketInfo>| and have it
// |std::move()|-ed as the per-packet information is transferred from one object
// to another. But moving the info, instead of copying it, is not easily done
// for the current video code.
class RTC_EXPORT RtpPacketInfos {
public:
using vector_type = std::vector<RtpPacketInfo>;
using value_type = vector_type::value_type;
using size_type = vector_type::size_type;
using difference_type = vector_type::difference_type;
using const_reference = vector_type::const_reference;
using const_pointer = vector_type::const_pointer;
using const_iterator = vector_type::const_iterator;
using const_reverse_iterator = vector_type::const_reverse_iterator;
using reference = const_reference;
using pointer = const_pointer;
using iterator = const_iterator;
using reverse_iterator = const_reverse_iterator;
RtpPacketInfos() {}
explicit RtpPacketInfos(const vector_type& entries)
: data_(Data::Create(entries)) {}
explicit RtpPacketInfos(vector_type&& entries)
: data_(Data::Create(std::move(entries))) {}
RtpPacketInfos(const RtpPacketInfos& other) = default;
RtpPacketInfos(RtpPacketInfos&& other) = default;
RtpPacketInfos& operator=(const RtpPacketInfos& other) = default;
RtpPacketInfos& operator=(RtpPacketInfos&& other) = default;
const_reference operator[](size_type pos) const { return entries()[pos]; }
const_reference at(size_type pos) const { return entries().at(pos); }
const_reference front() const { return entries().front(); }
const_reference back() const { return entries().back(); }
const_iterator begin() const { return entries().begin(); }
const_iterator end() const { return entries().end(); }
const_reverse_iterator rbegin() const { return entries().rbegin(); }
const_reverse_iterator rend() const { return entries().rend(); }
const_iterator cbegin() const { return entries().cbegin(); }
const_iterator cend() const { return entries().cend(); }
const_reverse_iterator crbegin() const { return entries().crbegin(); }
const_reverse_iterator crend() const { return entries().crend(); }
bool empty() const { return entries().empty(); }
size_type size() const { return entries().size(); }
private:
class Data : public rtc::RefCountedBase {
public:
static rtc::scoped_refptr<Data> Create(const vector_type& entries) {
// Performance optimization for the empty case.
if (entries.empty()) {
return nullptr;
}
return new Data(entries);
}
static rtc::scoped_refptr<Data> Create(vector_type&& entries) {
// Performance optimization for the empty case.
if (entries.empty()) {
return nullptr;
}
return new Data(std::move(entries));
}
const vector_type& entries() const { return entries_; }
private:
explicit Data(const vector_type& entries) : entries_(entries) {}
explicit Data(vector_type&& entries) : entries_(std::move(entries)) {}
~Data() override {}
const vector_type entries_;
};
static const vector_type& empty_entries() {
static const vector_type& value = *new vector_type();
return value;
}
const vector_type& entries() const {
if (data_ != nullptr) {
return data_->entries();
} else {
return empty_entries();
}
}
rtc::scoped_refptr<Data> data_;
};
} // namespace webrtc
#endif // API_RTP_PACKET_INFOS_H_
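A short sketch of the cheap-copy behavior described above (Distribute is hypothetical, not part of this commit):

void Distribute(std::vector<webrtc::RtpPacketInfo> entries) {
  webrtc::RtpPacketInfos infos(std::move(entries));
  // Copying shares the reference-counted Data block; no vector is copied.
  webrtc::RtpPacketInfos fanout = infos;
  for (const webrtc::RtpPacketInfo& info : fanout) {
    (void)info.rtp_timestamp();  // Read-only access, as with a const vector.
  }
}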

webrtc/api/scoped_refptr.h Normal file

@ -0,0 +1,164 @@
/*
* Copyright 2011 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Originally these classes are from Chromium.
// http://src.chromium.org/viewvc/chrome/trunk/src/base/memory/ref_counted.h?view=markup
//
// A smart pointer class for reference counted objects. Use this class instead
// of calling AddRef and Release manually on a reference counted object to
// avoid common memory leaks caused by forgetting to Release an object
// reference. Sample usage:
//
// class MyFoo : public RefCounted<MyFoo> {
// ...
// };
//
// void some_function() {
// scoped_refptr<MyFoo> foo = new MyFoo();
// foo->Method(param);
// // |foo| is released when this function returns
// }
//
// void some_other_function() {
// scoped_refptr<MyFoo> foo = new MyFoo();
// ...
// foo = nullptr; // explicitly releases |foo|
// ...
// if (foo)
// foo->Method(param);
// }
//
// The above examples show how scoped_refptr<T> acts like a pointer to T.
// Given two scoped_refptr<T> classes, it is also possible to exchange
// references between the two objects, like so:
//
// {
// scoped_refptr<MyFoo> a = new MyFoo();
// scoped_refptr<MyFoo> b;
//
// b.swap(a);
// // now, |b| references the MyFoo object, and |a| references null.
// }
//
// To make both |a| and |b| in the above example reference the same MyFoo
// object, simply use the assignment operator:
//
// {
// scoped_refptr<MyFoo> a = new MyFoo();
// scoped_refptr<MyFoo> b;
//
// b = a;
// // now, |a| and |b| each own a reference to the same MyFoo object.
// }
//
#ifndef API_SCOPED_REFPTR_H_
#define API_SCOPED_REFPTR_H_
#include <memory>
#include <utility>
namespace rtc {
template <class T>
class scoped_refptr {
public:
typedef T element_type;
scoped_refptr() : ptr_(nullptr) {}
scoped_refptr(T* p) : ptr_(p) { // NOLINT(runtime/explicit)
if (ptr_)
ptr_->AddRef();
}
scoped_refptr(const scoped_refptr<T>& r) : ptr_(r.ptr_) {
if (ptr_)
ptr_->AddRef();
}
template <typename U>
scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
if (ptr_)
ptr_->AddRef();
}
// Move constructors.
scoped_refptr(scoped_refptr<T>&& r) noexcept : ptr_(r.release()) {}
template <typename U>
scoped_refptr(scoped_refptr<U>&& r) noexcept : ptr_(r.release()) {}
~scoped_refptr() {
if (ptr_)
ptr_->Release();
}
T* get() const { return ptr_; }
operator T*() const { return ptr_; }
T* operator->() const { return ptr_; }
// Returns the (possibly null) raw pointer, and makes the scoped_refptr hold a
// null pointer, all without touching the reference count of the underlying
// pointed-to object. The object is still reference counted, and the caller of
// release() is now the proud owner of one reference, so it is responsible for
// calling Release() once on the object when no longer using it.
T* release() {
T* retVal = ptr_;
ptr_ = nullptr;
return retVal;
}
scoped_refptr<T>& operator=(T* p) {
// AddRef first so that self assignment should work
if (p)
p->AddRef();
if (ptr_)
ptr_->Release();
ptr_ = p;
return *this;
}
scoped_refptr<T>& operator=(const scoped_refptr<T>& r) {
return *this = r.ptr_;
}
template <typename U>
scoped_refptr<T>& operator=(const scoped_refptr<U>& r) {
return *this = r.get();
}
scoped_refptr<T>& operator=(scoped_refptr<T>&& r) noexcept {
scoped_refptr<T>(std::move(r)).swap(*this);
return *this;
}
template <typename U>
scoped_refptr<T>& operator=(scoped_refptr<U>&& r) noexcept {
scoped_refptr<T>(std::move(r)).swap(*this);
return *this;
}
void swap(T** pp) noexcept {
T* p = ptr_;
ptr_ = *pp;
*pp = p;
}
void swap(scoped_refptr<T>& r) noexcept { swap(&r.ptr_); }
protected:
T* ptr_;
};
} // namespace rtc
#endif // API_SCOPED_REFPTR_H_

webrtc/api/task_queue/queued_task.h Normal file

@ -0,0 +1,32 @@
/*
* Copyright 2018 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_TASK_QUEUE_QUEUED_TASK_H_
#define API_TASK_QUEUE_QUEUED_TASK_H_
namespace webrtc {
// Base interface for asynchronously executed tasks.
// The interface basically consists of a single function, Run(), that executes
// on the target queue. For more details see the Run() method and TaskQueue.
class QueuedTask {
public:
virtual ~QueuedTask() = default;
// Main routine that will run when the task is executed on the desired queue.
// The task should return |true| to indicate that it should be deleted or
// |false| to indicate that the queue should consider ownership of the task
// having been transferred. Returning |false| can be useful if a task has
// re-posted itself to a different queue or is otherwise being re-used.
virtual bool Run() = 0;
};
} // namespace webrtc
#endif // API_TASK_QUEUE_QUEUED_TASK_H_
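A minimal sketch of the ownership contract described in Run() above (CounterTask is hypothetical, not part of this commit):

class CounterTask : public webrtc::QueuedTask {
 public:
  explicit CounterTask(int* counter) : counter_(counter) {}

 private:
  bool Run() override {
    ++*counter_;
    return true;  // True: the queue deletes this task after Run().
  }
  int* const counter_;
};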

webrtc/api/task_queue/task_queue_base.cc Normal file

@ -0,0 +1,79 @@
/*
* Copyright 2019 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/task_queue/task_queue_base.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "rtc_base/checks.h"
#if defined(ABSL_HAVE_THREAD_LOCAL)
namespace webrtc {
namespace {
ABSL_CONST_INIT thread_local TaskQueueBase* current = nullptr;
} // namespace
TaskQueueBase* TaskQueueBase::Current() {
return current;
}
TaskQueueBase::CurrentTaskQueueSetter::CurrentTaskQueueSetter(
TaskQueueBase* task_queue)
: previous_(current) {
current = task_queue;
}
TaskQueueBase::CurrentTaskQueueSetter::~CurrentTaskQueueSetter() {
current = previous_;
}
} // namespace webrtc
#elif defined(WEBRTC_POSIX)
#include <pthread.h>
namespace webrtc {
namespace {
ABSL_CONST_INIT pthread_key_t g_queue_ptr_tls = 0;
void InitializeTls() {
RTC_CHECK(pthread_key_create(&g_queue_ptr_tls, nullptr) == 0);
}
pthread_key_t GetQueuePtrTls() {
static pthread_once_t init_once = PTHREAD_ONCE_INIT;
RTC_CHECK(pthread_once(&init_once, &InitializeTls) == 0);
return g_queue_ptr_tls;
}
} // namespace
TaskQueueBase* TaskQueueBase::Current() {
return static_cast<TaskQueueBase*>(pthread_getspecific(GetQueuePtrTls()));
}
TaskQueueBase::CurrentTaskQueueSetter::CurrentTaskQueueSetter(
TaskQueueBase* task_queue)
: previous_(TaskQueueBase::Current()) {
pthread_setspecific(GetQueuePtrTls(), task_queue);
}
TaskQueueBase::CurrentTaskQueueSetter::~CurrentTaskQueueSetter() {
pthread_setspecific(GetQueuePtrTls(), previous_);
}
} // namespace webrtc
#else
#error Unsupported platform
#endif

webrtc/api/task_queue/task_queue_base.h Normal file

@ -0,0 +1,83 @@
/*
* Copyright 2019 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_TASK_QUEUE_TASK_QUEUE_BASE_H_
#define API_TASK_QUEUE_TASK_QUEUE_BASE_H_
#include <memory>
#include "api/task_queue/queued_task.h"
#include "rtc_base/system/rtc_export.h"
#include "rtc_base/thread_annotations.h"
namespace webrtc {
// Asynchronously executes tasks in a way that guarantees that they're executed
// in FIFO order and that tasks never overlap. Tasks may always execute on the
// same worker thread, or they may not. To DCHECK that tasks are executing on a
// known task queue, use IsCurrent().
class RTC_LOCKABLE RTC_EXPORT TaskQueueBase {
public:
// Starts destruction of the task queue.
// On return, ensures that no tasks are running and that no new tasks are able
// to start on the task queue.
// Responsible for deallocation. Deallocation may happen synchronously during
// Delete or asynchronously after Delete returns.
// Code not running on the TaskQueue should not make any assumptions about when
// the TaskQueue is deallocated and thus should not call any methods after
// Delete.
// Code running on the TaskQueue should not call Delete, but can assume
// TaskQueue still exists and may call other methods, e.g. PostTask.
virtual void Delete() = 0;
// Schedules a task to execute. Tasks are executed in FIFO order.
// If |task->Run()| returns true, the task is deleted on the task queue
// before the next QueuedTask starts executing.
// When a TaskQueue is deleted, pending tasks will not be executed but they
// will be deleted. The deletion of tasks may happen synchronously on the
// TaskQueue or asynchronously after the TaskQueue is deleted.
// This may vary from one implementation to the next, so no assumptions
// should be made about the lifetimes of pending tasks.
virtual void PostTask(std::unique_ptr<QueuedTask> task) = 0;
// Schedules a task to execute a specified number of milliseconds from when
// the call is made. The precision should be considered as "best effort"
// and in some cases, such as on Windows when all high precision timers have
// been used up, can be off by as much as 15 milliseconds.
virtual void PostDelayedTask(std::unique_ptr<QueuedTask> task,
uint32_t milliseconds) = 0;
// Returns the task queue that is running the current thread.
// Returns nullptr if this thread is not associated with any task queue.
static TaskQueueBase* Current();
bool IsCurrent() const { return Current() == this; }
protected:
class CurrentTaskQueueSetter {
public:
explicit CurrentTaskQueueSetter(TaskQueueBase* task_queue);
CurrentTaskQueueSetter(const CurrentTaskQueueSetter&) = delete;
CurrentTaskQueueSetter& operator=(const CurrentTaskQueueSetter&) = delete;
~CurrentTaskQueueSetter();
private:
TaskQueueBase* const previous_;
};
// Users of the TaskQueue should call Delete instead of directly deleting
// this object.
virtual ~TaskQueueBase() = default;
};
struct TaskQueueDeleter {
void operator()(TaskQueueBase* task_queue) const { task_queue->Delete(); }
};
} // namespace webrtc
#endif // API_TASK_QUEUE_TASK_QUEUE_BASE_H_
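To make the contract above concrete, here is a minimal sketch, not part of this commit: a deliberately naive queue (the names InlineTaskQueue and HelloTask are hypothetical) that runs each posted task inline on the caller's thread and drops delays. Only the TaskQueueBase and QueuedTask APIs declared above are assumed; a real implementation would hand tasks to a dedicated worker thread and honor the delay, but the ownership and Current() semantics are the same.

#include <cstdint>
#include <cstdio>
#include <memory>
#include <utility>

#include "api/task_queue/queued_task.h"
#include "api/task_queue/task_queue_base.h"

class InlineTaskQueue : public webrtc::TaskQueueBase {
 public:
  void Delete() override { delete this; }

  void PostTask(std::unique_ptr<webrtc::QueuedTask> task) override {
    CurrentTaskQueueSetter set_current(this);  // Current() == this in Run().
    if (!task->Run())
      task.release();  // Run() == false: the task manages its own lifetime.
  }

  void PostDelayedTask(std::unique_ptr<webrtc::QueuedTask> task,
                       uint32_t milliseconds) override {
    PostTask(std::move(task));  // Illustration only; the delay is dropped.
  }

 private:
  ~InlineTaskQueue() override = default;  // Reached only through Delete().
};

struct HelloTask : webrtc::QueuedTask {
  bool Run() override {
    std::printf("on a task queue? %d\n",
                webrtc::TaskQueueBase::Current() != nullptr);
    return true;  // Ask the queue to delete this task after it ran.
  }
};

int main() {
  // TaskQueueDeleter routes unique_ptr cleanup through Delete(), per the
  // "call Delete instead of directly deleting" rule above.
  std::unique_ptr<webrtc::TaskQueueBase, webrtc::TaskQueueDeleter> queue(
      new InlineTaskQueue);
  queue->PostTask(std::make_unique<HelloTask>());
}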

34
webrtc/api/units/data_rate.cc Normal file
View File

@ -0,0 +1,34 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/units/data_rate.h"
#include "api/array_view.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
std::string ToString(DataRate value) {
char buf[64];
rtc::SimpleStringBuilder sb(buf);
if (value.IsPlusInfinity()) {
sb << "+inf bps";
} else if (value.IsMinusInfinity()) {
sb << "-inf bps";
} else {
if (value.bps() == 0 || value.bps() % 1000 != 0) {
sb << value.bps() << " bps";
} else {
sb << value.kbps() << " kbps";
}
}
return sb.str();
}
} // namespace webrtc

155
webrtc/api/units/data_rate.h Normal file
View File

@ -0,0 +1,155 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_UNITS_DATA_RATE_H_
#define API_UNITS_DATA_RATE_H_
#ifdef UNIT_TEST
#include <ostream> // no-presubmit-check TODO(webrtc:8982)
#endif // UNIT_TEST
#include <limits>
#include <string>
#include <type_traits>
#include "api/units/data_size.h"
#include "api/units/frequency.h"
#include "api/units/time_delta.h"
#include "rtc_base/checks.h"
#include "rtc_base/units/unit_base.h"
namespace webrtc {
// DataRate is a class that represents a given data rate. This can be used to
// represent bandwidth, encoding bitrate, etc. The internal storage is bits per
// second (bps).
class DataRate final : public rtc_units_impl::RelativeUnit<DataRate> {
public:
template <typename T>
static constexpr DataRate BitsPerSec(T value) {
static_assert(std::is_arithmetic<T>::value, "");
return FromValue(value);
}
template <typename T>
static constexpr DataRate BytesPerSec(T value) {
static_assert(std::is_arithmetic<T>::value, "");
return FromFraction(8, value);
}
template <typename T>
static constexpr DataRate KilobitsPerSec(T value) {
static_assert(std::is_arithmetic<T>::value, "");
return FromFraction(1000, value);
}
static constexpr DataRate Infinity() { return PlusInfinity(); }
DataRate() = delete;
template <typename T = int64_t>
constexpr T bps() const {
return ToValue<T>();
}
template <typename T = int64_t>
constexpr T bytes_per_sec() const {
return ToFraction<8, T>();
}
template <typename T = int64_t>
constexpr T kbps() const {
return ToFraction<1000, T>();
}
constexpr int64_t bps_or(int64_t fallback_value) const {
return ToValueOr(fallback_value);
}
constexpr int64_t kbps_or(int64_t fallback_value) const {
return ToFractionOr<1000>(fallback_value);
}
private:
// Bits per second is used internally to simplify debugging by making the
// value more recognizable.
friend class rtc_units_impl::UnitBase<DataRate>;
using RelativeUnit::RelativeUnit;
static constexpr bool one_sided = true;
};
namespace data_rate_impl {
inline constexpr int64_t Microbits(const DataSize& size) {
constexpr int64_t kMaxBeforeConversion =
std::numeric_limits<int64_t>::max() / 8000000;
RTC_DCHECK_LE(size.bytes(), kMaxBeforeConversion)
<< "size is too large to be expressed in microbits";
return size.bytes() * 8000000;
}
inline constexpr int64_t MillibytePerSec(const DataRate& size) {
constexpr int64_t kMaxBeforeConversion =
std::numeric_limits<int64_t>::max() / (1000 / 8);
RTC_DCHECK_LE(size.bps(), kMaxBeforeConversion)
<< "rate is too large to be expressed in millibytes per second";
return size.bps() * (1000 / 8);
}
} // namespace data_rate_impl
inline constexpr DataRate operator/(const DataSize size,
const TimeDelta duration) {
return DataRate::BitsPerSec(data_rate_impl::Microbits(size) / duration.us());
}
inline constexpr TimeDelta operator/(const DataSize size, const DataRate rate) {
return TimeDelta::Micros(data_rate_impl::Microbits(size) / rate.bps());
}
inline constexpr DataSize operator*(const DataRate rate,
const TimeDelta duration) {
int64_t microbits = rate.bps() * duration.us();
return DataSize::Bytes((microbits + 4000000) / 8000000);
}
inline constexpr DataSize operator*(const TimeDelta duration,
const DataRate rate) {
return rate * duration;
}
inline constexpr DataSize operator/(const DataRate rate,
const Frequency frequency) {
int64_t millihertz = frequency.millihertz<int64_t>();
// Note that the value is truncated here rather than rounded, potentially
// introducing an error of .5 bytes if rounding were expected.
return DataSize::Bytes(data_rate_impl::MillibytePerSec(rate) / millihertz);
}
inline constexpr Frequency operator/(const DataRate rate, const DataSize size) {
return Frequency::MilliHertz(data_rate_impl::MillibytePerSec(rate) /
size.bytes());
}
inline constexpr DataRate operator*(const DataSize size,
const Frequency frequency) {
RTC_DCHECK(frequency.IsZero() ||
size.bytes() <= std::numeric_limits<int64_t>::max() / 8 /
frequency.millihertz<int64_t>());
int64_t millibits_per_second =
size.bytes() * 8 * frequency.millihertz<int64_t>();
return DataRate::BitsPerSec((millibits_per_second + 500) / 1000);
}
inline constexpr DataRate operator*(const Frequency frequency,
const DataSize size) {
return size * frequency;
}
std::string ToString(DataRate value);
inline std::string ToLogString(DataRate value) {
return ToString(value);
}
#ifdef UNIT_TEST
inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982)
std::ostream& stream, // no-presubmit-check TODO(webrtc:8982)
DataRate value) {
return stream << ToString(value);
}
#endif // UNIT_TEST
} // namespace webrtc
#endif // API_UNITS_DATA_RATE_H_
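As a worked illustration of the operators above (the function name is hypothetical; only these unit headers are assumed):

#include "api/units/data_rate.h"
#include "api/units/data_size.h"
#include "api/units/time_delta.h"

void DataRateExamples() {
  // 1000 bytes over 8 ms: Microbits() keeps integer precision, so the
  // division is exact: 8'000'000'000 microbits / 8000 us = 1'000'000 bps.
  webrtc::DataRate rate =
      webrtc::DataSize::Bytes(1000) / webrtc::TimeDelta::Millis(8);
  // rate.bps() == 1'000'000; rate.kbps() == 1000.
  // ToString(rate) == "1000 kbps", since bps is a nonzero multiple of 1000.

  // rate * duration rounds to the nearest byte: the "+ 4000000" in
  // operator* above is half of the 8'000'000 microbits-per-byte divisor.
  webrtc::DataSize size = rate * webrtc::TimeDelta::Millis(8);
  // size.bytes() == 1000.
  (void)size;
}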

30
webrtc/api/units/data_size.cc Normal file
View File

@ -0,0 +1,30 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/units/data_size.h"
#include "api/array_view.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
std::string ToString(DataSize value) {
char buf[64];
rtc::SimpleStringBuilder sb(buf);
if (value.IsPlusInfinity()) {
sb << "+inf bytes";
} else if (value.IsMinusInfinity()) {
sb << "-inf bytes";
} else {
sb << value.bytes() << " bytes";
}
return sb.str();
}
} // namespace webrtc

66
webrtc/api/units/data_size.h Normal file
View File

@ -0,0 +1,66 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_UNITS_DATA_SIZE_H_
#define API_UNITS_DATA_SIZE_H_
#ifdef UNIT_TEST
#include <ostream> // no-presubmit-check TODO(webrtc:8982)
#endif // UNIT_TEST
#include <string>
#include <type_traits>
#include "rtc_base/units/unit_base.h"
namespace webrtc {
// DataSize is a class representing a count of bytes.
class DataSize final : public rtc_units_impl::RelativeUnit<DataSize> {
public:
template <typename T>
static constexpr DataSize Bytes(T value) {
static_assert(std::is_arithmetic<T>::value, "");
return FromValue(value);
}
static constexpr DataSize Infinity() { return PlusInfinity(); }
DataSize() = delete;
template <typename T = int64_t>
constexpr T bytes() const {
return ToValue<T>();
}
constexpr int64_t bytes_or(int64_t fallback_value) const {
return ToValueOr(fallback_value);
}
private:
friend class rtc_units_impl::UnitBase<DataSize>;
using RelativeUnit::RelativeUnit;
static constexpr bool one_sided = true;
};
std::string ToString(DataSize value);
inline std::string ToLogString(DataSize value) {
return ToString(value);
}
#ifdef UNIT_TEST
inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982)
std::ostream& stream, // no-presubmit-check TODO(webrtc:8982)
DataSize value) {
return stream << ToString(value);
}
#endif // UNIT_TEST
} // namespace webrtc
#endif // API_UNITS_DATA_SIZE_H_
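For illustration, a small hypothetical sketch of the factory and fallback accessors:

#include "api/units/data_size.h"

void DataSizeExample() {
  webrtc::DataSize mtu = webrtc::DataSize::Bytes(1500);
  // mtu.bytes() == 1500; ToString(mtu) == "1500 bytes".
  // The *_or accessors substitute a fallback for non-finite values:
  // mtu.bytes_or(-1) == 1500, DataSize::Infinity().bytes_or(-1) == -1.
  (void)mtu;
}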

29
webrtc/api/units/frequency.cc Normal file
View File

@ -0,0 +1,29 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/units/frequency.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
std::string ToString(Frequency value) {
char buf[64];
rtc::SimpleStringBuilder sb(buf);
if (value.IsPlusInfinity()) {
sb << "+inf Hz";
} else if (value.IsMinusInfinity()) {
sb << "-inf Hz";
} else if (value.millihertz<int64_t>() % 1000 != 0) {
sb.AppendFormat("%.3f Hz", value.hertz<double>());
} else {
sb << value.hertz<int64_t>() << " Hz";
}
return sb.str();
}
} // namespace webrtc

101
webrtc/api/units/frequency.h Normal file
View File

@ -0,0 +1,101 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_UNITS_FREQUENCY_H_
#define API_UNITS_FREQUENCY_H_
#ifdef UNIT_TEST
#include <ostream> // no-presubmit-check TODO(webrtc:8982)
#endif // UNIT_TEST
#include <cstdlib>
#include <limits>
#include <string>
#include <type_traits>
#include "api/units/time_delta.h"
#include "rtc_base/units/unit_base.h"
namespace webrtc {
class Frequency final : public rtc_units_impl::RelativeUnit<Frequency> {
public:
template <typename T>
static constexpr Frequency MilliHertz(T value) {
static_assert(std::is_arithmetic<T>::value, "");
return FromValue(value);
}
template <typename T>
static constexpr Frequency Hertz(T value) {
static_assert(std::is_arithmetic<T>::value, "");
return FromFraction(1'000, value);
}
template <typename T>
static constexpr Frequency KiloHertz(T value) {
static_assert(std::is_arithmetic<T>::value, "");
return FromFraction(1'000'000, value);
}
Frequency() = delete;
template <typename T = int64_t>
constexpr T hertz() const {
return ToFraction<1000, T>();
}
template <typename T = int64_t>
constexpr T millihertz() const {
return ToValue<T>();
}
private:
friend class rtc_units_impl::UnitBase<Frequency>;
using RelativeUnit::RelativeUnit;
static constexpr bool one_sided = true;
};
inline constexpr Frequency operator/(int64_t nominator,
const TimeDelta& interval) {
constexpr int64_t kKiloPerMicro = 1000 * 1000000;
RTC_DCHECK_LE(nominator, std::numeric_limits<int64_t>::max() / kKiloPerMicro);
RTC_CHECK(interval.IsFinite());
RTC_CHECK(!interval.IsZero());
return Frequency::MilliHertz(nominator * kKiloPerMicro / interval.us());
}
inline constexpr TimeDelta operator/(int64_t nominator,
const Frequency& frequency) {
constexpr int64_t kMegaPerMilli = 1000000 * 1000;
RTC_DCHECK_LE(nominator, std::numeric_limits<int64_t>::max() / kMegaPerMilli);
RTC_CHECK(frequency.IsFinite());
RTC_CHECK(!frequency.IsZero());
return TimeDelta::Micros(nominator * kMegaPerMilli / frequency.millihertz());
}
inline constexpr double operator*(Frequency frequency, TimeDelta time_delta) {
return frequency.hertz<double>() * time_delta.seconds<double>();
}
inline constexpr double operator*(TimeDelta time_delta, Frequency frequency) {
return frequency * time_delta;
}
std::string ToString(Frequency value);
inline std::string ToLogString(Frequency value) {
return ToString(value);
}
#ifdef UNIT_TEST
inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982)
std::ostream& stream, // no-presubmit-check TODO(webrtc:8982)
Frequency value) {
return stream << ToString(value);
}
#endif // UNIT_TEST
} // namespace webrtc
#endif // API_UNITS_FREQUENCY_H_
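A hypothetical sketch illustrating the count/interval arithmetic defined above:

#include "api/units/frequency.h"
#include "api/units/time_delta.h"

void FrequencyExamples() {
  // "Events per interval": 30 frames over one second is 30 Hz.
  webrtc::Frequency fps = 30 / webrtc::TimeDelta::Seconds(1);
  // fps.hertz() == 30; fps.millihertz() == 30'000.

  // The reciprocal gives the period, truncated to whole microseconds:
  webrtc::TimeDelta period = 1 / fps;
  // period.us() == 33'333 (1'000'000'000 / 30'000 millihertz).

  // Frequency * TimeDelta is a dimensionless count, as a double:
  double frames = fps * webrtc::TimeDelta::Millis(500);  // == 15.0
  (void)period;
  (void)frames;
}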

36
webrtc/api/units/time_delta.cc Normal file
View File

@ -0,0 +1,36 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/units/time_delta.h"
#include "api/array_view.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
std::string ToString(TimeDelta value) {
char buf[64];
rtc::SimpleStringBuilder sb(buf);
if (value.IsPlusInfinity()) {
sb << "+inf ms";
} else if (value.IsMinusInfinity()) {
sb << "-inf ms";
} else {
if (value.us() == 0 || (value.us() % 1000) != 0)
sb << value.us() << " us";
else if (value.ms() % 1000 != 0)
sb << value.ms() << " ms";
else
sb << value.seconds() << " s";
}
return sb.str();
}
} // namespace webrtc

105
webrtc/api/units/time_delta.h Normal file
View File

@ -0,0 +1,105 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_UNITS_TIME_DELTA_H_
#define API_UNITS_TIME_DELTA_H_
#ifdef UNIT_TEST
#include <ostream> // no-presubmit-check TODO(webrtc:8982)
#endif // UNIT_TEST
#include <cstdlib>
#include <string>
#include <type_traits>
#include "rtc_base/units/unit_base.h"
namespace webrtc {
// TimeDelta represents the difference between two timestamps. Commonly this is
// a duration. However, since two Timestamps are not guaranteed to have the
// same epoch (they might come from different computers, making exact
// synchronisation infeasible), the duration covered by a TimeDelta can be
// undefined. To simplify usage, it can be constructed and converted to
// different units, specifically seconds (s), milliseconds (ms) and
// microseconds (us).
class TimeDelta final : public rtc_units_impl::RelativeUnit<TimeDelta> {
public:
template <typename T>
static constexpr TimeDelta Seconds(T value) {
static_assert(std::is_arithmetic<T>::value, "");
return FromFraction(1'000'000, value);
}
template <typename T>
static constexpr TimeDelta Millis(T value) {
static_assert(std::is_arithmetic<T>::value, "");
return FromFraction(1'000, value);
}
template <typename T>
static constexpr TimeDelta Micros(T value) {
static_assert(std::is_arithmetic<T>::value, "");
return FromValue(value);
}
TimeDelta() = delete;
template <typename T = int64_t>
constexpr T seconds() const {
return ToFraction<1000000, T>();
}
template <typename T = int64_t>
constexpr T ms() const {
return ToFraction<1000, T>();
}
template <typename T = int64_t>
constexpr T us() const {
return ToValue<T>();
}
template <typename T = int64_t>
constexpr T ns() const {
return ToMultiple<1000, T>();
}
constexpr int64_t seconds_or(int64_t fallback_value) const {
return ToFractionOr<1000000>(fallback_value);
}
constexpr int64_t ms_or(int64_t fallback_value) const {
return ToFractionOr<1000>(fallback_value);
}
constexpr int64_t us_or(int64_t fallback_value) const {
return ToValueOr(fallback_value);
}
constexpr TimeDelta Abs() const {
return us() < 0 ? TimeDelta::Micros(-us()) : *this;
}
private:
friend class rtc_units_impl::UnitBase<TimeDelta>;
using RelativeUnit::RelativeUnit;
static constexpr bool one_sided = false;
};
std::string ToString(TimeDelta value);
inline std::string ToLogString(TimeDelta value) {
return ToString(value);
}
#ifdef UNIT_TEST
inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982)
std::ostream& stream, // no-presubmit-check TODO(webrtc:8982)
TimeDelta value) {
return stream << ToString(value);
}
#endif // UNIT_TEST
} // namespace webrtc
#endif // API_UNITS_TIME_DELTA_H_
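A hypothetical sketch of the conversions; exact multiples are used so no rounding comes into play:

#include "api/units/time_delta.h"

void TimeDeltaExamples() {
  webrtc::TimeDelta d = webrtc::TimeDelta::Millis(1500);
  // d.ms() == 1500; d.us() == 1'500'000; d.seconds<double>() == 1.5.
  // ToString(d) == "1500 ms" (us is a multiple of 1000, ms is not).

  // Abs() maps negative deltas onto positive ones:
  // TimeDelta::Millis(-5).Abs() == TimeDelta::Millis(5).
  (void)d;
}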

34
webrtc/api/units/timestamp.cc Normal file
View File

@ -0,0 +1,34 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/units/timestamp.h"
#include "api/array_view.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
std::string ToString(Timestamp value) {
char buf[64];
rtc::SimpleStringBuilder sb(buf);
if (value.IsPlusInfinity()) {
sb << "+inf ms";
} else if (value.IsMinusInfinity()) {
sb << "-inf ms";
} else {
if (value.us() == 0 || (value.us() % 1000) != 0)
sb << value.us() << " us";
else if (value.ms() % 1000 != 0)
sb << value.ms() << " ms";
else
sb << value.seconds() << " s";
}
return sb.str();
}
} // namespace webrtc

138
webrtc/api/units/timestamp.h Normal file
View File

@ -0,0 +1,138 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_UNITS_TIMESTAMP_H_
#define API_UNITS_TIMESTAMP_H_
#ifdef UNIT_TEST
#include <ostream> // no-presubmit-check TODO(webrtc:8982)
#endif // UNIT_TEST
#include <string>
#include <type_traits>
#include "api/units/time_delta.h"
#include "rtc_base/checks.h"
namespace webrtc {
// Timestamp represents the time that has passed since some unspecified epoch.
// The epoch is assumed to be before any represented timestamps, so negative
// values are not valid. The most notable feature is that the
// difference of two Timestamps results in a TimeDelta.
class Timestamp final : public rtc_units_impl::UnitBase<Timestamp> {
public:
template <typename T>
static constexpr Timestamp Seconds(T value) {
static_assert(std::is_arithmetic<T>::value, "");
return FromFraction(1'000'000, value);
}
template <typename T>
static constexpr Timestamp Millis(T value) {
static_assert(std::is_arithmetic<T>::value, "");
return FromFraction(1'000, value);
}
template <typename T>
static constexpr Timestamp Micros(T value) {
static_assert(std::is_arithmetic<T>::value, "");
return FromValue(value);
}
Timestamp() = delete;
template <typename T = int64_t>
constexpr T seconds() const {
return ToFraction<1000000, T>();
}
template <typename T = int64_t>
constexpr T ms() const {
return ToFraction<1000, T>();
}
template <typename T = int64_t>
constexpr T us() const {
return ToValue<T>();
}
constexpr int64_t seconds_or(int64_t fallback_value) const {
return ToFractionOr<1000000>(fallback_value);
}
constexpr int64_t ms_or(int64_t fallback_value) const {
return ToFractionOr<1000>(fallback_value);
}
constexpr int64_t us_or(int64_t fallback_value) const {
return ToValueOr(fallback_value);
}
constexpr Timestamp operator+(const TimeDelta delta) const {
if (IsPlusInfinity() || delta.IsPlusInfinity()) {
RTC_DCHECK(!IsMinusInfinity());
RTC_DCHECK(!delta.IsMinusInfinity());
return PlusInfinity();
} else if (IsMinusInfinity() || delta.IsMinusInfinity()) {
RTC_DCHECK(!IsPlusInfinity());
RTC_DCHECK(!delta.IsPlusInfinity());
return MinusInfinity();
}
return Timestamp::Micros(us() + delta.us());
}
constexpr Timestamp operator-(const TimeDelta delta) const {
if (IsPlusInfinity() || delta.IsMinusInfinity()) {
RTC_DCHECK(!IsMinusInfinity());
RTC_DCHECK(!delta.IsPlusInfinity());
return PlusInfinity();
} else if (IsMinusInfinity() || delta.IsPlusInfinity()) {
RTC_DCHECK(!IsPlusInfinity());
RTC_DCHECK(!delta.IsMinusInfinity());
return MinusInfinity();
}
return Timestamp::Micros(us() - delta.us());
}
constexpr TimeDelta operator-(const Timestamp other) const {
if (IsPlusInfinity() || other.IsMinusInfinity()) {
RTC_DCHECK(!IsMinusInfinity());
RTC_DCHECK(!other.IsPlusInfinity());
return TimeDelta::PlusInfinity();
} else if (IsMinusInfinity() || other.IsPlusInfinity()) {
RTC_DCHECK(!IsPlusInfinity());
RTC_DCHECK(!other.IsMinusInfinity());
return TimeDelta::MinusInfinity();
}
return TimeDelta::Micros(us() - other.us());
}
constexpr Timestamp& operator-=(const TimeDelta delta) {
*this = *this - delta;
return *this;
}
constexpr Timestamp& operator+=(const TimeDelta delta) {
*this = *this + delta;
return *this;
}
private:
friend class rtc_units_impl::UnitBase<Timestamp>;
using UnitBase::UnitBase;
static constexpr bool one_sided = true;
};
std::string ToString(Timestamp value);
inline std::string ToLogString(Timestamp value) {
return ToString(value);
}
#ifdef UNIT_TEST
inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982)
std::ostream& stream, // no-presubmit-check TODO(webrtc:8982)
Timestamp value) {
return stream << ToString(value);
}
#endif // UNIT_TEST
} // namespace webrtc
#endif // API_UNITS_TIMESTAMP_H_
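A hypothetical sketch of the Timestamp/TimeDelta interplay described above:

#include "api/units/time_delta.h"
#include "api/units/timestamp.h"

void TimestampExamples() {
  webrtc::Timestamp start = webrtc::Timestamp::Millis(10'000);
  webrtc::Timestamp end = start + webrtc::TimeDelta::Millis(250);
  // Subtracting two Timestamps yields a TimeDelta:
  webrtc::TimeDelta elapsed = end - start;  // elapsed.ms() == 250.
  // Infinities propagate with sign, per the operators above:
  // Timestamp::PlusInfinity() - start == TimeDelta::PlusInfinity().
  (void)elapsed;
}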

187
webrtc/api/video/color_space.cc Normal file
View File

@ -0,0 +1,187 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video/color_space.h"
namespace webrtc {
namespace {
// Try to convert |enum_value| into the enum class T. |enum_bitmask| is created
// by the function below. Returns true if the conversion was successful, false
// otherwise.
template <typename T>
bool SetFromUint8(uint8_t enum_value, uint64_t enum_bitmask, T* out) {
if ((enum_value < 64) && ((enum_bitmask >> enum_value) & 1)) {
*out = static_cast<T>(enum_value);
return true;
}
return false;
}
// This function serves as an assert for the constexpr function below. It is
// deliberately not declared constexpr so that it causes a build error if enum
// values of 64 or above are used. The bitmask and the code generating it would
// have to be extended if the standard is updated to include enum values >= 64.
int EnumMustBeLessThan64() {
return -1;
}
template <typename T, size_t N>
constexpr int MakeMask(const int index, const int length, T (&values)[N]) {
return length > 1
? (MakeMask(index, 1, values) +
MakeMask(index + 1, length - 1, values))
: (static_cast<uint8_t>(values[index]) < 64
? (uint64_t{1} << static_cast<uint8_t>(values[index]))
: EnumMustBeLessThan64());
}
// Create a bitmask where each bit corresponds to one potential enum value.
// |values| should be an array listing all possible enum values. The bit is set
// to one if the corresponding enum exists. Only works for enums with values
// less than 64.
template <typename T, size_t N>
constexpr uint64_t CreateEnumBitmask(T (&values)[N]) {
return MakeMask(0, N, values);
}
bool SetChromaSitingFromUint8(uint8_t enum_value,
ColorSpace::ChromaSiting* chroma_siting) {
constexpr ColorSpace::ChromaSiting kChromaSitings[] = {
ColorSpace::ChromaSiting::kUnspecified,
ColorSpace::ChromaSiting::kCollocated, ColorSpace::ChromaSiting::kHalf};
constexpr uint64_t enum_bitmask = CreateEnumBitmask(kChromaSitings);
return SetFromUint8(enum_value, enum_bitmask, chroma_siting);
}
} // namespace
ColorSpace::ColorSpace() = default;
ColorSpace::ColorSpace(const ColorSpace& other) = default;
ColorSpace::ColorSpace(ColorSpace&& other) = default;
ColorSpace& ColorSpace::operator=(const ColorSpace& other) = default;
ColorSpace::ColorSpace(PrimaryID primaries,
TransferID transfer,
MatrixID matrix,
RangeID range)
: ColorSpace(primaries,
transfer,
matrix,
range,
ChromaSiting::kUnspecified,
ChromaSiting::kUnspecified,
nullptr) {}
ColorSpace::ColorSpace(PrimaryID primaries,
TransferID transfer,
MatrixID matrix,
RangeID range,
ChromaSiting chroma_siting_horz,
ChromaSiting chroma_siting_vert,
const HdrMetadata* hdr_metadata)
: primaries_(primaries),
transfer_(transfer),
matrix_(matrix),
range_(range),
chroma_siting_horizontal_(chroma_siting_horz),
chroma_siting_vertical_(chroma_siting_vert),
hdr_metadata_(hdr_metadata ? absl::make_optional(*hdr_metadata)
: absl::nullopt) {}
ColorSpace::PrimaryID ColorSpace::primaries() const {
return primaries_;
}
ColorSpace::TransferID ColorSpace::transfer() const {
return transfer_;
}
ColorSpace::MatrixID ColorSpace::matrix() const {
return matrix_;
}
ColorSpace::RangeID ColorSpace::range() const {
return range_;
}
ColorSpace::ChromaSiting ColorSpace::chroma_siting_horizontal() const {
return chroma_siting_horizontal_;
}
ColorSpace::ChromaSiting ColorSpace::chroma_siting_vertical() const {
return chroma_siting_vertical_;
}
const HdrMetadata* ColorSpace::hdr_metadata() const {
return hdr_metadata_ ? &*hdr_metadata_ : nullptr;
}
bool ColorSpace::set_primaries_from_uint8(uint8_t enum_value) {
constexpr PrimaryID kPrimaryIds[] = {
PrimaryID::kBT709, PrimaryID::kUnspecified, PrimaryID::kBT470M,
PrimaryID::kBT470BG, PrimaryID::kSMPTE170M, PrimaryID::kSMPTE240M,
PrimaryID::kFILM, PrimaryID::kBT2020, PrimaryID::kSMPTEST428,
PrimaryID::kSMPTEST431, PrimaryID::kSMPTEST432, PrimaryID::kJEDECP22};
constexpr uint64_t enum_bitmask = CreateEnumBitmask(kPrimaryIds);
return SetFromUint8(enum_value, enum_bitmask, &primaries_);
}
bool ColorSpace::set_transfer_from_uint8(uint8_t enum_value) {
constexpr TransferID kTransferIds[] = {
TransferID::kBT709, TransferID::kUnspecified,
TransferID::kGAMMA22, TransferID::kGAMMA28,
TransferID::kSMPTE170M, TransferID::kSMPTE240M,
TransferID::kLINEAR, TransferID::kLOG,
TransferID::kLOG_SQRT, TransferID::kIEC61966_2_4,
TransferID::kBT1361_ECG, TransferID::kIEC61966_2_1,
TransferID::kBT2020_10, TransferID::kBT2020_12,
TransferID::kSMPTEST2084, TransferID::kSMPTEST428,
TransferID::kARIB_STD_B67};
constexpr uint64_t enum_bitmask = CreateEnumBitmask(kTransferIds);
return SetFromUint8(enum_value, enum_bitmask, &transfer_);
}
bool ColorSpace::set_matrix_from_uint8(uint8_t enum_value) {
constexpr MatrixID kMatrixIds[] = {
MatrixID::kRGB, MatrixID::kBT709, MatrixID::kUnspecified,
MatrixID::kFCC, MatrixID::kBT470BG, MatrixID::kSMPTE170M,
MatrixID::kSMPTE240M, MatrixID::kYCOCG, MatrixID::kBT2020_NCL,
MatrixID::kBT2020_CL, MatrixID::kSMPTE2085, MatrixID::kCDNCLS,
MatrixID::kCDCLS, MatrixID::kBT2100_ICTCP};
constexpr uint64_t enum_bitmask = CreateEnumBitmask(kMatrixIds);
return SetFromUint8(enum_value, enum_bitmask, &matrix_);
}
bool ColorSpace::set_range_from_uint8(uint8_t enum_value) {
constexpr RangeID kRangeIds[] = {RangeID::kInvalid, RangeID::kLimited,
RangeID::kFull, RangeID::kDerived};
constexpr uint64_t enum_bitmask = CreateEnumBitmask(kRangeIds);
return SetFromUint8(enum_value, enum_bitmask, &range_);
}
bool ColorSpace::set_chroma_siting_horizontal_from_uint8(uint8_t enum_value) {
return SetChromaSitingFromUint8(enum_value, &chroma_siting_horizontal_);
}
bool ColorSpace::set_chroma_siting_vertical_from_uint8(uint8_t enum_value) {
return SetChromaSitingFromUint8(enum_value, &chroma_siting_vertical_);
}
void ColorSpace::set_hdr_metadata(const HdrMetadata* hdr_metadata) {
hdr_metadata_ =
hdr_metadata ? absl::make_optional(*hdr_metadata) : absl::nullopt;
}
} // namespace webrtc
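As a worked example of the bitmask scheme above, using the ChromaSiting values from this file (illustrative only):

#include <cstdint>

// kChromaSitings lists enum values {0, 1, 2}, so CreateEnumBitmask sets
// bits 0..2 of the mask:
constexpr uint64_t kChromaSitingMask =
    (uint64_t{1} << 0) | (uint64_t{1} << 1) | (uint64_t{1} << 2);
static_assert(kChromaSitingMask == 0b111, "one bit per valid enum value");
// SetFromUint8 then tests the bit for the incoming wire value:
//   enum_value 2: (0b111 >> 2) & 1 == 1 -> accepted, *out = kHalf.
//   enum_value 3: (0b111 >> 3) & 1 == 0 -> rejected, returns false.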

178
webrtc/api/video/color_space.h Normal file
View File

@ -0,0 +1,178 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_COLOR_SPACE_H_
#define API_VIDEO_COLOR_SPACE_H_
#include <stdint.h>
#include "absl/types/optional.h"
#include "api/video/hdr_metadata.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// This class represents color information as specified in T-REC H.273,
// available from https://www.itu.int/rec/T-REC-H.273.
//
// WebRTC's supported codecs:
// - VP9 supports color profiles, see VP9 Bitstream & Decoding Process
// Specification Version 0.6 Section 7.2.2 "Color config semantics" available
// from https://www.webmproject.org.
// - VP8 only supports BT.601, see
// https://tools.ietf.org/html/rfc6386#section-9.2
// - H264 uses the exact same representation as T-REC H.273. See T-REC-H.264
// E.2.1, "VUI parameters semantics", available from
// https://www.itu.int/rec/T-REC-H.264.
class RTC_EXPORT ColorSpace {
public:
enum class PrimaryID : uint8_t {
// The indices are equal to the values specified in T-REC H.273 Table 2.
kBT709 = 1,
kUnspecified = 2,
kBT470M = 4,
kBT470BG = 5,
kSMPTE170M = 6, // Identical to BT601
kSMPTE240M = 7,
kFILM = 8,
kBT2020 = 9,
kSMPTEST428 = 10,
kSMPTEST431 = 11,
kSMPTEST432 = 12,
kJEDECP22 = 22, // Identical to EBU3213-E
// When adding/removing entries here, please make sure to do the
// corresponding change to kPrimaryIds.
};
enum class TransferID : uint8_t {
// The indices are equal to the values specified in T-REC H.273 Table 3.
kBT709 = 1,
kUnspecified = 2,
kGAMMA22 = 4,
kGAMMA28 = 5,
kSMPTE170M = 6,
kSMPTE240M = 7,
kLINEAR = 8,
kLOG = 9,
kLOG_SQRT = 10,
kIEC61966_2_4 = 11,
kBT1361_ECG = 12,
kIEC61966_2_1 = 13,
kBT2020_10 = 14,
kBT2020_12 = 15,
kSMPTEST2084 = 16,
kSMPTEST428 = 17,
kARIB_STD_B67 = 18,
// When adding/removing entries here, please make sure to do the
// corresponding change to kTransferIds.
};
enum class MatrixID : uint8_t {
// The indices are equal to the values specified in T-REC H.273 Table 4.
kRGB = 0,
kBT709 = 1,
kUnspecified = 2,
kFCC = 4,
kBT470BG = 5,
kSMPTE170M = 6,
kSMPTE240M = 7,
kYCOCG = 8,
kBT2020_NCL = 9,
kBT2020_CL = 10,
kSMPTE2085 = 11,
kCDNCLS = 12,
kCDCLS = 13,
kBT2100_ICTCP = 14,
// When adding/removing entries here, please make sure to do the
// corresponding change to kMatrixIds.
};
enum class RangeID {
// The indices are equal to the values specified at
// https://www.webmproject.org/docs/container/#colour for the element Range.
kInvalid = 0,
// Limited Rec. 709 color range with RGB values ranging from 16 to 235.
kLimited = 1,
// Full RGB color range with RGB values from 0 to 255.
kFull = 2,
// Range is defined by MatrixCoefficients/TransferCharacteristics.
kDerived = 3,
// When adding/removing entries here, please make sure to do the
// corresponding change to kRangeIds.
};
enum class ChromaSiting {
// Chroma siting specifies how chroma is subsampled relative to the luma
// samples in a YUV video frame.
// The indices are equal to the values specified at
// https://www.webmproject.org/docs/container/#colour for the element
// ChromaSitingVert and ChromaSitingHorz.
kUnspecified = 0,
kCollocated = 1,
kHalf = 2,
// When adding/removing entries here, please make sure to do the
// corresponding change to kChromaSitings.
};
ColorSpace();
ColorSpace(const ColorSpace& other);
ColorSpace(ColorSpace&& other);
ColorSpace& operator=(const ColorSpace& other);
ColorSpace(PrimaryID primaries,
TransferID transfer,
MatrixID matrix,
RangeID range);
ColorSpace(PrimaryID primaries,
TransferID transfer,
MatrixID matrix,
RangeID range,
ChromaSiting chroma_siting_horizontal,
ChromaSiting chroma_siting_vertical,
const HdrMetadata* hdr_metadata);
friend bool operator==(const ColorSpace& lhs, const ColorSpace& rhs) {
return lhs.primaries_ == rhs.primaries_ && lhs.transfer_ == rhs.transfer_ &&
lhs.matrix_ == rhs.matrix_ && lhs.range_ == rhs.range_ &&
lhs.chroma_siting_horizontal_ == rhs.chroma_siting_horizontal_ &&
lhs.chroma_siting_vertical_ == rhs.chroma_siting_vertical_ &&
lhs.hdr_metadata_ == rhs.hdr_metadata_;
}
friend bool operator!=(const ColorSpace& lhs, const ColorSpace& rhs) {
return !(lhs == rhs);
}
PrimaryID primaries() const;
TransferID transfer() const;
MatrixID matrix() const;
RangeID range() const;
ChromaSiting chroma_siting_horizontal() const;
ChromaSiting chroma_siting_vertical() const;
const HdrMetadata* hdr_metadata() const;
bool set_primaries_from_uint8(uint8_t enum_value);
bool set_transfer_from_uint8(uint8_t enum_value);
bool set_matrix_from_uint8(uint8_t enum_value);
bool set_range_from_uint8(uint8_t enum_value);
bool set_chroma_siting_horizontal_from_uint8(uint8_t enum_value);
bool set_chroma_siting_vertical_from_uint8(uint8_t enum_value);
void set_hdr_metadata(const HdrMetadata* hdr_metadata);
private:
PrimaryID primaries_ = PrimaryID::kUnspecified;
TransferID transfer_ = TransferID::kUnspecified;
MatrixID matrix_ = MatrixID::kUnspecified;
RangeID range_ = RangeID::kInvalid;
ChromaSiting chroma_siting_horizontal_ = ChromaSiting::kUnspecified;
ChromaSiting chroma_siting_vertical_ = ChromaSiting::kUnspecified;
absl::optional<HdrMetadata> hdr_metadata_;
};
} // namespace webrtc
#endif // API_VIDEO_COLOR_SPACE_H_
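A hypothetical usage sketch of the checked setters, with byte values taken from the H.273 tables quoted in the enums above:

#include "api/video/color_space.h"

void ParseColorSpaceExample() {
  webrtc::ColorSpace color_space;
  bool ok = color_space.set_primaries_from_uint8(9) &&   // kBT2020
            color_space.set_transfer_from_uint8(16) &&   // kSMPTEST2084
            color_space.set_matrix_from_uint8(9) &&      // kBT2020_NCL
            color_space.set_range_from_uint8(1);         // kLimited
  // Values with no enum entry fail cleanly; e.g. 3 is unused for
  // primaries, so set_primaries_from_uint8(3) == false.
  (void)ok;
}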

21
webrtc/api/video/hdr_metadata.cc Normal file
View File

@ -0,0 +1,21 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video/hdr_metadata.h"
namespace webrtc {
HdrMasteringMetadata::Chromaticity::Chromaticity() = default;
HdrMasteringMetadata::HdrMasteringMetadata() = default;
HdrMetadata::HdrMetadata() = default;
} // namespace webrtc

105
webrtc/api/video/hdr_metadata.h Normal file
View File

@ -0,0 +1,105 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_HDR_METADATA_H_
#define API_VIDEO_HDR_METADATA_H_
namespace webrtc {
// SMPTE ST 2086 mastering metadata,
// see https://ieeexplore.ieee.org/document/8353899.
struct HdrMasteringMetadata {
struct Chromaticity {
Chromaticity();
bool operator==(const Chromaticity& rhs) const {
return x == rhs.x && y == rhs.y;
}
bool Validate() const {
return x >= 0.0 && x <= 1.0 && y >= 0.0 && y <= 1.0;
}
// xy chromaticity coordinates must be calculated as specified in ISO
// 11664-3:2012 Section 7, and must be specified with four decimal places.
// The x coordinate should be in the range [0.0001, 0.7400] and the y
// coordinate should be in the range [0.0001, 0.8400]. Valid range [0.0000,
// 1.0000].
float x = 0.0f;
float y = 0.0f;
};
HdrMasteringMetadata();
bool operator==(const HdrMasteringMetadata& rhs) const {
return ((primary_r == rhs.primary_r) && (primary_g == rhs.primary_g) &&
(primary_b == rhs.primary_b) && (white_point == rhs.white_point) &&
(luminance_max == rhs.luminance_max) &&
(luminance_min == rhs.luminance_min));
}
bool Validate() const {
return luminance_max >= 0.0 && luminance_max <= 20000.0 &&
luminance_min >= 0.0 && luminance_min <= 5.0 &&
primary_r.Validate() && primary_g.Validate() &&
primary_b.Validate() && white_point.Validate();
}
// The nominal primaries of the mastering display.
Chromaticity primary_r;
Chromaticity primary_g;
Chromaticity primary_b;
// The nominal chromaticity of the white point of the mastering display.
Chromaticity white_point;
// The nominal maximum display luminance of the mastering display. Specified
// in the unit candela/m2. The value should be in the range [5, 10000] with
// zero decimal places. Valid range [0, 20000].
float luminance_max = 0.0f;
// The nominal minimum display luminance of the mastering display. Specified
// in the unit candela/m2. The value should be in the range [0.0001, 5.0000]
// with four decimal places. Valid range [0.0000, 5.0000].
float luminance_min = 0.0f;
};
// High dynamic range (HDR) metadata common for HDR10 and WebM/VP9-based HDR
// formats. This struct replicates the HDRMetadata struct defined in
// https://cs.chromium.org/chromium/src/media/base/hdr_metadata.h
struct HdrMetadata {
HdrMetadata();
bool operator==(const HdrMetadata& rhs) const {
return (
(max_content_light_level == rhs.max_content_light_level) &&
(max_frame_average_light_level == rhs.max_frame_average_light_level) &&
(mastering_metadata == rhs.mastering_metadata));
}
bool Validate() const {
return max_content_light_level >= 0 && max_content_light_level <= 20000 &&
max_frame_average_light_level >= 0 &&
max_frame_average_light_level <= 20000 &&
mastering_metadata.Validate();
}
HdrMasteringMetadata mastering_metadata;
// Max content light level (CLL), i.e. maximum brightness level present in the
// stream, in nits. 1 nit = 1 candela/m2. Valid range [0, 20000].
int max_content_light_level = 0;
// Max frame-average light level (FALL), i.e. maximum average brightness of
// the brightest frame in the stream, in nits. Valid range [0, 20000].
int max_frame_average_light_level = 0;
};
} // namespace webrtc
#endif // API_VIDEO_HDR_METADATA_H_
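A hypothetical sketch of populating and validating the metadata, with example values chosen inside the documented ranges:

#include "api/video/hdr_metadata.h"

void HdrMetadataExample() {
  webrtc::HdrMetadata hdr;
  // BT.2020 red primary as an example chromaticity; the remaining
  // chromaticities keep their defaults, which are also in valid range.
  hdr.mastering_metadata.primary_r.x = 0.7080f;
  hdr.mastering_metadata.primary_r.y = 0.2920f;
  hdr.mastering_metadata.luminance_max = 1000.0f;  // candela/m2
  hdr.mastering_metadata.luminance_min = 0.0001f;  // candela/m2
  hdr.max_content_light_level = 1000;              // nits
  hdr.max_frame_average_light_level = 400;         // nits
  bool ok = hdr.Validate();  // true: every field is inside its valid range.
  (void)ok;
}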

93
webrtc/api/video/video_content_type.cc Normal file
View File

@ -0,0 +1,93 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video/video_content_type.h"
// VideoContentType is stored as a single byte, which is sent over the network.
// Structure:
//
// 0 1 2 3 4 5 6 7
// +---------------+
// |r r e e e s s c|
//
// where:
// r - reserved bits.
// e - 3-bit number of an experiment group counted from 1. 0 means there's no
// experiment ongoing.
// s - 2-bit simulcast stream id or spatial layer, counted from 1. 0 means that
// no simulcast information is set.
// c - content type. 0 means real-time video, 1 means screenshare.
//
namespace webrtc {
namespace videocontenttypehelpers {
namespace {
static constexpr uint8_t kScreenshareBitsSize = 1;
static constexpr uint8_t kScreenshareBitsMask =
(1u << kScreenshareBitsSize) - 1;
static constexpr uint8_t kSimulcastShift = 1;
static constexpr uint8_t kSimulcastBitsSize = 2;
static constexpr uint8_t kSimulcastBitsMask = ((1u << kSimulcastBitsSize) - 1)
<< kSimulcastShift; // 0b00000110
static constexpr uint8_t kExperimentShift = 3;
static constexpr uint8_t kExperimentBitsSize = 3;
static constexpr uint8_t kExperimentBitsMask =
((1u << kExperimentBitsSize) - 1) << kExperimentShift; // 0b00111000
static constexpr uint8_t kTotalBitsSize =
kScreenshareBitsSize + kSimulcastBitsSize + kExperimentBitsSize;
} // namespace
bool SetExperimentId(VideoContentType* content_type, uint8_t experiment_id) {
// Store in bits 2-4.
if (experiment_id >= (1 << kExperimentBitsSize))
return false;
*content_type = static_cast<VideoContentType>(
(static_cast<uint8_t>(*content_type) & ~kExperimentBitsMask) |
((experiment_id << kExperimentShift) & kExperimentBitsMask));
return true;
}
bool SetSimulcastId(VideoContentType* content_type, uint8_t simulcast_id) {
// Store in bits 5-6.
if (simulcast_id >= (1 << kSimulcastBitsSize))
return false;
*content_type = static_cast<VideoContentType>(
(static_cast<uint8_t>(*content_type) & ~kSimulcastBitsMask) |
((simulcast_id << kSimulcastShift) & kSimulcastBitsMask));
return true;
}
uint8_t GetExperimentId(const VideoContentType& content_type) {
return (static_cast<uint8_t>(content_type) & kExperimentBitsMask) >>
kExperimentShift;
}
uint8_t GetSimulcastId(const VideoContentType& content_type) {
return (static_cast<uint8_t>(content_type) & kSimulcastBitsMask) >>
kSimulcastShift;
}
bool IsScreenshare(const VideoContentType& content_type) {
return (static_cast<uint8_t>(content_type) & kScreenshareBitsMask) > 0;
}
bool IsValidContentType(uint8_t value) {
// Any 6-bit value is allowed.
return value < (1 << kTotalBitsSize);
}
const char* ToString(const VideoContentType& content_type) {
return IsScreenshare(content_type) ? "screen" : "realtime";
}
} // namespace videocontenttypehelpers
} // namespace webrtc
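A worked packing example (the function name is hypothetical) matching the byte layout described at the top of this file:

#include "api/video/video_content_type.h"

void ContentTypeExample() {
  webrtc::VideoContentType type =
      webrtc::VideoContentType::SCREENSHARE;                   // c bit set.
  webrtc::videocontenttypehelpers::SetSimulcastId(&type, 2);   // ss bits.
  webrtc::videocontenttypehelpers::SetExperimentId(&type, 3);  // eee bits.
  // static_cast<uint8_t>(type) == 0b00011101:
  //   (3 << 3) | (2 << 1) | 1 == 24 + 4 + 1 == 29.
  // Round trips: GetSimulcastId(type) == 2, GetExperimentId(type) == 3,
  // IsScreenshare(type) == true.
  // Out-of-range ids are rejected: SetSimulcastId(&type, 4) == false,
  // since only two bits are available.
}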

39
webrtc/api/video/video_content_type.h Normal file
View File

@ -0,0 +1,39 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_VIDEO_CONTENT_TYPE_H_
#define API_VIDEO_VIDEO_CONTENT_TYPE_H_
#include <stdint.h>
namespace webrtc {
enum class VideoContentType : uint8_t {
UNSPECIFIED = 0,
SCREENSHARE = 1,
};
namespace videocontenttypehelpers {
bool SetExperimentId(VideoContentType* content_type, uint8_t experiment_id);
bool SetSimulcastId(VideoContentType* content_type, uint8_t simulcast_id);
uint8_t GetExperimentId(const VideoContentType& content_type);
uint8_t GetSimulcastId(const VideoContentType& content_type);
bool IsScreenshare(const VideoContentType& content_type);
bool IsValidContentType(uint8_t value);
const char* ToString(const VideoContentType& content_type);
} // namespace videocontenttypehelpers
} // namespace webrtc
#endif // API_VIDEO_VIDEO_CONTENT_TYPE_H_

26
webrtc/api/video/video_rotation.h Normal file
View File

@ -0,0 +1,26 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_VIDEO_ROTATION_H_
#define API_VIDEO_VIDEO_ROTATION_H_
namespace webrtc {
// enum for clockwise rotation.
enum VideoRotation {
kVideoRotation_0 = 0,
kVideoRotation_90 = 90,
kVideoRotation_180 = 180,
kVideoRotation_270 = 270
};
} // namespace webrtc
#endif // API_VIDEO_VIDEO_ROTATION_H_

92
webrtc/api/video/video_timing.cc Normal file
View File

@ -0,0 +1,92 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video/video_timing.h"
#include "api/array_view.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
uint16_t VideoSendTiming::GetDeltaCappedMs(int64_t base_ms, int64_t time_ms) {
if (time_ms < base_ms) {
RTC_DLOG(LS_ERROR) << "Delta " << (time_ms - base_ms)
<< "ms expected to be positive";
}
return rtc::saturated_cast<uint16_t>(time_ms - base_ms);
}
TimingFrameInfo::TimingFrameInfo()
: rtp_timestamp(0),
capture_time_ms(-1),
encode_start_ms(-1),
encode_finish_ms(-1),
packetization_finish_ms(-1),
pacer_exit_ms(-1),
network_timestamp_ms(-1),
network2_timestamp_ms(-1),
receive_start_ms(-1),
receive_finish_ms(-1),
decode_start_ms(-1),
decode_finish_ms(-1),
render_time_ms(-1),
flags(VideoSendTiming::kNotTriggered) {}
int64_t TimingFrameInfo::EndToEndDelay() const {
return capture_time_ms >= 0 ? decode_finish_ms - capture_time_ms : -1;
}
bool TimingFrameInfo::IsLongerThan(const TimingFrameInfo& other) const {
int64_t other_delay = other.EndToEndDelay();
return other_delay == -1 || EndToEndDelay() > other_delay;
}
bool TimingFrameInfo::operator<(const TimingFrameInfo& other) const {
return other.IsLongerThan(*this);
}
bool TimingFrameInfo::operator<=(const TimingFrameInfo& other) const {
return !IsLongerThan(other);
}
bool TimingFrameInfo::IsOutlier() const {
return !IsInvalid() && (flags & VideoSendTiming::kTriggeredBySize);
}
bool TimingFrameInfo::IsTimerTriggered() const {
return !IsInvalid() && (flags & VideoSendTiming::kTriggeredByTimer);
}
bool TimingFrameInfo::IsInvalid() const {
return flags == VideoSendTiming::kInvalid;
}
std::string TimingFrameInfo::ToString() const {
if (IsInvalid()) {
return "";
}
char buf[1024];
rtc::SimpleStringBuilder sb(buf);
sb << rtp_timestamp << ',' << capture_time_ms << ',' << encode_start_ms << ','
<< encode_finish_ms << ',' << packetization_finish_ms << ','
<< pacer_exit_ms << ',' << network_timestamp_ms << ','
<< network2_timestamp_ms << ',' << receive_start_ms << ','
<< receive_finish_ms << ',' << decode_start_ms << ',' << decode_finish_ms
<< ',' << render_time_ms << ',' << IsOutlier() << ','
<< IsTimerTriggered();
return sb.str();
}
} // namespace webrtc

129
webrtc/api/video/video_timing.h Normal file
View File

@ -0,0 +1,129 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_VIDEO_TIMING_H_
#define API_VIDEO_VIDEO_TIMING_H_
#include <stdint.h>
#include <limits>
#include <string>
namespace webrtc {
// Video timing timestamps in ms, counted from the capture_time_ms of a frame.
// This structure represents data sent in the video-timing RTP header
// extension.
struct VideoSendTiming {
enum TimingFrameFlags : uint8_t {
kNotTriggered = 0, // Timing info valid, but not to be transmitted.
// Used on send-side only.
kTriggeredByTimer = 1 << 0, // Frame marked for tracing by periodic timer.
kTriggeredBySize = 1 << 1, // Frame marked for tracing due to size.
kInvalid = std::numeric_limits<uint8_t>::max() // Invalid, ignore!
};
// Returns |time_ms - base_ms| capped at the max 16-bit value.
// Used to fill this data structure as per the
// https://webrtc.org/experiments/rtp-hdrext/video-timing/ extension, which
// stores 16-bit deltas of timestamps from packet capture time.
static uint16_t GetDeltaCappedMs(int64_t base_ms, int64_t time_ms);
uint16_t encode_start_delta_ms;
uint16_t encode_finish_delta_ms;
uint16_t packetization_finish_delta_ms;
uint16_t pacer_exit_delta_ms;
uint16_t network_timestamp_delta_ms;
uint16_t network2_timestamp_delta_ms;
uint8_t flags;
};
// Used to report precise timings of 'timing frames'. Contains all important
// timestamps for the lifetime of that specific frame. Reported as a string
// via GetStats(). Only the frame that took the longest between two GetStats
// calls is reported.
struct TimingFrameInfo {
TimingFrameInfo();
// Returns end-to-end delay of a frame, if sender and receiver timestamps are
// synchronized, -1 otherwise.
int64_t EndToEndDelay() const;
// Returns true if the current frame took longer to process than the |other|
// frame. If the other frame's clocks are not synchronized, the current frame
// is always preferred.
bool IsLongerThan(const TimingFrameInfo& other) const;
// Returns true if flags are set to indicate this frame was marked for tracing
// due to the size being outside some limit.
bool IsOutlier() const;
// Returns true if flags are set to indicate this frame was marked for
// tracing due to the cyclic timer.
bool IsTimerTriggered() const;
// Returns true if the timing data is marked as invalid, in which case it
// should be ignored.
bool IsInvalid() const;
std::string ToString() const;
bool operator<(const TimingFrameInfo& other) const;
bool operator<=(const TimingFrameInfo& other) const;
uint32_t rtp_timestamp; // Identifier of a frame.
// All timestamps below are in the local monotonic clock of the receiver.
// If the sender clock is not yet estimated, sender timestamps
// (capture_time_ms ... pacer_exit_ms) are negative values, but still
// correct relative to each other.
int64_t capture_time_ms;  // Capture time of a frame.
int64_t encode_start_ms; // Encode start time.
int64_t encode_finish_ms; // Encode completion time.
int64_t packetization_finish_ms; // Time when frame was passed to pacer.
int64_t pacer_exit_ms; // Time when last packet was pushed out of pacer.
// Two in-network RTP processor timestamps: meaning is application specific.
int64_t network_timestamp_ms;
int64_t network2_timestamp_ms;
int64_t receive_start_ms; // First received packet time.
int64_t receive_finish_ms; // Last received packet time.
int64_t decode_start_ms; // Decode start time.
int64_t decode_finish_ms; // Decode completion time.
int64_t render_time_ms;  // Proposed render time to ensure smooth playback.
uint8_t flags; // Flags indicating validity and/or why tracing was triggered.
};
// Minimum and maximum playout delay values from capture to render.
// These are best effort values.
//
// A value < 0 indicates no change from previous valid value.
//
// min = max = 0 indicates that the receiver should try to render the
// frame as soon as possible.
//
// min = x, max = y indicates that the receiver is free to adapt
// in the range (x, y) based on network jitter.
struct VideoPlayoutDelay {
VideoPlayoutDelay() = default;
VideoPlayoutDelay(int min_ms, int max_ms) : min_ms(min_ms), max_ms(max_ms) {}
int min_ms = -1;
int max_ms = -1;
bool operator==(const VideoPlayoutDelay& rhs) const {
return min_ms == rhs.min_ms && max_ms == rhs.max_ms;
}
};
// TODO(bugs.webrtc.org/7660): Old name, delete after downstream use is updated.
using PlayoutDelay = VideoPlayoutDelay;
} // namespace webrtc
#endif // API_VIDEO_VIDEO_TIMING_H_
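Finally, a hypothetical sketch of the capping behavior that GetDeltaCappedMs documents:

#include <cstdint>

#include "api/video/video_timing.h"

void DeltaCapExample() {
  // In range: a plain difference.
  uint16_t d1 = webrtc::VideoSendTiming::GetDeltaCappedMs(1000, 1250);
  // d1 == 250.
  // Too large for 16 bits: saturated_cast clamps instead of wrapping.
  uint16_t d2 = webrtc::VideoSendTiming::GetDeltaCappedMs(0, 100000);
  // d2 == 65535.
  // A negative delta logs an error and saturates to 0.
  (void)d1;
  (void)d2;
}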