Bump to WebRTC M131 release
Ongoing fixes and improvements; the transient suppressor is gone. Also drops isac, since it no longer seems useful and is just build-system deadweight at this point.

Upstream references:

  Version: 131.0.6778.200
  WebRTC: 79aff54b0fa9238ce3518dd9eaf9610cd6f22e82
  Chromium: 2a19506ad24af755f2a215a4c61f775393e0db42
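The most visible API change in the updated sources below is that AudioFrameOperations::QuadToStereo() and AudioFrameOperations::DownmixChannels() now take InterleavedView<> arguments instead of raw pointer/size parameter lists. A minimal caller-side sketch of the new shape, with a hypothetical QuadBufferToStereo() helper; the include paths and the InterleavedView(data, samples_per_channel, num_channels) constructor are assumptions (upstream declares the type in api/audio/audio_view.h), not something this diff shows:

    // Sketch only: hypothetical helper; header paths and the InterleavedView
    // constructor signature are assumed, not taken from this diff.
    #include <cstddef>
    #include <cstdint>

    #include "api/audio/audio_view.h"
    #include "audio/utility/audio_frame_operations.h"

    // Downmix an interleaved quad buffer into an interleaved stereo buffer.
    void QuadBufferToStereo(const int16_t* in, int16_t* out,
                            size_t samples_per_channel) {
      webrtc::InterleavedView<const int16_t> src(in, samples_per_channel,
                                                 /*num_channels=*/4);
      webrtc::InterleavedView<int16_t> dst(out, samples_per_channel,
                                           /*num_channels=*/2);
      webrtc::AudioFrameOperations::QuadToStereo(src, dst);
    }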
@@ -29,72 +29,17 @@ const float kMuteFadeInc = 1.0f / kMuteFadeFrames;
 
 }  // namespace
 
-void AudioFrameOperations::Add(const AudioFrame& frame_to_add,
-                               AudioFrame* result_frame) {
-  // Sanity check.
-  RTC_DCHECK(result_frame);
-  RTC_DCHECK_GT(result_frame->num_channels_, 0);
-  RTC_DCHECK_EQ(result_frame->num_channels_, frame_to_add.num_channels_);
-
-  bool no_previous_data = result_frame->muted();
-  if (result_frame->samples_per_channel_ != frame_to_add.samples_per_channel_) {
-    // Special case we have no data to start with.
-    RTC_DCHECK_EQ(result_frame->samples_per_channel_, 0);
-    result_frame->samples_per_channel_ = frame_to_add.samples_per_channel_;
-    no_previous_data = true;
-  }
-
-  if (result_frame->vad_activity_ == AudioFrame::kVadActive ||
-      frame_to_add.vad_activity_ == AudioFrame::kVadActive) {
-    result_frame->vad_activity_ = AudioFrame::kVadActive;
-  } else if (result_frame->vad_activity_ == AudioFrame::kVadUnknown ||
-             frame_to_add.vad_activity_ == AudioFrame::kVadUnknown) {
-    result_frame->vad_activity_ = AudioFrame::kVadUnknown;
-  }
-
-  if (result_frame->speech_type_ != frame_to_add.speech_type_)
-    result_frame->speech_type_ = AudioFrame::kUndefined;
-
-  if (!frame_to_add.muted()) {
-    const int16_t* in_data = frame_to_add.data();
-    int16_t* out_data = result_frame->mutable_data();
-    size_t length =
-        frame_to_add.samples_per_channel_ * frame_to_add.num_channels_;
-    if (no_previous_data) {
-      std::copy(in_data, in_data + length, out_data);
-    } else {
-      for (size_t i = 0; i < length; i++) {
-        const int32_t wrap_guard = static_cast<int32_t>(out_data[i]) +
-                                   static_cast<int32_t>(in_data[i]);
-        out_data[i] = rtc::saturated_cast<int16_t>(wrap_guard);
-      }
-    }
-  }
-}
-
-int AudioFrameOperations::MonoToStereo(AudioFrame* frame) {
-  if (frame->num_channels_ != 1) {
-    return -1;
-  }
-  UpmixChannels(2, frame);
-  return 0;
-}
-
-int AudioFrameOperations::StereoToMono(AudioFrame* frame) {
-  if (frame->num_channels_ != 2) {
-    return -1;
-  }
-  DownmixChannels(1, frame);
-  return frame->num_channels_ == 1 ? 0 : -1;
-}
-
-void AudioFrameOperations::QuadToStereo(const int16_t* src_audio,
-                                        size_t samples_per_channel,
-                                        int16_t* dst_audio) {
-  for (size_t i = 0; i < samples_per_channel; i++) {
-    dst_audio[i * 2] =
+void AudioFrameOperations::QuadToStereo(
+    InterleavedView<const int16_t> src_audio,
+    InterleavedView<int16_t> dst_audio) {
+  RTC_DCHECK_EQ(NumChannels(src_audio), 4);
+  RTC_DCHECK_EQ(NumChannels(dst_audio), 2);
+  RTC_DCHECK_EQ(SamplesPerChannel(src_audio), SamplesPerChannel(dst_audio));
+  for (size_t i = 0; i < SamplesPerChannel(src_audio); ++i) {
+    auto dst_frame = i * 2;
+    dst_audio[dst_frame] =
         (static_cast<int32_t>(src_audio[4 * i]) + src_audio[4 * i + 1]) >> 1;
-    dst_audio[i * 2 + 1] =
+    dst_audio[dst_frame + 1] =
         (static_cast<int32_t>(src_audio[4 * i + 2]) + src_audio[4 * i + 3]) >>
         1;
   }
@@ -109,30 +54,34 @@ int AudioFrameOperations::QuadToStereo(AudioFrame* frame) {
                 AudioFrame::kMaxDataSizeSamples);
 
   if (!frame->muted()) {
-    QuadToStereo(frame->data(), frame->samples_per_channel_,
-                 frame->mutable_data());
+    // Note that `src` and `dst` will map in to the same buffer, but the call
+    // to `mutable_data()` changes the layout of `frame`, so `src` and `dst`
+    // will have different dimensions (important to call `data_view()` first).
+    auto src = frame->data_view();
+    auto dst = frame->mutable_data(frame->samples_per_channel_, 2);
+    QuadToStereo(src, dst);
+  } else {
+    frame->num_channels_ = 2;
   }
-  frame->num_channels_ = 2;
 
   return 0;
 }
 
-void AudioFrameOperations::DownmixChannels(const int16_t* src_audio,
-                                           size_t src_channels,
-                                           size_t samples_per_channel,
-                                           size_t dst_channels,
-                                           int16_t* dst_audio) {
-  if (src_channels > 1 && dst_channels == 1) {
-    DownmixInterleavedToMono(src_audio, samples_per_channel, src_channels,
-                             dst_audio);
-    return;
-  } else if (src_channels == 4 && dst_channels == 2) {
-    QuadToStereo(src_audio, samples_per_channel, dst_audio);
-    return;
+void AudioFrameOperations::DownmixChannels(
+    InterleavedView<const int16_t> src_audio,
+    InterleavedView<int16_t> dst_audio) {
+  RTC_DCHECK_EQ(SamplesPerChannel(src_audio), SamplesPerChannel(dst_audio));
+  if (NumChannels(src_audio) > 1 && IsMono(dst_audio)) {
+    // TODO(tommi): change DownmixInterleavedToMono to support InterleavedView
+    // and MonoView.
+    DownmixInterleavedToMono(&src_audio.data()[0], SamplesPerChannel(src_audio),
+                             NumChannels(src_audio), &dst_audio.data()[0]);
+  } else if (NumChannels(src_audio) == 4 && NumChannels(dst_audio) == 2) {
+    QuadToStereo(src_audio, dst_audio);
+  } else {
+    RTC_DCHECK_NOTREACHED() << "src_channels: " << NumChannels(src_audio)
+                            << ", dst_channels: " << NumChannels(dst_audio);
   }
-
-  RTC_DCHECK_NOTREACHED() << "src_channels: " << src_channels
-                          << ", dst_channels: " << dst_channels;
 }
 
 void AudioFrameOperations::DownmixChannels(size_t dst_channels,
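For the mono path of the new DownmixChannels() overload above, the destination view only needs to report a single channel. A hedged caller-side sketch with a hypothetical StereoBufferToMono() helper, using the same assumed headers and InterleavedView constructor as the sketch at the top of the commit message:

    #include <cstddef>
    #include <cstdint>

    #include "api/audio/audio_view.h"                   // assumed path
    #include "audio/utility/audio_frame_operations.h"   // assumed path

    // Hypothetical helper: downmix an interleaved stereo buffer to mono.
    // Dispatches to the DownmixInterleavedToMono() branch above because
    // IsMono(dst) is true.
    void StereoBufferToMono(const int16_t* in, int16_t* out,
                            size_t samples_per_channel) {
      webrtc::InterleavedView<const int16_t> src(in, samples_per_channel,
                                                 /*num_channels=*/2);
      webrtc::InterleavedView<int16_t> dst(out, samples_per_channel,
                                           /*num_channels=*/1);
      webrtc::AudioFrameOperations::DownmixChannels(src, dst);
    }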
@@ -169,14 +118,16 @@ void AudioFrameOperations::UpmixChannels(size_t target_number_of_channels,
   if (!frame->muted()) {
     // Up-mixing done in place. Going backwards through the frame ensure nothing
     // is irrevocably overwritten.
-    int16_t* frame_data = frame->mutable_data();
-    for (int i = frame->samples_per_channel_ - 1; i >= 0; i--) {
+    auto frame_data = frame->mutable_data(frame->samples_per_channel_,
+                                          target_number_of_channels);
+    for (int i = frame->samples_per_channel_ - 1; i >= 0; --i) {
       for (size_t j = 0; j < target_number_of_channels; ++j) {
         frame_data[target_number_of_channels * i + j] = frame_data[i];
       }
     }
+  } else {
+    frame->num_channels_ = target_number_of_channels;
   }
-  frame->num_channels_ = target_number_of_channels;
 }
 
 void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
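The up-mix hunk above keeps the existing backward iteration: frame i is written to indices target_number_of_channels * i + j, which are never smaller than i, so the mono sample at index i is still intact when it is read. A standalone illustration of the same pattern with a hypothetical UpmixInPlace() helper (plain C++ on a std::vector, not the WebRTC API):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical helper: widen interleaved mono samples to `channels`
    // channels in place, mirroring the backward loop in UpmixChannels() above.
    void UpmixInPlace(std::vector<int16_t>& samples, size_t channels) {
      const size_t mono_len = samples.size();
      samples.resize(mono_len * channels);
      for (int i = static_cast<int>(mono_len) - 1; i >= 0; --i) {
        for (size_t j = 0; j < channels; ++j) {
          samples[channels * i + j] = samples[i];  // channels * i >= i, so safe.
        }
      }
    }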
@@ -250,35 +201,6 @@ void AudioFrameOperations::Mute(AudioFrame* frame) {
   Mute(frame, true, true);
 }
 
-void AudioFrameOperations::ApplyHalfGain(AudioFrame* frame) {
-  RTC_DCHECK(frame);
-  RTC_DCHECK_GT(frame->num_channels_, 0);
-  if (frame->num_channels_ < 1 || frame->muted()) {
-    return;
-  }
-
-  int16_t* frame_data = frame->mutable_data();
-  for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
-       i++) {
-    frame_data[i] = frame_data[i] >> 1;
-  }
-}
-
-int AudioFrameOperations::Scale(float left, float right, AudioFrame* frame) {
-  if (frame->num_channels_ != 2) {
-    return -1;
-  } else if (frame->muted()) {
-    return 0;
-  }
-
-  int16_t* frame_data = frame->mutable_data();
-  for (size_t i = 0; i < frame->samples_per_channel_; i++) {
-    frame_data[2 * i] = static_cast<int16_t>(left * frame_data[2 * i]);
-    frame_data[2 * i + 1] = static_cast<int16_t>(right * frame_data[2 * i + 1]);
-  }
-  return 0;
-}
-
 int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame* frame) {
   if (frame->muted()) {
     return 0;
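The last hunk drops ApplyHalfGain() and Scale(), and the first hunk drops Add(), MonoToStereo() and StereoToMono(), so downstream code that still mixed frames through Add() now has to do its own saturating sum. A self-contained sketch of the same arithmetic the removed code used (accumulate in int32_t, then clamp to the int16_t range; the MixWithSaturation() helper is hypothetical and std::clamp stands in for rtc::saturated_cast):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // Hypothetical helper: mix `src` into `dst` (both interleaved, `length`
    // total samples) with saturation, mirroring the wrap_guard logic of the
    // removed AudioFrameOperations::Add().
    void MixWithSaturation(const int16_t* src, int16_t* dst, size_t length) {
      for (size_t i = 0; i < length; ++i) {
        const int32_t sum =
            static_cast<int32_t>(dst[i]) + static_cast<int32_t>(src[i]);
        dst[i] = static_cast<int16_t>(
            std::clamp<int32_t>(sum, INT16_MIN, INT16_MAX));
      }
    }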