APM: Localize/abstract the usage of AudioFrame

This CL moves the implementation of the AudioFrame
support from the implementation of AudioProcessing
to proxy methods that map the call to the integer
stream interfaces (added in another CL).

The CL also changes the WebRTC code using the AudioFrame
interfaces to instead use the proxy methods.

This CL will be followed by one more CL that removes
the usage of the AudioFrame class from the rest of
APM (apart from the AudioProcessing API).

Bug: webrtc:5298
Change-Id: Iecb72e9fa896ebea3ac30e558489c1bac88f5891
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/170110
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Reviewed-by: Sam Zackrisson <saza@webrtc.org>
Commit-Queue: Per Åhgren <peah@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#30812}
This commit is contained in:
Per Åhgren
2020-03-17 13:23:58 +01:00
committed by Commit Bot
parent 469205e1ad
commit 71652f4b66
9 changed files with 148 additions and 63 deletions

View File

@ -0,0 +1,72 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_processing/include/audio_frame_proxies.h"
#include "api/audio/audio_frame.h"
#include "modules/audio_processing/include/audio_processing.h"
namespace webrtc {
// Proxy that runs the capture-side (forward) APM processing on an AudioFrame,
// in place. Maps the frame onto the integer stream interface of
// AudioProcessing and writes back the voice-activity decision when the APM
// reports one. Returns an AudioProcessing::Error code.
int ProcessAudioFrame(AudioProcessing* ap, AudioFrame* frame) {
  // Reject null arguments before dereferencing anything.
  if (ap == nullptr || frame == nullptr) {
    return AudioProcessing::Error::kNullPointerError;
  }

  // Processing is in place: the output stream has the same rate and channel
  // count as the input. Neither side carries a keyboard channel.
  const StreamConfig config_in(frame->sample_rate_hz_, frame->num_channels_,
                               /*has_keyboard=*/false);
  const StreamConfig config_out(frame->sample_rate_hz_, frame->num_channels_,
                                /*has_keyboard=*/false);
  RTC_DCHECK_EQ(frame->samples_per_channel(), config_in.num_frames());

  AudioProcessing::VoiceDetectionResult vad =
      AudioProcessing::VoiceDetectionResult::kNotAvailable;
  const int error = ap->ProcessStream(frame->data(), config_in, config_out,
                                      frame->mutable_data(), &vad);

  // Only overwrite the frame's VAD flag when the APM actually produced a
  // decision; otherwise leave whatever the caller had set.
  if (vad != AudioProcessing::VoiceDetectionResult::kNotAvailable) {
    const bool voice_detected =
        vad == AudioProcessing::VoiceDetectionResult::kDetected;
    frame->vad_activity_ = voice_detected ? AudioFrame::VADActivity::kVadActive
                                          : AudioFrame::VADActivity::kVadPassive;
  }
  return error;
}
// Proxy that runs the render-side (reverse) APM processing on an AudioFrame,
// in place, by mapping the frame onto the integer stream interface of
// AudioProcessing. Unlike the forward path, the reverse stream must already
// be at one of the APM native rates. Returns an AudioProcessing::Error code.
int ProcessReverseAudioFrame(AudioProcessing* ap, AudioFrame* frame) {
  // Reject null arguments before dereferencing anything.
  if (!frame || !ap) {
    return AudioProcessing::Error::kNullPointerError;
  }

  // Must be a native rate.
  if (frame->sample_rate_hz_ != AudioProcessing::NativeRate::kSampleRate8kHz &&
      frame->sample_rate_hz_ != AudioProcessing::NativeRate::kSampleRate16kHz &&
      frame->sample_rate_hz_ != AudioProcessing::NativeRate::kSampleRate32kHz &&
      frame->sample_rate_hz_ != AudioProcessing::NativeRate::kSampleRate48kHz) {
    return AudioProcessing::Error::kBadSampleRateError;
  }

  // NOTE(review): num_channels_ is unsigned (size_t in AudioFrame), so the
  // previous `<= 0` comparison was tautological for the negative half and
  // triggers unsigned-comparison warnings; `== 0` is the intended check.
  // Confirm against AudioFrame's declaration if the member type changes.
  if (frame->num_channels_ == 0) {
    return AudioProcessing::Error::kBadNumberChannelsError;
  }

  // Processing is in place: input and output share the frame's rate and
  // channel count. Neither side carries a keyboard channel.
  StreamConfig input_config(frame->sample_rate_hz_, frame->num_channels_,
                            /*has_keyboard=*/false);
  StreamConfig output_config(frame->sample_rate_hz_, frame->num_channels_,
                             /*has_keyboard=*/false);

  int result = ap->ProcessReverseStream(frame->data(), input_config,
                                        output_config, frame->mutable_data());
  return result;
}
} // namespace webrtc