Adds stereo support to FineAudioBuffer for mobile platforms.

...continuation of review in https://webrtc-review.googlesource.com/c/src/+/70781

This CL ensures that the FineAudioBuffer can support stereo and also adapts
all classes which use the FineAudioBuffer.

Note that this CL does NOT enable stereo on mobile platforms by default. All it does is ensure
that we *can*. As is, the only functional change is that all clients
will now use a FineAudioBuffer implementation which supports stereo (see
separate unittest).

The FineAudioBuffer constructor has been modified since it is better to
utilize the information provided in the injected AudioDeviceBuffer pointer
instead of forcing the user to supply redundant parameters.

The capacity parameter was also removed since it adds no value now when the
more flexible rtc::BufferT is used.

I have also done local changes (not included in the CL) where I switch
all affected audio backends to stereo and verified that it works in real time
on all affected platforms (Android:OpenSL ES, Android:AAudio and iOS).

Also note that the changes in:

sdk/android/src/jni/audio_device/aaudio_player.cc
sdk/android/src/jni/audio_device/aaudio_recorder.cc
sdk/android/src/jni/audio_device/opensles_player.cc
sdk/android/src/jni/audio_device/opensles_recorder.cc

are simply copies of the changes done under modules/audio_device/android since we currently
have two versions of the ADM for Android.

Bug: webrtc:9172
Change-Id: I1ed3798bd1925381d68f0f9492af921f515b9053
Reviewed-on: https://webrtc-review.googlesource.com/71201
Commit-Queue: Henrik Andreassson <henrika@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#22998}
This commit is contained in:
henrika
2018-04-24 13:22:31 +02:00
committed by Commit Bot
parent 47d7fbd8fe
commit 29e865a5d8
13 changed files with 175 additions and 141 deletions

View File

@ -14,6 +14,7 @@
#include "modules/audio_device/fine_audio_buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/ptr_util.h"
namespace webrtc {
@ -40,7 +41,9 @@ AAudioPlayer::~AAudioPlayer() {
int AAudioPlayer::Init() {
RTC_LOG(INFO) << "Init";
RTC_DCHECK_RUN_ON(&main_thread_checker_);
RTC_DCHECK_EQ(aaudio_.audio_parameters().channels(), 1u);
if (aaudio_.audio_parameters().channels() == 2) {
RTC_DLOG(LS_WARNING) << "Stereo mode is enabled";
}
return 0;
}
@ -120,12 +123,8 @@ void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
RTC_CHECK(audio_device_buffer_);
// Create a modified audio buffer class which allows us to ask for any number
// of samples (and not only multiple of 10ms) to match the optimal buffer
// size per callback used by AAudio. Use an initial capacity of 50ms to ensure
// that the buffer can cache old data and at the same time be prepared for
// increased burst size in AAudio if buffer underruns are detected.
const size_t capacity = 5 * audio_parameters.frames_per_10ms_buffer();
fine_audio_buffer_.reset(new FineAudioBuffer(
audio_device_buffer_, audio_parameters.sample_rate(), capacity));
// size per callback used by AAudio.
fine_audio_buffer_ = rtc::MakeUnique<FineAudioBuffer>(audio_device_buffer_);
}
bool AAudioPlayer::SpeakerVolumeIsAvailable() {
@ -209,7 +208,8 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,
memset(audio_data, 0, num_bytes);
} else {
fine_audio_buffer_->GetPlayoutData(
rtc::MakeArrayView(static_cast<int16_t*>(audio_data), num_frames),
rtc::MakeArrayView(static_cast<int16_t*>(audio_data),
aaudio_.samples_per_frame() * num_frames),
static_cast<int>(latency_millis_ + 0.5));
}

View File

@ -14,6 +14,7 @@
#include "modules/audio_device/fine_audio_buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/ptr_util.h"
#include "rtc_base/timeutils.h"
#include "system_wrappers/include/sleep.h"
@ -43,7 +44,9 @@ AAudioRecorder::~AAudioRecorder() {
int AAudioRecorder::Init() {
RTC_LOG(INFO) << "Init";
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTC_DCHECK_EQ(aaudio_.audio_parameters().channels(), 1u);
if (aaudio_.audio_parameters().channels() == 2) {
RTC_DLOG(LS_WARNING) << "Stereo mode is enabled";
}
return 0;
}
@ -117,9 +120,7 @@ void AAudioRecorder::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
// Create a modified audio buffer class which allows us to deliver any number
// of samples (and not only multiples of 10ms which WebRTC uses) to match the
// native AAudio buffer size.
const size_t capacity = 5 * audio_parameters.GetBytesPer10msBuffer();
fine_audio_buffer_.reset(new FineAudioBuffer(
audio_device_buffer_, audio_parameters.sample_rate(), capacity));
fine_audio_buffer_ = rtc::MakeUnique<FineAudioBuffer>(audio_device_buffer_);
}
bool AAudioRecorder::IsAcousticEchoCancelerSupported() const {
@ -193,7 +194,8 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
// Copy recorded audio in |audio_data| to the WebRTC sink using the
// FineAudioBuffer object.
fine_audio_buffer_->DeliverRecordedData(
rtc::MakeArrayView(static_cast<const int16_t*>(audio_data), num_frames),
rtc::MakeArrayView(static_cast<const int16_t*>(audio_data),
aaudio_.samples_per_frame() * num_frames),
static_cast<int>(latency_millis_ + 0.5));
return AAUDIO_CALLBACK_RESULT_CONTINUE;

View File

@ -18,6 +18,7 @@
#include "rtc_base/checks.h"
#include "rtc_base/format_macros.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/ptr_util.h"
#include "rtc_base/timeutils.h"
#include "sdk/android/src/jni/audio_device/audio_common.h"
@ -84,9 +85,7 @@ int OpenSLESPlayer::Init() {
ALOGD("Init[tid=%d]", rtc::CurrentThreadId());
RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (audio_parameters_.channels() == 2) {
// TODO(henrika): FineAudioBuffer needs more work to support stereo.
ALOGE("OpenSLESPlayer does not support stereo");
return -1;
ALOGW("Stereo mode is enabled");
}
return 0;
}
@ -222,13 +221,12 @@ void OpenSLESPlayer::AllocateDataBuffers() {
// recommended to construct audio buffers so that they contain an exact
// multiple of this number. If so, callbacks will occur at regular intervals,
// which reduces jitter.
const size_t buffer_size_in_samples = audio_parameters_.frames_per_buffer();
const size_t buffer_size_in_samples =
audio_parameters_.frames_per_buffer() * audio_parameters_.channels();
ALOGD("native buffer size: %" PRIuS, buffer_size_in_samples);
ALOGD("native buffer size in ms: %.2f",
audio_parameters_.GetBufferSizeInMilliseconds());
fine_audio_buffer_.reset(
new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(),
2 * audio_parameters_.frames_per_buffer()));
fine_audio_buffer_ = rtc::MakeUnique<FineAudioBuffer>(audio_device_buffer_);
// Allocated memory for audio buffers.
for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
audio_buffers_[i].reset(new SLint16[buffer_size_in_samples]);
@ -407,7 +405,7 @@ void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get());
if (silence) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
// Avoid aquiring real audio data from WebRTC and fill the buffer with
// Avoid acquiring real audio data from WebRTC and fill the buffer with
// zeros instead. Used to prime the buffer with silence and to avoid asking
// for audio data from two different threads.
memset(audio_ptr8, 0, audio_parameters_.GetBytesPerBuffer());
@ -419,7 +417,8 @@ void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
// delay estimation.
fine_audio_buffer_->GetPlayoutData(
rtc::ArrayView<int16_t>(audio_buffers_[buffer_index_].get(),
audio_parameters_.frames_per_buffer()),
audio_parameters_.frames_per_buffer() *
audio_parameters_.channels()),
25);
}
// Enqueue the decoded audio buffer for playback.

View File

@ -18,6 +18,7 @@
#include "rtc_base/checks.h"
#include "rtc_base/format_macros.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/ptr_util.h"
#include "rtc_base/timeutils.h"
#include "sdk/android/src/jni/audio_device/audio_common.h"
@ -80,9 +81,7 @@ int OpenSLESRecorder::Init() {
ALOGD("Init[tid=%d]", rtc::CurrentThreadId());
RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (audio_parameters_.channels() == 2) {
// TODO(henrika): FineAudioBuffer needs more work to support stereo.
ALOGE("OpenSLESRecorder does not support stereo");
return -1;
ALOGD("Stereo mode is enabled");
}
return 0;
}
@ -353,14 +352,13 @@ void OpenSLESRecorder::AllocateDataBuffers() {
audio_parameters_.GetBytesPerBuffer());
ALOGD("native sample rate: %d", audio_parameters_.sample_rate());
RTC_DCHECK(audio_device_buffer_);
fine_audio_buffer_.reset(
new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(),
2 * audio_parameters_.frames_per_buffer()));
fine_audio_buffer_ = rtc::MakeUnique<FineAudioBuffer>(audio_device_buffer_);
// Allocate queue of audio buffers that stores recorded audio samples.
const int data_size_samples = audio_parameters_.frames_per_buffer();
const int buffer_size_samples =
audio_parameters_.frames_per_buffer() * audio_parameters_.channels();
audio_buffers_.reset(new std::unique_ptr<SLint16[]>[kNumOfOpenSLESBuffers]);
for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
audio_buffers_[i].reset(new SLint16[data_size_samples]);
audio_buffers_[i].reset(new SLint16[buffer_size_samples]);
}
}
@ -385,11 +383,10 @@ void OpenSLESRecorder::ReadBufferQueue() {
// since there is no support to turn off built-in EC in combination with
// OpenSL ES anyhow. Hence, as is, the WebRTC based AEC (which would use
// these estimates) will never be active.
const size_t size_in_samples =
static_cast<size_t>(audio_parameters_.frames_per_buffer());
fine_audio_buffer_->DeliverRecordedData(
rtc::ArrayView<const int16_t>(audio_buffers_[buffer_index_].get(),
size_in_samples),
rtc::ArrayView<const int16_t>(
audio_buffers_[buffer_index_].get(),
audio_parameters_.frames_per_buffer() * audio_parameters_.channels()),
25);
// Enqueue the utilized audio buffer and use if for recording again.
EnqueueAudioBuffer();