Improved audio buffer handling for iOS.
This change:

- Reduces complexity for audio playout by removing a redundant memcpy in the
  output audio path.
- Adds support for playout in the iOS simulator, since the audio layer is now
  allowed to ask for a different buffer size at each callback. Real iOS devices
  always ask for the same size; simulators do not.

This change comes at no new cost for real devices.

BUG=b/37580746
Review-Url: https://codereview.webrtc.org/2894873002
Cr-Commit-Position: refs/heads/master@{#18321}
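At the heart of the change, GetPlayoutData() and DeliverRecordedData() now take rtc::ArrayView arguments instead of bare pointers, so every call carries its own size and the callee no longer assumes a fixed frame size. Below is a minimal standalone sketch of that idea, not WebRTC code; ByteView and the free GetPlayoutData() function are illustrative stand-ins:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <iostream>
    #include <vector>

    // Illustrative stand-in for rtc::ArrayView<int8_t>: a pointer plus an
    // explicit element count, so each call carries its own size.
    struct ByteView {
      int8_t* data;
      size_t size;
    };

    // Mimics the new GetPlayoutData(rtc::ArrayView<int8_t>) contract: fill
    // exactly view.size bytes, whatever that happens to be for this callback.
    void GetPlayoutData(ByteView view) {
      std::memset(view.data, 0, view.size);  // Silence; the real class drains a cache.
    }

    int main() {
      // The iOS simulator may ask for a different number of frames at each
      // callback; real devices always ask for the same number.
      for (size_t num_frames : {512u, 480u, 1024u}) {
        std::vector<int8_t> buffer(num_frames * sizeof(int16_t));
        GetPlayoutData({buffer.data(), buffer.size()});
        std::cout << "Filled " << buffer.size() << " bytes\n";
      }
      return 0;
    }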
@@ -12,6 +12,7 @@
 
 #include <android/log.h>
 
+#include "webrtc/base/array_view.h"
 #include "webrtc/base/arraysize.h"
 #include "webrtc/base/checks.h"
 #include "webrtc/base/format_macros.h"
@@ -209,9 +210,9 @@ void OpenSLESPlayer::AllocateDataBuffers() {
   ALOGD("native buffer size: %" PRIuS, buffer_size_in_bytes);
   ALOGD("native buffer size in ms: %.2f",
         audio_parameters_.GetBufferSizeInMilliseconds());
-  fine_audio_buffer_.reset(
-      new FineAudioBuffer(audio_device_buffer_, buffer_size_in_bytes,
-                          audio_parameters_.sample_rate()));
+  fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_,
+                                               audio_parameters_.sample_rate(),
+                                               2 * buffer_size_in_bytes));
   // Allocated memory for audio buffers.
   for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
     audio_buffers_[i].reset(new SLint8[buffer_size_in_bytes]);
@@ -398,7 +399,8 @@ void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
     // Read audio data from the WebRTC source using the FineAudioBuffer object
     // to adjust for differences in buffer size between WebRTC (10ms) and native
     // OpenSL ES.
-    fine_audio_buffer_->GetPlayoutData(audio_ptr);
+    fine_audio_buffer_->GetPlayoutData(rtc::ArrayView<SLint8>(
+        audio_ptr, audio_parameters_.GetBytesPerBuffer()));
   }
   // Enqueue the decoded audio buffer for playback.
   SLresult err = (*simple_buffer_queue_)
@@ -12,6 +12,7 @@
 
 #include <android/log.h>
 
+#include "webrtc/base/array_view.h"
 #include "webrtc/base/arraysize.h"
 #include "webrtc/base/checks.h"
 #include "webrtc/base/format_macros.h"
@@ -335,9 +336,9 @@ void OpenSLESRecorder::AllocateDataBuffers() {
         audio_parameters_.GetBytesPerBuffer());
   ALOGD("native sample rate: %d", audio_parameters_.sample_rate());
   RTC_DCHECK(audio_device_buffer_);
-  fine_audio_buffer_.reset(new FineAudioBuffer(
-      audio_device_buffer_, audio_parameters_.GetBytesPerBuffer(),
-      audio_parameters_.sample_rate()));
+  fine_audio_buffer_.reset(
+      new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(),
+                          2 * audio_parameters_.GetBytesPerBuffer()));
   // Allocate queue of audio buffers that stores recorded audio samples.
   const int data_size_bytes = audio_parameters_.GetBytesPerBuffer();
   audio_buffers_.reset(new std::unique_ptr<SLint8[]>[kNumOfOpenSLESBuffers]);
@@ -371,7 +372,8 @@ void OpenSLESRecorder::ReadBufferQueue() {
       static_cast<size_t>(audio_parameters_.GetBytesPerBuffer());
   const int8_t* data =
       static_cast<const int8_t*>(audio_buffers_[buffer_index_].get());
-  fine_audio_buffer_->DeliverRecordedData(data, size_in_bytes, 25, 25);
+  fine_audio_buffer_->DeliverRecordedData(
+      rtc::ArrayView<const int8_t>(data, size_in_bytes), 25, 25);
   // Enqueue the utilized audio buffer and use if for recording again.
   EnqueueAudioBuffer();
 }
@@ -21,14 +21,15 @@
 namespace webrtc {
 
 FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* device_buffer,
-                                 size_t desired_frame_size_bytes,
-                                 int sample_rate)
+                                 int sample_rate,
+                                 size_t capacity)
     : device_buffer_(device_buffer),
-      desired_frame_size_bytes_(desired_frame_size_bytes),
       sample_rate_(sample_rate),
      samples_per_10_ms_(static_cast<size_t>(sample_rate_ * 10 / 1000)),
-      bytes_per_10_ms_(samples_per_10_ms_ * sizeof(int16_t)) {
-  LOG(INFO) << "desired_frame_size_bytes:" << desired_frame_size_bytes;
+      bytes_per_10_ms_(samples_per_10_ms_ * sizeof(int16_t)),
+      playout_buffer_(0, capacity),
+      record_buffer_(0, capacity) {
+  LOG(INFO) << "samples_per_10_ms_:" << samples_per_10_ms_;
 }
 
 FineAudioBuffer::~FineAudioBuffer() {}
@@ -41,11 +42,11 @@ void FineAudioBuffer::ResetRecord() {
   record_buffer_.Clear();
 }
 
-void FineAudioBuffer::GetPlayoutData(int8_t* buffer) {
-  const size_t num_bytes = desired_frame_size_bytes_;
+void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int8_t> audio_buffer) {
   // Ask WebRTC for new data in chunks of 10ms until we have enough to
   // fulfill the request. It is possible that the buffer already contains
   // enough samples from the last round.
+  const size_t num_bytes = audio_buffer.size();
   while (playout_buffer_.size() < num_bytes) {
     // Get 10ms decoded audio from WebRTC.
     device_buffer_->RequestPlayoutData(samples_per_10_ms_);
@@ -61,19 +62,19 @@ void FineAudioBuffer::GetPlayoutData(int8_t* buffer) {
     RTC_DCHECK_EQ(bytes_per_10_ms_, bytes_written);
   }
   // Provide the requested number of bytes to the consumer.
-  memcpy(buffer, playout_buffer_.data(), num_bytes);
+  memcpy(audio_buffer.data(), playout_buffer_.data(), num_bytes);
   // Move remaining samples to start of buffer to prepare for next round.
   memmove(playout_buffer_.data(), playout_buffer_.data() + num_bytes,
           playout_buffer_.size() - num_bytes);
   playout_buffer_.SetSize(playout_buffer_.size() - num_bytes);
 }
 
-void FineAudioBuffer::DeliverRecordedData(const int8_t* buffer,
-                                          size_t size_in_bytes,
-                                          int playout_delay_ms,
-                                          int record_delay_ms) {
+void FineAudioBuffer::DeliverRecordedData(
+    rtc::ArrayView<const int8_t> audio_buffer,
+    int playout_delay_ms,
+    int record_delay_ms) {
   // Always append new data and grow the buffer if needed.
-  record_buffer_.AppendData(buffer, size_in_bytes);
+  record_buffer_.AppendData(audio_buffer.data(), audio_buffer.size());
   // Consume samples from buffer in chunks of 10ms until there is not
   // enough data left. The number of remaining bytes in the cache is given by
   // the new size of the buffer.
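GetPlayoutData() above keeps pulling fixed 10 ms chunks from the device buffer until it can fill the requested view, then saves the remainder for the next callback. A self-contained sketch of that accumulation scheme (PlayoutFifo is a simplified stand-in, not the actual class; silence stands in for decoded audio):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Simplified model of the FineAudioBuffer playout path: accumulate fixed
    // 10 ms chunks until a request of arbitrary size can be served.
    class PlayoutFifo {
     public:
      explicit PlayoutFifo(size_t bytes_per_10_ms)
          : bytes_per_10_ms_(bytes_per_10_ms) {}

      void GetPlayoutData(int8_t* out, size_t num_bytes) {
        // Pull 10 ms chunks until the cache can fulfill the request; it may
        // already hold enough samples from the previous round.
        while (cache_.size() < num_bytes) {
          // Stand-in for device_buffer_->RequestPlayoutData(samples_per_10_ms_).
          cache_.insert(cache_.end(), bytes_per_10_ms_, 0);
        }
        std::memcpy(out, cache_.data(), num_bytes);
        // Keep leftover bytes for the next call (the memmove/SetSize pair above).
        cache_.erase(cache_.begin(), cache_.begin() + num_bytes);
      }

     private:
      const size_t bytes_per_10_ms_;
      std::vector<int8_t> cache_;
    };

    int main() {
      PlayoutFifo fifo(882);  // 10 ms of mono 16-bit audio at 44.1 kHz.
      std::vector<int8_t> out(500);
      fifo.GetPlayoutData(out.data(), out.size());  // Pulls 882, keeps 382 bytes.
      return 0;
    }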
@@ -13,6 +13,7 @@
 
 #include <memory>
 
+#include "webrtc/base/array_view.h"
 #include "webrtc/base/buffer.h"
 #include "webrtc/typedefs.h"
 
@@ -28,40 +29,38 @@ class AudioDeviceBuffer;
 // in 10ms chunks when the size of the provided audio buffers differs from 10ms.
 // As an example: calling DeliverRecordedData() with 5ms buffers will deliver
 // accumulated 10ms worth of data to the ADB every second call.
 // TODO(henrika): add support for stereo when mobile platforms need it.
 class FineAudioBuffer {
  public:
   // |device_buffer| is a buffer that provides 10ms of audio data.
-  // |desired_frame_size_bytes| is the number of bytes of audio data
-  // GetPlayoutData() should return on success. It is also the required size of
-  // each recorded buffer used in DeliverRecordedData() calls.
   // |sample_rate| is the sample rate of the audio data. This is needed because
   // |device_buffer| delivers 10ms of data. Given the sample rate the number
-  // of samples can be calculated.
+  // of samples can be calculated. The |capacity| ensures that the buffer size
+  // can be increased to at least capacity without further reallocation.
   FineAudioBuffer(AudioDeviceBuffer* device_buffer,
-                  size_t desired_frame_size_bytes,
-                  int sample_rate);
+                  int sample_rate,
+                  size_t capacity);
   ~FineAudioBuffer();
 
   // Clears buffers and counters dealing with playour and/or recording.
   void ResetPlayout();
   void ResetRecord();
 
-  // |buffer| must be of equal or greater size than what is returned by
-  // RequiredBufferSize(). This is to avoid unnecessary memcpy.
-  void GetPlayoutData(int8_t* buffer);
+  // Copies audio samples into |audio_buffer| where number of requested
+  // elements is specified by |audio_buffer.size()|. The producer will always
+  // fill up the audio buffer and if no audio exists, the buffer will contain
+  // silence instead.
+  void GetPlayoutData(rtc::ArrayView<int8_t> audio_buffer);
 
-  // Consumes the audio data in |buffer| and sends it to the WebRTC layer in
-  // chunks of 10ms. The provided delay estimates in |playout_delay_ms| and
+  // Consumes the audio data in |audio_buffer| and sends it to the WebRTC layer
+  // in chunks of 10ms. The provided delay estimates in |playout_delay_ms| and
   // |record_delay_ms| are given to the AEC in the audio processing module.
   // They can be fixed values on most platforms and they are ignored if an
   // external (hardware/built-in) AEC is used.
-  // The size of |buffer| is given by |size_in_bytes| and must be equal to
-  // |desired_frame_size_bytes_|.
   // Example: buffer size is 5ms => call #1 stores 5ms of data, call #2 stores
   // 5ms of data and sends a total of 10ms to WebRTC and clears the intenal
   // cache. Call #3 restarts the scheme above.
-  void DeliverRecordedData(const int8_t* buffer,
-                           size_t size_in_bytes,
+  void DeliverRecordedData(rtc::ArrayView<const int8_t> audio_buffer,
                            int playout_delay_ms,
                            int record_delay_ms);
 
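The 5 ms example in the class comment can be made concrete with a toy model of the record path: each call appends its input to a cache, and a full 10 ms chunk is flushed as soon as one is available. RecordFifo below is a hypothetical analogue, not the WebRTC class:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Simplified model of DeliverRecordedData(): append whatever arrives and
    // flush to the consumer in exact 10 ms chunks.
    class RecordFifo {
     public:
      explicit RecordFifo(size_t bytes_per_10_ms)
          : bytes_per_10_ms_(bytes_per_10_ms) {}

      void DeliverRecordedData(const int8_t* data, size_t size) {
        cache_.insert(cache_.end(), data, data + size);
        // Consume in 10 ms chunks until not enough data is left.
        while (cache_.size() >= bytes_per_10_ms_) {
          std::cout << "Delivering " << bytes_per_10_ms_ << " bytes to the ADB\n";
          cache_.erase(cache_.begin(), cache_.begin() + bytes_per_10_ms_);
        }
      }

     private:
      const size_t bytes_per_10_ms_;
      std::vector<int8_t> cache_;
    };

    int main() {
      RecordFifo fifo(882);                 // 10 ms at 44.1 kHz, mono 16-bit PCM.
      std::vector<int8_t> five_ms(441, 0);  // 5 ms buffers.
      fifo.DeliverRecordedData(five_ms.data(), five_ms.size());  // Cached only.
      fifo.DeliverRecordedData(five_ms.data(), five_ms.size());  // Flushes 10 ms.
      return 0;
    }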
@@ -73,15 +72,14 @@ class FineAudioBuffer {
   // class and the owner must ensure that the pointer is valid during the life-
   // time of this object.
   AudioDeviceBuffer* const device_buffer_;
-  // Number of bytes delivered by GetPlayoutData() call and provided to
-  // DeliverRecordedData().
-  const size_t desired_frame_size_bytes_;
   // Sample rate in Hertz.
   const int sample_rate_;
   // Number of audio samples per 10ms.
   const size_t samples_per_10_ms_;
   // Number of audio bytes per 10ms.
   const size_t bytes_per_10_ms_;
   // Storage for output samples from which a consumer can read audio buffers
   // in any size using GetPlayoutData().
   rtc::BufferT<int8_t> playout_buffer_;
   // Storage for input samples that are about to be delivered to the WebRTC
   // ADB or remains from the last successful delivery of a 10ms audio buffer.
@@ -13,6 +13,7 @@
 #include <limits.h>
 #include <memory>
 
+#include "webrtc/base/array_view.h"
 #include "webrtc/modules/audio_device/mock_audio_device_buffer.h"
 #include "webrtc/test/gmock.h"
 #include "webrtc/test/gtest.h"
@@ -24,6 +25,9 @@ using ::testing::Return;
 
 namespace webrtc {
 
+const int kSampleRate = 44100;
+const int kSamplesPer10Ms = kSampleRate * 10 / 1000;
+
 // The fake audio data is 0,1,..SCHAR_MAX-1,0,1,... This is to make it easy
 // to detect errors. This function verifies that the buffers contain such data.
 // E.g. if there are two buffers of size 3, buffer 1 would contain 0,1,2 and
@@ -80,8 +84,7 @@ ACTION_P2(VerifyInputBuffer, iteration, samples_per_10_ms) {
   return 0;
 }
 
-void RunFineBufferTest(int sample_rate, int frame_size_in_samples) {
-  const int kSamplesPer10Ms = sample_rate * 10 / 1000;
+void RunFineBufferTest(int frame_size_in_samples) {
   const int kFrameSizeBytes =
       frame_size_in_samples * static_cast<int>(sizeof(int16_t));
   const int kNumberOfFrames = 5;
@@ -114,33 +117,29 @@ void RunFineBufferTest(int sample_rate, int frame_size_in_samples) {
       .Times(kNumberOfUpdateBufferCalls - 1)
       .WillRepeatedly(Return(kSamplesPer10Ms));
 
-  FineAudioBuffer fine_buffer(&audio_device_buffer, kFrameSizeBytes,
-                              sample_rate);
+  FineAudioBuffer fine_buffer(&audio_device_buffer, kSampleRate,
+                              kFrameSizeBytes);
+  std::unique_ptr<int8_t[]> out_buffer(new int8_t[kFrameSizeBytes]);
+  std::unique_ptr<int8_t[]> in_buffer(new int8_t[kFrameSizeBytes]);
 
-  std::unique_ptr<int8_t[]> out_buffer;
-  out_buffer.reset(new int8_t[kFrameSizeBytes]);
-  std::unique_ptr<int8_t[]> in_buffer;
-  in_buffer.reset(new int8_t[kFrameSizeBytes]);
   for (int i = 0; i < kNumberOfFrames; ++i) {
-    fine_buffer.GetPlayoutData(out_buffer.get());
+    fine_buffer.GetPlayoutData(
+        rtc::ArrayView<int8_t>(out_buffer.get(), kFrameSizeBytes));
     EXPECT_TRUE(VerifyBuffer(out_buffer.get(), i, kFrameSizeBytes));
     UpdateInputBuffer(in_buffer.get(), i, kFrameSizeBytes);
-    fine_buffer.DeliverRecordedData(in_buffer.get(), kFrameSizeBytes, 0, 0);
+    fine_buffer.DeliverRecordedData(
+        rtc::ArrayView<const int8_t>(in_buffer.get(), kFrameSizeBytes), 0, 0);
   }
 }
 
 TEST(FineBufferTest, BufferLessThan10ms) {
-  const int kSampleRate = 44100;
-  const int kSamplesPer10Ms = kSampleRate * 10 / 1000;
   const int kFrameSizeSamples = kSamplesPer10Ms - 50;
-  RunFineBufferTest(kSampleRate, kFrameSizeSamples);
+  RunFineBufferTest(kFrameSizeSamples);
 }
 
 TEST(FineBufferTest, GreaterThan10ms) {
-  const int kSampleRate = 44100;
-  const int kSamplesPer10Ms = kSampleRate * 10 / 1000;
   const int kFrameSizeSamples = kSamplesPer10Ms + 50;
-  RunFineBufferTest(kSampleRate, kFrameSizeSamples);
+  RunFineBufferTest(kFrameSizeSamples);
 }
 
 }  // namespace webrtc
@@ -260,10 +260,6 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
   // to WebRTC and the remaining part is stored.
   std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
 
-  // Extra audio buffer to be used by the playout side for rendering audio.
-  // The buffer size is given by FineAudioBuffer::RequiredBufferSizeBytes().
-  std::unique_ptr<int8_t[]> playout_audio_buffer_;
-
   // Provides a mechanism for encapsulating one or more buffers of audio data.
   // Only used on the recording side.
   AudioBufferList audio_record_buffer_list_;
@@ -15,6 +15,7 @@
 
 #include <cmath>
 
+#include "webrtc/base/array_view.h"
 #include "webrtc/base/atomicops.h"
 #include "webrtc/base/bind.h"
 #include "webrtc/base/checks.h"
@@ -69,6 +70,11 @@ enum AudioDeviceMessageType : uint32_t {
 using ios::CheckAndLogError;
 
 #if !defined(NDEBUG)
+// Returns true when the code runs on a device simulator.
+static bool DeviceIsSimulator() {
+  return ios::GetDeviceName() == "x86_64";
+}
+
 // Helper method that logs essential device information strings.
 static void LogDeviceInfo() {
   LOG(LS_INFO) << "LogDeviceInfo";
@@ -86,6 +92,10 @@ static void LogDeviceInfo() {
     && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0
     LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled();
 #endif
+#if TARGET_IPHONE_SIMULATOR
+    LOG(LS_INFO) << " TARGET_IPHONE_SIMULATOR is defined";
+#endif
+    LOG(LS_INFO) << " DeviceIsSimulator: " << DeviceIsSimulator();
   }
 }
 #endif  // !defined(NDEBUG)
@@ -395,7 +405,7 @@ OSStatus AudioDeviceIOS::OnDeliverRecordedData(
   RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample,
                num_frames);
   int8_t* data = static_cast<int8_t*>(audio_buffer->mData);
-  fine_audio_buffer_->DeliverRecordedData(data, size_in_bytes,
+  fine_audio_buffer_->DeliverRecordedData(rtc::ArrayView<const int8_t>(data, size_in_bytes),
                                           kFixedPlayoutDelayEstimate,
                                           kFixedRecordDelayEstimate);
   return noErr;
@@ -423,26 +433,11 @@ OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
     memset(destination, 0, size_in_bytes);
     return noErr;
   }
-  // Produce silence and log a warning message for the case when Core Audio is
-  // asking for an invalid number of audio frames. I don't expect this to happen
-  // but it is done as a safety measure to avoid bad audio if such as case would
-  // ever be triggered e.g. in combination with BT devices.
-  const size_t frames_per_buffer = playout_parameters_.frames_per_buffer();
-  if (num_frames != frames_per_buffer) {
-    RTCLogWarning(@"Expected %u frames but got %u",
-                  static_cast<unsigned int>(frames_per_buffer),
-                  static_cast<unsigned int>(num_frames));
-    *flags |= kAudioUnitRenderAction_OutputIsSilence;
-    memset(destination, 0, size_in_bytes);
-    return noErr;
-  }
 
   // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
-  // the native I/O audio unit) to a preallocated intermediate buffer and
-  // copy the result to the audio buffer in the |io_data| destination.
-  int8_t* source = playout_audio_buffer_.get();
-  fine_audio_buffer_->GetPlayoutData(source);
-  memcpy(destination, source, size_in_bytes);
+  // the native I/O audio unit) and copy the result to the audio buffer in the
+  // |io_data| destination.
+  fine_audio_buffer_->GetPlayoutData(rtc::ArrayView<int8_t>(destination, size_in_bytes));
   return noErr;
 }
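This hunk is where the redundant memcpy from the commit message disappears: decoded samples previously went into the intermediate playout_audio_buffer_ and were then copied into Core Audio's destination, whereas destination itself is now handed to GetPlayoutData() as a sized view. A trivial standalone illustration of the two shapes (ProduceAudio is a hypothetical producer, not WebRTC code):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Hypothetical producer that can fill any caller-provided region directly.
    static void ProduceAudio(int8_t* dst, size_t n) { std::memset(dst, 0, n); }

    int main() {
      std::vector<int8_t> core_audio_buffer(2048);

      // Old shape: produce into a staging buffer, then copy to the destination.
      std::vector<int8_t> staging(core_audio_buffer.size());
      ProduceAudio(staging.data(), staging.size());
      std::memcpy(core_audio_buffer.data(), staging.data(), staging.size());

      // New shape: produce straight into the destination; one copy fewer on
      // every render callback.
      ProduceAudio(core_audio_buffer.data(), core_audio_buffer.size());
      return 0;
    }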
@@ -632,13 +627,12 @@ void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
 
   // Create a modified audio buffer class which allows us to ask for,
   // or deliver, any number of samples (and not only multiple of 10ms) to match
-  // the native audio unit buffer size.
+  // the native audio unit buffer size. Use a reasonable capacity to avoid
+  // reallocations while audio is played to reduce risk of glitches.
   RTC_DCHECK(audio_device_buffer_);
-  const size_t buffer_size_in_bytes = playout_parameters_.GetBytesPerBuffer();
+  const size_t capacity_in_bytes = 2 * playout_parameters_.GetBytesPerBuffer();
   fine_audio_buffer_.reset(new FineAudioBuffer(
-      audio_device_buffer_, buffer_size_in_bytes,
-      playout_parameters_.sample_rate()));
-  playout_audio_buffer_.reset(new SInt8[buffer_size_in_bytes]);
+      audio_device_buffer_, playout_parameters_.sample_rate(), capacity_in_bytes));
 
   // Allocate AudioBuffers to be used as storage for the received audio.
   // The AudioBufferList structure works as a placeholder for the
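The 2x factor preallocates headroom: per the new header comment, constructing the buffer as rtc::BufferT<int8_t>(0, capacity) starts it empty but with capacity bytes of backing storage, so growth up to that point never reallocates on the real-time audio thread. A rough analogue using std::vector semantics (an assumption for illustration, not the rtc::BufferT implementation):

    #include <cstdint>
    #include <vector>

    int main() {
      const size_t bytes_per_buffer = 1024;          // Native I/O buffer size.
      const size_t capacity = 2 * bytes_per_buffer;  // Headroom, as in the code above.

      // Analogue of rtc::BufferT<int8_t>(0, capacity): empty, storage reserved.
      std::vector<int8_t> playout_buffer;
      playout_buffer.reserve(capacity);

      // Appends up to |capacity| bytes reuse the reserved storage, so no
      // allocation happens on the audio thread while playout is running.
      playout_buffer.insert(playout_buffer.end(), bytes_per_buffer, 0);
      playout_buffer.insert(playout_buffer.end(), bytes_per_buffer, 0);
      return 0;
    }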