Simplifies FineAudioBuffer by using rtc::Buffer

BUG=NONE

Review-Url: https://codereview.webrtc.org/2715963002
Cr-Commit-Position: refs/heads/master@{#16864}
Author: henrika
Date: 2017-02-27 05:14:17 -08:00
Committed by: Commit bot
Parent: 686aa37382
Commit: b3ebc1aa59
5 changed files with 38 additions and 88 deletions
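The CL replaces FineAudioBuffer's fixed 10 ms playout cache (a raw int8_t array plus start/size counters) with an rtc::BufferT<int8_t> that grows on demand. A minimal sketch of that pattern, using only the rtc::BufferT calls visible in the diff below (AppendData with a writer callback, data(), size(), SetSize()); Produce10ms() is a hypothetical stand-in for AudioDeviceBuffer, and the include paths are an assumption based on the tree layout at the time:

#include <string.h>
#include <algorithm>
#include <cstdint>

#include "webrtc/base/array_view.h"
#include "webrtc/base/buffer.h"

// Hypothetical 10 ms producer standing in for AudioDeviceBuffer; fills the
// chunk with silence and reports how many bytes it wrote.
size_t Produce10ms(rtc::ArrayView<int8_t> chunk) {
  std::fill(chunk.begin(), chunk.end(), 0);
  return chunk.size();
}

// Accumulate audio in 10 ms chunks until |frame_bytes| are buffered, hand out
// exactly |frame_bytes|, and keep any remainder in the cache for the next call.
void PullFrame(rtc::BufferT<int8_t>* cache, int8_t* out, size_t frame_bytes,
               size_t bytes_per_10_ms) {
  while (cache->size() < frame_bytes) {
    cache->AppendData(bytes_per_10_ms, [](rtc::ArrayView<int8_t> chunk) {
      return Produce10ms(chunk);
    });
  }
  memcpy(out, cache->data(), frame_bytes);
  memmove(cache->data(), cache->data() + frame_bytes,
          cache->size() - frame_bytes);
  cache->SetSize(cache->size() - frame_bytes);
}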


@@ -205,19 +205,16 @@ void OpenSLESPlayer::AllocateDataBuffers() {
// recommended to construct audio buffers so that they contain an exact
// multiple of this number. If so, callbacks will occur at regular intervals,
// which reduces jitter.
ALOGD("native buffer size: %" PRIuS, audio_parameters_.GetBytesPerBuffer());
const size_t buffer_size_in_bytes = audio_parameters_.GetBytesPerBuffer();
ALOGD("native buffer size: %" PRIuS, buffer_size_in_bytes);
ALOGD("native buffer size in ms: %.2f",
audio_parameters_.GetBufferSizeInMilliseconds());
fine_audio_buffer_.reset(new FineAudioBuffer(
audio_device_buffer_, audio_parameters_.GetBytesPerBuffer(),
audio_parameters_.sample_rate()));
// Each buffer must be of this size to avoid unnecessary memcpy while caching
// data between successive callbacks.
const size_t required_buffer_size =
fine_audio_buffer_->RequiredPlayoutBufferSizeBytes();
ALOGD("required buffer size: %" PRIuS, required_buffer_size);
fine_audio_buffer_.reset(
new FineAudioBuffer(audio_device_buffer_, buffer_size_in_bytes,
audio_parameters_.sample_rate()));
// Allocate memory for audio buffers.
for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
audio_buffers_[i].reset(new SLint8[required_buffer_size]);
audio_buffers_[i].reset(new SLint8[buffer_size_in_bytes]);
}
}
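With RequiredPlayoutBufferSizeBytes() removed, each OpenSL ES buffer is now sized to exactly one native buffer; the 10 ms headroom lives inside FineAudioBuffer's rtc::Buffer instead. Illustrative arithmetic, assuming 48 kHz mono 16-bit audio and a 10 ms native buffer (example numbers, not taken from the CL):

#include <cstddef>
#include <cstdint>

constexpr int kSampleRate = 48000;
constexpr size_t kSamplesPer10Ms = kSampleRate * 10 / 1000;          // 480 samples
constexpr size_t kBytesPer10Ms = kSamplesPer10Ms * sizeof(int16_t);  // 960 bytes
constexpr size_t kBytesPerBuffer = kBytesPer10Ms;  // native buffer happens to be 10 ms here
// Before this CL: frame size plus 10 ms of cache headroom per OpenSL ES buffer.
constexpr size_t kOldBufferSize = kBytesPerBuffer + kBytesPer10Ms;  // 1920 bytes
// After this CL: exactly one native buffer per OpenSL ES buffer.
constexpr size_t kNewBufferSize = kBytesPerBuffer;  // 960 bytes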


@@ -27,25 +27,14 @@ FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* device_buffer,
desired_frame_size_bytes_(desired_frame_size_bytes),
sample_rate_(sample_rate),
samples_per_10_ms_(static_cast<size_t>(sample_rate_ * 10 / 1000)),
bytes_per_10_ms_(samples_per_10_ms_ * sizeof(int16_t)),
playout_cached_buffer_start_(0),
playout_cached_bytes_(0) {
playout_cache_buffer_.reset(new int8_t[bytes_per_10_ms_]);
bytes_per_10_ms_(samples_per_10_ms_ * sizeof(int16_t)) {
LOG(INFO) << "desired_frame_size_bytes:" << desired_frame_size_bytes;
}
FineAudioBuffer::~FineAudioBuffer() {}
size_t FineAudioBuffer::RequiredPlayoutBufferSizeBytes() {
// It is possible that we store the desired frame size - 1 samples. Since new
// audio frames are pulled in chunks of 10ms we will need a buffer that can
// hold desired_frame_size - 1 + 10ms of data. We omit the - 1.
return desired_frame_size_bytes_ + bytes_per_10_ms_;
}
void FineAudioBuffer::ResetPlayout() {
playout_cached_buffer_start_ = 0;
playout_cached_bytes_ = 0;
memset(playout_cache_buffer_.get(), 0, bytes_per_10_ms_);
playout_buffer_.Clear();
}
void FineAudioBuffer::ResetRecord() {
@@ -53,50 +42,30 @@ void FineAudioBuffer::ResetRecord() {
}
void FineAudioBuffer::GetPlayoutData(int8_t* buffer) {
if (desired_frame_size_bytes_ <= playout_cached_bytes_) {
memcpy(buffer, &playout_cache_buffer_.get()[playout_cached_buffer_start_],
desired_frame_size_bytes_);
playout_cached_buffer_start_ += desired_frame_size_bytes_;
playout_cached_bytes_ -= desired_frame_size_bytes_;
RTC_CHECK_LT(playout_cached_buffer_start_ + playout_cached_bytes_,
bytes_per_10_ms_);
return;
}
memcpy(buffer, &playout_cache_buffer_.get()[playout_cached_buffer_start_],
playout_cached_bytes_);
// Push another n*10ms of audio to |buffer|. n > 1 if
// |desired_frame_size_bytes_| is greater than 10ms of audio. Note that we
// write the audio after the cached bytes copied earlier.
int8_t* unwritten_buffer = &buffer[playout_cached_bytes_];
int bytes_left =
static_cast<int>(desired_frame_size_bytes_ - playout_cached_bytes_);
// Ceiling of integer division: 1 + ((x - 1) / y)
size_t number_of_requests = 1 + (bytes_left - 1) / (bytes_per_10_ms_);
for (size_t i = 0; i < number_of_requests; ++i) {
const size_t num_bytes = desired_frame_size_bytes_;
// Ask WebRTC for new data in chunks of 10ms until we have enough to
// fulfill the request. It is possible that the buffer already contains
// enough samples from the last round.
while (playout_buffer_.size() < num_bytes) {
// Get 10ms decoded audio from WebRTC.
device_buffer_->RequestPlayoutData(samples_per_10_ms_);
int num_out = device_buffer_->GetPlayoutData(unwritten_buffer);
if (static_cast<size_t>(num_out) != samples_per_10_ms_) {
RTC_CHECK_EQ(num_out, 0);
playout_cached_bytes_ = 0;
return;
}
unwritten_buffer += bytes_per_10_ms_;
RTC_CHECK_GE(bytes_left, 0);
bytes_left -= static_cast<int>(bytes_per_10_ms_);
// Append |bytes_per_10_ms_| elements to the end of the buffer.
const size_t bytes_written = playout_buffer_.AppendData(
bytes_per_10_ms_, [&](rtc::ArrayView<int8_t> buf) {
const size_t samples_per_channel =
device_buffer_->GetPlayoutData(buf.data());
// TODO(henrika): this class is only used on mobile devices and is
// currently limited to mono. Modifications are needed for stereo.
return sizeof(int16_t) * samples_per_channel;
});
RTC_DCHECK_EQ(bytes_per_10_ms_, bytes_written);
}
RTC_CHECK_LE(bytes_left, 0);
// Put the samples that were written to |buffer| but are not used in the
// cache.
size_t cache_location = desired_frame_size_bytes_;
int8_t* cache_ptr = &buffer[cache_location];
playout_cached_bytes_ = number_of_requests * bytes_per_10_ms_ -
(desired_frame_size_bytes_ - playout_cached_bytes_);
// If playout_cached_bytes_ is larger than the cache buffer, uninitialized
// memory will be read.
RTC_CHECK_LE(playout_cached_bytes_, bytes_per_10_ms_);
RTC_CHECK_EQ(-bytes_left, playout_cached_bytes_);
playout_cached_buffer_start_ = 0;
memcpy(playout_cache_buffer_.get(), cache_ptr, playout_cached_bytes_);
// Provide the requested number of bytes to the consumer.
memcpy(buffer, playout_buffer_.data(), num_bytes);
// Move remaining samples to start of buffer to prepare for next round.
memmove(playout_buffer_.data(), playout_buffer_.data() + num_bytes,
playout_buffer_.size() - num_bytes);
playout_buffer_.SetSize(playout_buffer_.size() - num_bytes);
}
void FineAudioBuffer::DeliverRecordedData(const int8_t* buffer,
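Because removed and added lines are interleaved in the hunk above, here is the new GetPlayoutData() with the added and unchanged lines assembled; this is a reconstruction from the diff, not an authoritative listing:

void FineAudioBuffer::GetPlayoutData(int8_t* buffer) {
  const size_t num_bytes = desired_frame_size_bytes_;
  // Ask WebRTC for new data in chunks of 10ms until we have enough to
  // fulfill the request. It is possible that the buffer already contains
  // enough samples from the last round.
  while (playout_buffer_.size() < num_bytes) {
    // Get 10ms decoded audio from WebRTC.
    device_buffer_->RequestPlayoutData(samples_per_10_ms_);
    // Append |bytes_per_10_ms_| elements to the end of the buffer.
    const size_t bytes_written = playout_buffer_.AppendData(
        bytes_per_10_ms_, [&](rtc::ArrayView<int8_t> buf) {
          const size_t samples_per_channel =
              device_buffer_->GetPlayoutData(buf.data());
          // TODO(henrika): this class is only used on mobile devices and is
          // currently limited to mono. Modifications are needed for stereo.
          return sizeof(int16_t) * samples_per_channel;
        });
    RTC_DCHECK_EQ(bytes_per_10_ms_, bytes_written);
  }
  // Provide the requested number of bytes to the consumer.
  memcpy(buffer, playout_buffer_.data(), num_bytes);
  // Move remaining samples to start of buffer to prepare for next round.
  memmove(playout_buffer_.data(), playout_buffer_.data() + num_bytes,
          playout_buffer_.size() - num_bytes);
  playout_buffer_.SetSize(playout_buffer_.size() - num_bytes);
}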


@@ -42,10 +42,6 @@ class FineAudioBuffer {
int sample_rate);
~FineAudioBuffer();
// Returns the required size of |buffer| when calling GetPlayoutData(). If
// the buffer is smaller memory trampling will happen.
size_t RequiredPlayoutBufferSizeBytes();
// Clears buffers and counters dealing with playout and/or recording.
void ResetPlayout();
void ResetRecord();
@@ -60,8 +56,7 @@ class FineAudioBuffer {
// They can be fixed values on most platforms and they are ignored if an
// external (hardware/built-in) AEC is used.
// The size of |buffer| is given by |size_in_bytes| and must be equal to
// |desired_frame_size_bytes_|. A RTC_CHECK will be hit if this is not the
// case.
// |desired_frame_size_bytes_|.
// Example: buffer size is 5ms => call #1 stores 5ms of data, call #2 stores
// 5ms of data and sends a total of 10ms to WebRTC and clears the internal
// cache. Call #3 restarts the scheme above.
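A toy illustration of the 5 ms example above, reusing the rtc::BufferT pattern from the playout path; Deliver10msToWebRtc() is a hypothetical stand-in for the WebRTC audio device buffer, and this sketch is not the actual FineAudioBuffer record path:

#include <string.h>
#include <cstdint>

#include "webrtc/base/buffer.h"  // include path as of this CL (assumption)

// Hypothetical sink; the real code hands the chunk to AudioDeviceBuffer.
void Deliver10msToWebRtc(const int8_t* data, size_t bytes) {}

// Cache recorded audio until a full 10 ms chunk is available, then flush it.
// With 5 ms per call, every second call delivers 10 ms and empties the cache.
void OnRecordedData(rtc::BufferT<int8_t>* cache, const int8_t* data,
                    size_t bytes, size_t bytes_per_10_ms) {
  cache->AppendData(data, bytes);
  while (cache->size() >= bytes_per_10_ms) {
    Deliver10msToWebRtc(cache->data(), bytes_per_10_ms);
    memmove(cache->data(), cache->data() + bytes_per_10_ms,
            cache->size() - bytes_per_10_ms);
    cache->SetSize(cache->size() - bytes_per_10_ms);
  }
}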
@@ -87,12 +82,7 @@ class FineAudioBuffer {
const size_t samples_per_10_ms_;
// Number of audio bytes per 10ms.
const size_t bytes_per_10_ms_;
// Storage for output samples that are not yet asked for.
std::unique_ptr<int8_t[]> playout_cache_buffer_;
// Location of first unread output sample.
size_t playout_cached_buffer_start_;
// Number of bytes stored in output (contain samples to be played out) cache.
size_t playout_cached_bytes_;
rtc::BufferT<int8_t> playout_buffer_;
// Storage for input samples that are about to be delivered to the WebRTC
// ADB or remains from the last successful delivery of a 10ms audio buffer.
rtc::BufferT<int8_t> record_buffer_;


@@ -118,7 +118,7 @@ void RunFineBufferTest(int sample_rate, int frame_size_in_samples) {
sample_rate);
std::unique_ptr<int8_t[]> out_buffer;
out_buffer.reset(new int8_t[fine_buffer.RequiredPlayoutBufferSizeBytes()]);
out_buffer.reset(new int8_t[kFrameSizeBytes]);
std::unique_ptr<int8_t[]> in_buffer;
in_buffer.reset(new int8_t[kFrameSizeBytes]);
for (int i = 0; i < kNumberOfFrames; ++i) {


@@ -627,17 +627,11 @@ void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
// or deliver, any number of samples (and not only multiple of 10ms) to match
// the native audio unit buffer size.
RTC_DCHECK(audio_device_buffer_);
const size_t buffer_size_in_bytes = playout_parameters_.GetBytesPerBuffer();
fine_audio_buffer_.reset(new FineAudioBuffer(
audio_device_buffer_, playout_parameters_.GetBytesPerBuffer(),
audio_device_buffer_, buffer_size_in_bytes,
playout_parameters_.sample_rate()));
// The extra/temporary playoutbuffer must be of this size to avoid
// unnecessary memcpy while caching data between successive callbacks.
const int required_playout_buffer_size =
fine_audio_buffer_->RequiredPlayoutBufferSizeBytes();
LOG(LS_INFO) << " required playout buffer size: "
<< required_playout_buffer_size;
playout_audio_buffer_.reset(new SInt8[required_playout_buffer_size]);
playout_audio_buffer_.reset(new SInt8[buffer_size_in_bytes]);
// Allocate AudioBuffers to be used as storage for the received audio.
// The AudioBufferList structure works as a placeholder for the