FineAudioBuffer now uses 16-bit audio samples to match the AudioDeviceBuffer.

This work is also done as a preparation for adding stereo support to the
FineAudioBuffer.

Review hints: the actual changes are in modules/audio_device/fine_audio_buffer.{h,cc};
the rest is just adaptations to match these changes. We do have a forked ADM
today, hence some changes are duplicated. The changes have been verified on all
affected platforms.

Bug: webrtc:6560
Change-Id: I413af41c43809f61455c45ad383fc4b1c65e1fa1
Reviewed-on: https://webrtc-review.googlesource.com/70781
Commit-Queue: Henrik Andreasson <henrika@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#22938}
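For orientation before the hunks: the core of the change is that both FineAudioBuffer entry points now traffic in 16-bit samples rather than raw bytes. Condensed from the fine_audio_buffer.h hunk below:

// Before: view sizes count bytes.
void GetPlayoutData(rtc::ArrayView<int8_t> audio_buffer, int playout_delay_ms);
void DeliverRecordedData(rtc::ArrayView<const int8_t> audio_buffer,
                         int record_delay_ms);

// After: view sizes count 16-bit samples, matching AudioDeviceBuffer.
void GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer, int playout_delay_ms);
void DeliverRecordedData(rtc::ArrayView<const int16_t> audio_buffer,
                         int record_delay_ms);

Callers that previously computed byte counts now pass frame counts, and sizeof(int16_t) appears only at raw-memory boundaries (memset/memcpy/memmove, the OpenSL ES Enqueue() call, and CoreAudio's mDataByteSize).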
@@ -122,7 +122,7 @@ void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
   // size per callback used by AAudio. Use an initial capacity of 50ms to ensure
   // that the buffer can cache old data and at the same time be prepared for
   // increased burst size in AAudio if buffer underruns are detected.
-  const size_t capacity = 5 * audio_parameters.GetBytesPer10msBuffer();
+  const size_t capacity = 5 * audio_parameters.frames_per_10ms_buffer();
   fine_audio_buffer_.reset(new FineAudioBuffer(
       audio_device_buffer_, audio_parameters.sample_rate(), capacity));
 }
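The capacity argument changes unit but not meaning; a worked example, assuming a 48 kHz mono stream (the rate is an assumption for illustration):

// frames_per_10ms_buffer() = 48000 / 100 = 480 samples
// old capacity: 5 * GetBytesPer10msBuffer()  = 5 * 480 * sizeof(int16_t) = 4800 bytes
// new capacity: 5 * frames_per_10ms_buffer() = 5 * 480                   = 2400 samples
// Both describe the same 50 ms of 16-bit mono audio.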
@@ -184,16 +184,16 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,

   // Read audio data from the WebRTC source using the FineAudioBuffer object
   // and write that data into |audio_data| to be played out by AAudio.
-  const size_t num_bytes =
-      sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
   // Prime output with zeros during a short initial phase to avoid distortion.
   // TODO(henrika): do more work to figure out of if the initial forced silence
   // period is really needed.
   if (aaudio_.frames_written() < 50 * aaudio_.frames_per_burst()) {
+    const size_t num_bytes =
+        sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
     memset(audio_data, 0, num_bytes);
   } else {
     fine_audio_buffer_->GetPlayoutData(
-        rtc::ArrayView<int8_t>(static_cast<int8_t*>(audio_data), num_bytes),
+        rtc::MakeArrayView(static_cast<int16_t*>(audio_data), num_frames),
         static_cast<int>(latency_millis_ + 0.5));
   }

@@ -180,11 +180,8 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
   }
   // Copy recorded audio in |audio_data| to the WebRTC sink using the
   // FineAudioBuffer object.
-  const size_t num_bytes =
-      sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
   fine_audio_buffer_->DeliverRecordedData(
-      rtc::ArrayView<const int8_t>(static_cast<const int8_t*>(audio_data),
-                                   num_bytes),
+      rtc::MakeArrayView(static_cast<const int16_t*>(audio_data), num_frames),
       static_cast<int>(latency_millis_ + 0.5));

   return AAUDIO_CALLBACK_RESULT_CONTINUE;
@@ -212,16 +212,16 @@ void OpenSLESPlayer::AllocateDataBuffers() {
   // recommended to construct audio buffers so that they contain an exact
   // multiple of this number. If so, callbacks will occur at regular intervals,
   // which reduces jitter.
-  const size_t buffer_size_in_bytes = audio_parameters_.GetBytesPerBuffer();
-  ALOGD("native buffer size: %" PRIuS, buffer_size_in_bytes);
+  const size_t buffer_size_in_samples = audio_parameters_.frames_per_buffer();
+  ALOGD("native buffer size: %" PRIuS, buffer_size_in_samples);
   ALOGD("native buffer size in ms: %.2f",
         audio_parameters_.GetBufferSizeInMilliseconds());
-  fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_,
-                                               audio_parameters_.sample_rate(),
-                                               2 * buffer_size_in_bytes));
+  fine_audio_buffer_.reset(
+      new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(),
+                          2 * audio_parameters_.frames_per_buffer()));
   // Allocated memory for audio buffers.
   for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
-    audio_buffers_[i].reset(new SLint8[buffer_size_in_bytes]);
+    audio_buffers_[i].reset(new SLint16[buffer_size_in_samples]);
   }
 }

@@ -393,13 +393,14 @@ void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
     ALOGW("Bad OpenSL ES playout timing, dT=%u [ms]", diff);
   }
   last_play_time_ = current_time;
-  SLint8* audio_ptr = audio_buffers_[buffer_index_].get();
+  SLint8* audio_ptr8 =
+      reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get());
   if (silence) {
     RTC_DCHECK(thread_checker_.CalledOnValidThread());
     // Avoid aquiring real audio data from WebRTC and fill the buffer with
     // zeros instead. Used to prime the buffer with silence and to avoid asking
     // for audio data from two different threads.
-    memset(audio_ptr, 0, audio_parameters_.GetBytesPerBuffer());
+    memset(audio_ptr8, 0, audio_parameters_.GetBytesPerBuffer());
   } else {
     RTC_DCHECK(thread_checker_opensles_.CalledOnValidThread());
     // Read audio data from the WebRTC source using the FineAudioBuffer object
@@ -407,13 +408,13 @@ void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
     // OpenSL ES. Use hardcoded delay estimate since OpenSL ES does not support
     // delay estimation.
     fine_audio_buffer_->GetPlayoutData(
-        rtc::ArrayView<SLint8>(audio_ptr,
-                               audio_parameters_.GetBytesPerBuffer()),
+        rtc::ArrayView<int16_t>(audio_buffers_[buffer_index_].get(),
+                                audio_parameters_.frames_per_buffer()),
         25);
   }
   // Enqueue the decoded audio buffer for playback.
   SLresult err = (*simple_buffer_queue_)
-                     ->Enqueue(simple_buffer_queue_, audio_ptr,
+                     ->Enqueue(simple_buffer_queue_, audio_ptr8,
                                audio_parameters_.GetBytesPerBuffer());
   if (SL_RESULT_SUCCESS != err) {
     ALOGE("Enqueue failed: %d", err);
@@ -143,9 +143,8 @@ class OpenSLESPlayer {
   SLDataFormat_PCM pcm_format_;

   // Queue of audio buffers to be used by the player object for rendering
-  // audio. They will be used in a Round-robin way and the size of each buffer
-  // is given by FineAudioBuffer::RequiredBufferSizeBytes().
-  std::unique_ptr<SLint8[]> audio_buffers_[kNumOfOpenSLESBuffers];
+  // audio.
+  std::unique_ptr<SLint16[]> audio_buffers_[kNumOfOpenSLESBuffers];

   // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
   // in chunks of 10ms. It then allows for this data to be pulled in
@@ -344,12 +344,12 @@ void OpenSLESRecorder::AllocateDataBuffers() {
   RTC_DCHECK(audio_device_buffer_);
   fine_audio_buffer_.reset(
       new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(),
-                          2 * audio_parameters_.GetBytesPerBuffer()));
+                          2 * audio_parameters_.frames_per_buffer()));
   // Allocate queue of audio buffers that stores recorded audio samples.
-  const int data_size_bytes = audio_parameters_.GetBytesPerBuffer();
-  audio_buffers_.reset(new std::unique_ptr<SLint8[]>[kNumOfOpenSLESBuffers]);
+  const int data_size_samples = audio_parameters_.frames_per_buffer();
+  audio_buffers_.reset(new std::unique_ptr<SLint16[]>[kNumOfOpenSLESBuffers]);
   for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
-    audio_buffers_[i].reset(new SLint8[data_size_bytes]);
+    audio_buffers_[i].reset(new SLint16[data_size_samples]);
   }
 }

@@ -374,12 +374,12 @@ void OpenSLESRecorder::ReadBufferQueue() {
   // since there is no support to turn off built-in EC in combination with
   // OpenSL ES anyhow. Hence, as is, the WebRTC based AEC (which would use
   // these estimates) will never be active.
-  const size_t size_in_bytes =
-      static_cast<size_t>(audio_parameters_.GetBytesPerBuffer());
-  const int8_t* data =
-      static_cast<const int8_t*>(audio_buffers_[buffer_index_].get());
+  const size_t size_in_samples =
+      static_cast<size_t>(audio_parameters_.frames_per_buffer());
   fine_audio_buffer_->DeliverRecordedData(
-      rtc::ArrayView<const int8_t>(data, size_in_bytes), 25);
+      rtc::ArrayView<const int16_t>(audio_buffers_[buffer_index_].get(),
+                                    size_in_samples),
+      25);
   // Enqueue the utilized audio buffer and use if for recording again.
   EnqueueAudioBuffer();
 }
@@ -387,8 +387,10 @@ void OpenSLESRecorder::ReadBufferQueue() {
 bool OpenSLESRecorder::EnqueueAudioBuffer() {
   SLresult err =
       (*simple_buffer_queue_)
-          ->Enqueue(simple_buffer_queue_, audio_buffers_[buffer_index_].get(),
-                    audio_parameters_.GetBytesPerBuffer());
+          ->Enqueue(
+              simple_buffer_queue_,
+              reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get()),
+              audio_parameters_.GetBytesPerBuffer());
   if (SL_RESULT_SUCCESS != err) {
     ALOGE("Enqueue failed: %s", GetSLErrorString(err));
     return false;
@@ -176,9 +176,9 @@ class OpenSLESRecorder {

   // Queue of audio buffers to be used by the recorder object for capturing
   // audio. They will be used in a Round-robin way and the size of each buffer
-  // is given by AudioParameters::GetBytesPerBuffer(), i.e., it corresponds to
+  // is given by AudioParameters::frames_per_buffer(), i.e., it corresponds to
   // the native OpenSL ES buffer size.
-  std::unique_ptr<std::unique_ptr<SLint8[]>[]> audio_buffers_;
+  std::unique_ptr<std::unique_ptr<SLint16[]>[]> audio_buffers_;

   // Keeps track of active audio buffer 'n' in the audio_buffers_[n] queue.
   // Example (kNumOfOpenSLESBuffers = 2): counts 0, 1, 0, 1, ...
@@ -26,7 +26,6 @@ FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* device_buffer,
     : device_buffer_(device_buffer),
       sample_rate_(sample_rate),
      samples_per_10_ms_(static_cast<size_t>(sample_rate_ * 10 / 1000)),
-      bytes_per_10_ms_(samples_per_10_ms_ * sizeof(int16_t)),
       playout_buffer_(0, capacity),
       record_buffer_(0, capacity) {
   RTC_LOG(INFO) << "samples_per_10_ms_: " << samples_per_10_ms_;
@@ -42,52 +41,53 @@ void FineAudioBuffer::ResetRecord() {
   record_buffer_.Clear();
 }

-void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int8_t> audio_buffer,
+void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
                                      int playout_delay_ms) {
   // Ask WebRTC for new data in chunks of 10ms until we have enough to
   // fulfill the request. It is possible that the buffer already contains
   // enough samples from the last round.
-  const size_t num_bytes = audio_buffer.size();
-  while (playout_buffer_.size() < num_bytes) {
+  while (playout_buffer_.size() < audio_buffer.size()) {
     // Get 10ms decoded audio from WebRTC.
     device_buffer_->RequestPlayoutData(samples_per_10_ms_);
     // Append |bytes_per_10_ms_| elements to the end of the buffer.
-    const size_t bytes_written = playout_buffer_.AppendData(
-        bytes_per_10_ms_, [&](rtc::ArrayView<int8_t> buf) {
+    const size_t samples_written = playout_buffer_.AppendData(
+        samples_per_10_ms_, [&](rtc::ArrayView<int16_t> buf) {
          const size_t samples_per_channel =
              device_buffer_->GetPlayoutData(buf.data());
          // TODO(henrika): this class is only used on mobile devices and is
          // currently limited to mono. Modifications are needed for stereo.
-         return sizeof(int16_t) * samples_per_channel;
+         return samples_per_channel;
        });
-    RTC_DCHECK_EQ(bytes_per_10_ms_, bytes_written);
+    RTC_DCHECK_EQ(samples_per_10_ms_, samples_written);
   }

+  const size_t num_bytes = audio_buffer.size() * sizeof(int16_t);
   // Provide the requested number of bytes to the consumer.
   memcpy(audio_buffer.data(), playout_buffer_.data(), num_bytes);
   // Move remaining samples to start of buffer to prepare for next round.
-  memmove(playout_buffer_.data(), playout_buffer_.data() + num_bytes,
-          playout_buffer_.size() - num_bytes);
-  playout_buffer_.SetSize(playout_buffer_.size() - num_bytes);
+  memmove(playout_buffer_.data(), playout_buffer_.data() + audio_buffer.size(),
+          (playout_buffer_.size() - audio_buffer.size()) * sizeof(int16_t));
+  playout_buffer_.SetSize(playout_buffer_.size() - audio_buffer.size());
   // Cache playout latency for usage in DeliverRecordedData();
   playout_delay_ms_ = playout_delay_ms;
 }

 void FineAudioBuffer::DeliverRecordedData(
-    rtc::ArrayView<const int8_t> audio_buffer,
+    rtc::ArrayView<const int16_t> audio_buffer,
     int record_delay_ms) {
   // Always append new data and grow the buffer if needed.
   record_buffer_.AppendData(audio_buffer.data(), audio_buffer.size());
   // Consume samples from buffer in chunks of 10ms until there is not
-  // enough data left. The number of remaining bytes in the cache is given by
+  // enough data left. The number of remaining samples in the cache is given by
   // the new size of the buffer.
-  while (record_buffer_.size() >= bytes_per_10_ms_) {
+  while (record_buffer_.size() >= samples_per_10_ms_) {
     device_buffer_->SetRecordedBuffer(record_buffer_.data(),
                                       samples_per_10_ms_);
     device_buffer_->SetVQEData(playout_delay_ms_, record_delay_ms);
     device_buffer_->DeliverRecordedData();
-    memmove(record_buffer_.data(), record_buffer_.data() + bytes_per_10_ms_,
-            record_buffer_.size() - bytes_per_10_ms_);
-    record_buffer_.SetSize(record_buffer_.size() - bytes_per_10_ms_);
+    memmove(record_buffer_.data(), record_buffer_.data() + samples_per_10_ms_,
+            (record_buffer_.size() - samples_per_10_ms_) * sizeof(int16_t));
+    record_buffer_.SetSize(record_buffer_.size() - samples_per_10_ms_);
   }
 }

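Inside FineAudioBuffer, rtc::BufferT<int16_t>::size() now counts samples, so byte quantities survive only where memcpy/memmove take lengths in bytes. A worked example with illustrative numbers (a 480-sample request against a 720-sample cache):

// playout_buffer_ holds 720 samples; audio_buffer.size() is 480.
// memcpy hands out num_bytes = 480 * sizeof(int16_t) = 960 bytes.
// memmove compacts the rest: the source offset 480 is in samples
// (int16_t pointer arithmetic), while the length is
// (720 - 480) * sizeof(int16_t) = 480 bytes.
// SetSize(720 - 480) leaves 240 samples cached for the next request.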
@@ -21,12 +21,13 @@ namespace webrtc {

 class AudioDeviceBuffer;

-// FineAudioBuffer takes an AudioDeviceBuffer (ADB) which deals with audio data
-// corresponding to 10ms of data. It then allows for this data to be pulled in
-// a finer or coarser granularity. I.e. interacting with this class instead of
-// directly with the AudioDeviceBuffer one can ask for any number of audio data
-// samples. This class also ensures that audio data can be delivered to the ADB
-// in 10ms chunks when the size of the provided audio buffers differs from 10ms.
+// FineAudioBuffer takes an AudioDeviceBuffer (ADB) which deals with 16-bit PCM
+// audio samples corresponding to 10ms of data. It then allows for this data
+// to be pulled in a finer or coarser granularity. I.e. interacting with this
+// class instead of directly with the AudioDeviceBuffer one can ask for any
+// number of audio data samples. This class also ensures that audio data can be
+// delivered to the ADB in 10ms chunks when the size of the provided audio
+// buffers differs from 10ms.
 // As an example: calling DeliverRecordedData() with 5ms buffers will deliver
 // accumulated 10ms worth of data to the ADB every second call.
 // TODO(henrika): add support for stereo when mobile platforms need it.
@@ -42,7 +43,7 @@ class FineAudioBuffer {
                   size_t capacity);
   ~FineAudioBuffer();

-  // Clears buffers and counters dealing with playour and/or recording.
+  // Clears buffers and counters dealing with playout and/or recording.
   void ResetPlayout();
   void ResetRecord();

@@ -52,7 +53,7 @@ class FineAudioBuffer {
   // silence instead. The provided delay estimate in |playout_delay_ms| should
   // contain an estime of the latency between when an audio frame is read from
   // WebRTC and when it is played out on the speaker.
-  void GetPlayoutData(rtc::ArrayView<int8_t> audio_buffer,
+  void GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
                       int playout_delay_ms);

   // Consumes the audio data in |audio_buffer| and sends it to the WebRTC layer
@@ -62,9 +63,9 @@ class FineAudioBuffer {
   // They can be fixed values on most platforms and they are ignored if an
   // external (hardware/built-in) AEC is used.
   // Example: buffer size is 5ms => call #1 stores 5ms of data, call #2 stores
-  // 5ms of data and sends a total of 10ms to WebRTC and clears the intenal
+  // 5ms of data and sends a total of 10ms to WebRTC and clears the internal
   // cache. Call #3 restarts the scheme above.
-  void DeliverRecordedData(rtc::ArrayView<const int8_t> audio_buffer,
+  void DeliverRecordedData(rtc::ArrayView<const int16_t> audio_buffer,
                            int record_delay_ms);

  private:
@@ -79,14 +80,12 @@ class FineAudioBuffer {
   const int sample_rate_;
   // Number of audio samples per 10ms.
   const size_t samples_per_10_ms_;
-  // Number of audio bytes per 10ms.
-  const size_t bytes_per_10_ms_;
   // Storage for output samples from which a consumer can read audio buffers
   // in any size using GetPlayoutData().
-  rtc::BufferT<int8_t> playout_buffer_;
+  rtc::BufferT<int16_t> playout_buffer_;
   // Storage for input samples that are about to be delivered to the WebRTC
   // ADB or remains from the last successful delivery of a 10ms audio buffer.
-  rtc::BufferT<int8_t> record_buffer_;
+  rtc::BufferT<int16_t> record_buffer_;
   // Contains latest delay estimate given to GetPlayoutData().
   int playout_delay_ms_ = 0;
 };
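With the header as above, a platform layer drives the class with sample-counted views on both paths. A minimal usage sketch; the names and values here (adb, the 441-sample callback size, the 25 ms delays) are illustrative assumptions, not taken from the patch:

constexpr int kSampleRate = 48000;          // assumed device rate
constexpr size_t kFramesPerCallback = 441;  // assumed non-10ms callback size
// adb is an AudioDeviceBuffer* owned by the ADM.
webrtc::FineAudioBuffer fine_buffer(adb, kSampleRate,
                                    2 * kFramesPerCallback);  // capacity in samples

int16_t playout[kFramesPerCallback];
fine_buffer.GetPlayoutData(
    rtc::ArrayView<int16_t>(playout, kFramesPerCallback),
    /*playout_delay_ms=*/25);

const int16_t recorded[kFramesPerCallback] = {};
fine_buffer.DeliverRecordedData(
    rtc::ArrayView<const int16_t>(recorded, kFramesPerCallback),
    /*record_delay_ms=*/25);

The unit-test hunks below exercise exactly this pattern with frame sizes both smaller and larger than 10 ms.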
@@ -34,7 +34,7 @@ const int kSamplesPer10Ms = kSampleRate * 10 / 1000;
 // buffer 2 would contain 3,4,5. Note that SCHAR_MAX is 127 so wrap-around
 // will happen.
 // |buffer| is the audio buffer to verify.
-bool VerifyBuffer(const int8_t* buffer, int buffer_number, int size) {
+bool VerifyBuffer(const int16_t* buffer, int buffer_number, int size) {
   int start_value = (buffer_number * size) % SCHAR_MAX;
   for (int i = 0; i < size; ++i) {
     if (buffer[i] != (i + start_value) % SCHAR_MAX) {
@@ -52,10 +52,9 @@ bool VerifyBuffer(const int8_t* buffer, int buffer_number, int size) {
 // |samples_per_10_ms| is the number of samples that should be written to the
 // buffer (|arg0|).
 ACTION_P2(UpdateBuffer, iteration, samples_per_10_ms) {
-  int8_t* buffer = static_cast<int8_t*>(arg0);
-  int bytes_per_10_ms = samples_per_10_ms * static_cast<int>(sizeof(int16_t));
-  int start_value = (iteration * bytes_per_10_ms) % SCHAR_MAX;
-  for (int i = 0; i < bytes_per_10_ms; ++i) {
+  int16_t* buffer = static_cast<int16_t*>(arg0);
+  int start_value = (iteration * samples_per_10_ms) % SCHAR_MAX;
+  for (int i = 0; i < samples_per_10_ms; ++i) {
     buffer[i] = (i + start_value) % SCHAR_MAX;
   }
   return samples_per_10_ms;
@@ -63,7 +62,7 @@ ACTION_P2(UpdateBuffer, iteration, samples_per_10_ms) {

 // Writes a periodic ramp pattern to the supplied |buffer|. See UpdateBuffer()
 // for details.
-void UpdateInputBuffer(int8_t* buffer, int iteration, int size) {
+void UpdateInputBuffer(int16_t* buffer, int iteration, int size) {
   int start_value = (iteration * size) % SCHAR_MAX;
   for (int i = 0; i < size; ++i) {
     buffer[i] = (i + start_value) % SCHAR_MAX;
@@ -75,18 +74,16 @@ void UpdateInputBuffer(int8_t* buffer, int iteration, int size) {
 // supplied using a buffer size that is smaller or larger than 10ms.
 // See VerifyBuffer() for details.
 ACTION_P2(VerifyInputBuffer, iteration, samples_per_10_ms) {
-  const int8_t* buffer = static_cast<const int8_t*>(arg0);
-  int bytes_per_10_ms = samples_per_10_ms * static_cast<int>(sizeof(int16_t));
-  int start_value = (iteration * bytes_per_10_ms) % SCHAR_MAX;
-  for (int i = 0; i < bytes_per_10_ms; ++i) {
+  const int16_t* buffer = static_cast<const int16_t*>(arg0);
+  int start_value = (iteration * samples_per_10_ms) % SCHAR_MAX;
+  for (int i = 0; i < samples_per_10_ms; ++i) {
     EXPECT_EQ(buffer[i], (i + start_value) % SCHAR_MAX);
   }
   return 0;
 }

 void RunFineBufferTest(int frame_size_in_samples) {
-  const int kFrameSizeBytes =
-      frame_size_in_samples * static_cast<int>(sizeof(int16_t));
+  const int kFrameSizeSamples = frame_size_in_samples;
   const int kNumberOfFrames = 5;
   // Ceiling of integer division: 1 + ((x - 1) / y)
   const int kNumberOfUpdateBufferCalls =
@@ -118,17 +115,17 @@ void RunFineBufferTest(int frame_size_in_samples) {
       .WillRepeatedly(Return(kSamplesPer10Ms));

   FineAudioBuffer fine_buffer(&audio_device_buffer, kSampleRate,
-                              kFrameSizeBytes);
-  std::unique_ptr<int8_t[]> out_buffer(new int8_t[kFrameSizeBytes]);
-  std::unique_ptr<int8_t[]> in_buffer(new int8_t[kFrameSizeBytes]);
+                              kFrameSizeSamples);
+  std::unique_ptr<int16_t[]> out_buffer(new int16_t[kFrameSizeSamples]);
+  std::unique_ptr<int16_t[]> in_buffer(new int16_t[kFrameSizeSamples]);

   for (int i = 0; i < kNumberOfFrames; ++i) {
     fine_buffer.GetPlayoutData(
-        rtc::ArrayView<int8_t>(out_buffer.get(), kFrameSizeBytes), 0);
-    EXPECT_TRUE(VerifyBuffer(out_buffer.get(), i, kFrameSizeBytes));
-    UpdateInputBuffer(in_buffer.get(), i, kFrameSizeBytes);
+        rtc::ArrayView<int16_t>(out_buffer.get(), kFrameSizeSamples), 0);
+    EXPECT_TRUE(VerifyBuffer(out_buffer.get(), i, kFrameSizeSamples));
+    UpdateInputBuffer(in_buffer.get(), i, kFrameSizeSamples);
     fine_buffer.DeliverRecordedData(
-        rtc::ArrayView<const int8_t>(in_buffer.get(), kFrameSizeBytes), 0);
+        rtc::ArrayView<const int16_t>(in_buffer.get(), kFrameSizeSamples), 0);
   }
 }

@@ -246,7 +246,7 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
   // On real iOS devices, the size will be fixed and set once. For iOS
   // simulators, the size can vary from callback to callback and the size
   // will be changed dynamically to account for this behavior.
-  rtc::BufferT<int8_t> record_audio_buffer_;
+  rtc::BufferT<int16_t> record_audio_buffer_;

   // Set to 1 when recording is active and 0 otherwise.
   volatile int recording_;
@@ -360,12 +360,11 @@ OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags
   // Simply return if recording is not enabled.
   if (!rtc::AtomicOps::AcquireLoad(&recording_)) return result;

-  const size_t num_bytes = num_frames * VoiceProcessingAudioUnit::kBytesPerSample;
   // Set the size of our own audio buffer and clear it first to avoid copying
   // in combination with potential reallocations.
   // On real iOS devices, the size will only be set once (at first callback).
   record_audio_buffer_.Clear();
-  record_audio_buffer_.SetSize(num_bytes);
+  record_audio_buffer_.SetSize(num_frames);

   // Allocate AudioBuffers to be used as storage for the received audio.
   // The AudioBufferList structure works as a placeholder for the
@@ -376,8 +375,9 @@ OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags
   audio_buffer_list.mNumberBuffers = 1;
   AudioBuffer* audio_buffer = &audio_buffer_list.mBuffers[0];
   audio_buffer->mNumberChannels = record_parameters_.channels();
-  audio_buffer->mDataByteSize = record_audio_buffer_.size();
-  audio_buffer->mData = record_audio_buffer_.data();
+  audio_buffer->mDataByteSize =
+      record_audio_buffer_.size() * VoiceProcessingAudioUnit::kBytesPerSample;
+  audio_buffer->mData = reinterpret_cast<int8_t*>(record_audio_buffer_.data());

   // Obtain the recorded audio samples by initiating a rendering cycle.
   // Since it happens on the input bus, the |io_data| parameter is a reference
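On iOS the record buffer is now sized in frames, with bytes appearing only at the CoreAudio boundary, where mDataByteSize must still be a byte count. The shape of that boundary, condensed from the two hunks above (kBytesPerSample equals sizeof(int16_t) for the 16-bit stream format):

// record_audio_buffer_ is rtc::BufferT<int16_t>; size() counts frames.
record_audio_buffer_.SetSize(num_frames);
// Convert frames -> bytes only where the C API requires it:
audio_buffer->mDataByteSize =
    record_audio_buffer_.size() * VoiceProcessingAudioUnit::kBytesPerSample;
audio_buffer->mData = reinterpret_cast<int8_t*>(record_audio_buffer_.data());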
@@ -409,16 +409,13 @@ OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
   AudioBuffer* audio_buffer = &io_data->mBuffers[0];
   RTC_DCHECK_EQ(1, audio_buffer->mNumberChannels);

-  // Get pointer to internal audio buffer to which new audio data shall be
-  // written.
-  const size_t size_in_bytes = audio_buffer->mDataByteSize;
-  RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, num_frames);
-  int8_t* destination = reinterpret_cast<int8_t*>(audio_buffer->mData);
   // Produce silence and give audio unit a hint about it if playout is not
   // activated.
   if (!rtc::AtomicOps::AcquireLoad(&playing_)) {
+    const size_t size_in_bytes = audio_buffer->mDataByteSize;
+    RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, num_frames);
     *flags |= kAudioUnitRenderAction_OutputIsSilence;
-    memset(destination, 0, size_in_bytes);
+    memset(static_cast<int8_t*>(audio_buffer->mData), 0, size_in_bytes);
     return noErr;
   }

@@ -454,8 +451,9 @@ OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
   // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
   // the native I/O audio unit) and copy the result to the audio buffer in the
   // |io_data| destination.
-  fine_audio_buffer_->GetPlayoutData(rtc::ArrayView<int8_t>(destination, size_in_bytes),
-                                     kFixedPlayoutDelayEstimate);
+  fine_audio_buffer_->GetPlayoutData(
+      rtc::ArrayView<int16_t>(static_cast<int16_t*>(audio_buffer->mData), num_frames),
+      kFixedPlayoutDelayEstimate);
   return noErr;
 }

@@ -704,9 +702,9 @@ void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
   // the native audio unit buffer size. Use a reasonable capacity to avoid
   // reallocations while audio is played to reduce risk of glitches.
   RTC_DCHECK(audio_device_buffer_);
-  const size_t capacity_in_bytes = 2 * playout_parameters_.GetBytesPerBuffer();
+  const size_t capacity_in_samples = 2 * playout_parameters_.frames_per_buffer();
   fine_audio_buffer_.reset(new FineAudioBuffer(
-      audio_device_buffer_, playout_parameters_.sample_rate(), capacity_in_bytes));
+      audio_device_buffer_, playout_parameters_.sample_rate(), capacity_in_samples));
 }

 bool AudioDeviceIOS::CreateAudioUnit() {
(The hunks below apply the same conversion to the duplicated copies of the Android files in the forked ADM, as noted in the commit message.)

@@ -123,7 +123,7 @@ void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
   // size per callback used by AAudio. Use an initial capacity of 50ms to ensure
   // that the buffer can cache old data and at the same time be prepared for
   // increased burst size in AAudio if buffer underruns are detected.
-  const size_t capacity = 5 * audio_parameters.GetBytesPer10msBuffer();
+  const size_t capacity = 5 * audio_parameters.frames_per_10ms_buffer();
   fine_audio_buffer_.reset(new FineAudioBuffer(
       audio_device_buffer_, audio_parameters.sample_rate(), capacity));
 }
@@ -200,16 +200,16 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,

   // Read audio data from the WebRTC source using the FineAudioBuffer object
   // and write that data into |audio_data| to be played out by AAudio.
-  const size_t num_bytes =
-      sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
   // Prime output with zeros during a short initial phase to avoid distortion.
   // TODO(henrika): do more work to figure out of if the initial forced silence
   // period is really needed.
   if (aaudio_.frames_written() < 50 * aaudio_.frames_per_burst()) {
+    const size_t num_bytes =
+        sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
     memset(audio_data, 0, num_bytes);
   } else {
     fine_audio_buffer_->GetPlayoutData(
-        rtc::ArrayView<int8_t>(static_cast<int8_t*>(audio_data), num_bytes),
+        rtc::MakeArrayView(static_cast<int16_t*>(audio_data), num_frames),
         static_cast<int>(latency_millis_ + 0.5));
   }

@@ -192,11 +192,8 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
   }
   // Copy recorded audio in |audio_data| to the WebRTC sink using the
   // FineAudioBuffer object.
-  const size_t num_bytes =
-      sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
   fine_audio_buffer_->DeliverRecordedData(
-      rtc::ArrayView<const int8_t>(static_cast<const int8_t*>(audio_data),
-                                   num_bytes),
+      rtc::MakeArrayView(static_cast<const int16_t*>(audio_data), num_frames),
       static_cast<int>(latency_millis_ + 0.5));

   return AAUDIO_CALLBACK_RESULT_CONTINUE;
@@ -222,16 +222,16 @@ void OpenSLESPlayer::AllocateDataBuffers() {
   // recommended to construct audio buffers so that they contain an exact
   // multiple of this number. If so, callbacks will occur at regular intervals,
   // which reduces jitter.
-  const size_t buffer_size_in_bytes = audio_parameters_.GetBytesPerBuffer();
-  ALOGD("native buffer size: %" PRIuS, buffer_size_in_bytes);
+  const size_t buffer_size_in_samples = audio_parameters_.frames_per_buffer();
+  ALOGD("native buffer size: %" PRIuS, buffer_size_in_samples);
   ALOGD("native buffer size in ms: %.2f",
         audio_parameters_.GetBufferSizeInMilliseconds());
-  fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_,
-                                               audio_parameters_.sample_rate(),
-                                               2 * buffer_size_in_bytes));
+  fine_audio_buffer_.reset(
+      new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(),
+                          2 * audio_parameters_.frames_per_buffer()));
   // Allocated memory for audio buffers.
   for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
-    audio_buffers_[i].reset(new SLint8[buffer_size_in_bytes]);
+    audio_buffers_[i].reset(new SLint16[buffer_size_in_samples]);
   }
 }

@@ -403,13 +403,14 @@ void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
     ALOGW("Bad OpenSL ES playout timing, dT=%u [ms]", diff);
   }
   last_play_time_ = current_time;
-  SLint8* audio_ptr = audio_buffers_[buffer_index_].get();
+  SLint8* audio_ptr8 =
+      reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get());
   if (silence) {
     RTC_DCHECK(thread_checker_.CalledOnValidThread());
     // Avoid aquiring real audio data from WebRTC and fill the buffer with
     // zeros instead. Used to prime the buffer with silence and to avoid asking
     // for audio data from two different threads.
-    memset(audio_ptr, 0, audio_parameters_.GetBytesPerBuffer());
+    memset(audio_ptr8, 0, audio_parameters_.GetBytesPerBuffer());
   } else {
     RTC_DCHECK(thread_checker_opensles_.CalledOnValidThread());
     // Read audio data from the WebRTC source using the FineAudioBuffer object
@@ -417,13 +418,13 @@ void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
     // OpenSL ES. Use hardcoded delay estimate since OpenSL ES does not support
     // delay estimation.
     fine_audio_buffer_->GetPlayoutData(
-        rtc::ArrayView<SLint8>(audio_ptr,
-                               audio_parameters_.GetBytesPerBuffer()),
+        rtc::ArrayView<int16_t>(audio_buffers_[buffer_index_].get(),
+                                audio_parameters_.frames_per_buffer()),
         25);
   }
   // Enqueue the decoded audio buffer for playback.
   SLresult err = (*simple_buffer_queue_)
-                     ->Enqueue(simple_buffer_queue_, audio_ptr,
+                     ->Enqueue(simple_buffer_queue_, audio_ptr8,
                                audio_parameters_.GetBytesPerBuffer());
   if (SL_RESULT_SUCCESS != err) {
     ALOGE("Enqueue failed: %d", err);
@@ -140,9 +140,8 @@ class OpenSLESPlayer : public AudioOutput {
   SLDataFormat_PCM pcm_format_;

   // Queue of audio buffers to be used by the player object for rendering
-  // audio. They will be used in a Round-robin way and the size of each buffer
-  // is given by FineAudioBuffer::RequiredBufferSizeBytes().
-  std::unique_ptr<SLint8[]> audio_buffers_[kNumOfOpenSLESBuffers];
+  // audio.
+  std::unique_ptr<SLint16[]> audio_buffers_[kNumOfOpenSLESBuffers];

   // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
   // in chunks of 10ms. It then allows for this data to be pulled in
@@ -355,12 +355,12 @@ void OpenSLESRecorder::AllocateDataBuffers() {
   RTC_DCHECK(audio_device_buffer_);
   fine_audio_buffer_.reset(
       new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(),
-                          2 * audio_parameters_.GetBytesPerBuffer()));
+                          2 * audio_parameters_.frames_per_buffer()));
   // Allocate queue of audio buffers that stores recorded audio samples.
-  const int data_size_bytes = audio_parameters_.GetBytesPerBuffer();
-  audio_buffers_.reset(new std::unique_ptr<SLint8[]>[kNumOfOpenSLESBuffers]);
+  const int data_size_samples = audio_parameters_.frames_per_buffer();
+  audio_buffers_.reset(new std::unique_ptr<SLint16[]>[kNumOfOpenSLESBuffers]);
   for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
-    audio_buffers_[i].reset(new SLint8[data_size_bytes]);
+    audio_buffers_[i].reset(new SLint16[data_size_samples]);
   }
 }

@@ -385,12 +385,12 @@ void OpenSLESRecorder::ReadBufferQueue() {
   // since there is no support to turn off built-in EC in combination with
   // OpenSL ES anyhow. Hence, as is, the WebRTC based AEC (which would use
   // these estimates) will never be active.
-  const size_t size_in_bytes =
-      static_cast<size_t>(audio_parameters_.GetBytesPerBuffer());
-  const int8_t* data =
-      static_cast<const int8_t*>(audio_buffers_[buffer_index_].get());
+  const size_t size_in_samples =
+      static_cast<size_t>(audio_parameters_.frames_per_buffer());
   fine_audio_buffer_->DeliverRecordedData(
-      rtc::ArrayView<const int8_t>(data, size_in_bytes), 25);
+      rtc::ArrayView<const int16_t>(audio_buffers_[buffer_index_].get(),
+                                    size_in_samples),
+      25);
   // Enqueue the utilized audio buffer and use if for recording again.
   EnqueueAudioBuffer();
 }
@@ -398,8 +398,10 @@ void OpenSLESRecorder::ReadBufferQueue() {
 bool OpenSLESRecorder::EnqueueAudioBuffer() {
   SLresult err =
       (*simple_buffer_queue_)
-          ->Enqueue(simple_buffer_queue_, audio_buffers_[buffer_index_].get(),
-                    audio_parameters_.GetBytesPerBuffer());
+          ->Enqueue(
+              simple_buffer_queue_,
+              reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get()),
+              audio_parameters_.GetBytesPerBuffer());
   if (SL_RESULT_SUCCESS != err) {
     ALOGE("Enqueue failed: %s", GetSLErrorString(err));
     return false;
@@ -173,9 +173,9 @@ class OpenSLESRecorder : public AudioInput {

   // Queue of audio buffers to be used by the recorder object for capturing
   // audio. They will be used in a Round-robin way and the size of each buffer
-  // is given by AudioParameters::GetBytesPerBuffer(), i.e., it corresponds to
+  // is given by AudioParameters::frames_per_buffer(), i.e., it corresponds to
   // the native OpenSL ES buffer size.
-  std::unique_ptr<std::unique_ptr<SLint8[]>[]> audio_buffers_;
+  std::unique_ptr<std::unique_ptr<SLint16[]>[]> audio_buffers_;

   // Keeps track of active audio buffer 'n' in the audio_buffers_[n] queue.
   // Example (kNumOfOpenSLESBuffers = 2): counts 0, 1, 0, 1, ...