Add an AudioRingBuffer class wrapper for the ring_buffer.h C interface.

Integrate it in Blocker to demonstrate use.

TEST=beamforming sounds good.
R=aluebs@webrtc.org, mgraczyk@chromium.org, sahark@google.com

Review URL: https://webrtc-codereview.appspot.com/36799004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@8157 4adac7df-926f-26a2-2b94-8c16560cd09d
andrew@webrtc.org
2015-01-26 21:23:53 +00:00
parent 4dba2e98a2
commit 041035b390
11 changed files with 256 additions and 41 deletions
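
Before the diff, a minimal sketch of how the new class is meant to be used. The function name, chunk sizes and channel count here are made up for illustration; the API is the one declared in audio_ring_buffer.h below.

#include "webrtc/common_audio/audio_ring_buffer.h"

// Illustrative only: buffer two channels of float audio and drain it in
// smaller chunks. Requests that cannot be satisfied crash via CHECK, so
// callers are expected to consult *FramesAvailable() first.
void BufferOneChunk(const float* const* input, float* const* output) {
  webrtc::AudioRingBuffer ring(2 /* channels */, 480 /* max_frames */);
  ring.Write(input, 2, 160);       // one 160-frame chunk in
  while (ring.ReadFramesAvailable() >= 80)
    ring.Read(output, 2, 80);      // 80-frame chunks out (overwriting |output|
                                   // each time, which is fine for a sketch)
}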

@@ -19,8 +19,12 @@ config("common_audio_config") {
 source_set("common_audio") {
   sources = [
+    "../modules/audio_processing/channel_buffer.cc",
+    "../modules/audio_processing/channel_buffer.h",
     "audio_converter.cc",
     "audio_converter.h",
+    "audio_ring_buffer.cc",
+    "audio_ring_buffer.h",
     "audio_util.cc",
     "blocker.cc",
     "blocker.h",

@@ -0,0 +1,64 @@
/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/common_audio/audio_ring_buffer.h"

#include "webrtc/base/checks.h"
#include "webrtc/common_audio/ring_buffer.h"

// This is a simple multi-channel wrapper over the ring_buffer.h C interface.

namespace webrtc {

AudioRingBuffer::AudioRingBuffer(size_t channels, size_t max_frames) {
  for (size_t i = 0; i < channels; ++i)
    buffers_.push_back(WebRtc_CreateBuffer(max_frames, sizeof(float)));
}

AudioRingBuffer::~AudioRingBuffer() {
  for (auto buf : buffers_)
    WebRtc_FreeBuffer(buf);
}

void AudioRingBuffer::Write(const float* const* data, size_t channels,
                            size_t frames) {
  DCHECK_EQ(buffers_.size(), channels);
  for (size_t i = 0; i < channels; ++i) {
    size_t written = WebRtc_WriteBuffer(buffers_[i], data[i], frames);
    CHECK_EQ(written, frames);
  }
}

void AudioRingBuffer::Read(float* const* data, size_t channels, size_t frames) {
  DCHECK_EQ(buffers_.size(), channels);
  for (size_t i = 0; i < channels; ++i) {
    size_t read = WebRtc_ReadBuffer(buffers_[i], nullptr, data[i], frames);
    CHECK_EQ(read, frames);
  }
}

size_t AudioRingBuffer::ReadFramesAvailable() const {
  // All buffers have the same amount available.
  return WebRtc_available_read(buffers_[0]);
}

size_t AudioRingBuffer::WriteFramesAvailable() const {
  // All buffers have the same amount available.
  return WebRtc_available_write(buffers_[0]);
}

void AudioRingBuffer::MoveReadPosition(int frames) {
  for (auto buf : buffers_) {
    int moved = WebRtc_MoveReadPtr(buf, frames);
    CHECK_EQ(moved, frames);
  }
}

}  // namespace webrtc

@@ -0,0 +1,49 @@
/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_COMMON_AUDIO_AUDIO_RING_BUFFER_H_
#define WEBRTC_COMMON_AUDIO_AUDIO_RING_BUFFER_H_

#include <stddef.h>
#include <vector>

struct RingBuffer;

namespace webrtc {

// A ring buffer tailored for float deinterleaved audio. Any operation that
// cannot be performed as requested will cause a crash (e.g. insufficient data
// in the buffer to fulfill a read request.)
class AudioRingBuffer final {
 public:
  // Specify the number of channels and maximum number of frames the buffer
  // will contain.
  AudioRingBuffer(size_t channels, size_t max_frames);
  ~AudioRingBuffer();

  // Copy |data| to the buffer and advance the write pointer. |channels| must
  // be the same as at creation time.
  void Write(const float* const* data, size_t channels, size_t frames);

  // Copy from the buffer to |data| and advance the read pointer. |channels|
  // must be the same as at creation time.
  void Read(float* const* data, size_t channels, size_t frames);

  size_t ReadFramesAvailable() const;
  size_t WriteFramesAvailable() const;

  // Positive values advance the read pointer and negative values withdraw
  // the read pointer (i.e. flush and stuff the buffer respectively.)
  void MoveReadPosition(int frames);

 private:
  // We don't use a ScopedVector because it doesn't support a specialized
  // deleter (like scoped_ptr for instance.)
  std::vector<RingBuffer*> buffers_;
};

}  // namespace webrtc

#endif  // WEBRTC_COMMON_AUDIO_AUDIO_RING_BUFFER_H_
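
A short sketch of the MoveReadPosition() semantics above, since "flush" and "stuff" are easy to mix up. The helper name and sizes are hypothetical; the zero-stuffing behaviour is an assumption inferred from the Blocker change below, which drops its explicit memsets in favour of a negative move.

#include "webrtc/common_audio/audio_ring_buffer.h"

void MoveReadPositionDemo() {
  webrtc::AudioRingBuffer buf(1, 16);
  float frames[] = {1, 2, 3, 4};
  const float* const channels[] = {frames};
  buf.Write(channels, 1, 4);  // 4 frames readable.

  buf.MoveReadPosition(2);    // "Flush": skip frames 1 and 2.
  // ReadFramesAvailable() == 2; the next Read() would return {3, 4}.

  buf.MoveReadPosition(-3);   // "Stuff": back the read pointer up by 3.
  // ReadFramesAvailable() == 5. The extra frame is whatever the underlying
  // ring_buffer memory held (presumably zeros at creation, given that the
  // Blocker change below relies on this instead of memset).
}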

@@ -0,0 +1,107 @@
/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/common_audio/audio_ring_buffer.h"

#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_processing/channel_buffer.h"

namespace webrtc {

class AudioRingBufferTest :
    public ::testing::TestWithParam< ::testing::tuple<int, int, int, int> > {
};

void ReadAndWriteTest(const ChannelBuffer<float>& input,
                      size_t num_write_chunk_frames,
                      size_t num_read_chunk_frames,
                      size_t buffer_frames,
                      ChannelBuffer<float>* output) {
  const size_t num_channels = input.num_channels();
  const size_t total_frames = input.samples_per_channel();
  AudioRingBuffer buf(num_channels, buffer_frames);
  scoped_ptr<float*[]> slice(new float*[num_channels]);

  size_t input_pos = 0;
  size_t output_pos = 0;
  while (input_pos + buf.WriteFramesAvailable() < total_frames) {
    // Write until the buffer is as full as possible.
    while (buf.WriteFramesAvailable() >= num_write_chunk_frames) {
      buf.Write(input.Slice(slice.get(), static_cast<int>(input_pos)),
                num_channels, num_write_chunk_frames);
      input_pos += num_write_chunk_frames;
    }
    // Read until the buffer is as empty as possible.
    while (buf.ReadFramesAvailable() >= num_read_chunk_frames) {
      EXPECT_LT(output_pos, total_frames);
      buf.Read(output->Slice(slice.get(), static_cast<int>(output_pos)),
               num_channels, num_read_chunk_frames);
      output_pos += num_read_chunk_frames;
    }
  }

  // Write and read the last bit.
  if (input_pos < total_frames)
    buf.Write(input.Slice(slice.get(), static_cast<int>(input_pos)),
              num_channels, total_frames - input_pos);
  if (buf.ReadFramesAvailable())
    buf.Read(output->Slice(slice.get(), static_cast<int>(output_pos)),
             num_channels, buf.ReadFramesAvailable());
  EXPECT_EQ(0u, buf.ReadFramesAvailable());
}

TEST_P(AudioRingBufferTest, ReadDataMatchesWrittenData) {
  const size_t kFrames = 5000;
  const size_t num_channels = ::testing::get<3>(GetParam());

  // Initialize the input data to an increasing sequence.
  ChannelBuffer<float> input(kFrames, static_cast<int>(num_channels));
  for (size_t i = 0; i < num_channels; ++i)
    for (size_t j = 0; j < kFrames; ++j)
      input.channels()[i][j] = i * j;

  ChannelBuffer<float> output(kFrames, static_cast<int>(num_channels));
  ReadAndWriteTest(input,
                   ::testing::get<0>(GetParam()),
                   ::testing::get<1>(GetParam()),
                   ::testing::get<2>(GetParam()),
                   &output);

  // Verify the read data matches the input.
  for (size_t i = 0; i < num_channels; ++i)
    for (size_t j = 0; j < kFrames; ++j)
      EXPECT_EQ(input.channels()[i][j], output.channels()[i][j]);
}

INSTANTIATE_TEST_CASE_P(
    AudioRingBufferTest, AudioRingBufferTest,
    ::testing::Combine(::testing::Values(10, 20, 42),  // num_write_chunk_frames
                       ::testing::Values(1, 10, 17),   // num_read_chunk_frames
                       ::testing::Values(100, 256),    // buffer_frames
                       ::testing::Values(1, 4)));      // num_channels

TEST_F(AudioRingBufferTest, MoveReadPosition) {
  const size_t kNumChannels = 1;
  const float kInputArray[] = {1, 2, 3, 4};
  const size_t kNumFrames = sizeof(kInputArray) / sizeof(*kInputArray);
  ChannelBuffer<float> input(kInputArray, kNumFrames, kNumChannels);
  AudioRingBuffer buf(kNumChannels, kNumFrames);
  buf.Write(input.channels(), kNumChannels, kNumFrames);

  buf.MoveReadPosition(3);
  ChannelBuffer<float> output(1, kNumChannels);
  buf.Read(output.channels(), kNumChannels, 1);
  EXPECT_EQ(4, output.data()[0]);
  buf.MoveReadPosition(-3);
  buf.Read(output.channels(), kNumChannels, 1);
  EXPECT_EQ(2, output.data()[0]);
}

}  // namespace webrtc

@@ -110,7 +110,7 @@ Blocker::Blocker(int chunk_size,
       num_output_channels_(num_output_channels),
       initial_delay_(block_size_ - gcd(chunk_size, shift_amount)),
       frame_offset_(0),
-      input_buffer_(chunk_size_ + initial_delay_, num_input_channels_),
+      input_buffer_(num_input_channels_, chunk_size_ + initial_delay_),
       output_buffer_(chunk_size_ + initial_delay_, num_output_channels_),
       input_block_(block_size_, num_input_channels_),
       output_block_(block_size_, num_output_channels_),
@@ -118,15 +118,8 @@ Blocker::Blocker(int chunk_size,
       shift_amount_(shift_amount),
       callback_(callback) {
   CHECK_LE(num_output_channels_, num_input_channels_);
   memcpy(window_.get(), window, block_size_ * sizeof(float));
-  size_t buffer_size = chunk_size_ + initial_delay_;
-  memset(input_buffer_.channels()[0],
-         0,
-         buffer_size * num_input_channels_ * sizeof(float));
-  memset(output_buffer_.channels()[0],
-         0,
-         buffer_size * num_output_channels_ * sizeof(float));
+  input_buffer_.MoveReadPosition(-initial_delay_);
 }

 // When block_size < chunk_size the input and output buffers look like this:
@@ -177,25 +170,14 @@ void Blocker::ProcessChunk(const float* const* input,
   CHECK_EQ(num_input_channels, num_input_channels_);
   CHECK_EQ(num_output_channels, num_output_channels_);

-  // Copy new data into input buffer at
-  // [|initial_delay_|, |chunk_size_| + |initial_delay_|].
-  CopyFrames(input,
-             0,
-             chunk_size_,
-             num_input_channels_,
-             input_buffer_.channels(),
-             initial_delay_);
+  input_buffer_.Write(input, num_input_channels, chunk_size_);

   int first_frame_in_block = frame_offset_;

   // Loop through blocks.
   while (first_frame_in_block < chunk_size_) {
-    CopyFrames(input_buffer_.channels(),
-               first_frame_in_block,
-               block_size_,
-               num_input_channels_,
-               input_block_.channels(),
-               0);
+    input_buffer_.Read(input_block_.channels(), num_input_channels,
+                       block_size_);
+    input_buffer_.MoveReadPosition(-block_size_ + shift_amount_);

     ApplyWindow(window_.get(),
                 block_size_,
@@ -231,15 +213,6 @@ void Blocker::ProcessChunk(const float* const* input,
              output,
              0);

-  // Copy input buffer [chunk_size_, chunk_size_ + initial_delay]
-  // to input buffer [0, initial_delay]
-  MoveFrames(input_buffer_.channels(),
-             chunk_size,
-             initial_delay_,
-             num_input_channels_,
-             input_buffer_.channels(),
-             0);
-
   // Copy output buffer [chunk_size_, chunk_size_ + initial_delay]
   // to output buffer [0, initial_delay], zero the rest.
   MoveFrames(output_buffer_.channels(),
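
The Read()/MoveReadPosition() pair in the loop above is what produces the overlapping blocks: each iteration reads a full block, then rewinds the read pointer so the next block starts only shift_amount_ frames later. A standalone sketch of that pattern, not Blocker's actual loop (the helper name and the 8/4 sizes are illustrative):

#include "webrtc/common_audio/audio_ring_buffer.h"

// Reads blocks of 8 frames with a hop of 4, i.e. 50% overlap. Assumes |block|
// points at num_channels arrays of at least kBlockSize frames each.
void ReadOverlappingBlocks(webrtc::AudioRingBuffer* buf,
                           float* const* block,
                           size_t num_channels) {
  const int kBlockSize = 8;
  const int kShiftAmount = 4;
  while (buf->ReadFramesAvailable() >= static_cast<size_t>(kBlockSize)) {
    buf->Read(block, num_channels, kBlockSize);
    // Rewind so consecutive blocks overlap by kBlockSize - kShiftAmount frames.
    buf->MoveReadPosition(-kBlockSize + kShiftAmount);
  }
}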

@@ -11,6 +11,7 @@
 #ifndef WEBRTC_INTERNAL_BEAMFORMER_BLOCKER_H_
 #define WEBRTC_INTERNAL_BEAMFORMER_BLOCKER_H_

+#include "webrtc/common_audio/audio_ring_buffer.h"
 #include "webrtc/modules/audio_processing/channel_buffer.h"
 #include "webrtc/system_wrappers/interface/scoped_ptr.h"
@@ -93,8 +94,10 @@ class Blocker {
   // input and output buffers are responsible for saving those frames between
   // calls to ProcessChunk().
   //
-  // Both contain |initial delay| + |chunk_size| frames.
-  ChannelBuffer<float> input_buffer_;
+  // Both contain |initial delay| + |chunk_size| frames. The input is a fairly
+  // standard FIFO, but due to the overlap-add it's harder to use an
+  // AudioRingBuffer for the output.
+  AudioRingBuffer input_buffer_;
   ChannelBuffer<float> output_buffer_;

   // Space for the input block (can't wrap because of windowing).
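
As for why the output stays a ChannelBuffer: with overlap-add, each output frame accumulates contributions from every block that covers it, so frames near the write position must stay addressable until the last overlapping block has been added in; a plain ring-buffer read would consume them too early. A one-channel sketch of the accumulation (the names are illustrative, not Blocker's helpers):

// Adds a windowed block into the output at |start_frame|, accumulating with
// whatever earlier overlapping blocks already wrote there.
void OverlapAdd(const float* block, int block_size, int start_frame,
                float* output /* at least start_frame + block_size frames */) {
  for (int i = 0; i < block_size; ++i)
    output[start_frame + i] += block[i];
}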

@@ -29,8 +29,12 @@
           ],
         },
         'sources': [
+          '../modules/audio_processing/channel_buffer.cc',
+          '../modules/audio_processing/channel_buffer.h',
           'audio_converter.cc',
           'audio_converter.h',
+          'audio_ring_buffer.cc',
+          'audio_ring_buffer.h',
           'audio_util.cc',
           'blocker.cc',
           'blocker.h',
@@ -228,6 +232,7 @@
           ],
           'sources': [
             'audio_converter_unittest.cc',
+            'audio_ring_buffer_unittest.cc',
            'audio_util_unittest.cc',
            'blocker_unittest.cc',
            'fir_filter_unittest.cc',

@@ -22,7 +22,7 @@ extern "C" {
 typedef struct RingBuffer RingBuffer;

-// Returns NULL on failure.
+// Creates and initializes the buffer. Returns NULL on failure.
 RingBuffer* WebRtc_CreateBuffer(size_t element_count, size_t element_size);
 void WebRtc_InitBuffer(RingBuffer* handle);
 void WebRtc_FreeBuffer(void* handle);

@@ -76,8 +76,6 @@ source_set("audio_processing") {
     "beamformer/covariance_matrix_generator.cc",
     "beamformer/covariance_matrix_generator.h",
     "beamformer/matrix.h",
-    "channel_buffer.cc",
-    "channel_buffer.h",
     "common.h",
     "echo_cancellation_impl.cc",
     "echo_cancellation_impl.h",

@@ -85,8 +85,6 @@
         'beamformer/covariance_matrix_generator.cc',
         'beamformer/covariance_matrix_generator.h',
         'beamformer/matrix.h',
-        'channel_buffer.cc',
-        'channel_buffer.h',
         'common.h',
         'echo_cancellation_impl.cc',
         'echo_cancellation_impl.h',

@@ -19,7 +19,8 @@
 namespace webrtc {

 // Helper to encapsulate a contiguous data buffer with access to a pointer
-// array of the deinterleaved channels.
+// array of the deinterleaved channels. The buffer is zero initialized at
+// creation.
 template <typename T>
 class ChannelBuffer {
  public:
@@ -74,6 +75,19 @@ class ChannelBuffer {
   T* const* channels() { return channels_.get(); }
   const T* const* channels() const { return channels_.get(); }

+  // Sets the |slice| pointers to the |start_frame| position for each channel.
+  // Returns |slice| for convenience.
+  const T* const* Slice(T** slice, int start_frame) const {
+    DCHECK_LT(start_frame, samples_per_channel_);
+    for (int i = 0; i < num_channels_; ++i)
+      slice[i] = &channels_[i][start_frame];
+    return slice;
+  }
+  T** Slice(T** slice, int start_frame) {
+    const ChannelBuffer<T>* t = this;
+    return const_cast<T**>(t->Slice(slice, start_frame));
+  }
+
   int samples_per_channel() const { return samples_per_channel_; }
   int num_channels() const { return num_channels_; }
   int length() const { return samples_per_channel_ * num_channels_; }
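
For reference, a small sketch of how Slice() pairs with AudioRingBuffer, as the unit test above does: the caller owns the scratch pointer array and Slice() only repoints it, so no samples are copied until the ring buffer call itself. The function name, sizes and std::vector scratch space are illustrative (the test uses scoped_ptr).

#include <vector>

#include "webrtc/common_audio/audio_ring_buffer.h"
#include "webrtc/modules/audio_processing/channel_buffer.h"

// Hypothetical snippet: write frames [100, 164) of |input| into |ring|,
// assuming |input| holds at least 164 frames and |ring| was created with the
// same channel count and has at least 64 frames of write space.
void WriteSlice(const webrtc::ChannelBuffer<float>& input,
                webrtc::AudioRingBuffer* ring) {
  std::vector<float*> slice(input.num_channels());
  ring->Write(input.Slice(slice.data(), 100), input.num_channels(), 64);
}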