Revert of Revert "Revert of Moved ring-buffer related files from common_audio to audio_processing" (patchset #2 id:20001 of https://codereview.webrtc.org/1858123003/ )

Reason for revert:
Because of downstream dependencies, this CL needs to be reverted.

The dependencies will be resolved and then the CL will be relanded.

Original issue's description:
> Revert "Revert of Moved ring-buffer related files from common_audio to audio_processing (patchset #8 id:150001 of https://codereview.webrtc.org/1846903004/ )"
>
> This reverts commit c54aad6ae07fe2a44a65be403386bd7d7d865e5b.
>
> BUG=webrtc:5724
> NOPRESUBMIT=true
>
> Committed: https://crrev.com/8864fe5e08f8d8711612526dee9a812adfcd3be1
> Cr-Commit-Position: refs/heads/master@{#12247}

TBR=henrik.lundin@webrtc.org,ivoc@webrtc.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=webrtc:5724

Review URL: https://codereview.webrtc.org/1855393004

Cr-Commit-Position: refs/heads/master@{#12248}
peah
2016-04-05 14:57:48 -07:00
committed by Commit bot
parent 8864fe5e08
commit faed4ab24b
31 changed files with 320 additions and 373 deletions


@@ -108,21 +108,13 @@ source_set("audio_processing") {
"transient/wpd_tree.h",
"typing_detection.cc",
"typing_detection.h",
"utility/audio_ring_buffer.cc",
"utility/audio_ring_buffer.h",
"utility/block_mean_calculator.cc",
"utility/block_mean_calculator.h",
"utility/blocker.cc",
"utility/blocker.h",
"utility/delay_estimator.c",
"utility/delay_estimator.h",
"utility/delay_estimator_internal.h",
"utility/delay_estimator_wrapper.c",
"utility/delay_estimator_wrapper.h",
"utility/lapped_transform.cc",
"utility/lapped_transform.h",
"utility/ring_buffer.c",
"utility/ring_buffer.h",
"vad/common.h",
"vad/gmm.cc",
"vad/gmm.h",


@@ -24,6 +24,9 @@
#include <stdlib.h>
#include <string.h>
extern "C" {
#include "webrtc/common_audio/ring_buffer.h"
}
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_processing/aec/aec_common.h"
#include "webrtc/modules/audio_processing/aec/aec_core_internal.h"
@@ -33,7 +36,6 @@ extern "C" {
#include "webrtc/modules/audio_processing/logging/aec_logging.h"
extern "C" {
#include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h"
#include "webrtc/modules/audio_processing/utility/ring_buffer.h"
}
#include "webrtc/system_wrappers/include/cpu_features_wrapper.h"
#include "webrtc/typedefs.h"


@@ -11,14 +11,13 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_INTERNAL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_INTERNAL_H_
extern "C" {
#include "webrtc/common_audio/ring_buffer.h"
}
#include "webrtc/common_audio/wav_file.h"
#include "webrtc/modules/audio_processing/aec/aec_common.h"
#include "webrtc/modules/audio_processing/aec/aec_core.h"
#include "webrtc/modules/audio_processing/utility/block_mean_calculator.h"
extern "C" {
#include "webrtc/modules/audio_processing/utility/ring_buffer.h"
}
#include "webrtc/typedefs.h"
namespace webrtc {


@@ -21,14 +21,12 @@
#include <string.h>
extern "C" {
#include "webrtc/common_audio/ring_buffer.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
}
#include "webrtc/modules/audio_processing/aec/aec_core.h"
#include "webrtc/modules/audio_processing/aec/aec_resampler.h"
#include "webrtc/modules/audio_processing/aec/echo_cancellation_internal.h"
extern "C" {
#include "webrtc/modules/audio_processing/utility/ring_buffer.h"
}
#include "webrtc/typedefs.h"
namespace webrtc {


@@ -11,10 +11,10 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_ECHO_CANCELLATION_INTERNAL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_ECHO_CANCELLATION_INTERNAL_H_
#include "webrtc/modules/audio_processing/aec/aec_core.h"
extern "C" {
#include "webrtc/modules/audio_processing/utility/ring_buffer.h"
#include "webrtc/common_audio/ring_buffer.h"
}
#include "webrtc/modules/audio_processing/aec/aec_core.h"
namespace webrtc {


@@ -14,10 +14,10 @@
#include <stddef.h>
#include <stdlib.h>
#include "webrtc/common_audio/ring_buffer.h"
#include "webrtc/common_audio/signal_processing/include/real_fft.h"
#include "webrtc/modules/audio_processing/aecm/echo_control_mobile.h"
#include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h"
#include "webrtc/modules/audio_processing/utility/ring_buffer.h"
#include "webrtc/system_wrappers/include/compile_assert_c.h"
#include "webrtc/system_wrappers/include/cpu_features_wrapper.h"
#include "webrtc/typedefs.h"


@@ -13,9 +13,9 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AECM_AECM_CORE_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_AECM_AECM_CORE_H_
#include "webrtc/common_audio/ring_buffer.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_processing/aecm/aecm_defines.h"
#include "webrtc/modules/audio_processing/utility/ring_buffer.h"
#include "webrtc/typedefs.h"
#ifdef _MSC_VER // visual c++


@@ -14,10 +14,10 @@
#include <stddef.h>
#include <stdlib.h>
#include "webrtc/common_audio/ring_buffer.h"
#include "webrtc/common_audio/signal_processing/include/real_fft.h"
#include "webrtc/modules/audio_processing/aecm/echo_control_mobile.h"
#include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h"
#include "webrtc/modules/audio_processing/utility/ring_buffer.h"
#include "webrtc/system_wrappers/include/compile_assert_c.h"
#include "webrtc/system_wrappers/include/cpu_features_wrapper.h"
#include "webrtc/typedefs.h"
@@ -768,3 +768,4 @@ static void ComfortNoise(AecmCore* aecm,
out[i].imag = WebRtcSpl_AddSatW16(out[i].imag, uImag[i]);
}
}


@@ -15,9 +15,9 @@
#endif
#include <stdlib.h>
#include "webrtc/common_audio/ring_buffer.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_processing/aecm/aecm_core.h"
#include "webrtc/modules/audio_processing/utility/ring_buffer.h"
#define BUF_SIZE_FRAMES 50 // buffer size (frames)
// Maximum length of resampled signal. Must be an integer multiple of frames


@@ -118,21 +118,13 @@
'transient/wpd_tree.h',
'typing_detection.cc',
'typing_detection.h',
'utility/audio_ring_buffer.cc',
'utility/audio_ring_buffer.h',
'utility/block_mean_calculator.cc',
'utility/block_mean_calculator.h',
'utility/blocker.cc',
'utility/blocker.h',
'utility/delay_estimator.c',
'utility/delay_estimator.h',
'utility/delay_estimator_internal.h',
'utility/delay_estimator_wrapper.c',
'utility/delay_estimator_wrapper.h',
'utility/lapped_transform.cc',
'utility/lapped_transform.h',
'utility/ring_buffer.c',
'utility/ring_buffer.h',
'vad/common.h',
'vad/gmm.cc',
'vad/gmm.h',


@@ -19,10 +19,11 @@
#include <memory>
#include <vector>
#include "webrtc/common_audio/lapped_transform.h"
#include "webrtc/common_audio/channel_buffer.h"
#include "webrtc/modules/audio_processing/beamformer/beamformer.h"
#include "webrtc/modules/audio_processing/beamformer/complex_matrix.h"
#include "webrtc/modules/audio_processing/utility/lapped_transform.h"
namespace webrtc {
// Enhances sound sources coming directly in front of a uniform linear array


@@ -16,10 +16,10 @@
#include <vector>
#include "webrtc/base/swap_queue.h"
#include "webrtc/common_audio/lapped_transform.h"
#include "webrtc/common_audio/channel_buffer.h"
#include "webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h"
#include "webrtc/modules/audio_processing/render_queue_item_verifier.h"
#include "webrtc/modules/audio_processing/utility/lapped_transform.h"
#include "webrtc/modules/audio_processing/vad/voice_activity_detector.h"
namespace webrtc {


@@ -1,75 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_processing/utility/audio_ring_buffer.h"
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_processing/utility/ring_buffer.h"
// This is a simple multi-channel wrapper over the ring_buffer.h C interface.
namespace webrtc {
AudioRingBuffer::AudioRingBuffer(size_t channels, size_t max_frames) {
buffers_.reserve(channels);
for (size_t i = 0; i < channels; ++i)
buffers_.push_back(WebRtc_CreateBuffer(max_frames, sizeof(float)));
}
AudioRingBuffer::~AudioRingBuffer() {
for (auto buf : buffers_)
WebRtc_FreeBuffer(buf);
}
void AudioRingBuffer::Write(const float* const* data, size_t channels,
size_t frames) {
RTC_DCHECK_EQ(buffers_.size(), channels);
for (size_t i = 0; i < channels; ++i) {
const size_t written = WebRtc_WriteBuffer(buffers_[i], data[i], frames);
RTC_CHECK_EQ(written, frames);
}
}
void AudioRingBuffer::Read(float* const* data, size_t channels, size_t frames) {
RTC_DCHECK_EQ(buffers_.size(), channels);
for (size_t i = 0; i < channels; ++i) {
const size_t read =
WebRtc_ReadBuffer(buffers_[i], nullptr, data[i], frames);
RTC_CHECK_EQ(read, frames);
}
}
size_t AudioRingBuffer::ReadFramesAvailable() const {
// All buffers have the same amount available.
return WebRtc_available_read(buffers_[0]);
}
size_t AudioRingBuffer::WriteFramesAvailable() const {
// All buffers have the same amount available.
return WebRtc_available_write(buffers_[0]);
}
void AudioRingBuffer::MoveReadPositionForward(size_t frames) {
for (auto buf : buffers_) {
const size_t moved =
static_cast<size_t>(WebRtc_MoveReadPtr(buf, static_cast<int>(frames)));
RTC_CHECK_EQ(moved, frames);
}
}
void AudioRingBuffer::MoveReadPositionBackward(size_t frames) {
for (auto buf : buffers_) {
const size_t moved = static_cast<size_t>(
-WebRtc_MoveReadPtr(buf, -static_cast<int>(frames)));
RTC_CHECK_EQ(moved, frames);
}
}
} // namespace webrtc


@@ -1,55 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_AUDIO_RING_BUFFER_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_AUDIO_RING_BUFFER_H_
#include <stddef.h>
#include <vector>
struct RingBuffer;
namespace webrtc {
// A ring buffer tailored for float deinterleaved audio. Any operation that
// cannot be performed as requested will cause a crash (e.g. insufficient data
// in the buffer to fulfill a read request.)
class AudioRingBuffer final {
public:
// Specify the number of channels and maximum number of frames the buffer will
// contain.
AudioRingBuffer(size_t channels, size_t max_frames);
~AudioRingBuffer();
// Copies |data| to the buffer and advances the write pointer. |channels| must
// be the same as at creation time.
void Write(const float* const* data, size_t channels, size_t frames);
// Copies from the buffer to |data| and advances the read pointer. |channels|
// must be the same as at creation time.
void Read(float* const* data, size_t channels, size_t frames);
size_t ReadFramesAvailable() const;
size_t WriteFramesAvailable() const;
// Moves the read position. The forward version advances the read pointer
// towards the write pointer and the backward version withdraws the read
// pointer away from the write pointer (i.e. flushing and stuffing the buffer
// respectively.)
void MoveReadPositionForward(size_t frames);
void MoveReadPositionBackward(size_t frames);
private:
// TODO(kwiberg): Use std::vector<std::unique_ptr<RingBuffer>> instead.
std::vector<RingBuffer*> buffers_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_AUDIO_RING_BUFFER_H_
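For reference, a minimal usage sketch of the AudioRingBuffer interface declared above (not part of this CL; the channel count and frame sizes are illustrative, and the class declaration is assumed to be in scope from wherever the header lives after this revert):

// Sketch only: assumes the AudioRingBuffer declaration above is in scope.
#include <cstddef>

void AudioRingBufferSketch() {
  const size_t kChannels = 2;
  const size_t kMaxFrames = 480;
  const size_t kChunkFrames = 160;
  webrtc::AudioRingBuffer buffer(kChannels, kMaxFrames);

  float left[kChunkFrames] = {0.f};
  float right[kChunkFrames] = {0.f};
  const float* write_channels[] = {left, right};
  // Copies the deinterleaved chunk in and advances the write pointer.
  buffer.Write(write_channels, kChannels, kChunkFrames);

  float out_left[kChunkFrames];
  float out_right[kChunkFrames];
  float* read_channels[] = {out_left, out_right};
  // Reading more frames than are buffered would trip an RTC_CHECK, so guard it.
  if (buffer.ReadFramesAvailable() >= kChunkFrames)
    buffer.Read(read_channels, kChannels, kChunkFrames);
}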


@@ -1,113 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <memory>
#include <tuple>
#include "webrtc/modules/audio_processing/utility/audio_ring_buffer.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_audio/channel_buffer.h"
namespace webrtc {
class AudioRingBufferTest :
public ::testing::TestWithParam< ::testing::tuple<int, int, int, int> > {
};
void ReadAndWriteTest(const ChannelBuffer<float>& input,
size_t num_write_chunk_frames,
size_t num_read_chunk_frames,
size_t buffer_frames,
ChannelBuffer<float>* output) {
const size_t num_channels = input.num_channels();
const size_t total_frames = input.num_frames();
AudioRingBuffer buf(num_channels, buffer_frames);
std::unique_ptr<float* []> slice(new float*[num_channels]);
size_t input_pos = 0;
size_t output_pos = 0;
while (input_pos + buf.WriteFramesAvailable() < total_frames) {
// Write until the buffer is as full as possible.
while (buf.WriteFramesAvailable() >= num_write_chunk_frames) {
buf.Write(input.Slice(slice.get(), input_pos), num_channels,
num_write_chunk_frames);
input_pos += num_write_chunk_frames;
}
// Read until the buffer is as empty as possible.
while (buf.ReadFramesAvailable() >= num_read_chunk_frames) {
EXPECT_LT(output_pos, total_frames);
buf.Read(output->Slice(slice.get(), output_pos), num_channels,
num_read_chunk_frames);
output_pos += num_read_chunk_frames;
}
}
// Write and read the last bit.
if (input_pos < total_frames) {
buf.Write(input.Slice(slice.get(), input_pos), num_channels,
total_frames - input_pos);
}
if (buf.ReadFramesAvailable()) {
buf.Read(output->Slice(slice.get(), output_pos), num_channels,
buf.ReadFramesAvailable());
}
EXPECT_EQ(0u, buf.ReadFramesAvailable());
}
TEST_P(AudioRingBufferTest, ReadDataMatchesWrittenData) {
const size_t kFrames = 5000;
const size_t num_channels = ::testing::get<3>(GetParam());
// Initialize the input data to an increasing sequence.
ChannelBuffer<float> input(kFrames, static_cast<int>(num_channels));
for (size_t i = 0; i < num_channels; ++i)
for (size_t j = 0; j < kFrames; ++j)
input.channels()[i][j] = (i + 1) * (j + 1);
ChannelBuffer<float> output(kFrames, static_cast<int>(num_channels));
ReadAndWriteTest(input,
::testing::get<0>(GetParam()),
::testing::get<1>(GetParam()),
::testing::get<2>(GetParam()),
&output);
// Verify the read data matches the input.
for (size_t i = 0; i < num_channels; ++i)
for (size_t j = 0; j < kFrames; ++j)
EXPECT_EQ(input.channels()[i][j], output.channels()[i][j]);
}
INSTANTIATE_TEST_CASE_P(
AudioRingBufferTest, AudioRingBufferTest,
::testing::Combine(::testing::Values(10, 20, 42), // num_write_chunk_frames
::testing::Values(1, 10, 17), // num_read_chunk_frames
::testing::Values(100, 256), // buffer_frames
::testing::Values(1, 4))); // num_channels
TEST_F(AudioRingBufferTest, MoveReadPosition) {
const size_t kNumChannels = 1;
const float kInputArray[] = {1, 2, 3, 4};
const size_t kNumFrames = sizeof(kInputArray) / sizeof(*kInputArray);
ChannelBuffer<float> input(kNumFrames, kNumChannels);
input.SetDataForTesting(kInputArray, kNumFrames);
AudioRingBuffer buf(kNumChannels, kNumFrames);
buf.Write(input.channels(), kNumChannels, kNumFrames);
buf.MoveReadPositionForward(3);
ChannelBuffer<float> output(1, kNumChannels);
buf.Read(output.channels(), kNumChannels, 1);
EXPECT_EQ(4, output.channels()[0][0]);
buf.MoveReadPositionBackward(3);
buf.Read(output.channels(), kNumChannels, 1);
EXPECT_EQ(2, output.channels()[0][0]);
}
} // namespace webrtc


@@ -1,236 +0,0 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_processing/utility/blocker.h"
#include <string.h>
#include "webrtc/base/checks.h"
namespace {
// Adds |a| and |b| frame by frame into |result| (basically matrix addition).
void AddFrames(const float* const* a,
size_t a_start_index,
const float* const* b,
int b_start_index,
size_t num_frames,
size_t num_channels,
float* const* result,
size_t result_start_index) {
for (size_t i = 0; i < num_channels; ++i) {
for (size_t j = 0; j < num_frames; ++j) {
result[i][j + result_start_index] =
a[i][j + a_start_index] + b[i][j + b_start_index];
}
}
}
// Copies |src| into |dst| channel by channel.
void CopyFrames(const float* const* src,
size_t src_start_index,
size_t num_frames,
size_t num_channels,
float* const* dst,
size_t dst_start_index) {
for (size_t i = 0; i < num_channels; ++i) {
memcpy(&dst[i][dst_start_index],
&src[i][src_start_index],
num_frames * sizeof(dst[i][dst_start_index]));
}
}
// Moves |src| into |dst| channel by channel.
void MoveFrames(const float* const* src,
size_t src_start_index,
size_t num_frames,
size_t num_channels,
float* const* dst,
size_t dst_start_index) {
for (size_t i = 0; i < num_channels; ++i) {
memmove(&dst[i][dst_start_index],
&src[i][src_start_index],
num_frames * sizeof(dst[i][dst_start_index]));
}
}
void ZeroOut(float* const* buffer,
size_t starting_idx,
size_t num_frames,
size_t num_channels) {
for (size_t i = 0; i < num_channels; ++i) {
memset(&buffer[i][starting_idx], 0,
num_frames * sizeof(buffer[i][starting_idx]));
}
}
// Pointwise multiplies each channel of |frames| with |window|. Results are
// stored in |frames|.
void ApplyWindow(const float* window,
size_t num_frames,
size_t num_channels,
float* const* frames) {
for (size_t i = 0; i < num_channels; ++i) {
for (size_t j = 0; j < num_frames; ++j) {
frames[i][j] = frames[i][j] * window[j];
}
}
}
size_t gcd(size_t a, size_t b) {
size_t tmp;
while (b) {
tmp = a;
a = b;
b = tmp % b;
}
return a;
}
} // namespace
namespace webrtc {
Blocker::Blocker(size_t chunk_size,
size_t block_size,
size_t num_input_channels,
size_t num_output_channels,
const float* window,
size_t shift_amount,
BlockerCallback* callback)
: chunk_size_(chunk_size),
block_size_(block_size),
num_input_channels_(num_input_channels),
num_output_channels_(num_output_channels),
initial_delay_(block_size_ - gcd(chunk_size, shift_amount)),
frame_offset_(0),
input_buffer_(num_input_channels_, chunk_size_ + initial_delay_),
output_buffer_(chunk_size_ + initial_delay_, num_output_channels_),
input_block_(block_size_, num_input_channels_),
output_block_(block_size_, num_output_channels_),
window_(new float[block_size_]),
shift_amount_(shift_amount),
callback_(callback) {
RTC_CHECK_LE(num_output_channels_, num_input_channels_);
RTC_CHECK_LE(shift_amount_, block_size_);
memcpy(window_.get(), window, block_size_ * sizeof(*window_.get()));
input_buffer_.MoveReadPositionBackward(initial_delay_);
}
// When block_size < chunk_size the input and output buffers look like this:
//
// delay* chunk_size chunk_size + delay*
// buffer: <-------------|---------------------|---------------|>
// _a_ _b_ _c_
//
// On each call to ProcessChunk():
// 1. New input gets read into sections _b_ and _c_ of the input buffer.
// 2. We block starting from frame_offset.
// 3. We block until we reach a block |bl| that doesn't contain any frames
// from sections _a_ or _b_ of the input buffer.
// 4. We window the current block, fire the callback for processing, window
// again, and overlap/add to the output buffer.
// 5. We copy sections _a_ and _b_ of the output buffer into output.
// 6. For both the input and the output buffers, we copy section _c_ into
// section _a_.
// 7. We set the new frame_offset to be the difference between the first frame
// of |bl| and the border between sections _b_ and _c_.
//
// When block_size > chunk_size the input and output buffers look like this:
//
// chunk_size delay* chunk_size + delay*
// buffer: <-------------|---------------------|---------------|>
// _a_ _b_ _c_
//
// On each call to ProcessChunk():
// The procedure is the same as above, except for:
// 1. New input gets read into section _c_ of the input buffer.
// 3. We block until we reach a block |bl| that doesn't contain any frames
// from section _a_ of the input buffer.
// 5. We copy section _a_ of the output buffer into output.
// 6. For both the input and the output buffers, we copy sections _b_ and _c_
// into section _a_ and _b_.
// 7. We set the new frame_offset to be the difference between the first frame
// of |bl| and the border between sections _a_ and _b_.
//
// * delay here refers to initial_delay_
//
// TODO(claguna): Look at using ring buffers to eliminate some copies.
void Blocker::ProcessChunk(const float* const* input,
size_t chunk_size,
size_t num_input_channels,
size_t num_output_channels,
float* const* output) {
RTC_CHECK_EQ(chunk_size, chunk_size_);
RTC_CHECK_EQ(num_input_channels, num_input_channels_);
RTC_CHECK_EQ(num_output_channels, num_output_channels_);
input_buffer_.Write(input, num_input_channels, chunk_size_);
size_t first_frame_in_block = frame_offset_;
// Loop through blocks.
while (first_frame_in_block < chunk_size_) {
input_buffer_.Read(input_block_.channels(), num_input_channels,
block_size_);
input_buffer_.MoveReadPositionBackward(block_size_ - shift_amount_);
ApplyWindow(window_.get(),
block_size_,
num_input_channels_,
input_block_.channels());
callback_->ProcessBlock(input_block_.channels(),
block_size_,
num_input_channels_,
num_output_channels_,
output_block_.channels());
ApplyWindow(window_.get(),
block_size_,
num_output_channels_,
output_block_.channels());
AddFrames(output_buffer_.channels(),
first_frame_in_block,
output_block_.channels(),
0,
block_size_,
num_output_channels_,
output_buffer_.channels(),
first_frame_in_block);
first_frame_in_block += shift_amount_;
}
// Copy output buffer to output
CopyFrames(output_buffer_.channels(),
0,
chunk_size_,
num_output_channels_,
output,
0);
// Copy output buffer [chunk_size_, chunk_size_ + initial_delay]
// to output buffer [0, initial_delay], zero the rest.
MoveFrames(output_buffer_.channels(),
chunk_size,
initial_delay_,
num_output_channels_,
output_buffer_.channels(),
0);
ZeroOut(output_buffer_.channels(),
initial_delay_,
chunk_size_,
num_output_channels_);
// Calculate new starting frames.
frame_offset_ = first_frame_in_block - chunk_size_;
}
} // namespace webrtc


@@ -1,124 +0,0 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_BLOCKER_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_BLOCKER_H_
#include <memory>
#include "webrtc/common_audio/channel_buffer.h"
#include "webrtc/modules/audio_processing/utility/audio_ring_buffer.h"
namespace webrtc {
// The callback function to process audio in the time domain. Input has already
// been windowed, and output will be windowed. The number of input channels
// must be >= the number of output channels.
class BlockerCallback {
public:
virtual ~BlockerCallback() {}
virtual void ProcessBlock(const float* const* input,
size_t num_frames,
size_t num_input_channels,
size_t num_output_channels,
float* const* output) = 0;
};
// The main purpose of Blocker is to abstract away the fact that often we
// receive a different number of audio frames than our transform takes. For
// example, most FFTs work best when the fft-size is a power of 2, but suppose
// we receive 20ms of audio at a sample rate of 48000. That comes to 960 frames
// of audio, which is not a power of 2. Blocker allows us to specify the
// transform and all other necessary processing via the Process() callback
// function without any constraints on the transform-size
// (read: |block_size_|) or received-audio-size (read: |chunk_size_|).
// We handle this for the multichannel audio case, allowing for different
// numbers of input and output channels (for example, beamforming takes 2 or
// more input channels and returns 1 output channel). Audio signals are
// represented as deinterleaved floats in the range [-1, 1].
//
// Blocker is responsible for:
// - blocking audio while handling potential discontinuities on the edges
// of chunks
// - windowing blocks before sending them to Process()
// - windowing processed blocks, and overlap-adding them together before
// sending back a processed chunk
//
// To use blocker:
// 1. Implement a BlockerCallback object |bc|.
// 2. Instantiate a Blocker object |b|, passing in |bc|.
// 3. As you receive audio, call b.ProcessChunk() to get processed audio.
//
// A small amount of delay is added to the first received chunk to deal with
// the difference in chunk/block sizes. This delay is <= chunk_size.
//
// Ownership of window is retained by the caller. That is, Blocker makes a
// copy of window and does not attempt to delete it.
class Blocker {
public:
Blocker(size_t chunk_size,
size_t block_size,
size_t num_input_channels,
size_t num_output_channels,
const float* window,
size_t shift_amount,
BlockerCallback* callback);
void ProcessChunk(const float* const* input,
size_t chunk_size,
size_t num_input_channels,
size_t num_output_channels,
float* const* output);
private:
const size_t chunk_size_;
const size_t block_size_;
const size_t num_input_channels_;
const size_t num_output_channels_;
// The number of frames of delay to add at the beginning of the first chunk.
const size_t initial_delay_;
// The frame index into the input buffer where the first block should be read
// from. This is necessary because shift_amount_ is not necessarily a
// multiple of chunk_size_, so blocks won't line up at the start of the
// buffer.
size_t frame_offset_;
// Since blocks nearly always overlap, there are certain blocks that require
// frames from the end of one chunk and the beginning of the next chunk. The
// input and output buffers are responsible for saving those frames between
// calls to ProcessChunk().
//
// Both contain |initial delay| + |chunk_size| frames. The input is a fairly
// standard FIFO, but due to the overlap-add it's harder to use an
// AudioRingBuffer for the output.
AudioRingBuffer input_buffer_;
ChannelBuffer<float> output_buffer_;
// Space for the input block (can't wrap because of windowing).
ChannelBuffer<float> input_block_;
// Space for the output block (can't wrap because of overlap/add).
ChannelBuffer<float> output_block_;
std::unique_ptr<float[]> window_;
// The amount of frames between the start of contiguous blocks. For example,
// |shift_amount_| = |block_size_| / 2 for a Hann window.
size_t shift_amount_;
BlockerCallback* callback_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_BLOCKER_H_
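For reference, a hedged sketch of the usage pattern described in the header comment above: implement a BlockerCallback, construct a Blocker with a caller-owned window, and feed it one chunk at a time. The callback, window, and sizes are illustrative and not part of this CL.

#include <cstddef>
#include <vector>
// Sketch only: assumes the Blocker/BlockerCallback declarations above are in scope.

class HalfGainCallback : public webrtc::BlockerCallback {
 public:
  void ProcessBlock(const float* const* input,
                    size_t num_frames,
                    size_t num_input_channels,
                    size_t num_output_channels,
                    float* const* output) override {
    // Arbitrary per-block processing: attenuate by 6 dB.
    for (size_t i = 0; i < num_output_channels; ++i)
      for (size_t j = 0; j < num_frames; ++j)
        output[i][j] = 0.5f * input[i][j];
  }
};

void BlockerSketch(const float* const* in_chunk, float* const* out_chunk) {
  const size_t kChunkSize = 160;   // Frames received per ProcessChunk() call.
  const size_t kBlockSize = 128;   // Frames handed to the callback.
  const size_t kShiftAmount = 64;  // 50% overlap between blocks.
  std::vector<float> window(kBlockSize, 1.f);  // Caller-owned; Blocker copies it.
  HalfGainCallback callback;
  webrtc::Blocker blocker(kChunkSize, kBlockSize, /*num_input_channels=*/1,
                          /*num_output_channels=*/1, window.data(),
                          kShiftAmount, &callback);
  // One call per received chunk; the output lags by at most kChunkSize frames.
  blocker.ProcessChunk(in_chunk, kChunkSize, 1, 1, out_chunk);
}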


@@ -1,345 +0,0 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <memory>
#include "webrtc/modules/audio_processing/utility/blocker.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/arraysize.h"
namespace {
// Callback Function to add 3 to every sample in the signal.
class PlusThreeBlockerCallback : public webrtc::BlockerCallback {
public:
void ProcessBlock(const float* const* input,
size_t num_frames,
size_t num_input_channels,
size_t num_output_channels,
float* const* output) override {
for (size_t i = 0; i < num_output_channels; ++i) {
for (size_t j = 0; j < num_frames; ++j) {
output[i][j] = input[i][j] + 3;
}
}
}
};
// No-op Callback Function.
class CopyBlockerCallback : public webrtc::BlockerCallback {
public:
void ProcessBlock(const float* const* input,
size_t num_frames,
size_t num_input_channels,
size_t num_output_channels,
float* const* output) override {
for (size_t i = 0; i < num_output_channels; ++i) {
for (size_t j = 0; j < num_frames; ++j) {
output[i][j] = input[i][j];
}
}
}
};
} // namespace
namespace webrtc {
// Tests blocking with a window that multiplies the signal by 2, a callback
// that adds 3 to each sample in the signal, and different combinations of chunk
// size, block size, and shift amount.
class BlockerTest : public ::testing::Test {
protected:
void RunTest(Blocker* blocker,
size_t chunk_size,
size_t num_frames,
const float* const* input,
float* const* input_chunk,
float* const* output,
float* const* output_chunk,
size_t num_input_channels,
size_t num_output_channels) {
size_t start = 0;
size_t end = chunk_size - 1;
while (end < num_frames) {
CopyTo(input_chunk, 0, start, num_input_channels, chunk_size, input);
blocker->ProcessChunk(input_chunk,
chunk_size,
num_input_channels,
num_output_channels,
output_chunk);
CopyTo(output, start, 0, num_output_channels, chunk_size, output_chunk);
start += chunk_size;
end += chunk_size;
}
}
void ValidateSignalEquality(const float* const* expected,
const float* const* actual,
size_t num_channels,
size_t num_frames) {
for (size_t i = 0; i < num_channels; ++i) {
for (size_t j = 0; j < num_frames; ++j) {
EXPECT_FLOAT_EQ(expected[i][j], actual[i][j]);
}
}
}
void ValidateInitialDelay(const float* const* output,
size_t num_channels,
size_t num_frames,
size_t initial_delay) {
for (size_t i = 0; i < num_channels; ++i) {
for (size_t j = 0; j < num_frames; ++j) {
if (j < initial_delay) {
EXPECT_FLOAT_EQ(output[i][j], 0.f);
} else {
EXPECT_GT(output[i][j], 0.f);
}
}
}
}
static void CopyTo(float* const* dst,
size_t start_index_dst,
size_t start_index_src,
size_t num_channels,
size_t num_frames,
const float* const* src) {
for (size_t i = 0; i < num_channels; ++i) {
memcpy(&dst[i][start_index_dst],
&src[i][start_index_src],
num_frames * sizeof(float));
}
}
};
TEST_F(BlockerTest, TestBlockerMutuallyPrimeChunkandBlockSize) {
const size_t kNumInputChannels = 3;
const size_t kNumOutputChannels = 2;
const size_t kNumFrames = 10;
const size_t kBlockSize = 4;
const size_t kChunkSize = 5;
const size_t kShiftAmount = 2;
const float kInput[kNumInputChannels][kNumFrames] = {
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
{3, 3, 3, 3, 3, 3, 3, 3, 3, 3}};
ChannelBuffer<float> input_cb(kNumFrames, kNumInputChannels);
input_cb.SetDataForTesting(kInput[0], sizeof(kInput) / sizeof(**kInput));
const float kExpectedOutput[kNumInputChannels][kNumFrames] = {
{6, 6, 12, 20, 20, 20, 20, 20, 20, 20},
{6, 6, 12, 28, 28, 28, 28, 28, 28, 28}};
ChannelBuffer<float> expected_output_cb(kNumFrames, kNumInputChannels);
expected_output_cb.SetDataForTesting(
kExpectedOutput[0], sizeof(kExpectedOutput) / sizeof(**kExpectedOutput));
const float kWindow[kBlockSize] = {2.f, 2.f, 2.f, 2.f};
ChannelBuffer<float> actual_output_cb(kNumFrames, kNumOutputChannels);
ChannelBuffer<float> input_chunk_cb(kChunkSize, kNumInputChannels);
ChannelBuffer<float> output_chunk_cb(kChunkSize, kNumOutputChannels);
PlusThreeBlockerCallback callback;
Blocker blocker(kChunkSize,
kBlockSize,
kNumInputChannels,
kNumOutputChannels,
kWindow,
kShiftAmount,
&callback);
RunTest(&blocker,
kChunkSize,
kNumFrames,
input_cb.channels(),
input_chunk_cb.channels(),
actual_output_cb.channels(),
output_chunk_cb.channels(),
kNumInputChannels,
kNumOutputChannels);
ValidateSignalEquality(expected_output_cb.channels(),
actual_output_cb.channels(),
kNumOutputChannels,
kNumFrames);
}
TEST_F(BlockerTest, TestBlockerMutuallyPrimeShiftAndBlockSize) {
const size_t kNumInputChannels = 3;
const size_t kNumOutputChannels = 2;
const size_t kNumFrames = 12;
const size_t kBlockSize = 4;
const size_t kChunkSize = 6;
const size_t kShiftAmount = 3;
const float kInput[kNumInputChannels][kNumFrames] = {
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
{3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}};
ChannelBuffer<float> input_cb(kNumFrames, kNumInputChannels);
input_cb.SetDataForTesting(kInput[0], sizeof(kInput) / sizeof(**kInput));
const float kExpectedOutput[kNumOutputChannels][kNumFrames] = {
{6, 10, 10, 20, 10, 10, 20, 10, 10, 20, 10, 10},
{6, 14, 14, 28, 14, 14, 28, 14, 14, 28, 14, 14}};
ChannelBuffer<float> expected_output_cb(kNumFrames, kNumOutputChannels);
expected_output_cb.SetDataForTesting(
kExpectedOutput[0], sizeof(kExpectedOutput) / sizeof(**kExpectedOutput));
const float kWindow[kBlockSize] = {2.f, 2.f, 2.f, 2.f};
ChannelBuffer<float> actual_output_cb(kNumFrames, kNumOutputChannels);
ChannelBuffer<float> input_chunk_cb(kChunkSize, kNumInputChannels);
ChannelBuffer<float> output_chunk_cb(kChunkSize, kNumOutputChannels);
PlusThreeBlockerCallback callback;
Blocker blocker(kChunkSize,
kBlockSize,
kNumInputChannels,
kNumOutputChannels,
kWindow,
kShiftAmount,
&callback);
RunTest(&blocker,
kChunkSize,
kNumFrames,
input_cb.channels(),
input_chunk_cb.channels(),
actual_output_cb.channels(),
output_chunk_cb.channels(),
kNumInputChannels,
kNumOutputChannels);
ValidateSignalEquality(expected_output_cb.channels(),
actual_output_cb.channels(),
kNumOutputChannels,
kNumFrames);
}
TEST_F(BlockerTest, TestBlockerNoOverlap) {
const size_t kNumInputChannels = 3;
const size_t kNumOutputChannels = 2;
const size_t kNumFrames = 12;
const size_t kBlockSize = 4;
const size_t kChunkSize = 4;
const size_t kShiftAmount = 4;
const float kInput[kNumInputChannels][kNumFrames] = {
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
{3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}};
ChannelBuffer<float> input_cb(kNumFrames, kNumInputChannels);
input_cb.SetDataForTesting(kInput[0], sizeof(kInput) / sizeof(**kInput));
const float kExpectedOutput[kNumOutputChannels][kNumFrames] = {
{10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
{14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}};
ChannelBuffer<float> expected_output_cb(kNumFrames, kNumOutputChannels);
expected_output_cb.SetDataForTesting(
kExpectedOutput[0], sizeof(kExpectedOutput) / sizeof(**kExpectedOutput));
const float kWindow[kBlockSize] = {2.f, 2.f, 2.f, 2.f};
ChannelBuffer<float> actual_output_cb(kNumFrames, kNumOutputChannels);
ChannelBuffer<float> input_chunk_cb(kChunkSize, kNumInputChannels);
ChannelBuffer<float> output_chunk_cb(kChunkSize, kNumOutputChannels);
PlusThreeBlockerCallback callback;
Blocker blocker(kChunkSize,
kBlockSize,
kNumInputChannels,
kNumOutputChannels,
kWindow,
kShiftAmount,
&callback);
RunTest(&blocker,
kChunkSize,
kNumFrames,
input_cb.channels(),
input_chunk_cb.channels(),
actual_output_cb.channels(),
output_chunk_cb.channels(),
kNumInputChannels,
kNumOutputChannels);
ValidateSignalEquality(expected_output_cb.channels(),
actual_output_cb.channels(),
kNumOutputChannels,
kNumFrames);
}
TEST_F(BlockerTest, InitialDelaysAreMinimum) {
const size_t kNumInputChannels = 3;
const size_t kNumOutputChannels = 2;
const size_t kNumFrames = 1280;
const size_t kChunkSize[] =
{80, 80, 80, 80, 80, 80, 160, 160, 160, 160, 160, 160};
const size_t kBlockSize[] =
{64, 64, 64, 128, 128, 128, 128, 128, 128, 256, 256, 256};
const size_t kShiftAmount[] =
{16, 32, 64, 32, 64, 128, 32, 64, 128, 64, 128, 256};
const size_t kInitialDelay[] =
{48, 48, 48, 112, 112, 112, 96, 96, 96, 224, 224, 224};
float input[kNumInputChannels][kNumFrames];
for (size_t i = 0; i < kNumInputChannels; ++i) {
for (size_t j = 0; j < kNumFrames; ++j) {
input[i][j] = i + 1;
}
}
ChannelBuffer<float> input_cb(kNumFrames, kNumInputChannels);
input_cb.SetDataForTesting(input[0], sizeof(input) / sizeof(**input));
ChannelBuffer<float> output_cb(kNumFrames, kNumOutputChannels);
CopyBlockerCallback callback;
for (size_t i = 0; i < arraysize(kChunkSize); ++i) {
std::unique_ptr<float[]> window(new float[kBlockSize[i]]);
for (size_t j = 0; j < kBlockSize[i]; ++j) {
window[j] = 1.f;
}
ChannelBuffer<float> input_chunk_cb(kChunkSize[i], kNumInputChannels);
ChannelBuffer<float> output_chunk_cb(kChunkSize[i], kNumOutputChannels);
Blocker blocker(kChunkSize[i],
kBlockSize[i],
kNumInputChannels,
kNumOutputChannels,
window.get(),
kShiftAmount[i],
&callback);
RunTest(&blocker,
kChunkSize[i],
kNumFrames,
input_cb.channels(),
input_chunk_cb.channels(),
output_cb.channels(),
output_chunk_cb.channels(),
kNumInputChannels,
kNumOutputChannels);
ValidateInitialDelay(output_cb.channels(),
kNumOutputChannels,
kNumFrames,
kInitialDelay[i]);
}
}
} // namespace webrtc


@@ -1,102 +0,0 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_processing/utility/lapped_transform.h"
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include "webrtc/base/checks.h"
#include "webrtc/common_audio/real_fourier.h"
namespace webrtc {
void LappedTransform::BlockThunk::ProcessBlock(const float* const* input,
size_t num_frames,
size_t num_input_channels,
size_t num_output_channels,
float* const* output) {
RTC_CHECK_EQ(num_input_channels, parent_->num_in_channels_);
RTC_CHECK_EQ(num_output_channels, parent_->num_out_channels_);
RTC_CHECK_EQ(parent_->block_length_, num_frames);
for (size_t i = 0; i < num_input_channels; ++i) {
memcpy(parent_->real_buf_.Row(i), input[i],
num_frames * sizeof(*input[0]));
parent_->fft_->Forward(parent_->real_buf_.Row(i),
parent_->cplx_pre_.Row(i));
}
size_t block_length = RealFourier::ComplexLength(
RealFourier::FftOrder(num_frames));
RTC_CHECK_EQ(parent_->cplx_length_, block_length);
parent_->block_processor_->ProcessAudioBlock(parent_->cplx_pre_.Array(),
num_input_channels,
parent_->cplx_length_,
num_output_channels,
parent_->cplx_post_.Array());
for (size_t i = 0; i < num_output_channels; ++i) {
parent_->fft_->Inverse(parent_->cplx_post_.Row(i),
parent_->real_buf_.Row(i));
memcpy(output[i], parent_->real_buf_.Row(i),
num_frames * sizeof(*input[0]));
}
}
LappedTransform::LappedTransform(size_t num_in_channels,
size_t num_out_channels,
size_t chunk_length,
const float* window,
size_t block_length,
size_t shift_amount,
Callback* callback)
: blocker_callback_(this),
num_in_channels_(num_in_channels),
num_out_channels_(num_out_channels),
block_length_(block_length),
chunk_length_(chunk_length),
block_processor_(callback),
blocker_(chunk_length_,
block_length_,
num_in_channels_,
num_out_channels_,
window,
shift_amount,
&blocker_callback_),
fft_(rtc::ScopedToUnique(
RealFourier::Create(RealFourier::FftOrder(block_length_)))),
cplx_length_(RealFourier::ComplexLength(fft_->order())),
real_buf_(num_in_channels,
block_length_,
RealFourier::kFftBufferAlignment),
cplx_pre_(num_in_channels,
cplx_length_,
RealFourier::kFftBufferAlignment),
cplx_post_(num_out_channels,
cplx_length_,
RealFourier::kFftBufferAlignment) {
RTC_CHECK(num_in_channels_ > 0 && num_out_channels_ > 0);
RTC_CHECK_GT(block_length_, 0u);
RTC_CHECK_GT(chunk_length_, 0u);
RTC_CHECK(block_processor_);
// block_length_ power of 2?
RTC_CHECK_EQ(0u, block_length_ & (block_length_ - 1));
}
void LappedTransform::ProcessChunk(const float* const* in_chunk,
float* const* out_chunk) {
blocker_.ProcessChunk(in_chunk, chunk_length_, num_in_channels_,
num_out_channels_, out_chunk);
}
} // namespace webrtc


@@ -1,124 +0,0 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_LAPPED_TRANSFORM_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_LAPPED_TRANSFORM_H_
#include <complex>
#include <memory>
#include "webrtc/common_audio/real_fourier.h"
#include "webrtc/modules/audio_processing/utility/blocker.h"
#include "webrtc/system_wrappers/include/aligned_array.h"
namespace webrtc {
// Helper class for audio processing modules which operate on frequency domain
// input derived from the windowed time domain audio stream.
//
// The input audio chunk is sliced into possibly overlapping blocks, multiplied
// by a window and transformed with an FFT implementation. The transformed data
// is supplied to the given callback for processing. The processed output is
// then inverse transformed into the time domain and spliced back into a chunk
// which constitutes the final output of this processing module.
class LappedTransform {
public:
class Callback {
public:
virtual ~Callback() {}
virtual void ProcessAudioBlock(const std::complex<float>* const* in_block,
size_t num_in_channels, size_t frames,
size_t num_out_channels,
std::complex<float>* const* out_block) = 0;
};
// Construct a transform instance. |chunk_length| is the number of samples in
// each channel. |window| defines the window, owned by the caller (a copy is
// made internally); |window| should have length equal to |block_length|.
// |block_length| defines the length of a block, in samples.
// |shift_amount| is in samples. |callback| is the caller-owned audio
// processing function called for each block of the input chunk.
LappedTransform(size_t num_in_channels,
size_t num_out_channels,
size_t chunk_length,
const float* window,
size_t block_length,
size_t shift_amount,
Callback* callback);
~LappedTransform() {}
// Main audio processing helper method. Internally slices |in_chunk| into
// blocks, transforms them to frequency domain, calls the callback for each
// block and returns a de-blocked time domain chunk of audio through
// |out_chunk|. Both buffers are caller-owned.
void ProcessChunk(const float* const* in_chunk, float* const* out_chunk);
// Get the chunk length.
//
// The chunk length is the number of samples per channel that must be passed
// to ProcessChunk via the parameter in_chunk.
//
// Returns the same chunk_length passed to the LappedTransform constructor.
size_t chunk_length() const { return chunk_length_; }
// Get the number of input channels.
//
// This is the number of arrays that must be passed to ProcessChunk via
// in_chunk.
//
// Returns the same num_in_channels passed to the LappedTransform constructor.
size_t num_in_channels() const { return num_in_channels_; }
// Get the number of output channels.
//
// This is the number of arrays that must be passed to ProcessChunk via
// out_chunk.
//
// Returns the same num_out_channels passed to the LappedTransform
// constructor.
size_t num_out_channels() const { return num_out_channels_; }
private:
// Internal middleware callback, given to the blocker. Transforms each block
// and hands it over to the processing method given at construction time.
class BlockThunk : public BlockerCallback {
public:
explicit BlockThunk(LappedTransform* parent) : parent_(parent) {}
virtual void ProcessBlock(const float* const* input,
size_t num_frames,
size_t num_input_channels,
size_t num_output_channels,
float* const* output);
private:
LappedTransform* const parent_;
} blocker_callback_;
const size_t num_in_channels_;
const size_t num_out_channels_;
const size_t block_length_;
const size_t chunk_length_;
Callback* const block_processor_;
Blocker blocker_;
std::unique_ptr<RealFourier> fft_;
const size_t cplx_length_;
AlignedArray<float> real_buf_;
AlignedArray<std::complex<float> > cplx_pre_;
AlignedArray<std::complex<float> > cplx_post_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_LAPPED_TRANSFORM_H_
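For reference, a hedged sketch of driving LappedTransform with a trivial spectral callback (the callback, window, and sizes are illustrative, not part of this CL); the unit test below exercises the same pattern in more depth.

#include <cmath>
#include <complex>
#include <cstddef>
#include <vector>
// Sketch only: assumes the LappedTransform declaration above is in scope.

class HalfGainSpectralCallback : public webrtc::LappedTransform::Callback {
 public:
  void ProcessAudioBlock(const std::complex<float>* const* in_block,
                         size_t num_in_channels, size_t frames,
                         size_t num_out_channels,
                         std::complex<float>* const* out_block) override {
    // Any per-block spectral processing goes here; this just scales every bin.
    for (size_t i = 0; i < num_out_channels; ++i)
      for (size_t j = 0; j < frames; ++j)
        out_block[i][j] = 0.5f * in_block[i][j];
  }
};

void LappedTransformSketch(const float* const* in_chunk, float* const* out_chunk) {
  const size_t kChunkLength = 480;  // Samples per channel in each chunk.
  const size_t kBlockLength = 256;  // Must be a power of two.
  const size_t kShiftAmount = 128;  // 50% overlap.
  // A constant sqrt(0.5) window stands in for a real analysis/synthesis window.
  std::vector<float> window(kBlockLength, std::sqrt(0.5f));
  HalfGainSpectralCallback callback;
  webrtc::LappedTransform transform(/*num_in_channels=*/1,
                                    /*num_out_channels=*/1, kChunkLength,
                                    window.data(), kBlockLength, kShiftAmount,
                                    &callback);
  // Blocks, windows, FFTs, invokes the callback, and overlap-adds the result.
  transform.ProcessChunk(in_chunk, out_chunk);
}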


@@ -1,208 +0,0 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_processing/utility/lapped_transform.h"
#include <algorithm>
#include <cmath>
#include <cstring>
#include "testing/gtest/include/gtest/gtest.h"
using std::complex;
namespace {
class NoopCallback : public webrtc::LappedTransform::Callback {
public:
NoopCallback() : block_num_(0) {}
virtual void ProcessAudioBlock(const complex<float>* const* in_block,
size_t in_channels,
size_t frames,
size_t out_channels,
complex<float>* const* out_block) {
RTC_CHECK_EQ(in_channels, out_channels);
for (size_t i = 0; i < out_channels; ++i) {
memcpy(out_block[i], in_block[i], sizeof(**in_block) * frames);
}
++block_num_;
}
size_t block_num() {
return block_num_;
}
private:
size_t block_num_;
};
class FftCheckerCallback : public webrtc::LappedTransform::Callback {
public:
FftCheckerCallback() : block_num_(0) {}
virtual void ProcessAudioBlock(const complex<float>* const* in_block,
size_t in_channels,
size_t frames,
size_t out_channels,
complex<float>* const* out_block) {
RTC_CHECK_EQ(in_channels, out_channels);
size_t full_length = (frames - 1) * 2;
++block_num_;
if (block_num_ > 0) {
ASSERT_NEAR(in_block[0][0].real(), static_cast<float>(full_length),
1e-5f);
ASSERT_NEAR(in_block[0][0].imag(), 0.0f, 1e-5f);
for (size_t i = 1; i < frames; ++i) {
ASSERT_NEAR(in_block[0][i].real(), 0.0f, 1e-5f);
ASSERT_NEAR(in_block[0][i].imag(), 0.0f, 1e-5f);
}
}
}
size_t block_num() {
return block_num_;
}
private:
size_t block_num_;
};
void SetFloatArray(float value, int rows, int cols, float* const* array) {
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
array[i][j] = value;
}
}
}
} // namespace
namespace webrtc {
TEST(LappedTransformTest, Windowless) {
const size_t kChannels = 3;
const size_t kChunkLength = 512;
const size_t kBlockLength = 64;
const size_t kShiftAmount = 64;
NoopCallback noop;
// Rectangular window.
float window[kBlockLength];
std::fill(window, &window[kBlockLength], 1.0f);
LappedTransform trans(kChannels, kChannels, kChunkLength, window,
kBlockLength, kShiftAmount, &noop);
float in_buffer[kChannels][kChunkLength];
float* in_chunk[kChannels];
float out_buffer[kChannels][kChunkLength];
float* out_chunk[kChannels];
in_chunk[0] = in_buffer[0];
in_chunk[1] = in_buffer[1];
in_chunk[2] = in_buffer[2];
out_chunk[0] = out_buffer[0];
out_chunk[1] = out_buffer[1];
out_chunk[2] = out_buffer[2];
SetFloatArray(2.0f, kChannels, kChunkLength, in_chunk);
SetFloatArray(-1.0f, kChannels, kChunkLength, out_chunk);
trans.ProcessChunk(in_chunk, out_chunk);
for (size_t i = 0; i < kChannels; ++i) {
for (size_t j = 0; j < kChunkLength; ++j) {
ASSERT_NEAR(out_chunk[i][j], 2.0f, 1e-5f);
}
}
ASSERT_EQ(kChunkLength / kBlockLength, noop.block_num());
}
TEST(LappedTransformTest, IdentityProcessor) {
const size_t kChunkLength = 512;
const size_t kBlockLength = 64;
const size_t kShiftAmount = 32;
NoopCallback noop;
// Identity window for |overlap = block_size / 2|.
float window[kBlockLength];
std::fill(window, &window[kBlockLength], std::sqrt(0.5f));
LappedTransform trans(1, 1, kChunkLength, window, kBlockLength, kShiftAmount,
&noop);
float in_buffer[kChunkLength];
float* in_chunk = in_buffer;
float out_buffer[kChunkLength];
float* out_chunk = out_buffer;
SetFloatArray(2.0f, 1, kChunkLength, &in_chunk);
SetFloatArray(-1.0f, 1, kChunkLength, &out_chunk);
trans.ProcessChunk(&in_chunk, &out_chunk);
for (size_t i = 0; i < kChunkLength; ++i) {
ASSERT_NEAR(out_chunk[i],
(i < kBlockLength - kShiftAmount) ? 0.0f : 2.0f,
1e-5f);
}
ASSERT_EQ(kChunkLength / kShiftAmount, noop.block_num());
}
TEST(LappedTransformTest, Callbacks) {
const size_t kChunkLength = 512;
const size_t kBlockLength = 64;
FftCheckerCallback call;
// Rectangular window.
float window[kBlockLength];
std::fill(window, &window[kBlockLength], 1.0f);
LappedTransform trans(1, 1, kChunkLength, window, kBlockLength,
kBlockLength, &call);
float in_buffer[kChunkLength];
float* in_chunk = in_buffer;
float out_buffer[kChunkLength];
float* out_chunk = out_buffer;
SetFloatArray(1.0f, 1, kChunkLength, &in_chunk);
SetFloatArray(-1.0f, 1, kChunkLength, &out_chunk);
trans.ProcessChunk(&in_chunk, &out_chunk);
ASSERT_EQ(kChunkLength / kBlockLength, call.block_num());
}
TEST(LappedTransformTest, chunk_length) {
const size_t kBlockLength = 64;
FftCheckerCallback call;
const float window[kBlockLength] = {};
// Make sure that chunk_length returns the same value passed to the
// LappedTransform constructor.
{
const size_t kExpectedChunkLength = 512;
const LappedTransform trans(1, 1, kExpectedChunkLength, window,
kBlockLength, kBlockLength, &call);
EXPECT_EQ(kExpectedChunkLength, trans.chunk_length());
}
{
const size_t kExpectedChunkLength = 160;
const LappedTransform trans(1, 1, kExpectedChunkLength, window,
kBlockLength, kBlockLength, &call);
EXPECT_EQ(kExpectedChunkLength, trans.chunk_length());
}
}
} // namespace webrtc


@@ -1,247 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// A ring buffer to hold arbitrary data. Provides no thread safety. Unless
// otherwise specified, functions return 0 on success and -1 on error.
#include "webrtc/modules/audio_processing/utility/ring_buffer.h"
#include <stddef.h> // size_t
#include <stdlib.h>
#include <string.h>
enum Wrap {
SAME_WRAP,
DIFF_WRAP
};
struct RingBuffer {
size_t read_pos;
size_t write_pos;
size_t element_count;
size_t element_size;
enum Wrap rw_wrap;
char* data;
};
// Get address of region(s) from which we can read data.
// If the region is contiguous, |data_ptr_bytes_2| will be zero.
// If non-contiguous, |data_ptr_bytes_2| will be the size in bytes of the second
// region. Returns room available to be read or |element_count|, whichever is
// smaller.
static size_t GetBufferReadRegions(RingBuffer* buf,
size_t element_count,
void** data_ptr_1,
size_t* data_ptr_bytes_1,
void** data_ptr_2,
size_t* data_ptr_bytes_2) {
const size_t readable_elements = WebRtc_available_read(buf);
const size_t read_elements = (readable_elements < element_count ?
readable_elements : element_count);
const size_t margin = buf->element_count - buf->read_pos;
// Check to see if read is not contiguous.
if (read_elements > margin) {
// Write data in two blocks that wrap the buffer.
*data_ptr_1 = buf->data + buf->read_pos * buf->element_size;
*data_ptr_bytes_1 = margin * buf->element_size;
*data_ptr_2 = buf->data;
*data_ptr_bytes_2 = (read_elements - margin) * buf->element_size;
} else {
*data_ptr_1 = buf->data + buf->read_pos * buf->element_size;
*data_ptr_bytes_1 = read_elements * buf->element_size;
*data_ptr_2 = NULL;
*data_ptr_bytes_2 = 0;
}
return read_elements;
}
RingBuffer* WebRtc_CreateBuffer(size_t element_count, size_t element_size) {
RingBuffer* self = NULL;
if (element_count == 0 || element_size == 0) {
return NULL;
}
self = malloc(sizeof(RingBuffer));
if (!self) {
return NULL;
}
self->data = malloc(element_count * element_size);
if (!self->data) {
free(self);
self = NULL;
return NULL;
}
self->element_count = element_count;
self->element_size = element_size;
WebRtc_InitBuffer(self);
return self;
}
void WebRtc_InitBuffer(RingBuffer* self) {
self->read_pos = 0;
self->write_pos = 0;
self->rw_wrap = SAME_WRAP;
// Initialize buffer to zeros
memset(self->data, 0, self->element_count * self->element_size);
}
void WebRtc_FreeBuffer(void* handle) {
RingBuffer* self = (RingBuffer*)handle;
if (!self) {
return;
}
free(self->data);
free(self);
}
size_t WebRtc_ReadBuffer(RingBuffer* self,
void** data_ptr,
void* data,
size_t element_count) {
if (self == NULL) {
return 0;
}
if (data == NULL) {
return 0;
}
{
void* buf_ptr_1 = NULL;
void* buf_ptr_2 = NULL;
size_t buf_ptr_bytes_1 = 0;
size_t buf_ptr_bytes_2 = 0;
const size_t read_count = GetBufferReadRegions(self,
element_count,
&buf_ptr_1,
&buf_ptr_bytes_1,
&buf_ptr_2,
&buf_ptr_bytes_2);
if (buf_ptr_bytes_2 > 0) {
// We have a wrap around when reading the buffer. Copy the buffer data to
// |data| and point to it.
memcpy(data, buf_ptr_1, buf_ptr_bytes_1);
memcpy(((char*) data) + buf_ptr_bytes_1, buf_ptr_2, buf_ptr_bytes_2);
buf_ptr_1 = data;
} else if (!data_ptr) {
// No wrap, but a memcpy was requested.
memcpy(data, buf_ptr_1, buf_ptr_bytes_1);
}
if (data_ptr) {
// |buf_ptr_1| == |data| in the case of a wrap.
*data_ptr = buf_ptr_1;
}
// Update read position
WebRtc_MoveReadPtr(self, (int) read_count);
return read_count;
}
}
size_t WebRtc_WriteBuffer(RingBuffer* self,
const void* data,
size_t element_count) {
if (!self) {
return 0;
}
if (!data) {
return 0;
}
{
const size_t free_elements = WebRtc_available_write(self);
const size_t write_elements = (free_elements < element_count ? free_elements
: element_count);
size_t n = write_elements;
const size_t margin = self->element_count - self->write_pos;
if (write_elements > margin) {
// Buffer wrap around when writing.
memcpy(self->data + self->write_pos * self->element_size,
data, margin * self->element_size);
self->write_pos = 0;
n -= margin;
self->rw_wrap = DIFF_WRAP;
}
memcpy(self->data + self->write_pos * self->element_size,
((const char*) data) + ((write_elements - n) * self->element_size),
n * self->element_size);
self->write_pos += n;
return write_elements;
}
}
int WebRtc_MoveReadPtr(RingBuffer* self, int element_count) {
if (!self) {
return 0;
}
{
// We need to be able to take care of negative changes, hence use "int"
// instead of "size_t".
const int free_elements = (int) WebRtc_available_write(self);
const int readable_elements = (int) WebRtc_available_read(self);
int read_pos = (int) self->read_pos;
if (element_count > readable_elements) {
element_count = readable_elements;
}
if (element_count < -free_elements) {
element_count = -free_elements;
}
read_pos += element_count;
if (read_pos > (int) self->element_count) {
// Buffer wrap around. Restart read position and wrap indicator.
read_pos -= (int) self->element_count;
self->rw_wrap = SAME_WRAP;
}
if (read_pos < 0) {
// Buffer wrap around. Restart read position and wrap indicator.
read_pos += (int) self->element_count;
self->rw_wrap = DIFF_WRAP;
}
self->read_pos = (size_t) read_pos;
return element_count;
}
}
size_t WebRtc_available_read(const RingBuffer* self) {
if (!self) {
return 0;
}
if (self->rw_wrap == SAME_WRAP) {
return self->write_pos - self->read_pos;
} else {
return self->element_count - self->read_pos + self->write_pos;
}
}
size_t WebRtc_available_write(const RingBuffer* self) {
if (!self) {
return 0;
}
return self->element_count - WebRtc_available_read(self);
}


@@ -1,66 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// A ring buffer to hold arbitrary data. Provides no thread safety. Unless
// otherwise specified, functions return 0 on success and -1 on error.
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_RING_BUFFER_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_RING_BUFFER_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <stddef.h> // size_t
typedef struct RingBuffer RingBuffer;
// Creates and initializes the buffer. Returns NULL on failure.
RingBuffer* WebRtc_CreateBuffer(size_t element_count, size_t element_size);
void WebRtc_InitBuffer(RingBuffer* handle);
void WebRtc_FreeBuffer(void* handle);
// Reads data from the buffer. The |data_ptr| will point to the address where
// it is located. If all |element_count| data are feasible to read without
// buffer wrap around |data_ptr| will point to the location in the buffer.
// Otherwise, the data will be copied to |data| (memory allocation done by the
// user) and |data_ptr| points to the address of |data|. |data_ptr| is only
// guaranteed to be valid until the next call to WebRtc_WriteBuffer().
//
// To force a copying to |data|, pass a NULL |data_ptr|.
//
// Returns number of elements read.
size_t WebRtc_ReadBuffer(RingBuffer* handle,
void** data_ptr,
void* data,
size_t element_count);
// Writes |data| to buffer and returns the number of elements written.
size_t WebRtc_WriteBuffer(RingBuffer* handle, const void* data,
size_t element_count);
// Moves the buffer read position and returns the number of elements moved.
// Positive |element_count| moves the read position towards the write position,
// that is, flushing the buffer. Negative |element_count| moves the read
// position away from the write position, that is, stuffing the buffer.
// Returns number of elements moved.
int WebRtc_MoveReadPtr(RingBuffer* handle, int element_count);
// Returns number of available elements to read.
size_t WebRtc_available_read(const RingBuffer* handle);
// Returns number of available elements for write.
size_t WebRtc_available_write(const RingBuffer* handle);
#ifdef __cplusplus
}
#endif
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_RING_BUFFER_H_
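For reference, a hedged sketch of the C interface above as used from a C++ caller (sizes are illustrative, not part of this CL). It shows the zero-copy read path described in the comment: when the requested region does not wrap, |data_ptr| points straight into the buffer and |data| is left untouched.

#include <cstddef>
// Sketch only: assumes the ring_buffer.h declarations above are in scope.

void RingBufferSketch() {
  // 256 float-sized elements; WebRtc_CreateBuffer also initializes the buffer.
  RingBuffer* buffer = WebRtc_CreateBuffer(256, sizeof(float));
  if (buffer == NULL)
    return;

  float samples[64] = {0.f};
  const size_t written = WebRtc_WriteBuffer(buffer, samples, 64);

  float scratch[64];
  void* data_ptr = NULL;
  const size_t read = WebRtc_ReadBuffer(buffer, &data_ptr, scratch, written);
  // No wrap occurred here, so |data_ptr| aliases the internal storage; it is
  // only guaranteed valid until the next WebRtc_WriteBuffer() call.
  const float* view = static_cast<const float*>(data_ptr);
  (void)view;
  (void)read;

  WebRtc_FreeBuffer(buffer);
}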


@@ -1,150 +0,0 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_processing/utility/ring_buffer.h"
#include <stdlib.h>
#include <time.h>
#include <algorithm>
#include <memory>
#include "testing/gtest/include/gtest/gtest.h"
namespace webrtc {
struct FreeBufferDeleter {
inline void operator()(void* ptr) const {
WebRtc_FreeBuffer(ptr);
}
};
typedef std::unique_ptr<RingBuffer, FreeBufferDeleter> scoped_ring_buffer;
static void AssertElementEq(int expected, int actual) {
ASSERT_EQ(expected, actual);
}
static int SetIncrementingData(int* data, int num_elements,
int starting_value) {
for (int i = 0; i < num_elements; i++) {
data[i] = starting_value++;
}
return starting_value;
}
static int CheckIncrementingData(int* data, int num_elements,
int starting_value) {
for (int i = 0; i < num_elements; i++) {
AssertElementEq(starting_value++, data[i]);
}
return starting_value;
}
// We use ASSERTs in this test to avoid obscuring the seed in the case of a
// failure.
static void RandomStressTest(int** data_ptr) {
const int kNumTests = 10;
const int kNumOps = 1000;
const int kMaxBufferSize = 1000;
unsigned int seed = time(NULL);
printf("seed=%u\n", seed);
srand(seed);
for (int i = 0; i < kNumTests; i++) {
const int buffer_size = std::max(rand() % kMaxBufferSize, 1);
std::unique_ptr<int[]> write_data(new int[buffer_size]);
std::unique_ptr<int[]> read_data(new int[buffer_size]);
scoped_ring_buffer buffer(WebRtc_CreateBuffer(buffer_size, sizeof(int)));
ASSERT_TRUE(buffer.get() != NULL);
WebRtc_InitBuffer(buffer.get());
int buffer_consumed = 0;
int write_element = 0;
int read_element = 0;
for (int j = 0; j < kNumOps; j++) {
const bool write = rand() % 2 == 0 ? true : false;
const int num_elements = rand() % buffer_size;
if (write) {
const int buffer_available = buffer_size - buffer_consumed;
ASSERT_EQ(static_cast<size_t>(buffer_available),
WebRtc_available_write(buffer.get()));
const int expected_elements = std::min(num_elements, buffer_available);
write_element = SetIncrementingData(write_data.get(), expected_elements,
write_element);
ASSERT_EQ(static_cast<size_t>(expected_elements),
WebRtc_WriteBuffer(buffer.get(), write_data.get(),
num_elements));
buffer_consumed = std::min(buffer_consumed + expected_elements,
buffer_size);
} else {
const int expected_elements = std::min(num_elements,
buffer_consumed);
ASSERT_EQ(static_cast<size_t>(buffer_consumed),
WebRtc_available_read(buffer.get()));
ASSERT_EQ(static_cast<size_t>(expected_elements),
WebRtc_ReadBuffer(buffer.get(),
reinterpret_cast<void**>(data_ptr),
read_data.get(),
num_elements));
int* check_ptr = read_data.get();
if (data_ptr) {
check_ptr = *data_ptr;
}
read_element = CheckIncrementingData(check_ptr, expected_elements,
read_element);
buffer_consumed = std::max(buffer_consumed - expected_elements, 0);
}
}
}
}
TEST(RingBufferTest, RandomStressTest) {
int* data_ptr = NULL;
RandomStressTest(&data_ptr);
}
TEST(RingBufferTest, RandomStressTestWithNullPtr) {
RandomStressTest(NULL);
}
TEST(RingBufferTest, PassingNulltoReadBufferForcesMemcpy) {
const size_t kDataSize = 2;
int write_data[kDataSize];
int read_data[kDataSize];
int* data_ptr;
scoped_ring_buffer buffer(WebRtc_CreateBuffer(kDataSize, sizeof(int)));
ASSERT_TRUE(buffer.get() != NULL);
WebRtc_InitBuffer(buffer.get());
SetIncrementingData(write_data, kDataSize, 0);
EXPECT_EQ(kDataSize, WebRtc_WriteBuffer(buffer.get(), write_data, kDataSize));
SetIncrementingData(read_data, kDataSize, kDataSize);
EXPECT_EQ(kDataSize, WebRtc_ReadBuffer(buffer.get(),
reinterpret_cast<void**>(&data_ptr), read_data, kDataSize));
// Copying was not necessary, so |read_data| has not been updated.
CheckIncrementingData(data_ptr, kDataSize, 0);
CheckIncrementingData(read_data, kDataSize, kDataSize);
EXPECT_EQ(kDataSize, WebRtc_WriteBuffer(buffer.get(), write_data, kDataSize));
EXPECT_EQ(kDataSize, WebRtc_ReadBuffer(buffer.get(), NULL, read_data,
kDataSize));
// Passing NULL forces a memcpy, so |read_data| is now updated.
CheckIncrementingData(read_data, kDataSize, 0);
}
TEST(RingBufferTest, CreateHandlesErrors) {
EXPECT_TRUE(WebRtc_CreateBuffer(0, 1) == NULL);
EXPECT_TRUE(WebRtc_CreateBuffer(1, 0) == NULL);
RingBuffer* buffer = WebRtc_CreateBuffer(1, 1);
EXPECT_TRUE(buffer != NULL);
WebRtc_FreeBuffer(buffer);
}
} // namespace webrtc