Moving LappedTransform, Blocker and AudioRingBuffer.

LappedTransform is only used in BandwidthAdaptationTest, so it should no
longer be a visible target under common_audio.
This CL moves LappedTransform and the two other classes it depends on (which
are not used elsewhere) to modules/audio_coding/codecs/opus/test.

Bug: webrtc:9577, webrtc:5298
Change-Id: I1aa8052c2df2b2b150c279c0c9b1001474aed47a
Reviewed-on: https://webrtc-review.googlesource.com/96440
Commit-Queue: Alessio Bazzica <alessiob@webrtc.org>
Reviewed-by: Alex Loiko <aleloi@webrtc.org>
Reviewed-by: Henrik Lundin <henrik.lundin@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#24509}
Author: Alessio Bazzica
Date: 2018-08-31 10:41:37 +02:00
Committed by: Commit Bot
Parent: 8a3c166fff
Commit: d4161a3c9d
14 changed files with 78 additions and 27 deletions


@@ -2093,6 +2093,8 @@ if (rtc_include_tests) {
"../../test:rtp_test_utils",
"../../test:test_common",
"../../test:test_support",
"codecs/opus/test",
"codecs/opus/test:test_unittest",
"//testing/gtest",
"//third_party/abseil-cpp/absl/memory",
]


@@ -11,8 +11,8 @@
#include "api/audio_codecs/opus/audio_decoder_opus.h"
#include "api/audio_codecs/opus/audio_encoder_opus.h"
#include "common_audio/include/audio_util.h"
#include "common_audio/lapped_transform.h"
#include "common_audio/window_generator.h"
#include "modules/audio_coding/codecs/opus/test/lapped_transform.h"
#include "modules/audio_coding/neteq/tools/audio_loop.h"
#include "test/field_trial.h"
#include "test/gtest.h"


@@ -0,0 +1,55 @@
# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../../../webrtc.gni")
visibility = [
":*",
"../../../:*",
]
if (rtc_include_tests) {
rtc_static_library("test") {
testonly = true
sources = [
"audio_ring_buffer.cc",
"audio_ring_buffer.h",
"blocker.cc",
"blocker.h",
"lapped_transform.cc",
"lapped_transform.h",
]
deps = [
"../../../../../common_audio:common_audio",
"../../../../../common_audio:common_audio_c",
"../../../../../rtc_base:checks",
"../../../../../rtc_base/memory:aligned_array",
]
}
rtc_source_set("test_unittest") {
testonly = true
sources = [
"audio_ring_buffer_unittest.cc",
"blocker_unittest.cc",
"lapped_transform_unittest.cc",
]
deps = [
":test",
"../../../../../common_audio:common_audio",
"../../../../../common_audio:common_audio_c",
"../../../../../rtc_base:rtc_base_approved",
"../../../../../test:test_support",
"//testing/gtest",
]
}
}


@@ -0,0 +1,76 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/opus/test/audio_ring_buffer.h"
#include "common_audio/ring_buffer.h"
#include "rtc_base/checks.h"
// This is a simple multi-channel wrapper over the ring_buffer.h C interface.
namespace webrtc {
AudioRingBuffer::AudioRingBuffer(size_t channels, size_t max_frames) {
buffers_.reserve(channels);
for (size_t i = 0; i < channels; ++i)
buffers_.push_back(WebRtc_CreateBuffer(max_frames, sizeof(float)));
}
AudioRingBuffer::~AudioRingBuffer() {
for (auto* buf : buffers_)
WebRtc_FreeBuffer(buf);
}
void AudioRingBuffer::Write(const float* const* data,
size_t channels,
size_t frames) {
RTC_DCHECK_EQ(buffers_.size(), channels);
for (size_t i = 0; i < channels; ++i) {
const size_t written = WebRtc_WriteBuffer(buffers_[i], data[i], frames);
RTC_CHECK_EQ(written, frames);
}
}
void AudioRingBuffer::Read(float* const* data, size_t channels, size_t frames) {
RTC_DCHECK_EQ(buffers_.size(), channels);
for (size_t i = 0; i < channels; ++i) {
const size_t read =
WebRtc_ReadBuffer(buffers_[i], nullptr, data[i], frames);
RTC_CHECK_EQ(read, frames);
}
}
size_t AudioRingBuffer::ReadFramesAvailable() const {
// All buffers have the same amount available.
return WebRtc_available_read(buffers_[0]);
}
size_t AudioRingBuffer::WriteFramesAvailable() const {
// All buffers have the same amount available.
return WebRtc_available_write(buffers_[0]);
}
void AudioRingBuffer::MoveReadPositionForward(size_t frames) {
for (auto* buf : buffers_) {
const size_t moved =
static_cast<size_t>(WebRtc_MoveReadPtr(buf, static_cast<int>(frames)));
RTC_CHECK_EQ(moved, frames);
}
}
void AudioRingBuffer::MoveReadPositionBackward(size_t frames) {
for (auto* buf : buffers_) {
const size_t moved = static_cast<size_t>(
-WebRtc_MoveReadPtr(buf, -static_cast<int>(frames)));
RTC_CHECK_EQ(moved, frames);
}
}
} // namespace webrtc


@@ -0,0 +1,57 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_TEST_AUDIO_RING_BUFFER_H_
#define MODULES_AUDIO_CODING_CODECS_OPUS_TEST_AUDIO_RING_BUFFER_H_
#include <stddef.h>
#include <memory>
#include <vector>
struct RingBuffer;
namespace webrtc {
// A ring buffer tailored for float deinterleaved audio. Any operation that
// cannot be performed as requested will cause a crash (e.g. insufficient data
// in the buffer to fulfill a read request).
class AudioRingBuffer final {
public:
// Specify the number of channels and maximum number of frames the buffer will
// contain.
AudioRingBuffer(size_t channels, size_t max_frames);
~AudioRingBuffer();
// Copies |data| to the buffer and advances the write pointer. |channels| must
// be the same as at creation time.
void Write(const float* const* data, size_t channels, size_t frames);
// Copies from the buffer to |data| and advances the read pointer. |channels|
// must be the same as at creation time.
void Read(float* const* data, size_t channels, size_t frames);
size_t ReadFramesAvailable() const;
size_t WriteFramesAvailable() const;
// Moves the read position. The forward version advances the read pointer
// towards the write pointer and the backward version withdraws the read
// pointer away from the write pointer (i.e. flushing and stuffing the buffer
// respectively).
void MoveReadPositionForward(size_t frames);
void MoveReadPositionBackward(size_t frames);
private:
// TODO(kwiberg): Use std::vector<std::unique_ptr<RingBuffer>> instead.
std::vector<RingBuffer*> buffers_;
};
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_CODECS_OPUS_TEST_AUDIO_RING_BUFFER_H_
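For illustration only (this sketch is not part of the CL): a minimal, hypothetical caller of the AudioRingBuffer API documented above, assuming two channels and arbitrary frame counts.

// Hypothetical usage sketch; not part of the CL.
#include <cstddef>
#include <vector>

#include "modules/audio_coding/codecs/opus/test/audio_ring_buffer.h"

void AudioRingBufferSketch() {
  constexpr size_t kChannels = 2;
  constexpr size_t kMaxFrames = 480;
  webrtc::AudioRingBuffer buffer(kChannels, kMaxFrames);

  // Deinterleaved input: one pointer per channel.
  std::vector<float> left(160, 0.5f);
  std::vector<float> right(160, -0.5f);
  const float* input[kChannels] = {left.data(), right.data()};
  buffer.Write(input, kChannels, 160);  // Crashes if the buffer would overflow.

  // Read back fewer frames than were written; the rest stays buffered.
  std::vector<float> out_left(80);
  std::vector<float> out_right(80);
  float* output[kChannels] = {out_left.data(), out_right.data()};
  buffer.Read(output, kChannels, 80);

  // Discard the remaining 80 frames by advancing the read position.
  buffer.MoveReadPositionForward(buffer.ReadFramesAvailable());
}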


@@ -0,0 +1,111 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <memory>
#include "modules/audio_coding/codecs/opus/test/audio_ring_buffer.h"
#include "common_audio/channel_buffer.h"
#include "test/gtest.h"
namespace webrtc {
class AudioRingBufferTest
: public ::testing::TestWithParam< ::testing::tuple<int, int, int, int> > {
};
void ReadAndWriteTest(const ChannelBuffer<float>& input,
size_t num_write_chunk_frames,
size_t num_read_chunk_frames,
size_t buffer_frames,
ChannelBuffer<float>* output) {
const size_t num_channels = input.num_channels();
const size_t total_frames = input.num_frames();
AudioRingBuffer buf(num_channels, buffer_frames);
std::unique_ptr<float* []> slice(new float*[num_channels]);
size_t input_pos = 0;
size_t output_pos = 0;
while (input_pos + buf.WriteFramesAvailable() < total_frames) {
// Write until the buffer is as full as possible.
while (buf.WriteFramesAvailable() >= num_write_chunk_frames) {
buf.Write(input.Slice(slice.get(), input_pos), num_channels,
num_write_chunk_frames);
input_pos += num_write_chunk_frames;
}
// Read until the buffer is as empty as possible.
while (buf.ReadFramesAvailable() >= num_read_chunk_frames) {
EXPECT_LT(output_pos, total_frames);
buf.Read(output->Slice(slice.get(), output_pos), num_channels,
num_read_chunk_frames);
output_pos += num_read_chunk_frames;
}
}
// Write and read the last bit.
if (input_pos < total_frames) {
buf.Write(input.Slice(slice.get(), input_pos), num_channels,
total_frames - input_pos);
}
if (buf.ReadFramesAvailable()) {
buf.Read(output->Slice(slice.get(), output_pos), num_channels,
buf.ReadFramesAvailable());
}
EXPECT_EQ(0u, buf.ReadFramesAvailable());
}
TEST_P(AudioRingBufferTest, ReadDataMatchesWrittenData) {
const size_t kFrames = 5000;
const size_t num_channels = ::testing::get<3>(GetParam());
// Initialize the input data to an increasing sequence.
ChannelBuffer<float> input(kFrames, static_cast<int>(num_channels));
for (size_t i = 0; i < num_channels; ++i)
for (size_t j = 0; j < kFrames; ++j)
input.channels()[i][j] = (i + 1) * (j + 1);
ChannelBuffer<float> output(kFrames, static_cast<int>(num_channels));
ReadAndWriteTest(input, ::testing::get<0>(GetParam()),
::testing::get<1>(GetParam()), ::testing::get<2>(GetParam()),
&output);
// Verify the read data matches the input.
for (size_t i = 0; i < num_channels; ++i)
for (size_t j = 0; j < kFrames; ++j)
EXPECT_EQ(input.channels()[i][j], output.channels()[i][j]);
}
INSTANTIATE_TEST_CASE_P(
AudioRingBufferTest,
AudioRingBufferTest,
::testing::Combine(::testing::Values(10, 20, 42), // num_write_chunk_frames
::testing::Values(1, 10, 17), // num_read_chunk_frames
::testing::Values(100, 256), // buffer_frames
::testing::Values(1, 4))); // num_channels
TEST_F(AudioRingBufferTest, MoveReadPosition) {
const size_t kNumChannels = 1;
const float kInputArray[] = {1, 2, 3, 4};
const size_t kNumFrames = sizeof(kInputArray) / sizeof(*kInputArray);
ChannelBuffer<float> input(kNumFrames, kNumChannels);
input.SetDataForTesting(kInputArray, kNumFrames);
AudioRingBuffer buf(kNumChannels, kNumFrames);
buf.Write(input.channels(), kNumChannels, kNumFrames);
buf.MoveReadPositionForward(3);
ChannelBuffer<float> output(1, kNumChannels);
buf.Read(output.channels(), kNumChannels, 1);
EXPECT_EQ(4, output.channels()[0][0]);
buf.MoveReadPositionBackward(3);
buf.Read(output.channels(), kNumChannels, 1);
EXPECT_EQ(2, output.channels()[0][0]);
}
} // namespace webrtc


@@ -0,0 +1,215 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/opus/test/blocker.h"
#include <string.h>
#include "rtc_base/checks.h"
namespace {
// Adds |a| and |b| frame by frame into |result| (basically matrix addition).
void AddFrames(const float* const* a,
size_t a_start_index,
const float* const* b,
int b_start_index,
size_t num_frames,
size_t num_channels,
float* const* result,
size_t result_start_index) {
for (size_t i = 0; i < num_channels; ++i) {
for (size_t j = 0; j < num_frames; ++j) {
result[i][j + result_start_index] =
a[i][j + a_start_index] + b[i][j + b_start_index];
}
}
}
// Copies |src| into |dst| channel by channel.
void CopyFrames(const float* const* src,
size_t src_start_index,
size_t num_frames,
size_t num_channels,
float* const* dst,
size_t dst_start_index) {
for (size_t i = 0; i < num_channels; ++i) {
memcpy(&dst[i][dst_start_index], &src[i][src_start_index],
num_frames * sizeof(dst[i][dst_start_index]));
}
}
// Moves |src| into |dst| channel by channel.
void MoveFrames(const float* const* src,
size_t src_start_index,
size_t num_frames,
size_t num_channels,
float* const* dst,
size_t dst_start_index) {
for (size_t i = 0; i < num_channels; ++i) {
memmove(&dst[i][dst_start_index], &src[i][src_start_index],
num_frames * sizeof(dst[i][dst_start_index]));
}
}
void ZeroOut(float* const* buffer,
size_t starting_idx,
size_t num_frames,
size_t num_channels) {
for (size_t i = 0; i < num_channels; ++i) {
memset(&buffer[i][starting_idx], 0,
num_frames * sizeof(buffer[i][starting_idx]));
}
}
// Pointwise multiplies each channel of |frames| with |window|. Results are
// stored in |frames|.
void ApplyWindow(const float* window,
size_t num_frames,
size_t num_channels,
float* const* frames) {
for (size_t i = 0; i < num_channels; ++i) {
for (size_t j = 0; j < num_frames; ++j) {
frames[i][j] = frames[i][j] * window[j];
}
}
}
size_t gcd(size_t a, size_t b) {
size_t tmp;
while (b) {
tmp = a;
a = b;
b = tmp % b;
}
return a;
}
} // namespace
namespace webrtc {
Blocker::Blocker(size_t chunk_size,
size_t block_size,
size_t num_input_channels,
size_t num_output_channels,
const float* window,
size_t shift_amount,
BlockerCallback* callback)
: chunk_size_(chunk_size),
block_size_(block_size),
num_input_channels_(num_input_channels),
num_output_channels_(num_output_channels),
initial_delay_(block_size_ - gcd(chunk_size, shift_amount)),
frame_offset_(0),
input_buffer_(num_input_channels_, chunk_size_ + initial_delay_),
output_buffer_(chunk_size_ + initial_delay_, num_output_channels_),
input_block_(block_size_, num_input_channels_),
output_block_(block_size_, num_output_channels_),
window_(new float[block_size_]),
shift_amount_(shift_amount),
callback_(callback) {
RTC_CHECK_LE(num_output_channels_, num_input_channels_);
RTC_CHECK_LE(shift_amount_, block_size_);
memcpy(window_.get(), window, block_size_ * sizeof(*window_.get()));
input_buffer_.MoveReadPositionBackward(initial_delay_);
}
Blocker::~Blocker() = default;
// When block_size < chunk_size the input and output buffers look like this:
//
// delay* chunk_size chunk_size + delay*
// buffer: <-------------|---------------------|---------------|>
// _a_ _b_ _c_
//
// On each call to ProcessChunk():
// 1. New input gets read into sections _b_ and _c_ of the input buffer.
// 2. We block starting from frame_offset.
// 3. We block until we reach a block |bl| that doesn't contain any frames
// from sections _a_ or _b_ of the input buffer.
// 4. We window the current block, fire the callback for processing, window
// again, and overlap/add to the output buffer.
// 5. We copy sections _a_ and _b_ of the output buffer into output.
// 6. For both the input and the output buffers, we copy section _c_ into
// section _a_.
// 7. We set the new frame_offset to be the difference between the first frame
// of |bl| and the border between sections _b_ and _c_.
//
// When block_size > chunk_size the input and output buffers look like this:
//
// chunk_size delay* chunk_size + delay*
// buffer: <-------------|---------------------|---------------|>
// _a_ _b_ _c_
//
// On each call to ProcessChunk():
// The procedure is the same as above, except for:
// 1. New input gets read into section _c_ of the input buffer.
// 3. We block until we reach a block |bl| that doesn't contain any frames
// from section _a_ of the input buffer.
// 5. We copy section _a_ of the output buffer into output.
// 6. For both the input and the output buffers, we copy sections _b_ and _c_
// into section _a_ and _b_.
// 7. We set the new frame_offset to be the difference between the first frame
// of |bl| and the border between sections _a_ and _b_.
//
// * delay here refers to initial_delay_
//
// TODO(claguna): Look at using ring buffers to eliminate some copies.
void Blocker::ProcessChunk(const float* const* input,
size_t chunk_size,
size_t num_input_channels,
size_t num_output_channels,
float* const* output) {
RTC_CHECK_EQ(chunk_size, chunk_size_);
RTC_CHECK_EQ(num_input_channels, num_input_channels_);
RTC_CHECK_EQ(num_output_channels, num_output_channels_);
input_buffer_.Write(input, num_input_channels, chunk_size_);
size_t first_frame_in_block = frame_offset_;
// Loop through blocks.
while (first_frame_in_block < chunk_size_) {
input_buffer_.Read(input_block_.channels(), num_input_channels,
block_size_);
input_buffer_.MoveReadPositionBackward(block_size_ - shift_amount_);
ApplyWindow(window_.get(), block_size_, num_input_channels_,
input_block_.channels());
callback_->ProcessBlock(input_block_.channels(), block_size_,
num_input_channels_, num_output_channels_,
output_block_.channels());
ApplyWindow(window_.get(), block_size_, num_output_channels_,
output_block_.channels());
AddFrames(output_buffer_.channels(), first_frame_in_block,
output_block_.channels(), 0, block_size_, num_output_channels_,
output_buffer_.channels(), first_frame_in_block);
first_frame_in_block += shift_amount_;
}
// Copy output buffer to output
CopyFrames(output_buffer_.channels(), 0, chunk_size_, num_output_channels_,
output, 0);
// Copy output buffer [chunk_size_, chunk_size_ + initial_delay]
// to output buffer [0, initial_delay], zero the rest.
MoveFrames(output_buffer_.channels(), chunk_size, initial_delay_,
num_output_channels_, output_buffer_.channels(), 0);
ZeroOut(output_buffer_.channels(), initial_delay_, chunk_size_,
num_output_channels_);
// Calculate new starting frames.
frame_offset_ = first_frame_in_block - chunk_size_;
}
} // namespace webrtc
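For illustration only (not part of the CL): the constructor above derives initial_delay_ as block_size_ - gcd(chunk_size, shift_amount). A quick sanity check of that formula against three of the cases later exercised by BlockerTest.InitialDelaysAreMinimum, using C++17's std::gcd in place of the file-local helper:

// Illustrative check; not part of the CL.
#include <cassert>
#include <cstddef>
#include <numeric>  // std::gcd (C++17).

void InitialDelaySketch() {
  auto initial_delay = [](size_t chunk_size, size_t block_size,
                          size_t shift_amount) {
    return block_size - std::gcd(chunk_size, shift_amount);
  };
  assert(initial_delay(80, 64, 16) == 48);     // gcd(80, 16) = 16.
  assert(initial_delay(80, 128, 32) == 112);   // gcd(80, 32) = 16.
  assert(initial_delay(160, 256, 64) == 224);  // gcd(160, 64) = 32.
}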


@@ -0,0 +1,127 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_TEST_BLOCKER_H_
#define MODULES_AUDIO_CODING_CODECS_OPUS_TEST_BLOCKER_H_
#include <memory>
#include "common_audio/channel_buffer.h"
#include "modules/audio_coding/codecs/opus/test/audio_ring_buffer.h"
namespace webrtc {
// The callback function to process audio in the time domain. Input has already
// been windowed, and output will be windowed. The number of input channels
// must be >= the number of output channels.
class BlockerCallback {
public:
virtual ~BlockerCallback() {}
virtual void ProcessBlock(const float* const* input,
size_t num_frames,
size_t num_input_channels,
size_t num_output_channels,
float* const* output) = 0;
};
// The main purpose of Blocker is to abstract away the fact that often we
// receive a different number of audio frames than our transform takes. For
// example, most FFTs work best when the fft-size is a power of 2, but suppose
// we receive 20ms of audio at a sample rate of 48000. That comes to 960 frames
// of audio, which is not a power of 2. Blocker allows us to specify the
// transform and all other necessary processing via the Process() callback
// function without any constraints on the transform-size
// (read: |block_size_|) or received-audio-size (read: |chunk_size_|).
// We handle this for the multichannel audio case, allowing for different
// numbers of input and output channels (for example, beamforming takes 2 or
// more input channels and returns 1 output channel). Audio signals are
// represented as deinterleaved floats in the range [-1, 1].
//
// Blocker is responsible for:
// - blocking audio while handling potential discontinuities on the edges
// of chunks
// - windowing blocks before sending them to Process()
// - windowing processed blocks, and overlap-adding them together before
// sending back a processed chunk
//
// To use blocker:
// 1. Implement a BlockerCallback object |bc|.
// 2. Instantiate a Blocker object |b|, passing in |bc|.
// 3. As you receive audio, call b.ProcessChunk() to get processed audio.
//
// A small amount of delay is added to the first received chunk to deal with
// the difference in chunk/block sizes. This delay is <= chunk_size.
//
// Ownership of window is retained by the caller. That is, Blocker makes a
// copy of window and does not attempt to delete it.
class Blocker {
public:
Blocker(size_t chunk_size,
size_t block_size,
size_t num_input_channels,
size_t num_output_channels,
const float* window,
size_t shift_amount,
BlockerCallback* callback);
~Blocker();
void ProcessChunk(const float* const* input,
size_t chunk_size,
size_t num_input_channels,
size_t num_output_channels,
float* const* output);
size_t initial_delay() const { return initial_delay_; }
private:
const size_t chunk_size_;
const size_t block_size_;
const size_t num_input_channels_;
const size_t num_output_channels_;
// The number of frames of delay to add at the beginning of the first chunk.
const size_t initial_delay_;
// The frame index into the input buffer where the first block should be read
// from. This is necessary because shift_amount_ is not necessarily a
// multiple of chunk_size_, so blocks won't line up at the start of the
// buffer.
size_t frame_offset_;
// Since blocks nearly always overlap, there are certain blocks that require
// frames from the end of one chunk and the beginning of the next chunk. The
// input and output buffers are responsible for saving those frames between
// calls to ProcessChunk().
//
// Both contain |initial delay| + |chunk_size| frames. The input is a fairly
// standard FIFO, but due to the overlap-add it's harder to use an
// AudioRingBuffer for the output.
AudioRingBuffer input_buffer_;
ChannelBuffer<float> output_buffer_;
// Space for the input block (can't wrap because of windowing).
ChannelBuffer<float> input_block_;
// Space for the output block (can't wrap because of overlap/add).
ChannelBuffer<float> output_block_;
std::unique_ptr<float[]> window_;
// The amount of frames between the start of contiguous blocks. For example,
// |shift_amount_| = |block_size_| / 2 for a Hann window.
size_t shift_amount_;
BlockerCallback* callback_;
};
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_CODECS_OPUS_TEST_BLOCKER_H_
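For illustration only (not part of the CL): a hypothetical caller following the three usage steps listed in the class comment above, with a gain-of-two BlockerCallback and a rectangular window.

// Hypothetical usage sketch; not part of the CL.
#include <cstddef>
#include <vector>

#include "modules/audio_coding/codecs/opus/test/blocker.h"

namespace {

// Step 1: implement a BlockerCallback that processes one windowed block.
class GainCallback : public webrtc::BlockerCallback {
 public:
  void ProcessBlock(const float* const* input,
                    size_t num_frames,
                    size_t num_input_channels,
                    size_t num_output_channels,
                    float* const* output) override {
    for (size_t ch = 0; ch < num_output_channels; ++ch) {
      for (size_t i = 0; i < num_frames; ++i) {
        output[ch][i] = 2.f * input[ch][i];
      }
    }
  }
};

void BlockerSketch() {
  constexpr size_t kChunkSize = 480;    // E.g. 10 ms at 48 kHz.
  constexpr size_t kBlockSize = 256;
  constexpr size_t kShiftAmount = 128;  // 50% block overlap.
  // Rectangular window for simplicity; a real caller would typically pick a
  // window whose squared overlap-add sums to one (cf. the unit tests below).
  std::vector<float> window(kBlockSize, 1.f);

  // Step 2: instantiate a Blocker, passing in the callback.
  GainCallback callback;
  webrtc::Blocker blocker(kChunkSize, kBlockSize, /*num_input_channels=*/1,
                          /*num_output_channels=*/1, window.data(),
                          kShiftAmount, &callback);

  // Step 3: feed chunks as they arrive.
  std::vector<float> in(kChunkSize, 0.25f);
  std::vector<float> out(kChunkSize);
  float* in_ptr = in.data();
  float* out_ptr = out.data();
  blocker.ProcessChunk(&in_ptr, kChunkSize, 1, 1, &out_ptr);
  // The first blocker.initial_delay() output frames are zero.
}

}  // namespace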


@@ -0,0 +1,293 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <memory>
#include "modules/audio_coding/codecs/opus/test/blocker.h"
#include "rtc_base/arraysize.h"
#include "test/gtest.h"
namespace {
// Callback Function to add 3 to every sample in the signal.
class PlusThreeBlockerCallback : public webrtc::BlockerCallback {
public:
void ProcessBlock(const float* const* input,
size_t num_frames,
size_t num_input_channels,
size_t num_output_channels,
float* const* output) override {
for (size_t i = 0; i < num_output_channels; ++i) {
for (size_t j = 0; j < num_frames; ++j) {
output[i][j] = input[i][j] + 3;
}
}
}
};
// No-op Callback Function.
class CopyBlockerCallback : public webrtc::BlockerCallback {
public:
void ProcessBlock(const float* const* input,
size_t num_frames,
size_t num_input_channels,
size_t num_output_channels,
float* const* output) override {
for (size_t i = 0; i < num_output_channels; ++i) {
for (size_t j = 0; j < num_frames; ++j) {
output[i][j] = input[i][j];
}
}
}
};
} // namespace
namespace webrtc {
// Tests blocking with a window that multiplies the signal by 2, a callback
// that adds 3 to each sample in the signal, and different combinations of chunk
// size, block size, and shift amount.
class BlockerTest : public ::testing::Test {
protected:
void RunTest(Blocker* blocker,
size_t chunk_size,
size_t num_frames,
const float* const* input,
float* const* input_chunk,
float* const* output,
float* const* output_chunk,
size_t num_input_channels,
size_t num_output_channels) {
size_t start = 0;
size_t end = chunk_size - 1;
while (end < num_frames) {
CopyTo(input_chunk, 0, start, num_input_channels, chunk_size, input);
blocker->ProcessChunk(input_chunk, chunk_size, num_input_channels,
num_output_channels, output_chunk);
CopyTo(output, start, 0, num_output_channels, chunk_size, output_chunk);
start += chunk_size;
end += chunk_size;
}
}
void ValidateSignalEquality(const float* const* expected,
const float* const* actual,
size_t num_channels,
size_t num_frames) {
for (size_t i = 0; i < num_channels; ++i) {
for (size_t j = 0; j < num_frames; ++j) {
EXPECT_FLOAT_EQ(expected[i][j], actual[i][j]);
}
}
}
void ValidateInitialDelay(const float* const* output,
size_t num_channels,
size_t num_frames,
size_t initial_delay) {
for (size_t i = 0; i < num_channels; ++i) {
for (size_t j = 0; j < num_frames; ++j) {
if (j < initial_delay) {
EXPECT_FLOAT_EQ(output[i][j], 0.f);
} else {
EXPECT_GT(output[i][j], 0.f);
}
}
}
}
static void CopyTo(float* const* dst,
size_t start_index_dst,
size_t start_index_src,
size_t num_channels,
size_t num_frames,
const float* const* src) {
for (size_t i = 0; i < num_channels; ++i) {
memcpy(&dst[i][start_index_dst], &src[i][start_index_src],
num_frames * sizeof(float));
}
}
};
TEST_F(BlockerTest, TestBlockerMutuallyPrimeChunkandBlockSize) {
const size_t kNumInputChannels = 3;
const size_t kNumOutputChannels = 2;
const size_t kNumFrames = 10;
const size_t kBlockSize = 4;
const size_t kChunkSize = 5;
const size_t kShiftAmount = 2;
const float kInput[kNumInputChannels][kNumFrames] = {
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
{3, 3, 3, 3, 3, 3, 3, 3, 3, 3}};
ChannelBuffer<float> input_cb(kNumFrames, kNumInputChannels);
input_cb.SetDataForTesting(kInput[0], sizeof(kInput) / sizeof(**kInput));
const float kExpectedOutput[kNumInputChannels][kNumFrames] = {
{6, 6, 12, 20, 20, 20, 20, 20, 20, 20},
{6, 6, 12, 28, 28, 28, 28, 28, 28, 28}};
ChannelBuffer<float> expected_output_cb(kNumFrames, kNumInputChannels);
expected_output_cb.SetDataForTesting(
kExpectedOutput[0], sizeof(kExpectedOutput) / sizeof(**kExpectedOutput));
const float kWindow[kBlockSize] = {2.f, 2.f, 2.f, 2.f};
ChannelBuffer<float> actual_output_cb(kNumFrames, kNumOutputChannels);
ChannelBuffer<float> input_chunk_cb(kChunkSize, kNumInputChannels);
ChannelBuffer<float> output_chunk_cb(kChunkSize, kNumOutputChannels);
PlusThreeBlockerCallback callback;
Blocker blocker(kChunkSize, kBlockSize, kNumInputChannels, kNumOutputChannels,
kWindow, kShiftAmount, &callback);
RunTest(&blocker, kChunkSize, kNumFrames, input_cb.channels(),
input_chunk_cb.channels(), actual_output_cb.channels(),
output_chunk_cb.channels(), kNumInputChannels, kNumOutputChannels);
ValidateSignalEquality(expected_output_cb.channels(),
actual_output_cb.channels(), kNumOutputChannels,
kNumFrames);
}
TEST_F(BlockerTest, TestBlockerMutuallyPrimeShiftAndBlockSize) {
const size_t kNumInputChannels = 3;
const size_t kNumOutputChannels = 2;
const size_t kNumFrames = 12;
const size_t kBlockSize = 4;
const size_t kChunkSize = 6;
const size_t kShiftAmount = 3;
const float kInput[kNumInputChannels][kNumFrames] = {
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
{3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}};
ChannelBuffer<float> input_cb(kNumFrames, kNumInputChannels);
input_cb.SetDataForTesting(kInput[0], sizeof(kInput) / sizeof(**kInput));
const float kExpectedOutput[kNumOutputChannels][kNumFrames] = {
{6, 10, 10, 20, 10, 10, 20, 10, 10, 20, 10, 10},
{6, 14, 14, 28, 14, 14, 28, 14, 14, 28, 14, 14}};
ChannelBuffer<float> expected_output_cb(kNumFrames, kNumOutputChannels);
expected_output_cb.SetDataForTesting(
kExpectedOutput[0], sizeof(kExpectedOutput) / sizeof(**kExpectedOutput));
const float kWindow[kBlockSize] = {2.f, 2.f, 2.f, 2.f};
ChannelBuffer<float> actual_output_cb(kNumFrames, kNumOutputChannels);
ChannelBuffer<float> input_chunk_cb(kChunkSize, kNumInputChannels);
ChannelBuffer<float> output_chunk_cb(kChunkSize, kNumOutputChannels);
PlusThreeBlockerCallback callback;
Blocker blocker(kChunkSize, kBlockSize, kNumInputChannels, kNumOutputChannels,
kWindow, kShiftAmount, &callback);
RunTest(&blocker, kChunkSize, kNumFrames, input_cb.channels(),
input_chunk_cb.channels(), actual_output_cb.channels(),
output_chunk_cb.channels(), kNumInputChannels, kNumOutputChannels);
ValidateSignalEquality(expected_output_cb.channels(),
actual_output_cb.channels(), kNumOutputChannels,
kNumFrames);
}
TEST_F(BlockerTest, TestBlockerNoOverlap) {
const size_t kNumInputChannels = 3;
const size_t kNumOutputChannels = 2;
const size_t kNumFrames = 12;
const size_t kBlockSize = 4;
const size_t kChunkSize = 4;
const size_t kShiftAmount = 4;
const float kInput[kNumInputChannels][kNumFrames] = {
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
{3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}};
ChannelBuffer<float> input_cb(kNumFrames, kNumInputChannels);
input_cb.SetDataForTesting(kInput[0], sizeof(kInput) / sizeof(**kInput));
const float kExpectedOutput[kNumOutputChannels][kNumFrames] = {
{10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
{14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}};
ChannelBuffer<float> expected_output_cb(kNumFrames, kNumOutputChannels);
expected_output_cb.SetDataForTesting(
kExpectedOutput[0], sizeof(kExpectedOutput) / sizeof(**kExpectedOutput));
const float kWindow[kBlockSize] = {2.f, 2.f, 2.f, 2.f};
ChannelBuffer<float> actual_output_cb(kNumFrames, kNumOutputChannels);
ChannelBuffer<float> input_chunk_cb(kChunkSize, kNumInputChannels);
ChannelBuffer<float> output_chunk_cb(kChunkSize, kNumOutputChannels);
PlusThreeBlockerCallback callback;
Blocker blocker(kChunkSize, kBlockSize, kNumInputChannels, kNumOutputChannels,
kWindow, kShiftAmount, &callback);
RunTest(&blocker, kChunkSize, kNumFrames, input_cb.channels(),
input_chunk_cb.channels(), actual_output_cb.channels(),
output_chunk_cb.channels(), kNumInputChannels, kNumOutputChannels);
ValidateSignalEquality(expected_output_cb.channels(),
actual_output_cb.channels(), kNumOutputChannels,
kNumFrames);
}
TEST_F(BlockerTest, InitialDelaysAreMinimum) {
const size_t kNumInputChannels = 3;
const size_t kNumOutputChannels = 2;
const size_t kNumFrames = 1280;
const size_t kChunkSize[] = {80, 80, 80, 80, 80, 80,
160, 160, 160, 160, 160, 160};
const size_t kBlockSize[] = {64, 64, 64, 128, 128, 128,
128, 128, 128, 256, 256, 256};
const size_t kShiftAmount[] = {16, 32, 64, 32, 64, 128,
32, 64, 128, 64, 128, 256};
const size_t kInitialDelay[] = {48, 48, 48, 112, 112, 112,
96, 96, 96, 224, 224, 224};
float input[kNumInputChannels][kNumFrames];
for (size_t i = 0; i < kNumInputChannels; ++i) {
for (size_t j = 0; j < kNumFrames; ++j) {
input[i][j] = i + 1;
}
}
ChannelBuffer<float> input_cb(kNumFrames, kNumInputChannels);
input_cb.SetDataForTesting(input[0], sizeof(input) / sizeof(**input));
ChannelBuffer<float> output_cb(kNumFrames, kNumOutputChannels);
CopyBlockerCallback callback;
for (size_t i = 0; i < arraysize(kChunkSize); ++i) {
std::unique_ptr<float[]> window(new float[kBlockSize[i]]);
for (size_t j = 0; j < kBlockSize[i]; ++j) {
window[j] = 1.f;
}
ChannelBuffer<float> input_chunk_cb(kChunkSize[i], kNumInputChannels);
ChannelBuffer<float> output_chunk_cb(kChunkSize[i], kNumOutputChannels);
Blocker blocker(kChunkSize[i], kBlockSize[i], kNumInputChannels,
kNumOutputChannels, window.get(), kShiftAmount[i],
&callback);
RunTest(&blocker, kChunkSize[i], kNumFrames, input_cb.channels(),
input_chunk_cb.channels(), output_cb.channels(),
output_chunk_cb.channels(), kNumInputChannels, kNumOutputChannels);
ValidateInitialDelay(output_cb.channels(), kNumOutputChannels, kNumFrames,
kInitialDelay[i]);
}
}
} // namespace webrtc


@@ -0,0 +1,100 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/opus/test/lapped_transform.h"
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include "common_audio/real_fourier.h"
#include "rtc_base/checks.h"
namespace webrtc {
void LappedTransform::BlockThunk::ProcessBlock(const float* const* input,
size_t num_frames,
size_t num_input_channels,
size_t num_output_channels,
float* const* output) {
RTC_CHECK_EQ(num_input_channels, parent_->num_in_channels_);
RTC_CHECK_EQ(num_output_channels, parent_->num_out_channels_);
RTC_CHECK_EQ(parent_->block_length_, num_frames);
for (size_t i = 0; i < num_input_channels; ++i) {
memcpy(parent_->real_buf_.Row(i), input[i], num_frames * sizeof(*input[0]));
parent_->fft_->Forward(parent_->real_buf_.Row(i),
parent_->cplx_pre_.Row(i));
}
size_t block_length =
RealFourier::ComplexLength(RealFourier::FftOrder(num_frames));
RTC_CHECK_EQ(parent_->cplx_length_, block_length);
parent_->block_processor_->ProcessAudioBlock(
parent_->cplx_pre_.Array(), num_input_channels, parent_->cplx_length_,
num_output_channels, parent_->cplx_post_.Array());
for (size_t i = 0; i < num_output_channels; ++i) {
parent_->fft_->Inverse(parent_->cplx_post_.Row(i),
parent_->real_buf_.Row(i));
memcpy(output[i], parent_->real_buf_.Row(i),
num_frames * sizeof(*input[0]));
}
}
LappedTransform::LappedTransform(size_t num_in_channels,
size_t num_out_channels,
size_t chunk_length,
const float* window,
size_t block_length,
size_t shift_amount,
Callback* callback)
: blocker_callback_(this),
num_in_channels_(num_in_channels),
num_out_channels_(num_out_channels),
block_length_(block_length),
chunk_length_(chunk_length),
block_processor_(callback),
blocker_(chunk_length_,
block_length_,
num_in_channels_,
num_out_channels_,
window,
shift_amount,
&blocker_callback_),
fft_(RealFourier::Create(RealFourier::FftOrder(block_length_))),
cplx_length_(RealFourier::ComplexLength(fft_->order())),
real_buf_(num_in_channels,
block_length_,
RealFourier::kFftBufferAlignment),
cplx_pre_(num_in_channels,
cplx_length_,
RealFourier::kFftBufferAlignment),
cplx_post_(num_out_channels,
cplx_length_,
RealFourier::kFftBufferAlignment) {
RTC_CHECK(num_in_channels_ > 0);
RTC_CHECK_GT(block_length_, 0);
RTC_CHECK_GT(chunk_length_, 0);
RTC_CHECK(block_processor_);
// Verify that block_length_ is a power of two.
RTC_CHECK_EQ(0, block_length_ & (block_length_ - 1));
}
LappedTransform::~LappedTransform() = default;
void LappedTransform::ProcessChunk(const float* const* in_chunk,
float* const* out_chunk) {
blocker_.ProcessChunk(in_chunk, chunk_length_, num_in_channels_,
num_out_channels_, out_chunk);
}
} // namespace webrtc
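For illustration only (not part of the CL): since block_length_ must be a power of two, the spectrum handed to ProcessAudioBlock has block_length_ / 2 + 1 bins, which is what the FftCheckerCallback in lapped_transform_unittest.cc inverts via full_length = (frames - 1) * 2. A small check of that relationship for a 64-sample block, assuming the RealFourier helpers behave as they are used above:

// Illustrative check; not part of the CL.
#include "common_audio/real_fourier.h"
#include "rtc_base/checks.h"

void ComplexLengthSketch() {
  const auto order = webrtc::RealFourier::FftOrder(64);  // log2(64) == 6.
  const auto bins = webrtc::RealFourier::ComplexLength(order);
  RTC_CHECK_EQ(6, order);
  RTC_CHECK_EQ(33, bins);  // 64 / 2 + 1 complex bins for a real-input FFT.
}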


@@ -0,0 +1,132 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_TEST_LAPPED_TRANSFORM_H_
#define MODULES_AUDIO_CODING_CODECS_OPUS_TEST_LAPPED_TRANSFORM_H_
#include <complex>
#include <memory>
#include "common_audio/real_fourier.h"
#include "modules/audio_coding/codecs/opus/test/blocker.h"
#include "rtc_base/memory/aligned_array.h"
namespace webrtc {
// Helper class for audio processing modules which operate on frequency domain
// input derived from the windowed time domain audio stream.
//
// The input audio chunk is sliced into possibly overlapping blocks, multiplied
// by a window and transformed with an FFT implementation. The transformed data
// is supplied to the given callback for processing. The processed output is
// then inverse transformed into the time domain and spliced back into a chunk
// which constitutes the final output of this processing module.
class LappedTransform {
public:
class Callback {
public:
virtual ~Callback() {}
virtual void ProcessAudioBlock(const std::complex<float>* const* in_block,
size_t num_in_channels,
size_t frames,
size_t num_out_channels,
std::complex<float>* const* out_block) = 0;
};
// Construct a transform instance. |chunk_length| is the number of samples in
// each channel. |window| defines the window, owned by the caller (a copy is
// made internally); |window| should have length equal to |block_length|.
// |block_length| defines the length of a block, in samples.
// |shift_amount| is in samples. |callback| is the caller-owned audio
// processing function called for each block of the input chunk.
LappedTransform(size_t num_in_channels,
size_t num_out_channels,
size_t chunk_length,
const float* window,
size_t block_length,
size_t shift_amount,
Callback* callback);
~LappedTransform();
// Main audio processing helper method. Internally slices |in_chunk| into
// blocks, transforms them to frequency domain, calls the callback for each
// block and returns a de-blocked time domain chunk of audio through
// |out_chunk|. Both buffers are caller-owned.
void ProcessChunk(const float* const* in_chunk, float* const* out_chunk);
// Get the chunk length.
//
// The chunk length is the number of samples per channel that must be passed
// to ProcessChunk via the parameter in_chunk.
//
// Returns the same chunk_length passed to the LappedTransform constructor.
size_t chunk_length() const { return chunk_length_; }
// Get the number of input channels.
//
// This is the number of arrays that must be passed to ProcessChunk via
// in_chunk.
//
// Returns the same num_in_channels passed to the LappedTransform constructor.
size_t num_in_channels() const { return num_in_channels_; }
// Get the number of output channels.
//
// This is the number of arrays that must be passed to ProcessChunk via
// out_chunk.
//
// Returns the same num_out_channels passed to the LappedTransform
// constructor.
size_t num_out_channels() const { return num_out_channels_; }
// Returns the initial delay.
//
// This is the delay introduced by the |blocker_| to be able to get and return
// chunks of |chunk_length|, but process blocks of |block_length|.
size_t initial_delay() const { return blocker_.initial_delay(); }
private:
// Internal middleware callback, given to the blocker. Transforms each block
// and hands it over to the processing method given at construction time.
class BlockThunk : public BlockerCallback {
public:
explicit BlockThunk(LappedTransform* parent) : parent_(parent) {}
void ProcessBlock(const float* const* input,
size_t num_frames,
size_t num_input_channels,
size_t num_output_channels,
float* const* output) override;
private:
LappedTransform* const parent_;
} blocker_callback_;
const size_t num_in_channels_;
const size_t num_out_channels_;
const size_t block_length_;
const size_t chunk_length_;
Callback* const block_processor_;
Blocker blocker_;
// TODO(alessiob): Replace RealFourier with a different FFT library.
std::unique_ptr<RealFourier> fft_;
const size_t cplx_length_;
AlignedArray<float> real_buf_;
AlignedArray<std::complex<float> > cplx_pre_;
AlignedArray<std::complex<float> > cplx_post_;
};
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_CODECS_OPUS_TEST_LAPPED_TRANSFORM_H_
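For illustration only (not part of the CL): a hypothetical LappedTransform::Callback that zeroes the upper half of each block's spectrum, wired up as described in the class comment above.

// Hypothetical usage sketch; not part of the CL.
#include <complex>
#include <cstddef>
#include <vector>

#include "modules/audio_coding/codecs/opus/test/lapped_transform.h"

namespace {

// A callback that zeroes the upper half of each block's spectrum.
class LowPassCallback : public webrtc::LappedTransform::Callback {
 public:
  void ProcessAudioBlock(const std::complex<float>* const* in_block,
                         size_t num_in_channels,
                         size_t frames,  // Number of frequency bins.
                         size_t num_out_channels,
                         std::complex<float>* const* out_block) override {
    for (size_t ch = 0; ch < num_out_channels; ++ch) {
      for (size_t bin = 0; bin < frames; ++bin) {
        out_block[ch][bin] =
            bin < frames / 2 ? in_block[ch][bin] : std::complex<float>(0.f);
      }
    }
  }
};

void LappedTransformSketch() {
  constexpr size_t kChunkLength = 480;  // E.g. 10 ms at 48 kHz.
  constexpr size_t kBlockLength = 64;   // Must be a power of two.
  constexpr size_t kShiftAmount = 32;   // 50% block overlap.
  // sqrt(0.5)-valued window, as in the IdentityProcessor unit test below.
  std::vector<float> window(kBlockLength, 0.707107f);

  LowPassCallback callback;
  webrtc::LappedTransform transform(/*num_in_channels=*/1,
                                    /*num_out_channels=*/1, kChunkLength,
                                    window.data(), kBlockLength, kShiftAmount,
                                    &callback);

  std::vector<float> in(kChunkLength, 0.f);
  std::vector<float> out(kChunkLength);
  float* in_ptr = in.data();
  float* out_ptr = out.data();
  transform.ProcessChunk(&in_ptr, &out_ptr);
  // The first transform.initial_delay() output frames are zero.
}

}  // namespace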


@@ -0,0 +1,203 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/opus/test/lapped_transform.h"
#include <algorithm>
#include <cmath>
#include <cstring>
#include "test/gtest.h"
using std::complex;
namespace {
class NoopCallback : public webrtc::LappedTransform::Callback {
public:
NoopCallback() : block_num_(0) {}
void ProcessAudioBlock(const complex<float>* const* in_block,
size_t in_channels,
size_t frames,
size_t out_channels,
complex<float>* const* out_block) override {
RTC_CHECK_EQ(in_channels, out_channels);
for (size_t i = 0; i < out_channels; ++i) {
memcpy(out_block[i], in_block[i], sizeof(**in_block) * frames);
}
++block_num_;
}
size_t block_num() { return block_num_; }
private:
size_t block_num_;
};
class FftCheckerCallback : public webrtc::LappedTransform::Callback {
public:
FftCheckerCallback() : block_num_(0) {}
void ProcessAudioBlock(const complex<float>* const* in_block,
size_t in_channels,
size_t frames,
size_t out_channels,
complex<float>* const* out_block) override {
RTC_CHECK_EQ(in_channels, out_channels);
size_t full_length = (frames - 1) * 2;
++block_num_;
if (block_num_ > 0) {
ASSERT_NEAR(in_block[0][0].real(), static_cast<float>(full_length),
1e-5f);
ASSERT_NEAR(in_block[0][0].imag(), 0.0f, 1e-5f);
for (size_t i = 1; i < frames; ++i) {
ASSERT_NEAR(in_block[0][i].real(), 0.0f, 1e-5f);
ASSERT_NEAR(in_block[0][i].imag(), 0.0f, 1e-5f);
}
}
}
size_t block_num() { return block_num_; }
private:
size_t block_num_;
};
void SetFloatArray(float value, int rows, int cols, float* const* array) {
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
array[i][j] = value;
}
}
}
} // namespace
namespace webrtc {
TEST(LappedTransformTest, Windowless) {
const size_t kChannels = 3;
const size_t kChunkLength = 512;
const size_t kBlockLength = 64;
const size_t kShiftAmount = 64;
NoopCallback noop;
// Rectangular window.
float window[kBlockLength];
std::fill(window, &window[kBlockLength], 1.0f);
LappedTransform trans(kChannels, kChannels, kChunkLength, window,
kBlockLength, kShiftAmount, &noop);
float in_buffer[kChannels][kChunkLength];
float* in_chunk[kChannels];
float out_buffer[kChannels][kChunkLength];
float* out_chunk[kChannels];
in_chunk[0] = in_buffer[0];
in_chunk[1] = in_buffer[1];
in_chunk[2] = in_buffer[2];
out_chunk[0] = out_buffer[0];
out_chunk[1] = out_buffer[1];
out_chunk[2] = out_buffer[2];
SetFloatArray(2.0f, kChannels, kChunkLength, in_chunk);
SetFloatArray(-1.0f, kChannels, kChunkLength, out_chunk);
trans.ProcessChunk(in_chunk, out_chunk);
for (size_t i = 0; i < kChannels; ++i) {
for (size_t j = 0; j < kChunkLength; ++j) {
ASSERT_NEAR(out_chunk[i][j], 2.0f, 1e-5f);
}
}
ASSERT_EQ(kChunkLength / kBlockLength, noop.block_num());
}
TEST(LappedTransformTest, IdentityProcessor) {
const size_t kChunkLength = 512;
const size_t kBlockLength = 64;
const size_t kShiftAmount = 32;
NoopCallback noop;
// Identity window for |overlap = block_size / 2|.
float window[kBlockLength];
std::fill(window, &window[kBlockLength], std::sqrt(0.5f));
LappedTransform trans(1, 1, kChunkLength, window, kBlockLength, kShiftAmount,
&noop);
float in_buffer[kChunkLength];
float* in_chunk = in_buffer;
float out_buffer[kChunkLength];
float* out_chunk = out_buffer;
SetFloatArray(2.0f, 1, kChunkLength, &in_chunk);
SetFloatArray(-1.0f, 1, kChunkLength, &out_chunk);
trans.ProcessChunk(&in_chunk, &out_chunk);
for (size_t i = 0; i < kChunkLength; ++i) {
ASSERT_NEAR(out_chunk[i], (i < kBlockLength - kShiftAmount) ? 0.0f : 2.0f,
1e-5f);
}
ASSERT_EQ(kChunkLength / kShiftAmount, noop.block_num());
}
TEST(LappedTransformTest, Callbacks) {
const size_t kChunkLength = 512;
const size_t kBlockLength = 64;
FftCheckerCallback call;
// Rectangular window.
float window[kBlockLength];
std::fill(window, &window[kBlockLength], 1.0f);
LappedTransform trans(1, 1, kChunkLength, window, kBlockLength, kBlockLength,
&call);
float in_buffer[kChunkLength];
float* in_chunk = in_buffer;
float out_buffer[kChunkLength];
float* out_chunk = out_buffer;
SetFloatArray(1.0f, 1, kChunkLength, &in_chunk);
SetFloatArray(-1.0f, 1, kChunkLength, &out_chunk);
trans.ProcessChunk(&in_chunk, &out_chunk);
ASSERT_EQ(kChunkLength / kBlockLength, call.block_num());
}
TEST(LappedTransformTest, chunk_length) {
const size_t kBlockLength = 64;
FftCheckerCallback call;
const float window[kBlockLength] = {};
// Make sure that chunk_length returns the same value passed to the
// LappedTransform constructor.
{
const size_t kExpectedChunkLength = 512;
const LappedTransform trans(1, 1, kExpectedChunkLength, window,
kBlockLength, kBlockLength, &call);
EXPECT_EQ(kExpectedChunkLength, trans.chunk_length());
}
{
const size_t kExpectedChunkLength = 160;
const LappedTransform trans(1, 1, kExpectedChunkLength, window,
kBlockLength, kBlockLength, &call);
EXPECT_EQ(kExpectedChunkLength, trans.chunk_length());
}
}
} // namespace webrtc