Files
platform-external-webrtc/webrtc/common_audio/audio_util_unittest.cc
andrew@webrtc.org 17e40641b3 Add a deinterleaved float interface to AudioProcessing.
This is mainly to support the native audio format in Chrome. Although
this implementation just moves the float->int conversion under the hood,
we will transition AudioProcessing towards supporting this format
throughout.

- Add a test which verifies we get identical output with the float and
int interfaces.
- The float and int wrappers are tasked with conversion to the
AudioBuffer format. A new shared Process/Analyze method does most of
the work.
- Add a new field to the debug.proto to hold deinterleaved data.
- Add helpers to audio_utils.cc, and start using numeric_limits.
- Note that there was no performance difference between numeric_limits
and a literal value when measured on Linux using gcc or clang.

BUG=2894
R=aluebs@webrtc.org, bjornv@webrtc.org, henrikg@webrtc.org, tommi@webrtc.org, turaj@webrtc.org, xians@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/9179004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@5641 4adac7df-926f-26a2-2b94-8c16560cd09d
2014-03-04 20:58:13 +00:00
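
The commit message notes that the new conversion helpers in audio_util use std::numeric_limits for the int16 bounds. As a rough illustration of the behavior the tests below expect (round half away from zero, saturate at the int16_t range, and the asymmetric 32767/32768 scaling), a minimal sketch might look like the following. The *Sketch names and the namespace are hypothetical; the real implementations live in webrtc/common_audio/audio_util.cc and may differ in detail.

#include <limits>

#include "webrtc/typedefs.h"

namespace audio_util_sketch {

// Clamp to the int16_t range via std::numeric_limits, then round half away
// from zero (0.5f -> 1, -0.5f -> -1), matching the test vectors below.
inline int16_t SaturateRound(float v) {
  if (v > std::numeric_limits<int16_t>::max())
    return std::numeric_limits<int16_t>::max();
  if (v < std::numeric_limits<int16_t>::min())
    return std::numeric_limits<int16_t>::min();
  return static_cast<int16_t>(v > 0.f ? v + 0.5f : v - 0.5f);
}

// Hypothetical counterpart of RoundToInt16: saturating float -> int16_t.
void RoundToInt16Sketch(const float* src, int size, int16_t* dest) {
  for (int i = 0; i < size; ++i)
    dest[i] = SaturateRound(src[i]);
}

// Hypothetical counterpart of ScaleAndRoundToInt16: scale [-1, 1] floats by
// 32767 (positive) or 32768 (negative), then saturate and round.
void ScaleAndRoundToInt16Sketch(const float* src, int size, int16_t* dest) {
  for (int i = 0; i < size; ++i) {
    const float v = src[i];
    dest[i] = SaturateRound(v >= 0.f ? v * 32767.f : v * 32768.f);
  }
}

}  // namespace audio_util_sketch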

/*
 *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/typedefs.h"

namespace webrtc {

void ExpectArraysEq(const int16_t* ref, const int16_t* test, int length) {
  for (int i = 0; i < length; ++i) {
    EXPECT_EQ(ref[i], test[i]);
  }
}

void ExpectArraysEq(const float* ref, const float* test, int length) {
  for (int i = 0; i < length; ++i) {
    EXPECT_FLOAT_EQ(ref[i], test[i]);
  }
}

TEST(AudioUtilTest, RoundToInt16) {
  const int kSize = 7;
  const float kInput[kSize] = {
      0.f, 0.4f, 0.5f, -0.4f, -0.5f, 32768.f, -32769.f};
  const int16_t kReference[kSize] = {0, 0, 1, 0, -1, 32767, -32768};
  int16_t output[kSize];
  RoundToInt16(kInput, kSize, output);
  ExpectArraysEq(kReference, output, kSize);
}

TEST(AudioUtilTest, ScaleAndRoundToInt16) {
  const int kSize = 9;
  const float kInput[kSize] = {
      0.f, 0.4f / 32767.f, 0.6f / 32767.f, -0.4f / 32768.f, -0.6f / 32768.f,
      1.f, -1.f, 1.1f, -1.1f};
  const int16_t kReference[kSize] = {
      0, 0, 1, 0, -1, 32767, -32768, 32767, -32768};
  int16_t output[kSize];
  ScaleAndRoundToInt16(kInput, kSize, output);
  ExpectArraysEq(kReference, output, kSize);
}

TEST(AudioUtilTest, ScaleToFloat) {
  const int kSize = 7;
  const int16_t kInput[kSize] = {0, 1, -1, 16384, -16384, 32767, -32768};
  const float kReference[kSize] = {
      0.f, 1.f / 32767.f, -1.f / 32768.f, 16384.f / 32767.f, -0.5f, 1.f, -1.f};
  float output[kSize];
  ScaleToFloat(kInput, kSize, output);
  ExpectArraysEq(kReference, output, kSize);
}

TEST(AudioUtilTest, InterleavingStereo) {
  const int16_t kInterleaved[] = {2, 3, 4, 9, 8, 27, 16, 81};
  const int kSamplesPerChannel = 4;
  const int kNumChannels = 2;
  const int kLength = kSamplesPerChannel * kNumChannels;

  int16_t left[kSamplesPerChannel], right[kSamplesPerChannel];
  int16_t* deinterleaved[] = {left, right};
  Deinterleave(kInterleaved, kSamplesPerChannel, kNumChannels, deinterleaved);
  const int16_t kRefLeft[] = {2, 4, 8, 16};
  const int16_t kRefRight[] = {3, 9, 27, 81};
  ExpectArraysEq(kRefLeft, left, kSamplesPerChannel);
  ExpectArraysEq(kRefRight, right, kSamplesPerChannel);

  int16_t interleaved[kLength];
  Interleave(deinterleaved, kSamplesPerChannel, kNumChannels, interleaved);
  ExpectArraysEq(kInterleaved, interleaved, kLength);
}

TEST(AudioUtilTest, InterleavingMonoIsIdentical) {
  const int16_t kInterleaved[] = {1, 2, 3, 4, 5};
  const int kSamplesPerChannel = 5;
  const int kNumChannels = 1;
  int16_t mono[kSamplesPerChannel];
  int16_t* deinterleaved[] = {mono};
  Deinterleave(kInterleaved, kSamplesPerChannel, kNumChannels, deinterleaved);
  ExpectArraysEq(kInterleaved, mono, kSamplesPerChannel);

  int16_t interleaved[kSamplesPerChannel];
  Interleave(deinterleaved, kSamplesPerChannel, kNumChannels, interleaved);
  ExpectArraysEq(mono, interleaved, kSamplesPerChannel);
}

}  // namespace webrtc
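
The commit message describes float and int wrappers that convert between the deinterleaved float format and the int16 AudioBuffer layout. A small usage sketch, built only from the helpers exercised above, might look like the following. The function name, the fixed channel and sample counts, and the two-step Deinterleave-then-ScaleToFloat approach are illustrative assumptions, not part of the WebRTC API or of this file.

#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/typedefs.h"

namespace {

const int kSamplesPerChannel = 4;
const int kNumChannels = 2;

// Hypothetical helper: convert an interleaved int16_t stereo buffer into the
// deinterleaved float layout described in the commit message.
void ToDeinterleavedFloat(const int16_t* interleaved,
                          float* const* deinterleaved_float) {
  // Split the interleaved stream into per-channel int16_t buffers first...
  int16_t left[kSamplesPerChannel];
  int16_t right[kSamplesPerChannel];
  int16_t* channels[] = {left, right};
  webrtc::Deinterleave(interleaved, kSamplesPerChannel, kNumChannels,
                       channels);
  // ...then scale each channel to [-1, 1] floats.
  for (int c = 0; c < kNumChannels; ++c) {
    webrtc::ScaleToFloat(channels[c], kSamplesPerChannel,
                         deinterleaved_float[c]);
  }
}

}  // namespace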