Add an AudioFrameOperations unittest.

Additionally, reformat audio_frame_operations to Google style.

BUG=issue451
TEST=voice_engine_unittests

Review URL: https://webrtc-codereview.appspot.com/528001

git-svn-id: http://webrtc.googlecode.com/svn/trunk@2133 4adac7df-926f-26a2-2b94-8c16560cd09d
andrew@webrtc.org
2012-04-26 22:32:03 +00:00
parent e49d908baf
commit 9c4f6a5ff9
4 changed files with 292 additions and 113 deletions

audio_frame_operations.cc

@@ -12,53 +12,43 @@
 #include "module_common_types.h"
 
 namespace webrtc {
 namespace voe {
 
-WebRtc_Word32
-AudioFrameOperations::MonoToStereo(AudioFrame& audioFrame)
-{
-    if (audioFrame._audioChannel != 1)
-    {
+int AudioFrameOperations::MonoToStereo(AudioFrame& frame) {
+  if (frame._audioChannel != 1) {
     return -1;
   }
-    if ((audioFrame._payloadDataLengthInSamples << 1) >=
-        AudioFrame::kMaxAudioFrameSizeSamples)
-    {
+  if ((frame._payloadDataLengthInSamples << 1) >=
+      AudioFrame::kMaxAudioFrameSizeSamples) {
     // not enough memory to expand from mono to stereo
     return -1;
   }
 
   int16_t payloadCopy[AudioFrame::kMaxAudioFrameSizeSamples];
-    memcpy(payloadCopy, audioFrame._payloadData,
-           sizeof(int16_t) * audioFrame._payloadDataLengthInSamples);
+  memcpy(payloadCopy, frame._payloadData,
+         sizeof(int16_t) * frame._payloadDataLengthInSamples);
 
-    for (int i = 0; i < audioFrame._payloadDataLengthInSamples; i++)
-    {
-        audioFrame._payloadData[2*i] = payloadCopy[i];
-        audioFrame._payloadData[2*i+1] = payloadCopy[i];
+  for (int i = 0; i < frame._payloadDataLengthInSamples; i++) {
+    frame._payloadData[2 * i] = payloadCopy[i];
+    frame._payloadData[2 * i + 1] = payloadCopy[i];
   }
 
-    audioFrame._audioChannel = 2;
+  frame._audioChannel = 2;
 
   return 0;
 }
 
-WebRtc_Word32
-AudioFrameOperations::StereoToMono(AudioFrame& audioFrame)
-{
-    if (audioFrame._audioChannel != 2)
-    {
+int AudioFrameOperations::StereoToMono(AudioFrame& frame) {
+  if (frame._audioChannel != 2) {
     return -1;
   }
 
-    for (int i = 0; i < audioFrame._payloadDataLengthInSamples; i++)
-    {
-        audioFrame._payloadData[i] = (audioFrame._payloadData[2*i] >> 1) +
-            (audioFrame._payloadData[2*i+1] >> 1);
+  for (int i = 0; i < frame._payloadDataLengthInSamples; i++) {
+    frame._payloadData[i] = (frame._payloadData[2 * i] >> 1) +
+        (frame._payloadData[2 * i + 1] >> 1);
   }
 
-    audioFrame._audioChannel = 1;
+  frame._audioChannel = 1;
 
   return 0;
 }
@@ -73,65 +63,44 @@ void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
   }
 }
 
-WebRtc_Word32
-AudioFrameOperations::Mute(AudioFrame& audioFrame)
-{
-    const int sizeInBytes = sizeof(WebRtc_Word16) *
-        audioFrame._payloadDataLengthInSamples * audioFrame._audioChannel;
-    memset(audioFrame._payloadData, 0, sizeInBytes);
-    audioFrame._energy = 0;
-    return 0;
+void AudioFrameOperations::Mute(AudioFrame& frame) {
+  memset(frame._payloadData, 0, sizeof(int16_t) *
+         frame._payloadDataLengthInSamples * frame._audioChannel);
+  frame._energy = 0;
 }
 
-WebRtc_Word32
-AudioFrameOperations::Scale(const float left,
-                            const float right,
-                            AudioFrame& audioFrame)
-{
-    if (audioFrame._audioChannel == 1)
-    {
-        assert(false);
+int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) {
+  if (frame._audioChannel != 2) {
     return -1;
   }
 
-    for (int i = 0; i < audioFrame._payloadDataLengthInSamples; i++)
-    {
-        audioFrame._payloadData[2*i] =
-            (WebRtc_Word16)(left*audioFrame._payloadData[2*i]);
-        audioFrame._payloadData[2*i+1] =
-            (WebRtc_Word16)(right*audioFrame._payloadData[2*i+1]);
+  for (int i = 0; i < frame._payloadDataLengthInSamples; i++) {
+    frame._payloadData[2 * i] =
+        static_cast<int16_t>(left * frame._payloadData[2 * i]);
+    frame._payloadData[2 * i + 1] =
+        static_cast<int16_t>(right * frame._payloadData[2 * i + 1]);
   }
 
   return 0;
 }
 
-WebRtc_Word32
-AudioFrameOperations::ScaleWithSat(const float scale, AudioFrame& audioFrame)
-{
-    WebRtc_Word32 tmp(0);
+int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame& frame) {
+  int32_t temp_data = 0;
 
-    // Ensure that the output result is saturated [-32768, +32768].
-    for (int i = 0;
-         i < audioFrame._payloadDataLengthInSamples * audioFrame._audioChannel;
-         i++)
-    {
-        tmp = static_cast<WebRtc_Word32> (scale * audioFrame._payloadData[i]);
-        if (tmp < -32768)
-        {
-            audioFrame._payloadData[i] = -32768;
-        }
-        else if (tmp > 32767)
-        {
-            audioFrame._payloadData[i] = 32767;
-        }
-        else
-        {
-            audioFrame._payloadData[i] = static_cast<WebRtc_Word16> (tmp);
+  // Ensure that the output result is saturated [-32768, +32767].
+  for (int i = 0; i < frame._payloadDataLengthInSamples * frame._audioChannel;
+       i++) {
+    temp_data = static_cast<int32_t>(scale * frame._payloadData[i]);
+    if (temp_data < -32768) {
+      frame._payloadData[i] = -32768;
+    } else if (temp_data > 32767) {
+      frame._payloadData[i] = 32767;
+    } else {
+      frame._payloadData[i] = static_cast<int16_t>(temp_data);
     }
   }
 
   return 0;
 }
 
 }  // namespace voe
 }  // namespace webrtc

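A note on the StereoToMono mixdown retained above: halving each channel with >> 1 before summing keeps the result within int16_t range by construction. A minimal standalone sketch (mine, not code from this commit) of why that form cannot overflow where a 16-bit sum would:

#include <cstdint>
#include <cstdio>

int main() {
  int16_t left = -32768;
  int16_t right = -32768;
  // Each operand is halved first, so the sum is at most one int16_t in
  // magnitude and cannot overflow: -16384 + -16384 = -32768.
  int16_t mixed = (left >> 1) + (right >> 1);
  // A naive (left + right) / 2 kept in 16-bit arithmetic would wrap; in C++
  // it only happens to work because the operands are promoted to int first.
  printf("%d\n", mixed);  // -32768, as StereoToMonoDoesNotWrapAround expects
  return 0;
}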
audio_frame_operations.h

@@ -8,8 +8,8 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef WEBRTC_VOICE_ENGINE_AUDIO_FRAME_OPERATIONS_H
-#define WEBRTC_VOICE_ENGINE_AUDIO_FRAME_OPERATIONS_H
+#ifndef WEBRTC_VOICE_ENGINE_AUDIO_FRAME_OPERATIONS_H_
+#define WEBRTC_VOICE_ENGINE_AUDIO_FRAME_OPERATIONS_H_
 
 #include "typedefs.h"
 
@@ -19,29 +19,26 @@ class AudioFrame;
 
 namespace voe {
 
-class AudioFrameOperations
-{
-public:
-    static WebRtc_Word32 MonoToStereo(AudioFrame& audioFrame);
+// TODO(andrew): unify this with utility.h. Change reference parameters to
+// pointers.
+class AudioFrameOperations {
+ public:
+  static int MonoToStereo(AudioFrame& frame);
 
-    static WebRtc_Word32 StereoToMono(AudioFrame& audioFrame);
+  static int StereoToMono(AudioFrame& frame);
 
-    // Swap the left and right channels of |frame|. Fails silently if |frame|
-    // is not stereo.
+  // Swap the left and right channels of |frame|. Fails silently if |frame| is
+  // not stereo.
   static void SwapStereoChannels(AudioFrame* frame);
 
-    static WebRtc_Word32 Mute(AudioFrame& audioFrame);
+  static void Mute(AudioFrame& frame);
 
-    static WebRtc_Word32 Scale(const float left,
-                               const float right,
-                               AudioFrame& audioFrame);
+  static int Scale(float left, float right, AudioFrame& frame);
 
-    static WebRtc_Word32 ScaleWithSat(const float scale,
-                                      AudioFrame& audioFrame);
+  static int ScaleWithSat(float scale, AudioFrame& frame);
 };
 
 }  // namespace voe
 }  // namespace webrtc
 
-#endif  // #ifndef WEBRTC_VOICE_ENGINE_AUDIO_FRAME_OPERATIONS_H
+#endif  // #ifndef WEBRTC_VOICE_ENGINE_AUDIO_FRAME_OPERATIONS_H_

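For orientation, a sketch of typical call sites for the API declared above; Example() and its handling of the frame are illustrative only, but the signatures and return conventions are those of this change:

#include "audio_frame_operations.h"
#include "module_common_types.h"

void Example(webrtc::AudioFrame* frame) {
  using webrtc::voe::AudioFrameOperations;
  // Returns 0 on success, -1 if the frame is not mono or would exceed
  // kMaxAudioFrameSizeSamples when expanded to stereo.
  if (AudioFrameOperations::MonoToStereo(*frame) == 0) {
    // Halve the left channel, keep the right; returns -1 unless stereo.
    AudioFrameOperations::Scale(0.5f, 1.0f, *frame);
  }
  // Mute() is now void: zeroing the payload cannot fail.
  AudioFrameOperations::Mute(*frame);
}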
audio_frame_operations_unittest.cc

@@ -0,0 +1,212 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS. All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "gtest/gtest.h"
+
+#include "audio_frame_operations.h"
+#include "module_common_types.h"
+
+namespace webrtc {
+namespace voe {
+namespace {
+
+class AudioFrameOperationsTest : public ::testing::Test {
+ protected:
+  AudioFrameOperationsTest() {
+    // Set typical values.
+    frame_._payloadDataLengthInSamples = 320;
+    frame_._audioChannel = 2;
+  }
+
+  AudioFrame frame_;
+};
+
+void SetFrameData(AudioFrame* frame, int16_t left, int16_t right) {
+  for (int i = 0; i < frame->_payloadDataLengthInSamples * 2; i += 2) {
+    frame->_payloadData[i] = left;
+    frame->_payloadData[i + 1] = right;
+  }
+}
+
+void SetFrameData(AudioFrame* frame, int16_t data) {
+  for (int i = 0; i < frame->_payloadDataLengthInSamples; i++) {
+    frame->_payloadData[i] = data;
+  }
+}
+
+void VerifyFramesAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
+  EXPECT_EQ(frame1._audioChannel, frame2._audioChannel);
+  EXPECT_EQ(frame1._payloadDataLengthInSamples,
+            frame2._payloadDataLengthInSamples);
+
+  for (int i = 0; i < frame1._payloadDataLengthInSamples * frame1._audioChannel;
+       i++) {
+    EXPECT_EQ(frame1._payloadData[i], frame2._payloadData[i]);
+  }
+}
+
+TEST_F(AudioFrameOperationsTest, MonoToStereoFailsWithBadParameters) {
+  EXPECT_EQ(-1, AudioFrameOperations::MonoToStereo(frame_));
+
+  frame_._payloadDataLengthInSamples = AudioFrame::kMaxAudioFrameSizeSamples;
+  frame_._audioChannel = 1;
+  EXPECT_EQ(-1, AudioFrameOperations::MonoToStereo(frame_));
+}
+
+TEST_F(AudioFrameOperationsTest, MonoToStereoSucceeds) {
+  frame_._audioChannel = 1;
+  SetFrameData(&frame_, 1);
+  EXPECT_EQ(0, AudioFrameOperations::MonoToStereo(frame_));
+
+  AudioFrame stereo_frame;
+  stereo_frame._payloadDataLengthInSamples = 320;
+  stereo_frame._audioChannel = 2;
+  SetFrameData(&stereo_frame, 1, 1);
+  VerifyFramesAreEqual(stereo_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, StereoToMonoFailsWithBadParameters) {
+  frame_._audioChannel = 1;
+  EXPECT_EQ(-1, AudioFrameOperations::StereoToMono(frame_));
+}
+
+TEST_F(AudioFrameOperationsTest, StereoToMonoSucceeds) {
+  SetFrameData(&frame_, 4, 2);
+  EXPECT_EQ(0, AudioFrameOperations::StereoToMono(frame_));
+
+  AudioFrame mono_frame;
+  mono_frame._payloadDataLengthInSamples = 320;
+  mono_frame._audioChannel = 1;
+  SetFrameData(&mono_frame, 3);
+  VerifyFramesAreEqual(mono_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, StereoToMonoDoesNotWrapAround) {
+  SetFrameData(&frame_, -32768, -32768);
+  EXPECT_EQ(0, AudioFrameOperations::StereoToMono(frame_));
+
+  AudioFrame mono_frame;
+  mono_frame._payloadDataLengthInSamples = 320;
+  mono_frame._audioChannel = 1;
+  SetFrameData(&mono_frame, -32768);
+  VerifyFramesAreEqual(mono_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, SwapStereoChannelsSucceedsOnStereo) {
+  SetFrameData(&frame_, 0, 1);
+
+  AudioFrame swapped_frame;
+  swapped_frame._payloadDataLengthInSamples = 320;
+  swapped_frame._audioChannel = 2;
+  SetFrameData(&swapped_frame, 1, 0);
+
+  AudioFrameOperations::SwapStereoChannels(&frame_);
+  VerifyFramesAreEqual(swapped_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, SwapStereoChannelsFailsOnMono) {
+  frame_._audioChannel = 1;
+  // Set data to "stereo", despite it being a mono frame.
+  SetFrameData(&frame_, 0, 1);
+
+  AudioFrame orig_frame = frame_;
+  AudioFrameOperations::SwapStereoChannels(&frame_);
+  // Verify that no swap occurred.
+  VerifyFramesAreEqual(orig_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, MuteSucceeds) {
+  SetFrameData(&frame_, 1000, 1000);
+  frame_._energy = 1000 * 1000 * frame_._payloadDataLengthInSamples *
+      frame_._audioChannel;
+  AudioFrameOperations::Mute(frame_);
+
+  AudioFrame muted_frame;
+  muted_frame._payloadDataLengthInSamples = 320;
+  muted_frame._audioChannel = 2;
+  SetFrameData(&muted_frame, 0, 0);
+  muted_frame._energy = 0;
+  VerifyFramesAreEqual(muted_frame, frame_);
+  EXPECT_EQ(muted_frame._energy, frame_._energy);
+}
+
+// TODO(andrew): should not allow negative scales.
+TEST_F(AudioFrameOperationsTest, DISABLED_ScaleFailsWithBadParameters) {
+  frame_._audioChannel = 1;
+  EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, frame_));
+
+  frame_._audioChannel = 3;
+  EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, frame_));
+
+  frame_._audioChannel = 2;
+  EXPECT_EQ(-1, AudioFrameOperations::Scale(-1.0, 1.0, frame_));
+  EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, -1.0, frame_));
+}
+
+// TODO(andrew): fix the wraparound bug. We should always saturate.
+TEST_F(AudioFrameOperationsTest, DISABLED_ScaleDoesNotWrapAround) {
+  SetFrameData(&frame_, 4000, -4000);
+  EXPECT_EQ(0, AudioFrameOperations::Scale(10.0, 10.0, frame_));
+
+  AudioFrame clipped_frame;
+  clipped_frame._payloadDataLengthInSamples = 320;
+  clipped_frame._audioChannel = 2;
+  SetFrameData(&clipped_frame, 32767, -32768);
+  VerifyFramesAreEqual(clipped_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, ScaleSucceeds) {
+  SetFrameData(&frame_, 1, -1);
+  EXPECT_EQ(0, AudioFrameOperations::Scale(2.0, 3.0, frame_));
+
+  AudioFrame scaled_frame;
+  scaled_frame._payloadDataLengthInSamples = 320;
+  scaled_frame._audioChannel = 2;
+  SetFrameData(&scaled_frame, 2, -3);
+  VerifyFramesAreEqual(scaled_frame, frame_);
+}
+
+// TODO(andrew): should fail with a negative scale.
+TEST_F(AudioFrameOperationsTest, DISABLED_ScaleWithSatFailsWithBadParameters) {
+  EXPECT_EQ(-1, AudioFrameOperations::ScaleWithSat(-1.0, frame_));
+}
+
+TEST_F(AudioFrameOperationsTest, ScaleWithSatDoesNotWrapAround) {
+  frame_._audioChannel = 1;
+  SetFrameData(&frame_, 4000);
+  EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(10.0, frame_));
+
+  AudioFrame clipped_frame;
+  clipped_frame._payloadDataLengthInSamples = 320;
+  clipped_frame._audioChannel = 1;
+  SetFrameData(&clipped_frame, 32767);
+  VerifyFramesAreEqual(clipped_frame, frame_);
+
+  SetFrameData(&frame_, -4000);
+  EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(10.0, frame_));
+  SetFrameData(&clipped_frame, -32768);
+  VerifyFramesAreEqual(clipped_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, ScaleWithSatSucceeds) {
+  frame_._audioChannel = 1;
+  SetFrameData(&frame_, 1);
+  EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(2.0, frame_));
+
+  AudioFrame scaled_frame;
+  scaled_frame._payloadDataLengthInSamples = 320;
+  scaled_frame._audioChannel = 1;
+  SetFrameData(&scaled_frame, 2);
+  VerifyFramesAreEqual(scaled_frame, frame_);
+}
+
+}  // namespace
+}  // namespace voe
+}  // namespace webrtc

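Two of the tests above land DISABLED_ because Scale() still casts the scaled product straight to int16_t, which can wrap around; ScaleWithSat() already clamps inline. A sketch of the saturating cast those tests expect (a hypothetical helper of mine, not code from this change):

#include <cstdint>

int16_t SaturatingCast(float v) {
  if (v < -32768.0f) return -32768;  // clamp below int16_t minimum
  if (v > 32767.0f) return 32767;    // clamp above int16_t maximum
  return static_cast<int16_t>(v);    // in range: plain cast is safe
}

// For example, SaturatingCast(10.0f * 4000.0f) yields 32767, the value
// DISABLED_ScaleDoesNotWrapAround expects after Scale(10.0, 10.0, frame_).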
voice_engine gyp file (voice_engine_unittests target)

@@ -122,25 +122,26 @@
       'type': 'executable',
       'dependencies': [
         'voice_engine_core',
-        '<(webrtc_root)/../testing/gtest.gyp:gtest',
-        '<(webrtc_root)/../test/test.gyp:test_support_main',
         # The rest are to satisfy the channel_unittest include chain.
         # This would be unnecessary if we had qualified includes.
         '<(webrtc_root)/common_audio/common_audio.gyp:resampler',
         '<(webrtc_root)/common_audio/common_audio.gyp:signal_processing',
+        '<(webrtc_root)/modules/modules.gyp:audio_coding_module',
+        '<(webrtc_root)/modules/modules.gyp:audio_conference_mixer',
         '<(webrtc_root)/modules/modules.gyp:audio_device',
         '<(webrtc_root)/modules/modules.gyp:audio_processing',
-        '<(webrtc_root)/modules/modules.gyp:audio_coding_module',
-        '<(webrtc_root)/modules/modules.gyp:audio_conference_mixer',
         '<(webrtc_root)/modules/modules.gyp:media_file',
         '<(webrtc_root)/modules/modules.gyp:rtp_rtcp',
         '<(webrtc_root)/modules/modules.gyp:udp_transport',
         '<(webrtc_root)/modules/modules.gyp:webrtc_utility',
         '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
+        '<(webrtc_root)/../test/test.gyp:test_support_main',
+        '<(webrtc_root)/../testing/gtest.gyp:gtest',
       ],
       'include_dirs': [
         '../../..',
         '../interface',
       ],
       'sources': [
+        'audio_frame_operations_unittest.cc',
         'channel_unittest.cc',
       ],
     },
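
With the new source wired into voice_engine_unittests (the binary named in the TEST= line above), the added cases can be run in isolation with gtest's standard filter flag; the output path below assumes a default Linux gyp build and may differ per platform:

out/Debug/voice_engine_unittests --gtest_filter=AudioFrameOperationsTest.*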