Update the debug recordings to use protobufs.

Also modify the unittest proto to correspond with the changes. process_test is a bit of a hack job, but it works fine and isn't too unreadable. We should refactor it properly later.
Review URL: http://webrtc-codereview.appspot.com/98007

git-svn-id: http://webrtc.googlecode.com/svn/trunk@296 4adac7df-926f-26a2-2b94-8c16560cd09d
ajm@google.com
2011-08-03 21:08:51 +00:00
parent 320813c2d5
commit 808e0e0dac
12 changed files with 696 additions and 361 deletions
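
For context, the new recording format is simply a sequence of audioproc::Event protobuf messages, each preceded by its serialized size as an int32 (see WriteMessageToDebugFile() in the audio_processing_impl.cc diff below). A minimal sketch of the write-side framing, assuming the generated debug.pb.h header and a little-endian host (a TODO in the diff notes that big-endian is not yet handled); AppendEvent is a hypothetical helper, not part of the API:

#include <stdint.h>
#include <stdio.h>
#include <string>
#include "webrtc/audio_processing/debug.pb.h"

// Append one Event to an open recording, framed as <int32 size><payload>,
// mirroring WriteMessageToDebugFile().
bool AppendEvent(FILE* file, webrtc::audioproc::Event* event) {
  std::string bytes;
  if (!event->SerializeToString(&bytes)) {
    return false;
  }
  // Written in host byte order; the reader in process_test assumes
  // little-endian.
  int32_t size = static_cast<int32_t>(bytes.size());
  if (fwrite(&size, sizeof(size), 1, file) != 1 ||
      fwrite(bytes.data(), 1, bytes.size(), file) != bytes.size()) {
    return false;
  }
  event->Clear();
  return true;
}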

View File

@ -12,6 +12,7 @@
],
'variables': {
'protoc_out_dir': '<(SHARED_INTERMEDIATE_DIR)/protoc_out',
'protoc_out_relpath': 'webrtc/audio_processing',
},
'targets': [
{
@ -25,7 +26,7 @@
}],
],
'dependencies': [
'apm_unittest_proto',
'unittest_proto',
'source/apm.gyp:audio_processing',
'../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
'../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
@ -39,19 +40,19 @@
],
'sources': [
'test/unit_test/unit_test.cc',
'<(protoc_out_dir)/audio_processing_unittest.pb.cc',
'<(protoc_out_dir)/audio_processing_unittest.pb.h',
'<(protoc_out_dir)/<(protoc_out_relpath)/unittest.pb.cc',
'<(protoc_out_dir)/<(protoc_out_relpath)/unittest.pb.h',
],
},
{
# Protobuf compiler / generate rule for unit_test
'target_name': 'apm_unittest_proto',
'target_name': 'unittest_proto',
'type': 'none',
'variables': {
'proto_relpath': 'test/unit_test',
},
'sources': [
'<(proto_relpath)/audio_processing_unittest.proto',
'<(proto_relpath)/unittest.proto',
],
'rules': [
{
@ -61,14 +62,14 @@
'<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)',
],
'outputs': [
'<(protoc_out_dir)/<(RULE_INPUT_ROOT).pb.cc',
'<(protoc_out_dir)/<(protoc_out_relpath)/<(RULE_INPUT_ROOT).pb.cc',
'<(protoc_out_dir)/<(RULE_INPUT_ROOT).pb.h',
],
'action': [
'<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)',
'--proto_path=<(proto_relpath)',
'<(proto_relpath)/<(RULE_INPUT_NAME)',
'--cpp_out=<(protoc_out_dir)',
'--cpp_out=<(protoc_out_dir)/<(protoc_out_relpath)',
],
'message': 'Generating C++ code from <(RULE_INPUT_PATH)',
},
@ -88,9 +89,11 @@
'../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
'../../../../testing/gtest.gyp:gtest',
'../../../../testing/gtest.gyp:gtest_main',
'../../../../third_party/protobuf/protobuf.gyp:protobuf_lite',
],
'include_dirs': [
'../../../../testing/gtest/include',
'<(protoc_out_dir)',
],
'sources': [
'test/process_test/process_test.cc',
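
Note the effect of the new protoc_out_relpath: generated sources now land under webrtc/audio_processing/ inside the shared protoc output directory. Dependents put <(protoc_out_dir) itself on the include path, so the generated headers are pulled in with a namespaced path rather than a bare filename; both of these includes appear verbatim later in this change:

// Generated headers are now included via their namespaced location:
#include "webrtc/audio_processing/debug.pb.h"     // generated by debug_proto
#include "webrtc/audio_processing/unittest.pb.h"  // generated by unittest_proto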

View File

@ -15,6 +15,7 @@ LOCAL_MODULE := libwebrtc_apm
LOCAL_MODULE_TAGS := optional
LOCAL_CPP_EXTENSION := .cc
LOCAL_SRC_FILES := \
$(call all-proto-files-under, .) \
audio_buffer.cc \
audio_processing_impl.cc \
echo_cancellation_impl.cc \
@ -44,12 +45,14 @@ LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/../../../.. \
$(LOCAL_PATH)/../../../../common_audio/signal_processing_library/main/interface \
$(LOCAL_PATH)/../../../../common_audio/vad/main/interface \
$(LOCAL_PATH)/../../../../system_wrappers/interface
$(LOCAL_PATH)/../../../../system_wrappers/interface \
external/protobuf/src
LOCAL_SHARED_LIBRARIES := \
libcutils \
libdl \
libstlport
libstlport \
libprotobuf-cpp-2.3.0-lite
ifndef NDK_ROOT
include external/stlport/libstlport.mk

View File

@ -10,6 +10,10 @@
'includes': [
'../../../../common_settings.gypi',
],
'variables': {
'protoc_out_dir': '<(SHARED_INTERMEDIATE_DIR)/protoc_out',
'protoc_out_relpath': 'webrtc/audio_processing',
},
'targets': [
{
'target_name': 'audio_processing',
@ -22,8 +26,18 @@
'dependencies': ['../../ns/main/source/ns.gyp:ns'],
'defines': ['WEBRTC_NS_FLOAT'],
}],
['build_with_chromium==1', {
'dependencies': [
'../../../../third_party/protobuf/protobuf.gyp:protobuf_lite',
],
}, {
'dependencies': [
'../../../../../third_party/protobuf/protobuf.gyp:protobuf_lite',
],
}],
],
'dependencies': [
'debug_proto',
'../../aec/main/source/aec.gyp:aec',
'../../aecm/main/source/aecm.gyp:aecm',
'../../agc/main/source/agc.gyp:agc',
@ -34,6 +48,7 @@
'include_dirs': [
'../interface',
'../../../interface',
'<(protoc_out_dir)',
],
'direct_dependent_settings': {
'include_dirs': [
@ -65,8 +80,55 @@
'processing_component.h',
'voice_detection_impl.cc',
'voice_detection_impl.h',
'<(protoc_out_dir)/<(protoc_out_relpath)/debug.pb.cc',
'<(protoc_out_dir)/<(protoc_out_relpath)/debug.pb.h',
],
},
{
# Protobuf compiler / generate rule for audio_processing
'target_name': 'debug_proto',
'type': 'none',
'variables': {
'proto_relpath': '.',
},
'sources': [
'<(proto_relpath)/debug.proto',
],
'rules': [
{
'rule_name': 'genproto',
'extension': 'proto',
'inputs': [
'<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)',
],
'outputs': [
'<(protoc_out_dir)/<(protoc_out_relpath)/<(RULE_INPUT_ROOT).pb.cc',
'<(protoc_out_dir)/<(protoc_out_relpath)/<(RULE_INPUT_ROOT).pb.h',
],
'action': [
'<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)',
'--proto_path=<(proto_relpath)',
'<(proto_relpath)/<(RULE_INPUT_NAME)',
'--cpp_out=<(protoc_out_dir)/<(protoc_out_relpath)',
],
'message': 'Generating C++ code from <(RULE_INPUT_PATH)',
},
],
'conditions': [
['build_with_chromium==1', {
'dependencies': [
'../../../../third_party/protobuf/protobuf.gyp:protoc#host',
],
}, {
'dependencies': [
'../../../../../third_party/protobuf/protobuf.gyp:protoc#host',
],
}],
],
# This target exports a hard dependency because it generates header
# files.
'hard_dependency': 1,
},
],
}

View File

@ -10,36 +10,24 @@
#include "audio_processing_impl.h"
#include <cassert>
#include "module_common_types.h"
#include "critical_section_wrapper.h"
#include "file_wrapper.h"
#include <assert.h>
#include "audio_buffer.h"
#include "critical_section_wrapper.h"
#include "echo_cancellation_impl.h"
#include "echo_control_mobile_impl.h"
#include "file_wrapper.h"
#include "high_pass_filter_impl.h"
#include "gain_control_impl.h"
#include "level_estimator_impl.h"
#include "module_common_types.h"
#include "noise_suppression_impl.h"
#include "processing_component.h"
#include "splitting_filter.h"
#include "voice_detection_impl.h"
#include "webrtc/audio_processing/debug.pb.h"
namespace webrtc {
namespace {
enum Events {
kInitializeEvent,
kRenderEvent,
kCaptureEvent
};
const char kMagicNumber[] = "#!vqetrace1.2";
} // namespace
AudioProcessing* AudioProcessing::Create(int id) {
/*WEBRTC_TRACE(webrtc::kTraceModuleCall,
webrtc::kTraceAudioProcessing,
@ -69,6 +57,7 @@ AudioProcessingImpl::AudioProcessingImpl(int id)
noise_suppression_(NULL),
voice_detection_(NULL),
debug_file_(FileWrapper::Create()),
event_msg_(new audioproc::Event()),
crit_(CriticalSectionWrapper::CreateCriticalSection()),
render_audio_(NULL),
capture_audio_(NULL),
@ -77,9 +66,9 @@ AudioProcessingImpl::AudioProcessingImpl(int id)
samples_per_channel_(sample_rate_hz_ / 100),
stream_delay_ms_(0),
was_stream_delay_set_(false),
num_render_input_channels_(1),
num_capture_input_channels_(1),
num_capture_output_channels_(1) {
num_reverse_channels_(1),
num_input_channels_(1),
num_output_channels_(1) {
echo_cancellation_ = new EchoCancellationImpl(this);
component_list_.push_back(echo_cancellation_);
@ -117,15 +106,18 @@ AudioProcessingImpl::~AudioProcessingImpl() {
delete debug_file_;
debug_file_ = NULL;
delete event_msg_;
event_msg_ = NULL;
delete crit_;
crit_ = NULL;
if (render_audio_ != NULL) {
if (render_audio_) {
delete render_audio_;
render_audio_ = NULL;
}
if (capture_audio_ != NULL) {
if (capture_audio_) {
delete capture_audio_;
capture_audio_ = NULL;
}
@ -155,9 +147,9 @@ int AudioProcessingImpl::InitializeLocked() {
capture_audio_ = NULL;
}
render_audio_ = new AudioBuffer(num_render_input_channels_,
render_audio_ = new AudioBuffer(num_reverse_channels_,
samples_per_channel_);
capture_audio_ = new AudioBuffer(num_capture_input_channels_,
capture_audio_ = new AudioBuffer(num_input_channels_,
samples_per_channel_);
was_stream_delay_set_ = false;
@ -171,6 +163,13 @@ int AudioProcessingImpl::InitializeLocked() {
}
}
if (debug_file_->Open()) {
int err = WriteInitMessage();
if (err != kNoError) {
return err;
}
}
return kNoError;
}
@ -205,13 +204,13 @@ int AudioProcessingImpl::set_num_reverse_channels(int channels) {
return kBadParameterError;
}
num_render_input_channels_ = channels;
num_reverse_channels_ = channels;
return InitializeLocked();
}
int AudioProcessingImpl::num_reverse_channels() const {
return num_render_input_channels_;
return num_reverse_channels_;
}
int AudioProcessingImpl::set_num_channels(
@ -231,18 +230,18 @@ int AudioProcessingImpl::set_num_channels(
return kBadParameterError;
}
num_capture_input_channels_ = input_channels;
num_capture_output_channels_ = output_channels;
num_input_channels_ = input_channels;
num_output_channels_ = output_channels;
return InitializeLocked();
}
int AudioProcessingImpl::num_input_channels() const {
return num_capture_input_channels_;
return num_input_channels_;
}
int AudioProcessingImpl::num_output_channels() const {
return num_capture_output_channels_;
return num_output_channels_;
}
int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
@ -258,7 +257,7 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
return kBadSampleRateError;
}
if (frame->_audioChannel != num_capture_input_channels_) {
if (frame->_audioChannel != num_input_channels_) {
return kBadNumberChannelsError;
}
@ -267,44 +266,28 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
}
if (debug_file_->Open()) {
WebRtc_UWord8 event = kCaptureEvent;
if (!debug_file_->Write(&event, sizeof(event))) {
return kFileError;
}
if (!debug_file_->Write(&frame->_frequencyInHz,
sizeof(frame->_frequencyInHz))) {
return kFileError;
}
if (!debug_file_->Write(&frame->_audioChannel,
sizeof(frame->_audioChannel))) {
return kFileError;
}
if (!debug_file_->Write(&frame->_payloadDataLengthInSamples,
sizeof(frame->_payloadDataLengthInSamples))) {
return kFileError;
}
if (!debug_file_->Write(frame->_payloadData,
sizeof(WebRtc_Word16) * frame->_payloadDataLengthInSamples *
frame->_audioChannel)) {
return kFileError;
}
event_msg_->set_type(audioproc::Event::STREAM);
audioproc::Stream* msg = event_msg_->mutable_stream();
const size_t data_size = sizeof(WebRtc_Word16) *
frame->_payloadDataLengthInSamples *
frame->_audioChannel;
msg->set_input_data(frame->_payloadData, data_size);
msg->set_delay(stream_delay_ms_);
msg->set_drift(echo_cancellation_->stream_drift_samples());
msg->set_level(gain_control_->stream_analog_level());
}
capture_audio_->DeinterleaveFrom(frame);
// TODO(ajm): experiment with mixing and AEC placement.
if (num_capture_output_channels_ < num_capture_input_channels_) {
capture_audio_->Mix(num_capture_output_channels_);
if (num_output_channels_ < num_input_channels_) {
capture_audio_->Mix(num_output_channels_);
frame->_audioChannel = num_capture_output_channels_;
frame->_audioChannel = num_output_channels_;
}
if (sample_rate_hz_ == kSampleRate32kHz) {
for (int i = 0; i < num_capture_input_channels_; i++) {
for (int i = 0; i < num_input_channels_; i++) {
// Split into a low and high band.
SplittingFilterAnalysis(capture_audio_->data(i),
capture_audio_->low_pass_split_data(i),
@ -360,7 +343,7 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
//}
if (sample_rate_hz_ == kSampleRate32kHz) {
for (int i = 0; i < num_capture_output_channels_; i++) {
for (int i = 0; i < num_output_channels_; i++) {
// Recombine low and high bands.
SplittingFilterSynthesis(capture_audio_->low_pass_split_data(i),
capture_audio_->high_pass_split_data(i),
@ -372,6 +355,18 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
capture_audio_->InterleaveTo(frame);
if (debug_file_->Open()) {
audioproc::Stream* msg = event_msg_->mutable_stream();
const size_t data_size = sizeof(WebRtc_Word16) *
frame->_payloadDataLengthInSamples *
frame->_audioChannel;
msg->set_output_data(frame->_payloadData, data_size);
err = WriteMessageToDebugFile();
if (err != kNoError) {
return err;
}
}
return kNoError;
}
@ -388,7 +383,7 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
return kBadSampleRateError;
}
if (frame->_audioChannel != num_render_input_channels_) {
if (frame->_audioChannel != num_reverse_channels_) {
return kBadNumberChannelsError;
}
@ -397,30 +392,15 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
}
if (debug_file_->Open()) {
WebRtc_UWord8 event = kRenderEvent;
if (!debug_file_->Write(&event, sizeof(event))) {
return kFileError;
}
if (!debug_file_->Write(&frame->_frequencyInHz,
sizeof(frame->_frequencyInHz))) {
return kFileError;
}
if (!debug_file_->Write(&frame->_audioChannel,
sizeof(frame->_audioChannel))) {
return kFileError;
}
if (!debug_file_->Write(&frame->_payloadDataLengthInSamples,
sizeof(frame->_payloadDataLengthInSamples))) {
return kFileError;
}
if (!debug_file_->Write(frame->_payloadData,
sizeof(WebRtc_Word16) * frame->_payloadDataLengthInSamples *
frame->_audioChannel)) {
return kFileError;
event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
const size_t data_size = sizeof(WebRtc_Word16) *
frame->_payloadDataLengthInSamples *
frame->_audioChannel;
msg->set_data(frame->_payloadData, data_size);
err = WriteMessageToDebugFile();
if (err != kNoError) {
return err;
}
}
@ -428,7 +408,7 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
// TODO(ajm): turn the splitting filter into a component?
if (sample_rate_hz_ == kSampleRate32kHz) {
for (int i = 0; i < num_render_input_channels_; i++) {
for (int i = 0; i < num_reverse_channels_; i++) {
// Split into low and high band.
SplittingFilterAnalysis(render_audio_->data(i),
render_audio_->low_pass_split_data(i),
@ -508,20 +488,9 @@ int AudioProcessingImpl::StartDebugRecording(
return kFileError;
}
if (debug_file_->WriteText("%s\n", kMagicNumber) == -1) {
debug_file_->CloseFile();
return kFileError;
}
// TODO(ajm): should we do this? If so, we need the number of channels etc.
// Record the default sample rate.
WebRtc_UWord8 event = kInitializeEvent;
if (!debug_file_->Write(&event, sizeof(event))) {
return kFileError;
}
if (!debug_file_->Write(&sample_rate_hz_, sizeof(sample_rate_hz_))) {
return kFileError;
int err = WriteInitMessage();
if (err != kNoError) {
return err;
}
return kNoError;
@ -578,7 +547,7 @@ WebRtc_Word32 AudioProcessingImpl::Version(WebRtc_Word8* version,
}
memset(&version[position], 0, bytes_remaining);
WebRtc_Word8 my_version[] = "AudioProcessing 1.0.0";
char my_version[] = "AudioProcessing 1.0.0";
// Includes null termination.
WebRtc_UWord32 length = static_cast<WebRtc_UWord32>(strlen(my_version));
if (bytes_remaining < length) {
@ -633,4 +602,48 @@ WebRtc_Word32 AudioProcessingImpl::ChangeUniqueId(const WebRtc_Word32 id) {
return kNoError;
}
int AudioProcessingImpl::WriteMessageToDebugFile() {
int32_t size = event_msg_->ByteSize();
if (size <= 0) {
return kUnspecifiedError;
}
#if defined(WEBRTC_BIG_ENDIAN)
// TODO(ajm): Use little-endian "on the wire". For the moment, we can be
// pretty safe in assuming little-endian.
#endif
if (!event_msg_->SerializeToString(&event_str_)) {
return kUnspecifiedError;
}
// Write message preceded by its size.
if (!debug_file_->Write(&size, sizeof(int32_t))) {
return kFileError;
}
if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
return kFileError;
}
event_msg_->Clear();
return 0;
}
int AudioProcessingImpl::WriteInitMessage() {
event_msg_->set_type(audioproc::Event::INIT);
audioproc::Init* msg = event_msg_->mutable_init();
msg->set_sample_rate(sample_rate_hz_);
msg->set_device_sample_rate(echo_cancellation_->device_sample_rate_hz());
msg->set_num_input_channels(num_input_channels_);
msg->set_num_output_channels(num_output_channels_);
msg->set_num_reverse_channels(num_reverse_channels_);
int err = WriteMessageToDebugFile();
if (err != kNoError) {
return err;
}
return kNoError;
}
} // namespace webrtc
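
A rough usage sketch for orientation: StartDebugRecording() immediately writes an INIT event (as does any later reinitialization), after which every AnalyzeReverseStream() and ProcessStream() call appends a REVERSE_STREAM or STREAM event while the file is open. The filename and frame setup below are illustrative only, and error checks are omitted:

webrtc::AudioProcessing* apm = webrtc::AudioProcessing::Create(0);
apm->set_sample_rate_hz(16000);
apm->StartDebugRecording("debug_dump.apm");  // writes an INIT event; name is arbitrary
webrtc::AudioFrame far_frame;
webrtc::AudioFrame near_frame;
// ... fill the frames with 10 ms of far-end and near-end audio ...
apm->AnalyzeReverseStream(&far_frame);  // appends a REVERSE_STREAM event
apm->set_stream_delay_ms(0);
apm->ProcessStream(&near_frame);        // appends a STREAM event
webrtc::AudioProcessing::Destroy(apm);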

View File

@ -12,16 +12,19 @@
#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_PROCESSING_IMPL_H_
#include <list>
#include <string>
#include "audio_processing.h"
namespace webrtc {
class CriticalSectionWrapper;
class FileWrapper;
namespace audioproc {
class Event;
} // audioproc
class AudioBuffer;
class CriticalSectionWrapper;
class EchoCancellationImpl;
class EchoControlMobileImpl;
class FileWrapper;
class GainControlImpl;
class HighPassFilterImpl;
class LevelEstimatorImpl;
@ -76,6 +79,9 @@ class AudioProcessingImpl : public AudioProcessing {
virtual WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);
private:
int WriteMessageToDebugFile();
int WriteInitMessage();
int id_;
EchoCancellationImpl* echo_cancellation_;
@ -89,6 +95,8 @@ class AudioProcessingImpl : public AudioProcessing {
std::list<ProcessingComponent*> component_list_;
FileWrapper* debug_file_;
audioproc::Event* event_msg_; // Protobuf message.
std::string event_str_; // Memory for protobuf serialization.
CriticalSectionWrapper* crit_;
AudioBuffer* render_audio_;
@ -100,9 +108,9 @@ class AudioProcessingImpl : public AudioProcessing {
int stream_delay_ms_;
bool was_stream_delay_set_;
int num_render_input_channels_;
int num_capture_input_channels_;
int num_capture_output_channels_;
int num_reverse_channels_;
int num_input_channels_;
int num_output_channels_;
};
} // namespace webrtc

View File

@ -0,0 +1,37 @@
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package webrtc.audioproc;
message Init {
optional int32 sample_rate = 1;
optional int32 device_sample_rate = 2;
optional int32 num_input_channels = 3;
optional int32 num_output_channels = 4;
optional int32 num_reverse_channels = 5;
}
message ReverseStream {
optional bytes data = 1;
}
message Stream {
optional bytes input_data = 1;
optional bytes output_data = 2;
optional int32 delay = 3;
optional sint32 drift = 4;
optional int32 level = 5;
}
message Event {
enum Type {
INIT = 0;
REVERSE_STREAM = 1;
STREAM = 2;
}
required Type type = 1;
optional Init init = 2;
optional ReverseStream reverse_stream = 3;
optional Stream stream = 4;
}
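
The read side loops over <size><payload> records and dispatches on Event::type(), exactly as process_test does with ReadMessageFromFile() below. A compact sketch, again assuming a little-endian host; Replay is a hypothetical helper:

#include <stdint.h>
#include <stdio.h>
#include <string>
#include "webrtc/audio_processing/debug.pb.h"

// Read and dispatch every event in an open recording.
void Replay(FILE* file) {
  webrtc::audioproc::Event event;
  int32_t size = 0;
  while (fread(&size, sizeof(size), 1, file) == 1 && size > 0) {
    std::string buf(static_cast<size_t>(size), '\0');
    if (fread(&buf[0], 1, buf.size(), file) != buf.size()) break;
    if (!event.ParseFromString(buf)) break;
    switch (event.type()) {
      case webrtc::audioproc::Event::INIT:
        // event.init() carries sample_rate(), num_input_channels(), etc.
        break;
      case webrtc::audioproc::Event::REVERSE_STREAM:
        // event.reverse_stream().data() holds the raw far-end samples.
        break;
      case webrtc::audioproc::Event::STREAM:
        // input_data()/output_data(), plus delay(), drift() and level().
        break;
    }
  }
}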

View File

@ -29,6 +29,8 @@ class EchoCancellationImpl : public EchoCancellation,
// EchoCancellation implementation.
virtual bool is_enabled() const;
virtual int device_sample_rate_hz() const;
virtual int stream_drift_samples() const;
// ProcessingComponent implementation.
virtual int Initialize();
@ -40,9 +42,7 @@ class EchoCancellationImpl : public EchoCancellation,
virtual int enable_drift_compensation(bool enable);
virtual bool is_drift_compensation_enabled() const;
virtual int set_device_sample_rate_hz(int rate);
virtual int device_sample_rate_hz() const;
virtual int set_stream_drift_samples(int drift);
virtual int stream_drift_samples() const;
virtual int set_suppression_level(SuppressionLevel level);
virtual SuppressionLevel suppression_level() const;
virtual int enable_metrics(bool enable);

View File

@ -36,12 +36,12 @@ class GainControlImpl : public GainControl,
// GainControl implementation.
virtual bool is_enabled() const;
virtual int stream_analog_level();
private:
// GainControl implementation.
virtual int Enable(bool enable);
virtual int set_stream_analog_level(int level);
virtual int stream_analog_level();
virtual int set_mode(Mode mode);
virtual Mode mode() const;
virtual int set_target_level_dbfs(int level);

View File

@ -14,35 +14,62 @@
#include <sys/stat.h>
#endif
#include "tick_util.h"
#include "gtest/gtest.h"
#include "module_common_types.h"
#include "audio_processing.h"
#include "cpu_features_wrapper.h"
#include "module_common_types.h"
#include "tick_util.h"
#include "webrtc/audio_processing/debug.pb.h"
using webrtc::AudioFrame;
using webrtc::TickInterval;
using webrtc::TickTime;
using webrtc::AudioProcessing;
using webrtc::GainControl;
using webrtc::NoiseSuppression;
using webrtc::TickInterval;
using webrtc::TickTime;
using webrtc::audioproc::Event;
using webrtc::audioproc::Init;
using webrtc::audioproc::ReverseStream;
using webrtc::audioproc::Stream;
namespace {
// Returns true on success, false on error or end-of-file.
bool ReadMessageFromFile(FILE* file,
::google::protobuf::MessageLite* msg) {
// The "wire format" for the size is little-endian.
// Assume process_test is running on a little-endian machine.
int32_t size;
if (fread(&size, sizeof(int32_t), 1, file) != 1) {
return false;
}
if (size <= 0) {
return false;
}
size_t usize = static_cast<size_t>(size);
char array[usize];
if (fread(array, sizeof(char), usize, file) != usize) {
return false;
}
msg->Clear();
return msg->ParseFromArray(array, usize);
}
void usage() {
printf(
"Usage: process_test [options] [-ir REVERSE_FILE] [-i PRIMARY_FILE]\n");
printf(
" [-o OUT_FILE]\n");
"Usage: process_test [options] [-pb PROTOBUF_FILE]\n"
" [-ir REVERSE_FILE] [-i PRIMARY_FILE] [-o OUT_FILE]\n");
printf(
"process_test is a test application for AudioProcessing.\n\n"
"When -ir or -i is specified the files will be processed directly in a\n"
"simulation mode. Otherwise the full set of test files is expected to be\n"
"present in the working directory.\n");
"When a protobuf debug file is available, specify it with -pb.\n"
"Alternately, when -ir or -i is used, the specified files will be\n"
"processed directly in a simulation mode. Otherwise the full set of\n"
"legacy test files is expected to be present in the working directory.\n");
printf("\n");
printf("Options\n");
printf("General configuration:\n");
printf("General configuration (only used for the simulation mode):\n");
printf(" -fs SAMPLE_RATE_HZ\n");
printf(" -ch CHANNELS_IN CHANNELS_OUT\n");
printf(" -rch REVERSE_CHANNELS\n");
@ -73,7 +100,7 @@ void usage() {
printf(" --ns_high\n");
printf(" --ns_very_high\n");
printf("\n -vad Voice activity detection\n");
printf(" --vad_out_file FILE");
printf(" --vad_out_file FILE\n");
printf("\n");
printf("Modifiers:\n");
printf(" --perf Measure performance.\n");
@ -101,6 +128,7 @@ void void_main(int argc, char* argv[]) {
WebRtc_UWord32 version_bytes_remaining = sizeof(version);
WebRtc_UWord32 version_position = 0;
const char* pb_filename = NULL;
const char* far_filename = NULL;
const char* near_filename = NULL;
const char* out_filename = NULL;
@ -124,7 +152,12 @@ void void_main(int argc, char* argv[]) {
//bool interleaved = true;
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "-ir") == 0) {
if (strcmp(argv[i], "-pb") == 0) {
i++;
ASSERT_LT(i, argc) << "Specify protobuf filename after -pb";
pb_filename = argv[i];
} else if (strcmp(argv[i], "-ir") == 0) {
i++;
ASSERT_LT(i, argc) << "Specify filename after -ir";
far_filename = argv[i];
@ -296,10 +329,17 @@ void void_main(int argc, char* argv[]) {
printf("%s\n", version);
return;
} else if (strcmp(argv[i], "--debug_recording") == 0) {
i++;
ASSERT_LT(i, argc) << "Specify filename after --debug_recording";
ASSERT_EQ(apm->kNoError, apm->StartDebugRecording(argv[i]));
} else {
FAIL() << "Unrecognized argument " << argv[i];
}
}
// If we're reading a protobuf file, ensure a simulation hasn't also
// been requested (which makes no sense...)
ASSERT_FALSE(pb_filename && simulating);
if (verbose) {
printf("Sample rate: %d Hz\n", sample_rate_hz);
@ -322,14 +362,15 @@ void void_main(int argc, char* argv[]) {
near_filename = near_file_default;
}
if (out_filename == NULL) {
if (!out_filename) {
out_filename = out_file_default;
}
if (vad_out_filename == NULL) {
if (!vad_out_filename) {
vad_out_filename = vad_file_default;
}
FILE* pb_file = NULL;
FILE* far_file = NULL;
FILE* near_file = NULL;
FILE* out_file = NULL;
@ -340,35 +381,49 @@ void void_main(int argc, char* argv[]) {
FILE* aecm_echo_path_in_file = NULL;
FILE* aecm_echo_path_out_file = NULL;
if (far_filename != NULL) {
far_file = fopen(far_filename, "rb");
ASSERT_TRUE(NULL != far_file) << "Unable to open far-end audio file "
<< far_filename;
}
if (pb_filename) {
pb_file = fopen(pb_filename, "rb");
ASSERT_TRUE(NULL != pb_file) << "Unable to open protobuf file "
<< pb_filename;
} else {
if (far_filename) {
far_file = fopen(far_filename, "rb");
ASSERT_TRUE(NULL != far_file) << "Unable to open far-end audio file "
<< far_filename;
}
near_file = fopen(near_filename, "rb");
ASSERT_TRUE(NULL != near_file) << "Unable to open near-end audio file "
<< near_filename;
struct stat st;
stat(near_filename, &st);
int near_size_samples = st.st_size / sizeof(int16_t);
near_file = fopen(near_filename, "rb");
ASSERT_TRUE(NULL != near_file) << "Unable to open near-end audio file "
<< near_filename;
if (!simulating) {
event_file = fopen(event_filename, "rb");
ASSERT_TRUE(NULL != event_file) << "Unable to open event file "
<< event_filename;
delay_file = fopen(delay_filename, "rb");
ASSERT_TRUE(NULL != delay_file) << "Unable to open buffer file "
<< delay_filename;
drift_file = fopen(drift_filename, "rb");
ASSERT_TRUE(NULL != drift_file) << "Unable to open drift file "
<< drift_filename;
}
}
out_file = fopen(out_filename, "wb");
ASSERT_TRUE(NULL != out_file) << "Unable to open output audio file "
<< out_filename;
if (!simulating) {
event_file = fopen(event_filename, "rb");
ASSERT_TRUE(NULL != event_file) << "Unable to open event file "
<< event_filename;
delay_file = fopen(delay_filename, "rb");
ASSERT_TRUE(NULL != delay_file) << "Unable to open buffer file "
<< delay_filename;
drift_file = fopen(drift_filename, "rb");
ASSERT_TRUE(NULL != drift_file) << "Unable to open drift file "
<< drift_filename;
int near_size_samples = 0;
if (pb_file) {
struct stat st;
stat(pb_filename, &st);
// Crude estimate, but should be good enough.
near_size_samples = st.st_size / 3 / sizeof(int16_t);
} else {
struct stat st;
stat(near_filename, &st);
near_size_samples = st.st_size / sizeof(int16_t);
}
if (apm->voice_detection()->is_enabled()) {
@ -399,7 +454,6 @@ void void_main(int argc, char* argv[]) {
aecm_echo_path_out_file = fopen(aecm_echo_path_out_filename, "wb");
ASSERT_TRUE(NULL != aecm_echo_path_out_file) << "Unable to open file "
<< aecm_echo_path_out_filename;
}
enum Events {
@ -433,190 +487,341 @@ void void_main(int argc, char* argv[]) {
WebRtc_Word64 min_time_us = 1e6;
WebRtc_Word64 min_time_reverse_us = 1e6;
while (simulating || feof(event_file) == 0) {
std::ostringstream trace_stream;
trace_stream << "Processed frames: " << reverse_count << " (reverse), "
<< primary_count << " (primary)";
SCOPED_TRACE(trace_stream.str());
// TODO(ajm): Ideally we would refactor this block into separate functions,
// but for now we want to share the variables.
if (pb_file) {
Event event_msg;
while (ReadMessageFromFile(pb_file, &event_msg)) {
std::ostringstream trace_stream;
trace_stream << "Processed frames: " << reverse_count << " (reverse), "
<< primary_count << " (primary)";
SCOPED_TRACE(trace_stream.str());
if (event_msg.type() == Event::INIT) {
ASSERT_TRUE(event_msg.has_init());
const Init msg = event_msg.init();
if (simulating) {
if (far_file == NULL) {
event = kCaptureEvent;
} else {
if (event == kRenderEvent) {
ASSERT_TRUE(msg.has_sample_rate());
ASSERT_EQ(apm->kNoError,
apm->set_sample_rate_hz(msg.sample_rate()));
ASSERT_TRUE(msg.has_device_sample_rate());
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->set_device_sample_rate_hz(
msg.device_sample_rate()));
ASSERT_TRUE(msg.has_num_input_channels());
ASSERT_TRUE(msg.has_num_output_channels());
ASSERT_EQ(apm->kNoError,
apm->set_num_channels(msg.num_input_channels(),
msg.num_output_channels()));
ASSERT_TRUE(msg.has_num_reverse_channels());
ASSERT_EQ(apm->kNoError,
apm->set_num_reverse_channels(msg.num_reverse_channels()));
samples_per_channel = msg.sample_rate() / 100;
far_frame._frequencyInHz = msg.sample_rate();
far_frame._payloadDataLengthInSamples =
msg.num_reverse_channels() * samples_per_channel;
near_frame._frequencyInHz = msg.sample_rate();
if (verbose) {
printf("Init at frame: %d (primary), %d (reverse)\n",
primary_count, reverse_count);
printf(" Sample rate: %d Hz\n", sample_rate_hz);
}
} else if (event_msg.type() == Event::REVERSE_STREAM) {
ASSERT_TRUE(event_msg.has_reverse_stream());
const ReverseStream msg = event_msg.reverse_stream();
reverse_count++;
ASSERT_TRUE(msg.has_data());
ASSERT_EQ(sizeof(int16_t) * far_frame._payloadDataLengthInSamples,
msg.data().size());
memcpy(far_frame._payloadData, msg.data().data(), msg.data().size());
if (perf_testing) {
t0 = TickTime::Now();
}
ASSERT_EQ(apm->kNoError,
apm->AnalyzeReverseStream(&far_frame));
if (perf_testing) {
t1 = TickTime::Now();
TickInterval tick_diff = t1 - t0;
acc_ticks += tick_diff;
if (tick_diff.Microseconds() > max_time_reverse_us) {
max_time_reverse_us = tick_diff.Microseconds();
}
if (tick_diff.Microseconds() < min_time_reverse_us) {
min_time_reverse_us = tick_diff.Microseconds();
}
}
} else if (event_msg.type() == Event::STREAM) {
ASSERT_TRUE(event_msg.has_stream());
const Stream msg = event_msg.stream();
primary_count++;
near_frame._audioChannel = apm->num_input_channels();
near_frame._payloadDataLengthInSamples =
apm->num_input_channels() * samples_per_channel;
ASSERT_TRUE(msg.has_input_data());
ASSERT_EQ(sizeof(int16_t) * near_frame._payloadDataLengthInSamples,
msg.input_data().size());
memcpy(near_frame._payloadData,
msg.input_data().data(),
msg.input_data().size());
near_read_samples += near_frame._payloadDataLengthInSamples;
if (progress && primary_count % 100 == 0) {
printf("%.0f%% complete\r",
(near_read_samples * 100.0) / near_size_samples);
fflush(stdout);
}
if (perf_testing) {
t0 = TickTime::Now();
}
ASSERT_EQ(apm->kNoError,
apm->gain_control()->set_stream_analog_level(msg.level()));
ASSERT_EQ(apm->kNoError,
apm->set_stream_delay_ms(msg.delay()));
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->set_stream_drift_samples(msg.drift()));
int err = apm->ProcessStream(&near_frame);
if (err == apm->kBadStreamParameterWarning) {
printf("Bad parameter warning. %s\n", trace_stream.str().c_str());
}
ASSERT_TRUE(err == apm->kNoError ||
err == apm->kBadStreamParameterWarning);
capture_level = apm->gain_control()->stream_analog_level();
stream_has_voice =
static_cast<int8_t>(apm->voice_detection()->stream_has_voice());
if (vad_out_file != NULL) {
ASSERT_EQ(1u, fwrite(&stream_has_voice,
sizeof(stream_has_voice),
1,
vad_out_file));
}
if (apm->gain_control()->mode() != GainControl::kAdaptiveAnalog) {
ASSERT_EQ(msg.level(), capture_level);
}
if (perf_testing) {
t1 = TickTime::Now();
TickInterval tick_diff = t1 - t0;
acc_ticks += tick_diff;
if (tick_diff.Microseconds() > max_time_us) {
max_time_us = tick_diff.Microseconds();
}
if (tick_diff.Microseconds() < min_time_us) {
min_time_us = tick_diff.Microseconds();
}
}
ASSERT_EQ(near_frame._payloadDataLengthInSamples,
fwrite(near_frame._payloadData,
sizeof(int16_t),
near_frame._payloadDataLengthInSamples,
out_file));
}
}
ASSERT_TRUE(feof(pb_file));
printf("100%% complete\r");
} else {
while (simulating || feof(event_file) == 0) {
std::ostringstream trace_stream;
trace_stream << "Processed frames: " << reverse_count << " (reverse), "
<< primary_count << " (primary)";
SCOPED_TRACE(trace_stream.str());
if (simulating) {
if (far_file == NULL) {
event = kCaptureEvent;
} else {
event = kRenderEvent;
}
}
} else {
read_count = fread(&event, sizeof(event), 1, event_file);
if (read_count != 1) {
break;
}
//if (fread(&event, sizeof(event), 1, event_file) != 1) {
// break; // This is expected.
//}
}
if (event == kInitializeEvent || event == kResetEventDeprecated) {
ASSERT_EQ(1u,
fread(&sample_rate_hz, sizeof(sample_rate_hz), 1, event_file));
samples_per_channel = sample_rate_hz / 100;
ASSERT_EQ(1u,
fread(&device_sample_rate_hz,
sizeof(device_sample_rate_hz),
1,
event_file));
ASSERT_EQ(apm->kNoError,
apm->set_sample_rate_hz(sample_rate_hz));
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->set_device_sample_rate_hz(
device_sample_rate_hz));
far_frame._frequencyInHz = sample_rate_hz;
near_frame._frequencyInHz = sample_rate_hz;
if (verbose) {
printf("Init at frame: %d (primary), %d (reverse)\n",
primary_count, reverse_count);
printf(" Sample rate: %d Hz\n", sample_rate_hz);
}
} else if (event == kRenderEvent) {
reverse_count++;
far_frame._audioChannel = num_render_channels;
far_frame._payloadDataLengthInSamples =
num_render_channels * samples_per_channel;
read_count = fread(far_frame._payloadData,
sizeof(WebRtc_Word16),
far_frame._payloadDataLengthInSamples,
far_file);
if (simulating) {
if (read_count != far_frame._payloadDataLengthInSamples) {
break; // This is expected.
if (event == kRenderEvent) {
event = kCaptureEvent;
} else {
event = kRenderEvent;
}
}
} else {
ASSERT_EQ(read_count,
far_frame._payloadDataLengthInSamples);
}
if (perf_testing) {
t0 = TickTime::Now();
}
ASSERT_EQ(apm->kNoError,
apm->AnalyzeReverseStream(&far_frame));
if (perf_testing) {
t1 = TickTime::Now();
TickInterval tick_diff = t1 - t0;
acc_ticks += tick_diff;
if (tick_diff.Microseconds() > max_time_reverse_us) {
max_time_reverse_us = tick_diff.Microseconds();
}
if (tick_diff.Microseconds() < min_time_reverse_us) {
min_time_reverse_us = tick_diff.Microseconds();
read_count = fread(&event, sizeof(event), 1, event_file);
if (read_count != 1) {
break;
}
}
} else if (event == kCaptureEvent) {
primary_count++;
near_frame._audioChannel = num_capture_input_channels;
near_frame._payloadDataLengthInSamples =
num_capture_input_channels * samples_per_channel;
if (event == kInitializeEvent || event == kResetEventDeprecated) {
ASSERT_EQ(1u,
fread(&sample_rate_hz, sizeof(sample_rate_hz), 1, event_file));
samples_per_channel = sample_rate_hz / 100;
read_count = fread(near_frame._payloadData,
ASSERT_EQ(1u,
fread(&device_sample_rate_hz,
sizeof(device_sample_rate_hz),
1,
event_file));
ASSERT_EQ(apm->kNoError,
apm->set_sample_rate_hz(sample_rate_hz));
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->set_device_sample_rate_hz(
device_sample_rate_hz));
far_frame._frequencyInHz = sample_rate_hz;
near_frame._frequencyInHz = sample_rate_hz;
if (verbose) {
printf("Init at frame: %d (primary), %d (reverse)\n",
primary_count, reverse_count);
printf(" Sample rate: %d Hz\n", sample_rate_hz);
}
} else if (event == kRenderEvent) {
reverse_count++;
far_frame._audioChannel = num_render_channels;
far_frame._payloadDataLengthInSamples =
num_render_channels * samples_per_channel;
read_count = fread(far_frame._payloadData,
sizeof(WebRtc_Word16),
far_frame._payloadDataLengthInSamples,
far_file);
if (simulating) {
if (read_count != far_frame._payloadDataLengthInSamples) {
break; // This is expected.
}
} else {
ASSERT_EQ(read_count,
far_frame._payloadDataLengthInSamples);
}
if (perf_testing) {
t0 = TickTime::Now();
}
ASSERT_EQ(apm->kNoError,
apm->AnalyzeReverseStream(&far_frame));
if (perf_testing) {
t1 = TickTime::Now();
TickInterval tick_diff = t1 - t0;
acc_ticks += tick_diff;
if (tick_diff.Microseconds() > max_time_reverse_us) {
max_time_reverse_us = tick_diff.Microseconds();
}
if (tick_diff.Microseconds() < min_time_reverse_us) {
min_time_reverse_us = tick_diff.Microseconds();
}
}
} else if (event == kCaptureEvent) {
primary_count++;
near_frame._audioChannel = num_capture_input_channels;
near_frame._payloadDataLengthInSamples =
num_capture_input_channels * samples_per_channel;
read_count = fread(near_frame._payloadData,
sizeof(WebRtc_Word16),
near_frame._payloadDataLengthInSamples,
near_file);
near_read_samples += read_count;
if (progress && primary_count % 100 == 0) {
printf("%.0f%% complete\r",
(near_read_samples * 100.0) / near_size_samples);
fflush(stdout);
}
if (simulating) {
if (read_count != near_frame._payloadDataLengthInSamples) {
break; // This is expected.
}
delay_ms = 0;
drift_samples = 0;
} else {
ASSERT_EQ(read_count,
near_frame._payloadDataLengthInSamples);
// TODO(ajm): sizeof(delay_ms) for current files?
ASSERT_EQ(1u,
fread(&delay_ms, 2, 1, delay_file));
ASSERT_EQ(1u,
fread(&drift_samples, sizeof(drift_samples), 1, drift_file));
}
if (perf_testing) {
t0 = TickTime::Now();
}
// TODO(ajm): fake an analog gain while simulating.
int capture_level_in = capture_level;
ASSERT_EQ(apm->kNoError,
apm->gain_control()->set_stream_analog_level(capture_level));
ASSERT_EQ(apm->kNoError,
apm->set_stream_delay_ms(delay_ms));
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->set_stream_drift_samples(drift_samples));
int err = apm->ProcessStream(&near_frame);
if (err == apm->kBadStreamParameterWarning) {
printf("Bad parameter warning. %s\n", trace_stream.str().c_str());
}
ASSERT_TRUE(err == apm->kNoError ||
err == apm->kBadStreamParameterWarning);
capture_level = apm->gain_control()->stream_analog_level();
stream_has_voice =
static_cast<int8_t>(apm->voice_detection()->stream_has_voice());
if (vad_out_file != NULL) {
ASSERT_EQ(1u, fwrite(&stream_has_voice,
sizeof(stream_has_voice),
1,
vad_out_file));
}
if (apm->gain_control()->mode() != GainControl::kAdaptiveAnalog) {
ASSERT_EQ(capture_level_in, capture_level);
}
if (perf_testing) {
t1 = TickTime::Now();
TickInterval tick_diff = t1 - t0;
acc_ticks += tick_diff;
if (tick_diff.Microseconds() > max_time_us) {
max_time_us = tick_diff.Microseconds();
}
if (tick_diff.Microseconds() < min_time_us) {
min_time_us = tick_diff.Microseconds();
}
}
ASSERT_EQ(near_frame._payloadDataLengthInSamples,
fwrite(near_frame._payloadData,
sizeof(WebRtc_Word16),
near_frame._payloadDataLengthInSamples,
near_file);
near_read_samples += read_count;
if (progress && primary_count % 100 == 0) {
printf("%.0f%% complete\r",
(near_read_samples * 100.0) / near_size_samples);
fflush(stdout);
out_file));
}
if (simulating) {
if (read_count != near_frame._payloadDataLengthInSamples) {
break; // This is expected.
}
delay_ms = 0;
drift_samples = 0;
} else {
ASSERT_EQ(read_count,
near_frame._payloadDataLengthInSamples);
// TODO(ajm): sizeof(delay_ms) for current files?
ASSERT_EQ(1u,
fread(&delay_ms, 2, 1, delay_file));
ASSERT_EQ(1u,
fread(&drift_samples, sizeof(drift_samples), 1, drift_file));
else {
FAIL() << "Event " << event << " is unrecognized";
}
if (perf_testing) {
t0 = TickTime::Now();
}
// TODO(ajm): fake an analog gain while simulating.
int capture_level_in = capture_level;
ASSERT_EQ(apm->kNoError,
apm->gain_control()->set_stream_analog_level(capture_level));
ASSERT_EQ(apm->kNoError,
apm->set_stream_delay_ms(delay_ms));
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->set_stream_drift_samples(drift_samples));
int err = apm->ProcessStream(&near_frame);
if (err == apm->kBadStreamParameterWarning) {
printf("Bad parameter warning. %s\n", trace_stream.str().c_str());
}
ASSERT_TRUE(err == apm->kNoError ||
err == apm->kBadStreamParameterWarning);
capture_level = apm->gain_control()->stream_analog_level();
stream_has_voice =
static_cast<int8_t>(apm->voice_detection()->stream_has_voice());
if (vad_out_file != NULL) {
ASSERT_EQ(1u, fwrite(&stream_has_voice,
sizeof(stream_has_voice),
1,
vad_out_file));
}
if (apm->gain_control()->mode() != GainControl::kAdaptiveAnalog) {
ASSERT_EQ(capture_level_in, capture_level);
}
if (perf_testing) {
t1 = TickTime::Now();
TickInterval tick_diff = t1 - t0;
acc_ticks += tick_diff;
if (tick_diff.Microseconds() > max_time_us) {
max_time_us = tick_diff.Microseconds();
}
if (tick_diff.Microseconds() < min_time_us) {
min_time_us = tick_diff.Microseconds();
}
}
ASSERT_EQ(near_frame._payloadDataLengthInSamples,
fwrite(near_frame._payloadData,
sizeof(WebRtc_Word16),
near_frame._payloadDataLengthInSamples,
out_file));
}
else {
FAIL() << "Event " << event << " is unrecognized";
}
}
@ -638,21 +843,24 @@ void void_main(int argc, char* argv[]) {
primary_count, reverse_count);
}
int8_t temp_int8;
if (far_file != NULL) {
read_count = fread(&temp_int8, sizeof(temp_int8), 1, far_file);
EXPECT_NE(0, feof(far_file)) << "Far-end file not fully processed";
}
read_count = fread(&temp_int8, sizeof(temp_int8), 1, near_file);
EXPECT_NE(0, feof(near_file)) << "Near-end file not fully processed";
if (!pb_file) {
int8_t temp_int8;
if (far_file) {
read_count = fread(&temp_int8, sizeof(temp_int8), 1, far_file);
EXPECT_NE(0, feof(far_file)) << "Far-end file not fully processed";
}
if (!simulating) {
read_count = fread(&temp_int8, sizeof(temp_int8), 1, event_file);
EXPECT_NE(0, feof(event_file)) << "Event file not fully processed";
read_count = fread(&temp_int8, sizeof(temp_int8), 1, delay_file);
EXPECT_NE(0, feof(delay_file)) << "Delay file not fully processed";
read_count = fread(&temp_int8, sizeof(temp_int8), 1, drift_file);
EXPECT_NE(0, feof(drift_file)) << "Drift file not fully processed";
read_count = fread(&temp_int8, sizeof(temp_int8), 1, near_file);
EXPECT_NE(0, feof(near_file)) << "Near-end file not fully processed";
if (!simulating) {
read_count = fread(&temp_int8, sizeof(temp_int8), 1, event_file);
EXPECT_NE(0, feof(event_file)) << "Event file not fully processed";
read_count = fread(&temp_int8, sizeof(temp_int8), 1, delay_file);
EXPECT_NE(0, feof(delay_file)) << "Delay file not fully processed";
read_count = fread(&temp_int8, sizeof(temp_int8), 1, drift_file);
EXPECT_NE(0, feof(drift_file)) << "Drift file not fully processed";
}
}
if (perf_testing) {
@ -673,6 +881,7 @@ void void_main(int argc, char* argv[]) {
AudioProcessing::Destroy(apm);
apm = NULL;
}
} // namespace
int main(int argc, char* argv[])
{

View File

@ -29,7 +29,7 @@ LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/../../../../../system_wrappers/interface \
$(LOCAL_PATH)/../../../../../common_audio/signal_processing_library/main/interface \
external/gtest/include \
external/protobuf/src
external/protobuf/src
LOCAL_STATIC_LIBRARIES := \
libgtest \
@ -37,7 +37,7 @@ LOCAL_STATIC_LIBRARIES := \
LOCAL_SHARED_LIBRARIES := \
libstlport \
libwebrtc_audio_preprocessing
libwebrtc_audio_preprocessing
LOCAL_MODULE:= webrtc_apm_unit_test

View File

@ -13,16 +13,16 @@
#include <gtest/gtest.h>
#include "audio_processing.h"
#ifdef WEBRTC_ANDROID
#include "external/webrtc/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.pb.h"
#else
#include "audio_processing_unittest.pb.h"
#endif
#include "event_wrapper.h"
#include "module_common_types.h"
#include "signal_processing_library.h"
#include "thread_wrapper.h"
#include "trace.h"
#ifdef WEBRTC_ANDROID
#include "external/webrtc/src/modules/audio_processing/main/test/unit_test/unittest.pb.h"
#else
#include "webrtc/audio_processing/unittest.pb.h"
#endif
using webrtc::AudioProcessing;
using webrtc::AudioFrame;
@ -162,7 +162,7 @@ WebRtc_Word16 MaxAudioFrame(const AudioFrame& frame) {
}
void TestStats(const AudioProcessing::Statistic& test,
const audio_processing_unittest::Test::Statistic& reference) {
const webrtc::audioproc::Test::Statistic& reference) {
EXPECT_EQ(reference.instant(), test.instant);
EXPECT_EQ(reference.average(), test.average);
EXPECT_EQ(reference.maximum(), test.maximum);
@ -170,7 +170,7 @@ void TestStats(const AudioProcessing::Statistic& test,
}
void WriteStatsMessage(const AudioProcessing::Statistic& output,
audio_processing_unittest::Test::Statistic* message) {
webrtc::audioproc::Test::Statistic* message) {
message->set_instant(output.instant);
message->set_average(output.average);
message->set_maximum(output.maximum);
@ -416,7 +416,7 @@ TEST_F(ApmTest, SampleRates) {
TEST_F(ApmTest, Process) {
GOOGLE_PROTOBUF_VERIFY_VERSION;
audio_processing_unittest::OutputData output_data;
webrtc::audioproc::OutputData output_data;
if (!write_output_data) {
ReadMessageLiteFromFile(kOutputFileName, &output_data);
@ -435,7 +435,7 @@ TEST_F(ApmTest, Process) {
for (size_t i = 0; i < channels_size; i++) {
for (size_t j = 0; j < channels_size; j++) {
for (size_t k = 0; k < sample_rates_size; k++) {
audio_processing_unittest::Test* test = output_data.add_test();
webrtc::audioproc::Test* test = output_data.add_test();
test->set_num_reverse_channels(channels[i]);
test->set_num_input_channels(channels[j]);
test->set_num_output_channels(channels[j]);
@ -481,7 +481,7 @@ TEST_F(ApmTest, Process) {
for (int i = 0; i < output_data.test_size(); i++) {
printf("Running test %d of %d...\n", i + 1, output_data.test_size());
audio_processing_unittest::Test* test = output_data.mutable_test(i);
webrtc::audioproc::Test* test = output_data.mutable_test(i);
const int num_samples = test->sample_rate() / 100;
revframe_->_payloadDataLengthInSamples = num_samples;
revframe_->_audioChannel = test->num_reverse_channels();
@ -598,7 +598,7 @@ TEST_F(ApmTest, Process) {
EXPECT_EQ(test->max_output_average(), max_output_average);
#if defined(WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE)
audio_processing_unittest::Test::EchoMetrics reference =
webrtc::audioproc::Test::EchoMetrics reference =
test->echo_metrics();
TestStats(echo_metrics.residual_echo_return_loss,
reference.residual_echo_return_loss());
@ -618,7 +618,7 @@ TEST_F(ApmTest, Process) {
test->set_max_output_average(max_output_average);
#if defined(WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE)
audio_processing_unittest::Test::EchoMetrics* message =
webrtc::audioproc::Test::EchoMetrics* message =
test->mutable_echo_metrics();
WriteStatsMessage(echo_metrics.residual_echo_return_loss,
message->mutable_residual_echo_return_loss());

View File

@ -1,5 +1,6 @@
package audio_processing_unittest;
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package webrtc.audioproc;
message Test {
optional int32 num_reverse_channels = 1;
@ -19,7 +20,6 @@ message Test {
optional int32 has_voice_count = 9;
optional int32 is_saturated_count = 10;
message Statistic {
optional int32 instant = 1;
optional int32 average = 2;