Make protobuf use optional.
- By default, disable the AudioProcessing protobuf usage in the Chromium build. The standalone build is unaffected.
- Add a test for the AudioProcessing debug dumps.

TEST=audioproc_unittest
Review URL: http://webrtc-codereview.appspot.com/303003

git-svn-id: http://webrtc.googlecode.com/svn/trunk@1094 4adac7df-926f-26a2-2b94-8c16560cd09d
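Note on the pattern used throughout this change: debug-dump code is compiled only when WEBRTC_AUDIOPROC_DEBUG_DUMP is defined, and the gyp files define that macro only when enable_protobuf==1. When the macro is absent, the recording APIs stay callable but report kUnsupportedFunctionError. A minimal, self-contained sketch of that gating (the macro and error-code names come from this change; the enum values and file handling below are purely illustrative):

// gate_sketch.cc - build with and without -DWEBRTC_AUDIOPROC_DEBUG_DUMP.
#include <cstdio>

enum { kNoError = 0, kFileError = -2, kUnsupportedFunctionError = -6 };  // Illustrative values.

int StartDebugRecording(const char* filename) {
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Protobuf is available: the real code opens the dump file and writes an
  // Init event; here we only touch the file to keep the sketch runnable.
  std::FILE* file = std::fopen(filename, "wb");
  if (file == NULL) return kFileError;
  std::fclose(file);
  return kNoError;
#else
  // Default in the Chromium build: the feature is compiled out.
  return kUnsupportedFunctionError;
#endif
}

int main() {
  std::printf("StartDebugRecording -> %d\n", StartDebugRecording("debug.aec"));
  return 0;
}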
@@ -66,6 +66,9 @@
       # Exclude internal video render module on Chromium build
       'include_internal_video_render%': 0,
 
+      # Disable the use of protocol buffers in production code.
+      'enable_protobuf%': 0,
+
       'webrtc_root%': '<(DEPTH)/third_party/webrtc',
     }, {
       # Settings for the standalone (not-in-Chromium) build.
@@ -77,6 +80,8 @@
 
       'include_internal_video_render%': 1,
 
+      'enable_protobuf%': 1,
+
       'webrtc_root%': '<(DEPTH)/src',
 
       'conditions': [
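Because enable_protobuf is declared with gyp's '%' (default) operator, these values are only defaults: either build can flip the setting when projects are generated, without editing this file. One assumed way to do so in a Chromium checkout (standard gyp workflow, not part of this change): GYP_DEFINES="enable_protobuf=1" gclient runhooks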
@@ -17,6 +17,9 @@
         }, {
           'defines': [ 'WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE' ],
         }],
+        ['enable_protobuf==1', {
+          'defines': [ 'WEBRTC_AUDIOPROC_DEBUG_DUMP' ],
+        }],
       ],
       'dependencies': [
         'audio_processing',
@@ -41,12 +44,15 @@
         },
         'includes': [ '../../build/protoc.gypi', ],
       },
+  ],
+  'conditions': [
+    ['enable_protobuf==1', {
+      'targets': [
         {
           'target_name': 'audioproc',
           'type': 'executable',
           'dependencies': [
             'audio_processing',
-            'audioproc_debug_proto',
             '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
             '<(webrtc_root)/../testing/gtest.gyp:gtest',
           ],
@@ -63,4 +69,6 @@
           'sources': [ 'test/unpack.cc', ],
         },
       ],
+    }],
+  ],
 }
@@ -19,9 +19,12 @@
           'dependencies': [ 'ns' ],
           'defines': [ 'WEBRTC_NS_FLOAT' ],
         }],
+        ['enable_protobuf==1', {
+          'dependencies': [ 'audioproc_debug_proto' ],
+          'defines': [ 'WEBRTC_AUDIOPROC_DEBUG_DUMP' ],
+        }],
       ],
       'dependencies': [
-        'audioproc_debug_proto',
         'aec',
         'aecm',
         'agc',
@@ -65,18 +68,24 @@
         'voice_detection_impl.h',
       ],
     },
+  ],
+  'conditions': [
+    ['enable_protobuf==1', {
+      'targets': [
         {
           'target_name': 'audioproc_debug_proto',
           'type': 'static_library',
           'sources': [ 'debug.proto', ],
           'variables': {
             'proto_in_dir': '.',
-            # Workaround to protect against gyp's pathname relativization when this
-            # file is included by modules.gyp.
+            # Workaround to protect against gyp's pathname relativization when
+            # this file is included by modules.gyp.
             'proto_out_protected': 'webrtc/audio_processing',
             'proto_out_dir': '<(proto_out_protected)',
           },
           'includes': [ '../../build/protoc.gypi', ],
         },
       ],
+    }],
+  ],
 }
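With this layout, protoc.gypi runs the protobuf compiler over debug.proto and emits the generated debug.pb.h/debug.pb.cc pair under the protected output directory, so dependents include it as "webrtc/audio_processing/debug.pb.h" (the include path that appears in the next hunk). The proto_out_protected indirection exists only to keep gyp from relativizing that path when this file is included by modules.gyp, per the comment above.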
@@ -25,11 +25,15 @@
 #include "processing_component.h"
 #include "splitting_filter.h"
 #include "voice_detection_impl.h"
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+// Files generated at build-time by the protobuf compiler.
 #ifdef WEBRTC_ANDROID
 #include "external/webrtc/src/modules/audio_processing/debug.pb.h"
 #else
 #include "webrtc/audio_processing/debug.pb.h"
 #endif
+#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
 
 namespace webrtc {
 AudioProcessing* AudioProcessing::Create(int id) {
@@ -60,11 +64,13 @@ AudioProcessingImpl::AudioProcessingImpl(int id)
       level_estimator_(NULL),
       noise_suppression_(NULL),
       voice_detection_(NULL),
-      debug_file_(FileWrapper::Create()),
-      event_msg_(new audioproc::Event()),
       crit_(CriticalSectionWrapper::CreateCriticalSection()),
       render_audio_(NULL),
       capture_audio_(NULL),
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+      debug_file_(FileWrapper::Create()),
+      event_msg_(new audioproc::Event()),
+#endif
       sample_rate_hz_(kSampleRate16kHz),
       split_sample_rate_hz_(kSampleRate16kHz),
       samples_per_channel_(sample_rate_hz_ / 100),
@@ -104,14 +110,11 @@ AudioProcessingImpl::~AudioProcessingImpl() {
     component_list_.pop_front();
   }
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_file_->Open()) {
     debug_file_->CloseFile();
   }
-  delete debug_file_;
-  debug_file_ = NULL;
-
-  delete event_msg_;
-  event_msg_ = NULL;
+#endif
 
   delete crit_;
   crit_ = NULL;
@@ -167,12 +170,14 @@ int AudioProcessingImpl::InitializeLocked() {
     }
   }
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_file_->Open()) {
     int err = WriteInitMessage();
     if (err != kNoError) {
       return err;
     }
   }
+#endif
 
   return kNoError;
 }
@@ -268,6 +273,7 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
     return kBadDataLengthError;
   }
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_file_->Open()) {
     event_msg_->set_type(audioproc::Event::STREAM);
     audioproc::Stream* msg = event_msg_->mutable_stream();
@@ -279,6 +285,7 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
     msg->set_drift(echo_cancellation_->stream_drift_samples());
     msg->set_level(gain_control_->stream_analog_level());
   }
+#endif
 
   capture_audio_->DeinterleaveFrom(frame);
 
@@ -359,6 +366,7 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
 
   capture_audio_->InterleaveTo(frame, data_changed);
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_file_->Open()) {
     audioproc::Stream* msg = event_msg_->mutable_stream();
     const size_t data_size = sizeof(int16_t) *
@@ -370,6 +378,7 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
       return err;
     }
   }
+#endif
 
   was_stream_delay_set_ = false;
   return kNoError;
@@ -395,6 +404,7 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
     return kBadDataLengthError;
   }
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_file_->Open()) {
     event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
     audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
@@ -407,6 +417,7 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
       return err;
     }
   }
+#endif
 
   render_audio_->DeinterleaveFrom(frame);
 
@@ -474,6 +485,7 @@ int AudioProcessingImpl::StartDebugRecording(
     return kNullPointerError;
   }
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   // Stop any ongoing recording.
   if (debug_file_->Open()) {
     if (debug_file_->CloseFile() == -1) {
@@ -490,20 +502,26 @@ int AudioProcessingImpl::StartDebugRecording(
   if (err != kNoError) {
     return err;
   }
 
   return kNoError;
+#else
+  return kUnsupportedFunctionError;
+#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
 }
 
 int AudioProcessingImpl::StopDebugRecording() {
   CriticalSectionScoped crit_scoped(*crit_);
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   // We just return if recording hasn't started.
   if (debug_file_->Open()) {
     if (debug_file_->CloseFile() == -1) {
       return kFileError;
     }
   }
 
   return kNoError;
+#else
+  return kUnsupportedFunctionError;
+#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
 }
 
 EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
@@ -601,6 +619,47 @@ WebRtc_Word32 AudioProcessingImpl::ChangeUniqueId(const WebRtc_Word32 id) {
   return kNoError;
 }
 
+bool AudioProcessingImpl::stream_data_changed() const {
+  int enabled_count = 0;
+  std::list<ProcessingComponent*>::const_iterator it;
+  for (it = component_list_.begin(); it != component_list_.end(); it++) {
+    if ((*it)->is_component_enabled()) {
+      enabled_count++;
+    }
+  }
+
+  // Data is unchanged if no components are enabled, or if only level_estimator_
+  // or voice_detection_ is enabled.
+  if (enabled_count == 0) {
+    return false;
+  } else if (enabled_count == 1) {
+    if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
+      return false;
+    }
+  } else if (enabled_count == 2) {
+    if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool AudioProcessingImpl::synthesis_needed(bool stream_data_changed) const {
+  return (stream_data_changed && sample_rate_hz_ == kSampleRate32kHz);
+}
+
+bool AudioProcessingImpl::analysis_needed(bool stream_data_changed) const {
+  if (!stream_data_changed && !voice_detection_->is_enabled()) {
+    // Only level_estimator_ is enabled.
+    return false;
+  } else if (sample_rate_hz_ == kSampleRate32kHz) {
+    // Something besides level_estimator_ is enabled, and we have super-wb.
+    return true;
+  }
+  return false;
+}
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
 int AudioProcessingImpl::WriteMessageToDebugFile() {
   int32_t size = event_msg_->ByteSize();
   if (size <= 0) {
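A concrete reading of stream_data_changed(): with only voice detection enabled, enabled_count is 1 and the branch for level_estimator_/voice_detection_ reports the data unchanged, so ProcessStream can skip interleaving the samples back into the frame. With, say, AEC plus AGC enabled, enabled_count is 2 but neither component is one of those two, so the function returns true and the processed samples are written back.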
@@ -644,44 +703,5 @@ int AudioProcessingImpl::WriteInitMessage() {
 
   return kNoError;
 }
-
-bool AudioProcessingImpl::stream_data_changed() const {
-  int enabled_count = 0;
-  std::list<ProcessingComponent*>::const_iterator it;
-  for (it = component_list_.begin(); it != component_list_.end(); it++) {
-    if ((*it)->is_component_enabled()) {
-      enabled_count++;
-    }
-  }
-
-  // Data is unchanged if no components are enabled, or if only level_estimator_
-  // or voice_detection_ is enabled.
-  if (enabled_count == 0) {
-    return false;
-  } else if (enabled_count == 1) {
-    if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
-      return false;
-    }
-  } else if (enabled_count == 2) {
-    if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
-      return false;
-    }
-  }
-  return true;
-}
-
-bool AudioProcessingImpl::synthesis_needed(bool stream_data_changed) const {
-  return (stream_data_changed && sample_rate_hz_ == kSampleRate32kHz);
-}
-
-bool AudioProcessingImpl::analysis_needed(bool stream_data_changed) const {
-  if (!stream_data_changed && !voice_detection_->is_enabled()) {
-    // Only level_estimator_ is enabled.
-    return false;
-  } else if (sample_rate_hz_ == kSampleRate32kHz) {
-    // Something besides level_estimator_ is enabled, and we have super-wb.
-    return true;
-  }
-  return false;
-}
+#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
 }  // namespace webrtc
@@ -11,15 +11,14 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_PROCESSING_IMPL_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_PROCESSING_IMPL_H_
 
+#include "audio_processing.h"
+
 #include <list>
 #include <string>
 
-#include "audio_processing.h"
+#include "scoped_ptr.h"
 
 namespace webrtc {
-namespace audioproc {
-class Event;
-}  // audioproc
 class AudioBuffer;
 class CriticalSectionWrapper;
 class EchoCancellationImpl;
@@ -32,6 +31,14 @@ class NoiseSuppressionImpl;
 class ProcessingComponent;
 class VoiceDetectionImpl;
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+namespace audioproc {
+
+class Event;
+
+}  // namespace audioproc
+#endif
+
 class AudioProcessingImpl : public AudioProcessing {
  public:
   enum {
@@ -79,8 +86,6 @@ class AudioProcessingImpl : public AudioProcessing {
   virtual WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);
 
  private:
-  int WriteMessageToDebugFile();
-  int WriteInitMessage();
   bool stream_data_changed() const;
   bool synthesis_needed(bool stream_data_changed) const;
   bool analysis_needed(bool stream_data_changed) const;
@@ -96,14 +101,18 @@ class AudioProcessingImpl : public AudioProcessing {
   VoiceDetectionImpl* voice_detection_;
 
   std::list<ProcessingComponent*> component_list_;
 
-  FileWrapper* debug_file_;
-  audioproc::Event* event_msg_; // Protobuf message.
-  std::string event_str_; // Memory for protobuf serialization.
   CriticalSectionWrapper* crit_;
 
   AudioBuffer* render_audio_;
   AudioBuffer* capture_audio_;
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+  // TODO(andrew): make this more graceful. Ideally we would split this stuff
+  // out into a separate class with an "enabled" and "disabled" implementation.
+  int WriteMessageToDebugFile();
+  int WriteInitMessage();
+  scoped_ptr<FileWrapper> debug_file_;
+  scoped_ptr<audioproc::Event> event_msg_; // Protobuf message.
+  std::string event_str_; // Memory for protobuf serialization.
+#endif
 
   int sample_rate_hz_;
   int split_sample_rate_hz_;
@@ -970,6 +970,34 @@ TEST_F(ApmTest, SplittingFilter) {
   EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
 }
 
+// TODO(andrew): expand test to verify output.
+TEST_F(ApmTest, DebugDump) {
+  const std::string filename = webrtc::test::OutputPath() + "debug.aec";
+  EXPECT_EQ(apm_->kNullPointerError, apm_->StartDebugRecording(NULL));
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+  // Stopping without having started should be OK.
+  EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
+
+  EXPECT_EQ(apm_->kNoError, apm_->StartDebugRecording(filename.c_str()));
+  EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
+
+  // Verify the file has been written.
+  ASSERT_TRUE(fopen(filename.c_str(), "r") != NULL);
+  // Clean it up.
+  ASSERT_EQ(0, remove(filename.c_str()));
+#else
+  EXPECT_EQ(apm_->kUnsupportedFunctionError,
+            apm_->StartDebugRecording(filename.c_str()));
+  EXPECT_EQ(apm_->kUnsupportedFunctionError, apm_->StopDebugRecording());
+
+  // Verify the file has NOT been written.
+  ASSERT_TRUE(fopen(filename.c_str(), "r") == NULL);
+#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
+}
+
 TEST_F(ApmTest, Process) {
   GOOGLE_PROTOBUF_VERIFY_VERSION;
   webrtc::audioproc::OutputData output_data;