Introduced the new locking scheme

BUG=webrtc:5099

Review URL: https://codereview.webrtc.org/1424663003

Cr-Commit-Position: refs/heads/master@{#10836}
This commit is contained in:
peah
2015-11-28 12:35:15 -08:00
committed by Commit bot
parent 3236b91f55
commit df3efa8c07
17 changed files with 1304 additions and 836 deletions

File diff suppressed because it is too large Load Diff

View File

@ -15,23 +15,32 @@
#include <string>
#include <vector>
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/system_wrappers/include/file_wrapper.h"
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// Files generated at build-time by the protobuf compiler.
#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
#else
#include "webrtc/audio_processing/debug.pb.h"
#endif
#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
namespace webrtc {
class AgcManagerDirect;
class AudioBuffer;
class AudioConverter;
template<typename T>
class Beamformer;
class CriticalSectionWrapper;
class EchoCancellationImpl;
class EchoControlMobileImpl;
class FileWrapper;
class GainControlImpl;
class GainControlForNewAgc;
class HighPassFilterImpl;
@ -42,23 +51,14 @@ class TransientSuppressor;
class VoiceDetectionImpl;
class IntelligibilityEnhancer;
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
namespace audioproc {
class Event;
} // namespace audioproc
#endif
class AudioProcessingImpl : public AudioProcessing {
public:
// Methods forcing APM to run in a single-threaded manner.
// Acquires both the render and capture locks.
explicit AudioProcessingImpl(const Config& config);
// AudioProcessingImpl takes ownership of beamformer.
AudioProcessingImpl(const Config& config, Beamformer<float>* beamformer);
virtual ~AudioProcessingImpl();
// AudioProcessing methods.
int Initialize() override;
int Initialize(int input_sample_rate_hz,
int output_sample_rate_hz,
@ -68,12 +68,14 @@ class AudioProcessingImpl : public AudioProcessing {
ChannelLayout reverse_layout) override;
int Initialize(const ProcessingConfig& processing_config) override;
void SetExtraOptions(const Config& config) override;
int proc_sample_rate_hz() const override;
int proc_split_sample_rate_hz() const override;
int num_input_channels() const override;
int num_output_channels() const override;
int num_reverse_channels() const override;
void set_output_will_be_muted(bool muted) override;
void UpdateHistogramsOnCallEnd() override;
int StartDebugRecording(const char filename[kMaxFilenameSize]) override;
int StartDebugRecording(FILE* handle) override;
int StartDebugRecordingForPlatformFile(rtc::PlatformFile handle) override;
int StopDebugRecording() override;
// Capture-side exclusive methods possibly running APM in a
// multi-threaded manner. Acquire the capture lock.
int ProcessStream(AudioFrame* frame) override;
int ProcessStream(const float* const* src,
size_t samples_per_channel,
@ -86,6 +88,14 @@ class AudioProcessingImpl : public AudioProcessing {
const StreamConfig& input_config,
const StreamConfig& output_config,
float* const* dest) override;
void set_output_will_be_muted(bool muted) override;
int set_stream_delay_ms(int delay) override;
void set_delay_offset_ms(int offset) override;
int delay_offset_ms() const override;
void set_stream_key_pressed(bool key_pressed) override;
// Render-side exclusive methods possibly running APM in a
// multi-threaded manner. Acquire the render lock.
int AnalyzeReverseStream(AudioFrame* frame) override;
int ProcessReverseStream(AudioFrame* frame) override;
int AnalyzeReverseStream(const float* const* data,
@ -96,17 +106,24 @@ class AudioProcessingImpl : public AudioProcessing {
const StreamConfig& reverse_input_config,
const StreamConfig& reverse_output_config,
float* const* dest) override;
int set_stream_delay_ms(int delay) override;
// Methods only accessed from APM submodules or
// from AudioProcessing tests in a single-threaded manner.
// Hence there is no need for locks in these.
int proc_sample_rate_hz() const override;
int proc_split_sample_rate_hz() const override;
int num_input_channels() const override;
int num_output_channels() const override;
int num_reverse_channels() const override;
int stream_delay_ms() const override;
bool was_stream_delay_set() const override;
void set_delay_offset_ms(int offset) override;
int delay_offset_ms() const override;
void set_stream_key_pressed(bool key_pressed) override;
int StartDebugRecording(const char filename[kMaxFilenameSize]) override;
int StartDebugRecording(FILE* handle) override;
int StartDebugRecordingForPlatformFile(rtc::PlatformFile handle) override;
int StopDebugRecording() override;
void UpdateHistogramsOnCallEnd() override;
bool was_stream_delay_set() const override
EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
// Methods returning pointers to APM submodules.
// No locks are acquired in those, as those locks
// would offer no protection (the submodules are
// created only once in a single-threaded manner
// during APM creation).
EchoCancellation* echo_cancellation() const override;
EchoControlMobile* echo_control_mobile() const override;
GainControl* gain_control() const override;
@ -117,116 +134,209 @@ class AudioProcessingImpl : public AudioProcessing {
protected:
// Overridden in a mock.
virtual int InitializeLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_);
virtual int InitializeLocked()
EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
private:
struct ApmPublicSubmodules;
struct ApmPrivateSubmodules;
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// State for the debug dump.
struct ApmDebugDumpThreadState {
ApmDebugDumpThreadState() : event_msg(new audioproc::Event()) {}
rtc::scoped_ptr<audioproc::Event> event_msg; // Protobuf message.
std::string event_str; // Memory for protobuf serialization.
// Serialized string of last saved APM configuration.
std::string last_serialized_config;
};
struct ApmDebugDumpState {
ApmDebugDumpState() : debug_file(FileWrapper::Create()) {}
rtc::scoped_ptr<FileWrapper> debug_file;
ApmDebugDumpThreadState render;
ApmDebugDumpThreadState capture;
};
#endif
// Method for modifying the formats struct that are called from both
// the render and capture threads. The check for whether modifications
// are needed is done while holding the render lock only, thereby avoiding
// that the capture thread blocks the render thread.
// The struct is modified in a single-threaded manner by holding both the
// render and capture locks.
int MaybeInitialize(const ProcessingConfig& config)
EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
int MaybeInitializeRender(const ProcessingConfig& processing_config)
EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
int MaybeInitializeCapture(const ProcessingConfig& processing_config)
EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
// Methods for checking whether conversion is needed. They access the formats
// structs in a read-only manner, but the render lock is required to be held
// because they are currently always called with it held anyway.
bool rev_conversion_needed() const EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
bool render_check_rev_conversion_needed() const
EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
// Methods requiring APM running in a single-threaded manner.
// Are called with both the render and capture locks already
// acquired.
void InitializeExperimentalAgc()
EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
void InitializeTransient()
EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
void InitializeBeamformer()
EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
void InitializeIntelligibility()
EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
int InitializeLocked(const ProcessingConfig& config)
EXCLUSIVE_LOCKS_REQUIRED(crit_);
int MaybeInitializeLockedRender(const ProcessingConfig& config)
EXCLUSIVE_LOCKS_REQUIRED(crit_);
int MaybeInitializeLockedCapture(const ProcessingConfig& config)
EXCLUSIVE_LOCKS_REQUIRED(crit_);
int MaybeInitializeLocked(const ProcessingConfig& config)
EXCLUSIVE_LOCKS_REQUIRED(crit_);
EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
// Capture-side exclusive methods possibly running APM in a multi-threaded
// manner that are called with the capture lock already acquired.
int ProcessStreamLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
bool output_copy_needed(bool is_data_processed) const
EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
bool is_data_processed() const EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
bool synthesis_needed(bool is_data_processed) const
EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
bool analysis_needed(bool is_data_processed) const
EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
void MaybeUpdateHistograms() EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
// Render-side exclusive methods possibly running APM in a multi-threaded
// manner that are called with the render lock already acquired.
// TODO(ekm): Remove once all clients updated to new interface.
int AnalyzeReverseStream(const float* const* src,
const StreamConfig& input_config,
const StreamConfig& output_config);
int ProcessStreamLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_);
int ProcessReverseStreamLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_);
int AnalyzeReverseStreamLocked(const float* const* src,
const StreamConfig& input_config,
const StreamConfig& output_config)
EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
bool is_rev_processed() const EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
int ProcessReverseStreamLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
bool is_data_processed() const;
bool output_copy_needed(bool is_data_processed) const;
bool synthesis_needed(bool is_data_processed) const;
bool analysis_needed(bool is_data_processed) const;
bool is_rev_processed() const;
bool rev_conversion_needed() const;
// TODO(peah): Add EXCLUSIVE_LOCKS_REQUIRED for the method below.
bool render_check_rev_conversion_needed() const;
void InitializeExperimentalAgc() EXCLUSIVE_LOCKS_REQUIRED(crit_);
void InitializeTransient() EXCLUSIVE_LOCKS_REQUIRED(crit_);
void InitializeBeamformer() EXCLUSIVE_LOCKS_REQUIRED(crit_);
void InitializeIntelligibility() EXCLUSIVE_LOCKS_REQUIRED(crit_);
void MaybeUpdateHistograms() EXCLUSIVE_LOCKS_REQUIRED(crit_);
EchoCancellationImpl* echo_cancellation_;
EchoControlMobileImpl* echo_control_mobile_;
GainControlImpl* gain_control_;
HighPassFilterImpl* high_pass_filter_;
LevelEstimatorImpl* level_estimator_;
NoiseSuppressionImpl* noise_suppression_;
VoiceDetectionImpl* voice_detection_;
rtc::scoped_ptr<GainControlForNewAgc> gain_control_for_new_agc_;
std::list<ProcessingComponent*> component_list_;
CriticalSectionWrapper* crit_;
rtc::scoped_ptr<AudioBuffer> render_audio_;
rtc::scoped_ptr<AudioBuffer> capture_audio_;
rtc::scoped_ptr<AudioConverter> render_converter_;
// Debug dump methods that are internal and called without locks.
// TODO(peah): Make thread safe.
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// TODO(andrew): make this more graceful. Ideally we would split this stuff
// out into a separate class with an "enabled" and "disabled" implementation.
int WriteMessageToDebugFile();
int WriteInitMessage();
static int WriteMessageToDebugFile(FileWrapper* debug_file,
rtc::CriticalSection* crit_debug,
ApmDebugDumpThreadState* debug_state);
int WriteInitMessage() EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
// Writes Config message. If not |forced|, only writes the current config if
// it is different from the last saved one; if |forced|, writes the config
// regardless of the last saved.
int WriteConfigMessage(bool forced);
int WriteConfigMessage(bool forced) EXCLUSIVE_LOCKS_REQUIRED(crit_capture_)
EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
rtc::scoped_ptr<FileWrapper> debug_file_;
rtc::scoped_ptr<audioproc::Event> event_msg_; // Protobuf message.
std::string event_str_; // Memory for protobuf serialization.
// Critical section.
mutable rtc::CriticalSection crit_debug_;
// Serialized string of last saved APM configuration.
std::string last_serialized_config_;
// Debug dump state.
ApmDebugDumpState debug_dump_;
#endif
// Critical sections.
mutable rtc::CriticalSection crit_render_ ACQUIRED_BEFORE(crit_capture_);
mutable rtc::CriticalSection crit_capture_;
// Structs containing the pointers to the submodules.
rtc::scoped_ptr<ApmPublicSubmodules> public_submodules_;
rtc::scoped_ptr<ApmPrivateSubmodules> private_submodules_
GUARDED_BY(crit_capture_);
// State that is written to while holding both the render and capture locks
// but can be read while holding only one of the locks.
struct SharedState {
SharedState()
// but can be read without any lock being held.
// As this struct is only accessed internally in APM, and every internal APM
// method holds either the render or the capture lock, this construct is safe:
// it is not possible to read the variables while they are being written.
struct ApmFormatState {
ApmFormatState()
: // Format of processing streams at input/output call sites.
api_format_({{{kSampleRate16kHz, 1, false},
{kSampleRate16kHz, 1, false},
{kSampleRate16kHz, 1, false},
{kSampleRate16kHz, 1, false}}}) {}
ProcessingConfig api_format_;
} shared_state_;
api_format({{{kSampleRate16kHz, 1, false},
{kSampleRate16kHz, 1, false},
{kSampleRate16kHz, 1, false},
{kSampleRate16kHz, 1, false}}}),
rev_proc_format(kSampleRate16kHz, 1) {}
ProcessingConfig api_format;
StreamConfig rev_proc_format;
} formats_;
// Only the rate and samples fields of fwd_proc_format_ are used because the
// forward processing number of channels is mutable and is tracked by the
// capture_audio_.
StreamConfig fwd_proc_format_;
StreamConfig rev_proc_format_;
int split_rate_;
// APM constants.
const struct ApmConstants {
ApmConstants(int agc_startup_min_volume,
const std::vector<Point> array_geometry,
SphericalPointf target_direction,
bool use_new_agc,
bool intelligibility_enabled,
bool beamformer_enabled)
: // Format of processing streams at input/output call sites.
agc_startup_min_volume(agc_startup_min_volume),
array_geometry(array_geometry),
target_direction(target_direction),
use_new_agc(use_new_agc),
intelligibility_enabled(intelligibility_enabled),
beamformer_enabled(beamformer_enabled) {}
int agc_startup_min_volume;
std::vector<Point> array_geometry;
SphericalPointf target_direction;
bool use_new_agc;
bool intelligibility_enabled;
bool beamformer_enabled;
} constants_;
int stream_delay_ms_;
int delay_offset_ms_;
bool was_stream_delay_set_;
int last_stream_delay_ms_;
int last_aec_system_delay_ms_;
int stream_delay_jumps_;
int aec_system_delay_jumps_;
struct ApmCaptureState {
ApmCaptureState(bool transient_suppressor_enabled)
: aec_system_delay_jumps(-1),
delay_offset_ms(0),
was_stream_delay_set(false),
last_stream_delay_ms(0),
last_aec_system_delay_ms(0),
stream_delay_jumps(-1),
output_will_be_muted(false),
key_pressed(false),
transient_suppressor_enabled(transient_suppressor_enabled),
fwd_proc_format(kSampleRate16kHz),
split_rate(kSampleRate16kHz) {}
int aec_system_delay_jumps;
int delay_offset_ms;
bool was_stream_delay_set;
int last_stream_delay_ms;
int last_aec_system_delay_ms;
int stream_delay_jumps;
bool output_will_be_muted;
bool key_pressed;
bool transient_suppressor_enabled;
rtc::scoped_ptr<AudioBuffer> capture_audio;
// Only the rate and samples fields of fwd_proc_format_ are used because the
// forward processing number of channels is mutable and is tracked by the
// capture_audio_.
StreamConfig fwd_proc_format;
int split_rate;
} capture_ GUARDED_BY(crit_capture_);
bool output_will_be_muted_ GUARDED_BY(crit_);
struct ApmCaptureNonLockedState {
ApmCaptureNonLockedState()
: fwd_proc_format(kSampleRate16kHz),
split_rate(kSampleRate16kHz),
stream_delay_ms(0) {}
// Only the rate and samples fields of fwd_proc_format_ are used because the
// forward processing number of channels is mutable and is tracked by the
// capture_audio_.
StreamConfig fwd_proc_format;
int split_rate;
int stream_delay_ms;
} capture_nonlocked_;
bool key_pressed_;
// Only set through the constructor's Config parameter.
const bool use_new_agc_;
rtc::scoped_ptr<AgcManagerDirect> agc_manager_ GUARDED_BY(crit_);
int agc_startup_min_volume_;
bool transient_suppressor_enabled_;
rtc::scoped_ptr<TransientSuppressor> transient_suppressor_;
const bool beamformer_enabled_;
rtc::scoped_ptr<Beamformer<float>> beamformer_;
const std::vector<Point> array_geometry_;
const SphericalPointf target_direction_;
bool intelligibility_enabled_;
rtc::scoped_ptr<IntelligibilityEnhancer> intelligibility_enhancer_;
struct ApmRenderState {
rtc::scoped_ptr<AudioConverter> render_converter;
rtc::scoped_ptr<AudioBuffer> render_audio;
} render_ GUARDED_BY(crit_render_);
};
} // namespace webrtc

View File

@ -30,41 +30,6 @@ namespace {
class AudioProcessingImplLockTest;
// Sleeps a random time between 0 and max_sleep milliseconds.
void SleepRandomMs(int max_sleep, test::Random* rand_gen) {
int sleeptime = rand_gen->Rand(0, max_sleep);
SleepMs(sleeptime);
}
// Populates a float audio frame with random data.
void PopulateAudioFrame(float** frame,
float amplitude,
size_t num_channels,
size_t samples_per_channel,
test::Random* rand_gen) {
for (size_t ch = 0; ch < num_channels; ch++) {
for (size_t k = 0; k < samples_per_channel; k++) {
// Store random 16 bit quantized float number between +-amplitude.
frame[ch][k] = amplitude * (2 * rand_gen->Rand<float>() - 1);
}
}
}
// Populates an audioframe frame of AudioFrame type with random data.
void PopulateAudioFrame(AudioFrame* frame,
int16_t amplitude,
test::Random* rand_gen) {
ASSERT_GT(amplitude, 0);
ASSERT_LE(amplitude, 32767);
for (int ch = 0; ch < frame->num_channels_; ch++) {
for (int k = 0; k < static_cast<int>(frame->samples_per_channel_); k++) {
// Store random 16 bit number between -(amplitude+1) and
// amplitude.
frame->data_[k * ch] = rand_gen->Rand(2 * amplitude + 1) - amplitude - 1;
}
}
}
// Type of the render thread APM API call to use in the test.
enum class RenderApiImpl {
ProcessReverseStreamImpl1,
@ -97,6 +62,31 @@ enum class AecType {
BasicWebRtcAecSettingsWithAecMobile
};
// Thread-safe random number generator wrapper.
class RandomGenerator {
public:
RandomGenerator() : rand_gen_(42U) {}
int RandInt(int min, int max) {
rtc::CritScope cs(&crit_);
return rand_gen_.Rand(min, max);
}
int RandInt(int max) {
rtc::CritScope cs(&crit_);
return rand_gen_.Rand(max);
}
float RandFloat() {
rtc::CritScope cs(&crit_);
return rand_gen_.Rand<float>();
}
private:
rtc::CriticalSection crit_;
test::Random rand_gen_ GUARDED_BY(crit_);
};
// Variables related to the audio data and formats.
struct AudioFrameData {
explicit AudioFrameData(int max_frame_size) {
@ -331,7 +321,7 @@ class CaptureSideCalledChecker {
class CaptureProcessor {
public:
CaptureProcessor(int max_frame_size,
test::Random* rand_gen,
RandomGenerator* rand_gen,
FrameCounters* shared_counters_state,
CaptureSideCalledChecker* capture_call_checker,
AudioProcessingImplLockTest* test_framework,
@ -348,7 +338,7 @@ class CaptureProcessor {
void CallApmCaptureSide();
void ApplyRuntimeSettingScheme();
test::Random* rand_gen_ = nullptr;
RandomGenerator* rand_gen_ = nullptr;
FrameCounters* frame_counters_ = nullptr;
CaptureSideCalledChecker* capture_call_checker_ = nullptr;
AudioProcessingImplLockTest* test_ = nullptr;
@ -360,13 +350,13 @@ class CaptureProcessor {
// Class for handling the stats processing.
class StatsProcessor {
public:
StatsProcessor(test::Random* rand_gen,
StatsProcessor(RandomGenerator* rand_gen,
TestConfig* test_config,
AudioProcessing* apm);
bool Process();
private:
test::Random* rand_gen_ = nullptr;
RandomGenerator* rand_gen_ = nullptr;
TestConfig* test_config_ = nullptr;
AudioProcessing* apm_ = nullptr;
};
@ -375,7 +365,7 @@ class StatsProcessor {
class RenderProcessor {
public:
RenderProcessor(int max_frame_size,
test::Random* rand_gen,
RandomGenerator* rand_gen,
FrameCounters* shared_counters_state,
CaptureSideCalledChecker* capture_call_checker,
AudioProcessingImplLockTest* test_framework,
@ -392,7 +382,7 @@ class RenderProcessor {
void CallApmRenderSide();
void ApplyRuntimeSettingScheme();
test::Random* rand_gen_ = nullptr;
RandomGenerator* rand_gen_ = nullptr;
FrameCounters* frame_counters_ = nullptr;
CaptureSideCalledChecker* capture_call_checker_ = nullptr;
AudioProcessingImplLockTest* test_ = nullptr;
@ -459,7 +449,7 @@ class AudioProcessingImplLockTest
rtc::PlatformThread render_thread_;
rtc::PlatformThread capture_thread_;
rtc::PlatformThread stats_thread_;
mutable test::Random rand_gen_;
mutable RandomGenerator rand_gen_;
rtc::scoped_ptr<AudioProcessing> apm_;
TestConfig test_config_;
@ -470,12 +460,47 @@ class AudioProcessingImplLockTest
StatsProcessor stats_thread_state_;
};
// Sleeps a random time between 0 and max_sleep milliseconds.
void SleepRandomMs(int max_sleep, RandomGenerator* rand_gen) {
int sleeptime = rand_gen->RandInt(0, max_sleep);
SleepMs(sleeptime);
}
// Populates a float audio frame with random data.
void PopulateAudioFrame(float** frame,
float amplitude,
size_t num_channels,
size_t samples_per_channel,
RandomGenerator* rand_gen) {
for (size_t ch = 0; ch < num_channels; ch++) {
for (size_t k = 0; k < samples_per_channel; k++) {
// Store random 16 bit quantized float number between +-amplitude.
frame[ch][k] = amplitude * (2 * rand_gen->RandFloat() - 1);
}
}
}
// Populates an audioframe frame of AudioFrame type with random data.
void PopulateAudioFrame(AudioFrame* frame,
int16_t amplitude,
RandomGenerator* rand_gen) {
ASSERT_GT(amplitude, 0);
ASSERT_LE(amplitude, 32767);
for (int ch = 0; ch < frame->num_channels_; ch++) {
for (int k = 0; k < static_cast<int>(frame->samples_per_channel_); k++) {
// Store random 16 bit number between -(amplitude+1) and
// amplitude.
frame->data_[k * ch] =
rand_gen->RandInt(2 * amplitude + 1) - amplitude - 1;
}
}
}
AudioProcessingImplLockTest::AudioProcessingImplLockTest()
: test_complete_(EventWrapper::Create()),
render_thread_(RenderProcessorThreadFunc, this, "render"),
capture_thread_(CaptureProcessorThreadFunc, this, "capture"),
stats_thread_(StatsProcessorThreadFunc, this, "stats"),
rand_gen_(42U),
apm_(AudioProcessingImpl::Create()),
render_thread_state_(kMaxFrameSize,
&rand_gen_,
@ -513,7 +538,7 @@ void AudioProcessingImplLockTest::SetUp() {
ASSERT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
ASSERT_EQ(apm_->kNoError,
apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
apm_->gain_control()->set_mode(GainControl::kAdaptiveDigital));
ASSERT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
ASSERT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(true));
@ -552,7 +577,7 @@ void AudioProcessingImplLockTest::TearDown() {
stats_thread_.Stop();
}
StatsProcessor::StatsProcessor(test::Random* rand_gen,
StatsProcessor::StatsProcessor(RandomGenerator* rand_gen,
TestConfig* test_config,
AudioProcessing* apm)
: rand_gen_(rand_gen), test_config_(test_config), apm_(apm) {}
@ -586,7 +611,7 @@ const float CaptureProcessor::kCaptureInputFloatLevel = 0.03125f;
CaptureProcessor::CaptureProcessor(
int max_frame_size,
test::Random* rand_gen,
RandomGenerator* rand_gen,
FrameCounters* shared_counters_state,
CaptureSideCalledChecker* capture_call_checker,
AudioProcessingImplLockTest* test_framework,
@ -824,8 +849,6 @@ void CaptureProcessor::ApplyRuntimeSettingScheme() {
apm_->set_stream_key_pressed(true);
apm_->set_delay_offset_ms(15);
EXPECT_EQ(apm_->delay_offset_ms(), 15);
EXPECT_GE(apm_->num_reverse_channels(), 0);
EXPECT_LE(apm_->num_reverse_channels(), 2);
} else {
ASSERT_EQ(AudioProcessing::Error::kNoError,
apm_->set_stream_delay_ms(50));
@ -833,9 +856,6 @@ void CaptureProcessor::ApplyRuntimeSettingScheme() {
apm_->set_delay_offset_ms(20);
EXPECT_EQ(apm_->delay_offset_ms(), 20);
apm_->delay_offset_ms();
apm_->num_reverse_channels();
EXPECT_GE(apm_->num_reverse_channels(), 0);
EXPECT_LE(apm_->num_reverse_channels(), 2);
}
break;
default:
@ -852,7 +872,7 @@ void CaptureProcessor::ApplyRuntimeSettingScheme() {
const float RenderProcessor::kRenderInputFloatLevel = 0.5f;
RenderProcessor::RenderProcessor(int max_frame_size,
test::Random* rand_gen,
RandomGenerator* rand_gen,
FrameCounters* shared_counters_state,
CaptureSideCalledChecker* capture_call_checker,
AudioProcessingImplLockTest* test_framework,
@ -1104,7 +1124,7 @@ INSTANTIATE_TEST_CASE_P(
::testing::ValuesIn(TestConfig::GenerateExtensiveTestConfigs()));
INSTANTIATE_TEST_CASE_P(
DISABLED_AudioProcessingImplLockBrief,
AudioProcessingImplLockBrief,
AudioProcessingImplLockTest,
::testing::ValuesIn(TestConfig::GenerateBriefTestConfigs()));

View File

@ -18,7 +18,6 @@ extern "C" {
}
#include "webrtc/modules/audio_processing/aec/echo_cancellation.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
@ -63,10 +62,12 @@ static const size_t kMaxNumFramesToBuffer = 100;
} // namespace
EchoCancellationImpl::EchoCancellationImpl(const AudioProcessing* apm,
CriticalSectionWrapper* crit)
rtc::CriticalSection* crit_render,
rtc::CriticalSection* crit_capture)
: ProcessingComponent(),
apm_(apm),
crit_(crit),
crit_render_(crit_render),
crit_capture_(crit_capture),
drift_compensation_enabled_(false),
metrics_enabled_(false),
suppression_level_(kModerateSuppression),
@ -76,19 +77,24 @@ EchoCancellationImpl::EchoCancellationImpl(const AudioProcessing* apm,
delay_logging_enabled_(false),
extended_filter_enabled_(false),
delay_agnostic_enabled_(false),
render_queue_element_max_size_(0) {}
render_queue_element_max_size_(0) {
RTC_DCHECK(apm);
RTC_DCHECK(crit_render);
RTC_DCHECK(crit_capture);
}
EchoCancellationImpl::~EchoCancellationImpl() {}
int EchoCancellationImpl::ProcessRenderAudio(const AudioBuffer* audio) {
rtc::CritScope cs_render(crit_render_);
if (!is_component_enabled()) {
return apm_->kNoError;
return AudioProcessing::kNoError;
}
assert(audio->num_frames_per_band() <= 160);
assert(audio->num_channels() == apm_->num_reverse_channels());
int err = apm_->kNoError;
int err = AudioProcessing::kNoError;
// The ordering convention must be followed to pass to the correct AEC.
size_t handle_index = 0;
@ -102,7 +108,7 @@ int EchoCancellationImpl::ProcessRenderAudio(const AudioBuffer* audio) {
my_handle, audio->split_bands_const_f(j)[kBand0To8kHz],
audio->num_frames_per_band());
if (err != apm_->kNoError) {
if (err != AudioProcessing::kNoError) {
return MapError(err); // TODO(ajm): warning possible?
}
@ -116,18 +122,20 @@ int EchoCancellationImpl::ProcessRenderAudio(const AudioBuffer* audio) {
// Insert the samples into the queue.
if (!render_signal_queue_->Insert(&render_queue_buffer_)) {
// The data queue is full and needs to be emptied.
ReadQueuedRenderData();
// Retry the insert (should always work).
RTC_DCHECK_EQ(render_signal_queue_->Insert(&render_queue_buffer_), true);
}
return apm_->kNoError;
return AudioProcessing::kNoError;
}
// Read chunks of data that were received and queued on the render side from
// a queue. All the data chunks are buffered into the farend signal of the AEC.
void EchoCancellationImpl::ReadQueuedRenderData() {
rtc::CritScope cs_capture(crit_capture_);
if (!is_component_enabled()) {
return;
}
@ -152,22 +160,23 @@ void EchoCancellationImpl::ReadQueuedRenderData() {
}
int EchoCancellationImpl::ProcessCaptureAudio(AudioBuffer* audio) {
rtc::CritScope cs_capture(crit_capture_);
if (!is_component_enabled()) {
return apm_->kNoError;
return AudioProcessing::kNoError;
}
if (!apm_->was_stream_delay_set()) {
return apm_->kStreamParameterNotSetError;
return AudioProcessing::kStreamParameterNotSetError;
}
if (drift_compensation_enabled_ && !was_stream_drift_set_) {
return apm_->kStreamParameterNotSetError;
return AudioProcessing::kStreamParameterNotSetError;
}
assert(audio->num_frames_per_band() <= 160);
assert(audio->num_channels() == apm_->num_output_channels());
int err = apm_->kNoError;
int err = AudioProcessing::kNoError;
// The ordering convention must be followed to pass to the correct AEC.
size_t handle_index = 0;
@ -175,26 +184,22 @@ int EchoCancellationImpl::ProcessCaptureAudio(AudioBuffer* audio) {
for (int i = 0; i < audio->num_channels(); i++) {
for (int j = 0; j < apm_->num_reverse_channels(); j++) {
Handle* my_handle = handle(handle_index);
err = WebRtcAec_Process(
my_handle,
audio->split_bands_const_f(i),
audio->num_bands(),
audio->split_bands_f(i),
audio->num_frames_per_band(),
apm_->stream_delay_ms(),
stream_drift_samples_);
err = WebRtcAec_Process(my_handle, audio->split_bands_const_f(i),
audio->num_bands(), audio->split_bands_f(i),
audio->num_frames_per_band(),
apm_->stream_delay_ms(), stream_drift_samples_);
if (err != apm_->kNoError) {
if (err != AudioProcessing::kNoError) {
err = MapError(err);
// TODO(ajm): Figure out how to return warnings properly.
if (err != apm_->kBadStreamParameterWarning) {
if (err != AudioProcessing::kBadStreamParameterWarning) {
return err;
}
}
int status = 0;
err = WebRtcAec_get_echo_status(my_handle, &status);
if (err != apm_->kNoError) {
if (err != AudioProcessing::kNoError) {
return MapError(err);
}
@ -207,77 +212,92 @@ int EchoCancellationImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
was_stream_drift_set_ = false;
return apm_->kNoError;
return AudioProcessing::kNoError;
}
int EchoCancellationImpl::Enable(bool enable) {
CriticalSectionScoped crit_scoped(crit_);
// Run in a single-threaded manner.
rtc::CritScope cs_render(crit_render_);
rtc::CritScope cs_capture(crit_capture_);
// Ensure AEC and AECM are not both enabled.
// The is_enabled call is safe from a deadlock perspective
// as both locks are already held in the correct order.
if (enable && apm_->echo_control_mobile()->is_enabled()) {
return apm_->kBadParameterError;
return AudioProcessing::kBadParameterError;
}
return EnableComponent(enable);
}
bool EchoCancellationImpl::is_enabled() const {
rtc::CritScope cs(crit_capture_);
return is_component_enabled();
}
int EchoCancellationImpl::set_suppression_level(SuppressionLevel level) {
CriticalSectionScoped crit_scoped(crit_);
if (MapSetting(level) == -1) {
return apm_->kBadParameterError;
{
if (MapSetting(level) == -1) {
return AudioProcessing::kBadParameterError;
}
rtc::CritScope cs(crit_capture_);
suppression_level_ = level;
}
suppression_level_ = level;
return Configure();
}
EchoCancellation::SuppressionLevel EchoCancellationImpl::suppression_level()
const {
rtc::CritScope cs(crit_capture_);
return suppression_level_;
}
int EchoCancellationImpl::enable_drift_compensation(bool enable) {
CriticalSectionScoped crit_scoped(crit_);
drift_compensation_enabled_ = enable;
{
rtc::CritScope cs(crit_capture_);
drift_compensation_enabled_ = enable;
}
return Configure();
}
bool EchoCancellationImpl::is_drift_compensation_enabled() const {
rtc::CritScope cs(crit_capture_);
return drift_compensation_enabled_;
}
void EchoCancellationImpl::set_stream_drift_samples(int drift) {
rtc::CritScope cs(crit_capture_);
was_stream_drift_set_ = true;
stream_drift_samples_ = drift;
}
int EchoCancellationImpl::stream_drift_samples() const {
rtc::CritScope cs(crit_capture_);
return stream_drift_samples_;
}
int EchoCancellationImpl::enable_metrics(bool enable) {
CriticalSectionScoped crit_scoped(crit_);
metrics_enabled_ = enable;
{
rtc::CritScope cs(crit_capture_);
metrics_enabled_ = enable;
}
return Configure();
}
bool EchoCancellationImpl::are_metrics_enabled() const {
rtc::CritScope cs(crit_capture_);
return metrics_enabled_;
}
// TODO(ajm): we currently just use the metrics from the first AEC. Think more
// about the best way to extend this to multi-channel.
int EchoCancellationImpl::GetMetrics(Metrics* metrics) {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs(crit_capture_);
if (metrics == NULL) {
return apm_->kNullPointerError;
return AudioProcessing::kNullPointerError;
}
if (!is_component_enabled() || !metrics_enabled_) {
return apm_->kNotEnabledError;
return AudioProcessing::kNotEnabledError;
}
AecMetrics my_metrics;
@ -286,7 +306,7 @@ int EchoCancellationImpl::GetMetrics(Metrics* metrics) {
Handle* my_handle = static_cast<Handle*>(handle(0));
int err = WebRtcAec_GetMetrics(my_handle, &my_metrics);
if (err != apm_->kNoError) {
if (err != AudioProcessing::kNoError) {
return MapError(err);
}
@ -310,63 +330,70 @@ int EchoCancellationImpl::GetMetrics(Metrics* metrics) {
metrics->a_nlp.maximum = my_metrics.aNlp.max;
metrics->a_nlp.minimum = my_metrics.aNlp.min;
return apm_->kNoError;
return AudioProcessing::kNoError;
}
bool EchoCancellationImpl::stream_has_echo() const {
rtc::CritScope cs(crit_capture_);
return stream_has_echo_;
}
int EchoCancellationImpl::enable_delay_logging(bool enable) {
CriticalSectionScoped crit_scoped(crit_);
delay_logging_enabled_ = enable;
{
rtc::CritScope cs(crit_capture_);
delay_logging_enabled_ = enable;
}
return Configure();
}
bool EchoCancellationImpl::is_delay_logging_enabled() const {
rtc::CritScope cs(crit_capture_);
return delay_logging_enabled_;
}
bool EchoCancellationImpl::is_delay_agnostic_enabled() const {
rtc::CritScope cs(crit_capture_);
return delay_agnostic_enabled_;
}
bool EchoCancellationImpl::is_extended_filter_enabled() const {
rtc::CritScope cs(crit_capture_);
return extended_filter_enabled_;
}
// TODO(bjornv): How should we handle the multi-channel case?
int EchoCancellationImpl::GetDelayMetrics(int* median, int* std) {
rtc::CritScope cs(crit_capture_);
float fraction_poor_delays = 0;
return GetDelayMetrics(median, std, &fraction_poor_delays);
}
int EchoCancellationImpl::GetDelayMetrics(int* median, int* std,
float* fraction_poor_delays) {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs(crit_capture_);
if (median == NULL) {
return apm_->kNullPointerError;
return AudioProcessing::kNullPointerError;
}
if (std == NULL) {
return apm_->kNullPointerError;
return AudioProcessing::kNullPointerError;
}
if (!is_component_enabled() || !delay_logging_enabled_) {
return apm_->kNotEnabledError;
return AudioProcessing::kNotEnabledError;
}
Handle* my_handle = static_cast<Handle*>(handle(0));
const int err =
WebRtcAec_GetDelayMetrics(my_handle, median, std, fraction_poor_delays);
if (err != apm_->kNoError) {
if (err != AudioProcessing::kNoError) {
return MapError(err);
}
return apm_->kNoError;
return AudioProcessing::kNoError;
}
struct AecCore* EchoCancellationImpl::aec_core() const {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs(crit_capture_);
if (!is_component_enabled()) {
return NULL;
}
@ -376,13 +403,16 @@ struct AecCore* EchoCancellationImpl::aec_core() const {
int EchoCancellationImpl::Initialize() {
int err = ProcessingComponent::Initialize();
if (err != apm_->kNoError || !is_component_enabled()) {
return err;
{
rtc::CritScope cs(crit_capture_);
if (err != AudioProcessing::kNoError || !is_component_enabled()) {
return err;
}
}
AllocateRenderQueue();
return apm_->kNoError;
return AudioProcessing::kNoError;
}
void EchoCancellationImpl::AllocateRenderQueue() {
@ -390,6 +420,9 @@ void EchoCancellationImpl::AllocateRenderQueue() {
static_cast<size_t>(1),
kMaxAllowedValuesOfSamplesPerFrame * num_handles_required());
rtc::CritScope cs_render(crit_render_);
rtc::CritScope cs_capture(crit_capture_);
// Reallocate the queue if the queue item size is too small to fit the
// data to put in the queue.
if (render_queue_element_max_size_ < new_render_queue_element_max_size) {
@ -410,8 +443,11 @@ void EchoCancellationImpl::AllocateRenderQueue() {
}
void EchoCancellationImpl::SetExtraOptions(const Config& config) {
extended_filter_enabled_ = config.Get<ExtendedFilter>().enabled;
delay_agnostic_enabled_ = config.Get<DelayAgnostic>().enabled;
{
rtc::CritScope cs(crit_capture_);
extended_filter_enabled_ = config.Get<ExtendedFilter>().enabled;
delay_agnostic_enabled_ = config.Get<DelayAgnostic>().enabled;
}
Configure();
}
@ -425,23 +461,25 @@ void EchoCancellationImpl::DestroyHandle(void* handle) const {
}
int EchoCancellationImpl::InitializeHandle(void* handle) const {
// Not locked as it only relies on APM public API which is threadsafe.
assert(handle != NULL);
// TODO(ajm): Drift compensation is disabled in practice. If restored, it
// should be managed internally and not depend on the hardware sample rate.
// For now, just hardcode a 48 kHz value.
return WebRtcAec_Init(static_cast<Handle*>(handle),
apm_->proc_sample_rate_hz(),
48000);
apm_->proc_sample_rate_hz(), 48000);
}
int EchoCancellationImpl::ConfigureHandle(void* handle) const {
rtc::CritScope cs_render(crit_render_);
rtc::CritScope cs_capture(crit_capture_);
assert(handle != NULL);
AecConfig config;
config.metricsMode = metrics_enabled_;
config.nlpMode = MapSetting(suppression_level_);
config.skewMode = drift_compensation_enabled_;
config.delay_logging = delay_logging_enabled_;
WebRtcAec_enable_extended_filter(
WebRtcAec_aec_core(static_cast<Handle*>(handle)),
extended_filter_enabled_ ? 1 : 0);
@ -452,11 +490,13 @@ int EchoCancellationImpl::ConfigureHandle(void* handle) const {
}
int EchoCancellationImpl::num_handles_required() const {
// Not locked as it only relies on APM public API which is threadsafe.
return apm_->num_output_channels() *
apm_->num_reverse_channels();
}
int EchoCancellationImpl::GetHandleError(void* handle) const {
// Not locked as it does not rely on anything in the state.
assert(handle != NULL);
return AudioProcessing::kUnspecifiedError;
}

View File

@ -11,6 +11,7 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_H_
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/swap_queue.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
@ -19,13 +20,13 @@
namespace webrtc {
class AudioBuffer;
class CriticalSectionWrapper;
class EchoCancellationImpl : public EchoCancellation,
public ProcessingComponent {
public:
EchoCancellationImpl(const AudioProcessing* apm,
CriticalSectionWrapper* crit);
rtc::CriticalSection* crit_render,
rtc::CriticalSection* crit_capture);
virtual ~EchoCancellationImpl();
int ProcessRenderAudio(const AudioBuffer* audio);
@ -40,11 +41,11 @@ class EchoCancellationImpl : public EchoCancellation,
// ProcessingComponent implementation.
int Initialize() override;
void SetExtraOptions(const Config& config) override;
bool is_delay_agnostic_enabled() const;
bool is_extended_filter_enabled() const;
// Reads render side data that has been queued on the render call.
// Called holding the capture lock.
void ReadQueuedRenderData();
private:
@ -63,6 +64,7 @@ class EchoCancellationImpl : public EchoCancellation,
int GetDelayMetrics(int* median,
int* std,
float* fraction_poor_delays) override;
struct AecCore* aec_core() const override;
// ProcessingComponent implementation.
@ -75,22 +77,28 @@ class EchoCancellationImpl : public EchoCancellation,
void AllocateRenderQueue();
// Not guarded as its public API is thread safe.
const AudioProcessing* apm_;
CriticalSectionWrapper* crit_;
bool drift_compensation_enabled_;
bool metrics_enabled_;
SuppressionLevel suppression_level_;
int stream_drift_samples_;
bool was_stream_drift_set_;
bool stream_has_echo_;
bool delay_logging_enabled_;
bool extended_filter_enabled_;
bool delay_agnostic_enabled_;
rtc::CriticalSection* const crit_render_ ACQUIRED_BEFORE(crit_capture_);
rtc::CriticalSection* const crit_capture_;
size_t render_queue_element_max_size_;
std::vector<float> render_queue_buffer_;
std::vector<float> capture_queue_buffer_;
bool drift_compensation_enabled_ GUARDED_BY(crit_capture_);
bool metrics_enabled_ GUARDED_BY(crit_capture_);
SuppressionLevel suppression_level_ GUARDED_BY(crit_capture_);
int stream_drift_samples_ GUARDED_BY(crit_capture_);
bool was_stream_drift_set_ GUARDED_BY(crit_capture_);
bool stream_has_echo_ GUARDED_BY(crit_capture_);
bool delay_logging_enabled_ GUARDED_BY(crit_capture_);
bool extended_filter_enabled_ GUARDED_BY(crit_capture_);
bool delay_agnostic_enabled_ GUARDED_BY(crit_capture_);
size_t render_queue_element_max_size_ GUARDED_BY(crit_render_)
GUARDED_BY(crit_capture_);
std::vector<float> render_queue_buffer_ GUARDED_BY(crit_render_);
std::vector<float> capture_queue_buffer_ GUARDED_BY(crit_capture_);
// Lock protection not needed.
rtc::scoped_ptr<SwapQueue<std::vector<float>, RenderQueueItemVerifier<float>>>
render_signal_queue_;
};

View File

@ -15,7 +15,6 @@
#include "webrtc/modules/audio_processing/aecm/echo_control_mobile.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/logging.h"
namespace webrtc {
@ -69,14 +68,20 @@ size_t EchoControlMobile::echo_path_size_bytes() {
}
EchoControlMobileImpl::EchoControlMobileImpl(const AudioProcessing* apm,
CriticalSectionWrapper* crit)
rtc::CriticalSection* crit_render,
rtc::CriticalSection* crit_capture)
: ProcessingComponent(),
apm_(apm),
crit_(crit),
crit_render_(crit_render),
crit_capture_(crit_capture),
routing_mode_(kSpeakerphone),
comfort_noise_enabled_(true),
external_echo_path_(NULL),
render_queue_element_max_size_(0) {}
render_queue_element_max_size_(0) {
RTC_DCHECK(apm);
RTC_DCHECK(crit_render);
RTC_DCHECK(crit_capture);
}
EchoControlMobileImpl::~EchoControlMobileImpl() {
if (external_echo_path_ != NULL) {
@ -86,15 +91,16 @@ EchoControlMobileImpl::~EchoControlMobileImpl() {
}
int EchoControlMobileImpl::ProcessRenderAudio(const AudioBuffer* audio) {
rtc::CritScope cs_render(crit_render_);
if (!is_component_enabled()) {
return apm_->kNoError;
return AudioProcessing::kNoError;
}
assert(audio->num_frames_per_band() <= 160);
assert(audio->num_channels() == apm_->num_reverse_channels());
int err = apm_->kNoError;
int err = AudioProcessing::kNoError;
// The ordering convention must be followed to pass to the correct AECM.
size_t handle_index = 0;
render_queue_buffer_.clear();
@ -105,7 +111,7 @@ int EchoControlMobileImpl::ProcessRenderAudio(const AudioBuffer* audio) {
my_handle, audio->split_bands_const(j)[kBand0To8kHz],
audio->num_frames_per_band());
if (err != apm_->kNoError)
if (err != AudioProcessing::kNoError)
return MapError(err); // TODO(ajm): warning possible?);
// Buffer the samples in the render queue.
@ -120,18 +126,21 @@ int EchoControlMobileImpl::ProcessRenderAudio(const AudioBuffer* audio) {
// Insert the samples into the queue.
if (!render_signal_queue_->Insert(&render_queue_buffer_)) {
// The data queue is full and needs to be emptied.
ReadQueuedRenderData();
// Retry the insert (should always work).
RTC_DCHECK_EQ(render_signal_queue_->Insert(&render_queue_buffer_), true);
}
return apm_->kNoError;
return AudioProcessing::kNoError;
}
// Read chunks of data that were received and queued on the render side from
// a queue. All the data chunks are buffered into the farend signal of the AEC.
void EchoControlMobileImpl::ReadQueuedRenderData() {
rtc::CritScope cs_capture(crit_capture_);
if (!is_component_enabled()) {
return;
}
@ -156,18 +165,20 @@ void EchoControlMobileImpl::ReadQueuedRenderData() {
}
int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) {
rtc::CritScope cs_capture(crit_capture_);
if (!is_component_enabled()) {
return apm_->kNoError;
return AudioProcessing::kNoError;
}
if (!apm_->was_stream_delay_set()) {
return apm_->kStreamParameterNotSetError;
return AudioProcessing::kStreamParameterNotSetError;
}
assert(audio->num_frames_per_band() <= 160);
assert(audio->num_channels() == apm_->num_output_channels());
int err = apm_->kNoError;
int err = AudioProcessing::kNoError;
// The ordering convention must be followed to pass to the correct AECM.
size_t handle_index = 0;
@ -190,86 +201,99 @@ int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) {
audio->num_frames_per_band(),
apm_->stream_delay_ms());
if (err != apm_->kNoError)
if (err != AudioProcessing::kNoError)
return MapError(err);
handle_index++;
}
}
return apm_->kNoError;
return AudioProcessing::kNoError;
}
int EchoControlMobileImpl::Enable(bool enable) {
CriticalSectionScoped crit_scoped(crit_);
// Ensure AEC and AECM are not both enabled.
rtc::CritScope cs_render(crit_render_);
rtc::CritScope cs_capture(crit_capture_);
// The is_enabled call is safe from a deadlock perspective
// as both locks are allready held in the correct order.
if (enable && apm_->echo_cancellation()->is_enabled()) {
return apm_->kBadParameterError;
return AudioProcessing::kBadParameterError;
}
return EnableComponent(enable);
}
bool EchoControlMobileImpl::is_enabled() const {
rtc::CritScope cs(crit_capture_);
return is_component_enabled();
}
int EchoControlMobileImpl::set_routing_mode(RoutingMode mode) {
CriticalSectionScoped crit_scoped(crit_);
if (MapSetting(mode) == -1) {
return apm_->kBadParameterError;
return AudioProcessing::kBadParameterError;
}
routing_mode_ = mode;
{
rtc::CritScope cs(crit_capture_);
routing_mode_ = mode;
}
return Configure();
}
EchoControlMobile::RoutingMode EchoControlMobileImpl::routing_mode()
const {
rtc::CritScope cs(crit_capture_);
return routing_mode_;
}
int EchoControlMobileImpl::enable_comfort_noise(bool enable) {
CriticalSectionScoped crit_scoped(crit_);
comfort_noise_enabled_ = enable;
{
rtc::CritScope cs(crit_capture_);
comfort_noise_enabled_ = enable;
}
return Configure();
}
bool EchoControlMobileImpl::is_comfort_noise_enabled() const {
rtc::CritScope cs(crit_capture_);
return comfort_noise_enabled_;
}
int EchoControlMobileImpl::SetEchoPath(const void* echo_path,
size_t size_bytes) {
CriticalSectionScoped crit_scoped(crit_);
if (echo_path == NULL) {
return apm_->kNullPointerError;
}
if (size_bytes != echo_path_size_bytes()) {
// Size mismatch
return apm_->kBadParameterError;
}
{
rtc::CritScope cs_render(crit_render_);
rtc::CritScope cs_capture(crit_capture_);
if (echo_path == NULL) {
return AudioProcessing::kNullPointerError;
}
if (size_bytes != echo_path_size_bytes()) {
// Size mismatch
return AudioProcessing::kBadParameterError;
}
if (external_echo_path_ == NULL) {
external_echo_path_ = new unsigned char[size_bytes];
if (external_echo_path_ == NULL) {
external_echo_path_ = new unsigned char[size_bytes];
}
memcpy(external_echo_path_, echo_path, size_bytes);
}
memcpy(external_echo_path_, echo_path, size_bytes);
return Initialize();
}
int EchoControlMobileImpl::GetEchoPath(void* echo_path,
size_t size_bytes) const {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs(crit_capture_);
if (echo_path == NULL) {
return apm_->kNullPointerError;
return AudioProcessing::kNullPointerError;
}
if (size_bytes != echo_path_size_bytes()) {
// Size mismatch
return apm_->kBadParameterError;
return AudioProcessing::kBadParameterError;
}
if (!is_component_enabled()) {
return apm_->kNotEnabledError;
return AudioProcessing::kNotEnabledError;
}
// Get the echo path from the first channel
@ -278,27 +302,30 @@ int EchoControlMobileImpl::GetEchoPath(void* echo_path,
if (err != 0)
return MapError(err);
return apm_->kNoError;
return AudioProcessing::kNoError;
}
int EchoControlMobileImpl::Initialize() {
if (!is_component_enabled()) {
return apm_->kNoError;
{
rtc::CritScope cs_capture(crit_capture_);
if (!is_component_enabled()) {
return AudioProcessing::kNoError;
}
}
if (apm_->proc_sample_rate_hz() > apm_->kSampleRate16kHz) {
if (apm_->proc_sample_rate_hz() > AudioProcessing::kSampleRate16kHz) {
LOG(LS_ERROR) << "AECM only supports 16 kHz or lower sample rates";
return apm_->kBadSampleRateError;
return AudioProcessing::kBadSampleRateError;
}
int err = ProcessingComponent::Initialize();
if (err != apm_->kNoError) {
if (err != AudioProcessing::kNoError) {
return err;
}
AllocateRenderQueue();
return apm_->kNoError;
return AudioProcessing::kNoError;
}
void EchoControlMobileImpl::AllocateRenderQueue() {
@ -306,6 +333,9 @@ void EchoControlMobileImpl::AllocateRenderQueue() {
static_cast<size_t>(1),
kMaxAllowedValuesOfSamplesPerFrame * num_handles_required());
rtc::CritScope cs_render(crit_render_);
rtc::CritScope cs_capture(crit_capture_);
// Reallocate the queue if the queue item size is too small to fit the
// data to put in the queue.
if (render_queue_element_max_size_ < new_render_queue_element_max_size) {
@ -330,10 +360,14 @@ void* EchoControlMobileImpl::CreateHandle() const {
}
void EchoControlMobileImpl::DestroyHandle(void* handle) const {
// This method is only called in a non-concurrent manner during APM
// destruction.
WebRtcAecm_Free(static_cast<Handle*>(handle));
}
int EchoControlMobileImpl::InitializeHandle(void* handle) const {
rtc::CritScope cs_render(crit_render_);
rtc::CritScope cs_capture(crit_capture_);
assert(handle != NULL);
Handle* my_handle = static_cast<Handle*>(handle);
if (WebRtcAecm_Init(my_handle, apm_->proc_sample_rate_hz()) != 0) {
@ -347,10 +381,12 @@ int EchoControlMobileImpl::InitializeHandle(void* handle) const {
}
}
return apm_->kNoError;
return AudioProcessing::kNoError;
}
int EchoControlMobileImpl::ConfigureHandle(void* handle) const {
rtc::CritScope cs_render(crit_render_);
rtc::CritScope cs_capture(crit_capture_);
AecmConfig config;
config.cngMode = comfort_noise_enabled_;
config.echoMode = MapSetting(routing_mode_);
@ -359,11 +395,13 @@ int EchoControlMobileImpl::ConfigureHandle(void* handle) const {
}
int EchoControlMobileImpl::num_handles_required() const {
// Not locked as it only relies on APM public API which is threadsafe.
return apm_->num_output_channels() *
apm_->num_reverse_channels();
}
int EchoControlMobileImpl::GetHandleError(void* handle) const {
// Not locked as it does not rely on anything in the state.
assert(handle != NULL);
return AudioProcessing::kUnspecifiedError;
}

View File

@ -11,6 +11,7 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CONTROL_MOBILE_IMPL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CONTROL_MOBILE_IMPL_H_
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/swap_queue.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
@ -19,13 +20,14 @@
namespace webrtc {
class AudioBuffer;
class CriticalSectionWrapper;
class EchoControlMobileImpl : public EchoControlMobile,
public ProcessingComponent {
public:
EchoControlMobileImpl(const AudioProcessing* apm,
CriticalSectionWrapper* crit);
rtc::CriticalSection* crit_render,
rtc::CriticalSection* crit_capture);
virtual ~EchoControlMobileImpl();
int ProcessRenderAudio(const AudioBuffer* audio);
@ -51,6 +53,7 @@ class EchoControlMobileImpl : public EchoControlMobile,
int GetEchoPath(void* echo_path, size_t size_bytes) const override;
// ProcessingComponent implementation.
// Called holding both the render and capture locks.
void* CreateHandle() const override;
int InitializeHandle(void* handle) const override;
int ConfigureHandle(void* handle) const override;
@ -60,15 +63,24 @@ class EchoControlMobileImpl : public EchoControlMobile,
void AllocateRenderQueue();
// Not guarded as its public API is thread safe.
const AudioProcessing* apm_;
CriticalSectionWrapper* crit_;
RoutingMode routing_mode_;
bool comfort_noise_enabled_;
unsigned char* external_echo_path_;
size_t render_queue_element_max_size_;
std::vector<int16_t> render_queue_buffer_;
std::vector<int16_t> capture_queue_buffer_;
rtc::CriticalSection* const crit_render_ ACQUIRED_BEFORE(crit_capture_);
rtc::CriticalSection* const crit_capture_;
RoutingMode routing_mode_ GUARDED_BY(crit_capture_);
bool comfort_noise_enabled_ GUARDED_BY(crit_capture_);
unsigned char* external_echo_path_ GUARDED_BY(crit_render_)
GUARDED_BY(crit_capture_);
size_t render_queue_element_max_size_ GUARDED_BY(crit_render_)
GUARDED_BY(crit_capture_);
std::vector<int16_t> render_queue_buffer_ GUARDED_BY(crit_render_);
std::vector<int16_t> capture_queue_buffer_ GUARDED_BY(crit_capture_);
// Lock protection not needed.
rtc::scoped_ptr<
SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>>
render_signal_queue_;

View File

@ -14,7 +14,6 @@
#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/modules/audio_processing/agc/legacy/gain_control.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
@ -44,10 +43,12 @@ static const size_t kMaxNumFramesToBuffer = 100;
} // namespace
GainControlImpl::GainControlImpl(const AudioProcessing* apm,
CriticalSectionWrapper* crit)
rtc::CriticalSection* crit_render,
rtc::CriticalSection* crit_capture)
: ProcessingComponent(),
apm_(apm),
crit_(crit),
crit_render_(crit_render),
crit_capture_(crit_capture),
mode_(kAdaptiveAnalog),
minimum_capture_level_(0),
maximum_capture_level_(255),
@ -57,13 +58,18 @@ GainControlImpl::GainControlImpl(const AudioProcessing* apm,
analog_capture_level_(0),
was_analog_level_set_(false),
stream_is_saturated_(false),
render_queue_element_max_size_(0) {}
render_queue_element_max_size_(0) {
RTC_DCHECK(apm);
RTC_DCHECK(crit_render);
RTC_DCHECK(crit_capture);
}
GainControlImpl::~GainControlImpl() {}
int GainControlImpl::ProcessRenderAudio(AudioBuffer* audio) {
rtc::CritScope cs(crit_render_);
if (!is_component_enabled()) {
return apm_->kNoError;
return AudioProcessing::kNoError;
}
assert(audio->num_frames_per_band() <= 160);
@ -74,7 +80,7 @@ int GainControlImpl::ProcessRenderAudio(AudioBuffer* audio) {
int err =
WebRtcAgc_GetAddFarendError(my_handle, audio->num_frames_per_band());
if (err != apm_->kNoError)
if (err != AudioProcessing::kNoError)
return GetHandleError(my_handle);
// Buffer the samples in the render queue.
@ -85,18 +91,21 @@ int GainControlImpl::ProcessRenderAudio(AudioBuffer* audio) {
// Insert the samples into the queue.
if (!render_signal_queue_->Insert(&render_queue_buffer_)) {
// The data queue is full and needs to be emptied.
ReadQueuedRenderData();
// Retry the insert (should always work).
RTC_DCHECK_EQ(render_signal_queue_->Insert(&render_queue_buffer_), true);
}
return apm_->kNoError;
return AudioProcessing::kNoError;
}
// Read chunks of data that were received and queued on the render side from
// a queue. All the data chunks are buffered into the farend signal of the AGC.
void GainControlImpl::ReadQueuedRenderData() {
rtc::CritScope cs(crit_capture_);
if (!is_component_enabled()) {
return;
}
@ -116,14 +125,16 @@ void GainControlImpl::ReadQueuedRenderData() {
}
int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
rtc::CritScope cs(crit_capture_);
if (!is_component_enabled()) {
return apm_->kNoError;
return AudioProcessing::kNoError;
}
assert(audio->num_frames_per_band() <= 160);
assert(audio->num_channels() == num_handles());
int err = apm_->kNoError;
int err = AudioProcessing::kNoError;
if (mode_ == kAdaptiveAnalog) {
capture_levels_.assign(num_handles(), analog_capture_level_);
@ -135,7 +146,7 @@ int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
audio->num_bands(),
audio->num_frames_per_band());
if (err != apm_->kNoError) {
if (err != AudioProcessing::kNoError) {
return GetHandleError(my_handle);
}
}
@ -155,23 +166,25 @@ int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
capture_levels_[i] = capture_level_out;
if (err != apm_->kNoError) {
if (err != AudioProcessing::kNoError) {
return GetHandleError(my_handle);
}
}
}
return apm_->kNoError;
return AudioProcessing::kNoError;
}
int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio) {
rtc::CritScope cs(crit_capture_);
if (!is_component_enabled()) {
return apm_->kNoError;
return AudioProcessing::kNoError;
}
if (mode_ == kAdaptiveAnalog && !was_analog_level_set_) {
return apm_->kStreamParameterNotSetError;
return AudioProcessing::kStreamParameterNotSetError;
}
assert(audio->num_frames_per_band() <= 160);
@ -183,6 +196,8 @@ int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio) {
int32_t capture_level_out = 0;
uint8_t saturation_warning = 0;
// The call to stream_has_echo() is ok from a deadlock perspective
// as the capture lock is allready held.
int err = WebRtcAgc_Process(
my_handle,
audio->split_bands_const(i),
@ -194,7 +209,7 @@ int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio) {
apm_->echo_cancellation()->stream_has_echo(),
&saturation_warning);
if (err != apm_->kNoError) {
if (err != AudioProcessing::kNoError) {
return GetHandleError(my_handle);
}
@ -215,22 +230,24 @@ int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
was_analog_level_set_ = false;
return apm_->kNoError;
return AudioProcessing::kNoError;
}
// TODO(ajm): ensure this is called under kAdaptiveAnalog.
int GainControlImpl::set_stream_analog_level(int level) {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs(crit_capture_);
was_analog_level_set_ = true;
if (level < minimum_capture_level_ || level > maximum_capture_level_) {
return apm_->kBadParameterError;
return AudioProcessing::kBadParameterError;
}
analog_capture_level_ = level;
return apm_->kNoError;
return AudioProcessing::kNoError;
}
int GainControlImpl::stream_analog_level() {
rtc::CritScope cs(crit_capture_);
// TODO(ajm): enable this assertion?
//assert(mode_ == kAdaptiveAnalog);
@ -238,18 +255,21 @@ int GainControlImpl::stream_analog_level() {
}
int GainControlImpl::Enable(bool enable) {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs_render(crit_render_);
rtc::CritScope cs_capture(crit_capture_);
return EnableComponent(enable);
}
bool GainControlImpl::is_enabled() const {
rtc::CritScope cs(crit_capture_);
return is_component_enabled();
}
int GainControlImpl::set_mode(Mode mode) {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs_render(crit_render_);
rtc::CritScope cs_capture(crit_capture_);
if (MapSetting(mode) == -1) {
return apm_->kBadParameterError;
return AudioProcessing::kBadParameterError;
}
mode_ = mode;
@ -257,22 +277,23 @@ int GainControlImpl::set_mode(Mode mode) {
}
GainControl::Mode GainControlImpl::mode() const {
rtc::CritScope cs(crit_capture_);
return mode_;
}
int GainControlImpl::set_analog_level_limits(int minimum,
int maximum) {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs(crit_capture_);
if (minimum < 0) {
return apm_->kBadParameterError;
return AudioProcessing::kBadParameterError;
}
if (maximum > 65535) {
return apm_->kBadParameterError;
return AudioProcessing::kBadParameterError;
}
if (maximum < minimum) {
return apm_->kBadParameterError;
return AudioProcessing::kBadParameterError;
}
minimum_capture_level_ = minimum;
@ -282,21 +303,24 @@ int GainControlImpl::set_analog_level_limits(int minimum,
}
int GainControlImpl::analog_level_minimum() const {
rtc::CritScope cs(crit_capture_);
return minimum_capture_level_;
}
int GainControlImpl::analog_level_maximum() const {
rtc::CritScope cs(crit_capture_);
return maximum_capture_level_;
}
bool GainControlImpl::stream_is_saturated() const {
rtc::CritScope cs(crit_capture_);
return stream_is_saturated_;
}
int GainControlImpl::set_target_level_dbfs(int level) {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs(crit_capture_);
if (level > 31 || level < 0) {
return apm_->kBadParameterError;
return AudioProcessing::kBadParameterError;
}
target_level_dbfs_ = level;
@ -304,13 +328,14 @@ int GainControlImpl::set_target_level_dbfs(int level) {
}
int GainControlImpl::target_level_dbfs() const {
rtc::CritScope cs(crit_capture_);
return target_level_dbfs_;
}
int GainControlImpl::set_compression_gain_db(int gain) {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs(crit_capture_);
if (gain < 0 || gain > 90) {
return apm_->kBadParameterError;
return AudioProcessing::kBadParameterError;
}
compression_gain_db_ = gain;
@ -318,31 +343,35 @@ int GainControlImpl::set_compression_gain_db(int gain) {
}
int GainControlImpl::compression_gain_db() const {
rtc::CritScope cs(crit_capture_);
return compression_gain_db_;
}
int GainControlImpl::enable_limiter(bool enable) {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs(crit_capture_);
limiter_enabled_ = enable;
return Configure();
}
bool GainControlImpl::is_limiter_enabled() const {
rtc::CritScope cs(crit_capture_);
return limiter_enabled_;
}
int GainControlImpl::Initialize() {
int err = ProcessingComponent::Initialize();
if (err != apm_->kNoError || !is_component_enabled()) {
if (err != AudioProcessing::kNoError || !is_component_enabled()) {
return err;
}
AllocateRenderQueue();
rtc::CritScope cs_capture(crit_capture_);
const int n = num_handles();
RTC_CHECK_GE(n, 0) << "Bad number of handles: " << n;
capture_levels_.assign(n, analog_capture_level_);
return apm_->kNoError;
return AudioProcessing::kNoError;
}
void GainControlImpl::AllocateRenderQueue() {
@ -350,6 +379,9 @@ void GainControlImpl::AllocateRenderQueue() {
std::max<size_t>(static_cast<size_t>(1),
kMaxAllowedValuesOfSamplesPerFrame * num_handles());
rtc::CritScope cs_render(crit_render_);
rtc::CritScope cs_capture(crit_capture_);
if (render_queue_element_max_size_ < new_render_queue_element_max_size) {
render_queue_element_max_size_ = new_render_queue_element_max_size;
std::vector<int16_t> template_queue_element(render_queue_element_max_size_);
@ -375,6 +407,9 @@ void GainControlImpl::DestroyHandle(void* handle) const {
}
int GainControlImpl::InitializeHandle(void* handle) const {
rtc::CritScope cs_render(crit_render_);
rtc::CritScope cs_capture(crit_capture_);
return WebRtcAgc_Init(static_cast<Handle*>(handle),
minimum_capture_level_,
maximum_capture_level_,
@ -383,6 +418,8 @@ int GainControlImpl::InitializeHandle(void* handle) const {
}
int GainControlImpl::ConfigureHandle(void* handle) const {
rtc::CritScope cs_render(crit_render_);
rtc::CritScope cs_capture(crit_capture_);
WebRtcAgcConfig config;
// TODO(ajm): Flip the sign here (since AGC expects a positive value) if we
// change the interface.
@ -397,6 +434,7 @@ int GainControlImpl::ConfigureHandle(void* handle) const {
}
int GainControlImpl::num_handles_required() const {
// Not locked as it only relies on APM public API which is threadsafe.
return apm_->num_output_channels();
}
@ -404,6 +442,6 @@ int GainControlImpl::GetHandleError(void* handle) const {
// The AGC has no get_error() function.
// (Despite listing errors in its interface...)
assert(handle != NULL);
return apm_->kUnspecifiedError;
return AudioProcessing::kUnspecifiedError;
}
} // namespace webrtc

View File

@ -13,7 +13,9 @@
#include <vector>
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_audio/swap_queue.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/audio_processing/processing_component.h"
@ -21,13 +23,13 @@
namespace webrtc {
class AudioBuffer;
class CriticalSectionWrapper;
class GainControlImpl : public GainControl,
public ProcessingComponent {
public:
GainControlImpl(const AudioProcessing* apm,
CriticalSectionWrapper* crit);
rtc::CriticalSection* crit_render,
rtc::CriticalSection* crit_capture);
virtual ~GainControlImpl();
int ProcessRenderAudio(AudioBuffer* audio);
@ -71,22 +73,29 @@ class GainControlImpl : public GainControl,
void AllocateRenderQueue();
// Not guarded as its public API is thread safe.
const AudioProcessing* apm_;
CriticalSectionWrapper* crit_;
Mode mode_;
int minimum_capture_level_;
int maximum_capture_level_;
bool limiter_enabled_;
int target_level_dbfs_;
int compression_gain_db_;
std::vector<int> capture_levels_;
int analog_capture_level_;
bool was_analog_level_set_;
bool stream_is_saturated_;
size_t render_queue_element_max_size_;
std::vector<int16_t> render_queue_buffer_;
std::vector<int16_t> capture_queue_buffer_;
rtc::CriticalSection* const crit_render_ ACQUIRED_BEFORE(crit_capture_);
rtc::CriticalSection* const crit_capture_;
Mode mode_ GUARDED_BY(crit_capture_);
int minimum_capture_level_ GUARDED_BY(crit_capture_);
int maximum_capture_level_ GUARDED_BY(crit_capture_);
bool limiter_enabled_ GUARDED_BY(crit_capture_);
int target_level_dbfs_ GUARDED_BY(crit_capture_);
int compression_gain_db_ GUARDED_BY(crit_capture_);
std::vector<int> capture_levels_ GUARDED_BY(crit_capture_);
int analog_capture_level_ GUARDED_BY(crit_capture_);
bool was_analog_level_set_ GUARDED_BY(crit_capture_);
bool stream_is_saturated_ GUARDED_BY(crit_capture_);
size_t render_queue_element_max_size_ GUARDED_BY(crit_render_)
GUARDED_BY(crit_capture_);
std::vector<int16_t> render_queue_buffer_ GUARDED_BY(crit_render_);
std::vector<int16_t> capture_queue_buffer_ GUARDED_BY(crit_capture_);
// Lock protection not needed.
rtc::scoped_ptr<
SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>>
render_signal_queue_;

View File

@ -100,18 +100,20 @@ int Filter(FilterState* hpf, int16_t* data, size_t length) {
typedef FilterState Handle;
HighPassFilterImpl::HighPassFilterImpl(const AudioProcessing* apm,
CriticalSectionWrapper* crit)
: ProcessingComponent(),
apm_(apm),
crit_(crit) {}
rtc::CriticalSection* crit)
: ProcessingComponent(), apm_(apm), crit_(crit) {
RTC_DCHECK(apm);
RTC_DCHECK(crit);
}
HighPassFilterImpl::~HighPassFilterImpl() {}
int HighPassFilterImpl::ProcessCaptureAudio(AudioBuffer* audio) {
int err = apm_->kNoError;
rtc::CritScope cs(crit_);
int err = AudioProcessing::kNoError;
if (!is_component_enabled()) {
return apm_->kNoError;
return AudioProcessing::kNoError;
}
assert(audio->num_frames_per_band() <= 160);
@ -122,20 +124,21 @@ int HighPassFilterImpl::ProcessCaptureAudio(AudioBuffer* audio) {
audio->split_bands(i)[kBand0To8kHz],
audio->num_frames_per_band());
if (err != apm_->kNoError) {
if (err != AudioProcessing::kNoError) {
return GetHandleError(my_handle);
}
}
return apm_->kNoError;
return AudioProcessing::kNoError;
}
int HighPassFilterImpl::Enable(bool enable) {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs(crit_);
return EnableComponent(enable);
}
bool HighPassFilterImpl::is_enabled() const {
rtc::CritScope cs(crit_);
return is_component_enabled();
}
@ -148,12 +151,15 @@ void HighPassFilterImpl::DestroyHandle(void* handle) const {
}
int HighPassFilterImpl::InitializeHandle(void* handle) const {
// TODO(peah): Remove dependency on apm for the
// capture side sample rate.
rtc::CritScope cs(crit_);
return InitializeFilter(static_cast<Handle*>(handle),
apm_->proc_sample_rate_hz());
}
int HighPassFilterImpl::ConfigureHandle(void* /*handle*/) const {
return apm_->kNoError; // Not configurable.
return AudioProcessing::kNoError; // Not configurable.
}
int HighPassFilterImpl::num_handles_required() const {
@ -163,6 +169,6 @@ int HighPassFilterImpl::num_handles_required() const {
int HighPassFilterImpl::GetHandleError(void* handle) const {
// The component has no detailed errors.
assert(handle != NULL);
return apm_->kUnspecifiedError;
return AudioProcessing::kUnspecifiedError;
}
} // namespace webrtc

View File

@ -11,18 +11,18 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_HIGH_PASS_FILTER_IMPL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_HIGH_PASS_FILTER_IMPL_H_
#include "webrtc/base/criticalsection.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
class AudioBuffer;
class CriticalSectionWrapper;
class HighPassFilterImpl : public HighPassFilter,
public ProcessingComponent {
public:
HighPassFilterImpl(const AudioProcessing* apm, CriticalSectionWrapper* crit);
HighPassFilterImpl(const AudioProcessing* apm, rtc::CriticalSection* crit);
virtual ~HighPassFilterImpl();
int ProcessCaptureAudio(AudioBuffer* audio);
@ -43,7 +43,8 @@ class HighPassFilterImpl : public HighPassFilter,
int GetHandleError(void* handle) const override;
const AudioProcessing* apm_;
CriticalSectionWrapper* crit_;
rtc::CriticalSection* const crit_;
};
} // namespace webrtc

View File

@ -18,13 +18,17 @@
namespace webrtc {
LevelEstimatorImpl::LevelEstimatorImpl(const AudioProcessing* apm,
CriticalSectionWrapper* crit)
: ProcessingComponent(),
crit_(crit) {}
rtc::CriticalSection* crit)
: ProcessingComponent(), crit_(crit) {
RTC_DCHECK(apm);
RTC_DCHECK(crit);
}
LevelEstimatorImpl::~LevelEstimatorImpl() {}
int LevelEstimatorImpl::ProcessStream(AudioBuffer* audio) {
rtc::CritScope cs(crit_);
if (!is_component_enabled()) {
return AudioProcessing::kNoError;
}
@ -39,15 +43,17 @@ int LevelEstimatorImpl::ProcessStream(AudioBuffer* audio) {
}
int LevelEstimatorImpl::Enable(bool enable) {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs(crit_);
return EnableComponent(enable);
}
bool LevelEstimatorImpl::is_enabled() const {
rtc::CritScope cs(crit_);
return is_component_enabled();
}
int LevelEstimatorImpl::RMS() {
rtc::CritScope cs(crit_);
if (!is_component_enabled()) {
return AudioProcessing::kNotEnabledError;
}
@ -67,6 +73,7 @@ void LevelEstimatorImpl::DestroyHandle(void* handle) const {
}
int LevelEstimatorImpl::InitializeHandle(void* handle) const {
rtc::CritScope cs(crit_);
static_cast<RMSLevel*>(handle)->Reset();
return AudioProcessing::kNoError;
}

View File

@ -11,6 +11,7 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_LEVEL_ESTIMATOR_IMPL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_LEVEL_ESTIMATOR_IMPL_H_
#include "webrtc/base/criticalsection.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/audio_processing/processing_component.h"
#include "webrtc/modules/audio_processing/rms_level.h"
@ -18,13 +19,11 @@
namespace webrtc {
class AudioBuffer;
class CriticalSectionWrapper;
class LevelEstimatorImpl : public LevelEstimator,
public ProcessingComponent {
public:
LevelEstimatorImpl(const AudioProcessing* apm,
CriticalSectionWrapper* crit);
LevelEstimatorImpl(const AudioProcessing* apm, rtc::CriticalSection* crit);
virtual ~LevelEstimatorImpl();
int ProcessStream(AudioBuffer* audio);
@ -45,7 +44,7 @@ class LevelEstimatorImpl : public LevelEstimator,
int num_handles_required() const override;
int GetHandleError(void* handle) const override;
CriticalSectionWrapper* crit_;
rtc::CriticalSection* const crit_;
};
} // namespace webrtc

View File

@ -18,7 +18,6 @@
#elif defined(WEBRTC_NS_FIXED)
#include "webrtc/modules/audio_processing/ns/noise_suppression_x.h"
#endif
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
@ -47,18 +46,18 @@ int MapSetting(NoiseSuppression::Level level) {
} // namespace
NoiseSuppressionImpl::NoiseSuppressionImpl(const AudioProcessing* apm,
CriticalSectionWrapper* crit)
: ProcessingComponent(),
apm_(apm),
crit_(crit),
level_(kModerate) {}
rtc::CriticalSection* crit)
: ProcessingComponent(), apm_(apm), crit_(crit), level_(kModerate) {
RTC_DCHECK(apm);
RTC_DCHECK(crit);
}
NoiseSuppressionImpl::~NoiseSuppressionImpl() {}
int NoiseSuppressionImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
#if defined(WEBRTC_NS_FLOAT)
if (!is_component_enabled()) {
return apm_->kNoError;
return AudioProcessing::kNoError;
}
assert(audio->num_frames_per_band() <= 160);
assert(audio->num_channels() == num_handles());
@ -69,12 +68,13 @@ int NoiseSuppressionImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
WebRtcNs_Analyze(my_handle, audio->split_bands_const_f(i)[kBand0To8kHz]);
}
#endif
return apm_->kNoError;
return AudioProcessing::kNoError;
}
int NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
rtc::CritScope cs(crit_);
if (!is_component_enabled()) {
return apm_->kNoError;
return AudioProcessing::kNoError;
}
assert(audio->num_frames_per_band() <= 160);
assert(audio->num_channels() == num_handles());
@ -93,22 +93,23 @@ int NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
audio->split_bands(i));
#endif
}
return apm_->kNoError;
return AudioProcessing::kNoError;
}
int NoiseSuppressionImpl::Enable(bool enable) {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs(crit_);
return EnableComponent(enable);
}
bool NoiseSuppressionImpl::is_enabled() const {
rtc::CritScope cs(crit_);
return is_component_enabled();
}
int NoiseSuppressionImpl::set_level(Level level) {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs(crit_);
if (MapSetting(level) == -1) {
return apm_->kBadParameterError;
return AudioProcessing::kBadParameterError;
}
level_ = level;
@ -116,10 +117,12 @@ int NoiseSuppressionImpl::set_level(Level level) {
}
NoiseSuppression::Level NoiseSuppressionImpl::level() const {
rtc::CritScope cs(crit_);
return level_;
}
float NoiseSuppressionImpl::speech_probability() const {
rtc::CritScope cs(crit_);
#if defined(WEBRTC_NS_FLOAT)
float probability_average = 0.0f;
for (int i = 0; i < num_handles(); i++) {
@ -129,7 +132,7 @@ float NoiseSuppressionImpl::speech_probability() const {
return probability_average / num_handles();
#elif defined(WEBRTC_NS_FIXED)
// Currently not available for the fixed point implementation.
return apm_->kUnsupportedFunctionError;
return AudioProcessing::kUnsupportedFunctionError;
#endif
}
@ -160,6 +163,7 @@ int NoiseSuppressionImpl::InitializeHandle(void* handle) const {
}
int NoiseSuppressionImpl::ConfigureHandle(void* handle) const {
rtc::CritScope cs(crit_);
#if defined(WEBRTC_NS_FLOAT)
return WebRtcNs_set_policy(static_cast<Handle*>(handle),
MapSetting(level_));
@ -176,6 +180,6 @@ int NoiseSuppressionImpl::num_handles_required() const {
int NoiseSuppressionImpl::GetHandleError(void* handle) const {
// The NS has no get_error() function.
assert(handle != NULL);
return apm_->kUnspecifiedError;
return AudioProcessing::kUnspecifiedError;
}
} // namespace webrtc

View File

@ -11,19 +11,18 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NOISE_SUPPRESSION_IMPL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_NOISE_SUPPRESSION_IMPL_H_
#include "webrtc/base/criticalsection.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
class AudioBuffer;
class CriticalSectionWrapper;
class NoiseSuppressionImpl : public NoiseSuppression,
public ProcessingComponent {
public:
NoiseSuppressionImpl(const AudioProcessing* apm,
CriticalSectionWrapper* crit);
NoiseSuppressionImpl(const AudioProcessing* apm, rtc::CriticalSection* crit);
virtual ~NoiseSuppressionImpl();
int AnalyzeCaptureAudio(AudioBuffer* audio);
@ -47,9 +46,12 @@ class NoiseSuppressionImpl : public NoiseSuppression,
int num_handles_required() const override;
int GetHandleError(void* handle) const override;
// Not guarded as its public API is thread safe.
const AudioProcessing* apm_;
CriticalSectionWrapper* crit_;
Level level_;
rtc::CriticalSection* const crit_;
Level level_ GUARDED_BY(crit_);
};
} // namespace webrtc

View File

@ -12,9 +12,10 @@
#include <assert.h>
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/thread_checker.h"
#include "webrtc/common_audio/vad/include/webrtc_vad.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
@ -38,19 +39,23 @@ int MapSetting(VoiceDetection::Likelihood likelihood) {
} // namespace
VoiceDetectionImpl::VoiceDetectionImpl(const AudioProcessing* apm,
CriticalSectionWrapper* crit)
: ProcessingComponent(),
apm_(apm),
crit_(crit),
stream_has_voice_(false),
using_external_vad_(false),
likelihood_(kLowLikelihood),
frame_size_ms_(10),
frame_size_samples_(0) {}
rtc::CriticalSection* crit)
: ProcessingComponent(),
apm_(apm),
crit_(crit),
stream_has_voice_(false),
using_external_vad_(false),
likelihood_(kLowLikelihood),
frame_size_ms_(10),
frame_size_samples_(0) {
RTC_DCHECK(apm);
RTC_DCHECK(crit);
}
VoiceDetectionImpl::~VoiceDetectionImpl() {}
int VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
rtc::CritScope cs(crit_);
if (!is_component_enabled()) {
return apm_->kNoError;
}
@ -81,28 +86,31 @@ int VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
int VoiceDetectionImpl::Enable(bool enable) {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs(crit_);
return EnableComponent(enable);
}
bool VoiceDetectionImpl::is_enabled() const {
rtc::CritScope cs(crit_);
return is_component_enabled();
}
int VoiceDetectionImpl::set_stream_has_voice(bool has_voice) {
rtc::CritScope cs(crit_);
using_external_vad_ = true;
stream_has_voice_ = has_voice;
return apm_->kNoError;
}
bool VoiceDetectionImpl::stream_has_voice() const {
rtc::CritScope cs(crit_);
// TODO(ajm): enable this assertion?
//assert(using_external_vad_ || is_component_enabled());
return stream_has_voice_;
}
int VoiceDetectionImpl::set_likelihood(VoiceDetection::Likelihood likelihood) {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs(crit_);
if (MapSetting(likelihood) == -1) {
return apm_->kBadParameterError;
}
@ -112,11 +120,12 @@ int VoiceDetectionImpl::set_likelihood(VoiceDetection::Likelihood likelihood) {
}
VoiceDetection::Likelihood VoiceDetectionImpl::likelihood() const {
rtc::CritScope cs(crit_);
return likelihood_;
}
int VoiceDetectionImpl::set_frame_size_ms(int size) {
CriticalSectionScoped crit_scoped(crit_);
rtc::CritScope cs(crit_);
assert(size == 10); // TODO(ajm): remove when supported.
if (size != 10 &&
size != 20 &&
@ -130,11 +139,14 @@ int VoiceDetectionImpl::set_frame_size_ms(int size) {
}
int VoiceDetectionImpl::frame_size_ms() const {
rtc::CritScope cs(crit_);
return frame_size_ms_;
}
int VoiceDetectionImpl::Initialize() {
int err = ProcessingComponent::Initialize();
rtc::CritScope cs(crit_);
if (err != apm_->kNoError || !is_component_enabled()) {
return err;
}
@ -160,6 +172,7 @@ int VoiceDetectionImpl::InitializeHandle(void* handle) const {
}
int VoiceDetectionImpl::ConfigureHandle(void* handle) const {
rtc::CritScope cs(crit_);
return WebRtcVad_set_mode(static_cast<Handle*>(handle),
MapSetting(likelihood_));
}

View File

@ -11,18 +11,18 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_VOICE_DETECTION_IMPL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_VOICE_DETECTION_IMPL_H_
#include "webrtc/base/criticalsection.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
class AudioBuffer;
class CriticalSectionWrapper;
class VoiceDetectionImpl : public VoiceDetection,
public ProcessingComponent {
public:
VoiceDetectionImpl(const AudioProcessing* apm, CriticalSectionWrapper* crit);
VoiceDetectionImpl(const AudioProcessing* apm, rtc::CriticalSection* crit);
virtual ~VoiceDetectionImpl();
int ProcessCaptureAudio(AudioBuffer* audio);
@ -51,13 +51,16 @@ class VoiceDetectionImpl : public VoiceDetection,
int num_handles_required() const override;
int GetHandleError(void* handle) const override;
// Not guarded as its public API is thread safe.
const AudioProcessing* apm_;
CriticalSectionWrapper* crit_;
bool stream_has_voice_;
bool using_external_vad_;
Likelihood likelihood_;
int frame_size_ms_;
size_t frame_size_samples_;
rtc::CriticalSection* const crit_;
bool stream_has_voice_ GUARDED_BY(crit_);
bool using_external_vad_ GUARDED_BY(crit_);
Likelihood likelihood_ GUARDED_BY(crit_);
int frame_size_ms_ GUARDED_BY(crit_);
size_t frame_size_samples_ GUARDED_BY(crit_);
};
} // namespace webrtc