Add Analyze API to NS
This adds an empty API. In a follow-up CL, I will separate the noise estimation from the Process API and fill in this function.

BUG=webrtc:3811
R=bjornv@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/23599004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7218 4adac7df-926f-26a2-2b94-8c16560cd09d
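For context, a minimal usage sketch of the new C-level API follows. It is illustrative only: the helper name, framing, and include line are assumptions of this sketch, and the real call site added by this CL is AudioProcessingImpl::ProcessStreamLocked (first hunk below), which calls AnalyzeCaptureAudio before ProcessCaptureAudio.

#include "noise_suppression.h"  /* WebRtcNs_* API; include path abbreviated */

/* Illustrative sketch (not part of this CL): drive Analyze before Process on
 * each 10 ms frame. Assumes `ns` was already created, initialized with
 * WebRtcNs_Init, and configured with WebRtcNs_set_policy. */
static int ns_run_frame(NsHandle* ns,
                        float* lb_in, float* hb_in,     /* low/high band in */
                        float* lb_out, float* hb_out) { /* low/high band out */
  /* New in this CL: update the background-noise estimate for this frame. */
  if (WebRtcNs_Analyze(ns, lb_in) != 0)
    return -1;
  /* Then apply suppression as before. */
  return WebRtcNs_Process(ns, lb_in, hb_in, lb_out, hb_out);
}

Until the follow-up CL lands, WebRtcNs_AnalyzeCore is a stub that returns 0 (see the ns_core.c hunk below), so the extra call is effectively a no-op.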
@@ -487,6 +487,7 @@ int AudioProcessingImpl::ProcessStreamLocked() {
   if (echo_control_mobile_->is_enabled() && noise_suppression_->is_enabled()) {
     ca->CopyLowPassToReference();
   }
+  RETURN_ON_ERR(noise_suppression_->AnalyzeCaptureAudio(ca));
   RETURN_ON_ERR(noise_suppression_->ProcessCaptureAudio(ca));
   RETURN_ON_ERR(echo_control_mobile_->ProcessCaptureAudio(ca));
   RETURN_ON_ERR(voice_detection_->ProcessCaptureAudio(ca));
@@ -55,6 +55,27 @@ NoiseSuppressionImpl::NoiseSuppressionImpl(const AudioProcessing* apm,
 
 NoiseSuppressionImpl::~NoiseSuppressionImpl() {}
 
+int NoiseSuppressionImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
+#if defined(WEBRTC_NS_FLOAT)
+  if (!is_component_enabled()) {
+    return apm_->kNoError;
+  }
+  assert(audio->samples_per_split_channel() <= 160);
+  assert(audio->num_channels() == num_handles());
+
+  for (int i = 0; i < num_handles(); ++i) {
+    Handle* my_handle = static_cast<Handle*>(handle(i));
+
+    int err = WebRtcNs_Analyze(my_handle,
+                               audio->low_pass_split_data_f(i));
+    if (err != apm_->kNoError) {
+      return GetHandleError(my_handle);
+    }
+  }
+#endif
+  return apm_->kNoError;
+}
+
 int NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
   int err = apm_->kNoError;
 
@@ -64,16 +85,16 @@ int NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
   assert(audio->samples_per_split_channel() <= 160);
   assert(audio->num_channels() == num_handles());
 
-  for (int i = 0; i < num_handles(); i++) {
+  for (int i = 0; i < num_handles(); ++i) {
     Handle* my_handle = static_cast<Handle*>(handle(i));
 #if defined(WEBRTC_NS_FLOAT)
-    err = WebRtcNs_Process(static_cast<Handle*>(handle(i)),
+    err = WebRtcNs_Process(my_handle,
                            audio->low_pass_split_data_f(i),
                            audio->high_pass_split_data_f(i),
                            audio->low_pass_split_data_f(i),
                            audio->high_pass_split_data_f(i));
 #elif defined(WEBRTC_NS_FIXED)
-    err = WebRtcNsx_Process(static_cast<Handle*>(handle(i)),
+    err = WebRtcNsx_Process(my_handle,
                             audio->low_pass_split_data(i),
                             audio->high_pass_split_data(i),
                             audio->low_pass_split_data(i),
@@ -26,6 +26,7 @@ class NoiseSuppressionImpl : public NoiseSuppression,
                        CriticalSectionWrapper* crit);
   virtual ~NoiseSuppressionImpl();
 
+  int AnalyzeCaptureAudio(AudioBuffer* audio);
   int ProcessCaptureAudio(AudioBuffer* audio);
 
   // NoiseSuppression implementation.
@@ -79,6 +79,21 @@ int WebRtcNs_Init(NsHandle* NS_inst, uint32_t fs);
  */
 int WebRtcNs_set_policy(NsHandle* NS_inst, int mode);
 
+/*
+ * This functions estimates the background noise for the inserted speech frame.
+ * The input and output signals should always be 10ms (80 or 160 samples).
+ *
+ * Input
+ *      - NS_inst       : Noise suppression instance.
+ *      - spframe       : Pointer to speech frame buffer for L band
+ *
+ * Output:
+ *      - NS_inst       : Updated NS instance
+ *
+ * Return value         :  0 - OK
+ *                        -1 - Error
+ */
+int WebRtcNs_Analyze(NsHandle* NS_inst, float* spframe);
+
 /*
  * This functions does Noise Suppression for the inserted speech frame. The
@@ -42,6 +42,9 @@ int WebRtcNs_set_policy(NsHandle* NS_inst, int mode) {
   return WebRtcNs_set_policy_core((NSinst_t*) NS_inst, mode);
 }
 
+int WebRtcNs_Analyze(NsHandle* NS_inst, float* spframe) {
+  return WebRtcNs_AnalyzeCore((NSinst_t*) NS_inst, spframe);
+}
 
 int WebRtcNs_Process(NsHandle* NS_inst, float* spframe, float* spframe_H,
                      float* outframe, float* outframe_H) {
@@ -714,6 +714,10 @@ void WebRtcNs_SpeechNoiseProb(NSinst_t* inst, float* probSpeechFinal, float* snr
   }
 }
 
+int WebRtcNs_AnalyzeCore(NSinst_t* inst, float* inFrame) {
+  return 0;
+}
+
 int WebRtcNs_ProcessCore(NSinst_t* inst,
                          float* speechFrame,
                          float* speechFrameHB,
@@ -146,6 +146,23 @@ int WebRtcNs_InitCore(NSinst_t* inst, uint32_t fs);
  */
 int WebRtcNs_set_policy_core(NSinst_t* inst, int mode);
 
+/****************************************************************************
+ * WebRtcNs_AnalyzeCore
+ *
+ * Estimate the background noise.
+ *
+ * Input:
+ *      - inst          : Instance that should be initialized
+ *      - inFrame       : Input speech frame for lower band
+ *
+ * Output:
+ *      - inst          : Updated instance
+ *
+ * Return value         :  0 - OK
+ *                        -1 - Error
+ */
+int WebRtcNs_AnalyzeCore(NSinst_t* inst, float* inFrame);
+
 /****************************************************************************
  * WebRtcNs_ProcessCore
  *
@@ -164,8 +181,6 @@ int WebRtcNs_set_policy_core(NSinst_t* inst, int mode);
  * Return value         :  0 - OK
  *                        -1 - Error
  */
-
-
 int WebRtcNs_ProcessCore(NSinst_t* inst,
                          float* inFrameLow,
                          float* inFrameHigh,