Rename Beamformer to NonlinearBeamformer.
R=aluebs@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/42359004

Cr-Commit-Position: refs/heads/master@{#8710}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8710 4adac7df-926f-26a2-2b94-8c16560cd09d
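For callers this is purely a rename; the beamforming algorithm itself is unchanged. A hypothetical call site would be updated roughly as follows (illustrative sketch only; the helper name, chunk size, sample rate, and geometry are made-up values):

#include <vector>

#include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h"

void UseRenamedBeamformer() {  // hypothetical helper, illustration only
  // Two-mic linear array with the first mic at the origin (values are illustrative).
  std::vector<webrtc::Point> array_geometry;
  array_geometry.push_back(webrtc::Point(0.f, 0.f, 0.f));
  array_geometry.push_back(webrtc::Point(0.05f, 0.f, 0.f));

  // Was: webrtc::Beamformer bf(array_geometry);
  webrtc::NonlinearBeamformer bf(array_geometry);
  bf.Initialize(10 /* chunk_size_ms */, 16000 /* sample_rate_hz */);
}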
@@ -170,8 +170,8 @@ source_set("audio_processing") {
   if (rtc_use_openmax_dl) {
     defines += [ "WEBRTC_BEAMFORMER" ]
     sources += [
-      "beamformer/beamformer.cc",
-      "beamformer/beamformer.h",
+      "beamformer/nonlinear_beamformer.cc",
+      "beamformer/nonlinear_beamformer.h",
     ]
   }
 
@@ -179,8 +179,8 @@
     ['rtc_use_openmax_dl==1', {
       'defines': ['WEBRTC_BEAMFORMER'],
       'sources': [
-        'beamformer/beamformer.cc',
-        'beamformer/beamformer.h',
+        'beamformer/nonlinear_beamformer.cc',
+        'beamformer/nonlinear_beamformer.h',
       ],
     }],
     ['target_arch=="ia32" or target_arch=="x64"', {
@@ -17,7 +17,7 @@
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 #include "webrtc/modules/audio_processing/agc/agc_manager_direct.h"
 #include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/beamformer/beamformer.h"
+#include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h"
 #include "webrtc/common_audio/channel_buffer.h"
 #include "webrtc/modules/audio_processing/common.h"
 #include "webrtc/modules/audio_processing/echo_cancellation_impl.h"
@@ -45,7 +45,7 @@
 
 #define RETURN_ON_ERR(expr) \
   do { \
-    int err = expr; \
+    int err = (expr); \
     if (err != kNoError) { \
       return err; \
     } \
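Aside from the rename, the hunk above also puts parentheses around the macro argument. That is the usual defensive practice for function-like macros: the argument is substituted textually, so parenthesizing it keeps operators inside the argument from re-associating with the surrounding expansion. A classic illustration with a hypothetical macro (not part of this CL):

#define SQUARE_UNSAFE(x) (x * x)      // SQUARE_UNSAFE(1 + 2) expands to (1 + 2 * 1 + 2), which is 5
#define SQUARE_SAFE(x)   ((x) * (x))  // SQUARE_SAFE(1 + 2) expands to ((1 + 2) * (1 + 2)), which is 9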
@@ -134,7 +134,7 @@ AudioProcessing* AudioProcessing::Create(const Config& config) {
 }
 
 AudioProcessing* AudioProcessing::Create(const Config& config,
-                                         Beamformer* beamformer) {
+                                         NonlinearBeamformer* beamformer) {
   AudioProcessingImpl* apm = new AudioProcessingImpl(config, beamformer);
   if (apm->Initialize() != kNoError) {
     delete apm;
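This test-only overload is exercised by the ApmTest hunk near the end of this CL; a condensed usage sketch of the same shape (hypothetical helper name, illustrative geometry, and assuming the same includes the existing unit test uses for rtc::scoped_ptr and webrtc::Config):

#include <vector>

#include "testing/gmock/include/gmock/gmock.h"
#include "webrtc/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"

void CreateApmWithInjectedBeamformer() {  // hypothetical helper, illustration only
  std::vector<webrtc::Point> geometry;
  geometry.push_back(webrtc::Point(0.f, 0.f, 0.f));    // first mic at the origin
  geometry.push_back(webrtc::Point(0.05f, 0.f, 0.f));  // second mic 5 cm away
  webrtc::Config config;
  config.Set<webrtc::Beamforming>(new webrtc::Beamforming(true, geometry));
  // The impl header below holds the beamformer in an
  // rtc::scoped_ptr<NonlinearBeamformer>, so the returned instance adopts it.
  testing::NiceMock<webrtc::MockNonlinearBeamformer>* beamformer =
      new testing::NiceMock<webrtc::MockNonlinearBeamformer>(geometry);
  rtc::scoped_ptr<webrtc::AudioProcessing> apm(
      webrtc::AudioProcessing::Create(config, beamformer));
}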
@@ -148,7 +148,7 @@ AudioProcessingImpl::AudioProcessingImpl(const Config& config)
     : AudioProcessingImpl(config, nullptr) {}
 
 AudioProcessingImpl::AudioProcessingImpl(const Config& config,
-                                         Beamformer* beamformer)
+                                         NonlinearBeamformer* beamformer)
     : echo_cancellation_(NULL),
       echo_control_mobile_(NULL),
       gain_control_(NULL),
@@ -988,7 +988,7 @@ void AudioProcessingImpl::InitializeBeamformer() {
   if (beamformer_enabled_) {
 #ifdef WEBRTC_BEAMFORMER
     if (!beamformer_) {
-      beamformer_.reset(new Beamformer(array_geometry_));
+      beamformer_.reset(new NonlinearBeamformer(array_geometry_));
     }
     beamformer_->Initialize(kChunkSizeMs, split_rate_);
 #else
@@ -23,7 +23,7 @@ namespace webrtc {
 
 class AgcManagerDirect;
 class AudioBuffer;
-class Beamformer;
+class NonlinearBeamformer;
 class CriticalSectionWrapper;
 class EchoCancellationImpl;
 class EchoControlMobileImpl;
@@ -87,7 +87,7 @@ class AudioProcessingImpl : public AudioProcessing {
  public:
   explicit AudioProcessingImpl(const Config& config);
   // Only for testing.
-  AudioProcessingImpl(const Config& config, Beamformer* beamformer);
+  AudioProcessingImpl(const Config& config, NonlinearBeamformer* beamformer);
   virtual ~AudioProcessingImpl();
 
   // AudioProcessing methods.
@@ -218,7 +218,7 @@ class AudioProcessingImpl : public AudioProcessing {
   bool transient_suppressor_enabled_;
   rtc::scoped_ptr<TransientSuppressor> transient_suppressor_;
   const bool beamformer_enabled_;
-  rtc::scoped_ptr<Beamformer> beamformer_;
+  rtc::scoped_ptr<NonlinearBeamformer> beamformer_;
   const std::vector<Point> array_geometry_;
 
   const bool supports_48kHz_;
@@ -88,18 +88,18 @@
     ['rtc_use_openmax_dl==1', {
       'targets': [
         {
-          'target_name': 'beamformer_test',
+          'target_name': 'nonlinear_beamformer_test',
          'type': 'executable',
           'dependencies': [
             '<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
             '<(webrtc_root)/modules/modules.gyp:audio_processing',
           ],
           'sources': [
-            'beamformer/beamformer_test.cc',
+            'beamformer/nonlinear_beamformer_test.cc',
             'beamformer/pcm_utils.cc',
             'beamformer/pcm_utils.h',
           ],
-        },  # beamformer_test
+        },  # nonlinear_beamformer_test
       ],
     }],
   ],
@@ -8,15 +8,17 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "webrtc/modules/audio_processing/beamformer/mock_beamformer.h"
+#include "webrtc/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h"
 
 #include <vector>
 
 namespace webrtc {
 
-MockBeamformer::MockBeamformer(const std::vector<Point>& array_geometry)
-    : Beamformer(array_geometry) {}
+MockNonlinearBeamformer::MockNonlinearBeamformer(
+    const std::vector<Point>& array_geometry)
+    : NonlinearBeamformer(array_geometry) {
+}
 
-MockBeamformer::~MockBeamformer() {}
+MockNonlinearBeamformer::~MockNonlinearBeamformer() {}
 
 }  // namespace webrtc
@@ -14,14 +14,14 @@
 #include <vector>
 
 #include "testing/gmock/include/gmock/gmock.h"
-#include "webrtc/modules/audio_processing/beamformer/beamformer.h"
+#include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h"
 
 namespace webrtc {
 
-class MockBeamformer : public Beamformer {
+class MockNonlinearBeamformer : public NonlinearBeamformer {
  public:
-  explicit MockBeamformer(const std::vector<Point>& array_geometry);
-  ~MockBeamformer() override;
+  explicit MockNonlinearBeamformer(const std::vector<Point>& array_geometry);
+  ~MockNonlinearBeamformer() override;
 
   MOCK_METHOD2(Initialize, void(int chunk_size_ms, int sample_rate_hz));
   MOCK_METHOD2(ProcessChunk, void(const ChannelBuffer<float>* input,
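For reference, the renamed mock can be driven directly with gmock expectations; a minimal hypothetical test sketch (test name and values are made up, not part of this CL):

#include <vector>

#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h"

TEST(MockNonlinearBeamformerSketch, ForwardsInitialize) {
  std::vector<webrtc::Point> geometry;
  geometry.push_back(webrtc::Point(0.f, 0.f, 0.f));
  geometry.push_back(webrtc::Point(0.05f, 0.f, 0.f));
  testing::NiceMock<webrtc::MockNonlinearBeamformer> beamformer(geometry);
  // Expect exactly one Initialize() call for a 10 ms chunk at 16 kHz.
  EXPECT_CALL(beamformer, Initialize(10, 16000));
  beamformer.Initialize(10, 16000);
}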
@@ -10,10 +10,11 @@
 
 #define _USE_MATH_DEFINES
 
-#include "webrtc/modules/audio_processing/beamformer/beamformer.h"
+#include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h"
 
 #include <algorithm>
 #include <cmath>
+#include <vector>
 
 #include "webrtc/base/arraysize.h"
 #include "webrtc/common_audio/window_generator.h"
@@ -174,13 +175,14 @@ std::vector<Point> GetCenteredArray(std::vector<Point> array_geometry) {
 
 }  // namespace
 
-Beamformer::Beamformer(const std::vector<Point>& array_geometry)
-    : num_input_channels_(array_geometry.size()),
+NonlinearBeamformer::NonlinearBeamformer(
+    const std::vector<Point>& array_geometry)
+    : num_input_channels_(array_geometry.size()),
       array_geometry_(GetCenteredArray(array_geometry)) {
   WindowGenerator::KaiserBesselDerived(kAlpha, kFftSize, window_);
 }
 
-void Beamformer::Initialize(int chunk_size_ms, int sample_rate_hz) {
+void NonlinearBeamformer::Initialize(int chunk_size_ms, int sample_rate_hz) {
   chunk_length_ = sample_rate_hz / (1000.f / chunk_size_ms);
   sample_rate_hz_ = sample_rate_hz;
   low_average_start_bin_ =
@@ -230,7 +232,7 @@ void Beamformer::Initialize(int chunk_size_ms, int sample_rate_hz) {
   }
 }
 
-void Beamformer::InitDelaySumMasks() {
+void NonlinearBeamformer::InitDelaySumMasks() {
   for (int f_ix = 0; f_ix < kNumFreqBins; ++f_ix) {
     delay_sum_masks_[f_ix].Resize(1, num_input_channels_);
     CovarianceMatrixGenerator::PhaseAlignmentMasks(f_ix,
@@ -250,7 +252,7 @@ void Beamformer::InitDelaySumMasks() {
   }
 }
 
-void Beamformer::InitTargetCovMats() {
+void NonlinearBeamformer::InitTargetCovMats() {
   for (int i = 0; i < kNumFreqBins; ++i) {
     target_cov_mats_[i].Resize(num_input_channels_, num_input_channels_);
     TransposedConjugatedProduct(delay_sum_masks_[i], &target_cov_mats_[i]);
@@ -259,7 +261,7 @@ void Beamformer::InitTargetCovMats() {
   }
 }
 
-void Beamformer::InitInterfCovMats() {
+void NonlinearBeamformer::InitInterfCovMats() {
   for (int i = 0; i < kNumFreqBins; ++i) {
     interf_cov_mats_[i].Resize(num_input_channels_, num_input_channels_);
     ComplexMatrixF uniform_cov_mat(num_input_channels_, num_input_channels_);
@@ -291,7 +293,7 @@ void Beamformer::InitInterfCovMats() {
   }
 }
 
-void Beamformer::ProcessChunk(const ChannelBuffer<float>* input,
+void NonlinearBeamformer::ProcessChunk(const ChannelBuffer<float>* input,
                               ChannelBuffer<float>* output) {
   DCHECK_EQ(input->num_channels(), num_input_channels_);
   DCHECK_EQ(input->num_frames_per_band(), chunk_length_);
@@ -321,7 +323,7 @@ void Beamformer::ProcessChunk(const ChannelBuffer<float>* input,
   }
 }
 
-void Beamformer::ProcessAudioBlock(const complex_f* const* input,
+void NonlinearBeamformer::ProcessAudioBlock(const complex_f* const* input,
                                    int num_input_channels,
                                    int num_freq_bins,
                                    int num_output_channels,
@@ -371,11 +373,12 @@ void Beamformer::ProcessAudioBlock(const complex_f* const* input,
   EstimateTargetPresence();
 }
 
-float Beamformer::CalculatePostfilterMask(const ComplexMatrixF& interf_cov_mat,
-                                          float rpsiw,
-                                          float ratio_rxiw_rxim,
-                                          float rmw_r,
-                                          float mask_threshold) {
+float NonlinearBeamformer::CalculatePostfilterMask(
+    const ComplexMatrixF& interf_cov_mat,
+    float rpsiw,
+    float ratio_rxiw_rxim,
+    float rmw_r,
+    float mask_threshold) {
   float rpsim = Norm(interf_cov_mat, eig_m_);
 
   // Find lambda.
@@ -394,7 +397,7 @@ float Beamformer::CalculatePostfilterMask(const ComplexMatrixF& interf_cov_mat,
   return mask;
 }
 
-void Beamformer::ApplyMasks(const complex_f* const* input,
+void NonlinearBeamformer::ApplyMasks(const complex_f* const* input,
                             complex_f* const* output) {
   complex_f* output_channel = output[0];
   for (int f_ix = 0; f_ix < kNumFreqBins; ++f_ix) {
@@ -410,14 +413,14 @@ void Beamformer::ApplyMasks(const complex_f* const* input,
   }
 }
 
-void Beamformer::ApplyMaskSmoothing() {
+void NonlinearBeamformer::ApplyMaskSmoothing() {
   for (int i = 0; i < kNumFreqBins; ++i) {
     postfilter_mask_[i] = kMaskSmoothAlpha * new_mask_[i] +
                           (1.f - kMaskSmoothAlpha) * postfilter_mask_[i];
   }
 }
 
-void Beamformer::ApplyLowFrequencyCorrection() {
+void NonlinearBeamformer::ApplyLowFrequencyCorrection() {
   float low_frequency_mask = 0.f;
   for (int i = low_average_start_bin_; i < low_average_end_bin_; ++i) {
     low_frequency_mask += postfilter_mask_[i];
@@ -430,7 +433,7 @@ void Beamformer::ApplyLowFrequencyCorrection() {
   }
 }
 
-void Beamformer::ApplyHighFrequencyCorrection() {
+void NonlinearBeamformer::ApplyHighFrequencyCorrection() {
   high_pass_postfilter_mask_ = 0.f;
   for (int i = high_average_start_bin_; i < high_average_end_bin_; ++i) {
     high_pass_postfilter_mask_ += postfilter_mask_[i];
@@ -443,7 +446,7 @@ void Beamformer::ApplyHighFrequencyCorrection() {
   }
 }
 
-void Beamformer::EstimateTargetPresence() {
+void NonlinearBeamformer::EstimateTargetPresence() {
   const int quantile = (1.f - kMaskQuantile) * high_average_end_bin_ +
                        kMaskQuantile * low_average_start_bin_;
   std::nth_element(new_mask_ + low_average_start_bin_,
@@ -8,8 +8,10 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_BEAMFORMER_BEAMFORMER_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_BEAMFORMER_BEAMFORMER_H_
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_BEAMFORMER_NONLINEAR_BEAMFORMER_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_BEAMFORMER_NONLINEAR_BEAMFORMER_H_
 
+#include <vector>
+
 #include "webrtc/common_audio/lapped_transform.h"
 #include "webrtc/modules/audio_processing/beamformer/complex_matrix.h"
@@ -25,15 +27,14 @@ namespace webrtc {
 // Beamforming Postprocessor" by Bastiaan Kleijn.
 //
 // TODO: Target angle assumed to be 0. Parameterize target angle.
-class Beamformer : public LappedTransform::Callback {
+class NonlinearBeamformer : public LappedTransform::Callback {
  public:
   // At the moment it only accepts uniform linear microphone arrays. Using the
   // first microphone as a reference position [0, 0, 0] is a natural choice.
-  explicit Beamformer(const std::vector<Point>& array_geometry);
-  virtual ~Beamformer() {};
+  explicit NonlinearBeamformer(const std::vector<Point>& array_geometry);
 
   // Sample rate corresponds to the lower band.
-  // Needs to be called before the Beamformer can be used.
+  // Needs to be called before the NonlinearBeamformer can be used.
   virtual void Initialize(int chunk_size_ms, int sample_rate_hz);
 
   // Process one time-domain chunk of audio. The audio is expected to be split
@@ -160,4 +161,4 @@ class Beamformer : public LappedTransform::Callback {
 
 }  // namespace webrtc
 
-#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_BEAMFORMER_BEAMFORMER_H_
+#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_BEAMFORMER_NONLINEAR_BEAMFORMER_H_
@@ -9,9 +9,10 @@
  */
 
 #include <iostream>
+#include <vector>
 
 #include "gflags/gflags.h"
-#include "webrtc/modules/audio_processing/beamformer/beamformer.h"
+#include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h"
 #include "webrtc/modules/audio_processing/beamformer/pcm_utils.h"
 
 DEFINE_int32(sample_rate,
@@ -59,7 +60,7 @@ int main(int argc, char* argv[]) {
   for (int i = 0; i < FLAGS_num_input_channels; ++i) {
     array_geometry.push_back(webrtc::Point(i * FLAGS_mic_spacing, 0.f, 0.f));
   }
-  webrtc::Beamformer bf(array_geometry);
+  webrtc::NonlinearBeamformer bf(array_geometry);
   bf.Initialize(kChunkTimeMilliseconds, FLAGS_sample_rate);
   while (true) {
     size_t samples_read = webrtc::PcmReadToFloat(read_file,
@@ -25,7 +25,7 @@ struct AecCore;
 namespace webrtc {
 
 class AudioFrame;
-class Beamformer;
+class NonlinearBeamformer;
 class EchoCancellation;
 class EchoControlMobile;
 class GainControl;
@@ -201,7 +201,8 @@ class AudioProcessing {
   // Allows passing in an optional configuration at create-time.
   static AudioProcessing* Create(const Config& config);
   // Only for testing.
-  static AudioProcessing* Create(const Config& config, Beamformer* beamformer);
+  static AudioProcessing* Create(const Config& config,
+                                 NonlinearBeamformer* beamformer);
   virtual ~AudioProcessing() {}
 
   // Initializes internal states, while retaining all user settings. This
@@ -19,7 +19,7 @@
 #include "webrtc/common_audio/resampler/include/push_resampler.h"
 #include "webrtc/common_audio/resampler/push_sinc_resampler.h"
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
-#include "webrtc/modules/audio_processing/beamformer/mock_beamformer.h"
+#include "webrtc/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h"
 #include "webrtc/modules/audio_processing/common.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 #include "webrtc/modules/audio_processing/test/test_utils.h"
@@ -1228,8 +1228,8 @@ TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
   geometry.push_back(webrtc::Point(0.f, 0.f, 0.f));
   geometry.push_back(webrtc::Point(0.05f, 0.f, 0.f));
   config.Set<Beamforming>(new Beamforming(true, geometry));
-  testing::NiceMock<MockBeamformer>* beamformer =
-      new testing::NiceMock<MockBeamformer>(geometry);
+  testing::NiceMock<MockNonlinearBeamformer>* beamformer =
+      new testing::NiceMock<MockNonlinearBeamformer>(geometry);
   rtc::scoped_ptr<AudioProcessing> apm(
       AudioProcessing::Create(config, beamformer));
   EXPECT_EQ(kNoErr, apm->gain_control()->Enable(true));
@@ -170,8 +170,8 @@
         'audio_processing/beamformer/complex_matrix_unittest.cc',
         'audio_processing/beamformer/covariance_matrix_generator_unittest.cc',
         'audio_processing/beamformer/matrix_unittest.cc',
-        'audio_processing/beamformer/mock_beamformer.cc',
-        'audio_processing/beamformer/mock_beamformer.h',
+        'audio_processing/beamformer/mock_nonlinear_beamformer.cc',
+        'audio_processing/beamformer/mock_nonlinear_beamformer.h',
         'audio_processing/beamformer/pcm_utils.cc',
         'audio_processing/beamformer/pcm_utils.h',
         'audio_processing/echo_cancellation_impl_unittest.cc',