Stop using the beamformer inside APM

Removes the use of an injected/enabled beamformer in APM and marks the
corresponding API parts as deprecated.
The beamformer initialization and process calls are removed, and all
enabled/disabled flags are replaced by the assumption that no beamforming
is used. Additionally, an AGC test that relied on the beamformer as a VAD
is removed.

Bug: webrtc:9402
Change-Id: I0d3d0b9773da083ce43c28045db9a77278f59f95
Reviewed-on: https://webrtc-review.googlesource.com/83341
Reviewed-by: Minyue Li <minyue@webrtc.org>
Commit-Queue: Sam Zackrisson <saza@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23643}
Authored by Sam Zackrisson on 2018-06-14 10:11:35 +02:00; committed by Commit Bot
parent 431abd989b
commit 9394f6fda1
5 changed files with 12 additions and 171 deletions
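For orientation, a minimal sketch of how APM construction looks once the beamformer injection path is gone, based on the AudioProcessingBuilder usage visible in the removed test below. The helper name and include path are illustrative and not part of this change; only the builder and gain_control() calls are taken from the existing code.

// Sketch: building an APM after this change. The SetNonlinearBeamformer()
// step used in the removed test is simply dropped; APM now behaves as if no
// beamformer is present.
#include <memory>

#include "modules/audio_processing/include/audio_processing.h"

std::unique_ptr<webrtc::AudioProcessing> CreateApmWithoutBeamformer() {
  // No Beamforming config is set; after this change APM assumes no
  // beamforming in any case.
  webrtc::Config config;
  std::unique_ptr<webrtc::AudioProcessing> apm(
      webrtc::AudioProcessingBuilder().Create(config));
  apm->gain_control()->Enable(true);  // AGC is configured as before.
  return apm;
}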


@@ -1300,95 +1300,6 @@ TEST_F(ApmTest, ManualVolumeChangeIsPossible) {
  }
}
#if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS)
TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
  const int kSampleRateHz = 16000;
  const size_t kSamplesPerChannel =
      static_cast<size_t>(AudioProcessing::kChunkSizeMs * kSampleRateHz / 1000);
  const size_t kNumInputChannels = 2;
  const size_t kNumOutputChannels = 1;
  const size_t kNumChunks = 700;
  const float kScaleFactor = 0.25f;
  Config config;
  std::vector<webrtc::Point> geometry;
  geometry.push_back(webrtc::Point(0.f, 0.f, 0.f));
  geometry.push_back(webrtc::Point(0.05f, 0.f, 0.f));
  config.Set<Beamforming>(new Beamforming(true, geometry));
  testing::NiceMock<MockNonlinearBeamformer>* beamformer =
      new testing::NiceMock<MockNonlinearBeamformer>(geometry, 1u);
  std::unique_ptr<AudioProcessing> apm(
      AudioProcessingBuilder()
          .SetNonlinearBeamformer(
              std::unique_ptr<webrtc::NonlinearBeamformer>(beamformer))
          .Create(config));
  EXPECT_EQ(kNoErr, apm->gain_control()->Enable(true));
  ChannelBuffer<float> src_buf(kSamplesPerChannel, kNumInputChannels);
  ChannelBuffer<float> dest_buf(kSamplesPerChannel, kNumOutputChannels);
  const size_t max_length = kSamplesPerChannel * std::max(kNumInputChannels,
                                                          kNumOutputChannels);
  std::unique_ptr<int16_t[]> int_data(new int16_t[max_length]);
  std::unique_ptr<float[]> float_data(new float[max_length]);
  std::string filename = ResourceFilePath("far", kSampleRateHz);
  FILE* far_file = fopen(filename.c_str(), "rb");
  ASSERT_TRUE(far_file != NULL) << "Could not open file " << filename << "\n";
  const int kDefaultVolume = apm->gain_control()->stream_analog_level();
  const int kDefaultCompressionGain =
      apm->gain_control()->compression_gain_db();
  bool is_target = false;
  EXPECT_CALL(*beamformer, is_target_present())
      .WillRepeatedly(testing::ReturnPointee(&is_target));
  for (size_t i = 0; i < kNumChunks; ++i) {
    ASSERT_TRUE(ReadChunk(far_file,
                          int_data.get(),
                          float_data.get(),
                          &src_buf));
    for (size_t j = 0; j < kNumInputChannels; ++j) {
      for (size_t k = 0; k < kSamplesPerChannel; ++k) {
        src_buf.channels()[j][k] *= kScaleFactor;
      }
    }
    EXPECT_EQ(kNoErr,
              apm->ProcessStream(src_buf.channels(),
                                 src_buf.num_frames(),
                                 kSampleRateHz,
                                 LayoutFromChannels(src_buf.num_channels()),
                                 kSampleRateHz,
                                 LayoutFromChannels(dest_buf.num_channels()),
                                 dest_buf.channels()));
  }
  EXPECT_EQ(kDefaultVolume,
            apm->gain_control()->stream_analog_level());
  EXPECT_EQ(kDefaultCompressionGain,
            apm->gain_control()->compression_gain_db());
  rewind(far_file);
  is_target = true;
  for (size_t i = 0; i < kNumChunks; ++i) {
    ASSERT_TRUE(ReadChunk(far_file,
                          int_data.get(),
                          float_data.get(),
                          &src_buf));
    for (size_t j = 0; j < kNumInputChannels; ++j) {
      for (size_t k = 0; k < kSamplesPerChannel; ++k) {
        src_buf.channels()[j][k] *= kScaleFactor;
      }
    }
    EXPECT_EQ(kNoErr,
              apm->ProcessStream(src_buf.channels(),
                                 src_buf.num_frames(),
                                 kSampleRateHz,
                                 LayoutFromChannels(src_buf.num_channels()),
                                 kSampleRateHz,
                                 LayoutFromChannels(dest_buf.num_channels()),
                                 dest_buf.channels()));
  }
  EXPECT_LT(kDefaultVolume,
            apm->gain_control()->stream_analog_level());
  EXPECT_LT(kDefaultCompressionGain,
            apm->gain_control()->compression_gain_db());
  ASSERT_EQ(0, fclose(far_file));
}
#endif
TEST_F(ApmTest, NoiseSuppression) {
  // Test valid suppression levels.
  NoiseSuppression::Level level[] = {