Add an output capacity parameter to ACMResampler::Resample10Msec()

Also adds a unit test to verify that a desired output frequency of 0
passed to AudioCodingModule::PlayoutData10Ms() is rejected as invalid.

R=turaj@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/14369005

git-svn-id: http://webrtc.googlecode.com/svn/trunk@5974 4adac7df-926f-26a2-2b94-8c16560cd09d
Author: henrik.lundin@webrtc.org
Date: 2014-04-24 19:05:33 +00:00
Parent: 103657b484
Commit: 439a4c49f9
6 changed files with 56 additions and 27 deletions

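Note: the new argument makes every call site state how many int16_t samples the output buffer can hold; inside the ACM the destination is an AudioFrame, so the call sites in this change pass AudioFrame::kMaxDataSizeSamples. A minimal sketch of a caller under the updated signature (the helper name, include paths, namespace, and sample rates below are illustrative assumptions, not part of this change):

#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"  // Assumed path.
#include "webrtc/modules/interface/module_common_types.h"         // AudioFrame (assumed path).

// Hypothetical helper: resample one 10 ms mono block from 16 kHz to 48 kHz.
// The destination is sized like AudioFrame::data_, so its capacity,
// AudioFrame::kMaxDataSizeSamples, is passed as out_capacity_samples.
int ResampleOneBlock(webrtc::acm2::ACMResampler* resampler,
                     const int16_t* in_audio,
                     int16_t* out_audio) {
  const int kInFreqHz = 16000;
  const int kOutFreqHz = 48000;
  const int kNumChannels = 1;
  // Returns samples per channel on success, or -1 if resampling fails or the
  // output capacity is too small for a full 10 ms block.
  return resampler->Resample10Msec(in_audio,
                                   kInFreqHz,
                                   kOutFreqHz,
                                   kNumChannels,
                                   webrtc::AudioFrame::kMaxDataSizeSamples,
                                   out_audio);
}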

@@ -428,9 +428,13 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
   if (ptr_audio_buffer == audio_buffer_) {
     // Data is written to local buffer.
     if (need_resampling) {
-      samples_per_channel = resampler_.Resample10Msec(
-          audio_buffer_, current_sample_rate_hz_, desired_freq_hz,
-          num_channels, audio_frame->data_);
+      samples_per_channel =
+          resampler_.Resample10Msec(audio_buffer_,
+                                    current_sample_rate_hz_,
+                                    desired_freq_hz,
+                                    num_channels,
+                                    AudioFrame::kMaxDataSizeSamples,
+                                    audio_frame->data_);
       if (samples_per_channel < 0) {
         LOG_FERR0(LS_ERROR, "AcmReceiver::GetAudio") << "Resampler Failed.";
         return -1;
@@ -444,9 +448,13 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
     // Data is written into |audio_frame|.
     if (need_resampling) {
       // We might end up here ONLY if codec is changed.
-      samples_per_channel = resampler_.Resample10Msec(
-          audio_frame->data_, current_sample_rate_hz_, desired_freq_hz,
-          num_channels, audio_buffer_);
+      samples_per_channel =
+          resampler_.Resample10Msec(audio_frame->data_,
+                                    current_sample_rate_hz_,
+                                    desired_freq_hz,
+                                    num_channels,
+                                    AudioFrame::kMaxDataSizeSamples,
+                                    audio_buffer_);
       if (samples_per_channel < 0) {
         LOG_FERR0(LS_ERROR, "AcmReceiver::GetAudio") << "Resampler Failed.";
         return -1;


@@ -28,10 +28,15 @@ int ACMResampler::Resample10Msec(const int16_t* in_audio,
                                  int in_freq_hz,
                                  int out_freq_hz,
                                  int num_audio_channels,
+                                 int out_capacity_samples,
                                  int16_t* out_audio) {
   int in_length = in_freq_hz * num_audio_channels / 100;
   int out_length = out_freq_hz * num_audio_channels / 100;
   if (in_freq_hz == out_freq_hz) {
+    if (out_capacity_samples < in_length) {
+      assert(false);
+      return -1;
+    }
     memcpy(out_audio, in_audio, in_length * sizeof(int16_t));
     return in_length / num_audio_channels;
   }
@@ -43,9 +48,15 @@ int ACMResampler::Resample10Msec(const int16_t* in_audio,
     return -1;
   }
 
-  out_length = resampler_.Resample(in_audio, in_length, out_audio, out_length);
+  out_length =
+      resampler_.Resample(in_audio, in_length, out_audio, out_capacity_samples);
   if (out_length == -1) {
-    LOG_FERR4(LS_ERROR, Resample, in_audio, in_length, out_audio, out_length);
+    LOG_FERR4(LS_ERROR,
+              Resample,
+              in_audio,
+              in_length,
+              out_audio,
+              out_capacity_samples);
     return -1;
   }

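The equal-rate fast path now checks the capacity before the memcpy. A self-contained sketch of that guard with the surrounding class stripped away (the free-function wrapper is illustrative; the checks mirror the diff above):

#include <assert.h>
#include <stdint.h>
#include <string.h>

// Illustrative stand-alone version of the new equal-rate guard: when input and
// output rates match, the samples are copied verbatim, so the output must have
// room for one full 10 ms block (in_freq_hz * num_audio_channels / 100 samples).
int CopyTenMsIfRoom(const int16_t* in_audio,
                    int in_freq_hz,
                    int num_audio_channels,
                    int out_capacity_samples,
                    int16_t* out_audio) {
  int in_length = in_freq_hz * num_audio_channels / 100;
  if (out_capacity_samples < in_length) {
    assert(false);  // The caller handed us a buffer that is too small.
    return -1;
  }
  memcpy(out_audio, in_audio, in_length * sizeof(int16_t));
  return in_length / num_audio_channels;  // Samples per channel.
}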

@@ -26,6 +26,7 @@ class ACMResampler {
                      int in_freq_hz,
                      int out_freq_hz,
                      int num_audio_channels,
+                     int out_capacity_samples,
                      int16_t* out_audio);
 
  private:


@@ -1362,9 +1362,13 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
     // The result of the resampler is written to output frame.
     dest_ptr_audio = preprocess_frame_.data_;
-    preprocess_frame_.samples_per_channel_ = resampler_.Resample10Msec(
-        src_ptr_audio, in_frame.sample_rate_hz_, send_codec_inst_.plfreq,
-        preprocess_frame_.num_channels_, dest_ptr_audio);
+    preprocess_frame_.samples_per_channel_ =
+        resampler_.Resample10Msec(src_ptr_audio,
+                                  in_frame.sample_rate_hz_,
+                                  send_codec_inst_.plfreq,
+                                  preprocess_frame_.num_channels_,
+                                  AudioFrame::kMaxDataSizeSamples,
+                                  dest_ptr_audio);
     if (preprocess_frame_.samples_per_channel_ < 0) {
       WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,


@@ -60,7 +60,7 @@ class AudioCodingModuleTest : public ::testing::Test {
   AudioCodingModuleTest()
       : id_(1),
         rtp_utility_(new RtpUtility(kFrameSizeSamples, kPayloadType)),
-        acm2_(AudioCodingModule::Create(id_)) {}
+        acm_(AudioCodingModule::Create(id_)) {}
 
   ~AudioCodingModuleTest() {}
@@ -72,7 +72,7 @@ class AudioCodingModuleTest : public ::testing::Test {
     codec.pltype = kPayloadType;
     // Register L16 codec in ACMs.
-    ASSERT_EQ(0, acm2_->RegisterReceiveCodec(codec));
+    ASSERT_EQ(0, acm_->RegisterReceiveCodec(codec));
     rtp_utility_->Populate(&rtp_header_);
   }
@@ -82,20 +82,20 @@ class AudioCodingModuleTest : public ::testing::Test {
     const uint8_t kPayload[kPayloadSizeBytes] = {0};
     ASSERT_EQ(0,
-              acm2_->IncomingPacket(kPayload, kPayloadSizeBytes, rtp_header_));
-    ASSERT_EQ(0, acm2_->PlayoutData10Ms(-1, &audio_frame));
+              acm_->IncomingPacket(kPayload, kPayloadSizeBytes, rtp_header_));
+    ASSERT_EQ(0, acm_->PlayoutData10Ms(-1, &audio_frame));
     rtp_utility_->Forward(&rtp_header_);
   }
 
   void JustPullAudio() {
     AudioFrame audio_frame;
-    ASSERT_EQ(0, acm2_->PlayoutData10Ms(-1, &audio_frame));
+    ASSERT_EQ(0, acm_->PlayoutData10Ms(-1, &audio_frame));
   }
 
   const int id_;
   scoped_ptr<RtpUtility> rtp_utility_;
-  scoped_ptr<AudioCodingModule> acm2_;
+  scoped_ptr<AudioCodingModule> acm_;
   WebRtcRTPHeader rtp_header_;
 };
@@ -104,7 +104,7 @@ class AudioCodingModuleTest : public ::testing::Test {
 // all fields have to be zero.
 TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(InitializedToZero)) {
   AudioDecodingCallStats stats;
-  acm2_->GetDecodingCallStatistics(&stats);
+  acm_->GetDecodingCallStatistics(&stats);
   EXPECT_EQ(0, stats.calls_to_neteq);
   EXPECT_EQ(0, stats.calls_to_silence_generator);
   EXPECT_EQ(0, stats.decoded_normal);
@@ -119,15 +119,14 @@ TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(SilenceGeneratorCalled)) {
   AudioDecodingCallStats stats;
   const int kInitialDelay = 100;
-  acm2_->SetInitialPlayoutDelay(kInitialDelay);
-  AudioFrame audio_frame;
+  acm_->SetInitialPlayoutDelay(kInitialDelay);
   int num_calls = 0;
   for (int time_ms = 0; time_ms < kInitialDelay;
        time_ms += kFrameSizeMs, ++num_calls) {
     InsertPacketAndPullAudio();
   }
-  acm2_->GetDecodingCallStatistics(&stats);
+  acm_->GetDecodingCallStatistics(&stats);
   EXPECT_EQ(0, stats.calls_to_neteq);
   EXPECT_EQ(num_calls, stats.calls_to_silence_generator);
   EXPECT_EQ(0, stats.decoded_normal);
@@ -143,11 +142,10 @@ TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(NetEqCalls)) {
   AudioDecodingCallStats stats;
   const int kNumNormalCalls = 10;
-  AudioFrame audio_frame;
   for (int num_calls = 0; num_calls < kNumNormalCalls; ++num_calls) {
     InsertPacketAndPullAudio();
   }
-  acm2_->GetDecodingCallStatistics(&stats);
+  acm_->GetDecodingCallStatistics(&stats);
   EXPECT_EQ(kNumNormalCalls, stats.calls_to_neteq);
   EXPECT_EQ(0, stats.calls_to_silence_generator);
   EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
@@ -162,7 +160,7 @@ TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(NetEqCalls)) {
   for (int n = 0; n < kNumPlc + kNumPlcCng; ++n) {
     JustPullAudio();
   }
-  acm2_->GetDecodingCallStatistics(&stats);
+  acm_->GetDecodingCallStatistics(&stats);
   EXPECT_EQ(kNumNormalCalls + kNumPlc + kNumPlcCng, stats.calls_to_neteq);
   EXPECT_EQ(0, stats.calls_to_silence_generator);
   EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
@@ -174,7 +172,7 @@ TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(NetEqCalls)) {
 TEST_F(AudioCodingModuleTest, VerifyOutputFrame) {
   AudioFrame audio_frame;
   const int kSampleRateHz = 32000;
-  EXPECT_EQ(0, acm2_->PlayoutData10Ms(kSampleRateHz, &audio_frame));
+  EXPECT_EQ(0, acm_->PlayoutData10Ms(kSampleRateHz, &audio_frame));
   // The energy must be -1 in order to have the energy calculated later on in
   // the AudioConferenceMixer module.
   EXPECT_EQ(static_cast<uint32_t>(-1), audio_frame.energy_);
@@ -185,4 +183,9 @@ TEST_F(AudioCodingModuleTest, VerifyOutputFrame) {
   EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
 }
 
+TEST_F(AudioCodingModuleTest, FailOnZeroDesiredFrequency) {
+  AudioFrame audio_frame;
+  EXPECT_EQ(-1, acm_->PlayoutData10Ms(0, &audio_frame));
+}
+
 }  // namespace webrtc

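A hedged reading of the convention the new test pins down: -1 means no preferred playout rate, a positive value is an explicit target rate, and 0 is now rejected. A tiny illustrative check of that contract (this helper does not exist in the ACM; the real validation happens inside PlayoutData10Ms()):

// Illustrative only: the desired-frequency contract exercised by the tests.
//  -1 -> no preference, play out at the current decoder rate.
//  >0 -> resample to this rate.
//   0 -> invalid; PlayoutData10Ms() is expected to return -1.
bool IsValidDesiredPlayoutFreqHz(int desired_freq_hz) {
  return desired_freq_hz == -1 || desired_freq_hz > 0;
}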

@@ -211,8 +211,9 @@ void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
                    int frame_length, int percent_loss) {
   AudioFrame audio_frame;
   int32_t out_freq_hz_b = out_file_.SamplingFrequency();
-  int16_t audio[480 * 12 * 2];  // Can hold 120 ms stereo audio.
-  int16_t out_audio[480 * 12 * 2];  // Can hold 120 ms stereo audio.
+  const int kBufferSizeSamples = 480 * 12 * 2;  // Can hold 120 ms stereo audio.
+  int16_t audio[kBufferSizeSamples];
+  int16_t out_audio[kBufferSizeSamples];
   int16_t audio_type;
   int written_samples = 0;
   int read_samples = 0;
@@ -257,6 +258,7 @@ void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
                                         audio_frame.sample_rate_hz_,
                                         48000,
                                         channels,
+                                        kBufferSizeSamples - written_samples,
                                         &audio[written_samples]));
     written_samples += 480 * channels;