Add muted_output parameter to ACM

The new parameter indicates whether the output in the AudioFrame is
muted. If it is, the output samples are not written and should be
interpreted as all zero.

A version of AudioCodingModule::PlayoutData10Ms() without the new
parameter is kept while downstream dependencies are updated to the new
signature. (A caller-side sketch of the new signature follows the
commit metadata below.)

BUG=webrtc:5609

Review-Url: https://codereview.webrtc.org/1976913002
Cr-Commit-Position: refs/heads/master@{#12719}
Author: henrik.lundin
Date: 2016-05-13 03:45:24 -07:00
Committed by: Commit bot
Parent: 29dca2ce95
Commit: 834a6ea12b
7 changed files with 52 additions and 16 deletions
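A caller-side sketch of the new signature, for orientation before the
per-file diffs. Only the three-argument PlayoutData10Ms() and the
meaning of the muted flag come from this change; the helper function,
the 48 kHz playout rate, the raw output buffer, and the include paths
are illustrative assumptions about a typical consumer, not part of the
CL.

// Hypothetical consumer of the new API (not part of this CL). Assumes an
// initialized webrtc::AudioCodingModule and a 48 kHz playout path.
#include <cstring>

#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
#include "webrtc/modules/include/module_common_types.h"  // webrtc::AudioFrame

// Pulls 10 ms of playout audio into |out|. When |muted| comes back true, the
// frame's sample data was not written and must be treated as all zero.
bool Pull10MsPlayoutAudio(webrtc::AudioCodingModule* acm,
                          int16_t* out,
                          size_t out_capacity_samples) {
  webrtc::AudioFrame frame;
  bool muted = false;
  if (acm->PlayoutData10Ms(48000, &frame, &muted) != 0)
    return false;  // ACM/NetEq error.
  const size_t samples = frame.samples_per_channel_ * frame.num_channels_;
  if (samples > out_capacity_samples)
    return false;  // Caller's buffer is too small.
  if (muted) {
    std::memset(out, 0, samples * sizeof(int16_t));  // Interpret as silence.
  } else {
    std::memcpy(out, frame.data_, samples * sizeof(int16_t));
  }
  return true;
}

Call sites that are not yet migrated can keep using the two-argument
overload, which this CL implements by delegating to the new one and
DCHECKing that the output was not muted.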


@@ -158,8 +158,11 @@ void AcmReceiveTestOldApi::Run() {
     // Pull audio until time to insert packet.
     while (clock_.TimeInMilliseconds() < packet->time_ms()) {
       AudioFrame output_frame;
-      EXPECT_EQ(0, acm_->PlayoutData10Ms(output_freq_hz_, &output_frame));
+      bool muted;
+      EXPECT_EQ(0,
+                acm_->PlayoutData10Ms(output_freq_hz_, &output_frame, &muted));
       ASSERT_EQ(output_freq_hz_, output_frame.sample_rate_hz_);
+      ASSERT_FALSE(muted);
       const size_t samples_per_block =
           static_cast<size_t>(output_freq_hz_ * 10 / 1000);
       EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_);


@@ -132,16 +132,16 @@ int AcmReceiver::InsertPacket(const WebRtcRTPHeader& rtp_header,
   return 0;
 }

-int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
+int AcmReceiver::GetAudio(int desired_freq_hz,
+                          AudioFrame* audio_frame,
+                          bool* muted) {
   // Accessing members, take the lock.
   rtc::CritScope lock(&crit_sect_);

-  bool muted;
-  if (neteq_->GetAudio(audio_frame, &muted) != NetEq::kOK) {
+  if (neteq_->GetAudio(audio_frame, muted) != NetEq::kOK) {
     LOG(LERROR) << "AcmReceiver::GetAudio - NetEq Failed.";
     return -1;
   }
-  RTC_DCHECK(!muted);

   const int current_sample_rate_hz = neteq_->last_output_sample_rate_hz();


@@ -82,11 +82,13 @@ class AcmReceiver {
   //   Output:
   //   -audio_frame : an audio frame were output data and
   //                  associated parameters are written to.
+  //   -muted       : if true, the sample data in audio_frame is not
+  //                  populated, and must be interpreted as all zero.
   //
   //   Return value : 0 if OK.
   //                 -1 if NetEq returned an error.
   //
-  int GetAudio(int desired_freq_hz, AudioFrame* audio_frame);
+  int GetAudio(int desired_freq_hz, AudioFrame* audio_frame, bool* muted);

   //
   // Adds a new codec to the NetEq codec database.


@@ -285,7 +285,8 @@ TEST_F(AcmReceiverTestOldApi, MAYBE_SampleRate) {
     const int num_10ms_frames = codec.inst.pacsize / (codec.inst.plfreq / 100);
     InsertOnePacketOfSilence(codec.id);
     for (int k = 0; k < num_10ms_frames; ++k) {
-      EXPECT_EQ(0, receiver_->GetAudio(kOutSampleRateHz, &frame));
+      bool muted;
+      EXPECT_EQ(0, receiver_->GetAudio(kOutSampleRateHz, &frame, &muted));
     }
     EXPECT_EQ(codec.inst.plfreq, receiver_->last_output_sample_rate_hz());
   }
@@ -326,13 +327,15 @@ class AcmReceiverTestFaxModeOldApi : public AcmReceiverTestOldApi {
         rtc::CheckedDivExact(5 * output_sample_rate_hz, 8000);
     AudioFrame frame;
-    EXPECT_EQ(0, receiver_->GetAudio(output_sample_rate_hz, &frame));
+    bool muted;
+    EXPECT_EQ(0, receiver_->GetAudio(output_sample_rate_hz, &frame, &muted));
     // Expect timestamp = 0 before first packet is inserted.
     EXPECT_EQ(0u, frame.timestamp_);
     for (int i = 0; i < 5; ++i) {
       InsertOnePacketOfSilence(codec.id);
       for (int k = 0; k < num_10ms_frames; ++k) {
-        EXPECT_EQ(0, receiver_->GetAudio(output_sample_rate_hz, &frame));
+        EXPECT_EQ(0,
+                  receiver_->GetAudio(output_sample_rate_hz, &frame, &muted));
         EXPECT_EQ(expected_output_ts, frame.timestamp_);
         expected_output_ts += 10 * samples_per_ms;
         EXPECT_EQ(10 * samples_per_ms, frame.samples_per_channel_);
@@ -340,6 +343,7 @@ class AcmReceiverTestFaxModeOldApi : public AcmReceiverTestOldApi {
         EXPECT_EQ(output_channels, frame.num_channels_);
         EXPECT_EQ(AudioFrame::kNormalSpeech, frame.speech_type_);
         EXPECT_EQ(expected_vad_activity, frame.vad_activity_);
+        EXPECT_FALSE(muted);
       }
     }
   }
@@ -388,8 +392,10 @@ TEST_F(AcmReceiverTestOldApi, MAYBE_PostdecodingVad) {
   AudioFrame frame;
   for (int n = 0; n < kNumPackets; ++n) {
     InsertOnePacketOfSilence(codec.id);
-    for (int k = 0; k < num_10ms_frames; ++k)
-      ASSERT_EQ(0, receiver_->GetAudio(codec.inst.plfreq, &frame));
+    for (int k = 0; k < num_10ms_frames; ++k) {
+      bool muted;
+      ASSERT_EQ(0, receiver_->GetAudio(codec.inst.plfreq, &frame, &muted));
+    }
   }
   EXPECT_EQ(AudioFrame::kVadPassive, frame.vad_activity_);
 }
@@ -417,8 +423,10 @@ TEST_F(AcmReceiverTestPostDecodeVadPassiveOldApi, MAYBE_PostdecodingVad) {
   AudioFrame frame;
   for (int n = 0; n < kNumPackets; ++n) {
     InsertOnePacketOfSilence(codec.id);
-    for (int k = 0; k < num_10ms_frames; ++k)
-      ASSERT_EQ(0, receiver_->GetAudio(codec.inst.plfreq, &frame));
+    for (int k = 0; k < num_10ms_frames; ++k) {
+      bool muted;
+      ASSERT_EQ(0, receiver_->GetAudio(codec.inst.plfreq, &frame, &muted));
+    }
   }
   EXPECT_EQ(AudioFrame::kVadUnknown, frame.vad_activity_);
 }


@@ -791,9 +791,10 @@ int AudioCodingModuleImpl::SetMaximumPlayoutDelay(int time_ms) {
 // Get 10 milliseconds of raw audio data to play out.
 // Automatic resample to the requested frequency.
 int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
-                                           AudioFrame* audio_frame) {
+                                           AudioFrame* audio_frame,
+                                           bool* muted) {
   // GetAudio always returns 10 ms, at the requested sample rate.
-  if (receiver_.GetAudio(desired_freq_hz, audio_frame) != 0) {
+  if (receiver_.GetAudio(desired_freq_hz, audio_frame, muted) != 0) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                  "PlayoutData failed, RecOut Failed");
     return -1;
@@ -802,6 +803,14 @@ int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
   return 0;
 }

+int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
+                                           AudioFrame* audio_frame) {
+  bool muted;
+  int ret = PlayoutData10Ms(desired_freq_hz, audio_frame, &muted);
+  RTC_DCHECK(!muted);
+  return ret;
+}
+
 /////////////////////////////////////////
 //   Statistics
 //


@@ -163,6 +163,9 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
   // Get 10 milliseconds of raw audio data to play out, and
   // automatic resample to the requested frequency if > 0.
+  int PlayoutData10Ms(int desired_freq_hz,
+                      AudioFrame* audio_frame,
+                      bool* muted) override;
   int PlayoutData10Ms(int desired_freq_hz, AudioFrame* audio_frame) override;

   /////////////////////////////////////////


@@ -687,13 +687,24 @@ class AudioCodingModule {
   //                 and other relevant parameters, c.f.
   //                 module_common_types.h for the definition of
   //                 AudioFrame.
+  //   -muted      : if true, the sample data in audio_frame is not
+  //                 populated, and must be interpreted as all zero.
   //
   // Return value:
   //   -1 if the function fails,
   //    0 if the function succeeds.
   //
   virtual int32_t PlayoutData10Ms(int32_t desired_freq_hz,
-                                  AudioFrame* audio_frame) = 0;
+                                  AudioFrame* audio_frame,
+                                  bool* muted) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // Same as above, but without the muted parameter. This methods should not be
+  // used if enable_fast_accelerate was set to true in NetEq::Config.
+  // TODO(henrik.lundin) Remove this method when downstream dependencies are
+  // ready.
+  virtual int32_t PlayoutData10Ms(int32_t desired_freq_hz,
+                                  AudioFrame* audio_frame) = 0;
+
   ///////////////////////////////////////////////////////////////////////////
   //   Codec specific