We changed the return type of Encode() and EncodeInternal() from bool to void in this issue:

https://webrtc-codereview.appspot.com/38279004/
Now we don't have to pass EncodedInfo as an output parameter, but can return it instead. This also has the benefit of making it clear that EncodeInternal() needs to fill in this info.

R=kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/43839004

Cr-Commit-Position: refs/heads/master@{#8749}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8749 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
jmarusic@webrtc.org
2015-03-17 12:12:17 +00:00
parent 73d763e71f
commit 0cb612b43b
21 changed files with 222 additions and 237 deletions

View File

@ -109,13 +109,12 @@ void AudioEncoderCng::SetProjectedPacketLossRate(double fraction) {
speech_encoder_->SetProjectedPacketLossRate(fraction);
}
void AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded,
EncodedInfo* info) {
AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
uint32_t rtp_timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
CHECK_GE(max_encoded_bytes, static_cast<size_t>(num_cng_coefficients_ + 1));
info->encoded_bytes = 0;
const int num_samples = SampleRateHz() / 100 * NumChannels();
if (speech_buffer_.empty()) {
CHECK_EQ(frames_in_buffer_, 0);
@ -126,7 +125,7 @@ void AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,
}
++frames_in_buffer_;
if (frames_in_buffer_ < speech_encoder_->Num10MsFramesInNextPacket()) {
return;
return kZeroEncodedBytes;
}
CHECK_LE(frames_in_buffer_ * 10, kMaxFrameSizeMs)
<< "Frame size cannot be larger than " << kMaxFrameSizeMs
@ -159,14 +158,15 @@ void AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,
samples_per_10ms_frame * blocks_in_second_vad_call, SampleRateHz());
}
EncodedInfo info;
switch (activity) {
case Vad::kPassive: {
EncodePassive(max_encoded_bytes, encoded, info);
info = EncodePassive(max_encoded_bytes, encoded);
last_frame_active_ = false;
break;
}
case Vad::kActive: {
EncodeActive(max_encoded_bytes, encoded, info);
info = EncodeActive(max_encoded_bytes, encoded);
last_frame_active_ = true;
break;
}
@ -178,15 +178,17 @@ void AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,
speech_buffer_.clear();
frames_in_buffer_ = 0;
return info;
}
void AudioEncoderCng::EncodePassive(size_t max_encoded_bytes,
uint8_t* encoded,
EncodedInfo* info) {
AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
size_t max_encoded_bytes,
uint8_t* encoded) {
bool force_sid = last_frame_active_;
bool output_produced = false;
const size_t samples_per_10ms_frame = SamplesPer10msFrame();
CHECK_GE(max_encoded_bytes, frames_in_buffer_ * samples_per_10ms_frame);
AudioEncoder::EncodedInfo info;
for (int i = 0; i < frames_in_buffer_; ++i) {
int16_t encoded_bytes_tmp = 0;
CHECK_GE(WebRtcCng_Encode(cng_inst_.get(),
@ -195,30 +197,32 @@ void AudioEncoderCng::EncodePassive(size_t max_encoded_bytes,
encoded, &encoded_bytes_tmp, force_sid), 0);
if (encoded_bytes_tmp > 0) {
CHECK(!output_produced);
info->encoded_bytes = static_cast<size_t>(encoded_bytes_tmp);
info.encoded_bytes = static_cast<size_t>(encoded_bytes_tmp);
output_produced = true;
force_sid = false;
}
}
info->encoded_timestamp = first_timestamp_in_buffer_;
info->payload_type = cng_payload_type_;
info->send_even_if_empty = true;
info->speech = false;
info.encoded_timestamp = first_timestamp_in_buffer_;
info.payload_type = cng_payload_type_;
info.send_even_if_empty = true;
info.speech = false;
return info;
}
void AudioEncoderCng::EncodeActive(size_t max_encoded_bytes,
uint8_t* encoded,
EncodedInfo* info) {
AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive(
size_t max_encoded_bytes,
uint8_t* encoded) {
const size_t samples_per_10ms_frame = SamplesPer10msFrame();
AudioEncoder::EncodedInfo info;
for (int i = 0; i < frames_in_buffer_; ++i) {
speech_encoder_->Encode(first_timestamp_in_buffer_,
&speech_buffer_[i * samples_per_10ms_frame],
samples_per_10ms_frame, max_encoded_bytes,
encoded, info);
info = speech_encoder_->Encode(
first_timestamp_in_buffer_, &speech_buffer_[i * samples_per_10ms_frame],
samples_per_10ms_frame, max_encoded_bytes, encoded);
if (i < frames_in_buffer_ - 1) {
CHECK_EQ(info->encoded_bytes, 0u) << "Encoder delivered data too early.";
CHECK_EQ(info.encoded_bytes, 0u) << "Encoder delivered data too early.";
}
}
return info;
}
size_t AudioEncoderCng::SamplesPer10msFrame() const {

View File

@ -75,9 +75,8 @@ class AudioEncoderCngTest : public ::testing::Test {
void Encode() {
ASSERT_TRUE(cng_) << "Must call CreateCng() first.";
encoded_info_ = AudioEncoder::EncodedInfo();
cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
encoded_.size(), &encoded_[0], &encoded_info_);
encoded_info_ = cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
encoded_.size(), &encoded_[0]);
timestamp_ += num_audio_samples_10ms_;
}
@ -92,24 +91,24 @@ class AudioEncoderCngTest : public ::testing::Test {
.WillRepeatedly(Return(active_speech ? Vad::kActive : Vad::kPassive));
// Don't expect any calls to the encoder yet.
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
for (int i = 0; i < blocks_per_frame - 1; ++i) {
Encode();
EXPECT_EQ(0u, encoded_info_.encoded_bytes);
}
AudioEncoder::EncodedInfo info;
if (active_speech) {
// Now expect |blocks_per_frame| calls to the encoder in sequence.
// Let the speech codec mock return true and set the number of encoded
// bytes to |kMockReturnEncodedBytes|.
InSequence s;
for (int j = 0; j < blocks_per_frame - 1; ++j) {
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillOnce(SetArgPointee<4>(info));
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
.WillOnce(Return(AudioEncoder::kZeroEncodedBytes));
}
AudioEncoder::EncodedInfo info;
info.encoded_bytes = kMockReturnEncodedBytes;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillOnce(SetArgPointee<4>(info));
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
.WillOnce(Return(info));
}
Encode();
if (active_speech) {
@ -254,7 +253,7 @@ TEST_F(AudioEncoderCngTest, EncodePassive) {
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
.WillRepeatedly(Return(Vad::kPassive));
// Expect no calls at all to the speech encoder mock.
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
uint32_t expected_timestamp = timestamp_;
for (int i = 0; i < 100; ++i) {
Encode();
@ -284,20 +283,23 @@ TEST_F(AudioEncoderCngTest, MixedActivePassive) {
CreateCng();
// All of the frame is active speech.
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.Times(6);
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
.Times(6)
.WillRepeatedly(Return(AudioEncoder::kZeroEncodedBytes));
EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kActive));
EXPECT_TRUE(encoded_info_.speech);
// First half of the frame is active speech.
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.Times(6);
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
.Times(6)
.WillRepeatedly(Return(AudioEncoder::kZeroEncodedBytes));
EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kPassive));
EXPECT_TRUE(encoded_info_.speech);
// Second half of the frame is active speech.
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.Times(6);
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
.Times(6)
.WillRepeatedly(Return(AudioEncoder::kZeroEncodedBytes));
EXPECT_TRUE(CheckMixedActivePassive(Vad::kPassive, Vad::kActive));
EXPECT_TRUE(encoded_info_.speech);
@ -336,22 +338,10 @@ TEST_F(AudioEncoderCngTest, VadInputSize60Ms) {
CheckVadInputSize(60, 30, 30);
}
// Verifies that the EncodedInfo struct pointer passed to
// AudioEncoderCng::Encode is propagated to the Encode call to the underlying
// speech encoder.
TEST_F(AudioEncoderCngTest, VerifyEncoderInfoPropagation) {
CreateCng();
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, &encoded_info_));
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
.WillOnce(Return(Vad::kActive));
Encode();
}
// Verifies that the correct payload type is set when CNG is encoded.
TEST_F(AudioEncoderCngTest, VerifyCngPayloadType) {
CreateCng();
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
.WillOnce(Return(Vad::kPassive));
@ -385,8 +375,7 @@ TEST_F(AudioEncoderCngTest, VerifySidFrameAfterSpeech) {
.WillOnce(Return(Vad::kActive));
AudioEncoder::EncodedInfo info;
info.encoded_bytes = kMockReturnEncodedBytes;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillOnce(SetArgPointee<4>(info));
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).WillOnce(Return(info));
Encode();
EXPECT_EQ(kMockReturnEncodedBytes, encoded_info_.encoded_bytes);

View File

@ -56,11 +56,10 @@ class AudioEncoderCng final : public AudioEncoder {
void SetProjectedPacketLossRate(double fraction) override;
protected:
void EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded,
EncodedInfo* info) override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
private:
// Deleter for use with scoped_ptr. E.g., use as
@ -69,12 +68,8 @@ class AudioEncoderCng final : public AudioEncoder {
inline void operator()(CNG_enc_inst* ptr) const { WebRtcCng_FreeEnc(ptr); }
};
void EncodePassive(size_t max_encoded_bytes,
uint8_t* encoded,
EncodedInfo* info);
void EncodeActive(size_t max_encoded_bytes,
uint8_t* encoded,
EncodedInfo* info);
EncodedInfo EncodePassive(size_t max_encoded_bytes, uint8_t* encoded);
EncodedInfo EncodeActive(size_t max_encoded_bytes, uint8_t* encoded);
size_t SamplesPer10msFrame() const;
AudioEncoder* speech_encoder_;