AudioCodingModuleImpl::Encode: Use a Buffer instead of a stack-allocated array

The Buffer is saved between calls, so after the initial allocation
it will already exist and be the right size. The stack-allocated
array had the advantage of requiring no heap allocation at all, but
for the most popular encoders it ended up about 15 kB too large, and
now that we allow user-defined encoders, there was also the (remote)
possibility that it would actually be too small.

R=henrik.lundin@webrtc.org

Review URL: https://codereview.webrtc.org/1303413003 .

Cr-Commit-Position: refs/heads/master@{#9789}
Author: Karl Wiberg
Date:   2015-08-26 20:24:19 +02:00
Commit: 0163fb2ad7
Parent: d838d27919

4 changed files with 16 additions and 12 deletions
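The reuse pattern the commit message describes, as a minimal self-contained sketch: grow an owned buffer to the encoder's worst case, encode into it, then shrink the logical size to what was produced. Here std::vector stands in for rtc::Buffer, and Encoder, Sender, and all byte counts are illustrative, not the real WebRTC API.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative encoder: knows a worst-case output size, usually writes less.
class Encoder {
 public:
  size_t MaxEncodedBytes() const { return 1000; }  // worst case per call
  size_t Encode(uint8_t* output, size_t max_bytes) {
    const size_t n = max_bytes < 160 ? max_bytes : 160;  // e.g. one 20 ms frame
    for (size_t i = 0; i < n; ++i) output[i] = 0;  // dummy payload bytes
    return n;  // bytes actually produced
  }
};

class Sender {
 public:
  void EncodeFrame(Encoder* encoder) {
    // Grow to the worst case. After the first call the capacity is already
    // there, so this only adjusts the size without reallocating.
    buffer_.resize(encoder->MaxEncodedBytes());
    const size_t encoded = encoder->Encode(buffer_.data(), buffer_.size());
    // Shrink the logical size to what was produced; capacity is retained.
    buffer_.resize(encoded);
    std::printf("encoded %zu bytes\n", buffer_.size());
  }

 private:
  std::vector<uint8_t> buffer_;  // persists between calls, like encode_buffer_
};

int main() {
  Encoder encoder;
  Sender sender;
  sender.EncodeFrame(&encoder);  // first call: allocates
  sender.EncodeFrame(&encoder);  // later calls: reuse the same storage
}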


@@ -21,9 +21,6 @@
 namespace webrtc {
 
-// The maximum size of a payload, that is 60 ms of PCM-16 @ 32 kHz stereo
-#define MAX_PAYLOAD_SIZE_BYTE 7680
-
 // General codec specific defines
 const int kIsacWbDefaultRate = 32000;
 const int kIsacSwbDefaultRate = 56000;
 
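For reference, the deleted constant matches its comment: 60 ms of 16-bit PCM at 32 kHz stereo is 60/1000 s * 32000 samples/s * 2 channels * 2 bytes/sample = 7680 bytes, and the stack array in Encode() was twice that (2 * 7680 = 15360 bytes, "room for 1 RED payload"), which is the "about 15 kB too much" the commit message refers to. A compile-time restatement of the arithmetic (names are illustrative):

constexpr int kFrameMs = 60;
constexpr int kSampleRateHz = 32000;
constexpr int kChannels = 2;
constexpr int kBytesPerSample = 2;  // PCM-16
constexpr int kMaxPayloadSizeByte =
    kFrameMs * kSampleRateHz / 1000 * kChannels * kBytesPerSample;
static_assert(kMaxPayloadSizeByte == 7680, "matches MAX_PAYLOAD_SIZE_BYTE");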


@@ -157,7 +157,6 @@ AudioCodingModuleImpl::AudioCodingModuleImpl(
 
 AudioCodingModuleImpl::~AudioCodingModuleImpl() = default;
 
 int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
-  uint8_t stream[2 * MAX_PAYLOAD_SIZE_BYTE];  // Make room for 1 RED payload.
   AudioEncoder::EncodedInfo encoded_info;
   uint8_t previous_pltype;
@@ -179,11 +178,13 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
   last_rtp_timestamp_ = rtp_timestamp;
   first_frame_ = false;
 
-  encoded_info = audio_encoder->Encode(rtp_timestamp, input_data.audio,
-                                       input_data.length_per_channel,
-                                       sizeof(stream), stream);
+  encode_buffer_.SetSize(audio_encoder->MaxEncodedBytes());
+  encoded_info = audio_encoder->Encode(
+      rtp_timestamp, input_data.audio, input_data.length_per_channel,
+      encode_buffer_.size(), encode_buffer_.data());
+  encode_buffer_.SetSize(encoded_info.encoded_bytes);
   bitrate_logger_.MaybeLog(audio_encoder->GetTargetBitrate() / 1000);
-  if (encoded_info.encoded_bytes == 0 && !encoded_info.send_even_if_empty) {
+  if (encode_buffer_.size() == 0 && !encoded_info.send_even_if_empty) {
     // Not enough data.
     return 0;
   }
@@ -192,11 +193,11 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
 
   RTPFragmentationHeader my_fragmentation;
   ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation);
   FrameType frame_type;
-  if (encoded_info.encoded_bytes == 0 && encoded_info.send_even_if_empty) {
+  if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) {
     frame_type = kFrameEmpty;
     encoded_info.payload_type = previous_pltype;
   } else {
-    DCHECK_GT(encoded_info.encoded_bytes, 0u);
+    DCHECK_GT(encode_buffer_.size(), 0u);
     frame_type = encoded_info.speech ? kAudioFrameSpeech : kAudioFrameCN;
   }
@@ -205,7 +206,7 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
   if (packetization_callback_) {
     packetization_callback_->SendData(
         frame_type, encoded_info.payload_type, encoded_info.encoded_timestamp,
-        stream, encoded_info.encoded_bytes,
+        encode_buffer_.data(), encode_buffer_.size(),
         my_fragmentation.fragmentationVectorSize > 0 ? &my_fragmentation
                                                      : nullptr);
   }
@@ -216,7 +217,7 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
     }
   }
   previous_pltype_ = encoded_info.payload_type;
-  return static_cast<int32_t>(encoded_info.encoded_bytes);
+  return static_cast<int32_t>(encode_buffer_.size());
 }
 
 /////////////////////////////////////////
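Once encode_buffer_.SetSize(encoded_info.encoded_bytes) has run, encode_buffer_.size() == encoded_info.encoded_bytes holds for the rest of Encode(), which is why the emptiness checks, the SendData() arguments, and the return value can all switch to encode_buffer_.size(). The grow-then-shrink sequence is cheap because SetSize() only adjusts the logical size, growing the underlying allocation when needed but never releasing it. A stand-in with just that behavior (MiniBuffer is a sketch, not the real rtc::Buffer):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>

class MiniBuffer {
 public:
  uint8_t* data() { return data_.get(); }
  size_t size() const { return size_; }

  // Grows capacity when needed but never releases it, so a repeated
  // SetSize(max) / SetSize(actual) pair allocates at most once.
  void SetSize(size_t size) {
    if (size > capacity_) {
      std::unique_ptr<uint8_t[]> new_data(new uint8_t[size]);
      std::copy(data_.get(), data_.get() + size_, new_data.get());
      data_ = std::move(new_data);
      capacity_ = size;
    }
    size_ = size;
  }

 private:
  std::unique_ptr<uint8_t[]> data_;
  size_t size_ = 0;
  size_t capacity_ = 0;
};

int main() {
  MiniBuffer b;
  b.SetSize(7680);  // allocates
  b.SetSize(160);   // logical shrink only; capacity stays 7680
  b.SetSize(7680);  // no new allocation
  assert(b.size() == 7680);
}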


@@ -13,6 +13,7 @@
 
 #include <vector>
 
+#include "webrtc/base/buffer.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/base/thread_annotations.h"
 #include "webrtc/common_types.h"
@@ -265,6 +266,7 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
   int UpdateUponReceivingCodec(int index);
 
   const rtc::scoped_ptr<CriticalSectionWrapper> acm_crit_sect_;
+  rtc::Buffer encode_buffer_ GUARDED_BY(acm_crit_sect_);
   int id_;  // TODO(henrik.lundin) Make const.
   uint32_t expected_codec_ts_ GUARDED_BY(acm_crit_sect_);
   uint32_t expected_in_ts_ GUARDED_BY(acm_crit_sect_);
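The new member carries the same GUARDED_BY annotation as its neighbors, so Clang's -Wthread-safety analysis can flag any access to encode_buffer_ made without acm_crit_sect_ held. A minimal sketch of how the annotation works, written against Clang's attributes directly (webrtc/base/thread_annotations.h wraps these in macros; Mutex and Codec below are illustrative):

// Compile with: clang++ -Wthread-safety -c guarded.cc
#if defined(__clang__)
#define CAPABILITY(x) __attribute__((capability(x)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
#else
#define CAPABILITY(x)
#define GUARDED_BY(x)
#define ACQUIRE(...)
#define RELEASE(...)
#endif

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() {}
  void Unlock() RELEASE() {}
};

class Codec {
 public:
  void SetRate(int rate) {
    lock_.Lock();
    rate_ = rate;  // OK: lock_ is held here.
    lock_.Unlock();
  }
  // Uncommenting this would draw a -Wthread-safety warning: reading
  // variable 'rate_' requires holding mutex 'lock_'.
  // int UncheckedRate() { return rate_; }

 private:
  Mutex lock_;
  int rate_ GUARDED_BY(lock_) = 0;
};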


@@ -1619,6 +1619,10 @@ TEST_F(AcmSenderBitExactnessOldApi, External_Pcmu_20ms) {
   EXPECT_CALL(mock_encoder, NumChannels())
       .Times(AtLeast(1))
       .WillRepeatedly(Invoke(&encoder, &AudioEncoderMutablePcmU::NumChannels));
+  EXPECT_CALL(mock_encoder, MaxEncodedBytes())
+      .Times(AtLeast(1))
+      .WillRepeatedly(
+          Invoke(&encoder, &AudioEncoderMutablePcmU::MaxEncodedBytes));
   EXPECT_CALL(mock_encoder, EncodeInternal(_, _, _, _))
       .Times(AtLeast(1))
       .WillRepeatedly(
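The test's mock encoder delegates each call to a real AudioEncoderMutablePcmU, so every method the new Encode() path exercises needs an expectation: without the added MaxEncodedBytes() delegation, gMock's default action would return 0 and the encode buffer would be sized to nothing. The delegate-to-real-object idiom in a generic, self-contained form (Calculator and MockCalculator are illustrative; the MOCK_METHOD spelling is current gMock, where the 2015 tree used MOCK_METHODn):

#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::_;
using ::testing::AtLeast;
using ::testing::Invoke;

class Calculator {
 public:
  virtual ~Calculator() = default;
  virtual int Add(int a, int b) { return a + b; }
};

class MockCalculator : public Calculator {
 public:
  MOCK_METHOD(int, Add, (int a, int b), (override));
};

TEST(DelegationTest, ForwardsToTheRealObject) {
  Calculator real;
  MockCalculator mock;
  // Without this expectation the mock would return the default int, 0.
  EXPECT_CALL(mock, Add(_, _))
      .Times(AtLeast(1))
      .WillRepeatedly(Invoke(&real, &Calculator::Add));
  EXPECT_EQ(5, mock.Add(2, 3));
}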