ACM: Corrected temporary buffer size

This CL corrects the temporary buffer size used in the
pre-processing of the capture audio before encoding.

As part of this it removes the ACM-specific hardcoding
of the size and instead ensures that the size of the
temporary buffer matches that of the AudioFrame.

Bug: webrtc:11242
Change-Id: I56dd6cadfd4e140e8e159966c33d1027383ea9fa
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/170340
Commit-Queue: Per Åhgren <peah@webrtc.org>
Reviewed-by: Henrik Lundin <henrik.lundin@webrtc.org>
Reviewed-by: Henrik Andreassson <henrika@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#30775}
This commit is contained in:
Per Åhgren
2020-03-12 11:53:30 +01:00
committed by Commit Bot
parent c71be24c82
commit d82a02c837
3 changed files with 20 additions and 9 deletions

View File

@ -37,6 +37,8 @@ namespace {
// 48 kHz data. // 48 kHz data.
constexpr size_t kInitialInputDataBufferSize = 6 * 480; constexpr size_t kInitialInputDataBufferSize = 6 * 480;
constexpr int32_t kMaxInputSampleRateHz = 192000;
class AudioCodingModuleImpl final : public AudioCodingModule { class AudioCodingModuleImpl final : public AudioCodingModule {
public: public:
explicit AudioCodingModuleImpl(const AudioCodingModule::Config& config); explicit AudioCodingModuleImpl(const AudioCodingModule::Config& config);
@ -346,7 +348,7 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
return -1; return -1;
} }
if (audio_frame.sample_rate_hz_ > 192000) { if (audio_frame.sample_rate_hz_ > kMaxInputSampleRateHz) {
assert(false); assert(false);
RTC_LOG(LS_ERROR) << "Cannot Add 10 ms audio, input frequency not valid"; RTC_LOG(LS_ERROR) << "Cannot Add 10 ms audio, input frequency not valid";
return -1; return -1;
@ -463,20 +465,25 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
*ptr_out = &preprocess_frame_; *ptr_out = &preprocess_frame_;
preprocess_frame_.num_channels_ = in_frame.num_channels_; preprocess_frame_.num_channels_ = in_frame.num_channels_;
preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_; preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_;
std::array<int16_t, WEBRTC_10MS_PCM_AUDIO> audio; std::array<int16_t, AudioFrame::kMaxDataSizeSamples> audio;
const int16_t* src_ptr_audio = in_frame.data(); const int16_t* src_ptr_audio;
if (down_mix) { if (down_mix) {
// If a resampling is required the output of a down-mix is written into a // If a resampling is required, the output of a down-mix is written into a
// local buffer, otherwise, it will be written to the output frame. // local buffer, otherwise, it will be written to the output frame.
int16_t* dest_ptr_audio = int16_t* dest_ptr_audio =
resample ? audio.data() : preprocess_frame_.mutable_data(); resample ? audio.data() : preprocess_frame_.mutable_data();
RTC_DCHECK_GE(audio.size(), preprocess_frame_.samples_per_channel_);
RTC_DCHECK_GE(audio.size(), in_frame.samples_per_channel_); RTC_DCHECK_GE(audio.size(), in_frame.samples_per_channel_);
DownMixFrame(in_frame, DownMixFrame(in_frame,
rtc::ArrayView<int16_t>( rtc::ArrayView<int16_t>(
dest_ptr_audio, preprocess_frame_.samples_per_channel_)); dest_ptr_audio, preprocess_frame_.samples_per_channel_));
preprocess_frame_.num_channels_ = 1; preprocess_frame_.num_channels_ = 1;
// Set the input of the resampler is the down-mixed signal.
// Set the input of the resampler to the down-mixed signal.
src_ptr_audio = audio.data(); src_ptr_audio = audio.data();
} else {
// Set the input of the resampler to the original data.
src_ptr_audio = in_frame.data();
} }
preprocess_frame_.timestamp_ = expected_codec_ts_; preprocess_frame_.timestamp_ = expected_codec_ts_;

View File

@ -33,8 +33,6 @@ class AudioEncoder;
class AudioFrame; class AudioFrame;
struct RTPHeader; struct RTPHeader;
#define WEBRTC_10MS_PCM_AUDIO 960 // 16 bits super wideband 48 kHz
// Callback class used for sending data ready to be packetized // Callback class used for sending data ready to be packetized
class AudioPacketizationCallback { class AudioPacketizationCallback {
public: public:

View File

@ -24,6 +24,12 @@
namespace webrtc { namespace webrtc {
namespace {
// Buffer size for stereo 48 kHz audio.
constexpr size_t kWebRtc10MsPcmAudio = 960;
} // namespace
TestPacketization::TestPacketization(RTPStream* rtpStream, uint16_t frequency) TestPacketization::TestPacketization(RTPStream* rtpStream, uint16_t frequency)
: _rtpStream(rtpStream), _frequency(frequency), _seqNo(0) {} : _rtpStream(rtpStream), _frequency(frequency), _seqNo(0) {}
@ -92,7 +98,7 @@ void Sender::Run() {
} }
Receiver::Receiver() Receiver::Receiver()
: _playoutLengthSmpls(WEBRTC_10MS_PCM_AUDIO), : _playoutLengthSmpls(kWebRtc10MsPcmAudio),
_payloadSizeBytes(MAX_INCOMING_PAYLOAD) {} _payloadSizeBytes(MAX_INCOMING_PAYLOAD) {}
void Receiver::Setup(AudioCodingModule* acm, void Receiver::Setup(AudioCodingModule* acm,
@ -139,7 +145,7 @@ void Receiver::Setup(AudioCodingModule* acm,
_pcmFile.Open(file_name, 32000, "wb+"); _pcmFile.Open(file_name, 32000, "wb+");
_realPayloadSizeBytes = 0; _realPayloadSizeBytes = 0;
_playoutBuffer = new int16_t[WEBRTC_10MS_PCM_AUDIO]; _playoutBuffer = new int16_t[kWebRtc10MsPcmAudio];
_frequency = playSampFreq; _frequency = playSampFreq;
_acm = acm; _acm = acm;
_firstTime = true; _firstTime = true;