Change NetEq::GetAudio to use AudioFrame
With this change, NetEq now uses AudioFrame as output type, like the surrounding functions in ACM and VoiceEngine already do. The computational savings are probably slim, since one memcpy is removed while another one is added (both in AcmReceiver::GetAudio). More simplifications and clean-up will be done in AcmReceiver::GetAudio in future CLs. BUG=webrtc:5607 Review URL: https://codereview.webrtc.org/1750353002 Cr-Commit-Position: refs/heads/master@{#11874}
This commit is contained in:
committed by
Commit bot
parent
6459f84766
commit
6d8e011b64
@@ -13,6 +13,7 @@
|
||||
|
||||
#include "webrtc/base/constructormagic.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/audio_multi_vector.h"
|
||||
#include "webrtc/modules/include/module_common_types.h"
|
||||
#include "webrtc/typedefs.h"
|
||||
|
||||
namespace webrtc {
|
||||
@@ -65,8 +66,10 @@ class SyncBuffer : public AudioMultiVector {
|
||||
|
||||
// Reads |requested_len| samples from each channel and writes them interleaved
|
||||
// into |output|. The |next_index_| is updated to point to the sample to read
|
||||
// next time.
|
||||
size_t GetNextAudioInterleaved(size_t requested_len, int16_t* output);
|
||||
// next time. The AudioFrame |output| is first reset, and the |data_|,
|
||||
// |interleaved_|, |num_channels_|, and |samples_per_channel_| fields are
|
||||
// updated.
|
||||
void GetNextAudioInterleaved(size_t requested_len, AudioFrame* output);
|
||||
|
||||
// Adds |increment| to |end_timestamp_|.
|
||||
void IncreaseEndTimestamp(uint32_t increment);
|
||||
|
||||
Reference in New Issue
Block a user