Removed the memory pool from the mixer.

Audio frames are now expected to be owned by the mixing participants.

Review-Url: https://codereview.webrtc.org/2127763002
Cr-Commit-Position: refs/heads/master@{#13554}
Author: aleloi
Date: 2016-07-28 06:36:22 -07:00
Committed by: Commit bot
Parent: 1a993d12bb
Commit: a0db81f83a
4 changed files with 61 additions and 106 deletions
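In outline, the commit replaces a push model, in which the mixer popped frames from a MemoryPool<AudioFrame> and passed them into each source, with a pull model in which each source hands back a pointer to a frame it owns. A condensed before/after sketch, assembled from the diffs below (error handling elided):

// Before: mixer-owned frames, recycled through a pool on every exit path.
AudioFrame* audioFrame = NULL;
_audioFramePool->PopMemory(audioFrame);
audioFrame->sample_rate_hz_ = _outputFrequency;
(*audio_source)->GetAudioFrameWithMuted(_id, audioFrame);
// ...
_audioFramePool->PushMemory(audioFrame);

// After: source-owned frames; the mixer only borrows the pointer.
auto audio_frame_with_info =
    (*audio_source)->GetAudioFrameWithMuted(_id, _outputFrequency);
AudioFrame* audio_frame = audio_frame_with_info.audio_frame;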


@@ -21,22 +21,6 @@ class NewMixHistory;
// A callback class that all mixer participants must inherit from/implement.
class MixerAudioSource {
public:
// The implementation of this function should update audioFrame with new
// audio every time it's called.
//
// If it returns -1, the frame will not be added to the mix.
//
// NOTE: This function should not be called. It will remain for a short
// time so that subclasses can override it without getting warnings.
// TODO(henrik.lundin) Remove this function.
virtual int32_t GetAudioFrame(int32_t id, AudioFrame* audioFrame) {
RTC_CHECK(false);
return -1;
}
// The implementation of GetAudioFrameWithMuted should update audio_frame
// with new audio every time it's called. The return value will be
// interpreted as follows.
enum class AudioFrameInfo {
kNormal, // The samples in audio_frame are valid and should be used.
kMuted, // The samples in audio_frame should not be used, but should be
@@ -45,11 +29,19 @@ class MixerAudioSource {
kError // audio_frame will not be used.
};
virtual AudioFrameInfo GetAudioFrameWithMuted(int32_t id,
AudioFrame* audio_frame) {
return GetAudioFrame(id, audio_frame) == -1 ? AudioFrameInfo::kError
: AudioFrameInfo::kNormal;
}
struct AudioFrameWithInfo {
AudioFrame* audio_frame;
AudioFrameInfo audio_frame_info;
};
// The implementation of GetAudioFrameWithMuted should update
// audio_frame with new audio every time it's called. Implementing
// classes are allowed to return the same AudioFrame pointer on
// different calls. The pointer must stay valid until the next
// mixing call or until this audio source is disconnected from the
// mixer.
virtual AudioFrameWithInfo GetAudioFrameWithMuted(int32_t id,
int sample_rate_hz) = 0;
// Returns true if the participant was mixed this mix iteration.
bool IsMixed() const;

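For illustration, here is a minimal sketch of a source under the new contract. The class name and the frame-filling logic are hypothetical, and other MixerAudioSource methods (such as NeededFrequency, which the mock in the test diff below still provides) are omitted; only the interface above is from the commit:

// Hypothetical mixing participant; not part of this commit.
class ToneSource : public MixerAudioSource {
 public:
  AudioFrameWithInfo GetAudioFrameWithMuted(int32_t id,
                                            int sample_rate_hz) override {
    frame_.sample_rate_hz_ = sample_rate_hz;
    frame_.samples_per_channel_ = sample_rate_hz / 100;  // 10 ms of audio.
    // Fill frame_.data_ with fresh samples here. Handing out the same
    // pointer on every call is allowed; it must stay valid until the next
    // mixing call or until this source is disconnected from the mixer.
    return {&frame_, AudioFrameInfo::kNormal};
  }

 private:
  AudioFrame frame_;  // Owned by the source, not the mixer.
};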

@@ -114,14 +114,13 @@ NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id)
_minimumMixingFreq(kLowestPossible),
_outputFrequency(kDefaultFrequency),
_sampleSize(0),
_audioFramePool(NULL),
audio_source_list_(),
additional_audio_source_list_(),
num_mixed_audio_sources_(0),
use_limiter_(true),
_timeStamp(0) {
thread_checker_.DetachFromThread();
}
bool NewAudioConferenceMixerImpl::Init() {
_crit.reset(CriticalSectionWrapper::CreateCriticalSection());
@@ -138,11 +137,6 @@ bool NewAudioConferenceMixerImpl::Init() {
if (!_limiter.get())
return false;
MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool,
DEFAULT_AUDIO_FRAME_POOLSIZE);
if (_audioFramePool == NULL)
return false;
if (SetOutputFrequency(kDefaultFrequency) == -1)
return false;
@@ -169,11 +163,6 @@ bool NewAudioConferenceMixerImpl::Init() {
return true;
}
NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() {
MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool);
RTC_DCHECK_EQ(_audioFramePool, static_cast<MemoryPool<AudioFrame>*>(nullptr));
}
void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources;
RTC_DCHECK(thread_checker_.CalledOnValidThread());
@@ -463,47 +452,39 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
bool wasMixed = false;
wasMixed = (*audio_source)->_mixHistory->WasMixed();
AudioFrame* audioFrame = NULL;
if (_audioFramePool->PopMemory(audioFrame) == -1) {
WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
"failed PopMemory() call");
RTC_NOTREACHED();
return;
}
audioFrame->sample_rate_hz_ = _outputFrequency;
auto ret = (*audio_source)->GetAudioFrameWithMuted(_id, audioFrame);
auto audio_frame_with_info =
(*audio_source)->GetAudioFrameWithMuted(_id, _outputFrequency);
auto ret = audio_frame_with_info.audio_frame_info;
AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
if (ret == MixerAudioSource::AudioFrameInfo::kError) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
"failed to GetAudioFrameWithMuted() from audio source");
_audioFramePool->PushMemory(audioFrame);
continue;
}
const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted);
if (audio_source_list_.size() != 1) {
// TODO(wu): Issue 3390, add support for multiple audio sources case.
audioFrame->ntp_time_ms_ = -1;
audio_frame->ntp_time_ms_ = -1;
}
// TODO(aleloi): this assert triggers in some test cases where SRTP is
// used, which prevents NetEQ from making a VAD decision. Temporarily
// disable this assert until the problem is fixed at a higher level.
// RTC_DCHECK_NE(audioFrame->vad_activity_, AudioFrame::kVadUnknown);
if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) {
// RTC_DCHECK_NE(audio_frame->vad_activity_, AudioFrame::kVadUnknown);
if (audio_frame->vad_activity_ == AudioFrame::kVadUnknown) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
"invalid VAD state from audio source");
}
if (audioFrame->vad_activity_ == AudioFrame::kVadActive) {
if (audio_frame->vad_activity_ == AudioFrame::kVadActive) {
if (!wasMixed && !muted) {
RampIn(*audioFrame);
RampIn(*audio_frame);
}
if (activeList.size() >= *maxAudioFrameCounter) {
// There are already more active audio sources than should be
// mixed. Only keep the ones with the highest energy.
AudioFrameList::iterator replaceItem;
uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audioFrame);
uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audio_frame);
bool found_replace_item = false;
for (AudioFrameList::iterator iter = activeList.begin();
@@ -532,8 +513,8 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
mixAudioSourceList->erase(replaceFrame.frame->id_);
activeList.erase(replaceItem);
activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
(*mixAudioSourceList)[audioFrame->id_] = *audio_source;
activeList.push_front(FrameAndMuteInfo(audio_frame, muted));
(*mixAudioSourceList)[audio_frame->id_] = *audio_source;
RTC_DCHECK_LE(mixAudioSourceList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
@@ -545,42 +526,36 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
RTC_DCHECK_LE(
rampOutList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
} else {
_audioFramePool->PushMemory(replaceFrame.frame);
}
} else {
if (wasMixed) {
if (!muted) {
RampOut(*audioFrame);
RampOut(*audio_frame);
}
rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted));
rampOutList->push_back(FrameAndMuteInfo(audio_frame, muted));
RTC_DCHECK_LE(
rampOutList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
} else {
_audioFramePool->PushMemory(audioFrame);
}
}
} else {
activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
(*mixAudioSourceList)[audioFrame->id_] = *audio_source;
activeList.push_front(FrameAndMuteInfo(audio_frame, muted));
(*mixAudioSourceList)[audio_frame->id_] = *audio_source;
RTC_DCHECK_LE(mixAudioSourceList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
}
} else {
if (wasMixed) {
AudioSourceWithFrame* part_struct =
new AudioSourceWithFrame(*audio_source, audioFrame, muted);
new AudioSourceWithFrame(*audio_source, audio_frame, muted);
passiveWasMixedList.push_back(part_struct);
} else if (mustAddToPassiveList) {
if (!muted) {
RampIn(*audioFrame);
RampIn(*audio_frame);
}
AudioSourceWithFrame* part_struct =
new AudioSourceWithFrame(*audio_source, audioFrame, muted);
new AudioSourceWithFrame(*audio_source, audio_frame, muted);
passiveWasNotMixedList.push_back(part_struct);
} else {
_audioFramePool->PushMemory(audioFrame);
}
}
}
@@ -604,8 +579,6 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
(*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source;
RTC_DCHECK_LE(mixAudioSourceList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
} else {
_audioFramePool->PushMemory((*iter)->audio_frame);
}
delete *iter;
}
@@ -619,8 +592,6 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
(*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source;
RTC_DCHECK_LE(mixAudioSourceList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
} else {
_audioFramePool->PushMemory((*iter)->audio_frame);
}
delete *iter;
}
@@ -633,9 +604,9 @@ void NewAudioConferenceMixerImpl::GetAdditionalAudio(
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"GetAdditionalAudio(additionalFramesList)");
// The GetAudioFrameWithMuted() callback may result in the audio source being
// removed from additionalAudioSourceList_. If that happens it will
// removed from additionalAudioFramesList_. If that happens it will
// invalidate any iterators. Create a copy of the audio sources list such
// that the list of audio sources can be traversed safely.
// that the list of participants can be traversed safely.
MixerAudioSourceList additionalAudioSourceList;
additionalAudioSourceList.insert(additionalAudioSourceList.begin(),
additional_audio_source_list_.begin(),
@@ -644,28 +615,21 @@
for (MixerAudioSourceList::const_iterator audio_source =
additionalAudioSourceList.begin();
audio_source != additionalAudioSourceList.end(); ++audio_source) {
AudioFrame* audioFrame = NULL;
if (_audioFramePool->PopMemory(audioFrame) == -1) {
WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
"failed PopMemory() call");
RTC_NOTREACHED();
return;
}
audioFrame->sample_rate_hz_ = _outputFrequency;
auto ret = (*audio_source)->GetAudioFrameWithMuted(_id, audioFrame);
auto audio_frame_with_info =
(*audio_source)->GetAudioFrameWithMuted(_id, _outputFrequency);
auto ret = audio_frame_with_info.audio_frame_info;
AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
if (ret == MixerAudioSource::AudioFrameInfo::kError) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
"failed to GetAudioFrameWithMuted() from audio_source");
_audioFramePool->PushMemory(audioFrame);
continue;
}
if (audioFrame->samples_per_channel_ == 0) {
if (audio_frame->samples_per_channel_ == 0) {
// Empty frame. Don't use it.
_audioFramePool->PushMemory(audioFrame);
continue;
}
additionalFramesList->push_back(FrameAndMuteInfo(
audioFrame, ret == MixerAudioSource::AudioFrameInfo::kMuted));
audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted));
}
}
@@ -698,10 +662,6 @@ void NewAudioConferenceMixerImpl::ClearAudioFrameList(
AudioFrameList* audioFrameList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"ClearAudioFrameList(audioFrameList)");
for (AudioFrameList::iterator iter = audioFrameList->begin();
iter != audioFrameList->end(); ++iter) {
_audioFramePool->PushMemory(iter->frame);
}
audioFrameList->clear();
}

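Pieced together from the hunk above, the surviving ClearAudioFrameList() shows how much bookkeeping the pool removal saves: with no pool to return frames to, dropping the references is enough.

void NewAudioConferenceMixerImpl::ClearAudioFrameList(
    AudioFrameList* audioFrameList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
               "ClearAudioFrameList(audioFrameList)");
  audioFrameList->clear();
}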

@@ -62,7 +62,8 @@ class NewAudioConferenceMixerImpl : public NewAudioConferenceMixer {
enum { kProcessPeriodicityInMs = 10 };
explicit NewAudioConferenceMixerImpl(int id);
~NewAudioConferenceMixerImpl();
// The dtor is not needed, because this class no longer manages
// memory.
// Must be called after ctor.
bool Init();
@@ -79,8 +80,6 @@ class NewAudioConferenceMixerImpl : public NewAudioConferenceMixer {
const MixerAudioSource& audio_source) const override;
private:
enum { DEFAULT_AUDIO_FRAME_POOLSIZE = 50 };
// Set/get mix frequency
int32_t SetOutputFrequency(const Frequency& frequency);
Frequency OutputFrequency() const;
@@ -151,11 +150,9 @@ class NewAudioConferenceMixerImpl : public NewAudioConferenceMixer {
Frequency _outputFrequency;
size_t _sampleSize;
// Memory pool to avoid allocating/deallocating AudioFrames
MemoryPool<AudioFrame>* _audioFramePool;
// List of all audio sources. Note all lists are disjoint.
MixerAudioSourceList audio_source_list_; // May be mixed.
// Always mixed, anonymously.
MixerAudioSourceList additional_audio_source_list_;


@@ -19,7 +19,7 @@
#include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h"
using testing::_;
using testing::AtLeast;
using testing::Exactly;
using testing::Invoke;
using testing::Return;
@@ -29,19 +29,24 @@ namespace webrtc {
class MockMixerAudioSource : public MixerAudioSource {
public:
MockMixerAudioSource() {
ON_CALL(*this, GetAudioFrame(_, _))
.WillByDefault(Invoke(this, &MockMixerAudioSource::FakeAudioFrame));
ON_CALL(*this, GetAudioFrameWithMuted(_, _))
.WillByDefault(
Invoke(this, &MockMixerAudioSource::FakeAudioFrameWithMuted));
}
MOCK_METHOD2(GetAudioFrame,
int32_t(const int32_t id, AudioFrame* audio_frame));
MOCK_METHOD2(GetAudioFrameWithMuted,
AudioFrameWithInfo(const int32_t id, int sample_rate_hz));
MOCK_CONST_METHOD1(NeededFrequency, int32_t(const int32_t id));
AudioFrame* fake_frame() { return &fake_frame_; }
private:
AudioFrame fake_frame_;
int32_t FakeAudioFrame(const int32_t id, AudioFrame* audio_frame) {
audio_frame->CopyFrom(fake_frame_);
return 0;
AudioFrameWithInfo FakeAudioFrameWithMuted(const int32_t id,
int sample_rate_hz) {
return {
fake_frame(), // audio_frame
AudioFrameInfo::kNormal, // audio_frame_info
};
}
};
@@ -168,7 +173,8 @@ TEST(AudioMixer, LargestEnergyVadActiveMixed) {
participants[i].fake_frame()->data_[80] = i;
EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true));
EXPECT_CALL(participants[i], GetAudioFrame(_, _)).Times(AtLeast(1));
EXPECT_CALL(participants[i], GetAudioFrameWithMuted(_, _))
.Times(Exactly(1));
EXPECT_CALL(participants[i], NeededFrequency(_))
.WillRepeatedly(Return(kSampleRateHz));
}
@@ -196,7 +202,7 @@ TEST(AudioMixer, LargestEnergyVadActiveMixed) {
}
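The tightened expectations, from Times(AtLeast(1)) on the old GetAudioFrame to Times(Exactly(1)) on GetAudioFrameWithMuted, also document the new contract: the mixer appears to pull exactly one frame per source per Mix() call, so a test can pin the exact count. A sketch, assuming mixer_ and audio_frame_for_mixing_ are fixture members (both names are illustrative):

EXPECT_CALL(participant_, GetAudioFrameWithMuted(_, _)).Times(Exactly(1));
mixer_->Mix(&audio_frame_for_mixing_);  // Exactly one pull for this source.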
TEST_F(BothMixersTest, CompareInitialFrameAudio) {
EXPECT_CALL(participant_, GetAudioFrame(_, _)).Times(AtLeast(1));
EXPECT_CALL(participant_, GetAudioFrameWithMuted(_, _)).Times(Exactly(1));
// Make sure the participant is marked as 'non-mixed' so that it is
// ramped in next round.
@@ -222,7 +228,7 @@ TEST_F(BothMixersTest, CompareInitialFrameAudio) {
}
TEST_F(BothMixersTest, CompareSecondFrameAudio) {
EXPECT_CALL(participant_, GetAudioFrame(_, _)).Times(AtLeast(1));
EXPECT_CALL(participant_, GetAudioFrameWithMuted(_, _)).Times(Exactly(2));
// Make sure the participant is marked as 'non-mixed' so that it is
// ramped in next round.