Rewrote UpdateToMix in the audio mixer.

The new version is much shorter than the old one, and hopefully easier
to read. This is part of the effort to rewrite the old mixer.

NOTRY=True

Review-Url: https://codereview.webrtc.org/2132563002
Cr-Commit-Position: refs/heads/master@{#13570}
Author: aleloi
Date: 2016-07-29 02:12:41 -07:00
Committed by: Commit bot
parent ea4c141ffa
commit f3882571b0
4 changed files with 291 additions and 239 deletions
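For orientation before the diffs: the new UpdateToMix replaces the old multi-list bookkeeping (activeList, passiveWasMixedList, passiveWasNotMixedList, rampOutList) with a single sort-and-pick pass. Below is a minimal, self-contained sketch of that selection policy; Candidate and SelectToMix are illustrative stand-ins for the SourceFrame logic shown in the diff, not the actual WebRTC code.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

// Illustrative stand-in for SourceFrame (see the diff below).
struct Candidate {
  bool muted;
  bool vad_active;
  uint32_t energy;  // only meaningful when !muted

  // Same policy as SourceFrame::shouldMixBefore(): unmuted before
  // muted, VAD-active before passive, then by decreasing energy.
  bool shouldMixBefore(const Candidate& other) const {
    if (muted != other.muted) return other.muted;
    if (vad_active != other.vad_active) return vad_active;
    return energy > other.energy;
  }
};

// Pick at most |max_mixed| unmuted candidates, best first. The real
// implementation additionally ramps frames in and out and records
// each source's mixed status.
std::vector<Candidate> SelectToMix(std::vector<Candidate> all,
                                   size_t max_mixed) {
  std::sort(all.begin(), all.end(),
            std::mem_fn(&Candidate::shouldMixBefore));
  std::vector<Candidate> picked;
  for (const Candidate& c : all) {
    if (c.muted) continue;  // muted frames are never mixed
    if (picked.size() == max_mixed) break;
    picked.push_back(c);
  }
  return picked;
}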


@@ -29,7 +29,7 @@ class MixerAudioSource {
kError // audio_frame will not be used.
};
struct AudioFrameWithInfo {
struct AudioFrameWithMuted {
AudioFrame* audio_frame;
AudioFrameInfo audio_frame_info;
};
@@ -40,8 +40,8 @@ class MixerAudioSource {
// different calls. The pointer must stay valid until the next
// mixing call or until this audio source is disconnected from the
// mixer.
virtual AudioFrameWithInfo GetAudioFrameWithMuted(int32_t id,
int sample_rate_hz) = 0;
virtual AudioFrameWithMuted GetAudioFrameWithMuted(int32_t id,
int sample_rate_hz) = 0;
// Returns true if the participant was mixed this mix iteration.
bool IsMixed() const;
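As an illustration of this contract, a hypothetical source can satisfy it by owning its AudioFrame, so the returned pointer stays valid between mixing calls. The class below is invented for this sketch and omits any other overrides MixerAudioSource may require (such as NeededFrequency()):

// Hypothetical example, not part of this change. The frame is a
// member, so the pointer handed to the mixer stays valid until the
// next GetAudioFrameWithMuted() call or until disconnection.
class SketchSource : public MixerAudioSource {
 public:
  AudioFrameWithMuted GetAudioFrameWithMuted(int32_t id,
                                             int sample_rate_hz) override {
    frame_.sample_rate_hz_ = sample_rate_hz;
    // Fill frame_ with the next block of audio at sample_rate_hz here;
    // report kMuted instead of writing zeros when there is nothing to say.
    return {&frame_, AudioFrameInfo::kNormal};
  }

 private:
  AudioFrame frame_;  // reused across mixing calls
};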


@@ -11,6 +11,7 @@
#include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h"
#include <algorithm>
#include <functional>
#include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h"
#include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h"
@@ -22,15 +23,40 @@
namespace webrtc {
namespace {
struct AudioSourceWithFrame {
AudioSourceWithFrame(MixerAudioSource* p, AudioFrame* a, bool m)
: audio_source(p), audio_frame(a), muted(m) {}
MixerAudioSource* audio_source;
AudioFrame* audio_frame;
bool muted;
};
class SourceFrame {
public:
SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before)
: audio_source_(p),
audio_frame_(a),
muted_(m),
was_mixed_before_(was_mixed_before) {
if (!muted_) {
energy_ = CalculateEnergy(*a);
}
}
typedef std::list<AudioSourceWithFrame*> AudioSourceWithFrameList;
// a.shouldMixBefore(b) is used to select mixer participants.
bool shouldMixBefore(const SourceFrame& other) const {
if (muted_ != other.muted_) {
return other.muted_;
}
auto our_activity = audio_frame_->vad_activity_;
auto other_activity = other.audio_frame_->vad_activity_;
if (our_activity != other_activity) {
return our_activity == AudioFrame::kVadActive;
}
return energy_ > other.energy_;
}
MixerAudioSource* audio_source_;
AudioFrame* audio_frame_;
bool muted_;
uint32_t energy_;
bool was_mixed_before_;
};
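A worked example of the ordering this predicate induces, using hypothetical frames:

// Hypothetical candidates, shown best-first after std::sort with
// shouldMixBefore():
//   a: unmuted, kVadActive,  energy = 100
//   b: unmuted, kVadPassive, energy = 900
//   c: muted
// a.shouldMixBefore(b) == true  (VAD activity outranks raw energy)
// b.shouldMixBefore(c) == true  (any unmuted frame outranks a muted one)
// Energy only breaks ties between frames with equal mute and VAD state.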
// Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
// These effects are applied to |frame| itself prior to mixing. Assumes that
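The saturation protection mentioned in that comment amounts to clamping the sample sum; a conceptual sketch, assuming int16_t samples (not the helper's actual body, which also handles mono-to-stereo upmixing):

#include <cstdint>
#include <limits>

// Clamp the 32-bit sum back into the int16_t range instead of letting
// it wrap, e.g. mixed[i] = SaturatedAdd(mixed[i], frame[i]).
inline int16_t SaturatedAdd(int16_t a, int16_t b) {
  const int32_t sum = static_cast<int32_t>(a) + b;
  if (sum > std::numeric_limits<int16_t>::max())
    return std::numeric_limits<int16_t>::max();
  if (sum < std::numeric_limits<int16_t>::min())
    return std::numeric_limits<int16_t>::min();
  return static_cast<int16_t>(sum);
}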
@@ -167,7 +193,6 @@ void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources;
RTC_DCHECK(thread_checker_.CalledOnValidThread());
AudioFrameList mixList;
AudioFrameList rampOutList;
AudioFrameList additionalFramesList;
std::map<int, MixerAudioSource*> mixedAudioSourcesMap;
{
@@ -214,20 +239,17 @@ void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
}
}
UpdateToMix(&mixList, &rampOutList, &mixedAudioSourcesMap,
&remainingAudioSourcesAllowedToMix);
mixList = UpdateToMix(remainingAudioSourcesAllowedToMix);
remainingAudioSourcesAllowedToMix -= mixList.size();
GetAdditionalAudio(&additionalFramesList);
UpdateMixedStatus(mixedAudioSourcesMap);
}
// TODO(aleloi): it might be better to decide the number of channels
// with an API instead of dynamically.
// Find the max channels over all mixing lists.
const size_t num_mixed_channels = std::max(
MaxNumChannels(&mixList), std::max(MaxNumChannels(&additionalFramesList),
MaxNumChannels(&rampOutList)));
const size_t num_mixed_channels =
std::max(MaxNumChannels(&mixList), MaxNumChannels(&additionalFramesList));
audio_frame_for_mixing->UpdateFrame(
-1, _timeStamp, NULL, 0, _outputFrequency, AudioFrame::kNormalSpeech,
@@ -245,7 +267,6 @@ void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
{
CriticalSectionScoped cs(_crit.get());
MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList);
MixAnonomouslyFromList(audio_frame_for_mixing, rampOutList);
if (audio_frame_for_mixing->samples_per_channel_ == 0) {
// Nothing was mixed, set the audio samples to silence.
@@ -256,10 +277,6 @@ void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
LimitMixedAudio(audio_frame_for_mixing);
}
}
ClearAudioFrameList(&mixList);
ClearAudioFrameList(&rampOutList);
ClearAudioFrameList(&additionalFramesList);
return;
}
@@ -426,177 +443,62 @@ int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequencyFromList(
return highestFreq;
}
void NewAudioConferenceMixerImpl::UpdateToMix(
AudioFrameList* mixList,
AudioFrameList* rampOutList,
std::map<int, MixerAudioSource*>* mixAudioSourceList,
size_t* maxAudioFrameCounter) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"UpdateToMix(mixList,rampOutList,mixAudioSourceList,%d)",
*maxAudioFrameCounter);
const size_t mixListStartSize = mixList->size();
AudioFrameList activeList;
// Struct needed by the passive lists to keep track of which AudioFrame
// belongs to which MixerAudioSource.
AudioSourceWithFrameList passiveWasNotMixedList;
AudioSourceWithFrameList passiveWasMixedList;
for (MixerAudioSourceList::const_iterator audio_source =
audio_source_list_.begin();
audio_source != audio_source_list_.end(); ++audio_source) {
// Stop keeping track of passive audioSources if there are already
// enough audio sources available (they won't be mixed anyway).
bool mustAddToPassiveList =
(*maxAudioFrameCounter >
(activeList.size() + passiveWasMixedList.size() +
passiveWasNotMixedList.size()));
AudioFrameList NewAudioConferenceMixerImpl::UpdateToMix(
size_t maxAudioFrameCounter) const {
AudioFrameList result;
std::vector<SourceFrame> audioSourceMixingDataList;
bool wasMixed = false;
wasMixed = (*audio_source)->_mixHistory->WasMixed();
// Get audio source audio and put it in the struct vector.
for (MixerAudioSource* audio_source : audio_source_list_) {
auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted(
_id, static_cast<int>(_outputFrequency));
auto audio_frame_with_info =
(*audio_source)->GetAudioFrameWithMuted(_id, _outputFrequency);
auto ret = audio_frame_with_info.audio_frame_info;
AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
if (ret == MixerAudioSource::AudioFrameInfo::kError) {
auto audio_frame_info = audio_frame_with_info.audio_frame_info;
AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;
if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
"failed to GetAudioFrameWithMuted() from participant");
continue;
}
const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted);
if (audio_source_list_.size() != 1) {
// TODO(wu): Issue 3390, add support for multiple audio sources case.
audio_frame->ntp_time_ms_ = -1;
audioSourceMixingDataList.emplace_back(
audio_source, audio_source_audio_frame,
audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted,
audio_source->_mixHistory->WasMixed());
}
// Sort frames by the mixing-preference order defined by shouldMixBefore().
std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(),
std::mem_fn(&SourceFrame::shouldMixBefore));
// Go through the sorted list in order and pick the frames to mix.
for (SourceFrame& p : audioSourceMixingDataList) {
// Filter muted.
if (p.muted_) {
p.audio_source_->_mixHistory->SetIsMixed(false);
continue;
}
// TODO(aleloi): this assert triggers in some test cases where SRTP is
// used which prevents NetEQ from making a VAD. Temporarily disable this
// assert until the problem is fixed on a higher level.
// RTC_DCHECK_NE(audio_frame->vad_activity_, AudioFrame::kVadUnknown);
if (audio_frame->vad_activity_ == AudioFrame::kVadUnknown) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
"invalid VAD state from audio source");
}
if (audio_frame->vad_activity_ == AudioFrame::kVadActive) {
if (!wasMixed && !muted) {
RampIn(*audio_frame);
}
if (activeList.size() >= *maxAudioFrameCounter) {
// There are already more active audio sources than should be
// mixed. Only keep the ones with the highest energy.
AudioFrameList::iterator replaceItem;
uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audio_frame);
bool found_replace_item = false;
for (AudioFrameList::iterator iter = activeList.begin();
iter != activeList.end(); ++iter) {
const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame);
if (energy < lowestEnergy) {
replaceItem = iter;
lowestEnergy = energy;
found_replace_item = true;
}
}
if (found_replace_item) {
RTC_DCHECK(!muted); // Cannot replace with a muted frame.
FrameAndMuteInfo replaceFrame = *replaceItem;
bool replaceWasMixed = false;
std::map<int, MixerAudioSource*>::const_iterator it =
mixAudioSourceList->find(replaceFrame.frame->id_);
// When a frame is pushed to |activeList| it is also pushed
// to mixAudioSourceList with the frame's id. This means
// that the Find call above should never fail.
RTC_DCHECK(it != mixAudioSourceList->end());
replaceWasMixed = it->second->_mixHistory->WasMixed();
mixAudioSourceList->erase(replaceFrame.frame->id_);
activeList.erase(replaceItem);
activeList.push_front(FrameAndMuteInfo(audio_frame, muted));
(*mixAudioSourceList)[audio_frame->id_] = *audio_source;
RTC_DCHECK_LE(mixAudioSourceList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
if (replaceWasMixed) {
if (!replaceFrame.muted) {
RampOut(*replaceFrame.frame);
}
rampOutList->push_back(replaceFrame);
RTC_DCHECK_LE(
rampOutList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
}
} else {
if (wasMixed) {
if (!muted) {
RampOut(*audio_frame);
}
rampOutList->push_back(FrameAndMuteInfo(audio_frame, muted));
RTC_DCHECK_LE(
rampOutList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
}
}
} else {
activeList.push_front(FrameAndMuteInfo(audio_frame, muted));
(*mixAudioSourceList)[audio_frame->id_] = *audio_source;
RTC_DCHECK_LE(mixAudioSourceList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
}
} else {
if (wasMixed) {
AudioSourceWithFrame* part_struct =
new AudioSourceWithFrame(*audio_source, audio_frame, muted);
passiveWasMixedList.push_back(part_struct);
} else if (mustAddToPassiveList) {
if (!muted) {
RampIn(*audio_frame);
}
AudioSourceWithFrame* part_struct =
new AudioSourceWithFrame(*audio_source, audio_frame, muted);
passiveWasNotMixedList.push_back(part_struct);
// Add frame to result vector for mixing.
bool is_mixed = false;
if (maxAudioFrameCounter > 0) {
--maxAudioFrameCounter;
if (!p.was_mixed_before_) {
RampIn(*p.audio_frame_);
}
result.emplace_back(p.audio_frame_, false);
is_mixed = true;
}
}
RTC_DCHECK_LE(activeList.size(), *maxAudioFrameCounter);
// At this point it is known which audio sources should be mixed. Transfer
// this information to this function's output parameters.
for (AudioFrameList::const_iterator iter = activeList.begin();
iter != activeList.end(); ++iter) {
mixList->push_back(*iter);
}
activeList.clear();
// Always mix a constant number of AudioFrames. If there aren't enough
// active audio sources, mix passive ones, starting with those that were
// mixed last iteration.
for (AudioSourceWithFrameList::const_iterator iter =
passiveWasMixedList.begin();
iter != passiveWasMixedList.end(); ++iter) {
if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
mixList->push_back(
FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted));
(*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source;
RTC_DCHECK_LE(mixAudioSourceList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
// Ramp out frames that were mixed before but are not mixed anymore.
if (p.was_mixed_before_ && !is_mixed) {
RampOut(*p.audio_frame_);
result.emplace_back(p.audio_frame_, false);
}
delete *iter;
p.audio_source_->_mixHistory->SetIsMixed(is_mixed);
}
// And finally the ones that have not been mixed for a while.
for (AudioSourceWithFrameList::const_iterator iter =
passiveWasNotMixedList.begin();
iter != passiveWasNotMixedList.end(); ++iter) {
if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
mixList->push_back(
FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted));
(*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source;
RTC_DCHECK_LE(mixAudioSourceList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
}
delete *iter;
}
RTC_DCHECK_GE(*maxAudioFrameCounter + mixListStartSize, mixList->size());
*maxAudioFrameCounter += mixListStartSize - mixList->size();
return result;
}
void NewAudioConferenceMixerImpl::GetAdditionalAudio(
@@ -633,38 +535,6 @@ void NewAudioConferenceMixerImpl::GetAdditionalAudio(
}
}
void NewAudioConferenceMixerImpl::UpdateMixedStatus(
const std::map<int, MixerAudioSource*>& mixedAudioSourcesMap) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"UpdateMixedStatus(mixedAudioSourcesMap)");
RTC_DCHECK_LE(mixedAudioSourcesMap.size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
// Loop through all audio_sources. If they are in the mix map they
// were mixed.
for (MixerAudioSourceList::const_iterator audio_source =
audio_source_list_.begin();
audio_source != audio_source_list_.end(); ++audio_source) {
bool isMixed = false;
for (std::map<int, MixerAudioSource*>::const_iterator it =
mixedAudioSourcesMap.begin();
it != mixedAudioSourcesMap.end(); ++it) {
if (it->second == *audio_source) {
isMixed = true;
break;
}
}
(*audio_source)->_mixHistory->SetIsMixed(isMixed);
}
}
void NewAudioConferenceMixerImpl::ClearAudioFrameList(
AudioFrameList* audioFrameList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"ClearAudioFrameList(audioFrameList)");
audioFrameList->clear();
}
bool NewAudioConferenceMixerImpl::IsAudioSourceInList(
const MixerAudioSource& audio_source,
const MixerAudioSourceList& audioSourceList) const {


@@ -84,17 +84,10 @@ class NewAudioConferenceMixerImpl : public NewAudioConferenceMixer {
int32_t SetOutputFrequency(const Frequency& frequency);
Frequency OutputFrequency() const;
// Fills mixList with the AudioFrames pointers that should be used when
// mixing.
// maxAudioFrameCounter is both input and output; it specifies how many
// more AudioFrames are allowed to be mixed.
// rampOutList contains AudioFrames corresponding to audio streams that
// used to be mixed but shouldn't be mixed any longer. These AudioFrames
// should be ramped out over this AudioFrame to avoid audio discontinuities.
void UpdateToMix(AudioFrameList* mixList,
AudioFrameList* rampOutList,
std::map<int, MixerAudioSource*>* mixAudioSourceList,
size_t* maxAudioFrameCounter) const;
// Compute what audio sources to mix from audio_source_list_. Ramp in
// and out. Update mixed status. maxAudioFrameCounter specifies how
// many participants are allowed to be mixed.
AudioFrameList UpdateToMix(size_t maxAudioFrameCounter) const;
// Return the lowest mixing frequency that can be used without having to
// downsample any audio.
@@ -105,11 +98,6 @@ class NewAudioConferenceMixerImpl : public NewAudioConferenceMixer {
// Return the AudioFrames that should be mixed anonymously.
void GetAdditionalAudio(AudioFrameList* additionalFramesList) const;
// Update the NewMixHistory of all MixerAudioSources. mixedAudioSourcesList
// should contain a map of MixerAudioSources that have been mixed.
void UpdateMixedStatus(
const std::map<int, MixerAudioSource*>& mixedAudioSourcesList) const;
// Clears audioFrameList and reclaims all memory associated with it.
void ClearAudioFrameList(AudioFrameList* audioFrameList) const;


@@ -9,9 +9,12 @@
*/
#include <memory>
#include <utility>
#include "testing/gmock/include/gmock/gmock.h"
#include "webrtc/modules/audio_conference_mixer/include/audio_conference_mixer.h"
#include "webrtc/modules/audio_conference_mixer/include/audio_conference_mixer_defines.h"
#include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h"
#include "webrtc/modules/audio_mixer/audio_mixer.h"
#include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h"
@@ -26,30 +29,154 @@ using testing::Return;
using webrtc::voe::AudioMixer;
namespace webrtc {
class MockAudioMixerParticipant : public MixerParticipant {
public:
MockAudioMixerParticipant()
: fake_audio_frame_info_(MixerParticipant::AudioFrameInfo::kNormal) {
ON_CALL(*this, GetAudioFrameWithMuted(_, _))
.WillByDefault(
Invoke(this, &MockAudioMixerParticipant::FakeAudioFrameWithMuted));
}
MOCK_METHOD2(GetAudioFrameWithMuted,
AudioFrameInfo(const int32_t id, AudioFrame* audio_frame));
MOCK_CONST_METHOD1(NeededFrequency, int32_t(const int32_t id));
AudioFrame* fake_frame() { return &fake_frame_; }
AudioFrameInfo fake_info() { return this->fake_audio_frame_info_; }
void set_fake_info(const AudioFrameInfo audio_frame_info) {
fake_audio_frame_info_ = audio_frame_info;
}
private:
AudioFrame fake_frame_;
AudioFrameInfo fake_audio_frame_info_;
AudioFrameInfo FakeAudioFrameWithMuted(const int32_t id,
AudioFrame* audio_frame) {
audio_frame->CopyFrom(*fake_frame());
return fake_info();
}
};
class MockMixerAudioSource : public MixerAudioSource {
public:
MockMixerAudioSource() {
MockMixerAudioSource()
: fake_audio_frame_info_(MixerAudioSource::AudioFrameInfo::kNormal) {
ON_CALL(*this, GetAudioFrameWithMuted(_, _))
.WillByDefault(
Invoke(this, &MockMixerAudioSource::FakeAudioFrameWithMuted));
}
MOCK_METHOD2(GetAudioFrameWithMuted,
AudioFrameWithInfo(const int32_t id, int sample_rate_hz));
AudioFrameWithMuted(const int32_t id, int sample_rate_hz));
MOCK_CONST_METHOD1(NeededFrequency, int32_t(const int32_t id));
AudioFrame* fake_frame() { return &fake_frame_; }
AudioFrameInfo fake_info() { return fake_audio_frame_info_; }
void set_fake_info(const AudioFrameInfo audio_frame_info) {
fake_audio_frame_info_ = audio_frame_info;
}
private:
AudioFrame fake_frame_;
AudioFrameWithInfo FakeAudioFrameWithMuted(const int32_t id,
int sample_rate_hz) {
AudioFrameInfo fake_audio_frame_info_;
AudioFrameWithMuted FakeAudioFrameWithMuted(const int32_t id,
int sample_rate_hz) {
return {
fake_frame(), // audio_frame_pointer
AudioFrameInfo::kNormal, // audio_frame_info
fake_frame(), // audio_frame_pointer
fake_info(), // audio_frame_info
};
}
};
// Keeps two identical sets of participants and two mixers to test
// that the same participants are chosen for mixing.
class CompareWithOldMixerTest : public testing::Test, AudioMixerOutputReceiver {
protected:
constexpr static int kId = 1;
constexpr static int kSampleRateHz = 32000;
CompareWithOldMixerTest()
: old_mixer_(AudioConferenceMixer::Create(kId)),
new_mixer_(NewAudioConferenceMixer::Create(kId)) {}
~CompareWithOldMixerTest() { Reset(); }
// Mixes with both mixers and compares results: resulting frames and
// mix statuses.
void MixAndCompare() {
old_mixer_->Process();
new_mixer_->Mix(&new_mixer_frame_);
EXPECT_EQ(0, memcmp(old_mixer_frame_.data_, new_mixer_frame_.data_,
sizeof(old_mixer_frame_.data_)));
for (auto& participant_pair : participants_) {
EXPECT_EQ(participant_pair.first->IsMixed(),
participant_pair.second->IsMixed());
}
}
std::unique_ptr<AudioFrame> last_mixed_audio_old() {
std::unique_ptr<AudioFrame> result(new AudioFrame);
result->CopyFrom(old_mixer_frame_);
return result;
}
void Reset() {
old_mixer_.reset(AudioConferenceMixer::Create(kId));
new_mixer_.reset(NewAudioConferenceMixer::Create(kId));
for (auto& participant_pair : participants_) {
delete participant_pair.first;
delete participant_pair.second;
}
participants_.clear();
}
void ResetFrame(AudioFrame* audio_frame) {
audio_frame->sample_rate_hz_ = kSampleRateHz;
audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
audio_frame->vad_activity_ = AudioFrame::kVadActive;
audio_frame->num_channels_ = 1;
}
void AddParticipant(AudioFrame* audio_frame,
MixerParticipant::AudioFrameInfo audio_frame_info) {
auto old_participant = new MockAudioMixerParticipant;
auto new_participant = new MockMixerAudioSource;
old_participant->fake_frame()->CopyFrom(*audio_frame);
new_participant->fake_frame()->CopyFrom(*audio_frame);
old_participant->set_fake_info(audio_frame_info);
MixerAudioSource::AudioFrameInfo new_audio_frame_info;
switch (audio_frame_info) {
case MixerParticipant::AudioFrameInfo::kNormal:
new_audio_frame_info = MixerAudioSource::AudioFrameInfo::kNormal;
break;
case MixerParticipant::AudioFrameInfo::kMuted:
new_audio_frame_info = MixerAudioSource::AudioFrameInfo::kMuted;
break;
default:
new_audio_frame_info = MixerAudioSource::AudioFrameInfo::kError;
}
new_participant->set_fake_info(new_audio_frame_info);
participants_.emplace_back(old_participant, new_participant);
}
void NewMixedAudio(const int32_t id,
const AudioFrame& generalAudioFrame,
const AudioFrame** uniqueAudioFrames,
const uint32_t size) override {
old_mixer_frame_.CopyFrom(generalAudioFrame);
}
AudioFrame old_mixer_frame_;
AudioFrame new_mixer_frame_;
std::vector<std::pair<MockAudioMixerParticipant*, MockMixerAudioSource*>>
participants_;
std::unique_ptr<AudioConferenceMixer> old_mixer_;
std::unique_ptr<NewAudioConferenceMixer> new_mixer_;
};
class BothMixersTest : public testing::Test {
protected:
BothMixersTest() {
@@ -246,4 +373,71 @@ TEST_F(BothMixersTest, CompareSecondFrameAudio) {
sizeof(mixing_round_frame.data_)));
}
TEST_F(CompareWithOldMixerTest, TwoParticipantsNormalFrames) {
Reset();
AudioFrame first_frame, second_frame;
ResetFrame(&first_frame);
ResetFrame(&second_frame);
first_frame.id_ = 1;
second_frame.id_ = 2;
AddParticipant(&first_frame, MixerParticipant::AudioFrameInfo::kNormal);
AddParticipant(&second_frame, MixerParticipant::AudioFrameInfo::kNormal);
for (int i = 0; i < 3; ++i) {
MixAndCompare();
}
}
TEST_F(CompareWithOldMixerTest, ThreeParticipantsDifferentFrames) {
Reset();
AudioFrame first_frame, second_frame, third_frame;
ResetFrame(&first_frame);
ResetFrame(&second_frame);
ResetFrame(&third_frame);
first_frame.id_ = 1;
second_frame.id_ = 2;
third_frame.id_ = 3;
second_frame.vad_activity_ = AudioFrame::kVadPassive;
AddParticipant(&first_frame, MixerParticipant::AudioFrameInfo::kNormal);
AddParticipant(&second_frame, MixerParticipant::AudioFrameInfo::kMuted);
AddParticipant(&third_frame, MixerParticipant::AudioFrameInfo::kMuted);
for (int i = 0; i < 3; ++i) {
MixAndCompare();
}
}
TEST_F(CompareWithOldMixerTest, ManyParticipantsDifferentFrames) {
Reset();
constexpr int num_participants = 20;
AudioFrame audio_frames[num_participants];
for (int i = 0; i < num_participants; ++i) {
ResetFrame(&audio_frames[i]);
audio_frames[i].id_ = 1;
audio_frames[i].data_[10] = 100 * (i % 5);
audio_frames[i].data_[100] = 100 * (i % 5);
if (i % 2 == 0) {
audio_frames[i].vad_activity_ = AudioFrame::kVadPassive;
}
}
for (int i = 0; i < num_participants; ++i) {
if (i % 2 == 0) {
AddParticipant(&audio_frames[i],
MixerParticipant::AudioFrameInfo::kMuted);
} else {
AddParticipant(&audio_frames[i],
MixerParticipant::AudioFrameInfo::kNormal);
}
MixAndCompare();
}
}
} // namespace webrtc