Removed callback in old AudioConferenceMixer.

OutputMixer and AudioConferenceMixer used to communicate via a callback: OutputMixer implemented the AudioMixerOutputReceiver interface, whose callback function NewMixedAudio delivered the mixed audio. That interface has been removed and replaced by a plain Mix() function on the new mixer, which writes the result directly into the caller's frame, so the mixed audio frame is copied one time fewer. I have also removed one forward declaration.
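
In condensed form, the control-flow change looks roughly like this (signatures copied from the diff below; surrounding setup elided):

// Before: push model. The mixer invoked a registered callback after each
// mix iteration, and the receiver copied the frame out.
void AudioMixer::NewMixedAudio(int32_t id,
                               const AudioFrame& generalAudioFrame,
                               const AudioFrame** uniqueAudioFrames,
                               uint32_t size) {
  _audioFrame.CopyFrom(generalAudioFrame);  // The copy that is now saved.
  _audioFrame.id_ = id;
}

// After: pull model. The caller hands the mixer the frame to mix into.
_mixerModule.Mix(&_audioFrame);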

Review-Url: https://codereview.webrtc.org/2111293003
Cr-Commit-Position: refs/heads/master@{#13550}
aleloi
2016-07-28 03:52:15 -07:00
committed by Commit bot
parent e6b60a4368
commit 09f45108c2
9 changed files with 510 additions and 326 deletions

View File

@ -20,6 +20,7 @@ group("modules") {
"audio_coding",
"audio_conference_mixer",
"audio_device",
"audio_mixer",
"audio_processing",
"bitrate_controller",
"desktop_capture",
@ -104,6 +105,7 @@ if (rtc_include_tests) {
"audio_coding/neteq/tools/packet_unittest.cc",
"audio_conference_mixer/test/audio_conference_mixer_unittest.cc",
"audio_device/fine_audio_buffer_unittest.cc",
"audio_mixer/test/audio_mixer_unittest.cc",
"audio_processing/aec/echo_cancellation_unittest.cc",
"audio_processing/aec/system_delay_unittest.cc",
"audio_processing/agc/agc_manager_direct_unittest.cc",
@ -380,6 +382,7 @@ if (rtc_include_tests) {
"audio_coding:webrtc_opus",
"audio_conference_mixer",
"audio_device",
"audio_mixer",
"audio_processing",
"audio_processing:audioproc_test_utils",
"bitrate_controller",

View File

@ -22,17 +22,6 @@
namespace webrtc {
namespace voe {
void AudioMixer::NewMixedAudio(int32_t id,
const AudioFrame& generalAudioFrame,
const AudioFrame** uniqueAudioFrames,
uint32_t size) {
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
"AudioMixer::NewMixedAudio(id=%d, size=%u)", id, size);
_audioFrame.CopyFrom(generalAudioFrame);
_audioFrame.id_ = id;
}
void AudioMixer::PlayNotification(int32_t id, uint32_t durationMs) {
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
"AudioMixer::PlayNotification(id=%d, durationMs=%d)", id,
@ -58,7 +47,7 @@ void AudioMixer::PlayFileEnded(int32_t id) {
void AudioMixer::RecordFileEnded(int32_t id) {
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
"AudioMixer::RecordFileEnded(id=%d)", id);
assert(id == _instanceId);
RTC_DCHECK_EQ(id, _instanceId);
rtc::CritScope cs(&_fileCritSect);
_outputFileRecording = false;
@ -93,12 +82,6 @@ AudioMixer::AudioMixer(uint32_t instanceId)
_outputFileRecording(false) {
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
"AudioMixer::AudioMixer() - ctor");
if (_mixerModule.RegisterMixedStreamCallback(this) == -1) {
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
"AudioMixer::AudioMixer() failed to register mixer"
"callbacks");
}
}
void AudioMixer::Destroy(AudioMixer*& mixer) {
@ -123,7 +106,6 @@ AudioMixer::~AudioMixer() {
_outputFileRecorderPtr = NULL;
}
}
_mixerModule.UnRegisterMixedStreamCallback();
delete &_mixerModule;
}
@ -167,18 +149,18 @@ int AudioMixer::DeRegisterExternalMediaProcessing() {
return 0;
}
int32_t AudioMixer::SetMixabilityStatus(MixerAudioSource& participant,
int32_t AudioMixer::SetMixabilityStatus(MixerAudioSource& audio_source,
bool mixable) {
return _mixerModule.SetMixabilityStatus(&participant, mixable);
return _mixerModule.SetMixabilityStatus(&audio_source, mixable);
}
int32_t AudioMixer::SetAnonymousMixabilityStatus(MixerAudioSource& participant,
int32_t AudioMixer::SetAnonymousMixabilityStatus(MixerAudioSource& audio_source,
bool mixable) {
return _mixerModule.SetAnonymousMixabilityStatus(&participant, mixable);
return _mixerModule.SetAnonymousMixabilityStatus(&audio_source, mixable);
}
int32_t AudioMixer::MixActiveChannels() {
_mixerModule.Process();
_mixerModule.Mix(&_audioFrame);
return 0;
}
@ -414,7 +396,7 @@ int32_t AudioMixer::DoOperationsOnCombinedSignal(bool feed_data_to_apm) {
// Pure stereo mode (we are receiving a stereo signal).
}
assert(_audioFrame.num_channels_ == 2);
RTC_DCHECK_EQ(_audioFrame.num_channels_, static_cast<size_t>(2));
AudioFrameOperations::Scale(_panLeft, _panRight, _audioFrame);
}

View File

@ -32,7 +32,7 @@ class Statistics;
// Note: this class is in the process of being rewritten and merged
// with AudioConferenceMixer. Expect inheritance chains to be changed,
// member functions removed or renamed.
class AudioMixer : public OldAudioMixerOutputReceiver, public FileCallback {
class AudioMixer : public FileCallback {
public:
static int32_t Create(AudioMixer*& mixer, uint32_t instanceId); // NOLINT
@ -52,11 +52,12 @@ class AudioMixer : public OldAudioMixerOutputReceiver, public FileCallback {
int32_t DoOperationsOnCombinedSignal(bool feed_data_to_apm);
int32_t SetMixabilityStatus(MixerAudioSource& participant, // NOLINT
int32_t SetMixabilityStatus(MixerAudioSource& audio_source, // NOLINT
bool mixable);
int32_t SetAnonymousMixabilityStatus(MixerAudioSource& participant, // NOLINT
bool mixable);
int32_t SetAnonymousMixabilityStatus(
MixerAudioSource& audio_source, // NOLINT
bool mixable);
int GetMixedAudio(int sample_rate_hz,
size_t num_channels,
@ -79,12 +80,6 @@ class AudioMixer : public OldAudioMixerOutputReceiver, public FileCallback {
virtual ~AudioMixer();
// from AudioMixerOutputReceiver
virtual void NewMixedAudio(int32_t id,
const AudioFrame& generalAudioFrame,
const AudioFrame** uniqueAudioFrames,
uint32_t size);
// For file recording
void PlayNotification(int32_t id, uint32_t durationMs);

View File

@ -64,21 +64,6 @@ class MixerAudioSource {
MixerAudioSource();
virtual ~MixerAudioSource();
};
class OldAudioMixerOutputReceiver {
public:
// This callback function provides the mixed audio for this mix iteration.
// Note that uniqueAudioFrames is an array of AudioFrame pointers with the
// size according to the size parameter.
virtual void NewMixedAudio(const int32_t id,
const AudioFrame& generalAudioFrame,
const AudioFrame** uniqueAudioFrames,
const uint32_t size) = 0;
protected:
OldAudioMixerOutputReceiver() {}
virtual ~OldAudioMixerOutputReceiver() {}
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_MIXER_INCLUDE_AUDIO_MIXER_DEFINES_H_

View File

@ -16,13 +16,11 @@
#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
class OldAudioMixerOutputReceiver;
class MixerAudioSource;
class Trace;
class NewAudioConferenceMixer : public Module {
public:
enum { kMaximumAmountOfMixedParticipants = 3 };
enum { kMaximumAmountOfMixedAudioSources = 3 };
enum Frequency {
kNbInHz = 8000,
kWbInHz = 16000,
@ -40,32 +38,32 @@ class NewAudioConferenceMixer : public Module {
int64_t TimeUntilNextProcess() override = 0;
void Process() override = 0;
// Register/unregister a callback class for receiving the mixed audio.
virtual int32_t RegisterMixedStreamCallback(
OldAudioMixerOutputReceiver* receiver) = 0;
virtual int32_t UnRegisterMixedStreamCallback() = 0;
// Add/remove participants as candidates for mixing.
virtual int32_t SetMixabilityStatus(MixerAudioSource* participant,
// Add/remove audio sources as candidates for mixing.
virtual int32_t SetMixabilityStatus(MixerAudioSource* audio_source,
bool mixable) = 0;
// Returns true if a participant is a candidate for mixing.
virtual bool MixabilityStatus(const MixerAudioSource& participant) const = 0;
// Returns true if an audio source is a candidate for mixing.
virtual bool MixabilityStatus(const MixerAudioSource& audio_source) const = 0;
// Inform the mixer that the participant should always be mixed and not
// count toward the number of mixed participants. Note that a participant
// Inform the mixer that the audio source should always be mixed and not
// count toward the number of mixed audio sources. Note that an audio source
// must have been added to the mixer (by calling SetMixabilityStatus())
// before this function can be successfully called.
virtual int32_t SetAnonymousMixabilityStatus(MixerAudioSource* participant,
virtual int32_t SetAnonymousMixabilityStatus(MixerAudioSource* audio_source,
bool mixable) = 0;
// Returns true if the participant is mixed anonymously.
virtual bool AnonymousMixabilityStatus(
const MixerAudioSource& participant) const = 0;
// Performs mixing by asking registered audio sources for audio.
// The mixed result is placed in the provided AudioFrame.
virtual void Mix(AudioFrame* audio_frame_for_mixing) = 0;
// Set the minimum sampling frequency at which to mix. The mixing algorithm
// may still choose to mix at a higher sampling frequency to avoid
// downsampling of audio contributing to the mixed audio.
virtual int32_t SetMinimumMixingFrequency(Frequency freq) = 0;
// Returns true if the audio source is mixed anonymously.
virtual bool AnonymousMixabilityStatus(
const MixerAudioSource& audio_source) const = 0;
protected:
NewAudioConferenceMixer() {}
};
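
A minimal caller-side sketch of the new pull-based interface declared above (a sketch only; |source| stands in for any MixerAudioSource implementation, and error handling is elided):

// Create a mixer, register |source| as a mixing candidate, and pull audio.
NewAudioConferenceMixer* mixer = NewAudioConferenceMixer::Create(/*id=*/1);
mixer->SetMixabilityStatus(&source, true);
AudioFrame mixed_frame;
mixer->Mix(&mixed_frame);  // The mixed result is written into |mixed_frame|.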

View File

@ -22,15 +22,15 @@
namespace webrtc {
namespace {
struct ParticipantFrameStruct {
ParticipantFrameStruct(MixerAudioSource* p, AudioFrame* a, bool m)
: participant(p), audioFrame(a), muted(m) {}
MixerAudioSource* participant;
AudioFrame* audioFrame;
struct AudioSourceWithFrame {
AudioSourceWithFrame(MixerAudioSource* p, AudioFrame* a, bool m)
: audio_source(p), audio_frame(a), muted(m) {}
MixerAudioSource* audio_source;
AudioFrame* audio_frame;
bool muted;
};
typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList;
typedef std::list<AudioSourceWithFrame*> AudioSourceWithFrameList;
// Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
// These effects are applied to |frame| itself prior to mixing. Assumes that
@ -39,7 +39,7 @@ typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList;
//
// TODO(andrew): consider not modifying |frame| here.
void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
assert(mixed_frame->num_channels_ >= frame->num_channels_);
RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_);
if (use_limiter) {
// Divide by two to avoid saturation in the mixing.
// This is only meaningful if the limiter will be used.
@ -47,7 +47,8 @@ void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
}
if (mixed_frame->num_channels_ > frame->num_channels_) {
// We only support mono-to-stereo.
assert(mixed_frame->num_channels_ == 2 && frame->num_channels_ == 1);
RTC_DCHECK_EQ(mixed_frame->num_channels_, static_cast<size_t>(2));
RTC_DCHECK_EQ(frame->num_channels_, static_cast<size_t>(1));
AudioFrameOperations::MonoToStereo(frame);
}
@ -111,13 +112,12 @@ NewAudioConferenceMixer* NewAudioConferenceMixer::Create(int id) {
NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id)
: _id(id),
_minimumMixingFreq(kLowestPossible),
_mixReceiver(NULL),
_outputFrequency(kDefaultFrequency),
_sampleSize(0),
_audioFramePool(NULL),
_participantList(),
_additionalParticipantList(),
_numMixedParticipants(0),
audio_source_list_(),
additional_audio_source_list_(),
num_mixed_audio_sources_(0),
use_limiter_(true),
_timeStamp(0),
_timeScheduler(kProcessPeriodicityInMs),
@ -171,7 +171,7 @@ bool NewAudioConferenceMixerImpl::Init() {
NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() {
MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool);
assert(_audioFramePool == NULL);
RTC_DCHECK_EQ(_audioFramePool, static_cast<MemoryPool<AudioFrame>*>(nullptr));
}
// Process should be called every kProcessPeriodicityInMs ms
@ -182,17 +182,22 @@ int64_t NewAudioConferenceMixerImpl::TimeUntilNextProcess() {
WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
"failed in TimeToNextUpdate() call");
// Sanity check
assert(false);
RTC_NOTREACHED();
return -1;
}
return timeUntilNextProcess;
}
void NewAudioConferenceMixerImpl::Process() {
size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants;
// TODO(aleloi) Remove this method.
RTC_NOTREACHED();
}
void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources;
{
CriticalSectionScoped cs(_crit.get());
assert(_processCalls == 0);
RTC_DCHECK_EQ(_processCalls, 0);
_processCalls++;
// Let the scheduler know that we are running one iteration.
@ -202,7 +207,7 @@ void NewAudioConferenceMixerImpl::Process() {
AudioFrameList mixList;
AudioFrameList rampOutList;
AudioFrameList additionalFramesList;
std::map<int, MixerAudioSource*> mixedParticipantsMap;
std::map<int, MixerAudioSource*> mixedAudioSourcesMap;
{
CriticalSectionScoped cs(_cbCrit.get());
@ -210,7 +215,7 @@ void NewAudioConferenceMixerImpl::Process() {
// SILK can run in 12 kHz and 24 kHz. These frequencies are not
// supported so use the closest higher frequency to not lose any
// information.
// TODO(henrike): this is probably more appropriate to do in
// TODO(aleloi): this is probably more appropriate to do in
// GetLowestMixingFrequency().
if (lowFreq == 12000) {
lowFreq = 16000;
@ -244,7 +249,7 @@ void NewAudioConferenceMixerImpl::Process() {
}
break;
default:
assert(false);
RTC_NOTREACHED();
CriticalSectionScoped cs(_crit.get());
_processCalls--;
@ -252,69 +257,49 @@ void NewAudioConferenceMixerImpl::Process() {
}
}
UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
&remainingParticipantsAllowedToMix);
UpdateToMix(&mixList, &rampOutList, &mixedAudioSourcesMap,
&remainingAudioSourcesAllowedToMix);
GetAdditionalAudio(&additionalFramesList);
UpdateMixedStatus(mixedParticipantsMap);
UpdateMixedStatus(mixedAudioSourcesMap);
}
// Get an AudioFrame for mixing from the memory pool.
AudioFrame* mixedAudio = NULL;
if (_audioFramePool->PopMemory(mixedAudio) == -1) {
WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
"failed PopMemory() call");
assert(false);
return;
}
// TODO(aleloi): it might be better to decide the number of channels
// with an API instead of dynamically.
// Find the max channels over all mixing lists.
const size_t num_mixed_channels = std::max(
MaxNumChannels(&mixList), std::max(MaxNumChannels(&additionalFramesList),
MaxNumChannels(&rampOutList)));
audio_frame_for_mixing->UpdateFrame(
-1, _timeStamp, NULL, 0, _outputFrequency, AudioFrame::kNormalSpeech,
AudioFrame::kVadPassive, num_mixed_channels);
_timeStamp += static_cast<uint32_t>(_sampleSize);
use_limiter_ = num_mixed_audio_sources_ > 1 &&
_outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz;
// We only use the limiter if it supports the output sample rate and
// we're actually mixing multiple streams.
MixFromList(audio_frame_for_mixing, mixList, _id, use_limiter_);
{
CriticalSectionScoped cs(_crit.get());
MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList);
MixAnonomouslyFromList(audio_frame_for_mixing, rampOutList);
// TODO(henrike): it might be better to decide the number of channels
// with an API instead of dynamically.
// Find the max channels over all mixing lists.
const size_t num_mixed_channels =
std::max(MaxNumChannels(&mixList),
std::max(MaxNumChannels(&additionalFramesList),
MaxNumChannels(&rampOutList)));
mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency,
AudioFrame::kNormalSpeech, AudioFrame::kVadPassive,
num_mixed_channels);
_timeStamp += static_cast<uint32_t>(_sampleSize);
// We only use the limiter if it supports the output sample rate and
// we're actually mixing multiple streams.
use_limiter_ = _numMixedParticipants > 1 &&
_outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz;
MixFromList(mixedAudio, mixList);
MixAnonomouslyFromList(mixedAudio, additionalFramesList);
MixAnonomouslyFromList(mixedAudio, rampOutList);
if (mixedAudio->samples_per_channel_ == 0) {
if (audio_frame_for_mixing->samples_per_channel_ == 0) {
// Nothing was mixed, set the audio samples to silence.
mixedAudio->samples_per_channel_ = _sampleSize;
mixedAudio->Mute();
audio_frame_for_mixing->samples_per_channel_ = _sampleSize;
audio_frame_for_mixing->Mute();
} else {
// Only call the limiter if we have something to mix.
LimitMixedAudio(mixedAudio);
LimitMixedAudio(audio_frame_for_mixing);
}
}
{
CriticalSectionScoped cs(_cbCrit.get());
if (_mixReceiver != NULL) {
const AudioFrame** dummy = NULL;
_mixReceiver->NewMixedAudio(_id, *mixedAudio, dummy, 0);
}
}
// Reclaim all outstanding memory.
_audioFramePool->PushMemory(mixedAudio);
ClearAudioFrameList(&mixList);
ClearAudioFrameList(&rampOutList);
ClearAudioFrameList(&additionalFramesList);
@ -325,25 +310,6 @@ void NewAudioConferenceMixerImpl::Process() {
return;
}
int32_t NewAudioConferenceMixerImpl::RegisterMixedStreamCallback(
OldAudioMixerOutputReceiver* mixReceiver) {
CriticalSectionScoped cs(_cbCrit.get());
if (_mixReceiver != NULL) {
return -1;
}
_mixReceiver = mixReceiver;
return 0;
}
int32_t NewAudioConferenceMixerImpl::UnRegisterMixedStreamCallback() {
CriticalSectionScoped cs(_cbCrit.get());
if (_mixReceiver == NULL) {
return -1;
}
_mixReceiver = NULL;
return 0;
}
int32_t NewAudioConferenceMixerImpl::SetOutputFrequency(
const Frequency& frequency) {
CriticalSectionScoped cs(_crit.get());
@ -362,17 +328,17 @@ NewAudioConferenceMixerImpl::OutputFrequency() const {
}
int32_t NewAudioConferenceMixerImpl::SetMixabilityStatus(
MixerAudioSource* participant,
MixerAudioSource* audio_source,
bool mixable) {
if (!mixable) {
// Anonymous participants are in a separate list. Make sure that the
// participant is in the _participantList if it is being mixed.
SetAnonymousMixabilityStatus(participant, false);
// Anonymous audio sources are in a separate list. Make sure that the
// audio source is in audio_source_list_ if it is being mixed.
SetAnonymousMixabilityStatus(audio_source, false);
}
size_t numMixedParticipants;
size_t numMixedAudioSources;
{
CriticalSectionScoped cs(_cbCrit.get());
const bool isMixed = IsParticipantInList(*participant, _participantList);
const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_);
// API must be called with a new state.
if (!(mixable ^ isMixed)) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
@ -381,75 +347,77 @@ int32_t NewAudioConferenceMixerImpl::SetMixabilityStatus(
}
bool success = false;
if (mixable) {
success = AddParticipantToList(participant, &_participantList);
success = AddAudioSourceToList(audio_source, &audio_source_list_);
} else {
success = RemoveParticipantFromList(participant, &_participantList);
success = RemoveAudioSourceFromList(audio_source, &audio_source_list_);
}
if (!success) {
WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
"failed to %s participant", mixable ? "add" : "remove");
assert(false);
"failed to %s audio_source", mixable ? "add" : "remove");
RTC_NOTREACHED();
return -1;
}
size_t numMixedNonAnonymous = _participantList.size();
if (numMixedNonAnonymous > kMaximumAmountOfMixedParticipants) {
numMixedNonAnonymous = kMaximumAmountOfMixedParticipants;
size_t numMixedNonAnonymous = audio_source_list_.size();
if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) {
numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources;
}
numMixedParticipants =
numMixedNonAnonymous + _additionalParticipantList.size();
numMixedAudioSources =
numMixedNonAnonymous + additional_audio_source_list_.size();
}
// A MixerAudioSource was added or removed. Make sure the scratch
// buffer is updated if necessary.
// Note: The scratch buffer may only be updated in Process().
CriticalSectionScoped cs(_crit.get());
_numMixedParticipants = numMixedParticipants;
num_mixed_audio_sources_ = numMixedAudioSources;
return 0;
}
bool NewAudioConferenceMixerImpl::MixabilityStatus(
const MixerAudioSource& participant) const {
const MixerAudioSource& audio_source) const {
CriticalSectionScoped cs(_cbCrit.get());
return IsParticipantInList(participant, _participantList);
return IsAudioSourceInList(audio_source, audio_source_list_);
}
int32_t NewAudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
MixerAudioSource* participant,
MixerAudioSource* audio_source,
bool anonymous) {
CriticalSectionScoped cs(_cbCrit.get());
if (IsParticipantInList(*participant, _additionalParticipantList)) {
if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) {
if (anonymous) {
return 0;
}
if (!RemoveParticipantFromList(participant, &_additionalParticipantList)) {
if (!RemoveAudioSourceFromList(audio_source,
&additional_audio_source_list_)) {
WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
"unable to remove participant from anonymous list");
assert(false);
"unable to remove audio_source from anonymous list");
RTC_NOTREACHED();
return -1;
}
return AddParticipantToList(participant, &_participantList) ? 0 : -1;
return AddAudioSourceToList(audio_source, &audio_source_list_) ? 0 : -1;
}
if (!anonymous) {
return 0;
}
const bool mixable =
RemoveParticipantFromList(participant, &_participantList);
RemoveAudioSourceFromList(audio_source, &audio_source_list_);
if (!mixable) {
WEBRTC_TRACE(
kTraceWarning, kTraceAudioMixerServer, _id,
"participant must be registered before turning it into anonymous");
"audio_source must be registered before turning it into anonymous");
// Setting anonymous status is only possible if MixerAudioSource is
// already registered.
return -1;
}
return AddParticipantToList(participant, &_additionalParticipantList) ? 0
: -1;
return AddAudioSourceToList(audio_source, &additional_audio_source_list_)
? 0
: -1;
}
bool NewAudioConferenceMixerImpl::AnonymousMixabilityStatus(
const MixerAudioSource& participant) const {
const MixerAudioSource& audio_source) const {
CriticalSectionScoped cs(_cbCrit.get());
return IsParticipantInList(participant, _additionalParticipantList);
return IsAudioSourceInList(audio_source, additional_audio_source_list_);
}
int32_t NewAudioConferenceMixerImpl::SetMinimumMixingFrequency(Frequency freq) {
@ -468,7 +436,7 @@ int32_t NewAudioConferenceMixerImpl::SetMinimumMixingFrequency(Frequency freq) {
} else {
WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
"SetMinimumMixingFrequency incorrect frequency: %i", freq);
assert(false);
RTC_NOTREACHED();
return -1;
}
}
@ -476,12 +444,12 @@ int32_t NewAudioConferenceMixerImpl::SetMinimumMixingFrequency(Frequency freq) {
// Check all AudioFrames that are to be mixed. The highest sampling frequency
// found is the lowest that can be used without losing information.
int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequency() const {
const int participantListFrequency =
GetLowestMixingFrequencyFromList(_participantList);
const int audioSourceListFrequency =
GetLowestMixingFrequencyFromList(audio_source_list_);
const int anonymousListFrequency =
GetLowestMixingFrequencyFromList(_additionalParticipantList);
const int highestFreq = (participantListFrequency > anonymousListFrequency)
? participantListFrequency
GetLowestMixingFrequencyFromList(additional_audio_source_list_);
const int highestFreq = (audioSourceListFrequency > anonymousListFrequency)
? audioSourceListFrequency
: anonymousListFrequency;
// Check if the user specified a lowest mixing frequency.
if (_minimumMixingFreq != kLowestPossible) {
@ -508,58 +476,58 @@ int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequencyFromList(
void NewAudioConferenceMixerImpl::UpdateToMix(
AudioFrameList* mixList,
AudioFrameList* rampOutList,
std::map<int, MixerAudioSource*>* mixParticipantList,
std::map<int, MixerAudioSource*>* mixAudioSourceList,
size_t* maxAudioFrameCounter) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"UpdateToMix(mixList,rampOutList,mixParticipantList,%d)",
"UpdateToMix(mixList,rampOutList,mixAudioSourceList,%d)",
*maxAudioFrameCounter);
const size_t mixListStartSize = mixList->size();
AudioFrameList activeList;
// Struct needed by the passive lists to keep track of which AudioFrame
// belongs to which MixerAudioSource.
ParticipantFrameStructList passiveWasNotMixedList;
ParticipantFrameStructList passiveWasMixedList;
for (MixerAudioSourceList::const_iterator participant =
_participantList.begin();
participant != _participantList.end(); ++participant) {
// Stop keeping track of passive participants if there are already
// enough participants available (they wont be mixed anyway).
AudioSourceWithFrameList passiveWasNotMixedList;
AudioSourceWithFrameList passiveWasMixedList;
for (MixerAudioSourceList::const_iterator audio_source =
audio_source_list_.begin();
audio_source != audio_source_list_.end(); ++audio_source) {
// Stop keeping track of passive audio sources if there are already
// enough audio sources available (they won't be mixed anyway).
bool mustAddToPassiveList =
(*maxAudioFrameCounter >
(activeList.size() + passiveWasMixedList.size() +
passiveWasNotMixedList.size()));
bool wasMixed = false;
wasMixed = (*participant)->_mixHistory->WasMixed();
wasMixed = (*audio_source)->_mixHistory->WasMixed();
AudioFrame* audioFrame = NULL;
if (_audioFramePool->PopMemory(audioFrame) == -1) {
WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
"failed PopMemory() call");
assert(false);
RTC_NOTREACHED();
return;
}
audioFrame->sample_rate_hz_ = _outputFrequency;
auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);
auto ret = (*audio_source)->GetAudioFrameWithMuted(_id, audioFrame);
if (ret == MixerAudioSource::AudioFrameInfo::kError) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
"failed to GetAudioFrameWithMuted() from participant");
"failed to GetAudioFrameWithMuted() from audio source");
_audioFramePool->PushMemory(audioFrame);
continue;
}
const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted);
if (_participantList.size() != 1) {
// TODO(wu): Issue 3390, add support for multiple participants case.
if (audio_source_list_.size() != 1) {
// TODO(wu): Issue 3390, add support for the multiple-audio-sources case.
audioFrame->ntp_time_ms_ = -1;
}
// TODO(henrike): this assert triggers in some test cases where SRTP is
// TODO(aleloi): this assert triggers in some test cases where SRTP is
// used, which prevents NetEQ from making a VAD decision. Temporarily
// disable this assert until the problem is fixed on a higher level.
// assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown);
// RTC_DCHECK_NE(audioFrame->vad_activity_, AudioFrame::kVadUnknown);
if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
"invalid VAD state from participant");
"invalid VAD state from audio source");
}
if (audioFrame->vad_activity_ == AudioFrame::kVadActive) {
@ -568,7 +536,7 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
}
if (activeList.size() >= *maxAudioFrameCounter) {
// There are already more active participants than should be
// There are already more active audio sources than should be
// mixed. Only keep the ones with the highest energy.
AudioFrameList::iterator replaceItem;
uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audioFrame);
@ -589,28 +557,30 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
bool replaceWasMixed = false;
std::map<int, MixerAudioSource*>::const_iterator it =
mixParticipantList->find(replaceFrame.frame->id_);
mixAudioSourceList->find(replaceFrame.frame->id_);
// When a frame is pushed to |activeList| it is also pushed
// to mixParticipantList with the frame's id. This means
// to mixAudioSourceList with the frame's id. This means
// that the Find call above should never fail.
assert(it != mixParticipantList->end());
RTC_DCHECK(it != mixAudioSourceList->end());
replaceWasMixed = it->second->_mixHistory->WasMixed();
mixParticipantList->erase(replaceFrame.frame->id_);
mixAudioSourceList->erase(replaceFrame.frame->id_);
activeList.erase(replaceItem);
activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
(*mixParticipantList)[audioFrame->id_] = *participant;
assert(mixParticipantList->size() <=
kMaximumAmountOfMixedParticipants);
(*mixAudioSourceList)[audioFrame->id_] = *audio_source;
RTC_DCHECK_LE(mixAudioSourceList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
if (replaceWasMixed) {
if (!replaceFrame.muted) {
RampOut(*replaceFrame.frame);
}
rampOutList->push_back(replaceFrame);
assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants);
RTC_DCHECK_LE(
rampOutList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
} else {
_audioFramePool->PushMemory(replaceFrame.frame);
}
@ -620,35 +590,38 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
RampOut(*audioFrame);
}
rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted));
assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants);
RTC_DCHECK_LE(
rampOutList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
} else {
_audioFramePool->PushMemory(audioFrame);
}
}
} else {
activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
(*mixParticipantList)[audioFrame->id_] = *participant;
assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants);
(*mixAudioSourceList)[audioFrame->id_] = *audio_source;
RTC_DCHECK_LE(mixAudioSourceList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
}
} else {
if (wasMixed) {
ParticipantFrameStruct* part_struct =
new ParticipantFrameStruct(*participant, audioFrame, muted);
AudioSourceWithFrame* part_struct =
new AudioSourceWithFrame(*audio_source, audioFrame, muted);
passiveWasMixedList.push_back(part_struct);
} else if (mustAddToPassiveList) {
if (!muted) {
RampIn(*audioFrame);
}
ParticipantFrameStruct* part_struct =
new ParticipantFrameStruct(*participant, audioFrame, muted);
AudioSourceWithFrame* part_struct =
new AudioSourceWithFrame(*audio_source, audioFrame, muted);
passiveWasNotMixedList.push_back(part_struct);
} else {
_audioFramePool->PushMemory(audioFrame);
}
}
}
assert(activeList.size() <= *maxAudioFrameCounter);
// At this point it is known which participants should be mixed. Transfer
RTC_DCHECK_LE(activeList.size(), *maxAudioFrameCounter);
// At this point it is known which audio sources should be mixed. Transfer
// this information to this functions output parameters.
for (AudioFrameList::const_iterator iter = activeList.begin();
iter != activeList.end(); ++iter) {
@ -656,34 +629,38 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
}
activeList.clear();
// Always mix a constant number of AudioFrames. If there aren't enough
// active participants mix passive ones. Starting with those that was mixed
// active audio sources, mix passive ones, starting with those that were mixed
// last iteration.
for (ParticipantFrameStructList::const_iterator iter =
for (AudioSourceWithFrameList::const_iterator iter =
passiveWasMixedList.begin();
iter != passiveWasMixedList.end(); ++iter) {
if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted));
(*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant;
assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants);
mixList->push_back(
FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted));
(*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source;
RTC_DCHECK_LE(mixAudioSourceList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
} else {
_audioFramePool->PushMemory((*iter)->audioFrame);
_audioFramePool->PushMemory((*iter)->audio_frame);
}
delete *iter;
}
// And finally the ones that have not been mixed for a while.
for (ParticipantFrameStructList::const_iterator iter =
for (AudioSourceWithFrameList::const_iterator iter =
passiveWasNotMixedList.begin();
iter != passiveWasNotMixedList.end(); ++iter) {
if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted));
(*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant;
assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants);
mixList->push_back(
FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted));
(*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source;
RTC_DCHECK_LE(mixAudioSourceList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
} else {
_audioFramePool->PushMemory((*iter)->audioFrame);
_audioFramePool->PushMemory((*iter)->audio_frame);
}
delete *iter;
}
assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size());
RTC_DCHECK_GE(*maxAudioFrameCounter + mixListStartSize, mixList->size());
*maxAudioFrameCounter += mixListStartSize - mixList->size();
}
@ -691,30 +668,30 @@ void NewAudioConferenceMixerImpl::GetAdditionalAudio(
AudioFrameList* additionalFramesList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"GetAdditionalAudio(additionalFramesList)");
// The GetAudioFrameWithMuted() callback may result in the participant being
// removed from additionalParticipantList_. If that happens it will
// invalidate any iterators. Create a copy of the participants list such
// that the list of participants can be traversed safely.
MixerAudioSourceList additionalParticipantList;
additionalParticipantList.insert(additionalParticipantList.begin(),
_additionalParticipantList.begin(),
_additionalParticipantList.end());
// The GetAudioFrameWithMuted() callback may result in the audio source being
// removed from additional_audio_source_list_. If that happens it will
// invalidate any iterators. Create a copy of the audio sources list such
// that the list of audio sources can be traversed safely.
MixerAudioSourceList additionalAudioSourceList;
additionalAudioSourceList.insert(additionalAudioSourceList.begin(),
additional_audio_source_list_.begin(),
additional_audio_source_list_.end());
for (MixerAudioSourceList::const_iterator participant =
additionalParticipantList.begin();
participant != additionalParticipantList.end(); ++participant) {
for (MixerAudioSourceList::const_iterator audio_source =
additionalAudioSourceList.begin();
audio_source != additionalAudioSourceList.end(); ++audio_source) {
AudioFrame* audioFrame = NULL;
if (_audioFramePool->PopMemory(audioFrame) == -1) {
WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
"failed PopMemory() call");
assert(false);
RTC_NOTREACHED();
return;
}
audioFrame->sample_rate_hz_ = _outputFrequency;
auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);
auto ret = (*audio_source)->GetAudioFrameWithMuted(_id, audioFrame);
if (ret == MixerAudioSource::AudioFrameInfo::kError) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
"failed to GetAudioFrameWithMuted() from participant");
"failed to GetAudioFrameWithMuted() from audio_source");
_audioFramePool->PushMemory(audioFrame);
continue;
}
@ -729,26 +706,27 @@ void NewAudioConferenceMixerImpl::GetAdditionalAudio(
}
void NewAudioConferenceMixerImpl::UpdateMixedStatus(
const std::map<int, MixerAudioSource*>& mixedParticipantsMap) const {
const std::map<int, MixerAudioSource*>& mixedAudioSourcesMap) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"UpdateMixedStatus(mixedParticipantsMap)");
assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants);
"UpdateMixedStatus(mixedAudioSourcesMap)");
RTC_DCHECK_LE(mixedAudioSourcesMap.size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
// Loop through all participants. If they are in the mix map they
// Loop through all audio sources. If they are in the mix map they
// were mixed.
for (MixerAudioSourceList::const_iterator participant =
_participantList.begin();
participant != _participantList.end(); ++participant) {
for (MixerAudioSourceList::const_iterator audio_source =
audio_source_list_.begin();
audio_source != audio_source_list_.end(); ++audio_source) {
bool isMixed = false;
for (std::map<int, MixerAudioSource*>::const_iterator it =
mixedParticipantsMap.begin();
it != mixedParticipantsMap.end(); ++it) {
if (it->second == *participant) {
mixedAudioSourcesMap.begin();
it != mixedAudioSourcesMap.end(); ++it) {
if (it->second == *audio_source) {
isMixed = true;
break;
}
}
(*participant)->_mixHistory->SetIsMixed(isMixed);
(*audio_source)->_mixHistory->SetIsMixed(isMixed);
}
}
@ -763,42 +741,42 @@ void NewAudioConferenceMixerImpl::ClearAudioFrameList(
audioFrameList->clear();
}
bool NewAudioConferenceMixerImpl::IsParticipantInList(
const MixerAudioSource& participant,
const MixerAudioSourceList& participantList) const {
bool NewAudioConferenceMixerImpl::IsAudioSourceInList(
const MixerAudioSource& audio_source,
const MixerAudioSourceList& audioSourceList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"IsParticipantInList(participant,participantList)");
for (MixerAudioSourceList::const_iterator iter = participantList.begin();
iter != participantList.end(); ++iter) {
if (&participant == *iter) {
"IsAudioSourceInList(audio_source,audioSourceList)");
for (MixerAudioSourceList::const_iterator iter = audioSourceList.begin();
iter != audioSourceList.end(); ++iter) {
if (&audio_source == *iter) {
return true;
}
}
return false;
}
bool NewAudioConferenceMixerImpl::AddParticipantToList(
MixerAudioSource* participant,
MixerAudioSourceList* participantList) const {
bool NewAudioConferenceMixerImpl::AddAudioSourceToList(
MixerAudioSource* audio_source,
MixerAudioSourceList* audioSourceList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"AddParticipantToList(participant, participantList)");
participantList->push_back(participant);
"AddAudioSourceToList(audio_source, audioSourceList)");
audioSourceList->push_back(audio_source);
// Make sure that the mixed status is correct for new MixerAudioSource.
participant->_mixHistory->ResetMixedStatus();
audio_source->_mixHistory->ResetMixedStatus();
return true;
}
bool NewAudioConferenceMixerImpl::RemoveParticipantFromList(
MixerAudioSource* participant,
MixerAudioSourceList* participantList) const {
bool NewAudioConferenceMixerImpl::RemoveAudioSourceFromList(
MixerAudioSource* audio_source,
MixerAudioSourceList* audioSourceList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"RemoveParticipantFromList(participant, participantList)");
for (MixerAudioSourceList::iterator iter = participantList->begin();
iter != participantList->end(); ++iter) {
if (*iter == participant) {
participantList->erase(iter);
// Participant is no longer mixed, reset to default.
participant->_mixHistory->ResetMixedStatus();
"RemoveAudioSourceFromList(audio_source, audioSourceList)");
for (MixerAudioSourceList::iterator iter = audioSourceList->begin();
iter != audioSourceList->end(); ++iter) {
if (*iter == audio_source) {
audioSourceList->erase(iter);
// AudioSource is no longer mixed, reset to default.
audio_source->_mixHistory->ResetMixedStatus();
return true;
}
}
@ -807,15 +785,17 @@ bool NewAudioConferenceMixerImpl::RemoveParticipantFromList(
int32_t NewAudioConferenceMixerImpl::MixFromList(
AudioFrame* mixedAudio,
const AudioFrameList& audioFrameList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
const AudioFrameList& audioFrameList,
int32_t id,
bool use_limiter) {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id,
"MixFromList(mixedAudio, audioFrameList)");
if (audioFrameList.empty())
return 0;
uint32_t position = 0;
if (_numMixedParticipants == 1) {
if (audioFrameList.size() == 1) {
mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_;
mixedAudio->elapsed_time_ms_ =
audioFrameList.front().frame->elapsed_time_ms_;
@ -828,17 +808,17 @@ int32_t NewAudioConferenceMixerImpl::MixFromList(
for (AudioFrameList::const_iterator iter = audioFrameList.begin();
iter != audioFrameList.end(); ++iter) {
if (position >= kMaximumAmountOfMixedParticipants) {
if (position >= kMaximumAmountOfMixedAudioSources) {
WEBRTC_TRACE(
kTraceMemory, kTraceAudioMixerServer, _id,
"Trying to mix more than max amount of mixed participants:%d!",
kMaximumAmountOfMixedParticipants);
kTraceMemory, kTraceAudioMixerServer, id,
"Trying to mix more than max amount of mixed audio sources:%d!",
kMaximumAmountOfMixedAudioSources);
// Assert and avoid crash
assert(false);
RTC_NOTREACHED();
position = 0;
}
if (!iter->muted) {
MixFrames(mixedAudio, iter->frame, use_limiter_);
MixFrames(mixedAudio, iter->frame, use_limiter);
}
position++;
@ -880,7 +860,7 @@ bool NewAudioConferenceMixerImpl::LimitMixedAudio(
//
// It's possible to apply the gain in the AGC (with a target level of 0 dbFS
// and compression gain of 6 dB). However, in the transition frame when this
// is enabled (moving from one to two participants) it has the potential to
// is enabled (moving from one to two audio sources) it has the potential to
// create discontinuities in the mixed frame.
//
// Instead we double the frame (with addition since left-shifting a
@ -890,7 +870,7 @@ bool NewAudioConferenceMixerImpl::LimitMixedAudio(
if (error != _limiter->kNoError) {
WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
"Error from AudioProcessing: %d", error);
assert(false);
RTC_NOTREACHED();
return false;
}
return true;
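
The comment above doubles the frame with addition rather than a left shift; a hypothetical standalone helper (not part of this CL) showing the idea, since left-shifting a negative int16_t is undefined behavior in C++ and addition avoids that:

#include <cstddef>
#include <cstdint>

// Sketch only: double each sample by self-addition, with saturation to the
// int16_t range. data[i] << 1 would be undefined for negative samples.
void DoubleSamplesSaturated(int16_t* data, size_t num_samples) {
  for (size_t i = 0; i < num_samples; ++i) {
    int32_t doubled = static_cast<int32_t>(data[i]) + data[i];
    if (doubled > 32767) doubled = 32767;
    if (doubled < -32768) doubled = -32768;
    data[i] = static_cast<int16_t>(doubled);
  }
}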

View File

@ -40,10 +40,10 @@ class NewMixHistory {
NewMixHistory();
~NewMixHistory();
// Returns true if the participant is being mixed.
// Returns true if the audio source is being mixed.
bool IsMixed() const;
// Returns true if the participant was mixed previous mix
// Returns true if the audio source was mixed in the previous mix
// iteration.
bool WasMixed() const;
@ -72,17 +72,15 @@ class NewAudioConferenceMixerImpl : public NewAudioConferenceMixer {
void Process() override;
// NewAudioConferenceMixer functions
int32_t RegisterMixedStreamCallback(
OldAudioMixerOutputReceiver* mixReceiver) override;
int32_t UnRegisterMixedStreamCallback() override;
int32_t SetMixabilityStatus(MixerAudioSource* participant,
int32_t SetMixabilityStatus(MixerAudioSource* audio_source,
bool mixable) override;
bool MixabilityStatus(const MixerAudioSource& participant) const override;
int32_t SetMinimumMixingFrequency(Frequency freq) override;
int32_t SetAnonymousMixabilityStatus(MixerAudioSource* participant,
bool MixabilityStatus(const MixerAudioSource& audio_source) const override;
int32_t SetAnonymousMixabilityStatus(MixerAudioSource* audio_source,
bool mixable) override;
void Mix(AudioFrame* audio_frame_for_mixing) override;
int32_t SetMinimumMixingFrequency(Frequency freq) override;
bool AnonymousMixabilityStatus(
const MixerAudioSource& participant) const override;
const MixerAudioSource& audio_source) const override;
private:
enum { DEFAULT_AUDIO_FRAME_POOLSIZE = 50 };
@ -100,7 +98,7 @@ class NewAudioConferenceMixerImpl : public NewAudioConferenceMixer {
// should be ramped out over this AudioFrame to avoid audio discontinuities.
void UpdateToMix(AudioFrameList* mixList,
AudioFrameList* rampOutList,
std::map<int, MixerAudioSource*>* mixParticipantList,
std::map<int, MixerAudioSource*>* mixAudioSourceList,
size_t* maxAudioFrameCounter) const;
// Return the lowest mixing frequency that can be used without having to
@ -112,29 +110,31 @@ class NewAudioConferenceMixerImpl : public NewAudioConferenceMixer {
// Return the AudioFrames that should be mixed anonymously.
void GetAdditionalAudio(AudioFrameList* additionalFramesList) const;
// Update the NewMixHistory of all MixerAudioSources. mixedParticipantsList
// Update the NewMixHistory of all MixerAudioSources. mixedAudioSourcesList
// should contain a map of MixerAudioSources that have been mixed.
void UpdateMixedStatus(
const std::map<int, MixerAudioSource*>& mixedParticipantsList) const;
const std::map<int, MixerAudioSource*>& mixedAudioSourcesList) const;
// Clears audioFrameList and reclaims all memory associated with it.
void ClearAudioFrameList(AudioFrameList* audioFrameList) const;
// This function returns true if it finds the MixerAudioSource in the
// specified list of MixerAudioSources.
bool IsParticipantInList(const MixerAudioSource& participant,
const MixerAudioSourceList& participantList) const;
bool IsAudioSourceInList(const MixerAudioSource& audio_source,
const MixerAudioSourceList& audioSourceList) const;
// Add/remove the MixerAudioSource to the specified
// MixerAudioSource list.
bool AddParticipantToList(MixerAudioSource* participant,
MixerAudioSourceList* participantList) const;
bool RemoveParticipantFromList(MixerAudioSource* removeParticipant,
MixerAudioSourceList* participantList) const;
bool AddAudioSourceToList(MixerAudioSource* audio_source,
MixerAudioSourceList* audioSourceList) const;
bool RemoveAudioSourceFromList(MixerAudioSource* removeAudioSource,
MixerAudioSourceList* audioSourceList) const;
// Mix the AudioFrames stored in audioFrameList into mixedAudio.
int32_t MixFromList(AudioFrame* mixedAudio,
const AudioFrameList& audioFrameList) const;
static int32_t MixFromList(AudioFrame* mixedAudio,
const AudioFrameList& audioFrameList,
int32_t id,
bool use_limiter);
// Mix the AudioFrames stored in audioFrameList into mixedAudio. No
// record will be kept of this mix (e.g. the corresponding MixerAudioSources
@ -151,9 +151,6 @@ class NewAudioConferenceMixerImpl : public NewAudioConferenceMixer {
Frequency _minimumMixingFreq;
// Mix result callback
OldAudioMixerOutputReceiver* _mixReceiver;
// The current sample frequency and sample size when mixing.
Frequency _outputFrequency;
size_t _sampleSize;
@ -161,12 +158,12 @@ class NewAudioConferenceMixerImpl : public NewAudioConferenceMixer {
// Memory pool to avoid allocating/deallocating AudioFrames
MemoryPool<AudioFrame>* _audioFramePool;
// List of all participants. Note all lists are disjunct
MixerAudioSourceList _participantList; // May be mixed.
// List of all audio sources. Note that all lists are disjoint.
MixerAudioSourceList audio_source_list_; // May be mixed.
// Always mixed, anonymously.
MixerAudioSourceList _additionalParticipantList;
MixerAudioSourceList additional_audio_source_list_;
size_t _numMixedParticipants;
size_t num_mixed_audio_sources_;
// Determines if we will use a limiter for clipping protection during
// mixing.
bool use_limiter_;

View File

@ -0,0 +1,243 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <memory>
#include "testing/gmock/include/gmock/gmock.h"
#include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h"
#include "webrtc/modules/audio_mixer/audio_mixer.h"
#include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h"
#include "webrtc/modules/audio_mixer/include/new_audio_conference_mixer.h"
#include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h"
using testing::_;
using testing::AtLeast;
using testing::Invoke;
using testing::Return;
using webrtc::voe::AudioMixer;
namespace webrtc {
class MockMixerAudioSource : public MixerAudioSource {
public:
MockMixerAudioSource() {
ON_CALL(*this, GetAudioFrame(_, _))
.WillByDefault(Invoke(this, &MockMixerAudioSource::FakeAudioFrame));
}
MOCK_METHOD2(GetAudioFrame,
int32_t(const int32_t id, AudioFrame* audio_frame));
MOCK_CONST_METHOD1(NeededFrequency, int32_t(const int32_t id));
AudioFrame* fake_frame() { return &fake_frame_; }
private:
AudioFrame fake_frame_;
int32_t FakeAudioFrame(const int32_t id, AudioFrame* audio_frame) {
audio_frame->CopyFrom(fake_frame_);
return 0;
}
};
class BothMixersTest : public testing::Test {
protected:
BothMixersTest() {
// Create an AudioMixer (the former OutputMixer).
AudioMixer::Create(audio_mixer_, kId);
// Create one mixer participant and add it to the mixer.
EXPECT_EQ(0, audio_mixer_->SetMixabilityStatus(participant_, true));
// Each iteration, the participant will return a frame with this content:
participant_.fake_frame()->id_ = 1;
participant_.fake_frame()->sample_rate_hz_ = kSampleRateHz;
participant_.fake_frame()->speech_type_ = AudioFrame::kNormalSpeech;
participant_.fake_frame()->vad_activity_ = AudioFrame::kVadActive;
participant_.fake_frame()->num_channels_ = 1;
// We modify three samples within the RampIn window and one sample
// outside of it.
participant_.fake_frame()->data_[10] = 100;
participant_.fake_frame()->data_[20] = -200;
participant_.fake_frame()->data_[30] = 300;
participant_.fake_frame()->data_[90] = -400;
// Frame duration 10ms.
participant_.fake_frame()->samples_per_channel_ = kSampleRateHz / 100;
EXPECT_CALL(participant_, NeededFrequency(_))
.WillRepeatedly(Return(kSampleRateHz));
}
~BothMixersTest() { AudioMixer::Destroy(audio_mixer_); }
// Mark the participant as not mixed in the previous round.
void ResetAudioSource() { participant_._mixHistory->SetIsMixed(false); }
AudioMixer* audio_mixer_;
MockMixerAudioSource participant_;
AudioFrame mixing_round_frame, mixed_results_frame_;
constexpr static int kSampleRateHz = 48000;
constexpr static int kId = 1;
};
TEST(AudioMixer, AnonymousAndNamed) {
constexpr int kId = 1;
// Should not matter even if there are more participants than
// kMaximumAmountOfMixedAudioSources.
constexpr int kNamed =
NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 1;
constexpr int kAnonymous =
NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 1;
std::unique_ptr<NewAudioConferenceMixer> mixer(
NewAudioConferenceMixer::Create(kId));
MockMixerAudioSource named[kNamed];
MockMixerAudioSource anonymous[kAnonymous];
for (int i = 0; i < kNamed; ++i) {
EXPECT_EQ(0, mixer->SetMixabilityStatus(&named[i], true));
EXPECT_TRUE(mixer->MixabilityStatus(named[i]));
}
for (int i = 0; i < kAnonymous; ++i) {
// AudioSource must be registered before turning it into anonymous.
EXPECT_EQ(-1, mixer->SetAnonymousMixabilityStatus(&anonymous[i], true));
EXPECT_EQ(0, mixer->SetMixabilityStatus(&anonymous[i], true));
EXPECT_TRUE(mixer->MixabilityStatus(anonymous[i]));
EXPECT_FALSE(mixer->AnonymousMixabilityStatus(anonymous[i]));
EXPECT_EQ(0, mixer->SetAnonymousMixabilityStatus(&anonymous[i], true));
EXPECT_TRUE(mixer->AnonymousMixabilityStatus(anonymous[i]));
// Anonymous participants are no longer reported by MixabilityStatus.
EXPECT_FALSE(mixer->MixabilityStatus(anonymous[i]));
}
for (int i = 0; i < kNamed; ++i) {
EXPECT_EQ(0, mixer->SetMixabilityStatus(&named[i], false));
EXPECT_FALSE(mixer->MixabilityStatus(named[i]));
}
for (int i = 0; i < kAnonymous - 1; i++) {
EXPECT_EQ(0, mixer->SetAnonymousMixabilityStatus(&anonymous[i], false));
EXPECT_FALSE(mixer->AnonymousMixabilityStatus(anonymous[i]));
// SetAnonymousMixabilityStatus(anonymous, false) moves anonymous to the
// named group.
EXPECT_TRUE(mixer->MixabilityStatus(anonymous[i]));
}
// SetMixabilityStatus(anonymous, false) will remove anonymous from both
// anonymous and named groups.
EXPECT_EQ(0, mixer->SetMixabilityStatus(&anonymous[kAnonymous - 1], false));
EXPECT_FALSE(mixer->AnonymousMixabilityStatus(anonymous[kAnonymous - 1]));
EXPECT_FALSE(mixer->MixabilityStatus(anonymous[kAnonymous - 1]));
}
TEST(AudioMixer, LargestEnergyVadActiveMixed) {
const int kId = 1;
const int kAudioSources =
NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 3;
const int kSampleRateHz = 32000;
std::unique_ptr<NewAudioConferenceMixer> mixer(
NewAudioConferenceMixer::Create(kId));
MockMixerAudioSource participants[kAudioSources];
for (int i = 0; i < kAudioSources; ++i) {
participants[i].fake_frame()->id_ = i;
participants[i].fake_frame()->sample_rate_hz_ = kSampleRateHz;
participants[i].fake_frame()->speech_type_ = AudioFrame::kNormalSpeech;
participants[i].fake_frame()->vad_activity_ = AudioFrame::kVadActive;
participants[i].fake_frame()->num_channels_ = 1;
// Frame duration 10ms.
participants[i].fake_frame()->samples_per_channel_ = kSampleRateHz / 100;
// We set the 80-th sample value since the first 80 samples may be
// modified by the ramp-in window.
participants[i].fake_frame()->data_[80] = i;
EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true));
EXPECT_CALL(participants[i], GetAudioFrame(_, _)).Times(AtLeast(1));
EXPECT_CALL(participants[i], NeededFrequency(_))
.WillRepeatedly(Return(kSampleRateHz));
}
// Last participant gives audio frame with passive VAD, although it has the
// largest energy.
participants[kAudioSources - 1].fake_frame()->vad_activity_ =
AudioFrame::kVadPassive;
AudioFrame audio_frame;
mixer->Mix(&audio_frame);
for (int i = 0; i < kAudioSources; ++i) {
bool is_mixed = participants[i].IsMixed();
if (i == kAudioSources - 1 ||
i < kAudioSources - 1 -
NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources) {
EXPECT_FALSE(is_mixed) << "Mixing status of AudioSource #" << i
<< " wrong.";
} else {
EXPECT_TRUE(is_mixed) << "Mixing status of AudioSource #" << i
<< " wrong.";
}
}
}
TEST_F(BothMixersTest, CompareInitialFrameAudio) {
EXPECT_CALL(participant_, GetAudioFrame(_, _)).Times(AtLeast(1));
// Make sure the participant is marked as 'non-mixed' so that it is
// ramped in next round.
ResetAudioSource();
// Construct the expected sound for the first mixing round.
mixing_round_frame.CopyFrom(*participant_.fake_frame());
RampIn(mixing_round_frame);
// Mix frames and put the result into a frame.
audio_mixer_->MixActiveChannels();
audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_);
// Compare the received frame with the expected.
EXPECT_EQ(mixing_round_frame.sample_rate_hz_,
mixed_results_frame_.sample_rate_hz_);
EXPECT_EQ(mixing_round_frame.num_channels_,
mixed_results_frame_.num_channels_);
EXPECT_EQ(mixing_round_frame.samples_per_channel_,
mixed_results_frame_.samples_per_channel_);
EXPECT_EQ(0, memcmp(mixing_round_frame.data_, mixed_results_frame_.data_,
sizeof(mixing_round_frame.data_)));
}
TEST_F(BothMixersTest, CompareSecondFrameAudio) {
EXPECT_CALL(participant_, GetAudioFrame(_, _)).Times(AtLeast(1));
// Make sure the participant is marked as 'non-mixed' so that it is
// ramped in next round.
ResetAudioSource();
// Do one mixing iteration.
audio_mixer_->MixActiveChannels();
// Mix frames a second time and compare with the expected frame
// (which is the participant's frame).
audio_mixer_->MixActiveChannels();
audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_);
EXPECT_EQ(0,
memcmp(participant_.fake_frame()->data_, mixed_results_frame_.data_,
sizeof(mixing_round_frame.data_)));
}
} // namespace webrtc

View File

@ -233,6 +233,7 @@
'audio_coding/neteq/tools/packet_unittest.cc',
'audio_conference_mixer/test/audio_conference_mixer_unittest.cc',
'audio_device/fine_audio_buffer_unittest.cc',
'audio_mixer/test/audio_mixer_unittest.cc',
'audio_processing/aec/echo_cancellation_unittest.cc',
'audio_processing/aec/system_delay_unittest.cc',
'audio_processing/agc/agc_manager_direct_unittest.cc',