Stylizing AudioConferenceMixer.

Cleaning up the AudioConferenceMixer APIs to match the Chromium style guide.

Main changes (illustrated by the sketch below):
1. Change all mutable reference parameters to pointers.
2. Add const to all non-mutable reference parameters.
3. Add const to as many methods as possible.
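
For illustration, here is the general shape of the change on a hypothetical
interface (a minimal sketch: Widget and the method names are invented for
this example and do not appear in the CL):

// Before: mutable reference parameters and a bool out-parameter.
virtual int32_t SetWidgetStatus(Widget& widget, bool enabled) = 0;
virtual int32_t WidgetStatus(Widget& widget, bool& enabled) = 0;

// After: a pointer where the callee may modify its argument (1), a const
// reference where it may not (2), and const on methods that leave the
// object unchanged (3); the out-parameter becomes the return value.
virtual int32_t SetWidgetStatus(Widget* widget, bool enabled) = 0;
virtual bool WidgetStatus(const Widget& widget) const = 0;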

BUG=
R=andrew@webrtc.org

Review URL: https://codereview.webrtc.org/1311733003 .

Cr-Commit-Position: refs/heads/master@{#9821}
Author: minyuel
Date: 2015-08-31 16:04:32 +02:00
Parent: ca28fdcf9f
Commit: 0f4b3731c3
8 changed files with 172 additions and 178 deletions

File 1 of 8

@@ -44,25 +44,25 @@ public:
// Register/unregister a callback class for receiving the mixed audio.
virtual int32_t RegisterMixedStreamCallback(
AudioMixerOutputReceiver& receiver) = 0;
AudioMixerOutputReceiver* receiver) = 0;
virtual int32_t UnRegisterMixedStreamCallback() = 0;
// Add/remove participants as candidates for mixing.
virtual int32_t SetMixabilityStatus(MixerParticipant& participant,
virtual int32_t SetMixabilityStatus(MixerParticipant* participant,
bool mixable) = 0;
// mixable is set to true if a participant is a candidate for mixing.
virtual int32_t MixabilityStatus(MixerParticipant& participant,
bool& mixable) = 0;
// Returns true if a participant is a candidate for mixing.
virtual bool MixabilityStatus(
const MixerParticipant& participant) const = 0;
// Inform the mixer that the participant should always be mixed and not
// count toward the number of mixed participants. Note that a participant
// must have been added to the mixer (by calling SetMixabilityStatus())
// before this function can be successfully called.
virtual int32_t SetAnonymousMixabilityStatus(MixerParticipant& participant,
const bool mixable) = 0;
// mixable is set to true if the participant is mixed anonymously.
virtual int32_t AnonymousMixabilityStatus(MixerParticipant& participant,
bool& mixable) = 0;
virtual int32_t SetAnonymousMixabilityStatus(
MixerParticipant* participant, bool mixable) = 0;
// Returns true if the participant is mixed anonymously.
virtual bool AnonymousMixabilityStatus(
const MixerParticipant& participant) const = 0;
// Set the minimum sampling frequency at which to mix. The mixing algorithm
// may still choose to mix at a higher sampling frequency to avoid
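
Under the new signatures, callers pass a participant by address when the
mixer may modify it and read status directly from the return value. A usage
sketch (mixer and participant are assumed to exist; per the comment above,
SetAnonymousMixabilityStatus() only succeeds after SetMixabilityStatus()):

mixer->SetMixabilityStatus(&participant, true);
if (mixer->MixabilityStatus(participant)) {
  // Participant is a candidate for mixing; optionally mark it anonymous so
  // it is always mixed without counting toward the mixed-participant limit.
  mixer->SetAnonymousMixabilityStatus(&participant, true);
}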

File 2 of 8

@@ -25,14 +25,15 @@ public:
// audio every time it's called.
//
// If it returns -1, the frame will not be added to the mix.
virtual int32_t GetAudioFrame(const int32_t id, AudioFrame& audioFrame) = 0;
virtual int32_t GetAudioFrame(int32_t id,
AudioFrame* audioFrame) = 0;
// mixed will be set to true if the participant was mixed this mix iteration
int32_t IsMixed(bool& mixed) const;
// Returns true if the participant was mixed this mix iteration.
bool IsMixed() const;
// This function specifies the sampling frequency needed for the AudioFrame
// for future GetAudioFrame(..) calls.
virtual int32_t NeededFrequency(const int32_t id) = 0;
virtual int32_t NeededFrequency(int32_t id) const = 0;
MixHistory* _mixHistory;
protected:
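
Under the revised MixerParticipant interface, an implementation fills the
frame through the pointer and answers NeededFrequency() from a const method.
A minimal sketch (SilentParticipant is hypothetical; AudioFrame::Mute() is
the same helper used elsewhere in this CL):

class SilentParticipant : public MixerParticipant {
public:
  int32_t GetAudioFrame(int32_t id, AudioFrame* audioFrame) override {
    // Produce silence; a real participant would decode or fetch audio here.
    audioFrame->Mute();
    return 0;
  }
  int32_t NeededFrequency(int32_t id) const override {
    return 8000;  // Silence carries no information; the lowest rate is fine.
  }
};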

File 3 of 8

@@ -70,8 +70,8 @@ MixerParticipant::~MixerParticipant() {
delete _mixHistory;
}
int32_t MixerParticipant::IsMixed(bool& mixed) const {
return _mixHistory->IsMixed(mixed);
bool MixerParticipant::IsMixed() const {
return _mixHistory->IsMixed();
}
MixHistory::MixHistory()
@@ -81,15 +81,14 @@ MixHistory::MixHistory()
MixHistory::~MixHistory() {
}
int32_t MixHistory::IsMixed(bool& mixed) const {
mixed = _isMixed;
return 0;
bool MixHistory::IsMixed() const {
return _isMixed;
}
int32_t MixHistory::WasMixed(bool& wasMixed) const {
bool MixHistory::WasMixed() const {
// From the perspective of AudioConferenceMixerImpl, "was mixed" and
// "is mixed" are the same; this function serves that perspective.
return IsMixed(wasMixed);
return IsMixed();
}
int32_t MixHistory::SetIsMixed(const bool mixed) {
@@ -256,7 +255,7 @@ int32_t AudioConferenceMixerImpl::Process() {
}
UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
remainingParticipantsAllowedToMix);
&remainingParticipantsAllowedToMix);
GetAdditionalAudio(&additionalFramesList);
UpdateMixedStatus(mixedParticipantsMap);
@@ -294,9 +293,9 @@ int32_t AudioConferenceMixerImpl::Process() {
use_limiter_ = _numMixedParticipants > 1 &&
_outputFrequency <= kAudioProcMaxNativeSampleRateHz;
MixFromList(*mixedAudio, &mixList);
MixAnonomouslyFromList(*mixedAudio, &additionalFramesList);
MixAnonomouslyFromList(*mixedAudio, &rampOutList);
MixFromList(mixedAudio, mixList);
MixAnonomouslyFromList(mixedAudio, additionalFramesList);
MixAnonomouslyFromList(mixedAudio, rampOutList);
if(mixedAudio->samples_per_channel_ == 0) {
// Nothing was mixed, set the audio samples to silence.
@@ -304,7 +303,7 @@ int32_t AudioConferenceMixerImpl::Process() {
mixedAudio->Mute();
} else {
// Only call the limiter if we have something to mix.
if(!LimitMixedAudio(*mixedAudio))
if(!LimitMixedAudio(mixedAudio))
retval = -1;
}
}
@@ -334,12 +333,12 @@ int32_t AudioConferenceMixerImpl::Process() {
}
int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback(
AudioMixerOutputReceiver& mixReceiver) {
AudioMixerOutputReceiver* mixReceiver) {
CriticalSectionScoped cs(_cbCrit.get());
if(_mixReceiver != NULL) {
return -1;
}
_mixReceiver = &mixReceiver;
_mixReceiver = mixReceiver;
return 0;
}
@@ -353,7 +352,7 @@ int32_t AudioConferenceMixerImpl::UnRegisterMixedStreamCallback() {
}
int32_t AudioConferenceMixerImpl::SetOutputFrequency(
const Frequency frequency) {
const Frequency& frequency) {
CriticalSectionScoped cs(_crit.get());
_outputFrequency = frequency;
@@ -370,8 +369,7 @@ AudioConferenceMixerImpl::OutputFrequency() const {
}
int32_t AudioConferenceMixerImpl::SetMixabilityStatus(
MixerParticipant& participant,
bool mixable) {
MixerParticipant* participant, bool mixable) {
if (!mixable) {
// Anonymous participants are in a separate list. Make sure that the
// participant is in the _participantList if it is being mixed.
@@ -381,7 +379,7 @@ int32_t AudioConferenceMixerImpl::SetMixabilityStatus(
{
CriticalSectionScoped cs(_cbCrit.get());
const bool isMixed =
IsParticipantInList(participant, &_participantList);
IsParticipantInList(*participant, _participantList);
// API must be called with a new state.
if(!(mixable ^ isMixed)) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
@@ -418,18 +416,16 @@ int32_t AudioConferenceMixerImpl::SetMixabilityStatus(
return 0;
}
int32_t AudioConferenceMixerImpl::MixabilityStatus(
MixerParticipant& participant,
bool& mixable) {
bool AudioConferenceMixerImpl::MixabilityStatus(
const MixerParticipant& participant) const {
CriticalSectionScoped cs(_cbCrit.get());
mixable = IsParticipantInList(participant, &_participantList);
return 0;
return IsParticipantInList(participant, _participantList);
}
int32_t AudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
MixerParticipant& participant, const bool anonymous) {
MixerParticipant* participant, bool anonymous) {
CriticalSectionScoped cs(_cbCrit.get());
if(IsParticipantInList(participant, &_additionalParticipantList)) {
if(IsParticipantInList(*participant, _additionalParticipantList)) {
if(anonymous) {
return 0;
}
@@ -461,12 +457,10 @@ int32_t AudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
0 : -1;
}
int32_t AudioConferenceMixerImpl::AnonymousMixabilityStatus(
MixerParticipant& participant, bool& mixable) {
bool AudioConferenceMixerImpl::AnonymousMixabilityStatus(
const MixerParticipant& participant) const {
CriticalSectionScoped cs(_cbCrit.get());
mixable = IsParticipantInList(participant,
&_additionalParticipantList);
return 0;
return IsParticipantInList(participant, _additionalParticipantList);
}
int32_t AudioConferenceMixerImpl::SetMinimumMixingFrequency(
@@ -493,11 +487,11 @@ int32_t AudioConferenceMixerImpl::SetMinimumMixingFrequency(
// Check all AudioFrames that are to be mixed. The highest sampling frequency
// found is the lowest that can be used without losing information.
int32_t AudioConferenceMixerImpl::GetLowestMixingFrequency() {
int32_t AudioConferenceMixerImpl::GetLowestMixingFrequency() const {
const int participantListFrequency =
GetLowestMixingFrequencyFromList(&_participantList);
GetLowestMixingFrequencyFromList(_participantList);
const int anonymousListFrequency =
GetLowestMixingFrequencyFromList(&_additionalParticipantList);
GetLowestMixingFrequencyFromList(_additionalParticipantList);
const int highestFreq =
(participantListFrequency > anonymousListFrequency) ?
participantListFrequency : anonymousListFrequency;
@@ -511,10 +505,10 @@ int32_t AudioConferenceMixerImpl::GetLowestMixingFrequency() {
}
int32_t AudioConferenceMixerImpl::GetLowestMixingFrequencyFromList(
MixerParticipantList* mixList) {
const MixerParticipantList& mixList) const {
int32_t highestFreq = 8000;
for (MixerParticipantList::iterator iter = mixList->begin();
iter != mixList->end();
for (MixerParticipantList::const_iterator iter = mixList.begin();
iter != mixList.end();
++iter) {
const int32_t neededFrequency = (*iter)->NeededFrequency(_id);
if(neededFrequency > highestFreq) {
@@ -528,28 +522,28 @@ void AudioConferenceMixerImpl::UpdateToMix(
AudioFrameList* mixList,
AudioFrameList* rampOutList,
std::map<int, MixerParticipant*>* mixParticipantList,
size_t& maxAudioFrameCounter) {
size_t* maxAudioFrameCounter) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"UpdateToMix(mixList,rampOutList,mixParticipantList,%d)",
maxAudioFrameCounter);
*maxAudioFrameCounter);
const size_t mixListStartSize = mixList->size();
AudioFrameList activeList;
// Struct needed by the passive lists to keep track of which AudioFrame
// belongs to which MixerParticipant.
ParticipantFramePairList passiveWasNotMixedList;
ParticipantFramePairList passiveWasMixedList;
for (MixerParticipantList::iterator participant = _participantList.begin();
participant != _participantList.end();
for (MixerParticipantList::const_iterator participant =
_participantList.begin(); participant != _participantList.end();
++participant) {
// Stop keeping track of passive participants if there are already
// enough participants available (they won't be mixed anyway).
bool mustAddToPassiveList = (maxAudioFrameCounter >
bool mustAddToPassiveList = (*maxAudioFrameCounter >
(activeList.size() +
passiveWasMixedList.size() +
passiveWasNotMixedList.size()));
bool wasMixed = false;
(*participant)->_mixHistory->WasMixed(wasMixed);
wasMixed = (*participant)->_mixHistory->WasMixed();
AudioFrame* audioFrame = NULL;
if(_audioFramePool->PopMemory(audioFrame) == -1) {
WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
@@ -559,7 +553,7 @@ void AudioConferenceMixerImpl::UpdateToMix(
}
audioFrame->sample_rate_hz_ = _outputFrequency;
if((*participant)->GetAudioFrame(_id,*audioFrame) != 0) {
if((*participant)->GetAudioFrame(_id, audioFrame) != 0) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
"failed to GetAudioFrame() from participant");
_audioFramePool->PushMemory(audioFrame);
@@ -584,7 +578,7 @@ void AudioConferenceMixerImpl::UpdateToMix(
RampIn(*audioFrame);
}
if(activeList.size() >= maxAudioFrameCounter) {
if(activeList.size() >= *maxAudioFrameCounter) {
// There are already more active participants than should be
// mixed. Only keep the ones with the highest energy.
AudioFrameList::iterator replaceItem;
@@ -606,14 +600,14 @@ void AudioConferenceMixerImpl::UpdateToMix(
AudioFrame* replaceFrame = *replaceItem;
bool replaceWasMixed = false;
std::map<int, MixerParticipant*>::iterator it =
std::map<int, MixerParticipant*>::const_iterator it =
mixParticipantList->find(replaceFrame->id_);
// When a frame is pushed to |activeList| it is also pushed
// to mixParticipantList with the frame's id. This means
// that the find() call above should never fail.
assert(it != mixParticipantList->end());
it->second->_mixHistory->WasMixed(replaceWasMixed);
replaceWasMixed = it->second->_mixHistory->WasMixed();
mixParticipantList->erase(replaceFrame->id_);
activeList.erase(replaceItem);
@@ -664,10 +658,10 @@ void AudioConferenceMixerImpl::UpdateToMix(
}
}
}
assert(activeList.size() <= maxAudioFrameCounter);
assert(activeList.size() <= *maxAudioFrameCounter);
// At this point it is known which participants should be mixed. Transfer
// this information to this function's output parameters.
for (AudioFrameList::iterator iter = activeList.begin();
for (AudioFrameList::const_iterator iter = activeList.begin();
iter != activeList.end();
++iter) {
mixList->push_back(*iter);
@@ -676,10 +670,10 @@ void AudioConferenceMixerImpl::UpdateToMix(
// Always mix a constant number of AudioFrames. If there aren't enough
// active participants, mix passive ones, starting with those that were
// mixed last iteration.
for (ParticipantFramePairList::iterator iter = passiveWasMixedList.begin();
iter != passiveWasMixedList.end();
for (ParticipantFramePairList::const_iterator
iter = passiveWasMixedList.begin(); iter != passiveWasMixedList.end();
++iter) {
if(mixList->size() < maxAudioFrameCounter + mixListStartSize) {
if(mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
mixList->push_back((*iter)->audioFrame);
(*mixParticipantList)[(*iter)->audioFrame->id_] =
(*iter)->participant;
@@ -691,11 +685,11 @@ void AudioConferenceMixerImpl::UpdateToMix(
delete *iter;
}
// And finally the ones that have not been mixed for a while.
for (ParticipantFramePairList::iterator iter =
for (ParticipantFramePairList::const_iterator iter =
passiveWasNotMixedList.begin();
iter != passiveWasNotMixedList.end();
++iter) {
if(mixList->size() < maxAudioFrameCounter + mixListStartSize) {
if(mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
mixList->push_back((*iter)->audioFrame);
(*mixParticipantList)[(*iter)->audioFrame->id_] =
(*iter)->participant;
@@ -706,12 +700,12 @@ void AudioConferenceMixerImpl::UpdateToMix(
}
delete *iter;
}
assert(maxAudioFrameCounter + mixListStartSize >= mixList->size());
maxAudioFrameCounter += mixListStartSize - mixList->size();
assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size());
*maxAudioFrameCounter += mixListStartSize - mixList->size();
}
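
The counter argument illustrates why rule 1 matters: with a pointer, the
mutation is visible at the call site. A sketch of the call as it appears in
Process() above (the local variable names are illustrative):

size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants;
UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
            &remainingParticipantsAllowedToMix);
// The counter now reflects how many more participants may still be mixed.
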
void AudioConferenceMixerImpl::GetAdditionalAudio(
AudioFrameList* additionalFramesList) {
AudioFrameList* additionalFramesList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"GetAdditionalAudio(additionalFramesList)");
// The GetAudioFrame() callback may result in the participant being removed
@@ -723,7 +717,7 @@ void AudioConferenceMixerImpl::GetAdditionalAudio(
_additionalParticipantList.begin(),
_additionalParticipantList.end());
for (MixerParticipantList::iterator participant =
for (MixerParticipantList::const_iterator participant =
additionalParticipantList.begin();
participant != additionalParticipantList.end();
++participant) {
@@ -735,7 +729,7 @@ void AudioConferenceMixerImpl::GetAdditionalAudio(
return;
}
audioFrame->sample_rate_hz_ = _outputFrequency;
if((*participant)->GetAudioFrame(_id, *audioFrame) != 0) {
if((*participant)->GetAudioFrame(_id, audioFrame) != 0) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
"failed to GetAudioFrame() from participant");
_audioFramePool->PushMemory(audioFrame);
@@ -751,18 +745,19 @@ void AudioConferenceMixerImpl::GetAdditionalAudio(
}
void AudioConferenceMixerImpl::UpdateMixedStatus(
std::map<int, MixerParticipant*>& mixedParticipantsMap) {
const std::map<int, MixerParticipant*>& mixedParticipantsMap) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"UpdateMixedStatus(mixedParticipantsMap)");
assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants);
// Loop through all participants. If they are in the mix map they
// were mixed.
for (MixerParticipantList::iterator participant = _participantList.begin();
participant != _participantList.end();
for (MixerParticipantList::const_iterator
participant = _participantList.begin();
participant != _participantList.end();
++participant) {
bool isMixed = false;
for (std::map<int, MixerParticipant*>::iterator it =
for (std::map<int, MixerParticipant*>::const_iterator it =
mixedParticipantsMap.begin();
it != mixedParticipantsMap.end();
++it) {
@@ -776,7 +771,7 @@ void AudioConferenceMixerImpl::UpdateMixedStatus(
}
void AudioConferenceMixerImpl::ClearAudioFrameList(
AudioFrameList* audioFrameList) {
AudioFrameList* audioFrameList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"ClearAudioFrameList(audioFrameList)");
for (AudioFrameList::iterator iter = audioFrameList->begin();
@@ -788,11 +783,11 @@ void AudioConferenceMixerImpl::ClearAudioFrameList(
}
void AudioConferenceMixerImpl::UpdateVADPositiveParticipants(
AudioFrameList* mixList) {
AudioFrameList* mixList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"UpdateVADPositiveParticipants(mixList)");
for (AudioFrameList::iterator iter = mixList->begin();
for (AudioFrameList::const_iterator iter = mixList->begin();
iter != mixList->end();
++iter) {
CalculateEnergy(**iter);
@@ -800,12 +795,12 @@ void AudioConferenceMixerImpl::UpdateVADPositiveParticipants(
}
bool AudioConferenceMixerImpl::IsParticipantInList(
MixerParticipant& participant,
MixerParticipantList* participantList) const {
const MixerParticipant& participant,
const MixerParticipantList& participantList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"IsParticipantInList(participant,participantList)");
for (MixerParticipantList::const_iterator iter = participantList->begin();
iter != participantList->end();
for (MixerParticipantList::const_iterator iter = participantList.begin();
iter != participantList.end();
++iter) {
if(&participant == *iter) {
return true;
@@ -815,28 +810,28 @@ bool AudioConferenceMixerImpl::IsParticipantInList(
}
bool AudioConferenceMixerImpl::AddParticipantToList(
MixerParticipant& participant,
MixerParticipantList* participantList) {
MixerParticipant* participant,
MixerParticipantList* participantList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"AddParticipantToList(participant, participantList)");
participantList->push_back(&participant);
participantList->push_back(participant);
// Make sure that the mixed status is correct for new MixerParticipant.
participant._mixHistory->ResetMixedStatus();
participant->_mixHistory->ResetMixedStatus();
return true;
}
bool AudioConferenceMixerImpl::RemoveParticipantFromList(
MixerParticipant& participant,
MixerParticipantList* participantList) {
MixerParticipant* participant,
MixerParticipantList* participantList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"RemoveParticipantFromList(participant, participantList)");
for (MixerParticipantList::iterator iter = participantList->begin();
iter != participantList->end();
++iter) {
if(*iter == &participant) {
if(*iter == participant) {
participantList->erase(iter);
// Participant is no longer mixed, reset to default.
participant._mixHistory->ResetMixedStatus();
participant->_mixHistory->ResetMixedStatus();
return true;
}
}
@@ -844,26 +839,26 @@ bool AudioConferenceMixerImpl::RemoveParticipantFromList(
}
int32_t AudioConferenceMixerImpl::MixFromList(
AudioFrame& mixedAudio,
const AudioFrameList* audioFrameList) {
AudioFrame* mixedAudio,
const AudioFrameList& audioFrameList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"MixFromList(mixedAudio, audioFrameList)");
if(audioFrameList->empty()) return 0;
if(audioFrameList.empty()) return 0;
uint32_t position = 0;
if (_numMixedParticipants == 1) {
mixedAudio.timestamp_ = audioFrameList->front()->timestamp_;
mixedAudio.elapsed_time_ms_ = audioFrameList->front()->elapsed_time_ms_;
mixedAudio->timestamp_ = audioFrameList.front()->timestamp_;
mixedAudio->elapsed_time_ms_ = audioFrameList.front()->elapsed_time_ms_;
} else {
// TODO(wu): Issue 3390.
// Audio frame timestamp is only supported in one channel case.
mixedAudio.timestamp_ = 0;
mixedAudio.elapsed_time_ms_ = -1;
mixedAudio->timestamp_ = 0;
mixedAudio->elapsed_time_ms_ = -1;
}
for (AudioFrameList::const_iterator iter = audioFrameList->begin();
iter != audioFrameList->end();
for (AudioFrameList::const_iterator iter = audioFrameList.begin();
iter != audioFrameList.end();
++iter) {
if(position >= kMaximumAmountOfMixedParticipants) {
WEBRTC_TRACE(
@@ -876,7 +871,7 @@ int32_t AudioConferenceMixerImpl::MixFromList(
assert(false);
position = 0;
}
MixFrames(&mixedAudio, (*iter), use_limiter_);
MixFrames(mixedAudio, (*iter), use_limiter_);
position++;
}
@@ -886,28 +881,28 @@ int32_t AudioConferenceMixerImpl::MixFromList(
// TODO(andrew): consolidate this function with MixFromList.
int32_t AudioConferenceMixerImpl::MixAnonomouslyFromList(
AudioFrame& mixedAudio,
const AudioFrameList* audioFrameList) {
AudioFrame* mixedAudio,
const AudioFrameList& audioFrameList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"MixAnonomouslyFromList(mixedAudio, audioFrameList)");
if(audioFrameList->empty()) return 0;
if(audioFrameList.empty()) return 0;
for (AudioFrameList::const_iterator iter = audioFrameList->begin();
iter != audioFrameList->end();
for (AudioFrameList::const_iterator iter = audioFrameList.begin();
iter != audioFrameList.end();
++iter) {
MixFrames(&mixedAudio, *iter, use_limiter_);
MixFrames(mixedAudio, *iter, use_limiter_);
}
return 0;
}
bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame& mixedAudio) {
bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const {
if (!use_limiter_) {
return true;
}
// Smoothly limit the mixed frame.
const int error = _limiter->ProcessStream(&mixedAudio);
const int error = _limiter->ProcessStream(mixedAudio);
// And now we can safely restore the level. This procedure results in
// some loss of resolution, deemed acceptable.
@@ -919,7 +914,7 @@ bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame& mixedAudio) {
//
// Instead we double the frame (with addition since left-shifting a
// negative value is undefined).
mixedAudio += mixedAudio;
*mixedAudio += *mixedAudio;
if(error != _limiter->kNoError) {
WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
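
The comment above explains the doubling: left-shifting a negative signed
value is undefined behavior in the C++ standards in use here (it only became
well defined in C++20), so the frame is doubled with addition instead. In
scalar form (illustrative only):

int16_t sample = -100;
// sample <<= 1;   // Undefined behavior for negative values before C++20.
sample += sample;  // Well defined: sample is now -200.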

File 4 of 8

@@ -35,15 +35,15 @@ public:
MixHistory();
~MixHistory();
// MixerParticipant function
int32_t IsMixed(bool& mixed) const;
// Returns true if the participant is being mixed.
bool IsMixed() const;
// Sets wasMixed to true if the participant was mixed previous mix
// Returns true if the participant was mixed in the previous mix
// iteration.
int32_t WasMixed(bool& wasMixed) const;
bool WasMixed() const;
// Updates the mixed status.
int32_t SetIsMixed(const bool mixed);
int32_t SetIsMixed(bool mixed);
void ResetMixedStatus();
private:
@@ -68,23 +68,22 @@ public:
// AudioConferenceMixer functions
int32_t RegisterMixedStreamCallback(
AudioMixerOutputReceiver& mixReceiver) override;
AudioMixerOutputReceiver* mixReceiver) override;
int32_t UnRegisterMixedStreamCallback() override;
int32_t SetMixabilityStatus(MixerParticipant& participant,
int32_t SetMixabilityStatus(MixerParticipant* participant,
bool mixable) override;
int32_t MixabilityStatus(MixerParticipant& participant,
bool& mixable) override;
bool MixabilityStatus(const MixerParticipant& participant) const override;
int32_t SetMinimumMixingFrequency(Frequency freq) override;
int32_t SetAnonymousMixabilityStatus(MixerParticipant& participant,
const bool mixable) override;
int32_t AnonymousMixabilityStatus(MixerParticipant& participant,
bool& mixable) override;
int32_t SetAnonymousMixabilityStatus(
MixerParticipant* participant, bool mixable) override;
bool AnonymousMixabilityStatus(
const MixerParticipant& participant) const override;
private:
enum{DEFAULT_AUDIO_FRAME_POOLSIZE = 50};
// Set/get mix frequency
int32_t SetOutputFrequency(const Frequency frequency);
int32_t SetOutputFrequency(const Frequency& frequency);
Frequency OutputFrequency() const;
// Fills mixList with the AudioFrame pointers that should be used when
@@ -98,55 +97,54 @@ private:
AudioFrameList* mixList,
AudioFrameList* rampOutList,
std::map<int, MixerParticipant*>* mixParticipantList,
size_t& maxAudioFrameCounter);
size_t* maxAudioFrameCounter) const;
// Return the lowest mixing frequency that can be used without having to
// downsample any audio.
int32_t GetLowestMixingFrequency();
int32_t GetLowestMixingFrequencyFromList(MixerParticipantList* mixList);
int32_t GetLowestMixingFrequency() const;
int32_t GetLowestMixingFrequencyFromList(
const MixerParticipantList& mixList) const;
// Return the AudioFrames that should be mixed anonymously.
void GetAdditionalAudio(AudioFrameList* additionalFramesList);
void GetAdditionalAudio(AudioFrameList* additionalFramesList) const;
// Update the MixHistory of all MixerParticipants. mixedParticipantsList
// should contain a map of MixerParticipants that have been mixed.
void UpdateMixedStatus(
std::map<int, MixerParticipant*>& mixedParticipantsList);
const std::map<int, MixerParticipant*>& mixedParticipantsList) const;
// Clears audioFrameList and reclaims all memory associated with it.
void ClearAudioFrameList(AudioFrameList* audioFrameList);
void ClearAudioFrameList(AudioFrameList* audioFrameList) const;
// Update the list of MixerParticipants who have a positive VAD. mixList
// should be a list of AudioFrames.
void UpdateVADPositiveParticipants(
AudioFrameList* mixList);
void UpdateVADPositiveParticipants(AudioFrameList* mixList) const;
// This function returns true if it finds the MixerParticipant in the
// specified list of MixerParticipants.
bool IsParticipantInList(
MixerParticipant& participant,
MixerParticipantList* participantList) const;
bool IsParticipantInList(const MixerParticipant& participant,
const MixerParticipantList& participantList) const;
// Add/remove the MixerParticipant to the specified
// MixerParticipant list.
bool AddParticipantToList(
MixerParticipant& participant,
MixerParticipantList* participantList);
MixerParticipant* participant,
MixerParticipantList* participantList) const;
bool RemoveParticipantFromList(
MixerParticipant& removeParticipant,
MixerParticipantList* participantList);
MixerParticipant* removeParticipant,
MixerParticipantList* participantList) const;
// Mix the AudioFrames stored in audioFrameList into mixedAudio.
int32_t MixFromList(
AudioFrame& mixedAudio,
const AudioFrameList* audioFrameList);
int32_t MixFromList(AudioFrame* mixedAudio,
const AudioFrameList& audioFrameList) const;
// Mix the AudioFrames stored in audioFrameList into mixedAudio. No
// record will be kept of this mix (e.g. the corresponding MixerParticipants
// will not be marked as IsMixed()).
int32_t MixAnonomouslyFromList(AudioFrame& mixedAudio,
const AudioFrameList* audioFrameList);
int32_t MixAnonomouslyFromList(AudioFrame* mixedAudio,
const AudioFrameList& audioFrameList) const;
bool LimitMixedAudio(AudioFrame& mixedAudio);
bool LimitMixedAudio(AudioFrame* mixedAudio) const;
rtc::scoped_ptr<CriticalSectionWrapper> _crit;
rtc::scoped_ptr<CriticalSectionWrapper> _cbCrit;
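
One payoff of the const annotations: the query methods can now be called
through const references. A hypothetical helper using only the public
interface shown earlier:

bool IsCandidate(const AudioConferenceMixer& mixer,
                 const MixerParticipant& participant) {
  // Compiles only because MixabilityStatus() is a const member function.
  return mixer.MixabilityStatus(participant);
}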

File 5 of 8

@@ -445,14 +445,14 @@ bool Channel::OnRecoveredPacket(const uint8_t* rtp_packet,
return ReceivePacket(rtp_packet, rtp_packet_length, header, false);
}
int32_t Channel::GetAudioFrame(int32_t id, AudioFrame& audioFrame)
int32_t Channel::GetAudioFrame(int32_t id, AudioFrame* audioFrame)
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::GetAudioFrame(id=%d)", id);
// Get 10ms raw PCM data from the ACM (mixer limits output frequency)
if (audio_coding_->PlayoutData10Ms(audioFrame.sample_rate_hz_,
&audioFrame) == -1)
if (audio_coding_->PlayoutData10Ms(audioFrame->sample_rate_hz_,
audioFrame) == -1)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_instanceId,_channelId),
@@ -466,18 +466,18 @@ int32_t Channel::GetAudioFrame(int32_t id, AudioFrame& audioFrame)
if (_RxVadDetection)
{
UpdateRxVadDetection(audioFrame);
UpdateRxVadDetection(*audioFrame);
}
// Convert module ID to internal VoE channel ID
audioFrame.id_ = VoEChannelId(audioFrame.id_);
audioFrame->id_ = VoEChannelId(audioFrame->id_);
// Store speech type for dead-or-alive detection
_outputSpeechType = audioFrame.speech_type_;
_outputSpeechType = audioFrame->speech_type_;
ChannelState::State state = channel_state_.Get();
if (state.rx_apm_is_enabled) {
int err = rx_audioproc_->ProcessStream(&audioFrame);
int err = rx_audioproc_->ProcessStream(audioFrame);
if (err) {
LOG(LS_ERROR) << "ProcessStream() error: " << err;
assert(false);
@@ -497,7 +497,7 @@ int32_t Channel::GetAudioFrame(int32_t id, AudioFrame& audioFrame)
// Output volume scaling
if (output_gain < 0.99f || output_gain > 1.01f)
{
AudioFrameOperations::ScaleWithSat(output_gain, audioFrame);
AudioFrameOperations::ScaleWithSat(output_gain, *audioFrame);
}
// Scale left and/or right channel(s) if stereo and master balance is
@@ -505,39 +505,39 @@ int32_t Channel::GetAudioFrame(int32_t id, AudioFrame& audioFrame)
if (left_pan != 1.0f || right_pan != 1.0f)
{
if (audioFrame.num_channels_ == 1)
if (audioFrame->num_channels_ == 1)
{
// Emulate stereo mode since panning is active.
// The mono signal is copied to both left and right channels here.
AudioFrameOperations::MonoToStereo(&audioFrame);
AudioFrameOperations::MonoToStereo(audioFrame);
}
// For true stereo mode (when we are receiving a stereo signal), no
// action is needed.
// Do the panning operation (the audio frame contains stereo at this
// stage)
AudioFrameOperations::Scale(left_pan, right_pan, audioFrame);
AudioFrameOperations::Scale(left_pan, right_pan, *audioFrame);
}
// Mix decoded PCM output with file if file mixing is enabled
if (state.output_file_playing)
{
MixAudioWithFile(audioFrame, audioFrame.sample_rate_hz_);
MixAudioWithFile(*audioFrame, audioFrame->sample_rate_hz_);
}
// External media
if (_outputExternalMedia)
{
CriticalSectionScoped cs(&_callbackCritSect);
const bool isStereo = (audioFrame.num_channels_ == 2);
const bool isStereo = (audioFrame->num_channels_ == 2);
if (_outputExternalMediaCallbackPtr)
{
_outputExternalMediaCallbackPtr->Process(
_channelId,
kPlaybackPerChannel,
(int16_t*)audioFrame.data_,
audioFrame.samples_per_channel_,
audioFrame.sample_rate_hz_,
(int16_t*)audioFrame->data_,
audioFrame->samples_per_channel_,
audioFrame->sample_rate_hz_,
isStereo);
}
}
@@ -548,16 +548,16 @@ int32_t Channel::GetAudioFrame(int32_t id, AudioFrame& audioFrame)
if (_outputFileRecording && _outputFileRecorderPtr)
{
_outputFileRecorderPtr->RecordAudioToFile(audioFrame);
_outputFileRecorderPtr->RecordAudioToFile(*audioFrame);
}
}
// Measure audio level (0-9)
_outputAudioLevel.ComputeLevel(audioFrame);
_outputAudioLevel.ComputeLevel(*audioFrame);
if (capture_start_rtp_time_stamp_ < 0 && audioFrame.timestamp_ != 0) {
if (capture_start_rtp_time_stamp_ < 0 && audioFrame->timestamp_ != 0) {
// The first frame with a valid rtp timestamp.
capture_start_rtp_time_stamp_ = audioFrame.timestamp_;
capture_start_rtp_time_stamp_ = audioFrame->timestamp_;
}
if (capture_start_rtp_time_stamp_ >= 0) {
@@ -565,22 +565,22 @@ int32_t Channel::GetAudioFrame(int32_t id, AudioFrame& audioFrame)
// Compute elapsed time.
int64_t unwrap_timestamp =
rtp_ts_wraparound_handler_->Unwrap(audioFrame.timestamp_);
audioFrame.elapsed_time_ms_ =
rtp_ts_wraparound_handler_->Unwrap(audioFrame->timestamp_);
audioFrame->elapsed_time_ms_ =
(unwrap_timestamp - capture_start_rtp_time_stamp_) /
(GetPlayoutFrequency() / 1000);
{
CriticalSectionScoped lock(ts_stats_lock_.get());
// Compute ntp time.
audioFrame.ntp_time_ms_ = ntp_estimator_.Estimate(
audioFrame.timestamp_);
audioFrame->ntp_time_ms_ = ntp_estimator_.Estimate(
audioFrame->timestamp_);
// |ntp_time_ms_| won't be valid until at least 2 RTCP SRs are received.
if (audioFrame.ntp_time_ms_ > 0) {
if (audioFrame->ntp_time_ms_ > 0) {
// Compute |capture_start_ntp_time_ms_| so that
// |capture_start_ntp_time_ms_| + |elapsed_time_ms_| == |ntp_time_ms_|
capture_start_ntp_time_ms_ =
audioFrame.ntp_time_ms_ - audioFrame.elapsed_time_ms_;
audioFrame->ntp_time_ms_ - audioFrame->elapsed_time_ms_;
}
}
}
@@ -589,7 +589,7 @@ int32_t Channel::GetAudioFrame(int32_t id, AudioFrame& audioFrame)
}
int32_t
Channel::NeededFrequency(int32_t id)
Channel::NeededFrequency(int32_t id) const
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::NeededFrequency(id=%d)", id);

File 6 of 8

@@ -379,8 +379,8 @@ public:
int SendRTCPPacket(int /*channel*/, const void* data, size_t len) override;
// From MixerParticipant
int32_t GetAudioFrame(int32_t id, AudioFrame& audioFrame) override;
int32_t NeededFrequency(int32_t id) override;
int32_t GetAudioFrame(int32_t id, AudioFrame* audioFrame) override;
int32_t NeededFrequency(int32_t id) const override;
// From FileCallback
void PlayNotification(int32_t id, uint32_t durationMs) override;

File 7 of 8

@@ -108,7 +108,7 @@ OutputMixer::OutputMixer(uint32_t instanceId) :
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::OutputMixer() - ctor");
if (_mixerModule.RegisterMixedStreamCallback(*this) == -1)
if (_mixerModule.RegisterMixedStreamCallback(this) == -1)
{
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::OutputMixer() failed to register mixer"
@@ -215,14 +215,14 @@ int32_t
OutputMixer::SetMixabilityStatus(MixerParticipant& participant,
bool mixable)
{
return _mixerModule.SetMixabilityStatus(participant, mixable);
return _mixerModule.SetMixabilityStatus(&participant, mixable);
}
int32_t
OutputMixer::SetAnonymousMixabilityStatus(MixerParticipant& participant,
bool mixable)
{
return _mixerModule.SetAnonymousMixabilityStatus(participant,mixable);
return _mixerModule.SetAnonymousMixabilityStatus(&participant, mixable);
}
int32_t

File 8 of 8

@@ -158,7 +158,7 @@ int VoEExternalMediaImpl::GetAudioFrame(int channel, int desired_sample_rate_hz,
}
frame->sample_rate_hz_ =
desired_sample_rate_hz == 0 ? -1 : desired_sample_rate_hz;
return channelPtr->GetAudioFrame(channel, *frame);
return channelPtr->GetAudioFrame(channel, frame);
}
int VoEExternalMediaImpl::SetExternalMixing(int channel, bool enable) {