Cleanup of the mixer interface.

This implements some of the suggestions in https://codereview.webrtc.org/2386383003/, namely

* Removing anonymous mixing.
* Removing the volume meter.

BUG=webrtc:6346

Review-Url: https://codereview.webrtc.org/2402283003
Cr-Commit-Position: refs/heads/master@{#14609}
Author: aleloi
Date: 2016-10-12 03:06:09 -07:00
Committed by: Commit bot
Parent: 73a28ee066
Commit: e97974d203
4 changed files with 60 additions and 379 deletions
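With anonymous mixing and the volume meter gone, the caller-facing surface of AudioMixer reduces to Create(), SetMixabilityStatus() and Mix(). A minimal usage sketch against the interface as it stands after this CL (the MixOnce() wrapper and the two source pointers are assumptions for illustration; the header include is elided):

void MixOnce(webrtc::AudioMixer::Source* source_a,
             webrtc::AudioMixer::Source* source_b) {
  // Create() no longer takes an id.
  std::unique_ptr<webrtc::AudioMixer> mixer = webrtc::AudioMixer::Create();

  // Register both sources as candidates for mixing (returns 0 on success).
  mixer->SetMixabilityStatus(source_a, true);
  mixer->SetMixabilityStatus(source_b, true);

  // Produce one 10 ms block of mixed audio at 48 kHz, mono. Mix() must
  // always be called from the same thread.
  webrtc::AudioFrame mixed_frame;
  mixer->Mix(48000 /* sample_rate_hz */, 1 /* number_of_channels */,
             &mixed_frame);
}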

File 1 of 4: AudioMixer interface header

@@ -13,7 +13,6 @@
#include <memory>
#include "webrtc/modules/include/module.h"
#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
@@ -21,14 +20,6 @@ namespace webrtc {
class AudioMixer {
public:
static const int kMaximumAmountOfMixedAudioSources = 3;
enum Frequency {
kNbInHz = 8000,
kWbInHz = 16000,
kSwbInHz = 32000,
kFbInHz = 48000,
kDefaultFrequency = kWbInHz
};
// A callback class that all mixer participants must inherit from/implement.
class Source {
public:
@@ -54,52 +45,26 @@ class AudioMixer {
// AudioFrame pointer at any time until the next call to
// GetAudioFrameWithInfo, or until the source is removed from the
// mixer.
virtual AudioFrameWithInfo GetAudioFrameWithInfo(int32_t id,
int sample_rate_hz) = 0;
virtual AudioFrameWithInfo GetAudioFrameWithInfo(int sample_rate_hz) = 0;
protected:
virtual ~Source() {}
};
// Factory method. Constructor disabled.
static std::unique_ptr<AudioMixer> Create(int id);
static std::unique_ptr<AudioMixer> Create();
virtual ~AudioMixer() {}
// Add/remove audio sources as candidates for mixing.
virtual int32_t SetMixabilityStatus(Source* audio_source, bool mixable) = 0;
// Returns true if an audio source is a candidate for mixing.
virtual bool MixabilityStatus(const Source& audio_source) const = 0;
// Inform the mixer that the audio source should always be mixed and not
// count toward the number of mixed audio sources. Note that an audio source
// must have been added to the mixer (by calling SetMixabilityStatus())
// before this function can be successfully called.
virtual int32_t SetAnonymousMixabilityStatus(Source* audio_source,
bool mixable) = 0;
// Performs mixing by asking registered audio sources for audio. The
// mixed result is placed in the provided AudioFrame. Can only be
// mixed result is placed in the provided AudioFrame. Will only be
// called from a single thread. The rate and channels arguments
// specify the rate and number of channels of the mix result.
virtual void Mix(int sample_rate,
virtual void Mix(int sample_rate_hz,
size_t number_of_channels,
AudioFrame* audio_frame_for_mixing) = 0;
// Returns true if the audio source is mixed anonymously.
virtual bool AnonymousMixabilityStatus(const Source& audio_source) const = 0;
// Output level functions for VoEVolumeControl. Return value
// between 0 and 9 is returned by voe::AudioLevel.
virtual int GetOutputAudioLevel() = 0;
// Return value between 0 and 0x7fff is returned by voe::AudioLevel.
virtual int GetOutputAudioLevelFullRange() = 0;
protected:
AudioMixer() {}
private:
RTC_DISALLOW_COPY_AND_ASSIGN(AudioMixer);
};
} // namespace webrtc
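For implementers of Source, the practical change in this header is that GetAudioFrameWithInfo() loses its id parameter. A hedged sketch of a conforming source, assuming only the names visible in the interface above (the silence-producing body is invented for illustration):

class SilentSource : public webrtc::AudioMixer::Source {
 public:
  AudioFrameWithInfo GetAudioFrameWithInfo(int sample_rate_hz) override {
    // The mixer may keep using the returned frame until the next call or
    // until this source is removed, so the frame is a member, not a local.
    frame_.sample_rate_hz_ = sample_rate_hz;
    frame_.samples_per_channel_ = sample_rate_hz / 100;  // 10 ms of audio.
    frame_.num_channels_ = 1;
    for (size_t i = 0; i < frame_.samples_per_channel_; ++i) {
      frame_.data_[i] = 0;  // A real source writes actual samples here.
    }
    return {&frame_, AudioFrameInfo::kNormal};
  }

 private:
  webrtc::AudioFrame frame_;
};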

File 2 of 4: AudioMixerImpl implementation

@@ -17,7 +17,6 @@
#include "webrtc/base/logging.h"
#include "webrtc/modules/audio_mixer/audio_frame_manipulator.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/trace.h"
namespace webrtc {
namespace {
@@ -81,12 +80,10 @@ void RampAndUpdateGain(
// Mix the AudioFrames stored in audioFrameList into mixed_audio.
int32_t MixFromList(AudioFrame* mixed_audio,
const AudioFrameList& audio_frame_list,
int32_t id,
bool use_limiter) {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id,
"MixFromList(mixed_audio, audio_frame_list)");
if (audio_frame_list.empty())
if (audio_frame_list.empty()) {
return 0;
}
if (audio_frame_list.size() == 1) {
mixed_audio->timestamp_ = audio_frame_list.front()->timestamp_;
@@ -128,6 +125,7 @@ AudioMixerImpl::SourceStatusList::const_iterator FindSourceInList(
});
}
// TODO(aleloi): remove non-const version when WEBRTC only supports modern STL.
AudioMixerImpl::SourceStatusList::iterator FindSourceInList(
AudioMixerImpl::Source const* audio_source,
AudioMixerImpl::SourceStatusList* audio_source_list) {
@@ -139,14 +137,12 @@ AudioMixerImpl::SourceStatusList::iterator FindSourceInList(
} // namespace
std::unique_ptr<AudioMixer> AudioMixer::Create(int id) {
return AudioMixerImpl::Create(id);
std::unique_ptr<AudioMixer> AudioMixer::Create() {
return AudioMixerImpl::Create();
}
AudioMixerImpl::AudioMixerImpl(int id, std::unique_ptr<AudioProcessing> limiter)
: id_(id),
audio_source_list_(),
additional_audio_source_list_(),
AudioMixerImpl::AudioMixerImpl(std::unique_ptr<AudioProcessing> limiter)
: audio_source_list_(),
num_mixed_audio_sources_(0),
use_limiter_(true),
time_stamp_(0),
@@ -157,34 +153,41 @@ AudioMixerImpl::AudioMixerImpl(int id, std::unique_ptr<AudioProcessing> limiter)
AudioMixerImpl::~AudioMixerImpl() {}
std::unique_ptr<AudioMixerImpl> AudioMixerImpl::Create(int id) {
std::unique_ptr<AudioMixerImpl> AudioMixerImpl::Create() {
Config config;
config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
std::unique_ptr<AudioProcessing> limiter(AudioProcessing::Create(config));
if (!limiter.get())
if (!limiter.get()) {
return nullptr;
}
if (limiter->gain_control()->set_mode(GainControl::kFixedDigital) !=
limiter->kNoError)
limiter->kNoError) {
return nullptr;
}
// We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the
// divide-by-2 but -7 is used instead to give a bit of headroom since the
// AGC is not a hard limiter.
if (limiter->gain_control()->set_target_level_dbfs(7) != limiter->kNoError)
if (limiter->gain_control()->set_target_level_dbfs(7) != limiter->kNoError) {
return nullptr;
}
if (limiter->gain_control()->set_compression_gain_db(0) != limiter->kNoError)
if (limiter->gain_control()->set_compression_gain_db(0) !=
limiter->kNoError) {
return nullptr;
}
if (limiter->gain_control()->enable_limiter(true) != limiter->kNoError)
if (limiter->gain_control()->enable_limiter(true) != limiter->kNoError) {
return nullptr;
}
if (limiter->gain_control()->Enable(true) != limiter->kNoError)
if (limiter->gain_control()->Enable(true) != limiter->kNoError) {
return nullptr;
}
return std::unique_ptr<AudioMixerImpl>(
new AudioMixerImpl(id, std::move(limiter)));
new AudioMixerImpl(std::move(limiter)));
}
void AudioMixerImpl::Mix(int sample_rate,
@@ -193,31 +196,18 @@ void AudioMixerImpl::Mix(int sample_rate,
RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
RTC_DCHECK_RUN_ON(&thread_checker_);
if (sample_rate != kNbInHz && sample_rate != kWbInHz &&
sample_rate != kSwbInHz && sample_rate != kFbInHz) {
WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
"Invalid frequency: %d", sample_rate);
RTC_NOTREACHED();
return;
}
if (OutputFrequency() != sample_rate) {
SetOutputFrequency(static_cast<Frequency>(sample_rate));
SetOutputFrequency(sample_rate);
}
AudioFrameList mix_list;
AudioFrameList anonymous_mix_list;
size_t num_mixed_audio_sources;
{
rtc::CritScope lock(&crit_);
mix_list = GetNonAnonymousAudio();
anonymous_mix_list = GetAnonymousAudio();
num_mixed_audio_sources = num_mixed_audio_sources_;
}
mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(),
anonymous_mix_list.end());
for (const auto& frame : mix_list) {
RemixFrame(number_of_channels, frame);
}
@@ -231,7 +221,7 @@ void AudioMixerImpl::Mix(int sample_rate,
use_limiter_ = num_mixed_audio_sources > 1;
// We only use the limiter if we're actually mixing multiple streams.
MixFromList(audio_frame_for_mixing, mix_list, id_, use_limiter_);
MixFromList(audio_frame_for_mixing, mix_list, use_limiter_);
if (audio_frame_for_mixing->samples_per_channel_ == 0) {
// Nothing was mixed, set the audio samples to silence.
@@ -242,40 +232,28 @@ void AudioMixerImpl::Mix(int sample_rate,
LimitMixedAudio(audio_frame_for_mixing);
}
// Pass the final result to the level indicator.
audio_level_.ComputeLevel(*audio_frame_for_mixing);
return;
}
int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) {
void AudioMixerImpl::SetOutputFrequency(int frequency) {
RTC_DCHECK_RUN_ON(&thread_checker_);
output_frequency_ = frequency;
sample_size_ = (output_frequency_ * kFrameDurationInMs) / 1000;
return 0;
}
AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const {
int AudioMixerImpl::OutputFrequency() const {
RTC_DCHECK_RUN_ON(&thread_checker_);
return output_frequency_;
}
int32_t AudioMixerImpl::SetMixabilityStatus(Source* audio_source,
bool mixable) {
if (!mixable) {
// Anonymous audio sources are in a separate list. Make sure that the
// audio source is in the _audioSourceList if it is being mixed.
SetAnonymousMixabilityStatus(audio_source, false);
}
{
rtc::CritScope lock(&crit_);
const bool is_mixed = FindSourceInList(audio_source, &audio_source_list_) !=
audio_source_list_.end();
// API must be called with a new state.
if (!(mixable ^ is_mixed)) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
"Mixable is aready %s", is_mixed ? "ON" : "off");
return -1;
}
bool success = false;
@@ -285,8 +263,6 @@ int32_t AudioMixerImpl::SetMixabilityStatus(Source* audio_source,
success = RemoveAudioSourceFromList(audio_source, &audio_source_list_);
}
if (!success) {
WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
"failed to %s audio_source", mixable ? "add" : "remove");
RTC_NOTREACHED();
return -1;
}
@@ -295,64 +271,15 @@ int32_t AudioMixerImpl::SetMixabilityStatus(Source* audio_source,
if (num_mixed_non_anonymous > kMaximumAmountOfMixedAudioSources) {
num_mixed_non_anonymous = kMaximumAmountOfMixedAudioSources;
}
num_mixed_audio_sources_ =
num_mixed_non_anonymous + additional_audio_source_list_.size();
num_mixed_audio_sources_ = num_mixed_non_anonymous;
}
return 0;
}
bool AudioMixerImpl::MixabilityStatus(const Source& audio_source) const {
rtc::CritScope lock(&crit_);
return FindSourceInList(&audio_source, &audio_source_list_) !=
audio_source_list_.end();
}
int32_t AudioMixerImpl::SetAnonymousMixabilityStatus(Source* audio_source,
bool anonymous) {
rtc::CritScope lock(&crit_);
if (FindSourceInList(audio_source, &additional_audio_source_list_) !=
additional_audio_source_list_.end()) {
if (anonymous) {
return 0;
}
if (!RemoveAudioSourceFromList(audio_source,
&additional_audio_source_list_)) {
WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
"unable to remove audio_source from anonymous list");
RTC_NOTREACHED();
return -1;
}
return AddAudioSourceToList(audio_source, &audio_source_list_) ? 0 : -1;
}
if (!anonymous) {
return 0;
}
const bool mixable =
RemoveAudioSourceFromList(audio_source, &audio_source_list_);
if (!mixable) {
WEBRTC_TRACE(
kTraceWarning, kTraceAudioMixerServer, id_,
"audio_source must be registered before turning it into anonymous");
// Setting anonymous status is only possible if MixerAudioSource is
// already registered.
return -1;
}
return AddAudioSourceToList(audio_source, &additional_audio_source_list_)
? 0
: -1;
}
bool AudioMixerImpl::AnonymousMixabilityStatus(
const Source& audio_source) const {
rtc::CritScope lock(&crit_);
return FindSourceInList(&audio_source, &additional_audio_source_list_) !=
additional_audio_source_list_.end();
}
AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() {
RTC_DCHECK_RUN_ON(&thread_checker_);
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
"GetNonAnonymousAudio()");
AudioFrameList result;
std::vector<SourceFrame> audio_source_mixing_data_list;
std::vector<SourceFrame> ramp_list;
@@ -361,14 +288,13 @@ AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() {
for (auto& source_and_status : audio_source_list_) {
auto audio_frame_with_info =
source_and_status.audio_source->GetAudioFrameWithInfo(
id_, static_cast<int>(OutputFrequency()));
static_cast<int>(OutputFrequency()));
const auto audio_frame_info = audio_frame_with_info.audio_frame_info;
AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;
if (audio_frame_info == Source::AudioFrameInfo::kError) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
"failed to GetAudioFrameWithMuted() from source");
LOG_F(LS_WARNING) << "failed to GetAudioFrameWithInfo() from source";
continue;
}
audio_source_mixing_data_list.emplace_back(
@@ -404,38 +330,9 @@ AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() {
return result;
}
AudioFrameList AudioMixerImpl::GetAnonymousAudio() {
RTC_DCHECK_RUN_ON(&thread_checker_);
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
"GetAnonymousAudio()");
std::vector<SourceFrame> ramp_list;
AudioFrameList result;
for (auto& source_and_status : additional_audio_source_list_) {
const auto audio_frame_with_info =
source_and_status.audio_source->GetAudioFrameWithInfo(
id_, OutputFrequency());
const auto ret = audio_frame_with_info.audio_frame_info;
AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
if (ret == Source::AudioFrameInfo::kError) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
"failed to GetAudioFrameWithMuted() from audio_source");
continue;
}
if (ret != Source::AudioFrameInfo::kMuted) {
result.push_back(audio_frame);
ramp_list.emplace_back(&source_and_status, audio_frame, false, 0);
source_and_status.is_mixed = true;
}
}
RampAndUpdateGain(ramp_list);
return result;
}
bool AudioMixerImpl::AddAudioSourceToList(
Source* audio_source,
SourceStatusList* audio_source_list) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
"AddAudioSourceToList(audio_source, audio_source_list)");
audio_source_list->emplace_back(audio_source, false, 0);
return true;
}
@@ -443,8 +340,6 @@ bool AudioMixerImpl::AddAudioSourceToList(
bool AudioMixerImpl::RemoveAudioSourceFromList(
Source* audio_source,
SourceStatusList* audio_source_list) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
"RemoveAudioSourceFromList(audio_source, audio_source_list)");
const auto iter = FindSourceInList(audio_source, audio_source_list);
if (iter != audio_source_list->end()) {
audio_source_list->erase(iter);
@@ -476,32 +371,15 @@ bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const {
*mixed_audio += *mixed_audio;
if (error != limiter_->kNoError) {
WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
"Error from AudioProcessing: %d", error);
LOG_F(LS_ERROR) << "Error from AudioProcessing: " << error;
RTC_NOTREACHED();
return false;
}
return true;
}
int AudioMixerImpl::GetOutputAudioLevel() {
RTC_DCHECK_RUN_ON(&thread_checker_);
const int level = audio_level_.Level();
WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
"GetAudioOutputLevel() => level=%d", level);
return level;
}
int AudioMixerImpl::GetOutputAudioLevelFullRange() {
RTC_DCHECK_RUN_ON(&thread_checker_);
const int level = audio_level_.LevelFullRange();
WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
"GetAudioOutputLevelFullRange() => level=%d", level);
return level;
}
bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest(
AudioMixerImpl::Source* audio_source) {
AudioMixerImpl::Source* audio_source) const {
RTC_DCHECK_RUN_ON(&thread_checker_);
rtc::CritScope lock(&crit_);
@@ -511,12 +389,6 @@ bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest(
return non_anonymous_iter->is_mixed;
}
const auto anonymous_iter =
FindSourceInList(audio_source, &additional_audio_source_list_);
if (anonymous_iter != audio_source_list_.end()) {
return anonymous_iter->is_mixed;
}
LOG(LS_ERROR) << "Audio source unknown";
return false;
}
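One piece of reasoning worth spelling out from the implementation above: the mixing step halves the signal (roughly -6 dB) when more than one stream is mixed, the limiter targets -7 dBFS rather than -6 to leave about 1 dB of headroom because the AGC is not a hard limiter, and LimitMixedAudio() then doubles the result back with *mixed_audio += *mixed_audio. A standalone sketch of that limiter setup, with the early-return error handling stripped so the gain_control() call sequence reads as one block (a real caller should check every call, as AudioMixerImpl::Create() does):

std::unique_ptr<webrtc::AudioProcessing> CreateMixLimiterSketch() {
  webrtc::Config config;
  config.Set<webrtc::ExperimentalAgc>(new webrtc::ExperimentalAgc(false));
  std::unique_ptr<webrtc::AudioProcessing> limiter(
      webrtc::AudioProcessing::Create(config));
  if (!limiter) {
    return nullptr;
  }
  webrtc::GainControl* gain = limiter->gain_control();
  gain->set_mode(webrtc::GainControl::kFixedDigital);
  gain->set_target_level_dbfs(7);    // Smoothly limit to -7 dBFS.
  gain->set_compression_gain_db(0);  // No make-up gain.
  gain->enable_limiter(true);
  gain->Enable(true);
  return limiter;
}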

File 3 of 4: audio_mixer_impl.h (AudioMixerImpl declaration)

@@ -11,7 +11,6 @@
#ifndef WEBRTC_MODULES_AUDIO_MIXER_AUDIO_MIXER_IMPL_H_
#define WEBRTC_MODULES_AUDIO_MIXER_AUDIO_MIXER_IMPL_H_
#include <map>
#include <memory>
#include <vector>
@@ -42,41 +41,35 @@ class AudioMixerImpl : public AudioMixer {
// AudioProcessing only accepts 10 ms frames.
static const int kFrameDurationInMs = 10;
static const int kDefaultFrequency = 48000;
static std::unique_ptr<AudioMixerImpl> Create(int id);
static std::unique_ptr<AudioMixerImpl> Create();
~AudioMixerImpl() override;
// AudioMixer functions
int32_t SetMixabilityStatus(Source* audio_source, bool mixable) override;
bool MixabilityStatus(const Source& audio_source) const override;
int32_t SetAnonymousMixabilityStatus(Source* audio_source,
bool mixable) override;
void Mix(int sample_rate,
size_t number_of_channels,
AudioFrame* audio_frame_for_mixing) override;
bool AnonymousMixabilityStatus(const Source& audio_source) const override;
// Returns true if the source was mixed last round. Returns
// false and logs an error if the source was never added to the
// mixer.
bool GetAudioSourceMixabilityStatusForTest(Source* audio_source);
bool GetAudioSourceMixabilityStatusForTest(Source* audio_source) const;
private:
AudioMixerImpl(int id, std::unique_ptr<AudioProcessing> limiter);
explicit AudioMixerImpl(std::unique_ptr<AudioProcessing> limiter);
// Set/get mix frequency
int32_t SetOutputFrequency(const Frequency& frequency);
Frequency OutputFrequency() const;
void SetOutputFrequency(int frequency);
int OutputFrequency() const;
// Compute what audio sources to mix from audio_source_list_. Ramp
// in and out. Update mixed status. Mixes up to
// kMaximumAmountOfMixedAudioSources audio sources.
AudioFrameList GetNonAnonymousAudio() EXCLUSIVE_LOCKS_REQUIRED(crit_);
// Return the AudioFrames that should be mixed anonymously. Ramp in
// and out. Update mixed status.
AudioFrameList GetAnonymousAudio() EXCLUSIVE_LOCKS_REQUIRED(crit_);
// Add/remove the MixerAudioSource to the specified
// MixerAudioSource list.
@@ -87,25 +80,16 @@ class AudioMixerImpl : public AudioMixer {
bool LimitMixedAudio(AudioFrame* mixed_audio) const;
// Output level functions for VoEVolumeControl.
int GetOutputAudioLevel() override;
int GetOutputAudioLevelFullRange() override;
rtc::CriticalSection crit_;
const int32_t id_;
// The current sample frequency and sample size when mixing.
Frequency output_frequency_ ACCESS_ON(&thread_checker_);
int output_frequency_ ACCESS_ON(&thread_checker_);
size_t sample_size_ ACCESS_ON(&thread_checker_);
// List of all audio sources. Note all lists are disjunct
SourceStatusList audio_source_list_ GUARDED_BY(crit_); // May be mixed.
// Always mixed, anonymously.
SourceStatusList additional_audio_source_list_ GUARDED_BY(crit_);
size_t num_mixed_audio_sources_ GUARDED_BY(crit_);
// Determines if we will use a limiter for clipping protection during
// mixing.
@@ -119,9 +103,6 @@ class AudioMixerImpl : public AudioMixer {
// Used for inhibiting saturation in mixing.
std::unique_ptr<AudioProcessing> limiter_ ACCESS_ON(&thread_checker_);
// Measures audio level for the combined signal.
voe::AudioLevel audio_level_ ACCESS_ON(&thread_checker_);
RTC_DISALLOW_COPY_AND_ASSIGN(AudioMixerImpl);
};
} // namespace webrtc
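The header relies on two annotation families to document its threading model: GUARDED_BY(crit_) for state shared with the registration calls, and ACCESS_ON(&thread_checker_) together with RTC_DCHECK_RUN_ON for state owned by the mixing thread. A small, hypothetical illustration of that pairing outside the mixer, assuming the same rtc:: locking and thread-checker helpers used above:

class Counter {
 public:
  // May be called from any thread; the lock guards the shared counter.
  void Add(int n) {
    rtc::CritScope lock(&crit_);
    shared_total_ += n;
  }

  // Must be called on the owning thread; enforced at runtime by the DCHECK
  // and documented statically by ACCESS_ON.
  void BumpOwnerTotal() {
    RTC_DCHECK_RUN_ON(&thread_checker_);
    ++owner_total_;
  }

 private:
  rtc::CriticalSection crit_;
  rtc::ThreadChecker thread_checker_;
  int shared_total_ GUARDED_BY(crit_) = 0;
  int owner_total_ ACCESS_ON(&thread_checker_) = 0;
};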

File 4 of 4: audio mixer unit tests

@@ -52,13 +52,12 @@ class MockMixerAudioSource : public AudioMixer::Source {
public:
MockMixerAudioSource()
: fake_audio_frame_info_(AudioMixer::Source::AudioFrameInfo::kNormal) {
ON_CALL(*this, GetAudioFrameWithInfo(_, _))
ON_CALL(*this, GetAudioFrameWithInfo(_))
.WillByDefault(
Invoke(this, &MockMixerAudioSource::FakeAudioFrameWithInfo));
}
MOCK_METHOD2(GetAudioFrameWithInfo,
AudioFrameWithInfo(const int32_t id, int sample_rate_hz));
MOCK_METHOD1(GetAudioFrameWithInfo, AudioFrameWithInfo(int sample_rate_hz));
AudioFrame* fake_frame() { return &fake_frame_; }
AudioFrameInfo fake_info() { return fake_audio_frame_info_; }
@@ -69,8 +68,7 @@ class MockMixerAudioSource : public AudioMixer::Source {
private:
AudioFrame fake_frame_, fake_output_frame_;
AudioFrameInfo fake_audio_frame_info_;
AudioFrameWithInfo FakeAudioFrameWithInfo(const int32_t id,
int sample_rate_hz) {
AudioFrameWithInfo FakeAudioFrameWithInfo(int sample_rate_hz) {
fake_output_frame_.CopyFrom(fake_frame_);
return {
&fake_output_frame_, // audio_frame_pointer
@@ -89,7 +87,7 @@ void MixAndCompare(
RTC_DCHECK(frames.size() == frame_info.size());
RTC_DCHECK(frame_info.size() == expected_status.size());
const std::unique_ptr<AudioMixerImpl> mixer(AudioMixerImpl::Create(kId));
const std::unique_ptr<AudioMixerImpl> mixer(AudioMixerImpl::Create());
std::vector<MockMixerAudioSource> participants(num_audio_sources);
for (int i = 0; i < num_audio_sources; i++) {
@@ -99,7 +97,7 @@ void MixAndCompare(
for (int i = 0; i < num_audio_sources; i++) {
EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true));
EXPECT_CALL(participants[i], GetAudioFrameWithInfo(_, kDefaultSampleRateHz))
EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz))
.Times(Exactly(1));
}
@@ -112,62 +110,11 @@ void MixAndCompare(
}
}
TEST(AudioMixer, AnonymousAndNamed) {
// Should not matter even if participants are more than
// kMaximumAmountOfMixedAudioSources.
constexpr int kNamed = AudioMixer::kMaximumAmountOfMixedAudioSources + 1;
constexpr int kAnonymous = AudioMixer::kMaximumAmountOfMixedAudioSources + 1;
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId));
MockMixerAudioSource named[kNamed];
MockMixerAudioSource anonymous[kAnonymous];
for (int i = 0; i < kNamed; ++i) {
EXPECT_EQ(0, mixer->SetMixabilityStatus(&named[i], true));
EXPECT_TRUE(mixer->MixabilityStatus(named[i]));
}
for (int i = 0; i < kAnonymous; ++i) {
// AudioSource must be registered before turning it into anonymous.
EXPECT_EQ(-1, mixer->SetAnonymousMixabilityStatus(&anonymous[i], true));
EXPECT_EQ(0, mixer->SetMixabilityStatus(&anonymous[i], true));
EXPECT_TRUE(mixer->MixabilityStatus(anonymous[i]));
EXPECT_FALSE(mixer->AnonymousMixabilityStatus(anonymous[i]));
EXPECT_EQ(0, mixer->SetAnonymousMixabilityStatus(&anonymous[i], true));
EXPECT_TRUE(mixer->AnonymousMixabilityStatus(anonymous[i]));
// Anonymous participants do not show status by MixabilityStatus.
EXPECT_FALSE(mixer->MixabilityStatus(anonymous[i]));
}
for (int i = 0; i < kNamed; ++i) {
EXPECT_EQ(0, mixer->SetMixabilityStatus(&named[i], false));
EXPECT_FALSE(mixer->MixabilityStatus(named[i]));
}
for (int i = 0; i < kAnonymous - 1; i++) {
EXPECT_EQ(0, mixer->SetAnonymousMixabilityStatus(&anonymous[i], false));
EXPECT_FALSE(mixer->AnonymousMixabilityStatus(anonymous[i]));
// SetAnonymousMixabilityStatus(anonymous, false) moves anonymous to the
// named group.
EXPECT_TRUE(mixer->MixabilityStatus(anonymous[i]));
}
// SetMixabilityStatus(anonymous, false) will remove anonymous from both
// anonymous and named groups.
EXPECT_EQ(0, mixer->SetMixabilityStatus(&anonymous[kAnonymous - 1], false));
EXPECT_FALSE(mixer->AnonymousMixabilityStatus(anonymous[kAnonymous - 1]));
EXPECT_FALSE(mixer->MixabilityStatus(anonymous[kAnonymous - 1]));
}
TEST(AudioMixer, LargestEnergyVadActiveMixed) {
constexpr int kAudioSources =
AudioMixer::kMaximumAmountOfMixedAudioSources + 3;
const std::unique_ptr<AudioMixerImpl> mixer(AudioMixerImpl::Create(kId));
const std::unique_ptr<AudioMixerImpl> mixer(AudioMixerImpl::Create());
MockMixerAudioSource participants[kAudioSources];
@@ -179,7 +126,7 @@ TEST(AudioMixer, LargestEnergyVadActiveMixed) {
participants[i].fake_frame()->data_[80] = i;
EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true));
EXPECT_CALL(participants[i], GetAudioFrameWithInfo(_, _)).Times(Exactly(1));
EXPECT_CALL(participants[i], GetAudioFrameWithInfo(_)).Times(Exactly(1));
}
// Last participant gives audio frame with passive VAD, although it has the
@@ -207,7 +154,7 @@ TEST(AudioMixer, LargestEnergyVadActiveMixed) {
}
TEST(AudioMixer, FrameNotModifiedForSingleParticipant) {
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId));
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create());
MockMixerAudioSource participant;
@@ -220,36 +167,7 @@ TEST(AudioMixer, FrameNotModifiedForSingleParticipant) {
}
EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true));
EXPECT_CALL(participant, GetAudioFrameWithInfo(_, _)).Times(Exactly(2));
AudioFrame audio_frame;
// Two mix iterations to compare after the ramp-up step.
for (int i = 0; i < 2; i++) {
mixer->Mix(kDefaultSampleRateHz,
1, // number of channels
&audio_frame);
}
EXPECT_EQ(
0, memcmp(participant.fake_frame()->data_, audio_frame.data_, n_samples));
}
TEST(AudioMixer, FrameNotModifiedForSingleAnonymousParticipant) {
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId));
MockMixerAudioSource participant;
ResetFrame(participant.fake_frame());
const int n_samples = participant.fake_frame()->samples_per_channel_;
// Modify the frame so that it's not zero.
for (int j = 0; j < n_samples; j++) {
participant.fake_frame()->data_[j] = j;
}
EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true));
EXPECT_EQ(0, mixer->SetAnonymousMixabilityStatus(&participant, true));
EXPECT_CALL(participant, GetAudioFrameWithInfo(_, _)).Times(Exactly(2));
EXPECT_CALL(participant, GetAudioFrameWithInfo(_)).Times(Exactly(2));
AudioFrame audio_frame;
// Two mix iterations to compare after the ramp-up step.
@@ -264,14 +182,14 @@ TEST(AudioMixer, FrameNotModifiedForSingleAnonymousParticipant) {
}
TEST(AudioMixer, ParticipantSampleRate) {
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId));
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create());
MockMixerAudioSource participant;
ResetFrame(participant.fake_frame());
EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true));
for (auto frequency : {8000, 16000, 32000, 48000}) {
EXPECT_CALL(participant, GetAudioFrameWithInfo(_, frequency))
EXPECT_CALL(participant, GetAudioFrameWithInfo(frequency))
.Times(Exactly(1));
participant.fake_frame()->sample_rate_hz_ = frequency;
participant.fake_frame()->samples_per_channel_ = frequency / 100;
@@ -281,77 +199,27 @@ TEST(AudioMixer, ParticipantSampleRate) {
}
TEST(AudioMixer, ParticipantNumberOfChannels) {
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId));
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create());
MockMixerAudioSource participant;
ResetFrame(participant.fake_frame());
EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true));
for (size_t number_of_channels : {1, 2}) {
EXPECT_CALL(participant, GetAudioFrameWithInfo(_, kDefaultSampleRateHz))
EXPECT_CALL(participant, GetAudioFrameWithInfo(kDefaultSampleRateHz))
.Times(Exactly(1));
mixer->Mix(kDefaultSampleRateHz, number_of_channels, &frame_for_mixing);
EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_);
}
}
// Test that the volume is reported as zero when the mixer input
// comprises only zero values.
TEST(AudioMixer, LevelIsZeroWhenMixingZeroes) {
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId));
MockMixerAudioSource participant;
ResetFrame(participant.fake_frame());
EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true));
for (int i = 0; i < 11; i++) {
EXPECT_CALL(participant, GetAudioFrameWithInfo(_, kDefaultSampleRateHz))
.Times(Exactly(1));
mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing);
}
EXPECT_EQ(0, mixer->GetOutputAudioLevel());
EXPECT_EQ(0, mixer->GetOutputAudioLevelFullRange());
}
// Test that the reported volume is maximal when the mixer
// input comprises frames with maximal values.
TEST(AudioMixer, LevelIsMaximalWhenMixingMaximalValues) {
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId));
MockMixerAudioSource participant;
ResetFrame(participant.fake_frame());
// Fill participant frame data with maximal sound.
std::fill(participant.fake_frame()->data_,
participant.fake_frame()->data_ + kDefaultSampleRateHz / 100,
std::numeric_limits<int16_t>::max());
EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true));
// We do >10 iterations, because the audio level indicator only
// updates once every 10 calls.
for (int i = 0; i < 11; i++) {
EXPECT_CALL(participant, GetAudioFrameWithInfo(_, kDefaultSampleRateHz))
.Times(Exactly(1));
mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing);
}
// 9 is the highest possible audio level
EXPECT_EQ(9, mixer->GetOutputAudioLevel());
// 0x7fff = 32767 is the highest full range audio level.
EXPECT_EQ(std::numeric_limits<int16_t>::max(),
mixer->GetOutputAudioLevelFullRange());
}
// Maximal amount of participants are mixed one iteration, then
// another participant with higher energy is added.
TEST(AudioMixer, RampedOutSourcesShouldNotBeMarkedMixed) {
constexpr int kAudioSources =
AudioMixer::kMaximumAmountOfMixedAudioSources + 1;
const std::unique_ptr<AudioMixerImpl> mixer(AudioMixerImpl::Create(kId));
const std::unique_ptr<AudioMixerImpl> mixer(AudioMixerImpl::Create());
MockMixerAudioSource participants[kAudioSources];
for (int i = 0; i < kAudioSources; i++) {
@@ -364,7 +232,7 @@ TEST(AudioMixer, RampedOutSourcesShouldNotBeMarkedMixed) {
// Add all participants but the loudest for mixing.
for (int i = 0; i < kAudioSources - 1; i++) {
EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true));
EXPECT_CALL(participants[i], GetAudioFrameWithInfo(_, kDefaultSampleRateHz))
EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz))
.Times(Exactly(1));
}
@@ -381,7 +249,7 @@ TEST(AudioMixer, RampedOutSourcesShouldNotBeMarkedMixed) {
EXPECT_EQ(0,
mixer->SetMixabilityStatus(&participants[kAudioSources - 1], true));
for (int i = 0; i < kAudioSources; i++) {
EXPECT_CALL(participants[i], GetAudioFrameWithInfo(_, kDefaultSampleRateHz))
EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz))
.Times(Exactly(1));
}
@@ -407,7 +275,7 @@ TEST(AudioMixer, ConstructFromOtherThread) {
init_thread->Start();
std::unique_ptr<AudioMixer> mixer(
init_thread->Invoke<std::unique_ptr<AudioMixer>>(
RTC_FROM_HERE, std::bind(&AudioMixer::Create, kId)));
RTC_FROM_HERE, &AudioMixer::Create));
MockMixerAudioSource participant;
ResetFrame(participant.fake_frame());
@@ -417,12 +285,7 @@ TEST(AudioMixer, ConstructFromOtherThread) {
RTC_FROM_HERE, rtc::Bind(&AudioMixer::SetMixabilityStatus,
mixer.get(), &participant, true)));
EXPECT_EQ(
0, participant_thread->Invoke<int>(
RTC_FROM_HERE, rtc::Bind(&AudioMixer::SetAnonymousMixabilityStatus,
mixer.get(), &participant, true)));
EXPECT_CALL(participant, GetAudioFrameWithInfo(_, kDefaultSampleRateHz))
EXPECT_CALL(participant, GetAudioFrameWithInfo(kDefaultSampleRateHz))
.Times(Exactly(1));
// Do one mixer iteration
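Taken together, the test updates follow one mechanical pattern: drop kId from Create(), drop the leading wildcard from every GetAudioFrameWithInfo() expectation, and delete the anonymous-mixing and audio-level tests outright. A hedged sketch of a test written directly against the trimmed interface, reusing the MockMixerAudioSource and ResetFrame helpers from this file (the test name and final assertion are illustrative only):

TEST(AudioMixer, MixesRegisteredSourceOnce) {
  const std::unique_ptr<AudioMixerImpl> mixer(AudioMixerImpl::Create());
  MockMixerAudioSource source;
  ResetFrame(source.fake_frame());

  EXPECT_EQ(0, mixer->SetMixabilityStatus(&source, true));
  EXPECT_CALL(source, GetAudioFrameWithInfo(kDefaultSampleRateHz))
      .Times(Exactly(1));

  AudioFrame mixed_frame;
  mixer->Mix(kDefaultSampleRateHz, 1, &mixed_frame);
  EXPECT_EQ(1u, mixed_frame.num_channels_);
}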