Move dynamic memory allocations of webrtc::AudioMixerImpl from RT thread

(4 vector allocations removed)

Bug: webrtc:12035,webrtc:12036
Change-Id: Ie0d734cd0016a27c57809af67187ceb97f92f233
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/188621
Commit-Queue: Olga Sharonova <olka@webrtc.org>
Reviewed-by: Alessio Bazzica <alessiob@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Reviewed-by: Per Åhgren <peah@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#32441}
Authored by Olga Sharonova on 2020-10-19 14:23:46 +02:00; committed by Commit Bot
parent d40c764ba8
commit 0607c962c1
8 changed files with 121 additions and 95 deletions
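
Note on the pattern: the CL pre-sizes scratch buffers on the control path (under the same mutex that guards the source list) and has the 10 ms mixing path write into that storage and return rtc::ArrayView slices, instead of building fresh std::vectors on every call. A minimal sketch of the idea follows — illustrative names only, with std::span standing in for rtc::ArrayView; this is not the actual WebRTC code.

// Illustrative sketch only: buffers grow on the (non-real-time) control path,
// while the per-frame path reuses them and returns a view, so no heap
// allocation happens while mixing.
#include <cstddef>
#include <span>  // stands in for rtc::ArrayView in this sketch
#include <vector>

struct FakeSource {
  int preferred_rate = 48000;
};

class ScratchMixer {
 public:
  // Control path: may allocate; called when sources are added or removed.
  void AddSource(FakeSource* source) {
    sources_.push_back(source);
    preferred_rates_.resize(sources_.size());
  }

  // Real-time path: fills preallocated storage and returns a view over the
  // part that was written; never touches the allocator.
  std::span<const int> CollectPreferredRates() {
    size_t count = 0;
    for (FakeSource* source : sources_) {
      preferred_rates_[count++] = source->preferred_rate;
    }
    return {preferred_rates_.data(), count};
  }

 private:
  std::vector<FakeSource*> sources_;
  std::vector<int> preferred_rates_;  // resized only on the control path
};

In the CL itself this role is played by the new HelperContainers struct, which AddSource() resizes under mutex_ and which Mix()/GetAudioFromSources() consume.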

modules/audio_mixer/audio_mixer_impl.cc

@@ -24,9 +24,23 @@
 #include "rtc_base/ref_counted_object.h"
 
 namespace webrtc {
 
+struct AudioMixerImpl::SourceStatus {
+  SourceStatus(Source* audio_source, bool is_mixed, float gain)
+      : audio_source(audio_source), is_mixed(is_mixed), gain(gain) {}
+  Source* audio_source = nullptr;
+  bool is_mixed = false;
+  float gain = 0.0f;
+
+  // A frame that will be passed to audio_source->GetAudioFrameWithInfo.
+  AudioFrame audio_frame;
+};
+
 namespace {
 
 struct SourceFrame {
+  SourceFrame() = default;
   SourceFrame(AudioMixerImpl::SourceStatus* source_status,
               AudioFrame* audio_frame,
               bool muted)
@@ -57,6 +71,7 @@ struct SourceFrame {
 };
 
 // ShouldMixBefore(a, b) is used to select mixer sources.
+// Returns true if `a` is preferred over `b` as a source to be mixed.
 bool ShouldMixBefore(const SourceFrame& a, const SourceFrame& b) {
   if (a.muted != b.muted) {
     return b.muted;
@@ -73,7 +88,7 @@ bool ShouldMixBefore(const SourceFrame& a, const SourceFrame& b) {
 }
 
 void RampAndUpdateGain(
-    const std::vector<SourceFrame>& mixed_sources_and_frames) {
+    rtc::ArrayView<const SourceFrame> mixed_sources_and_frames) {
   for (const auto& source_frame : mixed_sources_and_frames) {
     float target_gain = source_frame.source_status->is_mixed ? 1.0f : 0.0f;
     Ramp(source_frame.source_status->gain, target_gain,
@@ -82,9 +97,11 @@ void RampAndUpdateGain(
   }
 }
 
-AudioMixerImpl::SourceStatusList::const_iterator FindSourceInList(
+std::vector<std::unique_ptr<AudioMixerImpl::SourceStatus>>::const_iterator
+FindSourceInList(
     AudioMixerImpl::Source const* audio_source,
-    AudioMixerImpl::SourceStatusList const* audio_source_list) {
+    std::vector<std::unique_ptr<AudioMixerImpl::SourceStatus>> const*
+        audio_source_list) {
   return std::find_if(
       audio_source_list->begin(), audio_source_list->end(),
       [audio_source](const std::unique_ptr<AudioMixerImpl::SourceStatus>& p) {
@@ -93,14 +110,31 @@ AudioMixerImpl::SourceStatusList::const_iterator FindSourceInList(
 }
 }  // namespace
 
+struct AudioMixerImpl::HelperContainers {
+  void resize(size_t size) {
+    audio_to_mix.resize(size);
+    audio_source_mixing_data_list.resize(size);
+    ramp_list.resize(size);
+    preferred_rates.resize(size);
+  }
+
+  std::vector<AudioFrame*> audio_to_mix;
+  std::vector<SourceFrame> audio_source_mixing_data_list;
+  std::vector<SourceFrame> ramp_list;
+  std::vector<int> preferred_rates;
+};
+
 AudioMixerImpl::AudioMixerImpl(
     std::unique_ptr<OutputRateCalculator> output_rate_calculator,
     bool use_limiter)
     : output_rate_calculator_(std::move(output_rate_calculator)),
-      output_frequency_(0),
-      sample_size_(0),
       audio_source_list_(),
-      frame_combiner_(use_limiter) {}
+      helper_containers_(std::make_unique<HelperContainers>()),
+      frame_combiner_(use_limiter) {
+  const int kTypicalMaxNumberOfMixedStreams = 3;
+  audio_source_list_.reserve(kTypicalMaxNumberOfMixedStreams);
+  helper_containers_->resize(kTypicalMaxNumberOfMixedStreams);
+}
 
 AudioMixerImpl::~AudioMixerImpl() {}
@@ -121,40 +155,23 @@ rtc::scoped_refptr<AudioMixerImpl> AudioMixerImpl::Create(
 void AudioMixerImpl::Mix(size_t number_of_channels,
                          AudioFrame* audio_frame_for_mixing) {
   RTC_DCHECK(number_of_channels >= 1);
-  RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
 
-  CalculateOutputFrequency();
-
-  {
-    MutexLock lock(&mutex_);
-    const size_t number_of_streams = audio_source_list_.size();
-    frame_combiner_.Combine(GetAudioFromSources(), number_of_channels,
-                            OutputFrequency(), number_of_streams,
-                            audio_frame_for_mixing);
-  }
-
-  return;
-}
-
-void AudioMixerImpl::CalculateOutputFrequency() {
-  RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
   MutexLock lock(&mutex_);
+  size_t number_of_streams = audio_source_list_.size();
 
-  std::vector<int> preferred_rates;
   std::transform(audio_source_list_.begin(), audio_source_list_.end(),
-                 std::back_inserter(preferred_rates),
+                 helper_containers_->preferred_rates.begin(),
                  [&](std::unique_ptr<SourceStatus>& a) {
                    return a->audio_source->PreferredSampleRate();
                  });
 
-  output_frequency_ =
-      output_rate_calculator_->CalculateOutputRate(preferred_rates);
-  sample_size_ = (output_frequency_ * kFrameDurationInMs) / 1000;
-}
-
-int AudioMixerImpl::OutputFrequency() const {
-  RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
-  return output_frequency_;
+  int output_frequency = output_rate_calculator_->CalculateOutputRateFromRange(
+      rtc::ArrayView<const int>(helper_containers_->preferred_rates.data(),
+                                number_of_streams));
+
+  frame_combiner_.Combine(GetAudioFromSources(output_frequency),
+                          number_of_channels, output_frequency,
+                          number_of_streams, audio_frame_for_mixing);
 }
 
 bool AudioMixerImpl::AddSource(Source* audio_source) {
@@ -164,6 +181,7 @@ bool AudioMixerImpl::AddSource(Source* audio_source) {
              audio_source_list_.end())
       << "Source already added to mixer";
   audio_source_list_.emplace_back(new SourceStatus(audio_source, false, 0));
+  helper_containers_->resize(audio_source_list_.size());
   return true;
 }
@@ -175,35 +193,37 @@ void AudioMixerImpl::RemoveSource(Source* audio_source) {
   audio_source_list_.erase(iter);
 }
 
-AudioFrameList AudioMixerImpl::GetAudioFromSources() {
-  RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
-  AudioFrameList result;
-  std::vector<SourceFrame> audio_source_mixing_data_list;
-  std::vector<SourceFrame> ramp_list;
-
+rtc::ArrayView<AudioFrame* const> AudioMixerImpl::GetAudioFromSources(
+    int output_frequency) {
   // Get audio from the audio sources and put it in the SourceFrame vector.
+  int audio_source_mixing_data_count = 0;
   for (auto& source_and_status : audio_source_list_) {
     const auto audio_frame_info =
         source_and_status->audio_source->GetAudioFrameWithInfo(
-            OutputFrequency(), &source_and_status->audio_frame);
+            output_frequency, &source_and_status->audio_frame);
 
     if (audio_frame_info == Source::AudioFrameInfo::kError) {
       RTC_LOG_F(LS_WARNING) << "failed to GetAudioFrameWithInfo() from source";
       continue;
     }
-    audio_source_mixing_data_list.emplace_back(
-        source_and_status.get(), &source_and_status->audio_frame,
-        audio_frame_info == Source::AudioFrameInfo::kMuted);
+    helper_containers_
+        ->audio_source_mixing_data_list[audio_source_mixing_data_count++] =
+        SourceFrame(source_and_status.get(), &source_and_status->audio_frame,
+                    audio_frame_info == Source::AudioFrameInfo::kMuted);
   }
+  rtc::ArrayView<SourceFrame> audio_source_mixing_data_view(
+      helper_containers_->audio_source_mixing_data_list.data(),
+      audio_source_mixing_data_count);
 
   // Sort frames by sorting function.
-  std::sort(audio_source_mixing_data_list.begin(),
-            audio_source_mixing_data_list.end(), ShouldMixBefore);
+  std::sort(audio_source_mixing_data_view.begin(),
+            audio_source_mixing_data_view.end(), ShouldMixBefore);
 
   int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources;
+  int ramp_list_lengh = 0;
+  int audio_to_mix_count = 0;
 
   // Go through list in order and put unmuted frames in result list.
-  for (const auto& p : audio_source_mixing_data_list) {
+  for (const auto& p : audio_source_mixing_data_view) {
     // Filter muted.
     if (p.muted) {
       p.source_status->is_mixed = false;
@@ -214,19 +234,21 @@ AudioFrameList AudioMixerImpl::GetAudioFromSources() {
     bool is_mixed = false;
     if (max_audio_frame_counter > 0) {
       --max_audio_frame_counter;
-      result.push_back(p.audio_frame);
-      ramp_list.emplace_back(p.source_status, p.audio_frame, false, -1);
+      helper_containers_->audio_to_mix[audio_to_mix_count++] = p.audio_frame;
+      helper_containers_->ramp_list[ramp_list_lengh++] =
+          SourceFrame(p.source_status, p.audio_frame, false, -1);
       is_mixed = true;
     }
     p.source_status->is_mixed = is_mixed;
   }
-  RampAndUpdateGain(ramp_list);
-  return result;
+  RampAndUpdateGain(rtc::ArrayView<SourceFrame>(
+      helper_containers_->ramp_list.data(), ramp_list_lengh));
+  return rtc::ArrayView<AudioFrame* const>(
+      helper_containers_->audio_to_mix.data(), audio_to_mix_count);
 }
 
 bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest(
     AudioMixerImpl::Source* audio_source) const {
-  RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
   MutexLock lock(&mutex_);
   const auto iter = FindSourceInList(audio_source, &audio_source_list_);

modules/audio_mixer/audio_mixer_impl.h

@@ -16,6 +16,7 @@
 #include <memory>
 #include <vector>
 
+#include "api/array_view.h"
 #include "api/audio/audio_frame.h"
 #include "api/audio/audio_mixer.h"
 #include "api/scoped_refptr.h"
@@ -28,22 +29,9 @@
 
 namespace webrtc {
 
-typedef std::vector<AudioFrame*> AudioFrameList;
-
 class AudioMixerImpl : public AudioMixer {
  public:
-  struct SourceStatus {
-    SourceStatus(Source* audio_source, bool is_mixed, float gain)
-        : audio_source(audio_source), is_mixed(is_mixed), gain(gain) {}
-    Source* audio_source = nullptr;
-    bool is_mixed = false;
-    float gain = 0.0f;
-
-    // A frame that will be passed to audio_source->GetAudioFrameWithInfo.
-    AudioFrame audio_frame;
-  };
-
-  using SourceStatusList = std::vector<std::unique_ptr<SourceStatus>>;
+  struct SourceStatus;
 
   // AudioProcessing only accepts 10 ms frames.
   static const int kFrameDurationInMs = 10;
@@ -75,32 +63,29 @@ class AudioMixerImpl : public AudioMixer {
       bool use_limiter);
 
  private:
-  // Set mixing frequency through OutputFrequencyCalculator.
-  void CalculateOutputFrequency();
-  // Get mixing frequency.
-  int OutputFrequency() const;
+  struct HelperContainers;
 
   // Compute what audio sources to mix from audio_source_list_. Ramp
   // in and out. Update mixed status. Mixes up to
   // kMaximumAmountOfMixedAudioSources audio sources.
-  AudioFrameList GetAudioFromSources() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+  rtc::ArrayView<AudioFrame* const> GetAudioFromSources(int output_frequency)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
 
   // The critical section lock guards audio source insertion and
   // removal, which can be done from any thread. The race checker
   // checks that mixing is done sequentially.
   mutable Mutex mutex_;
-  rtc::RaceChecker race_checker_;
 
   std::unique_ptr<OutputRateCalculator> output_rate_calculator_;
-  // The current sample frequency and sample size when mixing.
-  int output_frequency_ RTC_GUARDED_BY(race_checker_);
-  size_t sample_size_ RTC_GUARDED_BY(race_checker_);
 
-  // List of all audio sources. Note all lists are disjunct
-  SourceStatusList audio_source_list_ RTC_GUARDED_BY(mutex_);  // May be mixed.
+  // List of all audio sources.
+  std::vector<std::unique_ptr<SourceStatus>> audio_source_list_
+      RTC_GUARDED_BY(mutex_);
+  const std::unique_ptr<HelperContainers> helper_containers_
+      RTC_GUARDED_BY(mutex_);
 
   // Component that handles actual adding of audio frames.
-  FrameCombiner frame_combiner_ RTC_GUARDED_BY(race_checker_);
+  FrameCombiner frame_combiner_;
 
   RTC_DISALLOW_COPY_AND_ASSIGN(AudioMixerImpl);
 };
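
Note on threading: the header keeps the comment that mutex_ guards source insertion and removal from any thread; after this CL the mixing path also runs under that same mutex, which is why the separate rtc::RaceChecker member and its RTC_GUARDED_BY annotations disappear. A generic sketch of that guard pattern, with placeholder types rather than the WebRTC classes:

// Placeholder sketch: one mutex serializes add/remove (any thread) with the
// periodic mix call (audio thread).
#include <algorithm>
#include <mutex>
#include <vector>

class GuardedMixer {
 public:
  void AddSource(int source_id) {
    std::lock_guard<std::mutex> lock(mutex_);
    sources_.push_back(source_id);
  }

  void RemoveSource(int source_id) {
    std::lock_guard<std::mutex> lock(mutex_);
    sources_.erase(std::remove(sources_.begin(), sources_.end(), source_id),
                   sources_.end());
  }

  // Called once per 10 ms frame; holding the same lock makes it safe against
  // concurrent AddSource/RemoveSource calls.
  int MixOnce() {
    std::lock_guard<std::mutex> lock(mutex_);
    int mixed = 0;
    for (int id : sources_) {
      mixed += id;  // stand-in for the real per-source mixing work
    }
    return mixed;
  }

 private:
  std::mutex mutex_;
  std::vector<int> sources_;
};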

modules/audio_mixer/audio_mixer_impl_unittest.cc

@@ -105,7 +105,8 @@ class MockMixerAudioSource : public ::testing::NiceMock<AudioMixer::Source> {
 class CustomRateCalculator : public OutputRateCalculator {
  public:
   explicit CustomRateCalculator(int rate) : rate_(rate) {}
-  int CalculateOutputRate(const std::vector<int>& preferred_rates) override {
+  int CalculateOutputRateFromRange(
+      rtc::ArrayView<const int> preferred_rates) override {
     return rate_;
   }
@@ -598,8 +599,8 @@ TEST(AudioMixer, MultipleChannelsManyParticipants) {
 class HighOutputRateCalculator : public OutputRateCalculator {
  public:
   static const int kDefaultFrequency = 76000;
-  int CalculateOutputRate(
-      const std::vector<int>& preferred_sample_rates) override {
+  int CalculateOutputRateFromRange(
+      rtc::ArrayView<const int> preferred_sample_rates) override {
     return kDefaultFrequency;
   }
   ~HighOutputRateCalculator() override {}

modules/audio_mixer/default_output_rate_calculator.cc

@@ -18,14 +18,14 @@
 
 namespace webrtc {
 
-int DefaultOutputRateCalculator::CalculateOutputRate(
-    const std::vector<int>& preferred_sample_rates) {
+int DefaultOutputRateCalculator::CalculateOutputRateFromRange(
+    rtc::ArrayView<const int> preferred_sample_rates) {
   if (preferred_sample_rates.empty()) {
     return DefaultOutputRateCalculator::kDefaultFrequency;
   }
   using NativeRate = AudioProcessing::NativeRate;
   const int maximal_frequency = *std::max_element(
-      preferred_sample_rates.begin(), preferred_sample_rates.end());
+      preferred_sample_rates.cbegin(), preferred_sample_rates.cend());
   RTC_DCHECK_LE(NativeRate::kSampleRate8kHz, maximal_frequency);
   RTC_DCHECK_GE(NativeRate::kSampleRate48kHz, maximal_frequency);

modules/audio_mixer/default_output_rate_calculator.h

@@ -13,6 +13,7 @@
 
 #include <vector>
 
+#include "api/array_view.h"
 #include "modules/audio_mixer/output_rate_calculator.h"
 
 namespace webrtc {
@@ -25,8 +26,8 @@ class DefaultOutputRateCalculator : public OutputRateCalculator {
   // sample rates. A native rate is one in
   // AudioProcessing::NativeRate. If |preferred_sample_rates| is
   // empty, returns |kDefaultFrequency|.
-  int CalculateOutputRate(
-      const std::vector<int>& preferred_sample_rates) override;
+  int CalculateOutputRateFromRange(
+      rtc::ArrayView<const int> preferred_sample_rates) override;
   ~DefaultOutputRateCalculator() override {}
 };

modules/audio_mixer/frame_combiner.cc

@@ -35,7 +35,7 @@ using MixingBuffer =
     std::array<std::array<float, FrameCombiner::kMaximumChannelSize>,
                FrameCombiner::kMaximumNumberOfChannels>;
 
-void SetAudioFrameFields(const std::vector<AudioFrame*>& mix_list,
+void SetAudioFrameFields(rtc::ArrayView<const AudioFrame* const> mix_list,
                          size_t number_of_channels,
                          int sample_rate,
                          size_t number_of_streams,
@@ -61,7 +61,7 @@ void SetAudioFrameFields(const std::vector<AudioFrame*>& mix_list,
   }
 }
 
-void MixFewFramesWithNoLimiter(const std::vector<AudioFrame*>& mix_list,
+void MixFewFramesWithNoLimiter(rtc::ArrayView<const AudioFrame* const> mix_list,
                                AudioFrame* audio_frame_for_mixing) {
   if (mix_list.empty()) {
     audio_frame_for_mixing->Mute();
@@ -74,7 +74,7 @@ void MixFewFramesWithNoLimiter(const std::vector<AudioFrame*>& mix_list,
                          audio_frame_for_mixing->mutable_data());
 }
 
-void MixToFloatFrame(const std::vector<AudioFrame*>& mix_list,
+void MixToFloatFrame(rtc::ArrayView<const AudioFrame* const> mix_list,
                      size_t samples_per_channel,
                      size_t number_of_channels,
                      MixingBuffer* mixing_buffer) {
@@ -140,7 +140,7 @@ FrameCombiner::FrameCombiner(bool use_limiter)
 
 FrameCombiner::~FrameCombiner() = default;
 
-void FrameCombiner::Combine(const std::vector<AudioFrame*>& mix_list,
+void FrameCombiner::Combine(rtc::ArrayView<AudioFrame* const> mix_list,
                             size_t number_of_channels,
                             int sample_rate,
                             size_t number_of_streams,
@@ -195,9 +195,10 @@ void FrameCombiner::Combine(const std::vector<AudioFrame*>& mix_list,
   InterleaveToAudioFrame(mixing_buffer_view, audio_frame_for_mixing);
 }
 
-void FrameCombiner::LogMixingStats(const std::vector<AudioFrame*>& mix_list,
-                                   int sample_rate,
-                                   size_t number_of_streams) const {
+void FrameCombiner::LogMixingStats(
+    rtc::ArrayView<const AudioFrame* const> mix_list,
+    int sample_rate,
+    size_t number_of_streams) const {
   // Log every second.
   uma_logging_counter_++;
   if (uma_logging_counter_ > 1000 / AudioMixerImpl::kFrameDurationInMs) {

modules/audio_mixer/frame_combiner.h

@@ -14,6 +14,7 @@
 #include <memory>
 #include <vector>
 
+#include "api/array_view.h"
 #include "api/audio/audio_frame.h"
 #include "modules/audio_processing/agc2/limiter.h"
@@ -32,7 +33,7 @@ class FrameCombiner {
   // because 'mix_list' can be empty. The parameter
   // 'number_of_streams' is used for determining whether to pass the
   // data through a limiter.
-  void Combine(const std::vector<AudioFrame*>& mix_list,
+  void Combine(rtc::ArrayView<AudioFrame* const> mix_list,
                size_t number_of_channels,
                int sample_rate,
                size_t number_of_streams,
@@ -46,7 +47,7 @@ class FrameCombiner {
                  kMaximumNumberOfChannels>;
 
  private:
-  void LogMixingStats(const std::vector<AudioFrame*>& mix_list,
+  void LogMixingStats(rtc::ArrayView<const AudioFrame* const> mix_list,
                       int sample_rate,
                       size_t number_of_streams) const;

modules/audio_mixer/output_rate_calculator.h

@@ -13,14 +13,29 @@
 
 #include <vector>
 
+#include "api/array_view.h"
+
 namespace webrtc {
 
 // Decides the sample rate of a mixing iteration given the preferred
 // sample rates of the sources.
 class OutputRateCalculator {
  public:
+  virtual int CalculateOutputRateFromRange(
+      rtc::ArrayView<const int> preferred_sample_rates) {
+    // TODO(olka): Temporary workaround to resolve client dependencies.
+    std::vector<int> sample_rates(preferred_sample_rates.cbegin(),
+                                  preferred_sample_rates.cend());
+    return CalculateOutputRate(sample_rates);
+  }
+
+  // TODO(olka) to be removed as soon as the clients are switched to
+  // CalculateOutputRateFromRange()
   virtual int CalculateOutputRate(
-      const std::vector<int>& preferred_sample_rates) = 0;
+      const std::vector<int>& preferred_sample_rates) {
+    return CalculateOutputRateFromRange(preferred_sample_rates);
+  }
+
   virtual ~OutputRateCalculator() {}
 };
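
Note on migration: during the transition OutputRateCalculator keeps both virtual methods, and each default implementation forwards to the other, so a subclass only has to override one of them. A sketch of a hypothetical client calculator that has already moved to the ArrayView overload (not part of this CL):

// Hypothetical client calculator (not from this CL). Only the new ArrayView
// overload is overridden; the legacy std::vector overload falls back to it
// through the base-class default implementation.
#include <algorithm>

#include "api/array_view.h"
#include "modules/audio_mixer/output_rate_calculator.h"

namespace example {

class MaxRateCalculator : public webrtc::OutputRateCalculator {
 public:
  int CalculateOutputRateFromRange(
      rtc::ArrayView<const int> preferred_sample_rates) override {
    if (preferred_sample_rates.empty()) {
      return 48000;  // arbitrary fallback chosen for this sketch
    }
    // Pick the highest preferred rate, similar in spirit to the default
    // calculator.
    return *std::max_element(preferred_sample_rates.cbegin(),
                             preferred_sample_rates.cend());
  }
  ~MaxRateCalculator() override = default;
};

}  // namespace example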