Delete AudioMonitor and related code.

Bug: webrtc:8760
Change-Id: I0b11ec66b0f2576f52866864ba046191034a4d2d
Reviewed-on: https://webrtc-review.googlesource.com/39003
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Fredrik Solenberg <solenberg@webrtc.org>
Reviewed-by: Taylor Brandstetter <deadbeef@webrtc.org>
Reviewed-by: Noah Richards <noahric@chromium.org>
Cr-Commit-Position: refs/heads/master@{#21801}
Niels Möller, 2018-01-30 09:33:03 +01:00 (committed by Commit Bot)
parent 04164cc5ac, commit f120cba82d
26 changed files with 17 additions and 311 deletions
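
For orientation before the per-file diffs: what survives this change is the linear full-range peak level, reachable through AudioState::GetAudioInputStats() on the capture side and Channel::GetSpeechOutputLevelFullRange() / the receive-stream stats on the playout side; the quantized 0-9 value and the polling AudioMonitor built around it are what get deleted. A rough caller-side sketch of living without the quantized field, assuming an AudioState::Stats value obtained as in the audio_state.cc hunk below (the helper function and its 0-9 scaling are illustrative, not part of this CL):

// Hypothetical caller-side helper, not part of this CL: derive a coarse
// meter position from the linear [0, 32767] peak that remains in the stats.
#include <algorithm>
#include <cstdint>

#include "call/audio_state.h"  // Header path assumed for this revision.

int MeterPositionFromInputStats(const webrtc::AudioState::Stats& stats) {
  // stats.audio_level is -1 until audio has been captured; clamp that case.
  const int32_t level = std::max<int32_t>(stats.audio_level, 0);
  // Map 0..32767 onto ten meter steps (0..9); the exact curve is up to the UI.
  return std::min<int32_t>(level / 3277, 9);
}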

View File

@@ -16,25 +16,13 @@
 namespace webrtc {
 namespace voe {
-// Number of bars on the indicator.
-// Note that the number of elements is specified because we are indexing it
-// in the range of 0-32
-constexpr int8_t kPermutation[33] = {0, 1, 2, 3, 4, 4, 5, 5, 5, 5, 6,
-                                     6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8,
-                                     9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9};
 AudioLevel::AudioLevel()
-    : abs_max_(0), count_(0), current_level_(0), current_level_full_range_(0) {
+    : abs_max_(0), count_(0), current_level_full_range_(0) {
   WebRtcSpl_Init();
 }
 AudioLevel::~AudioLevel() {}
-int8_t AudioLevel::Level() const {
-  rtc::CritScope cs(&crit_sect_);
-  return current_level_;
-}
 int16_t AudioLevel::LevelFullRange() const {
   rtc::CritScope cs(&crit_sect_);
   return current_level_full_range_;
@@ -44,7 +32,6 @@ void AudioLevel::Clear() {
   rtc::CritScope cs(&crit_sect_);
   abs_max_ = 0;
   count_ = 0;
-  current_level_ = 0;
   current_level_full_range_ = 0;
 }
@@ -78,18 +65,6 @@ void AudioLevel::ComputeLevel(const AudioFrame& audioFrame, double duration) {
     count_ = 0;
-    // Highest value for a int16_t is 0x7fff = 32767
-    // Divide with 1000 to get in the range of 0-32 which is the range of the
-    // permutation vector
-    int32_t position = abs_max_ / 1000;
-    // Make it less likely that the bar stays at position 0. I.e. only if it's
-    // in the range 0-250 (instead of 0-1000)
-    if ((position == 0) && (abs_max_ > 250)) {
-      position = 1;
-    }
-    current_level_ = kPermutation[position];
     // Decay the absolute maximum (divide by 4)
     abs_max_ >>= 2;
   }
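
The quantized level removed above was derived from the same abs_max_ that still feeds LevelFullRange(), so the old bar behaviour can be reproduced outside AudioLevel from the full-range value alone. A standalone sketch of the deleted mapping, with the table and thresholds copied from the removed lines (the free-function form and its name are not part of the original code):

#include <cstdint>

// Sketch of the quantization this CL deletes from AudioLevel::ComputeLevel().
// Input: the full-range peak (max(abs())) in [0, 32767]; output: a bar in [0, 9].
int8_t QuantizedLevel(int16_t abs_max) {
  // Table copied from the removed kPermutation: indexed in the range 0-32.
  static constexpr int8_t kPermutation[33] = {0, 1, 2, 3, 4, 4, 5, 5, 5, 5, 6,
                                              6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8,
                                              9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9};
  // Highest value for an int16_t is 0x7fff = 32767; dividing by 1000 lands in
  // the table's 0-32 index range.
  int32_t position = abs_max / 1000;
  // Make it less likely that the bar stays at position 0: anything above 250
  // (rather than 1000) already counts as the first step.
  if (position == 0 && abs_max > 250) {
    position = 1;
  }
  return kPermutation[position];
}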

View File

@@ -25,8 +25,7 @@ class AudioLevel {
   ~AudioLevel();
   // Called on "API thread(s)" from APIs like VoEBase::CreateChannel(),
-  // VoEBase::StopSend(), VoEVolumeControl::GetSpeechOutputLevel().
-  int8_t Level() const;
+  // VoEBase::StopSend()
   int16_t LevelFullRange() const;
   void Clear();
   // See the description for "totalAudioEnergy" in the WebRTC stats spec
@@ -46,7 +45,6 @@ class AudioLevel {
   int16_t abs_max_ RTC_GUARDED_BY(crit_sect_);
   int16_t count_ RTC_GUARDED_BY(crit_sect_);
-  int8_t current_level_ RTC_GUARDED_BY(crit_sect_);
   int16_t current_level_full_range_ RTC_GUARDED_BY(crit_sect_);
   double total_energy_ RTC_GUARDED_BY(crit_sect_) = 0.0;

View File

@@ -221,11 +221,6 @@ webrtc::AudioReceiveStream::Stats AudioReceiveStream::GetStats() const {
   return stats;
 }
-int AudioReceiveStream::GetOutputLevel() const {
-  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-  return channel_proxy_->GetSpeechOutputLevel();
-}
 void AudioReceiveStream::SetSink(AudioSinkInterface* sink) {
   RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   channel_proxy_->SetSink(sink);

View File

@@ -61,7 +61,6 @@ class AudioReceiveStream final : public webrtc::AudioReceiveStream,
   void Start() override;
   void Stop() override;
   webrtc::AudioReceiveStream::Stats GetStats() const override;
-  int GetOutputLevel() const override;
   void SetSink(AudioSinkInterface* sink) override;
   void SetGain(float gain) override;
   std::vector<webrtc::RtpSource> GetSources() const override;

View File

@@ -149,9 +149,6 @@ AudioState::Stats AudioState::GetAudioInputStats() const {
   result.audio_level = audio_level.LevelFullRange();
   RTC_DCHECK_LE(0, result.audio_level);
   RTC_DCHECK_GE(32767, result.audio_level);
-  result.quantized_audio_level = audio_level.Level();
-  RTC_DCHECK_LE(0, result.quantized_audio_level);
-  RTC_DCHECK_GE(9, result.quantized_audio_level);
   result.total_energy = audio_level.TotalEnergy();
   result.total_duration = audio_level.TotalDuration();
   return result;

View File

@@ -234,7 +234,6 @@ TEST(AudioStateTest, InputLevelStats) {
         kNumChannels, kSampleRate, 0, 0, 0, false, new_mic_level);
     auto stats = audio_state->GetAudioInputStats();
     EXPECT_EQ(0, stats.audio_level);
-    EXPECT_EQ(0, stats.quantized_audio_level);
     EXPECT_THAT(stats.total_energy, testing::DoubleEq(0.0));
     EXPECT_THAT(stats.total_duration, testing::DoubleEq(0.01));
   }
@@ -250,7 +249,6 @@ TEST(AudioStateTest, InputLevelStats) {
     }
     auto stats = audio_state->GetAudioInputStats();
    EXPECT_EQ(32767, stats.audio_level);
-    EXPECT_EQ(9, stats.quantized_audio_level);
     EXPECT_THAT(stats.total_energy, testing::DoubleEq(0.01));
     EXPECT_THAT(stats.total_duration, testing::DoubleEq(0.11));
   }

View File

@@ -1011,10 +1011,6 @@ int32_t Channel::ReceivedRTCPPacket(const uint8_t* data, size_t length) {
   return 0;
 }
-int Channel::GetSpeechOutputLevel() const {
-  return _outputAudioLevel.Level();
-}
 int Channel::GetSpeechOutputLevelFullRange() const {
   return _outputAudioLevel.LevelFullRange();
 }

View File

@@ -34,6 +34,10 @@
 #include "rtc_base/task_queue.h"
 #include "rtc_base/thread_checker.h"
+// TODO(solenberg, nisse): This file contains a few NOLINT marks, to silence
+// warnings about use of unsigned short, and non-const reference arguments.
+// These need cleanup, in a separate cl.
 namespace rtc {
 class TimestampWrapAroundHandler;
 }
@@ -57,7 +61,7 @@ class TelephoneEventHandler;
 struct SenderInfo;
 struct CallStatistics {
-  unsigned short fractionLost;
+  unsigned short fractionLost;  // NOLINT
   unsigned int cumulativeLost;
   unsigned int extendedMax;
   unsigned int jitterSamples;
@@ -174,7 +178,7 @@ class Channel
   void StopSend();
   // Codecs
-  int32_t GetRecCodec(CodecInst& codec);
+  int32_t GetRecCodec(CodecInst& codec);  // NOLINT
   void SetBitRate(int bitrate_bps, int64_t probing_interval_ms);
   bool EnableAudioNetworkAdaptor(const std::string& config_string);
   void DisableAudioNetworkAdaptor();
@@ -190,7 +194,6 @@ class Channel
   // Muting, Volume and Level.
   void SetInputMute(bool enable);
   void SetChannelOutputVolumeScaling(float scaling);
-  int GetSpeechOutputLevel() const;
   int GetSpeechOutputLevelFullRange() const;
   // See description of "totalAudioEnergy" in the WebRTC stats spec:
   // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
@@ -198,14 +201,14 @@ class Channel
   double GetTotalOutputDuration() const;
   // Stats.
-  int GetNetworkStatistics(NetworkStatistics& stats);
+  int GetNetworkStatistics(NetworkStatistics& stats);  // NOLINT
   void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const;
   ANAStats GetANAStatistics() const;
   // Audio+Video Sync.
   uint32_t GetDelayEstimate() const;
   int SetMinimumPlayoutDelay(int delayMs);
-  int GetPlayoutTimestamp(unsigned int& timestamp);
+  int GetPlayoutTimestamp(unsigned int& timestamp);  // NOLINT
   int GetRtpRtcp(RtpRtcp** rtpRtcpModule, RtpReceiver** rtp_receiver) const;
   // DTMF.
@@ -226,7 +229,7 @@ class Channel
   void SetRTCPStatus(bool enable);
   int SetRTCP_CNAME(const char cName[256]);
   int GetRemoteRTCPReportBlocks(std::vector<ReportBlock>* report_blocks);
-  int GetRTPStatistics(CallStatistics& stats);
+  int GetRTPStatistics(CallStatistics& stats);  // NOLINT
   void SetNACKStatus(bool enable, int maxNumberOfPackets);
   // From AudioPacketizationCallback in the ACM
@@ -265,7 +268,6 @@ class Channel
   bool Playing() const { return channel_state_.Get().playing; }
   bool Sending() const { return channel_state_.Get().sending; }
   RtpRtcp* RtpRtcpModulePtr() const { return _rtpRtcpModule.get(); }
-  int8_t OutputEnergyLevel() const { return _outputAudioLevel.Level(); }
   // ProcessAndEncodeAudio() posts a task on the shared encoder task queue,
   // which in turn calls (on the queue) ProcessAndEncodeAudioOnTaskQueue() where
@@ -309,7 +311,7 @@ class Channel
   void Init();
   void Terminate();
-  int GetRemoteSSRC(unsigned int& ssrc);
+  int GetRemoteSSRC(unsigned int& ssrc);  // NOLINT
   void OnUplinkPacketLossRate(float packet_loss_rate);
   bool InputMute() const;

View File

@@ -137,11 +137,6 @@ ANAStats ChannelProxy::GetANAStatistics() const {
   return channel_->GetANAStatistics();
 }
-int ChannelProxy::GetSpeechOutputLevel() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->GetSpeechOutputLevel();
-}
 int ChannelProxy::GetSpeechOutputLevelFullRange() const {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   return channel_->GetSpeechOutputLevelFullRange();

View File

@@ -78,7 +78,6 @@ class ChannelProxy : public RtpPacketSinkInterface {
   virtual NetworkStatistics GetNetworkStatistics() const;
   virtual AudioDecodingCallStats GetDecodingCallStatistics() const;
   virtual ANAStats GetANAStatistics() const;
-  virtual int GetSpeechOutputLevel() const;
   virtual int GetSpeechOutputLevelFullRange() const;
   // See description of "totalAudioEnergy" in the WebRTC stats spec:
   // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy

View File

@@ -54,7 +54,6 @@ class MockVoEChannelProxy : public voe::ChannelProxy {
   MOCK_CONST_METHOD0(GetNetworkStatistics, NetworkStatistics());
   MOCK_CONST_METHOD0(GetDecodingCallStatistics, AudioDecodingCallStats());
   MOCK_CONST_METHOD0(GetANAStatistics, ANAStats());
-  MOCK_CONST_METHOD0(GetSpeechOutputLevel, int());
   MOCK_CONST_METHOD0(GetSpeechOutputLevelFullRange, int());
   MOCK_CONST_METHOD0(GetTotalOutputEnergy, double());
   MOCK_CONST_METHOD0(GetTotalOutputDuration, double());

View File

@@ -124,8 +124,6 @@ class AudioReceiveStream {
   virtual void Stop() = 0;
   virtual Stats GetStats() const = 0;
-  // TODO(solenberg): Remove, once AudioMonitor is gone.
-  virtual int GetOutputLevel() const = 0;
   // Sets an audio sink that receives unmixed audio from the receive stream.
   // Ownership of the sink is managed by the caller.

View File

@@ -39,8 +39,6 @@ class AudioState : public rtc::RefCountInterface {
   struct Stats {
     // Audio peak level (max(abs())), linearly on the interval [0,32767].
     int32_t audio_level = -1;
-    // Audio peak level (max(abs())), logarithmically on the interval [0,9].
-    int8_t quantized_audio_level = -1;
     // See: https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
     double total_energy = 0.0f;
     double total_duration = 0.0f;

View File

@@ -382,9 +382,6 @@ class FakeVoiceMediaChannel : public RtpHelper<VoiceMediaChannel> {
     return true;
   }
-  virtual bool GetActiveStreams(StreamList* streams) { return true; }
-  virtual int GetOutputLevel() { return 0; }
   virtual bool CanInsertDtmf() {
     for (std::vector<AudioCodec>::const_iterator it = send_codecs_.begin();
          it != send_codecs_.end(); ++it) {

View File

@@ -670,11 +670,6 @@ class VoiceMediaChannel : public MediaChannel {
                             bool enable,
                             const AudioOptions* options,
                             AudioSource* source) = 0;
-  // Gets current energy levels for all incoming streams.
-  typedef std::vector<std::pair<uint32_t, int>> StreamList;
-  virtual bool GetActiveStreams(StreamList* actives) = 0;
-  // Get the current energy level of the stream sent to the speaker.
-  virtual int GetOutputLevel() = 0;
   // Set speaker output volume of the specified ssrc.
   virtual bool SetOutputVolume(uint32_t ssrc, double volume) = 0;
   // Returns if the telephone-event has been negotiated.

View File

@@ -68,9 +68,6 @@ class MediaEngineInterface {
       const MediaConfig& config,
       const VideoOptions& options) = 0;
-  // Gets the current microphone level, as a value between 0 and 10.
-  virtual int GetInputLevel() = 0;
   virtual const std::vector<AudioCodec>& audio_send_codecs() = 0;
   virtual const std::vector<AudioCodec>& audio_recv_codecs() = 0;
   virtual RtpCapabilities GetAudioCapabilities() = 0;
@@ -119,7 +116,6 @@ class CompositeMediaEngine : public MediaEngineInterface {
     return video().CreateChannel(call, config, options);
   }
-  virtual int GetInputLevel() { return voice().GetInputLevel(); }
   virtual const std::vector<AudioCodec>& audio_send_codecs() {
     return voice().send_codecs();
   }

View File

@@ -100,7 +100,6 @@ class FakeAudioReceiveStream final : public webrtc::AudioReceiveStream {
   void Stop() override { started_ = false; }
   webrtc::AudioReceiveStream::Stats GetStats() const override;
-  int GetOutputLevel() const override { return 0; }
   void SetSink(webrtc::AudioSinkInterface* sink) override;
   void SetGain(float gain) override;
   std::vector<webrtc::RtpSource> GetSources() const override {

View File

@@ -593,12 +593,6 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
   return true;
 }
-// TODO(solenberg): Remove, once AudioMonitor is gone.
-int WebRtcVoiceEngine::GetInputLevel() {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return audio_state()->GetAudioInputStats().quantized_audio_level;
-}
 const std::vector<AudioCodec>& WebRtcVoiceEngine::send_codecs() const {
   RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
   return send_codecs_;
@@ -1193,12 +1187,6 @@ class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream {
     return stream_->GetStats();
   }
-  int GetOutputLevel() const {
-    RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-    RTC_DCHECK(stream_);
-    return stream_->GetOutputLevel();
-  }
   void SetRawAudioSink(std::unique_ptr<webrtc::AudioSinkInterface> sink) {
     RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
     raw_audio_sink_ = std::move(sink);
@@ -1923,30 +1911,6 @@ bool WebRtcVoiceMediaChannel::SetLocalSource(uint32_t ssrc,
   return true;
 }
-// TODO(solenberg): Remove, once AudioMonitor is gone.
-bool WebRtcVoiceMediaChannel::GetActiveStreams(
-    AudioInfo::StreamList* actives) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  actives->clear();
-  for (const auto& ch : recv_streams_) {
-    int level = ch.second->GetOutputLevel();
-    if (level > 0) {
-      actives->push_back(std::make_pair(ch.first, level));
-    }
-  }
-  return true;
-}
-// TODO(solenberg): Remove, once AudioMonitor is gone.
-int WebRtcVoiceMediaChannel::GetOutputLevel() {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  int highest = 0;
-  for (const auto& ch : recv_streams_) {
-    highest = std::max(ch.second->GetOutputLevel(), highest);
-  }
-  return highest;
-}
 bool WebRtcVoiceMediaChannel::SetOutputVolume(uint32_t ssrc, double volume) {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   std::vector<uint32_t> ssrcs(1, ssrc);
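
The two members removed at the end of this file only aggregated per-stream levels: GetActiveStreams() collected (ssrc, level) pairs with a nonzero level and GetOutputLevel() kept the maximum. An application that still wants that view can run the same aggregation over whatever per-stream level source it has left (for example the full-range level in each receive stream's stats); a sketch with a hypothetical level callback rather than a WebRTC API:

#include <algorithm>
#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

// level_for_ssrc is whatever per-stream level source the application keeps,
// e.g. a lookup into cached receive-stream stats (assumption, not this CL).
using LevelFn = std::function<int(uint32_t ssrc)>;

// Mirrors the removed GetActiveStreams(): streams with a nonzero level.
std::vector<std::pair<uint32_t, int>> ActiveStreams(
    const std::vector<uint32_t>& ssrcs, const LevelFn& level_for_ssrc) {
  std::vector<std::pair<uint32_t, int>> actives;
  for (uint32_t ssrc : ssrcs) {
    int level = level_for_ssrc(ssrc);
    if (level > 0) {
      actives.emplace_back(ssrc, level);
    }
  }
  return actives;
}

// Mirrors the removed GetOutputLevel(): the highest level across streams.
int HighestOutputLevel(const std::vector<uint32_t>& ssrcs,
                       const LevelFn& level_for_ssrc) {
  int highest = 0;
  for (uint32_t ssrc : ssrcs) {
    highest = std::max(level_for_ssrc(ssrc), highest);
  }
  return highest;
}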

View File

@@ -59,8 +59,6 @@ class WebRtcVoiceEngine final {
       const MediaConfig& config,
       const AudioOptions& options);
-  int GetInputLevel();
   const std::vector<AudioCodec>& send_codecs() const;
   const std::vector<AudioCodec>& recv_codecs() const;
   RtpCapabilities GetCapabilities() const;
@@ -174,8 +172,6 @@ class WebRtcVoiceMediaChannel final : public VoiceMediaChannel,
   bool RemoveSendStream(uint32_t ssrc) override;
   bool AddRecvStream(const StreamParams& sp) override;
   bool RemoveRecvStream(uint32_t ssrc) override;
-  bool GetActiveStreams(StreamList* actives) override;
-  int GetOutputLevel() override;
   // SSRC=0 will apply the new volume to current and future unsignaled streams.
   bool SetOutputVolume(uint32_t ssrc, double volume) override;

View File

@@ -29,7 +29,6 @@ rtc_static_library("rtc_pc_base") {
   visibility = [ "*" ]
   defines = []
   sources = [
-    "audiomonitor.cc",
     "audiomonitor.h",
     "bundlefilter.cc",
     "bundlefilter.h",
@@ -65,7 +64,6 @@ rtc_static_library("rtc_pc_base") {
     "srtptransport.h",
     "transportcontroller.cc",
     "transportcontroller.h",
-    "voicechannel.h",
   ]
   deps = [

View File

@@ -1,105 +0,0 @@
/*
* Copyright 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <assert.h>
#include "pc/audiomonitor.h"
#include "pc/voicechannel.h"
namespace cricket {
const uint32_t MSG_MONITOR_POLL = 1;
const uint32_t MSG_MONITOR_START = 2;
const uint32_t MSG_MONITOR_STOP = 3;
const uint32_t MSG_MONITOR_SIGNAL = 4;
AudioMonitor::AudioMonitor(VoiceChannel *voice_channel,
rtc::Thread *monitor_thread) {
voice_channel_ = voice_channel;
monitoring_thread_ = monitor_thread;
monitoring_ = false;
}
AudioMonitor::~AudioMonitor() {
voice_channel_->worker_thread()->Clear(this);
monitoring_thread_->Clear(this);
}
void AudioMonitor::Start(int milliseconds) {
rate_ = milliseconds;
if (rate_ < 100)
rate_ = 100;
voice_channel_->worker_thread()->Post(RTC_FROM_HERE, this, MSG_MONITOR_START);
}
void AudioMonitor::Stop() {
voice_channel_->worker_thread()->Post(RTC_FROM_HERE, this, MSG_MONITOR_STOP);
}
void AudioMonitor::OnMessage(rtc::Message *message) {
rtc::CritScope cs(&crit_);
switch (message->message_id) {
case MSG_MONITOR_START:
assert(rtc::Thread::Current() == voice_channel_->worker_thread());
if (!monitoring_) {
monitoring_ = true;
PollVoiceChannel();
}
break;
case MSG_MONITOR_STOP:
assert(rtc::Thread::Current() == voice_channel_->worker_thread());
if (monitoring_) {
monitoring_ = false;
voice_channel_->worker_thread()->Clear(this);
}
break;
case MSG_MONITOR_POLL:
assert(rtc::Thread::Current() == voice_channel_->worker_thread());
PollVoiceChannel();
break;
case MSG_MONITOR_SIGNAL:
{
assert(rtc::Thread::Current() == monitoring_thread_);
AudioInfo info = audio_info_;
crit_.Leave();
SignalUpdate(this, info);
crit_.Enter();
}
break;
}
}
void AudioMonitor::PollVoiceChannel() {
rtc::CritScope cs(&crit_);
assert(rtc::Thread::Current() == voice_channel_->worker_thread());
// Gather connection infos
audio_info_.input_level = voice_channel_->GetInputLevel_w();
audio_info_.output_level = voice_channel_->GetOutputLevel_w();
voice_channel_->GetActiveStreams_w(&audio_info_.active_streams);
// Signal the monitoring thread, start another poll timer
monitoring_thread_->Post(RTC_FROM_HERE, this, MSG_MONITOR_SIGNAL);
voice_channel_->worker_thread()->PostDelayed(RTC_FROM_HERE, rate_, this,
MSG_MONITOR_POLL);
}
VoiceChannel *AudioMonitor::voice_channel() {
return voice_channel_;
}
rtc::Thread *AudioMonitor::monitor_thread() {
return monitoring_thread_;
}
} // namespace cricket
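
AudioMonitor's whole job, shown in the deleted file above, was to poll the voice channel every rate_ milliseconds (clamped to at least 100) on the worker thread and signal the gathered AudioInfo back on the monitoring thread. A caller that still wants periodic level updates can run an equivalent loop itself; a minimal sketch using plain std::thread and callbacks instead of rtc::Thread message passing (the LevelSample struct and both callbacks are placeholders, and single-thread delivery here deliberately skips the worker/monitor thread handoff):

#include <atomic>
#include <chrono>
#include <functional>
#include <thread>
#include <utility>

// Placeholder for the data AudioMonitor used to gather per poll.
struct LevelSample {
  int input_level = 0;
  int output_level = 0;
};

// Periodically invokes `poll` and hands the result to `on_update`, roughly
// what AudioMonitor did with MSG_MONITOR_POLL / MSG_MONITOR_SIGNAL.
class LevelPoller {
 public:
  LevelPoller(std::function<LevelSample()> poll,
              std::function<void(const LevelSample&)> on_update,
              int period_ms)
      : poll_(std::move(poll)), on_update_(std::move(on_update)) {
    // The deleted code clamped the poll period to at least 100 ms.
    period_ms_ = period_ms < 100 ? 100 : period_ms;
  }

  ~LevelPoller() { Stop(); }

  void Start() {
    running_ = true;
    thread_ = std::thread([this] {
      while (running_) {
        on_update_(poll_());
        std::this_thread::sleep_for(std::chrono::milliseconds(period_ms_));
      }
    });
  }

  void Stop() {
    running_ = false;
    if (thread_.joinable()) thread_.join();
  }

 private:
  std::function<LevelSample()> poll_;
  std::function<void(const LevelSample&)> on_update_;
  int period_ms_ = 100;
  std::atomic<bool> running_{false};
  std::thread thread_;
};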

View File

@@ -14,14 +14,11 @@
 #include <vector>
 #include <utility>
-// For ConnectionInfo/ConnectionInfos
 #include "p2p/base/port.h"
-#include "rtc_base/sigslot.h"
-#include "rtc_base/thread.h"
 namespace cricket {
-class VoiceChannel;
 struct AudioInfo {
   int input_level;
   int output_level;
@@ -29,32 +26,6 @@ struct AudioInfo {
   StreamList active_streams;  // ssrcs contributing to output_level
 };
-class AudioMonitor : public rtc::MessageHandler,
-                     public sigslot::has_slots<> {
- public:
-  AudioMonitor(VoiceChannel* voice_channel, rtc::Thread *monitor_thread);
-  ~AudioMonitor();
-  void Start(int cms);
-  void Stop();
-  VoiceChannel* voice_channel();
-  rtc::Thread *monitor_thread();
-  sigslot::signal2<AudioMonitor*, const AudioInfo&> SignalUpdate;
- protected:
-  void OnMessage(rtc::Message *message);
-  void PollVoiceChannel();
-  AudioInfo audio_info_;
-  VoiceChannel* voice_channel_;
-  rtc::Thread* monitoring_thread_;
-  rtc::CriticalSection crit_;
-  uint32_t rate_;
-  bool monitoring_;
-};
 }  // namespace cricket
 #endif  // PC_AUDIOMONITOR_H_

View File

@@ -1134,7 +1134,8 @@ void BaseChannel::ActivateRtcpMux() {
 VoiceChannel::VoiceChannel(rtc::Thread* worker_thread,
                            rtc::Thread* network_thread,
                            rtc::Thread* signaling_thread,
-                           MediaEngineInterface* media_engine,
+                           // TODO(nisse): Delete unused argument.
+                           MediaEngineInterface* /* media_engine */,
                            std::unique_ptr<VoiceMediaChannel> media_channel,
                            const std::string& content_name,
                            bool rtcp_mux_required,
@@ -1145,12 +1146,10 @@ VoiceChannel::VoiceChannel(rtc::Thread* worker_thread,
                   std::move(media_channel),
                   content_name,
                   rtcp_mux_required,
-                  srtp_required),
-      media_engine_(media_engine) {}
+                  srtp_required) {}
 VoiceChannel::~VoiceChannel() {
   TRACE_EVENT0("webrtc", "VoiceChannel::~VoiceChannel");
-  StopAudioMonitor();
   // this can't be done in the base class, since it calls a virtual
   DisableMedia_w();
   Deinit();
@@ -1186,34 +1185,6 @@ bool VoiceChannel::GetStats(VoiceMediaInfo* stats) {
                                media_channel(), stats));
 }
-void VoiceChannel::StartAudioMonitor(int cms) {
-  audio_monitor_.reset(new AudioMonitor(this, rtc::Thread::Current()));
-  audio_monitor_->Start(cms);
-}
-void VoiceChannel::StopAudioMonitor() {
-  if (audio_monitor_) {
-    audio_monitor_->Stop();
-    audio_monitor_.reset();
-  }
-}
-bool VoiceChannel::IsAudioMonitorRunning() const {
-  return (audio_monitor_.get() != NULL);
-}
-int VoiceChannel::GetInputLevel_w() {
-  return media_engine_->GetInputLevel();
-}
-int VoiceChannel::GetOutputLevel_w() {
-  return media_channel()->GetOutputLevel();
-}
-void VoiceChannel::GetActiveStreams_w(AudioInfo::StreamList* actives) {
-  media_channel()->GetActiveStreams(actives);
-}
 void VoiceChannel::OnPacketReceived(bool rtcp,
                                     rtc::CopyOnWriteBuffer* packet,
                                     const rtc::PacketTime& packet_time) {

View File

@@ -471,14 +471,6 @@ class VoiceChannel : public BaseChannel {
   // Get statistics about the current media session.
   bool GetStats(VoiceMediaInfo* stats);
-  // Monitoring functions
-  void StartAudioMonitor(int cms);
-  void StopAudioMonitor();
-  bool IsAudioMonitorRunning() const;
-  int GetInputLevel_w();
-  int GetOutputLevel_w();
-  void GetActiveStreams_w(AudioInfo::StreamList* actives);
   webrtc::RtpParameters GetRtpSendParameters_w(uint32_t ssrc) const;
   webrtc::RTCError SetRtpSendParameters_w(uint32_t ssrc,
                                           webrtc::RtpParameters parameters);
@@ -501,9 +493,7 @@ class VoiceChannel : public BaseChannel {
   void OnMessage(rtc::Message* pmsg) override;
   static const int kEarlyMediaTimeout = 1000;
-  MediaEngineInterface* media_engine_;
   bool received_media_ = false;
-  std::unique_ptr<AudioMonitor> audio_monitor_;
   // Last AudioSendParameters sent down to the media_channel() via
   // SetSendParameters.

View File

@@ -15,6 +15,7 @@
 #include "media/base/streamparams.h"
 #include "pc/audiomonitor.h"
 #include "rtc_base/logging.h"
+#include "rtc_base/timeutils.h"
 namespace cricket {

View File

@@ -1,16 +0,0 @@
/*
* Copyright 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef PC_VOICECHANNEL_H_
#define PC_VOICECHANNEL_H_
#include "pc/channel.h"
#endif // PC_VOICECHANNEL_H_