Delete unused class CurrentSpeakerMonitor.

Bug: webrtc:8760
Change-Id: Ib2f84c7d74f1f3187f02dcf697e9c16a4d5f10e3
Reviewed-on: https://webrtc-review.googlesource.com/34652
Reviewed-by: Taylor Brandstetter <deadbeef@webrtc.org>
Reviewed-by: Noah Richards <noahric@chromium.org>
Commit-Queue: Niels Moller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23336}
This commit is contained in:
Niels Möller
2018-05-18 08:44:18 +02:00
committed by Commit Bot
parent c2ee8e8a46
commit 0e36a7260f
6 changed files with 0 additions and 553 deletions

View File

@ -29,13 +29,10 @@ rtc_static_library("rtc_pc_base") {
visibility = [ "*" ]
defines = []
sources = [
"audiomonitor.h",
"channel.cc",
"channel.h",
"channelmanager.cc",
"channelmanager.h",
"currentspeakermonitor.cc",
"currentspeakermonitor.h",
"dtlssrtptransport.cc",
"dtlssrtptransport.h",
"externalhmac.cc",
@ -283,7 +280,6 @@ if (rtc_include_tests) {
sources = [
"channel_unittest.cc",
"channelmanager_unittest.cc",
"currentspeakermonitor_unittest.cc",
"dtlssrtptransport_unittest.cc",
"jseptransport_unittest.cc",
"jseptransportcontroller_unittest.cc",

View File

@ -1,31 +0,0 @@
/*
* Copyright 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef PC_AUDIOMONITOR_H_
#define PC_AUDIOMONITOR_H_
#include <vector>
#include <utility>
// For ConnectionInfo/ConnectionInfos
#include "p2p/base/port.h"
namespace cricket {
// Snapshot of audio activity delivered by an audio monitor for one
// polling interval.
struct AudioInfo {
  int input_level;   // presumably the capture-side level — confirm with the
                     // AudioMonitor producer
  int output_level;  // presumably the render-side level — confirm likewise
  // List of (ssrc, audio level) pairs, one entry per active stream.
  typedef std::vector<std::pair<uint32_t, int> > StreamList;
  StreamList active_streams;  // ssrcs contributing to output_level
};
} // namespace cricket
#endif // PC_AUDIOMONITOR_H_

View File

@ -29,7 +29,6 @@
#include "media/base/streamparams.h"
#include "p2p/base/dtlstransportinternal.h"
#include "p2p/base/packettransportinternal.h"
#include "pc/audiomonitor.h"
#include "pc/dtlssrtptransport.h"
#include "pc/mediasession.h"
#include "pc/rtptransport.h"

View File

@ -1,204 +0,0 @@
/*
* Copyright 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "pc/currentspeakermonitor.h"
#include <vector>
#include "media/base/streamparams.h"
#include "pc/audiomonitor.h"
#include "rtc_base/logging.h"
#include "rtc_base/timeutils.h"
namespace cricket {
namespace {
const int kMaxAudioLevel = 9;
// To avoid overswitching, we disable switching for a period of time after a
// switch is done.
const int kDefaultMinTimeBetweenSwitches = 1000;
}
// Constructs a monitor bound to |audio_source_context|. No signals are
// observed until Start() is called; the initial "current speaker" is the
// sentinel ssrc 0 (no speaker).
CurrentSpeakerMonitor::CurrentSpeakerMonitor(
    AudioSourceContext* audio_source_context)
    : started_(false),
      audio_source_context_(audio_source_context),
      current_speaker_ssrc_(0),
      earliest_permitted_switch_time_(0),
      min_time_between_switches_(kDefaultMinTimeBetweenSwitches) {}
// Stops monitoring on destruction so no signal callbacks arrive on a
// partially-destroyed object. (has_slots<> also disconnects remaining
// connections when it is destroyed.)
CurrentSpeakerMonitor::~CurrentSpeakerMonitor() {
  Stop();
}
// Begins observing the audio source context. Idempotent: connecting twice
// would deliver duplicate callbacks, so a second Start() is a no-op.
void CurrentSpeakerMonitor::Start() {
  if (started_)
    return;
  audio_source_context_->SignalAudioMonitor.connect(
      this, &CurrentSpeakerMonitor::OnAudioMonitor);
  audio_source_context_->SignalMediaStreamsUpdate.connect(
      this, &CurrentSpeakerMonitor::OnMediaStreamsUpdate);
  audio_source_context_->SignalMediaStreamsReset.connect(
      this, &CurrentSpeakerMonitor::OnMediaStreamsReset);
  started_ = true;
}
// Stops monitoring and resets all speaker-tracking state. Safe to call when
// not started; also invoked from the destructor.
void CurrentSpeakerMonitor::Stop() {
  if (!started_)
    return;
  // Disconnect everything Start() connected. The original code omitted the
  // SignalMediaStreamsReset disconnect, so OnMediaStreamsReset could still
  // fire after Stop(); disconnect it here for symmetry with Start().
  audio_source_context_->SignalAudioMonitor.disconnect(this);
  audio_source_context_->SignalMediaStreamsUpdate.disconnect(this);
  audio_source_context_->SignalMediaStreamsReset.disconnect(this);
  started_ = false;
  // Clear per-stream state so a later Start() begins from a clean slate.
  ssrc_to_speaking_state_map_.clear();
  current_speaker_ssrc_ = 0;
  earliest_permitted_switch_time_ = 0;
}
// Overrides the anti-flapping interval (milliseconds) enforced between
// speaker switches. Used by tests to shrink the default 1000 ms window.
void CurrentSpeakerMonitor::set_min_time_between_switches(
    int min_time_between_switches) {
  min_time_between_switches_ = min_time_between_switches;
}
// Processes one batch of audio-level samples. Each known stream is advanced
// through a five-state "speaking" state machine (to debounce noise), the
// loudest confirmed speaker is selected, and SignalUpdate fires if the
// loudest speaker changed and the anti-flapping window has elapsed.
void CurrentSpeakerMonitor::OnAudioMonitor(
    AudioSourceContext* audio_source_context, const AudioInfo& info) {
  // Index this batch's levels by ssrc for O(log n) lookup below. Streams
  // absent from the batch are treated as level 0.
  std::map<uint32_t, int> active_ssrc_to_level_map;
  cricket::AudioInfo::StreamList::const_iterator stream_list_it;
  for (stream_list_it = info.active_streams.begin();
       stream_list_it != info.active_streams.end(); ++stream_list_it) {
    uint32_t ssrc = stream_list_it->first;
    active_ssrc_to_level_map[ssrc] = stream_list_it->second;
    // It's possible we haven't yet added this source to our map. If so,
    // add it now with a "not speaking" state.
    if (ssrc_to_speaking_state_map_.find(ssrc) ==
        ssrc_to_speaking_state_map_.end()) {
      ssrc_to_speaking_state_map_[ssrc] = SS_NOT_SPEAKING;
    }
  }
  int max_level = 0;
  uint32_t loudest_speaker_ssrc = 0;
  // Update the speaking states of all participants based on the new audio
  // level information. Also retain loudest speaker.
  std::map<uint32_t, SpeakingState>::iterator state_it;
  for (state_it = ssrc_to_speaking_state_map_.begin();
       state_it != ssrc_to_speaking_state_map_.end(); ++state_it) {
    bool is_previous_speaker = current_speaker_ssrc_ == state_it->first;
    // This uses a state machine in order to gradually identify
    // members as having started or stopped speaking. Matches the
    // algorithm used by the hangouts js code.
    std::map<uint32_t, int>::const_iterator level_it =
        active_ssrc_to_level_map.find(state_it->first);
    // Note that the stream map only contains streams with non-zero audio
    // levels.
    int level = (level_it != active_ssrc_to_level_map.end()) ?
        level_it->second : 0;
    switch (state_it->second) {
      case SS_NOT_SPEAKING:
        if (level > 0) {
          // Reset level because we don't think they're really speaking.
          // A single non-zero sample may just be noise; require a second
          // one (SS_MIGHT_BE_SPEAKING -> SS_SPEAKING) before counting it.
          level = 0;
          state_it->second = SS_MIGHT_BE_SPEAKING;
        } else {
          // State unchanged.
        }
        break;
      case SS_MIGHT_BE_SPEAKING:
        if (level > 0) {
          state_it->second = SS_SPEAKING;
        } else {
          state_it->second = SS_NOT_SPEAKING;
        }
        break;
      case SS_SPEAKING:
        if (level > 0) {
          // State unchanged.
        } else {
          // First silent sample: possibly just a pause between words, so
          // linger in a "was speaking recently" state for two intervals.
          state_it->second = SS_WAS_SPEAKING_RECENTLY1;
          if (is_previous_speaker) {
            // Assume this is an inter-word silence and assign him the highest
            // volume.
            level = kMaxAudioLevel;
          }
        }
        break;
      case SS_WAS_SPEAKING_RECENTLY1:
        if (level > 0) {
          state_it->second = SS_SPEAKING;
        } else {
          state_it->second = SS_WAS_SPEAKING_RECENTLY2;
          if (is_previous_speaker) {
            // Assume this is an inter-word silence and assign him the highest
            // volume.
            level = kMaxAudioLevel;
          }
        }
        break;
      case SS_WAS_SPEAKING_RECENTLY2:
        if (level > 0) {
          state_it->second = SS_SPEAKING;
        } else {
          // Two consecutive silent intervals: they really stopped speaking.
          state_it->second = SS_NOT_SPEAKING;
        }
        break;
    }
    if (level > max_level) {
      loudest_speaker_ssrc = state_it->first;
      max_level = level;
    } else if (level > 0 && level == max_level && is_previous_speaker) {
      // Favor continuity of loudest speakers if audio levels are equal.
      loudest_speaker_ssrc = state_it->first;
    }
  }
  // We avoid over-switching by disabling switching for a period of time after
  // a switch is done.
  int64_t now = rtc::TimeMillis();
  if (earliest_permitted_switch_time_ <= now &&
      current_speaker_ssrc_ != loudest_speaker_ssrc) {
    current_speaker_ssrc_ = loudest_speaker_ssrc;
    RTC_LOG(LS_INFO) << "Current speaker changed to " << current_speaker_ssrc_;
    earliest_permitted_switch_time_ = now + min_time_between_switches_;
    SignalUpdate(this, current_speaker_ssrc_);
  }
}
// Keeps the per-stream speaking-state map in sync with stream membership:
// removed audio streams are forgotten, added ones start as not speaking.
void CurrentSpeakerMonitor::OnMediaStreamsUpdate(
    AudioSourceContext* audio_source_context,
    const MediaStreams& added,
    const MediaStreams& removed) {
  // Ignore updates coming from a context other than the one we monitor.
  if (audio_source_context != audio_source_context_)
    return;
  for (const StreamParams& stream : removed.audio())
    ssrc_to_speaking_state_map_.erase(stream.first_ssrc());
  for (const StreamParams& stream : added.audio())
    ssrc_to_speaking_state_map_[stream.first_ssrc()] = SS_NOT_SPEAKING;
}
// A reset from our context wipes all per-stream speaking state.
void CurrentSpeakerMonitor::OnMediaStreamsReset(
    AudioSourceContext* audio_source_context) {
  if (audio_source_context != audio_source_context_)
    return;
  ssrc_to_speaking_state_map_.clear();
}
} // namespace cricket

View File

@ -1,96 +0,0 @@
/*
* Copyright 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// CurrentSpeakerMonitor monitors the audio levels for a session and determines
// which participant is currently speaking.
#ifndef PC_CURRENTSPEAKERMONITOR_H_
#define PC_CURRENTSPEAKERMONITOR_H_
#include <stdint.h>
#include <map>
#include "rtc_base/sigslot.h"
namespace cricket {
struct AudioInfo;
struct MediaStreams;
// Signal surface through which CurrentSpeakerMonitor observes audio sources.
// Implementers fire these signals; the monitor connects to them in Start().
class AudioSourceContext {
 public:
  // Fired with fresh audio-level info covering all active streams.
  sigslot::signal2<AudioSourceContext*, const cricket::AudioInfo&>
      SignalAudioMonitor;
  // Fired when all media streams are torn down at once.
  sigslot::signal1<AudioSourceContext*> SignalMediaStreamsReset;
  // Fired on membership changes; arguments are (context, added, removed).
  sigslot::signal3<AudioSourceContext*,
                   const cricket::MediaStreams&,
                   const cricket::MediaStreams&> SignalMediaStreamsUpdate;
};
// CurrentSpeakerMonitor can be used to monitor the audio-levels from
// many audio-sources and report on changes in the loudest audio-source.
// Its a generic type and relies on an AudioSourceContext which is aware of
// the audio-sources. AudioSourceContext needs to provide two signals namely
// SignalAudioInfoMonitor - provides audio info of the all current speakers.
// SignalMediaSourcesUpdated - provides updates when a speaker leaves or joins.
// Note that the AudioSourceContext's audio monitor must be started
// before this is started.
// It's recommended that the audio monitor be started with a 100 ms period.
class CurrentSpeakerMonitor : public sigslot::has_slots<> {
 public:
  // |audio_source_context| must outlive this monitor; it is stored unowned.
  explicit CurrentSpeakerMonitor(AudioSourceContext* audio_source_context);
  ~CurrentSpeakerMonitor();
  // Connects to the context's signals / disconnects and clears state.
  // Both are idempotent.
  void Start();
  void Stop();
  // Used by tests. Note that the actual minimum time between switches
  // enforced by the monitor will be the given value plus or minus the
  // resolution of the system clock.
  void set_min_time_between_switches(int min_time_between_switches);
  // This is fired when the current speaker changes, and provides his audio
  // SSRC. This only fires after the audio monitor on the underlying
  // AudioSourceContext has been started.
  sigslot::signal2<CurrentSpeakerMonitor*, uint32_t> SignalUpdate;

 private:
  // Signal handlers; see the .cc for the state-machine details.
  void OnAudioMonitor(AudioSourceContext* audio_source_context,
                      const AudioInfo& info);
  void OnMediaStreamsUpdate(AudioSourceContext* audio_source_context,
                            const MediaStreams& added,
                            const MediaStreams& removed);
  void OnMediaStreamsReset(AudioSourceContext* audio_source_context);
  // These are states that a participant will pass through so that we gradually
  // recognize that they have started and stopped speaking. This avoids
  // "twitchiness".
  enum SpeakingState {
    SS_NOT_SPEAKING,
    SS_MIGHT_BE_SPEAKING,
    SS_SPEAKING,
    SS_WAS_SPEAKING_RECENTLY1,
    SS_WAS_SPEAKING_RECENTLY2
  };
  bool started_;                 // true between Start() and Stop()
  AudioSourceContext* audio_source_context_;  // unowned
  std::map<uint32_t, SpeakingState> ssrc_to_speaking_state_map_;
  uint32_t current_speaker_ssrc_;  // 0 means "no current speaker"
  // To prevent overswitching, switching is disabled for some time after a
  // switch is made. This gives us the earliest time a switch is permitted.
  int64_t earliest_permitted_switch_time_;
  int min_time_between_switches_;
};
} // namespace cricket
#endif // PC_CURRENTSPEAKERMONITOR_H_

View File

@ -1,217 +0,0 @@
/*
* Copyright 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <utility>
#include "pc/currentspeakermonitor.h"
#include "pc/audiomonitor.h"
#include "rtc_base/gunit.h"
#include "rtc_base/thread.h"
namespace cricket {
static const uint32_t kSsrc1 = 1001;
static const uint32_t kSsrc2 = 1002;
static const uint32_t kMinTimeBetweenSwitches = 10;
// Due to limited system clock resolution, the CurrentSpeakerMonitor may
// actually require more or less time between switches than that specified
// in the call to set_min_time_between_switches. To be safe, we sleep for
// 90 ms more than the min time between switches before checking for a switch.
// I am assuming system clocks do not have a coarser resolution than 90 ms.
static const uint32_t kSleepTimeBetweenSwitches = 100;
// Fixture wiring a CurrentSpeakerMonitor to a fake AudioSourceContext and
// recording every speaker-change notification it emits.
class CurrentSpeakerMonitorTest : public testing::Test,
                                  public sigslot::has_slots<> {
 public:
  CurrentSpeakerMonitorTest() : monitor_(new CurrentSpeakerMonitor(&source_)) {
    // Shrink the minimum time between switches to 10 ms so the tests
    // don't have to sleep for the default 1000 ms window.
    monitor_->set_min_time_between_switches(kMinTimeBetweenSwitches);
    monitor_->SignalUpdate.connect(this, &CurrentSpeakerMonitorTest::OnUpdate);
    monitor_->Start();
  }
  ~CurrentSpeakerMonitorTest() { delete monitor_; }

  // Pushes |info| through the fake source so the monitor processes it.
  void SignalAudioMonitor(const AudioInfo& info) {
    source_.SignalAudioMonitor(&source_, info);
  }

 protected:
  AudioSourceContext source_;
  CurrentSpeakerMonitor* monitor_;
  int num_changes_ = 0;
  uint32_t current_speaker_ = 0;

  // Records the most recent speaker and counts how many switches occurred.
  void OnUpdate(CurrentSpeakerMonitor* monitor, uint32_t current_speaker) {
    current_speaker_ = current_speaker;
    num_changes_++;
  }
};
// Sets the aggregate levels on |info|. Note: deliberately does NOT clear
// active_streams — tests accumulate stream entries across calls, relying on
// the monitor's map semantics (later duplicates overwrite earlier ones).
static void InitAudioInfo(AudioInfo* info, int input_level, int output_level) {
  info->input_level = input_level;
  info->output_level = output_level;
}
// With no active streams at all, the monitor must report no speaker and
// must not fire SignalUpdate.
TEST_F(CurrentSpeakerMonitorTest, NoActiveStreams) {
  AudioInfo info;
  InitAudioInfo(&info, 0, 0);
  SignalAudioMonitor(info);
  EXPECT_EQ(current_speaker_, 0U);
  EXPECT_EQ(num_changes_, 0);
}
// Two active streams: the monitor should ignore the first sample (possible
// noise) and pick the louder stream on the second, confirming sample.
TEST_F(CurrentSpeakerMonitorTest, MultipleActiveStreams) {
  AudioInfo info;
  InitAudioInfo(&info, 0, 0);

  info.active_streams.emplace_back(kSsrc1, 3);
  info.active_streams.emplace_back(kSsrc2, 7);
  SignalAudioMonitor(info);
  // No speaker recognized yet: the initial sample is treated as possibly
  // just noise and disregarded.
  EXPECT_EQ(0U, current_speaker_);
  EXPECT_EQ(0, num_changes_);

  info.active_streams.emplace_back(kSsrc1, 3);
  info.active_streams.emplace_back(kSsrc2, 7);
  SignalAudioMonitor(info);
  // Second consistent sample confirms the louder stream as the speaker.
  EXPECT_EQ(kSsrc2, current_speaker_);
  EXPECT_EQ(1, num_changes_);
}
// See: https://code.google.com/p/webrtc/issues/detail?id=2409
// A loudness flip arriving before min_time_between_switches has elapsed
// must not cause a speaker switch. See webrtc issue 2409 for why this is
// disabled.
TEST_F(CurrentSpeakerMonitorTest, DISABLED_RapidSpeakerChange) {
  AudioInfo info;
  InitAudioInfo(&info, 0, 0);

  info.active_streams.emplace_back(kSsrc1, 3);
  info.active_streams.emplace_back(kSsrc2, 7);
  SignalAudioMonitor(info);
  EXPECT_EQ(0U, current_speaker_);
  EXPECT_EQ(0, num_changes_);

  info.active_streams.emplace_back(kSsrc1, 3);
  info.active_streams.emplace_back(kSsrc2, 7);
  SignalAudioMonitor(info);
  EXPECT_EQ(kSsrc2, current_speaker_);
  EXPECT_EQ(1, num_changes_);

  // Flip the loudness immediately, with no intervening delay.
  info.active_streams.emplace_back(kSsrc1, 9);
  info.active_streams.emplace_back(kSsrc2, 1);
  SignalAudioMonitor(info);
  // We expect no speaker change because of the rapid change.
  EXPECT_EQ(kSsrc2, current_speaker_);
  EXPECT_EQ(1, num_changes_);
}
// Flaky on iOS: webrtc:7057.
#if defined(WEBRTC_IOS)
#define MAYBE_SpeakerChange DISABLED_SpeakerChange
#else
#define MAYBE_SpeakerChange SpeakerChange
#endif
// After establishing one speaker and waiting out the anti-flapping window,
// a loudness flip must switch the reported speaker.
TEST_F(CurrentSpeakerMonitorTest, MAYBE_SpeakerChange) {
  AudioInfo info;
  InitAudioInfo(&info, 0, 0);

  info.active_streams.emplace_back(kSsrc1, 3);
  info.active_streams.emplace_back(kSsrc2, 7);
  SignalAudioMonitor(info);
  EXPECT_EQ(0U, current_speaker_);
  EXPECT_EQ(0, num_changes_);

  info.active_streams.emplace_back(kSsrc1, 3);
  info.active_streams.emplace_back(kSsrc2, 7);
  SignalAudioMonitor(info);
  EXPECT_EQ(kSsrc2, current_speaker_);
  EXPECT_EQ(1, num_changes_);

  // Wait so the changes don't come so rapidly.
  rtc::Thread::SleepMs(kSleepTimeBetweenSwitches);

  info.active_streams.emplace_back(kSsrc1, 9);
  info.active_streams.emplace_back(kSsrc2, 1);
  SignalAudioMonitor(info);
  EXPECT_EQ(kSsrc1, current_speaker_);
  EXPECT_EQ(2, num_changes_);
}
// Two consecutive silent samples from the current speaker are treated as an
// inter-word pause (no switch); only a third silent sample concludes they
// stopped speaking and hands the floor to the other stream.
TEST_F(CurrentSpeakerMonitorTest, InterwordSilence) {
  AudioInfo info;
  InitAudioInfo(&info, 0, 0);

  info.active_streams.emplace_back(kSsrc1, 3);
  info.active_streams.emplace_back(kSsrc2, 7);
  SignalAudioMonitor(info);
  EXPECT_EQ(0U, current_speaker_);
  EXPECT_EQ(0, num_changes_);

  info.active_streams.emplace_back(kSsrc1, 3);
  info.active_streams.emplace_back(kSsrc2, 7);
  SignalAudioMonitor(info);
  EXPECT_EQ(kSsrc2, current_speaker_);
  EXPECT_EQ(1, num_changes_);

  info.active_streams.emplace_back(kSsrc1, 3);
  info.active_streams.emplace_back(kSsrc2, 7);
  SignalAudioMonitor(info);
  EXPECT_EQ(kSsrc2, current_speaker_);
  EXPECT_EQ(1, num_changes_);

  // Wait so the changes don't come so rapidly.
  rtc::Thread::SleepMs(kSleepTimeBetweenSwitches);

  // First silent sample: treated as an inter-word silence, no switch.
  info.active_streams.emplace_back(kSsrc1, 3);
  info.active_streams.emplace_back(kSsrc2, 0);
  SignalAudioMonitor(info);
  EXPECT_EQ(kSsrc2, current_speaker_);
  EXPECT_EQ(1, num_changes_);

  // Second silent sample: still treated as an inter-word silence.
  info.active_streams.emplace_back(kSsrc1, 3);
  info.active_streams.emplace_back(kSsrc2, 0);
  SignalAudioMonitor(info);
  EXPECT_EQ(kSsrc2, current_speaker_);
  EXPECT_EQ(1, num_changes_);

  // Third silent sample: SSRC2 is concluded to have stopped speaking.
  info.active_streams.emplace_back(kSsrc1, 3);
  info.active_streams.emplace_back(kSsrc2, 0);
  SignalAudioMonitor(info);
  EXPECT_EQ(kSsrc1, current_speaker_);
  EXPECT_EQ(2, num_changes_);
}
} // namespace cricket