Don't allow changing ICE pool size after SetLocalDescription.

This was the decision at IETF 97
(see: https://github.com/rtcweb-wg/jsep/issues/381). It's simpler to
disallow this (since there's no real need for it) than to define
complex rules for it.
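For context, a minimal sketch (not part of this CL; |pc| is an assumed,
already-created PeerConnectionInterface) of how the new rule surfaces through
the C++ API, using the bool-returning SetConfiguration shown in the diff below:

  webrtc::PeerConnectionInterface::RTCConfiguration config;
  config.ice_candidate_pool_size = 2;
  pc->SetConfiguration(config);  // Succeeds: no local description yet.

  // ... offer/answer exchange, including SetLocalDescription ...

  config.ice_candidate_pool_size = 4;
  if (!pc->SetConfiguration(config)) {
    // Rejected: the pool size is frozen once SetLocalDescription has been
    // called. Only the pool size is frozen by this check.
  }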

BUG=webrtc:6864

Review-Url: https://codereview.webrtc.org/2566833002
Cr-Commit-Position: refs/heads/master@{#15559}
Author:    deadbeef
Date:      2016-12-12 18:49:32 -08:00
Committed: Commit bot
Commit:    6de92f9255 (parent 25ed435afe)

5 changed files with 87 additions and 39 deletions

webrtc/api/peerconnection.cc

@@ -1288,6 +1288,14 @@ PeerConnectionInterface::RTCConfiguration PeerConnection::GetConfiguration() {
 bool PeerConnection::SetConfiguration(const RTCConfiguration& configuration) {
   TRACE_EVENT0("webrtc", "PeerConnection::SetConfiguration");
+  if (session_->local_description() &&
+      configuration.ice_candidate_pool_size !=
+          configuration_.ice_candidate_pool_size) {
+    LOG(LS_ERROR) << "Can't change candidate pool size after calling "
+                     "SetLocalDescription.";
+    return false;
+  }
   // TODO(deadbeef): Return false and log an error if there are any unsupported
   // modifications.
   if (port_allocator_) {
@@ -1295,6 +1303,7 @@ bool PeerConnection::SetConfiguration(const RTCConfiguration& configuration) {
           RTC_FROM_HERE,
           rtc::Bind(&PeerConnection::ReconfigurePortAllocator_n, this,
                     configuration))) {
+      LOG(LS_ERROR) << "Failed to apply configuration to PortAllocator.";
       return false;
     }
   }
@@ -2386,10 +2395,9 @@ bool PeerConnection::ReconfigurePortAllocator_n(
       ConvertIceTransportTypeToCandidateFilter(configuration.type));
   // Call this last since it may create pooled allocator sessions using the
   // candidate filter set above.
-  port_allocator_->SetConfiguration(stun_servers, turn_servers,
-                                    configuration.ice_candidate_pool_size,
-                                    configuration.prune_turn_ports);
-  return true;
+  return port_allocator_->SetConfiguration(
+      stun_servers, turn_servers, configuration.ice_candidate_pool_size,
+      configuration.prune_turn_ports);
 }

 bool PeerConnection::StartRtcEventLog_w(rtc::PlatformFile file,
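Note that the guard runs before anything is applied, so a rejected call leaves
the PeerConnection's stored configuration untouched, and a PortAllocator
failure is now propagated instead of swallowed. A standalone model of that
validate-before-apply ordering (all types here are hypothetical stand-ins,
not WebRTC classes):

  struct Config { int ice_candidate_pool_size = 0; };

  struct FakePeerConnection {
    bool has_local_description = false;
    Config configuration;

    bool SetConfiguration(const Config& config) {
      // Validate first; the pool size is frozen once a local description
      // exists, so nothing below runs for a rejected change.
      if (has_local_description &&
          config.ice_candidate_pool_size !=
              configuration.ice_candidate_pool_size) {
        return false;
      }
      configuration = config;  // Apply only after validation succeeds.
      return true;
    }
  };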

webrtc/api/peerconnectioninterface_unittest.cc

@@ -2203,6 +2203,26 @@ TEST_F(PeerConnectionInterfaceTest,
   EXPECT_EQ(1UL, session->stun_servers().size());
 }

+// Test that after SetLocalDescription, changing the pool size is not allowed.
+TEST_F(PeerConnectionInterfaceTest,
+       CantChangePoolSizeAfterSetLocalDescription) {
+  CreatePeerConnection();
+  // Start by setting a size of 1.
+  PeerConnectionInterface::RTCConfiguration config;
+  config.ice_candidate_pool_size = 1;
+  EXPECT_TRUE(pc_->SetConfiguration(config));
+
+  // Set remote offer; can still change pool size at this point.
+  CreateOfferAsRemoteDescription();
+  config.ice_candidate_pool_size = 2;
+  EXPECT_TRUE(pc_->SetConfiguration(config));
+
+  // Set local answer; now it's too late.
+  CreateAnswerAsLocalDescription();
+  config.ice_candidate_pool_size = 3;
+  EXPECT_FALSE(pc_->SetConfiguration(config));
+}
+
 // Test that PeerConnection::Close changes the states to closed and all remote
 // tracks change state to ended.
 TEST_F(PeerConnectionInterfaceTest, CloseAndTestStreamsAndStates) {

webrtc/p2p/base/portallocator.cc

@@ -29,7 +29,7 @@ PortAllocatorSession::PortAllocatorSession(const std::string& content_name,
   RTC_DCHECK(ice_ufrag.empty() == ice_pwd.empty());
 }

-void PortAllocator::SetConfiguration(
+bool PortAllocator::SetConfiguration(
     const ServerAddresses& stun_servers,
     const std::vector<RelayServerConfig>& turn_servers,
     int candidate_pool_size,
@@ -40,31 +40,48 @@ void PortAllocator::SetConfiguration(
   turn_servers_ = turn_servers;
   prune_turn_ports_ = prune_turn_ports;

+  bool candidate_pool_drain_began =
+      static_cast<int>(pooled_sessions_.size()) != candidate_pool_size_;
+  if (candidate_pool_drain_began &&
+      candidate_pool_size != candidate_pool_size_) {
+    LOG(LS_ERROR) << "Trying to change candidate pool size after pool started "
+                     "to be drained.";
+    return false;
+  }
+  if (candidate_pool_size < 0) {
+    LOG(LS_ERROR) << "Can't set negative pool size.";
+    return false;
+  }
+
+  candidate_pool_size_ = candidate_pool_size;
+
+  // If sessions need to be recreated, only recreate as many as the current
+  // pool size if the pool has begun to be drained.
+  int sessions_needed = candidate_pool_drain_began
+                            ? static_cast<int>(pooled_sessions_.size())
+                            : candidate_pool_size_;
+
   // If ICE servers changed, throw away any existing pooled sessions and create
   // new ones.
   if (ice_servers_changed) {
     pooled_sessions_.clear();
-    allocated_pooled_session_count_ = 0;
   }

-  // If |size| is less than the number of allocated sessions, get rid of the
-  // extras.
-  while (allocated_pooled_session_count_ > candidate_pool_size &&
-         !pooled_sessions_.empty()) {
+  // If |sessions_needed| is less than the number of pooled sessions, get rid
+  // of the extras.
+  while (sessions_needed < static_cast<int>(pooled_sessions_.size())) {
     pooled_sessions_.front().reset(nullptr);
     pooled_sessions_.pop_front();
-    --allocated_pooled_session_count_;
   }

-  // If |size| is greater than the number of allocated sessions, create new
-  // sessions.
-  while (allocated_pooled_session_count_ < candidate_pool_size) {
+  // If |sessions_needed| is greater than the number of pooled sessions,
+  // create new sessions.
+  while (static_cast<int>(pooled_sessions_.size()) < sessions_needed) {
     PortAllocatorSession* pooled_session = CreateSessionInternal("", 0, "", "");
     pooled_session->StartGettingPorts();
     pooled_sessions_.push_back(
         std::unique_ptr<PortAllocatorSession>(pooled_session));
-    ++allocated_pooled_session_count_;
   }
-  target_pooled_session_count_ = candidate_pool_size;
+  return true;
 }

 std::unique_ptr<PortAllocatorSession> PortAllocator::CreateSession(
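The key trick above: with the two counters gone, "the pool has begun to be
drained" is inferred purely from pooled_sessions_.size() !=
candidate_pool_size_, which can only become true after TakePooledSession()
removes a session. A self-contained model of that invariant (FakeAllocator
and Session are illustrative stand-ins, not WebRTC types):

  #include <deque>
  #include <iostream>
  #include <memory>

  struct Session {};

  struct FakeAllocator {
    int candidate_pool_size = 0;
    std::deque<std::unique_ptr<Session>> pooled_sessions;

    bool SetPoolSize(int size) {
      // size() != candidate_pool_size can only mean TakePooledSession() ran.
      bool drain_began =
          static_cast<int>(pooled_sessions.size()) != candidate_pool_size;
      if (drain_began && size != candidate_pool_size) {
        return false;  // Mirrors the "pool started to be drained" error path.
      }
      if (size < 0) {
        return false;  // Mirrors the negative-size error path.
      }
      candidate_pool_size = size;
      // While draining, only maintain the sessions that are still pooled.
      int needed =
          drain_began ? static_cast<int>(pooled_sessions.size()) : size;
      while (static_cast<int>(pooled_sessions.size()) > needed)
        pooled_sessions.pop_front();
      while (static_cast<int>(pooled_sessions.size()) < needed)
        pooled_sessions.push_back(std::make_unique<Session>());
      return true;
    }

    std::unique_ptr<Session> TakePooledSession() {
      if (pooled_sessions.empty())
        return nullptr;
      std::unique_ptr<Session> session = std::move(pooled_sessions.back());
      pooled_sessions.pop_back();
      return session;
    }
  };

  int main() {
    FakeAllocator allocator;
    allocator.SetPoolSize(2);       // Pool filled; size() == 2.
    allocator.TakePooledSession();  // size() == 1 != 2; draining has begun.
    std::cout << allocator.SetPoolSize(3) << "\n";  // Prints 0 (refused).
  }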

webrtc/p2p/base/portallocator.h

@@ -298,7 +298,9 @@ class PortAllocator : public sigslot::has_slots<> {
   //
   // If the servers are not changing but the candidate pool size is,
   // pooled sessions will be either created or destroyed as necessary.
-  void SetConfiguration(const ServerAddresses& stun_servers,
+  //
+  // Returns true if the configuration could successfully be changed.
+  bool SetConfiguration(const ServerAddresses& stun_servers,
                         const std::vector<RelayServerConfig>& turn_servers,
                         int candidate_pool_size,
                         bool prune_turn_ports);
@@ -309,7 +311,7 @@ class PortAllocator : public sigslot::has_slots<> {
     return turn_servers_;
   }

-  int candidate_pool_size() const { return target_pooled_session_count_; }
+  int candidate_pool_size() const { return candidate_pool_size_; }

   // Sets the network types to ignore.
   // Values are defined by the AdapterType enum.
@@ -412,11 +414,7 @@ class PortAllocator : public sigslot::has_slots<> {
  private:
   ServerAddresses stun_servers_;
   std::vector<RelayServerConfig> turn_servers_;
-  // The last size passed into SetConfiguration.
-  int target_pooled_session_count_ = 0;
-  // This variable represents the total number of pooled sessions
-  // both owned by this class and taken by TakePooledSession.
-  int allocated_pooled_session_count_ = 0;
+  int candidate_pool_size_ = 0;  // Last value passed into SetConfiguration.
   std::deque<std::unique_ptr<PortAllocatorSession>> pooled_sessions_;
   bool prune_turn_ports_ = false;
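With the void-to-bool signature change, call sites are expected to check the
result rather than assume success. A hypothetical caller sketch (variable
names assumed, not from this CL):

  if (!allocator->SetConfiguration(stun_servers, turn_servers,
                                   candidate_pool_size, prune_turn_ports)) {
    // The pool-size change was refused: either the pool has started to be
    // drained, or the requested size was negative.
  }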

webrtc/p2p/base/portallocator_unittest.cc

@@ -32,9 +32,15 @@ class PortAllocatorTest : public testing::Test, public sigslot::has_slots<> {
  protected:
   void SetConfigurationWithPoolSize(int candidate_pool_size) {
-    allocator_->SetConfiguration(cricket::ServerAddresses(),
-                                 std::vector<cricket::RelayServerConfig>(),
-                                 candidate_pool_size, false);
+    EXPECT_TRUE(allocator_->SetConfiguration(
+        cricket::ServerAddresses(), std::vector<cricket::RelayServerConfig>(),
+        candidate_pool_size, false));
+  }
+
+  void SetConfigurationWithPoolSizeExpectFailure(int candidate_pool_size) {
+    EXPECT_FALSE(allocator_->SetConfiguration(
+        cricket::ServerAddresses(), std::vector<cricket::RelayServerConfig>(),
+        candidate_pool_size, false));
   }

   std::unique_ptr<cricket::FakePortAllocatorSession> CreateSession(
@@ -104,14 +110,16 @@ TEST_F(PortAllocatorTest, CreateSession) {
 TEST_F(PortAllocatorTest, SetConfigurationUpdatesIceServers) {
   cricket::ServerAddresses stun_servers_1 = {stun_server_1};
   std::vector<cricket::RelayServerConfig> turn_servers_1 = {turn_server_1};
-  allocator_->SetConfiguration(stun_servers_1, turn_servers_1, 0, false);
+  EXPECT_TRUE(
+      allocator_->SetConfiguration(stun_servers_1, turn_servers_1, 0, false));
   EXPECT_EQ(stun_servers_1, allocator_->stun_servers());
   EXPECT_EQ(turn_servers_1, allocator_->turn_servers());

   // Update with a different set of servers.
   cricket::ServerAddresses stun_servers_2 = {stun_server_2};
   std::vector<cricket::RelayServerConfig> turn_servers_2 = {turn_server_2};
-  allocator_->SetConfiguration(stun_servers_2, turn_servers_2, 0, false);
+  EXPECT_TRUE(
+      allocator_->SetConfiguration(stun_servers_2, turn_servers_2, 0, false));
   EXPECT_EQ(stun_servers_2, allocator_->stun_servers());
   EXPECT_EQ(turn_servers_2, allocator_->turn_servers());
 }
@@ -128,9 +136,8 @@ TEST_F(PortAllocatorTest, SetConfigurationUpdatesCandidatePoolSize) {
 }

-// A negative pool size should just be treated as zero.
-TEST_F(PortAllocatorTest, SetConfigurationWithNegativePoolSizeDoesntCrash) {
-  SetConfigurationWithPoolSize(-1);
-  // No asserts; we're just testing that this doesn't crash.
+TEST_F(PortAllocatorTest, SetConfigurationWithNegativePoolSizeFails) {
+  SetConfigurationWithPoolSizeExpectFailure(-1);
 }

 // Test that if the candidate pool size is nonzero, pooled sessions are
@@ -162,18 +169,16 @@ TEST_F(PortAllocatorTest, SetConfigurationDestroysPooledSessions) {
   EXPECT_EQ(1, GetAllPooledSessionsReturnCount());
 }

-// Test that if the candidate pool size is reduced and increased, but reducing
-// didn't actually destroy any sessions (because they were already given away),
-// increasing the size to its initial value doesn't create a new session.
-TEST_F(PortAllocatorTest, SetConfigurationDoesntCreateExtraSessions) {
+// Test that after the pool starts to be drained, changing the pool size is not
+// allowed.
+TEST_F(PortAllocatorTest, CantChangePoolSizeAfterTakePooledSession) {
   SetConfigurationWithPoolSize(1);
   TakePooledSession();
-  SetConfigurationWithPoolSize(0);
-  SetConfigurationWithPoolSize(1);
-  EXPECT_EQ(0, GetAllPooledSessionsReturnCount());
+  SetConfigurationWithPoolSizeExpectFailure(2);
+  SetConfigurationWithPoolSizeExpectFailure(0);
 }

-// According to JSEP, exising pooled sessions should be destroyed and new
+// According to JSEP, existing pooled sessions should be destroyed and new
 // ones created when the ICE servers change.
 TEST_F(PortAllocatorTest,
        SetConfigurationRecreatesPooledSessionsWhenIceServersChange) {