Remove ContinualGatheringPolicy::GATHER_CONTINUALLY_AND_RECOVER.
This policy is not implemented.

Bug: None
Change-Id: I6c162d61c2488a4726c20df5c14439f83633a198
Reviewed-on: https://webrtc-review.googlesource.com/76041
Commit-Queue: Qingsi Wang <qingsi@google.com>
Reviewed-by: Taylor Brandstetter <deadbeef@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23591}
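For context, the two policies that remain are selected through PeerConnectionInterface::RTCConfiguration. A minimal caller-side sketch, assuming the header name used around this revision (newer checkouts use api/peer_connection_interface.h); the helper function is illustrative, not part of this commit:

#include "api/peerconnectioninterface.h"

// Illustrative helper: picks between the two remaining gathering policies.
void ConfigureGathering(
    webrtc::PeerConnectionInterface::RTCConfiguration* config,
    bool keep_gathering) {
  // GATHER_CONTINUALLY keeps the most recent port allocator session running;
  // GATHER_ONCE stops gathering after the initial session completes.
  config->continual_gathering_policy =
      keep_gathering ? webrtc::PeerConnectionInterface::GATHER_CONTINUALLY
                     : webrtc::PeerConnectionInterface::GATHER_ONCE;
}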
@@ -51,9 +51,6 @@ enum ContinualGatheringPolicy {
   GATHER_ONCE = 0,
   // The most recent port allocator session will keep on running.
   GATHER_CONTINUALLY,
-  // The most recent port allocator session will keep on running, and it will
-  // try to recover connectivity if the channel becomes disconnected.
-  GATHER_CONTINUALLY_AND_RECOVER,
 };
 
 // ICE Nomination mode.
@@ -78,8 +75,7 @@ struct IceConfig {
   ContinualGatheringPolicy continual_gathering_policy = GATHER_ONCE;
 
   bool gather_continually() const {
-    return continual_gathering_policy == GATHER_CONTINUALLY ||
-           continual_gathering_policy == GATHER_CONTINUALLY_AND_RECOVER;
+    return continual_gathering_policy == GATHER_CONTINUALLY;
   }
 
   // Whether we should prioritize Relay/Relay candidate when nothing
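After this hunk, gather_continually() reduces to a single comparison. A self-contained mock-up of the simplified logic, using toy copies of the enum and struct rather than the real WebRTC headers:

#include <cassert>

// Toy mirror of the enum as it looks after this commit.
enum ContinualGatheringPolicy {
  GATHER_ONCE = 0,
  // The most recent port allocator session will keep on running.
  GATHER_CONTINUALLY,
};

// Toy mirror of the relevant part of cricket::IceConfig.
struct IceConfig {
  ContinualGatheringPolicy continual_gathering_policy = GATHER_ONCE;
  bool gather_continually() const {
    return continual_gathering_policy == GATHER_CONTINUALLY;
  }
};

int main() {
  IceConfig config;
  assert(!config.gather_continually());  // Default policy is GATHER_ONCE.
  config.continual_gathering_policy = GATHER_CONTINUALLY;
  assert(config.gather_continually());
  return 0;
}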
@@ -3011,54 +3011,6 @@ TEST_F(P2PTransportChannelMultihomedTest,
   DestroyChannels();
 }
 
-/*
-TODO(honghaiz) Once continual gathering fully supports
-GATHER_CONTINUALLY_AND_RECOVER, put this test back.
-
-// Tests that if the backup connections are lost and then the interface with the
-// selected connection is gone, continual gathering will restore the
-// connectivity.
-TEST_F(P2PTransportChannelMultihomedTest,
-       TestBackupConnectionLostThenInterfaceGone) {
-  rtc::ScopedFakeClock clock;
-  auto& wifi = kAlternateAddrs;
-  auto& cellular = kPublicAddrs;
-  AddAddress(0, wifi[0], "test_wifi0", rtc::ADAPTER_TYPE_WIFI);
-  AddAddress(0, cellular[0], "test_cell0", rtc::ADAPTER_TYPE_CELLULAR);
-  AddAddress(1, wifi[1], "test_wifi1", rtc::ADAPTER_TYPE_WIFI);
-  AddAddress(1, cellular[1], "test_cell1", rtc::ADAPTER_TYPE_CELLULAR);
-  // Use only local ports for simplicity.
-  SetAllocatorFlags(0, kOnlyLocalPorts);
-  SetAllocatorFlags(1, kOnlyLocalPorts);
-
-  // Set continual gathering policy.
-  IceConfig config = CreateIceConfig(1000, GATHER_CONTINUALLY_AND_RECOVER);
-  // Create channels and let them go writable, as usual.
-  CreateChannels(config, config);
-  EXPECT_TRUE_SIMULATED_WAIT(ep1_ch1()->receiving() && ep1_ch1()->writable() &&
-                                 ep2_ch1()->receiving() && ep2_ch1()->writable(),
-                             kMediumTimeout, clock);
-  EXPECT_TRUE(ep1_ch1()->selected_connection() &&
-              ep2_ch1()->selected_connection() &&
-              LocalCandidate(ep1_ch1())->address().EqualIPs(wifi[0]) &&
-              RemoteCandidate(ep1_ch1())->address().EqualIPs(wifi[1]));
-
-  // First destroy all backup connection.
-  DestroyAllButBestConnection(ep1_ch1());
-
-  SIMULATED_WAIT(false, 10, clock);
-  // Then the interface of the best connection goes away.
-  RemoveAddress(0, wifi[0]);
-  EXPECT_TRUE_SIMULATED_WAIT(
-      ep1_ch1()->selected_connection() && ep2_ch1()->selected_connection() &&
-          LocalCandidate(ep1_ch1())->address().EqualIPs(cellular[0]) &&
-          RemoteCandidate(ep1_ch1())->address().EqualIPs(wifi[1]),
-      kMediumTimeout, clock);
-
-  DestroyChannels();
-}
-*/
-
 // Tests that the backup connection will be restored after it is destroyed.
 TEST_F(P2PTransportChannelMultihomedTest, TestRestoreBackupConnection) {
   rtc::ScopedFakeClock clock;
@@ -4987,8 +4987,6 @@ bool PeerConnection::GetTransportDescription(
 cricket::IceConfig PeerConnection::ParseIceConfig(
     const PeerConnectionInterface::RTCConfiguration& config) const {
   cricket::ContinualGatheringPolicy gathering_policy;
-  // TODO(honghaiz): Add the third continual gathering policy in
-  // PeerConnectionInterface and map it to GATHER_CONTINUALLY_AND_RECOVER.
   switch (config.continual_gathering_policy) {
     case PeerConnectionInterface::GATHER_ONCE:
       gathering_policy = cricket::GATHER_ONCE;
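The diff view cuts off inside this switch; the remaining cases are not shown. For orientation only, a standalone sketch of a two-policy mapping of this shape, with toy namespaces standing in for the webrtc and cricket ones (not the file's actual contents):

// Illustrative mapping between an API-level and an ICE-level gathering policy.
// The enumerator names follow the diff above; everything else is a stand-in.
namespace api {
enum class ContinualGatheringPolicy { GATHER_ONCE, GATHER_CONTINUALLY };
}  // namespace api
namespace ice {
enum class ContinualGatheringPolicy { GATHER_ONCE, GATHER_CONTINUALLY };
}  // namespace ice

ice::ContinualGatheringPolicy MapGatheringPolicy(
    api::ContinualGatheringPolicy policy) {
  switch (policy) {
    case api::ContinualGatheringPolicy::GATHER_ONCE:
      return ice::ContinualGatheringPolicy::GATHER_ONCE;
    case api::ContinualGatheringPolicy::GATHER_CONTINUALLY:
      return ice::ContinualGatheringPolicy::GATHER_CONTINUALLY;
  }
  // Unreachable for well-formed enum values.
  return ice::ContinualGatheringPolicy::GATHER_ONCE;
}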