Remove WebRTC-ExponentialNackBackoff field trial from NackRequester.

This flag has gone unused for a long time; it is time to clean it up.
While we're here, convert NackRequester to use unit types.

Bug: webrtc:8624
Change-Id: I1f314f9b5b6771d4f9c351a7a9a887130b86907c
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/267408
Commit-Queue: Erik Språng <sprang@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#37400}
This commit is contained in:
Erik Språng
2022-07-01 16:46:56 +02:00
committed by WebRTC LUCI CQ
parent 68fef2f3e9
commit 609aef3149
3 changed files with 54 additions and 132 deletions

View File

@ -24,22 +24,22 @@
namespace webrtc {
namespace {
const int kMaxPacketAge = 10000;
const int kMaxNackPackets = 1000;
const int kDefaultRttMs = 100;
const int kMaxNackRetries = 10;
const int kMaxReorderedPackets = 128;
const int kNumReorderingBuckets = 10;
const int kDefaultSendNackDelayMs = 0;
constexpr int kMaxPacketAge = 10'000;
constexpr int kMaxNackPackets = 1000;
constexpr TimeDelta kDefaultRtt = TimeDelta::Millis(100);
constexpr int kMaxNackRetries = 10;
constexpr int kMaxReorderedPackets = 128;
constexpr int kNumReorderingBuckets = 10;
constexpr TimeDelta kDefaultSendNackDelay = TimeDelta::Zero();
int64_t GetSendNackDelay(const FieldTrialsView& field_trials) {
TimeDelta GetSendNackDelay(const FieldTrialsView& field_trials) {
int64_t delay_ms = strtol(
field_trials.Lookup("WebRTC-SendNackDelayMs").c_str(), nullptr, 10);
if (delay_ms > 0 && delay_ms <= 20) {
RTC_LOG(LS_INFO) << "SendNackDelay is set to " << delay_ms;
return delay_ms;
return TimeDelta::Millis(delay_ms);
}
return kDefaultSendNackDelayMs;
return kDefaultSendNackDelay;
}
} // namespace
@ -91,49 +91,21 @@ ScopedNackPeriodicProcessorRegistration::
}
NackRequester::NackInfo::NackInfo()
: seq_num(0), send_at_seq_num(0), sent_at_time(-1), retries(0) {}
: seq_num(0),
send_at_seq_num(0),
created_at_time(Timestamp::MinusInfinity()),
sent_at_time(Timestamp::MinusInfinity()),
retries(0) {}
NackRequester::NackInfo::NackInfo(uint16_t seq_num,
uint16_t send_at_seq_num,
int64_t created_at_time)
Timestamp created_at_time)
: seq_num(seq_num),
send_at_seq_num(send_at_seq_num),
created_at_time(created_at_time),
sent_at_time(-1),
sent_at_time(Timestamp::MinusInfinity()),
retries(0) {}
NackRequester::BackoffSettings::BackoffSettings(TimeDelta min_retry,
TimeDelta max_rtt,
double base)
: min_retry_interval(min_retry), max_rtt(max_rtt), base(base) {}
absl::optional<NackRequester::BackoffSettings>
NackRequester::BackoffSettings::ParseFromFieldTrials(
const FieldTrialsView& field_trials) {
// Matches magic number in RTPSender::OnReceivedNack().
const TimeDelta kDefaultMinRetryInterval = TimeDelta::Millis(5);
// Upper bound on link-delay considered for exponential backoff.
// Selected so that cumulative delay with 1.25 base and 10 retries ends up
// below 3s, since above that there will be a FIR generated instead.
const TimeDelta kDefaultMaxRtt = TimeDelta::Millis(160);
// Default base for exponential backoff, adds 25% RTT delay for each retry.
const double kDefaultBase = 1.25;
FieldTrialParameter<bool> enabled("enabled", false);
FieldTrialParameter<TimeDelta> min_retry("min_retry",
kDefaultMinRetryInterval);
FieldTrialParameter<TimeDelta> max_rtt("max_rtt", kDefaultMaxRtt);
FieldTrialParameter<double> base("base", kDefaultBase);
ParseFieldTrial({&enabled, &min_retry, &max_rtt, &base},
field_trials.Lookup("WebRTC-ExponentialNackBackoff"));
if (enabled) {
return NackRequester::BackoffSettings(min_retry.Get(), max_rtt.Get(),
base.Get());
}
return absl::nullopt;
}
NackRequester::NackRequester(TaskQueueBase* current_queue,
NackPeriodicProcessor* periodic_processor,
Clock* clock,
@ -146,10 +118,9 @@ NackRequester::NackRequester(TaskQueueBase* current_queue,
keyframe_request_sender_(keyframe_request_sender),
reordering_histogram_(kNumReorderingBuckets, kMaxReorderedPackets),
initialized_(false),
rtt_ms_(kDefaultRttMs),
rtt_(kDefaultRtt),
newest_seq_num_(0),
send_nack_delay_ms_(GetSendNackDelay(field_trials)),
backoff_settings_(BackoffSettings::ParseFromFieldTrials(field_trials)),
send_nack_delay_(GetSendNackDelay(field_trials)),
processor_registration_(this, periodic_processor) {
RTC_DCHECK(clock_);
RTC_DCHECK(nack_sender_);
@ -262,7 +233,7 @@ void NackRequester::ClearUpTo(uint16_t seq_num) {
void NackRequester::UpdateRtt(int64_t rtt_ms) {
RTC_DCHECK_RUN_ON(worker_thread_);
rtt_ms_ = rtt_ms;
rtt_ = TimeDelta::Millis(rtt_ms);
}
bool NackRequester::RemovePacketsUntilKeyFrame() {
@ -314,7 +285,7 @@ void NackRequester::AddPacketsToNack(uint16_t seq_num_start,
if (recovered_list_.find(seq_num) != recovered_list_.end())
continue;
NackInfo nack_info(seq_num, seq_num + WaitNumberOfPackets(0.5),
clock_->TimeInMilliseconds());
clock_->CurrentTime());
RTC_DCHECK(nack_list_.find(seq_num) == nack_list_.end());
nack_list_[seq_num] = nack_info;
}
@ -329,30 +300,16 @@ std::vector<uint16_t> NackRequester::GetNackBatch(NackFilterOptions options) {
std::vector<uint16_t> nack_batch;
auto it = nack_list_.begin();
while (it != nack_list_.end()) {
TimeDelta resend_delay = TimeDelta::Millis(rtt_ms_);
if (backoff_settings_) {
resend_delay =
std::max(resend_delay, backoff_settings_->min_retry_interval);
if (it->second.retries > 1) {
TimeDelta exponential_backoff =
std::min(TimeDelta::Millis(rtt_ms_), backoff_settings_->max_rtt) *
std::pow(backoff_settings_->base, it->second.retries - 1);
resend_delay = std::max(resend_delay, exponential_backoff);
}
}
bool delay_timed_out =
now.ms() - it->second.created_at_time >= send_nack_delay_ms_;
bool nack_on_rtt_passed =
now.ms() - it->second.sent_at_time >= resend_delay.ms();
bool delay_timed_out = now - it->second.created_at_time >= send_nack_delay_;
bool nack_on_rtt_passed = now - it->second.sent_at_time >= rtt_;
bool nack_on_seq_num_passed =
it->second.sent_at_time == -1 &&
it->second.sent_at_time.IsInfinite() &&
AheadOrAt(newest_seq_num_, it->second.send_at_seq_num);
if (delay_timed_out && ((consider_seq_num && nack_on_seq_num_passed) ||
(consider_timestamp && nack_on_rtt_passed))) {
nack_batch.emplace_back(it->second.seq_num);
++it->second.retries;
it->second.sent_at_time = now.ms();
it->second.sent_at_time = now;
if (it->second.retries >= kMaxNackRetries) {
RTC_LOG(LS_WARNING) << "Sequence number " << it->second.seq_num
<< " removed from NACK list due to max retries.";

View File

@ -21,6 +21,7 @@
#include "api/sequence_checker.h"
#include "api/task_queue/pending_task_safety_flag.h"
#include "api/units/time_delta.h"
#include "api/units/timestamp.h"
#include "modules/include/module_common_types.h"
#include "modules/video_coding/histogram.h"
#include "rtc_base/numerics/sequence_number_util.h"
@ -95,28 +96,15 @@ class NackRequester final : public NackRequesterBase {
NackInfo();
NackInfo(uint16_t seq_num,
uint16_t send_at_seq_num,
int64_t created_at_time);
Timestamp created_at_time);
uint16_t seq_num;
uint16_t send_at_seq_num;
int64_t created_at_time;
int64_t sent_at_time;
Timestamp created_at_time;
Timestamp sent_at_time;
int retries;
};
struct BackoffSettings {
BackoffSettings(TimeDelta min_retry, TimeDelta max_rtt, double base);
static absl::optional<BackoffSettings> ParseFromFieldTrials(
const FieldTrialsView& field_trials);
// Min time between nacks.
const TimeDelta min_retry_interval;
// Upper bound on link-delay considered for exponential backoff.
const TimeDelta max_rtt;
// Base for the exponential backoff.
const double base;
};
void AddPacketsToNack(uint16_t seq_num_start, uint16_t seq_num_end)
RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_);
@ -152,13 +140,11 @@ class NackRequester final : public NackRequesterBase {
RTC_GUARDED_BY(worker_thread_);
video_coding::Histogram reordering_histogram_ RTC_GUARDED_BY(worker_thread_);
bool initialized_ RTC_GUARDED_BY(worker_thread_);
int64_t rtt_ms_ RTC_GUARDED_BY(worker_thread_);
TimeDelta rtt_ RTC_GUARDED_BY(worker_thread_);
uint16_t newest_seq_num_ RTC_GUARDED_BY(worker_thread_);
// Adds a delay before send nack on packet received.
const int64_t send_nack_delay_ms_;
const absl::optional<BackoffSettings> backoff_settings_;
const TimeDelta send_nack_delay_;
ScopedNackPeriodicProcessorRegistration processor_registration_;

View File

@ -24,16 +24,12 @@ namespace webrtc {
// TODO(bugs.webrtc.org/11594): Use the use the GlobalSimulatedTimeController
// instead of RunLoop. At the moment we mix use of the Clock and the underlying
// implementation of RunLoop, which is realtime.
class TestNackRequester : public ::testing::TestWithParam<bool>,
class TestNackRequester : public ::testing::Test,
public NackSender,
public KeyFrameRequestSender {
protected:
TestNackRequester()
: clock_(new SimulatedClock(0)),
field_trial_(GetParam()
? "WebRTC-ExponentialNackBackoff/enabled:true/"
: "WebRTC-ExponentialNackBackoff/enabled:false/"),
keyframes_requested_(0) {}
: clock_(new SimulatedClock(0)), keyframes_requested_(0) {}
void SetUp() override {}
@ -84,9 +80,10 @@ class TestNackRequester : public ::testing::TestWithParam<bool>,
RTC_DCHECK(!nack_module_.get());
nack_periodic_processor_ =
std::make_unique<NackPeriodicProcessor>(interval);
test::ScopedKeyValueConfig empty_field_trials_;
nack_module_ = std::make_unique<NackRequester>(
TaskQueueBase::Current(), nack_periodic_processor_.get(), clock_.get(),
this, this, field_trial_);
this, this, empty_field_trials_);
nack_module_->UpdateRtt(kDefaultRttMs);
return *nack_module_.get();
}
@ -95,7 +92,6 @@ class TestNackRequester : public ::testing::TestWithParam<bool>,
rtc::AutoThread main_thread_;
test::RunLoop loop_;
std::unique_ptr<SimulatedClock> clock_;
test::ScopedKeyValueConfig field_trial_;
std::unique_ptr<NackPeriodicProcessor> nack_periodic_processor_;
std::unique_ptr<NackRequester> nack_module_;
std::vector<uint16_t> sent_nacks_;
@ -104,7 +100,7 @@ class TestNackRequester : public ::testing::TestWithParam<bool>,
bool timed_out_ = false;
};
TEST_P(TestNackRequester, NackOnePacket) {
TEST_F(TestNackRequester, NackOnePacket) {
NackRequester& nack_module = CreateNackModule();
nack_module.OnReceivedPacket(1, false, false);
nack_module.OnReceivedPacket(3, false, false);
@ -112,7 +108,7 @@ TEST_P(TestNackRequester, NackOnePacket) {
EXPECT_EQ(2, sent_nacks_[0]);
}
TEST_P(TestNackRequester, WrappingSeqNum) {
TEST_F(TestNackRequester, WrappingSeqNum) {
NackRequester& nack_module = CreateNackModule();
nack_module.OnReceivedPacket(0xfffe, false, false);
nack_module.OnReceivedPacket(1, false, false);
@ -121,7 +117,7 @@ TEST_P(TestNackRequester, WrappingSeqNum) {
EXPECT_EQ(0, sent_nacks_[1]);
}
TEST_P(TestNackRequester, WrappingSeqNumClearToKeyframe) {
TEST_F(TestNackRequester, WrappingSeqNumClearToKeyframe) {
NackRequester& nack_module = CreateNackModule(TimeDelta::Millis(10));
nack_module.OnReceivedPacket(0xfffe, false, false);
nack_module.OnReceivedPacket(1, false, false);
@ -186,7 +182,7 @@ TEST_P(TestNackRequester, WrappingSeqNumClearToKeyframe) {
EXPECT_EQ(1006, sent_nacks_[502]);
}
TEST_P(TestNackRequester, ResendNack) {
TEST_F(TestNackRequester, ResendNack) {
NackRequester& nack_module = CreateNackModule(TimeDelta::Millis(1));
nack_module.OnReceivedPacket(1, false, false);
nack_module.OnReceivedPacket(3, false, false);
@ -194,25 +190,12 @@ TEST_P(TestNackRequester, ResendNack) {
ASSERT_EQ(expected_nacks_sent, sent_nacks_.size());
EXPECT_EQ(2, sent_nacks_[0]);
if (GetParam()) {
// Retry has to wait at least 5ms by default.
nack_module.UpdateRtt(1);
clock_->AdvanceTimeMilliseconds(4);
Flush(); // Too early.
EXPECT_EQ(expected_nacks_sent, sent_nacks_.size());
nack_module.UpdateRtt(1);
clock_->AdvanceTimeMilliseconds(1);
WaitForSendNack(); // Fast retransmit allowed.
EXPECT_EQ(++expected_nacks_sent, sent_nacks_.size());
clock_->AdvanceTimeMilliseconds(1);
WaitForSendNack(); // Now allowed.
EXPECT_EQ(++expected_nacks_sent, sent_nacks_.size());
} else {
nack_module.UpdateRtt(1);
clock_->AdvanceTimeMilliseconds(1);
WaitForSendNack(); // Fast retransmit allowed.
EXPECT_EQ(++expected_nacks_sent, sent_nacks_.size());
}
// N:th try has to wait b^(N-1) * rtt by default.
const double b = GetParam() ? 1.25 : 1.0;
// Each try has to wait rtt by default.
for (int i = 2; i < 10; ++i) {
// Change RTT, above the 40ms max for exponential backoff.
TimeDelta rtt = TimeDelta::Millis(160); // + (i * 10 - 40)
@ -220,7 +203,7 @@ TEST_P(TestNackRequester, ResendNack) {
// RTT gets capped at 160ms in backoff calculations.
TimeDelta expected_backoff_delay =
std::pow(b, i - 1) * std::min(rtt, TimeDelta::Millis(160));
(i - 1) * std::min(rtt, TimeDelta::Millis(160));
// Move to one millisecond before next allowed NACK.
clock_->AdvanceTimeMilliseconds(expected_backoff_delay.ms() - 1);
@ -240,7 +223,7 @@ TEST_P(TestNackRequester, ResendNack) {
EXPECT_EQ(expected_nacks_sent, sent_nacks_.size());
}
TEST_P(TestNackRequester, ResendPacketMaxRetries) {
TEST_F(TestNackRequester, ResendPacketMaxRetries) {
NackRequester& nack_module = CreateNackModule(TimeDelta::Millis(1));
nack_module.OnReceivedPacket(1, false, false);
nack_module.OnReceivedPacket(3, false, false);
@ -261,7 +244,7 @@ TEST_P(TestNackRequester, ResendPacketMaxRetries) {
EXPECT_EQ(10u, sent_nacks_.size());
}
TEST_P(TestNackRequester, TooLargeNackList) {
TEST_F(TestNackRequester, TooLargeNackList) {
NackRequester& nack_module = CreateNackModule();
nack_module.OnReceivedPacket(0, false, false);
nack_module.OnReceivedPacket(1001, false, false);
@ -275,7 +258,7 @@ TEST_P(TestNackRequester, TooLargeNackList) {
EXPECT_EQ(1, keyframes_requested_);
}
TEST_P(TestNackRequester, TooLargeNackListWithKeyFrame) {
TEST_F(TestNackRequester, TooLargeNackListWithKeyFrame) {
NackRequester& nack_module = CreateNackModule();
nack_module.OnReceivedPacket(0, false, false);
nack_module.OnReceivedPacket(1, true, false);
@ -290,7 +273,7 @@ TEST_P(TestNackRequester, TooLargeNackListWithKeyFrame) {
EXPECT_EQ(1, keyframes_requested_);
}
TEST_P(TestNackRequester, ClearUpTo) {
TEST_F(TestNackRequester, ClearUpTo) {
NackRequester& nack_module = CreateNackModule(TimeDelta::Millis(1));
nack_module.OnReceivedPacket(0, false, false);
nack_module.OnReceivedPacket(100, false, false);
@ -304,7 +287,7 @@ TEST_P(TestNackRequester, ClearUpTo) {
EXPECT_EQ(50, sent_nacks_[0]);
}
TEST_P(TestNackRequester, ClearUpToWrap) {
TEST_F(TestNackRequester, ClearUpToWrap) {
NackRequester& nack_module = CreateNackModule();
nack_module.OnReceivedPacket(0xfff0, false, false);
nack_module.OnReceivedPacket(0xf, false, false);
@ -318,7 +301,7 @@ TEST_P(TestNackRequester, ClearUpToWrap) {
EXPECT_EQ(0, sent_nacks_[0]);
}
TEST_P(TestNackRequester, PacketNackCount) {
TEST_F(TestNackRequester, PacketNackCount) {
NackRequester& nack_module = CreateNackModule(TimeDelta::Millis(1));
EXPECT_EQ(0, nack_module.OnReceivedPacket(0, false, false));
EXPECT_EQ(0, nack_module.OnReceivedPacket(2, false, false));
@ -341,7 +324,7 @@ TEST_P(TestNackRequester, PacketNackCount) {
EXPECT_EQ(0, nack_module.OnReceivedPacket(4, false, false));
}
TEST_P(TestNackRequester, NackListFullAndNoOverlapWithKeyframes) {
TEST_F(TestNackRequester, NackListFullAndNoOverlapWithKeyframes) {
NackRequester& nack_module = CreateNackModule();
const int kMaxNackPackets = 1000;
const unsigned int kFirstGap = kMaxNackPackets - 20;
@ -357,7 +340,7 @@ TEST_P(TestNackRequester, NackListFullAndNoOverlapWithKeyframes) {
EXPECT_EQ(kSecondGap, sent_nacks_.size());
}
TEST_P(TestNackRequester, HandleFecRecoveredPacket) {
TEST_F(TestNackRequester, HandleFecRecoveredPacket) {
NackRequester& nack_module = CreateNackModule();
nack_module.OnReceivedPacket(1, false, false);
nack_module.OnReceivedPacket(4, false, true);
@ -366,17 +349,13 @@ TEST_P(TestNackRequester, HandleFecRecoveredPacket) {
EXPECT_EQ(2u, sent_nacks_.size());
}
TEST_P(TestNackRequester, SendNackWithoutDelay) {
TEST_F(TestNackRequester, SendNackWithoutDelay) {
NackRequester& nack_module = CreateNackModule();
nack_module.OnReceivedPacket(0, false, false);
nack_module.OnReceivedPacket(100, false, false);
EXPECT_EQ(99u, sent_nacks_.size());
}
INSTANTIATE_TEST_SUITE_P(WithAndWithoutBackoff,
TestNackRequester,
::testing::Values(true, false));
class TestNackRequesterWithFieldTrial : public ::testing::Test,
public NackSender,
public KeyFrameRequestSender {