Add ability to control TaskQueuePacedSender holdback window.

Holdback window can be specified as absolute time and in terms of packet
send times. Example:
WebRTC-TaskQueuePacer/Enabled,holdback_window:20ms,holdback_packet:3/

If current conditions have us running with 2000kbps pacing rate and
1250byte (10kbit) packets, each packet send time is 5ms.
The holdback window would then be min(20ms, 3*5ms) = 15ms.

The default is, as before, 1ms, and packet send times are not taken
into account when the TQ pacer is used. The parameters have no effect
with the legacy process thread pacer.

Bug: webrtc:10809
Change-Id: I800de05107e2d4df461eabaaf1ca04fb4c5de51e
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/233421
Commit-Queue: Erik Språng <sprang@webrtc.org>
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#35266}
This commit is contained in:
Erik Språng
2021-10-26 16:19:03 +02:00
committed by WebRTC LUCI CQ
parent 7bb853f549
commit 0f86c1f125
7 changed files with 602 additions and 459 deletions

View File

@ -36,9 +36,11 @@ TaskQueuePacedSender::TaskQueuePacedSender(
RtcEventLog* event_log,
const WebRtcKeyValueConfig* field_trials,
TaskQueueFactory* task_queue_factory,
TimeDelta hold_back_window)
TimeDelta max_hold_back_window,
int max_hold_back_window_in_packets)
: clock_(clock),
hold_back_window_(hold_back_window),
max_hold_back_window_(max_hold_back_window),
max_hold_back_window_in_packets_(max_hold_back_window_in_packets),
pacing_controller_(clock,
packet_sender,
event_log,
@ -48,9 +50,12 @@ TaskQueuePacedSender::TaskQueuePacedSender(
stats_update_scheduled_(false),
last_stats_time_(Timestamp::MinusInfinity()),
is_shutdown_(false),
packet_size_(/*alpha=*/0.95),
task_queue_(task_queue_factory->CreateTaskQueue(
"TaskQueuePacedSender",
TaskQueueFactory::Priority::NORMAL)) {}
TaskQueueFactory::Priority::NORMAL)) {
packet_size_.Apply(1, 0);
}
TaskQueuePacedSender::~TaskQueuePacedSender() {
// Post an immediate task to mark the queue as shutting down.
@ -144,6 +149,7 @@ void TaskQueuePacedSender::EnqueuePackets(
task_queue_.PostTask([this, packets_ = std::move(packets)]() mutable {
RTC_DCHECK_RUN_ON(&task_queue_);
for (auto& packet : packets_) {
packet_size_.Apply(1, packet->size());
RTC_DCHECK_GE(packet->capture_time_ms(), 0);
pacing_controller_.EnqueuePacket(std::move(packet));
}
@ -227,6 +233,17 @@ void TaskQueuePacedSender::MaybeProcessPackets(
next_process_time = pacing_controller_.NextSendTime();
}
TimeDelta hold_back_window = max_hold_back_window_;
DataRate pacing_rate = pacing_controller_.pacing_rate();
DataSize avg_packet_size = DataSize::Bytes(packet_size_.filtered());
if (max_hold_back_window_in_packets_ > 0 && !pacing_rate.IsZero() &&
!avg_packet_size.IsZero()) {
TimeDelta avg_packet_send_time = avg_packet_size / pacing_rate;
hold_back_window =
std::min(hold_back_window,
avg_packet_send_time * max_hold_back_window_in_packets_);
}
absl::optional<TimeDelta> time_to_next_process;
if (pacing_controller_.IsProbing() &&
next_process_time != next_process_time_) {
@ -241,11 +258,11 @@ void TaskQueuePacedSender::MaybeProcessPackets(
(next_process_time - now).RoundDownTo(TimeDelta::Millis(1)));
}
} else if (next_process_time_.IsMinusInfinity() ||
next_process_time <= next_process_time_ - hold_back_window_) {
next_process_time <= next_process_time_ - hold_back_window) {
// Schedule a new task since there is none currently scheduled
// (`next_process_time_` is infinite), or the new process time is at least
// one holdback window earlier than whatever is currently scheduled.
time_to_next_process = std::max(next_process_time - now, hold_back_window_);
time_to_next_process = std::max(next_process_time - now, hold_back_window);
}
if (time_to_next_process) {