Add ability to control TaskQueuePacedSender holdback window.

The holdback window can be specified both as an absolute time and in terms of
packet send times. Example:
WebRTC-TaskQueuePacer/Enabled,holdback_window:20ms,holdback_packets:3/

If current conditions have us running at a 2000 kbps pacing rate with
1250 byte (10 kbit) packets, each packet send time is 5 ms.
The holdback window would then be min(20ms, 3 * 5ms) = 15ms.
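
For illustration only, a minimal sketch of how such an effective holdback
window can be derived from the two parameters (the helper name is
hypothetical and this is not the actual TaskQueuePacedSender code):

#include <algorithm>

#include "api/units/data_rate.h"
#include "api/units/data_size.h"
#include "api/units/time_delta.h"

// Hypothetical helper: combines the absolute window with the packet-based
// limit, mirroring the min(20ms, 3 * 5ms) example above.
webrtc::TimeDelta EffectiveHoldbackWindow(webrtc::TimeDelta holdback_window,
                                          int holdback_packets,
                                          webrtc::DataSize packet_size,
                                          webrtc::DataRate pacing_rate) {
  if (holdback_packets <= 0 || pacing_rate.IsZero()) {
    // Packet-based limit disabled (the default): use the absolute window.
    return holdback_window;
  }
  // Time to send one packet at the current pacing rate,
  // e.g. 1250 bytes / 2000 kbps = 5 ms.
  webrtc::TimeDelta packet_send_time = packet_size / pacing_rate;
  return std::min(holdback_window,
                  packet_send_time * static_cast<int64_t>(holdback_packets));
}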

The default is, as before, a 1 ms window with packet send times not taken
into account when the TQ pacer is used. The parameters have no effect with
the legacy process thread pacer.

Bug: webrtc:10809
Change-Id: I800de05107e2d4df461eabaaf1ca04fb4c5de51e
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/233421
Commit-Queue: Erik Språng <sprang@webrtc.org>
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#35266}
Erik Språng
2021-10-26 16:19:03 +02:00
committed by WebRTC LUCI CQ
parent 7bb853f549
commit 0f86c1f125
7 changed files with 602 additions and 459 deletions

@@ -75,6 +75,15 @@ bool IsRelayed(const rtc::NetworkRoute& route) {
 }  // namespace
 
+RtpTransportControllerSend::PacerSettings::PacerSettings(
+    const WebRtcKeyValueConfig* trials)
+    : tq_disabled("Disabled"),
+      holdback_window("holdback_window", PacingController::kMinSleepTime),
+      holdback_packets("holdback_packets", -1) {
+  ParseFieldTrial({&tq_disabled, &holdback_window, &holdback_packets},
+                  trials->Lookup("WebRTC-TaskQueuePacer"));
+}
+
 RtpTransportControllerSend::RtpTransportControllerSend(
     Clock* clock,
     webrtc::RtcEventLog* event_log,
@@ -89,8 +98,8 @@ RtpTransportControllerSend::RtpTransportControllerSend(
       bitrate_configurator_(bitrate_config),
       pacer_started_(false),
       process_thread_(std::move(process_thread)),
-      use_task_queue_pacer_(!IsDisabled(trials, "WebRTC-TaskQueuePacer")),
-      process_thread_pacer_(use_task_queue_pacer_
+      pacer_settings_(trials),
+      process_thread_pacer_(pacer_settings_.use_task_queue_pacer()
                                 ? nullptr
                                 : new PacedSender(clock,
                                                   &packet_router_,
@@ -98,14 +107,14 @@ RtpTransportControllerSend::RtpTransportControllerSend(
                                                   trials,
                                                   process_thread_.get())),
       task_queue_pacer_(
-          use_task_queue_pacer_
-              ? new TaskQueuePacedSender(
-                    clock,
-                    &packet_router_,
-                    event_log,
-                    trials,
-                    task_queue_factory,
-                    /*hold_back_window = */ PacingController::kMinSleepTime)
+          pacer_settings_.use_task_queue_pacer()
+              ? new TaskQueuePacedSender(clock,
+                                         &packet_router_,
+                                         event_log,
+                                         trials,
+                                         task_queue_factory,
+                                         pacer_settings_.holdback_window.Get(),
+                                         pacer_settings_.holdback_packets.Get())
               : nullptr),
       observer_(nullptr),
       controller_factory_override_(controller_factory),
@@ -194,14 +203,14 @@ void RtpTransportControllerSend::UpdateControlState() {
 }
 
 RtpPacketPacer* RtpTransportControllerSend::pacer() {
-  if (use_task_queue_pacer_) {
+  if (pacer_settings_.use_task_queue_pacer()) {
     return task_queue_pacer_.get();
   }
   return process_thread_pacer_.get();
 }
 
 const RtpPacketPacer* RtpTransportControllerSend::pacer() const {
-  if (use_task_queue_pacer_) {
+  if (pacer_settings_.use_task_queue_pacer()) {
     return task_queue_pacer_.get();
   }
   return process_thread_pacer_.get();
@@ -226,7 +235,7 @@ RtpTransportControllerSend::transport_feedback_observer() {
 }
 
 RtpPacketSender* RtpTransportControllerSend::packet_sender() {
-  if (use_task_queue_pacer_) {
+  if (pacer_settings_.use_task_queue_pacer()) {
     return task_queue_pacer_.get();
   }
   return process_thread_pacer_.get();
@@ -503,7 +512,7 @@ void RtpTransportControllerSend::IncludeOverheadInPacedSender() {
 void RtpTransportControllerSend::EnsureStarted() {
   if (!pacer_started_) {
     pacer_started_ = true;
-    if (use_task_queue_pacer_) {
+    if (pacer_settings_.use_task_queue_pacer()) {
       task_queue_pacer_->EnsureStarted();
     } else {
       process_thread_->Start();
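
As a usage note, the new parameters only take effect when the task queue
pacer itself is enabled through the same trial. A minimal sketch, assuming
the standalone field trial helper from system_wrappers is used to inject the
trial string before any WebRTC call objects are created:

#include "system_wrappers/include/field_trial.h"

// The string must outlive all WebRTC usage; a string literal is fine.
// Caps the pacer holdback at 20 ms or three packet send times,
// whichever is smaller, as in the commit message example.
constexpr char kFieldTrials[] =
    "WebRTC-TaskQueuePacer/Enabled,holdback_window:20ms,holdback_packets:3/";

void InitWebRtcFieldTrials() {
  webrtc::field_trial::InitFieldTrialsFromString(kFieldTrials);
  // ... then create the PeerConnectionFactory / Call as usual.
}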