Preparing for task queue in congestion controller

This CL prepares for a later CL that introduces a new send-side congestion
controller running on a task queue. It mostly consists of minor fixes but
also adds some new interfaces that are not yet used in practice.

Bug: webrtc:8415
Change-Id: I1b58d0180a18eb15320d18733dac0dfe2e0f902a
Reviewed-on: https://webrtc-review.googlesource.com/53321
Commit-Queue: Sebastian Jansson <srte@webrtc.org>
Reviewed-by: Björn Terelius <terelius@webrtc.org>
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#22099}
Author: Sebastian Jansson, 2018-02-20 10:46:39 +01:00 (committed by Commit Bot)
Parent: 645898a454
Commit: 439f0bc69a
17 changed files with 159 additions and 121 deletions
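
The new interfaces mentioned in the message are visible in the diffs below. A minimal sketch of the new entry points, with signatures copied from those diffs; the `*Sketch` class names and empty bodies are illustrative only, not the real headers:

```cpp
#include <cstdint>

class SendSideBandwidthEstimationSketch {
 public:
  // New: the combined UpdateReceiverBlock(fraction_loss, rtt_ms,
  // number_of_packets, now_ms) now forwards to these two methods, so packet
  // loss and RTT can also be reported separately.
  void UpdatePacketsLost(int packets_lost, int number_of_packets,
                         int64_t now_ms);
  void UpdateRtt(int64_t rtt_ms, int64_t now_ms);
};

class PacerSketch {
 public:
  // New: replaces SetEstimatedBitrate()/SetSendBitrateLimits(); the caller now
  // passes an already-multiplied pacing rate and an explicit padding rate.
  virtual void SetPacingRates(uint32_t pacing_rate_bps,
                              uint32_t padding_rate_bps) {}
  virtual ~PacerSketch() = default;
};
```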

View File

@ -340,11 +340,11 @@ TEST_F(BitrateControllerTest, OneBitrateObserverMultipleReportBlocks) {
report_blocks.clear();
// All packets lost on stream with few packets, no back-off.
report_blocks.push_back(CreateReportBlock(1, 2, 1, sequence_number[0]));
report_blocks.push_back(CreateReportBlock(1, 2, 0, sequence_number[0]));
report_blocks.push_back(CreateReportBlock(1, 3, 255, sequence_number[1]));
bandwidth_observer_->OnReceivedRtcpReceiverReport(report_blocks, 50, time_ms);
EXPECT_EQ(bitrate_observer_.last_bitrate_, last_bitrate);
EXPECT_EQ(WeightedLoss(20, 1, 1, 255), bitrate_observer_.last_fraction_loss_);
EXPECT_EQ(WeightedLoss(20, 0, 1, 255), bitrate_observer_.last_fraction_loss_);
EXPECT_EQ(50, bitrate_observer_.last_rtt_);
last_bitrate = bitrate_observer_.last_bitrate_;
sequence_number[0] += 20;

View File

@ -105,7 +105,7 @@ bool ReadBweLossExperimentParameters(float* low_loss_threshold,
} // namespace
SendSideBandwidthEstimation::SendSideBandwidthEstimation(RtcEventLog* event_log)
: lost_packets_since_last_loss_update_Q8_(0),
: lost_packets_since_last_loss_update_(0),
expected_packets_since_last_loss_update_(0),
current_bitrate_bps_(0),
min_bitrate_configured_(congestion_controller::GetMinBitrateBps()),
@ -125,6 +125,7 @@ SendSideBandwidthEstimation::SendSideBandwidthEstimation(RtcEventLog* event_log)
initially_lost_packets_(0),
bitrate_at_2_seconds_kbps_(0),
uma_update_state_(kNoUpdate),
uma_rtt_state_(kNoUpdate),
rampup_uma_stats_updated_(kNumUmaRampupMetrics, false),
event_log_(event_log),
last_rtc_event_log_ms_(-1),
@ -206,24 +207,28 @@ void SendSideBandwidthEstimation::UpdateDelayBasedEstimate(
}
void SendSideBandwidthEstimation::UpdateReceiverBlock(uint8_t fraction_loss,
int64_t rtt,
int64_t rtt_ms,
int number_of_packets,
int64_t now_ms) {
const int kRoundingConstant = 128;
int packets_lost = (static_cast<int>(fraction_loss) * number_of_packets +
kRoundingConstant) >>
8;
UpdatePacketsLost(packets_lost, number_of_packets, now_ms);
UpdateRtt(rtt_ms, now_ms);
}
void SendSideBandwidthEstimation::UpdatePacketsLost(int packets_lost,
int number_of_packets,
int64_t now_ms) {
last_feedback_ms_ = now_ms;
if (first_report_time_ms_ == -1)
first_report_time_ms_ = now_ms;
// Update RTT if we were able to compute an RTT based on this RTCP.
// FlexFEC doesn't send RTCP SR, which means we won't be able to compute RTT.
if (rtt > 0)
last_round_trip_time_ms_ = rtt;
// Check sequence number diff and weight loss report
if (number_of_packets > 0) {
// Calculate number of lost packets.
const int num_lost_packets_Q8 = fraction_loss * number_of_packets;
// Accumulate reports.
lost_packets_since_last_loss_update_Q8_ += num_lost_packets_Q8;
lost_packets_since_last_loss_update_ += packets_lost;
expected_packets_since_last_loss_update_ += number_of_packets;
// Don't generate a loss rate until it can be based on enough packets.
@ -231,21 +236,22 @@ void SendSideBandwidthEstimation::UpdateReceiverBlock(uint8_t fraction_loss,
return;
has_decreased_since_last_fraction_loss_ = false;
last_fraction_loss_ = lost_packets_since_last_loss_update_Q8_ /
expected_packets_since_last_loss_update_;
int64_t lost_q8 = lost_packets_since_last_loss_update_ << 8;
int64_t expected = expected_packets_since_last_loss_update_;
last_fraction_loss_ = std::min<int>(lost_q8 / expected, 255);
// Reset accumulators.
lost_packets_since_last_loss_update_Q8_ = 0;
lost_packets_since_last_loss_update_ = 0;
expected_packets_since_last_loss_update_ = 0;
last_packet_report_ms_ = now_ms;
UpdateEstimate(now_ms);
}
UpdateUmaStats(now_ms, rtt, (fraction_loss * number_of_packets) >> 8);
UpdateUmaStatsPacketsLost(now_ms, packets_lost);
}
void SendSideBandwidthEstimation::UpdateUmaStats(int64_t now_ms,
int64_t rtt,
int lost_packets) {
void SendSideBandwidthEstimation::UpdateUmaStatsPacketsLost(int64_t now_ms,
int packets_lost) {
int bitrate_kbps = static_cast<int>((current_bitrate_bps_ + 500) / 1000);
for (size_t i = 0; i < kNumUmaRampupMetrics; ++i) {
if (!rampup_uma_stats_updated_[i] &&
@ -256,14 +262,12 @@ void SendSideBandwidthEstimation::UpdateUmaStats(int64_t now_ms,
}
}
if (IsInStartPhase(now_ms)) {
initially_lost_packets_ += lost_packets;
initially_lost_packets_ += packets_lost;
} else if (uma_update_state_ == kNoUpdate) {
uma_update_state_ = kFirstDone;
bitrate_at_2_seconds_kbps_ = bitrate_kbps;
RTC_HISTOGRAM_COUNTS("WebRTC.BWE.InitiallyLostPackets",
initially_lost_packets_, 0, 100, 50);
RTC_HISTOGRAM_COUNTS("WebRTC.BWE.InitialRtt", static_cast<int>(rtt), 0,
2000, 50);
RTC_HISTOGRAM_COUNTS("WebRTC.BWE.InitialBandwidthEstimate",
bitrate_at_2_seconds_kbps_, 0, 2000, 50);
} else if (uma_update_state_ == kFirstDone &&
@ -276,6 +280,19 @@ void SendSideBandwidthEstimation::UpdateUmaStats(int64_t now_ms,
}
}
void SendSideBandwidthEstimation::UpdateRtt(int64_t rtt_ms, int64_t now_ms) {
// Update RTT if we were able to compute an RTT based on this RTCP.
// FlexFEC doesn't send RTCP SR, which means we won't be able to compute RTT.
if (rtt_ms > 0)
last_round_trip_time_ms_ = rtt_ms;
if (!IsInStartPhase(now_ms) && uma_rtt_state_ == kNoUpdate) {
uma_rtt_state_ = kDone;
RTC_HISTOGRAM_COUNTS("WebRTC.BWE.InitialRtt", static_cast<int>(rtt_ms), 0,
2000, 50);
}
}
void SendSideBandwidthEstimation::UpdateEstimate(int64_t now_ms) {
uint32_t new_bitrate = current_bitrate_bps_;
// We trust the REMB and/or delay-based estimate during the first 2 seconds if
@ -357,7 +374,7 @@ void SendSideBandwidthEstimation::UpdateEstimate(int64_t now_ms) {
new_bitrate *= 0.8;
// Reset accumulators since we've already acted on missing feedback and
// shouldn't to act again on these old lost packets.
lost_packets_since_last_loss_update_Q8_ = 0;
lost_packets_since_last_loss_update_ = 0;
expected_packets_since_last_loss_update_ = 0;
last_timeout_ms_ = now_ms;
}
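
The loss accounting above works in Q8 fixed point: the RTCP receiver block carries fraction_loss as lost/256, the reworked UpdateReceiverBlock() converts that fraction into a packet count, and UpdatePacketsLost() later converts the accumulated counts back into a Q8 fraction capped at 255. A small self-contained round-trip example (values chosen for illustration; the main() below is not part of the CL):

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // RTCP reports loss as a Q8 fraction: 64/256 = 25% over 40 packets.
  uint8_t fraction_loss = 64;
  int number_of_packets = 40;

  // UpdateReceiverBlock(): convert the Q8 fraction to a packet count,
  // rounding to nearest via the +128 before the >> 8.
  const int kRoundingConstant = 128;
  int packets_lost = (static_cast<int>(fraction_loss) * number_of_packets +
                      kRoundingConstant) >> 8;
  // 64 * 40 + 128 = 2688; 2688 >> 8 = 10 packets.

  // UpdatePacketsLost(): convert the accumulated count back to a Q8 fraction,
  // clamped to the 255 maximum an 8-bit loss field can carry.
  int64_t lost_q8 = static_cast<int64_t>(packets_lost) << 8;
  int last_fraction_loss = std::min<int>(lost_q8 / number_of_packets, 255);

  std::printf("packets_lost=%d fraction=%d/256\n", packets_lost,
              last_fraction_loss);  // packets_lost=10 fraction=64/256
  return 0;
}
```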

View File

@ -42,10 +42,18 @@ class SendSideBandwidthEstimation {
// Call when we receive a RTCP message with a ReceiveBlock.
void UpdateReceiverBlock(uint8_t fraction_loss,
int64_t rtt,
int64_t rtt_ms,
int number_of_packets,
int64_t now_ms);
// Call when we receive a RTCP message with a ReceiveBlock.
void UpdatePacketsLost(int packets_lost,
int number_of_packets,
int64_t now_ms);
// Call when we receive a RTCP message with a ReceiveBlock.
void UpdateRtt(int64_t rtt, int64_t now_ms);
void SetBitrates(int send_bitrate,
int min_bitrate,
int max_bitrate);
@ -58,7 +66,7 @@ class SendSideBandwidthEstimation {
bool IsInStartPhase(int64_t now_ms) const;
void UpdateUmaStats(int64_t now_ms, int64_t rtt, int lost_packets);
void UpdateUmaStatsPacketsLost(int64_t now_ms, int packets_lost);
// Updates history of min bitrates.
// After this method returns min_bitrate_history_.front().second contains the
@ -72,7 +80,7 @@ class SendSideBandwidthEstimation {
std::deque<std::pair<int64_t, uint32_t> > min_bitrate_history_;
// incoming filters
int lost_packets_since_last_loss_update_Q8_;
int lost_packets_since_last_loss_update_;
int expected_packets_since_last_loss_update_;
uint32_t current_bitrate_bps_;
@ -95,6 +103,7 @@ class SendSideBandwidthEstimation {
int initially_lost_packets_;
int bitrate_at_2_seconds_kbps_;
UmaState uma_update_state_;
UmaState uma_rtt_state_;
std::vector<bool> rampup_uma_stats_updated_;
RtcEventLog* event_log_;
int64_t last_rtc_event_log_ms_;

View File

@ -15,7 +15,6 @@
namespace webrtc {
namespace {
constexpr int kWindowMs = 500;
constexpr int kDeltaTimeMs = 2000;
}
IntervalBudget::IntervalBudget(int initial_target_rate_kbps)
@ -35,7 +34,6 @@ void IntervalBudget::set_target_rate_kbps(int target_rate_kbps) {
}
void IntervalBudget::IncreaseBudget(int64_t delta_time_ms) {
RTC_DCHECK_LT(delta_time_ms, kDeltaTimeMs);
int bytes = target_rate_kbps_ * delta_time_ms / 8;
if (bytes_remaining_ < 0 || can_build_up_underuse_) {
// We overused last interval, compensate this interval.
@ -56,6 +54,8 @@ size_t IntervalBudget::bytes_remaining() const {
}
int IntervalBudget::budget_level_percent() const {
if (max_bytes_in_budget_ == 0)
return 0;
return bytes_remaining_ * 100 / max_bytes_in_budget_;
}

View File

@ -31,6 +31,7 @@ class MockPacedSender : public PacedSender {
bool retransmission));
MOCK_METHOD1(CreateProbeCluster, void(int));
MOCK_METHOD1(SetEstimatedBitrate, void(uint32_t));
MOCK_METHOD2(SetPacingRates, void(uint32_t, uint32_t));
MOCK_CONST_METHOD0(QueueInMs, int64_t());
MOCK_CONST_METHOD0(QueueInPackets, int());
MOCK_CONST_METHOD0(ExpectedQueueTimeMs, int64_t());

View File

@ -116,6 +116,7 @@ void PacedSender::Pause() {
paused_ = true;
packets_->SetPauseState(true, clock_->TimeInMilliseconds());
}
rtc::CritScope cs(&process_thread_lock_);
// Tell the process thread to call our TimeUntilNextProcess() method to get
// a new (longer) estimate for when to call Process().
if (process_thread_)
@ -130,6 +131,7 @@ void PacedSender::Resume() {
paused_ = false;
packets_->SetPauseState(false, clock_->TimeInMilliseconds());
}
rtc::CritScope cs(&process_thread_lock_);
// Tell the process thread to call our TimeUntilNextProcess() method to
// refresh the estimate for when to call Process().
if (process_thread_)
@ -167,6 +169,14 @@ void PacedSender::SetSendBitrateLimits(int min_send_bitrate_bps,
std::min(estimated_bitrate_bps_ / 1000, max_padding_bitrate_kbps_));
}
void PacedSender::SetPacingRates(uint32_t pacing_rate_bps,
uint32_t padding_rate_bps) {
rtc::CritScope cs(&critsect_);
RTC_DCHECK(pacing_rate_bps > 0);
pacing_bitrate_kbps_ = pacing_rate_bps / 1000;
padding_budget_->set_target_rate_kbps(padding_rate_bps / 1000);
}
void PacedSender::InsertPacket(RtpPacketSender::Priority priority,
uint32_t ssrc,
uint16_t sequence_number,
@ -174,8 +184,8 @@ void PacedSender::InsertPacket(RtpPacketSender::Priority priority,
size_t bytes,
bool retransmission) {
rtc::CritScope cs(&critsect_);
RTC_DCHECK(estimated_bitrate_bps_ > 0)
<< "SetEstimatedBitrate must be called before InsertPacket.";
RTC_DCHECK(pacing_bitrate_kbps_ > 0)
<< "SetPacingRate must be called before InsertPacket.";
int64_t now_ms = clock_->TimeInMilliseconds();
prober_->OnIncomingPacket(bytes);
@ -291,7 +301,7 @@ void PacedSender::Process() {
pacing_info = prober_->CurrentCluster();
recommended_probe_size = prober_->RecommendedMinProbeSize();
}
while (!packets_->Empty()) {
while (!packets_->Empty() && !paused_) {
// Since we need to release the lock in order to send, we first pop the
// element from the priority queue but keep it in storage, so that we can
// reinsert it if send fails.
@ -319,8 +329,9 @@ void PacedSender::Process() {
int padding_needed =
static_cast<int>(is_probing ? (recommended_probe_size - bytes_sent)
: padding_budget_->bytes_remaining());
if (padding_needed > 0)
if (padding_needed > 0) {
bytes_sent += SendPadding(padding_needed, pacing_info);
}
}
}
if (is_probing) {
@ -333,6 +344,7 @@ void PacedSender::Process() {
void PacedSender::ProcessThreadAttached(ProcessThread* process_thread) {
RTC_LOG(LS_INFO) << "ProcessThreadAttached 0x" << std::hex << process_thread;
rtc::CritScope cs(&process_thread_lock_);
process_thread_ = process_thread;
}

View File

@ -86,23 +86,16 @@ class PacedSender : public Pacer {
// effect.
void SetProbingEnabled(bool enabled);
// Sets the estimated capacity of the network. Must be called once before
// packets can be sent.
// |bitrate_bps| is our estimate of what we are allowed to send on average.
// We will pace out bursts of packets at a bitrate of
// |bitrate_bps| * kDefaultPaceMultiplier.
// Deprecated, SetPacingRates should be used instead.
void SetEstimatedBitrate(uint32_t bitrate_bps) override;
// Sets the minimum send bitrate and maximum padding bitrate requested by send
// streams.
// |min_send_bitrate_bps| might be higher that the estimated available network
// bitrate and if so, the pacer will send with |min_send_bitrate_bps|.
// |max_padding_bitrate_bps| might be higher than the estimate available
// network bitrate and if so, the pacer will send padding packets to reach
// the min of the estimated available bitrate and |max_padding_bitrate_bps|.
// Deprecated, SetPacingRates should be used instead.
void SetSendBitrateLimits(int min_send_bitrate_bps,
int max_padding_bitrate_bps);
// Sets the pacing rates. Must be called once before packets can be sent.
void SetPacingRates(uint32_t pacing_rate_bps,
uint32_t padding_rate_bps) override;
// Returns true if we send the packet now, else it will add the packet
// information to the queue and call TimeToSendPacket when it's time to send.
void InsertPacket(RtpPacketSender::Priority priority,
@ -131,12 +124,7 @@ class PacedSender : public Pacer {
// packets in the queue, given the current size and bitrate, ignoring prio.
virtual int64_t ExpectedQueueTimeMs() const;
// Returns time in milliseconds when the current application-limited region
// started or empty result if the sender is currently not application-limited.
//
// Application Limited Region (ALR) refers to operating in a state where the
// traffic on network is limited due to application not having enough
// traffic to meet the current channel capacity.
// Deprecated, alr detection will be moved out of the pacer.
virtual rtc::Optional<int64_t> GetApplicationLimitedRegionStartTime() const;
// Returns the number of milliseconds until the module want a worker thread
@ -148,6 +136,7 @@ class PacedSender : public Pacer {
// Called when the prober is associated with a process thread.
void ProcessThreadAttached(ProcessThread* process_thread) override;
// Deprecated, SetPacingRates should be used instead.
void SetPacingFactor(float pacing_factor);
void SetQueueTimeLimit(int limit_ms);
@ -195,9 +184,16 @@ class PacedSender : public Pacer {
const std::unique_ptr<PacketQueueInterface> packets_
RTC_PT_GUARDED_BY(critsect_);
uint64_t packet_counter_ RTC_GUARDED_BY(critsect_);
ProcessThread* process_thread_ = nullptr;
float pacing_factor_ RTC_GUARDED_BY(critsect_);
// Lock to avoid race when attaching process thread. This can happen due to
// the Call class setting network state on SendSideCongestionController, which
// in turn calls Pause/Resume on Pacedsender, before actually starting the
// pacer process thread. If SendSideCongestionController is running on a task
// queue separate from the thread used by Call, this causes a race.
rtc::CriticalSection process_thread_lock_;
ProcessThread* process_thread_ RTC_GUARDED_BY(process_thread_lock_) = nullptr;
int64_t queue_time_limit RTC_GUARDED_BY(critsect_);
bool account_for_audio_ RTC_GUARDED_BY(critsect_);
};
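
The header changes above, together with the test updates further down, show the behavioural shift: SetEstimatedBitrate() let the pacer apply kDefaultPaceMultiplier internally, while SetPacingRates() expects the caller to pass a fully computed pacing rate plus an explicit padding rate. A hedged sketch of the caller-side migration, where PacerSketch stands in for the Pacer interface and kPaceMultiplier = 2.5 matches the constant the updated tests define:

```cpp
#include <cstdint>

struct PacerSketch {  // stand-in for the Pacer interface in the diffs
  virtual void SetPacingRates(uint32_t pacing_rate_bps,
                              uint32_t padding_rate_bps) = 0;
  virtual ~PacerSketch() = default;
};

void ConfigurePacing(PacerSketch* pacer, uint32_t target_bitrate_bps,
                     uint32_t max_padding_bitrate_bps) {
  constexpr float kPaceMultiplier = 2.5f;
  // Previously: pacer->SetEstimatedBitrate(target_bitrate_bps) and
  // pacer->SetSendBitrateLimits(...), with the multiplier applied inside the
  // pacer. Now the caller applies the multiplier and passes the padding rate.
  pacer->SetPacingRates(
      static_cast<uint32_t>(target_bitrate_bps * kPaceMultiplier),
      max_padding_bitrate_bps);
}
```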

View File

@ -31,6 +31,8 @@ constexpr unsigned kSecondClusterBps = 1800000;
// values. This results in probing slightly higher than the target bitrate.
// For 1.8 Mbps, this comes to be about 120 kbps with 1200 probe packets.
constexpr int kBitrateProbingError = 150000;
const float kPaceMultiplier = 2.5f;
} // namespace
namespace webrtc {
@ -116,7 +118,7 @@ class PacedSenderTest : public testing::TestWithParam<std::string> {
// have to enable probing, either by creating a new PacedSender instance or
// by calling SetProbingEnabled(true).
send_bucket_->SetProbingEnabled(false);
send_bucket_->SetEstimatedBitrate(kTargetBitrateBps);
send_bucket_->SetPacingRates(kTargetBitrateBps * kPaceMultiplier, 0);
clock_.AdvanceTimeMilliseconds(send_bucket_->TimeUntilNextProcess());
}
@ -171,7 +173,7 @@ TEST_P(PacedSenderTest, QueuePacket) {
// interval. (network capacity * multiplier / (8 bits per byte *
// (packet size * #send intervals per second)
const size_t packets_to_send =
kTargetBitrateBps * PacedSender::kDefaultPaceMultiplier / (8 * 250 * 200);
kTargetBitrateBps * kPaceMultiplier / (8 * 250 * 200);
for (size_t i = 0; i < packets_to_send; ++i) {
SendAndExpectPacket(PacedSender::kNormalPriority, ssrc, sequence_number++,
clock_.TimeInMilliseconds(), 250, false);
@ -220,7 +222,7 @@ TEST_P(PacedSenderTest, PaceQueuedPackets) {
// interval. (network capacity * multiplier / (8 bits per byte *
// (packet size * #send intervals per second)
const size_t packets_to_send_per_interval =
kTargetBitrateBps * PacedSender::kDefaultPaceMultiplier / (8 * 250 * 200);
kTargetBitrateBps * kPaceMultiplier / (8 * 250 * 200);
for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
SendAndExpectPacket(PacedSender::kNormalPriority, ssrc, sequence_number++,
clock_.TimeInMilliseconds(), 250, false);
@ -305,14 +307,14 @@ TEST_P(PacedSenderTest, Padding) {
uint32_t ssrc = 12345;
uint16_t sequence_number = 1234;
send_bucket_->SetEstimatedBitrate(kTargetBitrateBps);
send_bucket_->SetSendBitrateLimits(kTargetBitrateBps, kTargetBitrateBps);
send_bucket_->SetPacingRates(kTargetBitrateBps * kPaceMultiplier,
kTargetBitrateBps);
// Due to the multiplicative factor we can send 5 packets during a send
// interval. (network capacity * multiplier / (8 bits per byte *
// (packet size * #send intervals per second)
const size_t packets_to_send_per_interval =
kTargetBitrateBps * PacedSender::kDefaultPaceMultiplier / (8 * 250 * 200);
kTargetBitrateBps * kPaceMultiplier / (8 * 250 * 200);
for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
SendAndExpectPacket(PacedSender::kNormalPriority, ssrc, sequence_number++,
clock_.TimeInMilliseconds(), 250, false);
@ -342,8 +344,8 @@ TEST_P(PacedSenderTest, Padding) {
}
TEST_P(PacedSenderTest, NoPaddingBeforeNormalPacket) {
send_bucket_->SetEstimatedBitrate(kTargetBitrateBps);
send_bucket_->SetSendBitrateLimits(kTargetBitrateBps, kTargetBitrateBps);
send_bucket_->SetPacingRates(kTargetBitrateBps * kPaceMultiplier,
kTargetBitrateBps);
EXPECT_CALL(callback_, TimeToSendPadding(_, _)).Times(0);
send_bucket_->Process();
@ -370,8 +372,8 @@ TEST_P(PacedSenderTest, VerifyPaddingUpToBitrate) {
int64_t capture_time_ms = 56789;
const int kTimeStep = 5;
const int64_t kBitrateWindow = 100;
send_bucket_->SetEstimatedBitrate(kTargetBitrateBps);
send_bucket_->SetSendBitrateLimits(kTargetBitrateBps, kTargetBitrateBps);
send_bucket_->SetPacingRates(kTargetBitrateBps * kPaceMultiplier,
kTargetBitrateBps);
int64_t start_time = clock_.TimeInMilliseconds();
while (clock_.TimeInMilliseconds() - start_time < kBitrateWindow) {
@ -398,11 +400,8 @@ TEST_P(PacedSenderTest, VerifyAverageBitrateVaryingMediaPayload) {
PacedSenderPadding callback;
send_bucket_.reset(new PacedSender(&clock_, &callback, nullptr));
send_bucket_->SetProbingEnabled(false);
send_bucket_->SetEstimatedBitrate(kTargetBitrateBps);
send_bucket_->SetSendBitrateLimits(
0 /*allocated_bitrate_bps*/,
kTargetBitrateBps * 2 /* max_padding_bitrate_bps */);
send_bucket_->SetPacingRates(kTargetBitrateBps * kPaceMultiplier,
kTargetBitrateBps);
int64_t start_time = clock_.TimeInMilliseconds();
size_t media_bytes = 0;
@ -433,7 +432,7 @@ TEST_P(PacedSenderTest, Priority) {
// interval. (network capacity * multiplier / (8 bits per byte *
// (packet size * #send intervals per second)
const size_t packets_to_send_per_interval =
kTargetBitrateBps * PacedSender::kDefaultPaceMultiplier / (8 * 250 * 200);
kTargetBitrateBps * kPaceMultiplier / (8 * 250 * 200);
for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
SendAndExpectPacket(PacedSender::kNormalPriority, ssrc, sequence_number++,
clock_.TimeInMilliseconds(), 250, false);
@ -487,7 +486,7 @@ TEST_P(PacedSenderTest, RetransmissionPriority) {
// interval. (network capacity * multiplier / (8 bits per byte *
// (packet size * #send intervals per second)
const size_t packets_to_send_per_interval =
kTargetBitrateBps * PacedSender::kDefaultPaceMultiplier / (8 * 250 * 200);
kTargetBitrateBps * kPaceMultiplier / (8 * 250 * 200);
send_bucket_->Process();
EXPECT_EQ(0u, send_bucket_->QueueSizePackets());
@ -548,7 +547,7 @@ TEST_P(PacedSenderTest, HighPrioDoesntAffectBudget) {
// interval. (network capacity * multiplier / (8 bits per byte *
// (packet size * #send intervals per second)
const size_t packets_to_send_per_interval =
kTargetBitrateBps * PacedSender::kDefaultPaceMultiplier / (8 * 250 * 200);
kTargetBitrateBps * kPaceMultiplier / (8 * 250 * 200);
for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
SendAndExpectPacket(PacedSender::kLowPriority, ssrc, sequence_number++,
clock_.TimeInMilliseconds(), 250, false);
@ -582,7 +581,7 @@ TEST_P(PacedSenderTest, Pause) {
// interval. (network capacity * multiplier / (8 bits per byte *
// (packet size * #send intervals per second)
const size_t packets_to_send_per_interval =
kTargetBitrateBps * PacedSender::kDefaultPaceMultiplier / (8 * 250 * 200);
kTargetBitrateBps * kPaceMultiplier / (8 * 250 * 200);
for (size_t i = 0; i < packets_to_send_per_interval; ++i) {
SendAndExpectPacket(PacedSender::kNormalPriority, ssrc, sequence_number++,
clock_.TimeInMilliseconds(), 250, false);
@ -741,10 +740,10 @@ TEST_P(PacedSenderTest, ExpectedQueueTimeMs) {
uint16_t sequence_number = 1234;
const size_t kNumPackets = 60;
const size_t kPacketSize = 1200;
const int32_t kMaxBitrate = PacedSender::kDefaultPaceMultiplier * 30000;
const int32_t kMaxBitrate = kPaceMultiplier * 30000;
EXPECT_EQ(0, send_bucket_->ExpectedQueueTimeMs());
send_bucket_->SetEstimatedBitrate(30000);
send_bucket_->SetPacingRates(30000 * kPaceMultiplier, 0);
for (size_t i = 0; i < kNumPackets; ++i) {
SendAndExpectPacket(PacedSender::kNormalPriority, ssrc, sequence_number++,
clock_.TimeInMilliseconds(), kPacketSize, false);
@ -778,7 +777,7 @@ TEST_P(PacedSenderTest, QueueTimeGrowsOverTime) {
uint16_t sequence_number = 1234;
EXPECT_EQ(0, send_bucket_->QueueInMs());
send_bucket_->SetEstimatedBitrate(30000);
send_bucket_->SetPacingRates(30000 * kPaceMultiplier, 0);
SendAndExpectPacket(PacedSender::kNormalPriority,
ssrc,
sequence_number,
@ -802,7 +801,7 @@ TEST_P(PacedSenderTest, ProbingWithInsertedPackets) {
send_bucket_.reset(new PacedSender(&clock_, &packet_sender, nullptr));
send_bucket_->CreateProbeCluster(kFirstClusterBps);
send_bucket_->CreateProbeCluster(kSecondClusterBps);
send_bucket_->SetEstimatedBitrate(kInitialBitrateBps);
send_bucket_->SetPacingRates(kInitialBitrateBps * kPaceMultiplier, 0);
for (int i = 0; i < 10; ++i) {
send_bucket_->InsertPacket(PacedSender::kNormalPriority, ssrc,
@ -847,7 +846,7 @@ TEST_P(PacedSenderTest, ProbingWithPaddingSupport) {
PacedSenderProbing packet_sender;
send_bucket_.reset(new PacedSender(&clock_, &packet_sender, nullptr));
send_bucket_->CreateProbeCluster(kFirstClusterBps);
send_bucket_->SetEstimatedBitrate(kInitialBitrateBps);
send_bucket_->SetPacingRates(kInitialBitrateBps * kPaceMultiplier, 0);
for (int i = 0; i < 3; ++i) {
send_bucket_->InsertPacket(PacedSender::kNormalPriority, ssrc,
@ -935,8 +934,7 @@ TEST_P(PacedSenderTest, PaddingOveruse) {
const size_t kPacketSize = 1200;
send_bucket_->Process();
send_bucket_->SetEstimatedBitrate(60000);
send_bucket_->SetSendBitrateLimits(60000, 0);
send_bucket_->SetPacingRates(60000 * kPaceMultiplier, 0);
SendAndExpectPacket(PacedSender::kNormalPriority, ssrc, sequence_number++,
clock_.TimeInMilliseconds(), kPacketSize, false);
@ -945,7 +943,7 @@ TEST_P(PacedSenderTest, PaddingOveruse) {
// Add 30kbit padding. When increasing budget, media budget will increase from
// negative (overuse) while padding budget will increase from 0.
clock_.AdvanceTimeMilliseconds(5);
send_bucket_->SetSendBitrateLimits(60000, 30000);
send_bucket_->SetPacingRates(60000 * kPaceMultiplier, 30000);
SendAndExpectPacket(PacedSender::kNormalPriority, ssrc, sequence_number++,
clock_.TimeInMilliseconds(), kPacketSize, false);
@ -963,7 +961,7 @@ TEST_F(PacedSenderTest, AverageQueueTime) {
const size_t kPacketSize = 1200;
const int kBitrateBps = 10 * kPacketSize * 8; // 10 packets per second.
send_bucket_->SetEstimatedBitrate(kBitrateBps);
send_bucket_->SetPacingRates(kBitrateBps * kPaceMultiplier, 0);
EXPECT_EQ(0, send_bucket_->AverageQueueTimeMs());
@ -1008,7 +1006,8 @@ TEST_P(PacedSenderTest, ProbeClusterId) {
uint16_t sequence_number = 1234;
const size_t kPacketSize = 1200;
send_bucket_->SetSendBitrateLimits(kTargetBitrateBps, kTargetBitrateBps);
send_bucket_->SetPacingRates(kTargetBitrateBps * kPaceMultiplier,
kTargetBitrateBps);
send_bucket_->SetProbingEnabled(true);
for (int i = 0; i < 10; ++i) {
send_bucket_->InsertPacket(PacedSender::kNormalPriority, ssrc,
@ -1054,7 +1053,8 @@ TEST_P(PacedSenderTest, AvoidBusyLoopOnSendFailure) {
uint16_t sequence_number = 1234;
const size_t kPacketSize = kFirstClusterBps / (8000 / 10);
send_bucket_->SetSendBitrateLimits(kTargetBitrateBps, kTargetBitrateBps);
send_bucket_->SetPacingRates(kTargetBitrateBps * kPaceMultiplier,
kTargetBitrateBps);
send_bucket_->SetProbingEnabled(true);
send_bucket_->InsertPacket(PacedSender::kNormalPriority, ssrc,
sequence_number, clock_.TimeInMilliseconds(),

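The repeated expression kTargetBitrateBps * kPaceMultiplier / (8 * 250 * 200) in these tests is the per-interval packet budget: paced bits per second divided by 8 bits per byte, 250 bytes per packet, and 200 five-millisecond process intervals per second. A worked example, assuming kTargetBitrateBps is 800 kbps (its value is not shown in this diff) so that the tests' "5 packets per send interval" comments hold:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  const unsigned kTargetBitrateBps = 800000;  // assumed, not from the diff
  const float kPaceMultiplier = 2.5f;         // as defined in the tests above
  const size_t packets_to_send_per_interval =
      kTargetBitrateBps * kPaceMultiplier / (8 * 250 * 200);
  // 800000 * 2.5 = 2,000,000 paced bits/s; per 5 ms interval that is
  // 2,000,000 / 200 = 10,000 bits = 1250 bytes, i.e. five 250-byte packets.
  std::printf("%zu packets per 5 ms interval\n", packets_to_send_per_interval);
  return 0;
}
```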
View File

@ -18,6 +18,8 @@ namespace webrtc {
class Pacer : public Module, public RtpPacketSender {
public:
virtual void SetEstimatedBitrate(uint32_t bitrate_bps) {}
virtual void SetPacingRates(uint32_t pacing_rate_bps,
uint32_t padding_rate_bps) {}
virtual void SetEstimatedBitrateAndCongestionWindow(
uint32_t bitrate_bps,
bool in_probe_rtt,

View File

@ -15,7 +15,6 @@
#include <vector>
#include "modules/include/module_common_types.h"
#include "modules/pacing/alr_detector.h"
#include "modules/pacing/bitrate_prober.h"
#include "modules/pacing/interval_budget.h"
#include "modules/utility/include/process_thread.h"

View File

@ -98,7 +98,6 @@ bool PacketRouter::TimeToSendPacket(uint32_t ssrc,
int64_t capture_timestamp,
bool retransmission,
const PacedPacketInfo& pacing_info) {
RTC_DCHECK_RUNS_SERIALIZED(&pacer_race_);
rtc::CritScope cs(&modules_crit_);
for (auto* rtp_module : rtp_send_modules_) {
if (!rtp_module->SendingMedia())
@ -114,7 +113,6 @@ bool PacketRouter::TimeToSendPacket(uint32_t ssrc,
size_t PacketRouter::TimeToSendPadding(size_t bytes_to_send,
const PacedPacketInfo& pacing_info) {
RTC_DCHECK_RUNS_SERIALIZED(&pacer_race_);
size_t total_bytes_sent = 0;
rtc::CritScope cs(&modules_crit_);
// Rtp modules are ordered by which stream can most benefit from padding.
@ -223,7 +221,6 @@ bool PacketRouter::SendRemb(int64_t bitrate_bps,
}
bool PacketRouter::SendTransportFeedback(rtcp::TransportFeedback* packet) {
RTC_DCHECK_RUNS_SERIALIZED(&pacer_race_);
rtc::CritScope cs(&modules_crit_);
// Prefer send modules.
for (auto* rtp_module : rtp_send_modules_) {

View File

@ -91,7 +91,6 @@ class PacketRouter : public PacedSender::PacketSender,
void UnsetActiveRembModule() RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_crit_);
void DetermineActiveRembModule() RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_crit_);
rtc::RaceChecker pacer_race_;
rtc::CriticalSection modules_crit_;
// Rtp and Rtcp modules of the rtp senders.
std::list<RtpRtcp*> rtp_send_modules_ RTC_GUARDED_BY(modules_crit_);

View File

@ -477,47 +477,47 @@ TEST_P(BweSimulation, Evaluation8) {
RunPauseResumeFlows(GetParam());
}
// Following test cases begin with "GccComparison" run the
// evaluation test cases for both GCC and other calling RMCAT.
// Test cases beginning with "GoogCcComparison" run the evaluation scenarios
// for both GoogCc and the other RMCAT candidate being evaluated.
TEST_P(BweSimulation, GccComparison1) {
TEST_P(BweSimulation, GoogCcComparison1) {
RunVariableCapacity1SingleFlow(GetParam());
BweTest gcc_test(false);
gcc_test.RunVariableCapacity1SingleFlow(kSendSideEstimator);
BweTest goog_cc_test(false);
goog_cc_test.RunVariableCapacity1SingleFlow(kSendSideEstimator);
}
TEST_P(BweSimulation, GccComparison2) {
TEST_P(BweSimulation, GoogCcComparison2) {
const size_t kNumFlows = 2;
RunVariableCapacity2MultipleFlows(GetParam(), kNumFlows);
BweTest gcc_test(false);
gcc_test.RunVariableCapacity2MultipleFlows(kSendSideEstimator, kNumFlows);
BweTest goog_cc_test(false);
goog_cc_test.RunVariableCapacity2MultipleFlows(kSendSideEstimator, kNumFlows);
}
TEST_P(BweSimulation, GccComparison3) {
TEST_P(BweSimulation, GoogCcComparison3) {
RunBidirectionalFlow(GetParam());
BweTest gcc_test(false);
gcc_test.RunBidirectionalFlow(kSendSideEstimator);
BweTest goog_cc_test(false);
goog_cc_test.RunBidirectionalFlow(kSendSideEstimator);
}
TEST_P(BweSimulation, GccComparison4) {
TEST_P(BweSimulation, GoogCcComparison4) {
RunSelfFairness(GetParam());
BweTest gcc_test(false);
gcc_test.RunSelfFairness(GetParam());
BweTest goog_cc_test(false);
goog_cc_test.RunSelfFairness(GetParam());
}
TEST_P(BweSimulation, GccComparison5) {
TEST_P(BweSimulation, GoogCcComparison5) {
RunRoundTripTimeFairness(GetParam());
BweTest gcc_test(false);
gcc_test.RunRoundTripTimeFairness(kSendSideEstimator);
BweTest goog_cc_test(false);
goog_cc_test.RunRoundTripTimeFairness(kSendSideEstimator);
}
TEST_P(BweSimulation, GccComparison6) {
TEST_P(BweSimulation, GoogCcComparison6) {
RunLongTcpFairness(GetParam());
BweTest gcc_test(false);
gcc_test.RunLongTcpFairness(kSendSideEstimator);
BweTest goog_cc_test(false);
goog_cc_test.RunLongTcpFairness(kSendSideEstimator);
}
TEST_P(BweSimulation, GccComparison7) {
TEST_P(BweSimulation, GoogCcComparison7) {
const int kNumTcpFiles = 10;
std::vector<int> tcp_file_sizes_bytes =
@ -528,24 +528,24 @@ TEST_P(BweSimulation, GccComparison7) {
RunMultipleShortTcpFairness(GetParam(), tcp_file_sizes_bytes,
tcp_starting_times_ms);
BweTest gcc_test(false);
gcc_test.RunMultipleShortTcpFairness(kSendSideEstimator, tcp_file_sizes_bytes,
tcp_starting_times_ms);
BweTest goog_cc_test(false);
goog_cc_test.RunMultipleShortTcpFairness(
kSendSideEstimator, tcp_file_sizes_bytes, tcp_starting_times_ms);
}
TEST_P(BweSimulation, GccComparison8) {
TEST_P(BweSimulation, GoogCcComparison8) {
RunPauseResumeFlows(GetParam());
BweTest gcc_test(false);
gcc_test.RunPauseResumeFlows(kSendSideEstimator);
BweTest goog_cc_test(false);
goog_cc_test.RunPauseResumeFlows(kSendSideEstimator);
}
TEST_P(BweSimulation, GccComparisonChoke) {
TEST_P(BweSimulation, GoogCcComparisonChoke) {
int array[] = {1000, 500, 1000};
std::vector<int> capacities_kbps(array, array + 3);
RunChoke(GetParam(), capacities_kbps);
BweTest gcc_test(false);
gcc_test.RunChoke(kSendSideEstimator, capacities_kbps);
BweTest goog_cc_test(false);
goog_cc_test.RunChoke(kSendSideEstimator, capacities_kbps);
}
} // namespace bwe

View File

@ -181,7 +181,8 @@ enum BandwidthEstimatorType {
kBbrEstimator
};
const char* const bwe_names[] = {"Null", "NADA", "REMB", "GCC", "TCP", "BBR"};
const char* const bwe_names[] = {"Null", "NADA", "REMB",
"GoogCc", "TCP", "BBR"};
int64_t GetAbsSendTimeInMs(uint32_t abs_send_time);

View File

@ -24,6 +24,9 @@
namespace webrtc {
namespace testing {
namespace bwe {
namespace {
const float kPaceMultiplier = 2.5f;
}
void PacketSender::Pause() {
running_ = false;
@ -164,7 +167,7 @@ PacedVideoSender::PacedVideoSender(PacketProcessorListener* listener,
? static_cast<Pacer*>(new BbrPacedSender(&clock_, this, nullptr))
: static_cast<Pacer*>(new PacedSender(&clock_, this, nullptr))) {
modules_.push_back(pacer_.get());
pacer_->SetEstimatedBitrate(source->bits_per_second());
pacer_->SetPacingRates(source->bits_per_second() * kPaceMultiplier, 0);
}
PacedVideoSender::~PacedVideoSender() {
@ -312,7 +315,7 @@ void PacedVideoSender::OnNetworkChanged(uint32_t target_bitrate_bps,
uint8_t fraction_lost,
int64_t rtt) {
VideoSender::OnNetworkChanged(target_bitrate_bps, fraction_lost, rtt);
pacer_->SetEstimatedBitrate(target_bitrate_bps);
pacer_->SetPacingRates(target_bitrate_bps * kPaceMultiplier, 0);
}
void PacedVideoSender::OnNetworkChanged(uint32_t bitrate_for_encoder_bps,

View File

@ -231,7 +231,6 @@ if (!build_with_chromium) {
"../modules/audio_coding:ana_debug_dump_proto",
"../modules/audio_coding:audio_network_adaptor",
"../modules/audio_coding:neteq_tools",
"../modules/congestion_controller:estimators",
"../modules/rtp_rtcp:rtp_rtcp_format",
"../rtc_base:checks",
"../rtc_base:rtc_base_approved",
@ -240,6 +239,8 @@ if (!build_with_chromium) {
# TODO(kwiberg): Remove this dependency.
"../api/audio_codecs:audio_codecs_api",
"../modules/congestion_controller",
"../modules/congestion_controller:delay_based_bwe",
"../modules/congestion_controller:estimators",
"../modules/pacing",
"../modules/rtp_rtcp",
"../system_wrappers:system_wrappers_default",

View File

@ -33,6 +33,7 @@
#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
#include "modules/congestion_controller/acknowledged_bitrate_estimator.h"
#include "modules/congestion_controller/bitrate_estimator.h"
#include "modules/congestion_controller/delay_based_bwe.h"
#include "modules/congestion_controller/include/receive_side_congestion_controller.h"
#include "modules/congestion_controller/include/send_side_congestion_controller.h"
#include "modules/include/module_common_types.h"