Use bayesian estimate of acked bitrate.

This helps a lot to avoid reducing the bitrate too quickly when there's a short period with very few packets delivered, followed by the rate resuming at the regular level. It specifically prevents the BWE from dropping to super low values in response to delay spikes.

BUG=webrtc:6566
R=terelius@webrtc.org

Review URL: https://codereview.webrtc.org/2422063002 .

Cr-Commit-Position: refs/heads/master@{#14802}
This commit is contained in:
Stefan Holmer
2016-10-27 17:19:20 +02:00
parent 9890a5861f
commit 492ee28b73
7 changed files with 189 additions and 62 deletions

View File

@ -990,9 +990,10 @@ void EventLogAnalyzer::CreateBweSimulationGraph(Plot* plot) {
return std::numeric_limits<int64_t>::max();
};
RateStatistics acked_bitrate(1000, 8000);
RateStatistics acked_bitrate(250, 8000);
int64_t time_us = std::min(NextRtpTime(), NextRtcpTime());
int64_t last_update_us = 0;
while (time_us != std::numeric_limits<int64_t>::max()) {
clock.AdvanceTimeMicroseconds(time_us - clock.TimeInMicroseconds());
if (clock.TimeInMicroseconds() >= NextRtcpTime()) {
@ -1037,11 +1038,13 @@ void EventLogAnalyzer::CreateBweSimulationGraph(Plot* plot) {
RTC_DCHECK_EQ(clock.TimeInMicroseconds(), NextProcessTime());
cc.Process();
}
if (observer.GetAndResetBitrateUpdated()) {
if (observer.GetAndResetBitrateUpdated() ||
time_us - last_update_us >= 1e6) {
uint32_t y = observer.last_bitrate_bps() / 1000;
float x = static_cast<float>(clock.TimeInMicroseconds() - begin_time_) /
1000000;
time_series.points.emplace_back(x, y);
last_update_us = time_us;
}
time_us = std::min({NextRtpTime(), NextRtcpTime(), NextProcessTime()});
}