Replace DataSize and DataRate factories with newer versions

This is a search-and-replace change:
find . -type f \( -name "*.h" -o -name "*.cc" \) | xargs sed -i -e "s/DataSize::Bytes<\(.*\)>()/DataSize::Bytes(\1)/g"
find . -type f \( -name "*.h" -o -name "*.cc" \) | xargs sed -i -e "s/DataSize::bytes/DataSize::Bytes/g"
find . -type f \( -name "*.h" -o -name "*.cc" \) | xargs sed -i -e "s/DataRate::BitsPerSec<\(.*\)>()/DataRate::BitsPerSec(\1)/g"
find . -type f \( -name "*.h" -o -name "*.cc" \) | xargs sed -i -e "s/DataRate::BytesPerSec<\(.*\)>()/DataRate::BytesPerSec(\1)/g"
find . -type f \( -name "*.h" -o -name "*.cc" \) | xargs sed -i -e "s/DataRate::KilobitsPerSec<\(.*\)>()/DataRate::KilobitsPerSec(\1)/g"
find . -type f \( -name "*.h" -o -name "*.cc" \) | xargs sed -i -e "s/DataRate::bps/DataRate::BitsPerSec/g"
find . -type f \( -name "*.h" -o -name "*.cc" \) | xargs sed -i -e "s/DataRate::kbps/DataRate::KilobitsPerSec/g"
git cl format
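
For example, a call site that used to read (illustrative variable names; the
constants 1200 bytes and 30000 kbps appear in the diff below, and the snippet
assumes api/units/data_size.h and api/units/data_rate.h are included)

  DataSize packet_size = DataSize::Bytes<1200>();  // old template factory
  DataRate max_rate = DataRate::kbps(30000);       // old lowercase factory

is rewritten by the commands above to

  DataSize packet_size = DataSize::Bytes(1200);
  DataRate max_rate = DataRate::KilobitsPerSec(30000);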

Bug: webrtc:9709
Change-Id: I65aaca69474ba038c1fe2dd8dc30d3f8e7b94c29
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/168647
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Commit-Queue: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#30545}
Danil Chapovalov authored on 2020-02-17 18:46:07 +01:00, committed by Commit Bot
parent 701bd172d8
commit cad3e0e2fa
102 changed files with 947 additions and 798 deletions


@@ -73,7 +73,7 @@ AimdRateControl::AimdRateControl(const WebRtcKeyValueConfig* key_value_config)
 AimdRateControl::AimdRateControl(const WebRtcKeyValueConfig* key_value_config,
                                  bool send_side)
     : min_configured_bitrate_(congestion_controller::GetMinBitrate()),
-      max_configured_bitrate_(DataRate::kbps(30000)),
+      max_configured_bitrate_(DataRate::KilobitsPerSec(30000)),
       current_bitrate_(max_configured_bitrate_),
       latest_estimated_throughput_(current_bitrate_),
       link_capacity_(),
@@ -137,7 +137,7 @@ bool AimdRateControl::ValidEstimate() const {
 TimeDelta AimdRateControl::GetFeedbackInterval() const {
   // Estimate how often we can send RTCP if we allocate up to 5% of bandwidth
   // to feedback.
-  const DataSize kRtcpSize = DataSize::bytes(80);
+  const DataSize kRtcpSize = DataSize::Bytes(80);
   const DataRate rtcp_bitrate = current_bitrate_ * 0.05;
   const TimeDelta interval = kRtcpSize / rtcp_bitrate;
   const TimeDelta kMinFeedbackInterval = TimeDelta::Millis(200);
@@ -165,7 +165,7 @@ bool AimdRateControl::InitialTimeToReduceFurther(Timestamp at_time) const {
   if (!initial_backoff_interval_) {
     return ValidEstimate() &&
            TimeToReduceFurther(at_time,
-                               LatestEstimate() / 2 - DataRate::bps(1));
+                               LatestEstimate() / 2 - DataRate::BitsPerSec(1));
   }
   // TODO(terelius): We could use the RTT (clamped to suitable limits) instead
   // of a fixed bitrate_reduction_interval.
@@ -232,7 +232,7 @@ double AimdRateControl::GetNearMaxIncreaseRateBpsPerSecond() const {
   RTC_DCHECK(!current_bitrate_.IsZero());
   const TimeDelta kFrameInterval = TimeDelta::Seconds(1) / 30;
   DataSize frame_size = current_bitrate_ * kFrameInterval;
-  const DataSize kPacketSize = DataSize::bytes(1200);
+  const DataSize kPacketSize = DataSize::Bytes(1200);
   double packets_per_frame = std::ceil(frame_size / kPacketSize);
   DataSize avg_packet_size = frame_size / packets_per_frame;
@@ -380,7 +380,7 @@ DataRate AimdRateControl::ClampBitrate(DataRate new_bitrate,
   // We allow a bit more lag at very low rates to not too easily get stuck if
   // the encoder produces uneven outputs.
   const DataRate max_bitrate =
-      1.5 * estimated_throughput + DataRate::kbps(10);
+      1.5 * estimated_throughput + DataRate::KilobitsPerSec(10);
   if (new_bitrate > current_bitrate_ && new_bitrate > max_bitrate) {
     new_bitrate = std::max(current_bitrate_, max_bitrate);
   }
@@ -404,7 +404,7 @@ DataRate AimdRateControl::MultiplicativeRateIncrease(
     alpha = pow(alpha, std::min(time_since_last_update.seconds<double>(), 1.0));
   }
   DataRate multiplicative_increase =
-      std::max(current_bitrate * (alpha - 1.0), DataRate::bps(1000));
+      std::max(current_bitrate * (alpha - 1.0), DataRate::BitsPerSec(1000));
   return multiplicative_increase;
 }
@@ -413,7 +413,7 @@ DataRate AimdRateControl::AdditiveRateIncrease(Timestamp at_time,
   double time_period_seconds = (at_time - last_time).seconds<double>();
   double data_rate_increase_bps =
       GetNearMaxIncreaseRateBpsPerSecond() * time_period_seconds;
-  return DataRate::bps(data_rate_increase_bps);
+  return DataRate::BitsPerSec(data_rate_increase_bps);
 }
 
 void AimdRateControl::ChangeState(const RateControlInput& input,

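The GetFeedbackInterval hunk above leans on the arithmetic the unit types
provide: a DataSize divided by a DataRate yields a TimeDelta. A rough sketch of
that calculation with the new factory names follows; the 1 Mbps figure and the
helper name ExampleFeedbackInterval are illustrative, not taken from the diff.

  #include "api/units/data_rate.h"
  #include "api/units/data_size.h"
  #include "api/units/time_delta.h"

  webrtc::TimeDelta ExampleFeedbackInterval() {
    // Reserve 5% of an assumed 1 Mbps estimate for RTCP feedback.
    const webrtc::DataRate rtcp_bitrate =
        webrtc::DataRate::KilobitsPerSec(1000) * 0.05;  // 50 kbps
    // One 80-byte report at 50 kbps takes 640 bits / 50000 bps = 12.8 ms;
    // the real code then clamps this to the 200 ms minimum shown above.
    return webrtc::DataSize::Bytes(80) / rtcp_bitrate;
  }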

@@ -47,7 +47,7 @@ AimdRateControlStates CreateAimdRateControlStates(bool send_side = false) {
 absl::optional<DataRate> OptionalRateFromOptionalBps(
     absl::optional<int> bitrate_bps) {
   if (bitrate_bps) {
-    return DataRate::bps(*bitrate_bps);
+    return DataRate::BitsPerSec(*bitrate_bps);
   } else {
     return absl::nullopt;
   }
@@ -61,7 +61,7 @@ void UpdateRateControl(const AimdRateControlStates& states,
   states.aimd_rate_control->Update(&input, Timestamp::Millis(now_ms));
 }
 
 void SetEstimate(const AimdRateControlStates& states, int bitrate_bps) {
-  states.aimd_rate_control->SetEstimate(DataRate::bps(bitrate_bps),
+  states.aimd_rate_control->SetEstimate(DataRate::BitsPerSec(bitrate_bps),
                                         states.simulated_clock->CurrentTime());
 }
@@ -161,7 +161,7 @@ TEST(AimdRateControlTest, BweNotLimitedByDecreasingAckedBitrate) {
 TEST(AimdRateControlTest, DefaultPeriodUntilFirstOveruse) {
   // Smoothing experiment disabled
   auto states = CreateAimdRateControlStates();
-  states.aimd_rate_control->SetStartBitrate(DataRate::kbps(300));
+  states.aimd_rate_control->SetStartBitrate(DataRate::KilobitsPerSec(300));
   EXPECT_EQ(kDefaultPeriodMsNoSmoothingExp,
             states.aimd_rate_control->GetExpectedBandwidthPeriod().ms());
   states.simulated_clock->AdvanceTimeMilliseconds(100);
@@ -175,7 +175,7 @@ TEST(AimdRateControlTest, MinPeriodUntilFirstOveruseSmoothingExp) {
   // Smoothing experiment enabled
   test::ScopedFieldTrials override_field_trials(kSmoothingExpFieldTrial);
   auto states = CreateAimdRateControlStates();
-  states.aimd_rate_control->SetStartBitrate(DataRate::kbps(300));
+  states.aimd_rate_control->SetStartBitrate(DataRate::KilobitsPerSec(300));
   EXPECT_EQ(kMinBwePeriodMsSmoothingExp,
             states.aimd_rate_control->GetExpectedBandwidthPeriod().ms());
   states.simulated_clock->AdvanceTimeMilliseconds(100);


@@ -23,7 +23,7 @@ int GetMinBitrateBps() {
 }
 
 DataRate GetMinBitrate() {
-  return DataRate::bps(GetMinBitrateBps());
+  return DataRate::BitsPerSec(GetMinBitrateBps());
 }
 
 }  // namespace congestion_controller


@@ -27,7 +27,7 @@ namespace {
 absl::optional<DataRate> OptionalRateFromOptionalBps(
     absl::optional<int> bitrate_bps) {
   if (bitrate_bps) {
-    return DataRate::bps(*bitrate_bps);
+    return DataRate::BitsPerSec(*bitrate_bps);
   } else {
     return absl::nullopt;
   }
@@ -201,7 +201,7 @@ RemoteBitrateEstimatorAbsSendTime::ProcessClusters(int64_t now_ms) {
                        << " bps. Mean send delta: " << best_it->send_mean_ms
                        << " ms, mean recv delta: " << best_it->recv_mean_ms
                        << " ms, num probes: " << best_it->count;
-      remote_rate_.SetEstimate(DataRate::bps(probe_bitrate_bps),
+      remote_rate_.SetEstimate(DataRate::BitsPerSec(probe_bitrate_bps),
                                Timestamp::Millis(now_ms));
       return ProbeResult::kBitrateUpdated;
     }
@@ -335,9 +335,9 @@ void RemoteBitrateEstimatorAbsSendTime::IncomingPacketInfo(
   } else if (detector_.State() == BandwidthUsage::kBwOverusing) {
     absl::optional<uint32_t> incoming_rate =
         incoming_bitrate_.Rate(arrival_time_ms);
-    if (incoming_rate &&
-        remote_rate_.TimeToReduceFurther(Timestamp::Millis(now_ms),
-                                         DataRate::bps(*incoming_rate))) {
+    if (incoming_rate && remote_rate_.TimeToReduceFurther(
+                             Timestamp::Millis(now_ms),
+                             DataRate::BitsPerSec(*incoming_rate))) {
       update_estimate = true;
     }
   }
@@ -426,6 +426,6 @@ void RemoteBitrateEstimatorAbsSendTime::SetMinBitrate(int min_bitrate_bps) {
   // Called from both the configuration thread and the network thread. Shouldn't
   // be called from the network thread in the future.
   rtc::CritScope lock(&crit_);
-  remote_rate_.SetMinBitrate(DataRate::bps(min_bitrate_bps));
+  remote_rate_.SetMinBitrate(DataRate::BitsPerSec(min_bitrate_bps));
 }
 }  // namespace webrtc


@@ -31,7 +31,7 @@ namespace {
 absl::optional<DataRate> OptionalRateFromOptionalBps(
     absl::optional<int> bitrate_bps) {
   if (bitrate_bps) {
-    return DataRate::bps(*bitrate_bps);
+    return DataRate::BitsPerSec(*bitrate_bps);
   } else {
     return absl::nullopt;
   }
@@ -143,10 +143,11 @@ void RemoteBitrateEstimatorSingleStream::IncomingPacket(
   if (estimator->detector.State() == BandwidthUsage::kBwOverusing) {
     absl::optional<uint32_t> incoming_bitrate_bps =
         incoming_bitrate_.Rate(now_ms);
-    if (incoming_bitrate_bps && (prior_state != BandwidthUsage::kBwOverusing ||
-                                 GetRemoteRate()->TimeToReduceFurther(
-                                     Timestamp::Millis(now_ms),
-                                     DataRate::bps(*incoming_bitrate_bps)))) {
+    if (incoming_bitrate_bps &&
+        (prior_state != BandwidthUsage::kBwOverusing ||
+         GetRemoteRate()->TimeToReduceFurther(
+             Timestamp::Millis(now_ms),
+             DataRate::BitsPerSec(*incoming_bitrate_bps)))) {
       // The first overuse should immediately trigger a new estimate.
       // We also have to update the estimate immediately if we are overusing
       // and the target bitrate is too high compared to what we are receiving.
@@ -264,7 +265,7 @@ AimdRateControl* RemoteBitrateEstimatorSingleStream::GetRemoteRate() {
 
 void RemoteBitrateEstimatorSingleStream::SetMinBitrate(int min_bitrate_bps) {
   rtc::CritScope cs(&crit_sect_);
-  remote_rate_->SetMinBitrate(DataRate::bps(min_bitrate_bps));
+  remote_rate_->SetMinBitrate(DataRate::BitsPerSec(min_bitrate_bps));
 }
 
 }  // namespace webrtc


@@ -122,7 +122,7 @@ void RemoteEstimatorProxy::IncomingPacket(int64_t arrival_time_ms,
     packet_result.sent_packet.send_time = abs_send_timestamp_;
     // TODO(webrtc:10742): Take IP header and transport overhead into account.
     packet_result.sent_packet.size =
-        DataSize::bytes(header.headerLength + payload_size);
+        DataSize::Bytes(header.headerLength + payload_size);
     packet_result.sent_packet.sequence_number = seq;
     network_state_estimator_->OnReceivedPacket(packet_result);
   }