Unify the FillAudioEncoderTimeSeries function with the existing processing functions.
Use lambdas instead of function objects.

BUG=webrtc:7323
Review-Url: https://codereview.webrtc.org/2743933004
Cr-Commit-Position: refs/heads/master@{#17208}
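The change replaces the per-metric function-object templates (PacketSizeBytes, SequenceNumberDiff, NetworkDelayDiff, Accumulated) with a small set of generic helpers (ProcessPoints, ProcessPairs, AccumulatePoints, AccumulatePairs, MovingAverage) that take an rtc::FunctionView callback, so each graph passes a lambda instead of defining a class. As a rough standalone sketch of that pattern (not WebRTC code: std::function and std::optional stand in for rtc::FunctionView and rtc::Optional, and Sample/TimeSeries are simplified stand-in types):

#include <cstdint>
#include <functional>
#include <optional>
#include <utility>
#include <vector>

struct Sample {
  uint64_t timestamp;   // microseconds
  size_t total_length;  // bytes
};

struct TimeSeries {
  std::vector<std::pair<float, float>> points;
};

// Generic point processor: the caller decides how to map a Sample to a
// y-value, and can skip a sample by returning nullopt.
void ProcessPoints(std::function<std::optional<float>(const Sample&)> get_y,
                   const std::vector<Sample>& data,
                   uint64_t begin_time,
                   TimeSeries* result) {
  for (const Sample& s : data) {
    float x = static_cast<float>(s.timestamp - begin_time) / 1000000;
    std::optional<float> y = get_y(s);
    if (y)
      result->points.emplace_back(x, *y);
  }
}

int main() {
  std::vector<Sample> packets = {{1000, 1200}, {2000, 800}};
  TimeSeries series;
  // A lambda replaces the old PacketSizeBytes function object.
  ProcessPoints(
      [](const Sample& s) -> std::optional<float> {
        return static_cast<float>(s.total_length);
      },
      packets, /*begin_time=*/0, &series);
  return 0;
}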
@@ -132,128 +132,124 @@ constexpr float kRightMargin = 0.02f;
constexpr float kBottomMargin = 0.02f;
constexpr float kTopMargin = 0.05f;

class PacketSizeBytes {
 public:
  using DataType = LoggedRtpPacket;
  using ResultType = size_t;
  size_t operator()(const LoggedRtpPacket& packet) {
    return packet.total_length;
  }
};

class SequenceNumberDiff {
 public:
  using DataType = LoggedRtpPacket;
  using ResultType = int64_t;
  int64_t operator()(const LoggedRtpPacket& old_packet,
                     const LoggedRtpPacket& new_packet) {
    return WrappingDifference(new_packet.header.sequenceNumber,
                              old_packet.header.sequenceNumber, 1ul << 16);
  }
};

class NetworkDelayDiff {
 public:
  class AbsSendTime {
   public:
    using DataType = LoggedRtpPacket;
    using ResultType = double;
    double operator()(const LoggedRtpPacket& old_packet,
                      const LoggedRtpPacket& new_packet) {
      if (old_packet.header.extension.hasAbsoluteSendTime &&
          new_packet.header.extension.hasAbsoluteSendTime) {
        int64_t send_time_diff = WrappingDifference(
            new_packet.header.extension.absoluteSendTime,
            old_packet.header.extension.absoluteSendTime, 1ul << 24);
        int64_t recv_time_diff = new_packet.timestamp - old_packet.timestamp;
        return static_cast<double>(recv_time_diff -
                                   AbsSendTimeToMicroseconds(send_time_diff)) /
               1000;
      } else {
        return 0;
      }
    }
  };

  class CaptureTime {
   public:
    using DataType = LoggedRtpPacket;
    using ResultType = double;
    double operator()(const LoggedRtpPacket& old_packet,
                      const LoggedRtpPacket& new_packet) {
      int64_t send_time_diff = WrappingDifference(
          new_packet.header.timestamp, old_packet.header.timestamp, 1ull << 32);
      int64_t recv_time_diff = new_packet.timestamp - old_packet.timestamp;

      const double kVideoSampleRate = 90000;
      // TODO(terelius): We treat all streams as video for now, even though
      // audio might be sampled at e.g. 16kHz, because it is really difficult to
      // figure out the true sampling rate of a stream. The effect is that the
      // delay will be scaled incorrectly for non-video streams.

      double delay_change =
          static_cast<double>(recv_time_diff) / 1000 -
          static_cast<double>(send_time_diff) / kVideoSampleRate * 1000;
      if (delay_change < -10000 || 10000 < delay_change) {
        LOG(LS_WARNING) << "Very large delay change. Timestamps correct?";
        LOG(LS_WARNING) << "Old capture time " << old_packet.header.timestamp
                        << ", received time " << old_packet.timestamp;
        LOG(LS_WARNING) << "New capture time " << new_packet.header.timestamp
                        << ", received time " << new_packet.timestamp;
        LOG(LS_WARNING) << "Receive time difference " << recv_time_diff << " = "
                        << static_cast<double>(recv_time_diff) / 1000000 << "s";
        LOG(LS_WARNING) << "Send time difference " << send_time_diff << " = "
                        << static_cast<double>(send_time_diff) /
                               kVideoSampleRate
                        << "s";
      }
      return delay_change;
    }
  };
};

template <typename Extractor>
class Accumulated {
 public:
  using DataType = typename Extractor::DataType;
  using ResultType = typename Extractor::ResultType;
  ResultType operator()(const DataType& old_packet,
                        const DataType& new_packet) {
    sum += extract(old_packet, new_packet);
    return sum;
  }

 private:
  Extractor extract;
  ResultType sum = 0;
};

// For each element in data, use |Extractor| to extract a y-coordinate and
// store the result in a TimeSeries.
template <typename Extractor>
void Pointwise(const std::vector<typename Extractor::DataType>& data,
               uint64_t begin_time,
               TimeSeries* result) {
  Extractor extract;
  for (size_t i = 0; i < data.size(); i++) {
    float x = static_cast<float>(data[i].timestamp - begin_time) / 1000000;
    float y = extract(data[i]);
    result->points.emplace_back(x, y);
rtc::Optional<double> NetworkDelayDiff_AbsSendTime(
    const LoggedRtpPacket& old_packet,
    const LoggedRtpPacket& new_packet) {
  if (old_packet.header.extension.hasAbsoluteSendTime &&
      new_packet.header.extension.hasAbsoluteSendTime) {
    int64_t send_time_diff = WrappingDifference(
        new_packet.header.extension.absoluteSendTime,
        old_packet.header.extension.absoluteSendTime, 1ul << 24);
    int64_t recv_time_diff = new_packet.timestamp - old_packet.timestamp;
    double delay_change_us =
        recv_time_diff - AbsSendTimeToMicroseconds(send_time_diff);
    return rtc::Optional<double>(delay_change_us / 1000);
  } else {
    return rtc::Optional<double>();
  }
}

// For each pair of adjacent elements in |data|, use |Extractor| to extract a
rtc::Optional<double> NetworkDelayDiff_CaptureTime(
    const LoggedRtpPacket& old_packet,
    const LoggedRtpPacket& new_packet) {
  int64_t send_time_diff = WrappingDifference(
      new_packet.header.timestamp, old_packet.header.timestamp, 1ull << 32);
  int64_t recv_time_diff = new_packet.timestamp - old_packet.timestamp;

  const double kVideoSampleRate = 90000;
  // TODO(terelius): We treat all streams as video for now, even though
  // audio might be sampled at e.g. 16kHz, because it is really difficult to
  // figure out the true sampling rate of a stream. The effect is that the
  // delay will be scaled incorrectly for non-video streams.

  double delay_change =
      static_cast<double>(recv_time_diff) / 1000 -
      static_cast<double>(send_time_diff) / kVideoSampleRate * 1000;
  if (delay_change < -10000 || 10000 < delay_change) {
    LOG(LS_WARNING) << "Very large delay change. Timestamps correct?";
    LOG(LS_WARNING) << "Old capture time " << old_packet.header.timestamp
                    << ", received time " << old_packet.timestamp;
    LOG(LS_WARNING) << "New capture time " << new_packet.header.timestamp
                    << ", received time " << new_packet.timestamp;
    LOG(LS_WARNING) << "Receive time difference " << recv_time_diff << " = "
                    << static_cast<double>(recv_time_diff) / 1000000 << "s";
    LOG(LS_WARNING) << "Send time difference " << send_time_diff << " = "
                    << static_cast<double>(send_time_diff) / kVideoSampleRate
                    << "s";
  }
  return rtc::Optional<double>(delay_change);
}

// For each element in data, use |get_y()| to extract a y-coordinate and
// store the result in a TimeSeries.
template <typename DataType>
void ProcessPoints(
    rtc::FunctionView<rtc::Optional<float>(const DataType&)> get_y,
    const std::vector<DataType>& data,
    uint64_t begin_time,
    TimeSeries* result) {
  for (size_t i = 0; i < data.size(); i++) {
    float x = static_cast<float>(data[i].timestamp - begin_time) / 1000000;
    rtc::Optional<float> y = get_y(data[i]);
    if (y)
      result->points.emplace_back(x, *y);
  }
}

// For each pair of adjacent elements in |data|, use |get_y| to extract a
// y-coordinate and store the result in a TimeSeries. Note that the x-coordinate
// will be the time of the second element in the pair.
template <typename Extractor>
void Pairwise(const std::vector<typename Extractor::DataType>& data,
              uint64_t begin_time,
              TimeSeries* result) {
  Extractor extract;
template <typename DataType, typename ResultType>
void ProcessPairs(
    rtc::FunctionView<rtc::Optional<ResultType>(const DataType&,
                                                const DataType&)> get_y,
    const std::vector<DataType>& data,
    uint64_t begin_time,
    TimeSeries* result) {
  for (size_t i = 1; i < data.size(); i++) {
    float x = static_cast<float>(data[i].timestamp - begin_time) / 1000000;
    float y = extract(data[i - 1], data[i]);
    result->points.emplace_back(x, y);
    rtc::Optional<ResultType> y = get_y(data[i - 1], data[i]);
    if (y)
      result->points.emplace_back(x, static_cast<float>(*y));
  }
}

// For each element in data, use |extract()| to extract a y-coordinate and
// store the result in a TimeSeries.
template <typename DataType, typename ResultType>
void AccumulatePoints(
    rtc::FunctionView<rtc::Optional<ResultType>(const DataType&)> extract,
    const std::vector<DataType>& data,
    uint64_t begin_time,
    TimeSeries* result) {
  ResultType sum = 0;
  for (size_t i = 0; i < data.size(); i++) {
    float x = static_cast<float>(data[i].timestamp - begin_time) / 1000000;
    rtc::Optional<ResultType> y = extract(data[i]);
    if (y) {
      sum += *y;
      result->points.emplace_back(x, static_cast<float>(sum));
    }
  }
}

// For each pair of adjacent elements in |data|, use |extract()| to extract a
// y-coordinate and store the result in a TimeSeries. Note that the x-coordinate
// will be the time of the second element in the pair.
template <typename DataType, typename ResultType>
void AccumulatePairs(
    rtc::FunctionView<rtc::Optional<ResultType>(const DataType&,
                                                const DataType&)> extract,
    const std::vector<DataType>& data,
    uint64_t begin_time,
    TimeSeries* result) {
  ResultType sum = 0;
  for (size_t i = 1; i < data.size(); i++) {
    float x = static_cast<float>(data[i].timestamp - begin_time) / 1000000;
    rtc::Optional<ResultType> y = extract(data[i - 1], data[i]);
    if (y)
      sum += *y;
    result->points.emplace_back(x, static_cast<float>(sum));
  }
}

@@ -261,33 +257,37 @@ void Pairwise(const std::vector<typename Extractor::DataType>& data,
// A data point is generated every |step| microseconds from |begin_time|
// to |end_time|. The value of each data point is the average of the data
// during the preceding |window_duration_us| microseconds.
template <typename Extractor>
void MovingAverage(const std::vector<typename Extractor::DataType>& data,
                   uint64_t begin_time,
                   uint64_t end_time,
                   uint64_t window_duration_us,
                   uint64_t step,
                   float y_scaling,
                   webrtc::plotting::TimeSeries* result) {
template <typename DataType, typename ResultType>
void MovingAverage(
    rtc::FunctionView<rtc::Optional<ResultType>(const DataType&)> extract,
    const std::vector<DataType>& data,
    uint64_t begin_time,
    uint64_t end_time,
    uint64_t window_duration_us,
    uint64_t step,
    webrtc::plotting::TimeSeries* result) {
  size_t window_index_begin = 0;
  size_t window_index_end = 0;
  typename Extractor::ResultType sum_in_window = 0;
  Extractor extract;
  ResultType sum_in_window = 0;

  for (uint64_t t = begin_time; t < end_time + step; t += step) {
    while (window_index_end < data.size() &&
           data[window_index_end].timestamp < t) {
      sum_in_window += extract(data[window_index_end]);
      rtc::Optional<ResultType> value = extract(data[window_index_end]);
      if (value)
        sum_in_window += *value;
      ++window_index_end;
    }
    while (window_index_begin < data.size() &&
           data[window_index_begin].timestamp < t - window_duration_us) {
      sum_in_window -= extract(data[window_index_begin]);
      rtc::Optional<ResultType> value = extract(data[window_index_begin]);
      if (value)
        sum_in_window -= *value;
      ++window_index_begin;
    }
    float window_duration_s = static_cast<float>(window_duration_us) / 1000000;
    float x = static_cast<float>(t - begin_time) / 1000000;
    float y = sum_in_window / window_duration_s * y_scaling;
    float y = sum_in_window / window_duration_s;
    result->points.emplace_back(x, y);
  }
}
@@ -561,21 +561,6 @@ std::string EventLogAnalyzer::GetStreamName(StreamId stream_id) const {
  return name.str();
}

void EventLogAnalyzer::FillAudioEncoderTimeSeries(
    Plot* plot,
    rtc::FunctionView<rtc::Optional<float>(
        const AudioNetworkAdaptationEvent& ana_event)> get_y) const {
  plot->series_list_.push_back(TimeSeries());
  plot->series_list_.back().style = LINE_DOT_GRAPH;
  for (auto& ana_event : audio_network_adaptation_events_) {
    rtc::Optional<float> y = get_y(ana_event);
    if (y) {
      float x = static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
      plot->series_list_.back().points.emplace_back(x, *y);
    }
  }
}

void EventLogAnalyzer::CreatePacketGraph(PacketDirection desired_direction,
                                         Plot* plot) {
  for (auto& kv : rtp_packets_) {
@@ -590,7 +575,11 @@ void EventLogAnalyzer::CreatePacketGraph(PacketDirection desired_direction,
    TimeSeries time_series;
    time_series.label = GetStreamName(stream_id);
    time_series.style = BAR_GRAPH;
    Pointwise<PacketSizeBytes>(packet_stream, begin_time_, &time_series);
    ProcessPoints<LoggedRtpPacket>(
        [](const LoggedRtpPacket& packet) -> rtc::Optional<float> {
          return rtc::Optional<float>(packet.total_length);
        },
        packet_stream, begin_time_, &time_series);
    plot->series_list_.push_back(std::move(time_series));
  }

@@ -736,7 +725,15 @@ void EventLogAnalyzer::CreateSequenceNumberGraph(Plot* plot) {
    TimeSeries time_series;
    time_series.label = GetStreamName(stream_id);
    time_series.style = BAR_GRAPH;
    Pairwise<SequenceNumberDiff>(packet_stream, begin_time_, &time_series);
    ProcessPairs<LoggedRtpPacket, float>(
        [](const LoggedRtpPacket& old_packet,
           const LoggedRtpPacket& new_packet) {
          int64_t diff =
              WrappingDifference(new_packet.header.sequenceNumber,
                                 old_packet.header.sequenceNumber, 1ul << 16);
          return rtc::Optional<float>(diff);
        },
        packet_stream, begin_time_, &time_series);
    plot->series_list_.push_back(std::move(time_series));
  }

@@ -820,15 +817,17 @@ void EventLogAnalyzer::CreateDelayChangeGraph(Plot* plot) {
    TimeSeries capture_time_data;
    capture_time_data.label = GetStreamName(stream_id) + " capture-time";
    capture_time_data.style = BAR_GRAPH;
    Pairwise<NetworkDelayDiff::CaptureTime>(packet_stream, begin_time_,
                                            &capture_time_data);
    ProcessPairs<LoggedRtpPacket, double>(NetworkDelayDiff_CaptureTime,
                                          packet_stream, begin_time_,
                                          &capture_time_data);
    plot->series_list_.push_back(std::move(capture_time_data));

    TimeSeries send_time_data;
    send_time_data.label = GetStreamName(stream_id) + " abs-send-time";
    send_time_data.style = BAR_GRAPH;
    Pairwise<NetworkDelayDiff::AbsSendTime>(packet_stream, begin_time_,
                                            &send_time_data);
    ProcessPairs<LoggedRtpPacket, double>(NetworkDelayDiff_AbsSendTime,
                                          packet_stream, begin_time_,
                                          &send_time_data);
    plot->series_list_.push_back(std::move(send_time_data));
  }

@@ -853,15 +852,17 @@ void EventLogAnalyzer::CreateAccumulatedDelayChangeGraph(Plot* plot) {
    TimeSeries capture_time_data;
    capture_time_data.label = GetStreamName(stream_id) + " capture-time";
    capture_time_data.style = LINE_GRAPH;
    Pairwise<Accumulated<NetworkDelayDiff::CaptureTime>>(
        packet_stream, begin_time_, &capture_time_data);
    AccumulatePairs<LoggedRtpPacket, double>(NetworkDelayDiff_CaptureTime,
                                             packet_stream, begin_time_,
                                             &capture_time_data);
    plot->series_list_.push_back(std::move(capture_time_data));

    TimeSeries send_time_data;
    send_time_data.label = GetStreamName(stream_id) + " abs-send-time";
    send_time_data.style = LINE_GRAPH;
    Pairwise<Accumulated<NetworkDelayDiff::AbsSendTime>>(
        packet_stream, begin_time_, &send_time_data);
    AccumulatePairs<LoggedRtpPacket, double>(NetworkDelayDiff_AbsSendTime,
                                             packet_stream, begin_time_,
                                             &send_time_data);
    plot->series_list_.push_back(std::move(send_time_data));
  }

@@ -985,10 +986,12 @@ void EventLogAnalyzer::CreateStreamBitrateGraph(
    TimeSeries time_series;
    time_series.label = GetStreamName(stream_id);
    time_series.style = LINE_GRAPH;
    double bytes_to_kilobits = 8.0 / 1000;
    MovingAverage<PacketSizeBytes>(packet_stream, begin_time_, end_time_,
                                   window_duration_, step_, bytes_to_kilobits,
                                   &time_series);
    MovingAverage<LoggedRtpPacket, double>(
        [](const LoggedRtpPacket& packet) {
          return rtc::Optional<double>(packet.total_length * 8.0 / 1000.0);
        },
        packet_stream, begin_time_, end_time_, window_duration_, step_,
        &time_series);
    plot->series_list_.push_back(std::move(time_series));
  }

@@ -1289,28 +1292,36 @@ void EventLogAnalyzer::CreateTimestampGraph(Plot* plot) {
}

void EventLogAnalyzer::CreateAudioEncoderTargetBitrateGraph(Plot* plot) {
  FillAudioEncoderTimeSeries(
      plot, [](const AudioNetworkAdaptationEvent& ana_event) {
  plot->series_list_.push_back(TimeSeries());
  plot->series_list_.back().style = LINE_DOT_GRAPH;
  plot->series_list_.back().label = "Audio encoder target bitrate";
  ProcessPoints<AudioNetworkAdaptationEvent>(
      [](const AudioNetworkAdaptationEvent& ana_event) -> rtc::Optional<float> {
        if (ana_event.config.bitrate_bps)
          return rtc::Optional<float>(
              static_cast<float>(*ana_event.config.bitrate_bps));
        return rtc::Optional<float>();
      });
  plot->series_list_.back().label = "Audio encoder target bitrate";
      },
      audio_network_adaptation_events_, begin_time_,
      &plot->series_list_.back());
  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
  plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin);
  plot->SetTitle("Reported audio encoder target bitrate");
}

void EventLogAnalyzer::CreateAudioEncoderFrameLengthGraph(Plot* plot) {
  FillAudioEncoderTimeSeries(
      plot, [](const AudioNetworkAdaptationEvent& ana_event) {
  plot->series_list_.push_back(TimeSeries());
  plot->series_list_.back().style = LINE_DOT_GRAPH;
  plot->series_list_.back().label = "Audio encoder frame length";
  ProcessPoints<AudioNetworkAdaptationEvent>(
      [](const AudioNetworkAdaptationEvent& ana_event) {
        if (ana_event.config.frame_length_ms)
          return rtc::Optional<float>(
              static_cast<float>(*ana_event.config.frame_length_ms));
        return rtc::Optional<float>();
      });
  plot->series_list_.back().label = "Audio encoder frame length";
      },
      audio_network_adaptation_events_, begin_time_,
      &plot->series_list_.back());
  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
  plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin);
  plot->SetTitle("Reported audio encoder frame length");
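For readability, here is how CreateAudioEncoderTargetBitrateGraph presumably reads after this change, assembled from the added lines in the hunk above (the other audio-encoder graphs follow the same shape):

void EventLogAnalyzer::CreateAudioEncoderTargetBitrateGraph(Plot* plot) {
  plot->series_list_.push_back(TimeSeries());
  plot->series_list_.back().style = LINE_DOT_GRAPH;
  plot->series_list_.back().label = "Audio encoder target bitrate";
  ProcessPoints<AudioNetworkAdaptationEvent>(
      [](const AudioNetworkAdaptationEvent& ana_event) -> rtc::Optional<float> {
        if (ana_event.config.bitrate_bps)
          return rtc::Optional<float>(
              static_cast<float>(*ana_event.config.bitrate_bps));
        return rtc::Optional<float>();
      },
      audio_network_adaptation_events_, begin_time_,
      &plot->series_list_.back());
  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
  plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin);
  plot->SetTitle("Reported audio encoder target bitrate");
}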
@@ -1318,14 +1329,18 @@ void EventLogAnalyzer::CreateAudioEncoderFrameLengthGraph(Plot* plot) {

void EventLogAnalyzer::CreateAudioEncoderUplinkPacketLossFractionGraph(
    Plot* plot) {
  FillAudioEncoderTimeSeries(
      plot, [&](const AudioNetworkAdaptationEvent& ana_event) {
  plot->series_list_.push_back(TimeSeries());
  plot->series_list_.back().style = LINE_DOT_GRAPH;
  plot->series_list_.back().label = "Audio encoder uplink packet loss fraction";
  ProcessPoints<AudioNetworkAdaptationEvent>(
      [](const AudioNetworkAdaptationEvent& ana_event) {
        if (ana_event.config.uplink_packet_loss_fraction)
          return rtc::Optional<float>(static_cast<float>(
              *ana_event.config.uplink_packet_loss_fraction));
        return rtc::Optional<float>();
      });
  plot->series_list_.back().label = "Audio encoder uplink packet loss fraction";
      },
      audio_network_adaptation_events_, begin_time_,
      &plot->series_list_.back());
  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
  plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin,
                          kTopMargin);
@@ -1333,42 +1348,54 @@ void EventLogAnalyzer::CreateAudioEncoderUplinkPacketLossFractionGraph(
}

void EventLogAnalyzer::CreateAudioEncoderEnableFecGraph(Plot* plot) {
  FillAudioEncoderTimeSeries(
      plot, [&](const AudioNetworkAdaptationEvent& ana_event) {
  plot->series_list_.push_back(TimeSeries());
  plot->series_list_.back().style = LINE_DOT_GRAPH;
  plot->series_list_.back().label = "Audio encoder FEC";
  ProcessPoints<AudioNetworkAdaptationEvent>(
      [](const AudioNetworkAdaptationEvent& ana_event) {
        if (ana_event.config.enable_fec)
          return rtc::Optional<float>(
              static_cast<float>(*ana_event.config.enable_fec));
        return rtc::Optional<float>();
      });
  plot->series_list_.back().label = "Audio encoder FEC";
      },
      audio_network_adaptation_events_, begin_time_,
      &plot->series_list_.back());
  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
  plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin, kTopMargin);
  plot->SetTitle("Reported audio encoder FEC");
}

void EventLogAnalyzer::CreateAudioEncoderEnableDtxGraph(Plot* plot) {
  FillAudioEncoderTimeSeries(
      plot, [&](const AudioNetworkAdaptationEvent& ana_event) {
  plot->series_list_.push_back(TimeSeries());
  plot->series_list_.back().style = LINE_DOT_GRAPH;
  plot->series_list_.back().label = "Audio encoder DTX";
  ProcessPoints<AudioNetworkAdaptationEvent>(
      [](const AudioNetworkAdaptationEvent& ana_event) {
        if (ana_event.config.enable_dtx)
          return rtc::Optional<float>(
              static_cast<float>(*ana_event.config.enable_dtx));
        return rtc::Optional<float>();
      });
  plot->series_list_.back().label = "Audio encoder DTX";
      },
      audio_network_adaptation_events_, begin_time_,
      &plot->series_list_.back());
  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
  plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin, kTopMargin);
  plot->SetTitle("Reported audio encoder DTX");
}

void EventLogAnalyzer::CreateAudioEncoderNumChannelsGraph(Plot* plot) {
  FillAudioEncoderTimeSeries(
      plot, [&](const AudioNetworkAdaptationEvent& ana_event) {
  plot->series_list_.push_back(TimeSeries());
  plot->series_list_.back().style = LINE_DOT_GRAPH;
  plot->series_list_.back().label = "Audio encoder number of channels";
  ProcessPoints<AudioNetworkAdaptationEvent>(
      [](const AudioNetworkAdaptationEvent& ana_event) {
        if (ana_event.config.num_channels)
          return rtc::Optional<float>(
              static_cast<float>(*ana_event.config.num_channels));
        return rtc::Optional<float>();
      });
  plot->series_list_.back().label = "Audio encoder number of channels";
      },
      audio_network_adaptation_events_, begin_time_,
      &plot->series_list_.back());
  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
  plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))",
                          kBottomMargin, kTopMargin);

@@ -140,11 +140,6 @@ class EventLogAnalyzer {

  std::string GetStreamName(StreamId) const;

  void FillAudioEncoderTimeSeries(
      Plot* plot,
      rtc::FunctionView<rtc::Optional<float>(
          const AudioNetworkAdaptationEvent& ana_event)> get_y) const;

  const ParsedRtcEventLog& parsed_log_;

  // A list of SSRCs we are interested in analysing.