Add NetEq delay plotting to event_log_visualizer

This CL adds the capability to analyze and plot how NetEq behaves in
response to a network trace.

BUG=webrtc:7467

Review-Url: https://codereview.webrtc.org/2876423002
Cr-Commit-Position: refs/heads/master@{#18590}
Author: henrik.lundin
Date: 2017-06-14 06:09:58 -07:00
Committed by: Commit Bot
Parent: 3c81a1afd8
Commit: 3c938fc5ea
8 changed files with 531 additions and 0 deletions

webrtc/modules/audio_coding/BUILD.gn

@@ -1160,6 +1160,8 @@ rtc_source_set("neteq_tools") {
"neteq/tools/fake_decode_from_file.h",
"neteq/tools/input_audio_file.cc",
"neteq/tools/input_audio_file.h",
"neteq/tools/neteq_delay_analyzer.cc",
"neteq/tools/neteq_delay_analyzer.h",
"neteq/tools/neteq_replacement_input.cc",
"neteq/tools/neteq_replacement_input.h",
"neteq/tools/resample_input_audio_file.cc",

webrtc/modules/audio_coding/neteq/tools/neteq_delay_analyzer.cc

@@ -0,0 +1,173 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
#include <algorithm>
#include <limits>
#include <utility>
namespace webrtc {
namespace test {
namespace {
// Helper function for NetEqDelayAnalyzer::CreateGraphs. Returns the
// interpolated value of a function at the point x. Vector x_vec contains the
// sample points, and y_vec contains the function values at these points. The
// return value is a linear interpolation between y_vec values.
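// Values of x outside the range of x_vec are clamped to the first or last
// y value. Worked example (hypothetical values): with x_vec = {0, 20} and
// y_vec = {0, 40}, LinearInterpolate(5.0, ...) returns
// (5 - 0) * (40 - 0) / (20 - 0) + 0 = 10.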
double LinearInterpolate(double x,
const std::vector<int64_t>& x_vec,
const std::vector<int64_t>& y_vec) {
// Find first element which is larger than x.
auto it = std::upper_bound(x_vec.begin(), x_vec.end(), x);
if (it == x_vec.end()) {
--it;
}
const size_t upper_ix = it - x_vec.begin();
size_t lower_ix;
if (upper_ix == 0 || x_vec[upper_ix] <= x) {
lower_ix = upper_ix;
} else {
lower_ix = upper_ix - 1;
}
double y;
if (lower_ix == upper_ix) {
y = y_vec[lower_ix];
} else {
RTC_DCHECK_NE(x_vec[lower_ix], x_vec[upper_ix]);
y = (x - x_vec[lower_ix]) * (y_vec[upper_ix] - y_vec[lower_ix]) /
(x_vec[upper_ix] - x_vec[lower_ix]) +
y_vec[lower_ix];
}
return y;
}
} // namespace
void NetEqDelayAnalyzer::AfterInsertPacket(
const test::NetEqInput::PacketData& packet,
NetEq* neteq) {
data_.insert(
std::make_pair(packet.header.timestamp, TimingData(packet.time_ms)));
}
void NetEqDelayAnalyzer::BeforeGetAudio(NetEq* neteq) {
last_sync_buffer_ms_ = neteq->SyncBufferSizeMs();
}
void NetEqDelayAnalyzer::AfterGetAudio(int64_t time_now_ms,
const AudioFrame& audio_frame,
bool /*muted*/,
NetEq* neteq) {
get_audio_time_ms_.push_back(time_now_ms);
// Check what timestamps were decoded in the last GetAudio call.
std::vector<uint32_t> dec_ts = neteq->LastDecodedTimestamps();
// Find those timestamps in data_, insert their decoding time and sync
// delay.
for (uint32_t ts : dec_ts) {
auto it = data_.find(ts);
if (it == data_.end()) {
// This is a packet that was split out from another packet. Skip it.
continue;
}
auto& it_timing = it->second;
RTC_CHECK(!it_timing.decode_get_audio_count)
<< "Decode time already written";
it_timing.decode_get_audio_count = rtc::Optional<int64_t>(get_audio_count_);
RTC_CHECK(!it_timing.sync_delay_ms) << "Sync delay already written";
it_timing.sync_delay_ms = rtc::Optional<int64_t>(last_sync_buffer_ms_);
it_timing.target_delay_ms = rtc::Optional<int>(neteq->TargetDelayMs());
it_timing.current_delay_ms =
rtc::Optional<int>(neteq->FilteredCurrentDelayMs());
}
last_sample_rate_hz_ = audio_frame.sample_rate_hz_;
++get_audio_count_;
}
void NetEqDelayAnalyzer::CreateGraphs(
std::vector<float>* send_time_s,
std::vector<float>* arrival_delay_ms,
std::vector<float>* corrected_arrival_delay_ms,
std::vector<rtc::Optional<float>>* playout_delay_ms,
std::vector<rtc::Optional<float>>* target_delay_ms) const {
if (get_audio_time_ms_.empty()) {
return;
}
// Create nominal_get_audio_time_ms, a vector starting at
// get_audio_time_ms_[0] and increasing by 10 for each element.
std::vector<int64_t> nominal_get_audio_time_ms(get_audio_time_ms_.size());
nominal_get_audio_time_ms[0] = get_audio_time_ms_[0];
std::transform(
nominal_get_audio_time_ms.begin(), nominal_get_audio_time_ms.end() - 1,
nominal_get_audio_time_ms.begin() + 1, [](int64_t& x) { return x + 10; });
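// Each element is thus the previous element plus 10 ms, modeling an ideal
// 10 ms GetAudio cadence starting from the first actual GetAudio time.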
RTC_DCHECK(
std::is_sorted(get_audio_time_ms_.begin(), get_audio_time_ms_.end()));
std::vector<double> rtp_timestamps_ms;
double offset = std::numeric_limits<double>::max();
TimestampUnwrapper unwrapper;
// This loop traverses data_ and populates rtp_timestamps_ms as well as
// calculates the base offset.
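// The offset is the minimum observed (arrival time - send time), so that
// subtracting it below anchors the smallest arrival delay at zero and makes
// all plotted delays relative rather than absolute.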
for (auto& d : data_) {
rtp_timestamps_ms.push_back(unwrapper.Unwrap(d.first) /
(last_sample_rate_hz_ / 1000.f));
offset =
std::min(offset, d.second.arrival_time_ms - rtp_timestamps_ms.back());
}
// Calculate send times in seconds for each packet. This is the (unwrapped)
// RTP timestamp in ms divided by 1000.
send_time_s->resize(rtp_timestamps_ms.size());
std::transform(rtp_timestamps_ms.begin(), rtp_timestamps_ms.end(),
send_time_s->begin(), [rtp_timestamps_ms](double x) {
return (x - rtp_timestamps_ms[0]) / 1000.f;
});
RTC_DCHECK_EQ(send_time_s->size(), rtp_timestamps_ms.size());
// This loop traverses the data again and populates the graph vectors. The
// reason to have two loops and traverse twice is that the offset cannot be
// known until the first traversal is done. Meanwhile, the final offset must
// be known already at the start of this second loop.
auto data_it = data_.cbegin();
for (size_t i = 0; i < send_time_s->size(); ++i, ++data_it) {
RTC_DCHECK(data_it != data_.end());
const double offset_send_time_ms = rtp_timestamps_ms[i] + offset;
const auto& timing = data_it->second;
corrected_arrival_delay_ms->push_back(
LinearInterpolate(timing.arrival_time_ms, get_audio_time_ms_,
nominal_get_audio_time_ms) -
offset_send_time_ms);
arrival_delay_ms->push_back(timing.arrival_time_ms - offset_send_time_ms);
if (timing.decode_get_audio_count) {
// This packet was decoded.
RTC_DCHECK(timing.sync_delay_ms);
const float playout_ms = *timing.decode_get_audio_count * 10 +
get_audio_time_ms_[0] + *timing.sync_delay_ms -
offset_send_time_ms;
playout_delay_ms->push_back(rtc::Optional<float>(playout_ms));
RTC_DCHECK(timing.target_delay_ms);
RTC_DCHECK(timing.current_delay_ms);
const float target =
playout_ms - *timing.current_delay_ms + *timing.target_delay_ms;
target_delay_ms->push_back(rtc::Optional<float>(target));
} else {
// This packet was never decoded. Mark target and playout delays as empty.
playout_delay_ms->push_back(rtc::Optional<float>());
target_delay_ms->push_back(rtc::Optional<float>());
}
}
RTC_DCHECK(data_it == data_.end());
RTC_DCHECK_EQ(send_time_s->size(), corrected_arrival_delay_ms->size());
RTC_DCHECK_EQ(send_time_s->size(), playout_delay_ms->size());
RTC_DCHECK_EQ(send_time_s->size(), target_delay_ms->size());
}
} // namespace test
} // namespace webrtc
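
As a usage sketch (illustrative only; the same wiring appears in CreateNetEqTestAndRun in analyzer.cc below), the analyzer is registered through the NetEqTest callbacks struct, and the graph vectors are fetched after the run:

webrtc::test::NetEqDelayAnalyzer delay_analyzer;
webrtc::test::NetEqTest::Callbacks callbacks;
callbacks.post_insert_packet = &delay_analyzer;  // -> AfterInsertPacket()
callbacks.get_audio_callback = &delay_analyzer;  // -> Before/AfterGetAudio()
// ... construct a webrtc::test::NetEqTest with these callbacks and Run() ...
std::vector<float> send_time_s, arrival_delay_ms, corrected_arrival_delay_ms;
std::vector<rtc::Optional<float>> playout_delay_ms, target_delay_ms;
delay_analyzer.CreateGraphs(&send_time_s, &arrival_delay_ms,
                            &corrected_arrival_delay_ms, &playout_delay_ms,
                            &target_delay_ms);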

webrtc/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h

@@ -0,0 +1,62 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_DELAY_ANALYZER_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_DELAY_ANALYZER_H_
#include <map>
#include <vector>
#include "webrtc/base/optional.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_input.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_test.h"
#include "webrtc/typedefs.h"
namespace webrtc {
namespace test {
class NetEqDelayAnalyzer : public test::NetEqPostInsertPacket,
public test::NetEqGetAudioCallback {
public:
void AfterInsertPacket(const test::NetEqInput::PacketData& packet,
NetEq* neteq) override;
void BeforeGetAudio(NetEq* neteq) override;
void AfterGetAudio(int64_t time_now_ms,
const AudioFrame& audio_frame,
bool muted,
NetEq* neteq) override;
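// Fills the given vectors with one element per packet, in packet order. The
// vectors are parallel: element i in each refers to the i:th packet.
// playout_delay_ms and target_delay_ms hold empty optionals for packets
// that were never decoded.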
void CreateGraphs(std::vector<float>* send_times_s,
std::vector<float>* arrival_delay_ms,
std::vector<float>* corrected_arrival_delay_ms,
std::vector<rtc::Optional<float>>* playout_delay_ms,
std::vector<rtc::Optional<float>>* target_delay_ms) const;
private:
struct TimingData {
explicit TimingData(double at) : arrival_time_ms(at) {}
double arrival_time_ms;
rtc::Optional<int64_t> decode_get_audio_count;
rtc::Optional<int64_t> sync_delay_ms;
rtc::Optional<int> target_delay_ms;
rtc::Optional<int> current_delay_ms;
};
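// Keyed on each packet's RTP timestamp; see AfterInsertPacket.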
std::map<uint32_t, TimingData> data_;
std::vector<int64_t> get_audio_time_ms_;
size_t get_audio_count_ = 0;
size_t last_sync_buffer_ms_ = 0;
int last_sample_rate_hz_ = 0;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_DELAY_ANALYZER_H_

webrtc/tools/BUILD.gn

@@ -212,6 +212,7 @@ if (rtc_enable_protobuf) {
"../logging:rtc_event_log_parser",
"../modules:module_api",
"../modules/audio_coding:ana_debug_dump_proto",
"../modules/audio_coding:neteq_tools",
# TODO(kwiberg): Remove this dependency.
"../api/audio_codecs:audio_codecs_api",
@@ -246,6 +247,7 @@ if (rtc_include_tests) {
":event_log_visualizer_utils",
"../base:rtc_base_approved",
"../test:field_trial",
"../test:test_support",
]
}
}

webrtc/tools/DEPS

@@ -5,6 +5,7 @@ include_rules = [
"+webrtc/logging/rtc_event_log",
"+webrtc/modules/audio_device",
"+webrtc/modules/audio_coding/audio_network_adaptor",
"+webrtc/modules/audio_coding/neteq/tools",
"+webrtc/modules/audio_processing",
"+webrtc/modules/bitrate_controller",
"+webrtc/modules/congestion_controller",

webrtc/tools/event_log_visualizer/analyzer.cc

@@ -18,6 +18,7 @@
#include <utility>
#include "webrtc/base/checks.h"
#include "webrtc/base/format_macros.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/ptr_util.h"
#include "webrtc/base/rate_statistics.h"
@@ -25,6 +26,12 @@
#include "webrtc/call/audio_send_stream.h"
#include "webrtc/call/call.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_sink.h"
#include "webrtc/modules/audio_coding/neteq/tools/fake_decode_from_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_test.h"
#include "webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h"
#include "webrtc/modules/congestion_controller/include/congestion_controller.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
@@ -302,6 +309,8 @@ EventLogAnalyzer::EventLogAnalyzer(const ParsedRtcEventLog& log)
// this can be removed. Tracking bug: webrtc:6399
RtpHeaderExtensionMap default_extension_map = GetDefaultHeaderExtensionMap();
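// Tracks the most recent LOG_START event not yet matched by a LOG_END; used
// below to assemble log_segments_.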
rtc::Optional<uint64_t> last_log_start;
for (size_t i = 0; i < parsed_log_.GetNumberOfEvents(); i++) {
ParsedRtcEventLog::EventType event_type = parsed_log_.GetEventType(i);
if (event_type != ParsedRtcEventLog::VIDEO_RECEIVER_CONFIG_EVENT &&
@@ -437,12 +446,26 @@ EventLogAnalyzer::EventLogAnalyzer(const ParsedRtcEventLog& log)
break;
}
case ParsedRtcEventLog::LOG_START: {
if (last_log_start) {
// A LOG_END event was missing. Use last_timestamp.
RTC_DCHECK_GE(last_timestamp, *last_log_start);
log_segments_.push_back(
std::make_pair(*last_log_start, last_timestamp));
}
last_log_start = rtc::Optional<uint64_t>(parsed_log_.GetTimestamp(i));
break;
}
case ParsedRtcEventLog::LOG_END: {
RTC_DCHECK(last_log_start);
log_segments_.push_back(
std::make_pair(*last_log_start, parsed_log_.GetTimestamp(i)));
last_log_start.reset();
break;
}
case ParsedRtcEventLog::AUDIO_PLAYOUT_EVENT: {
uint32_t this_ssrc;
parsed_log_.GetAudioPlayout(i, &this_ssrc);
audio_playout_events_[this_ssrc].push_back(parsed_log_.GetTimestamp(i));
break;
}
case ParsedRtcEventLog::LOSS_BASED_BWE_UPDATE: {
@@ -487,6 +510,10 @@ EventLogAnalyzer::EventLogAnalyzer(const ParsedRtcEventLog& log)
begin_time_ = first_timestamp;
end_time_ = last_timestamp;
call_duration_s_ = static_cast<float>(end_time_ - begin_time_) / 1000000;
if (last_log_start) {
// The log was missing the last LOG_END event. Fake it.
log_segments_.push_back(std::make_pair(*last_log_start, end_time_));
}
}
class BitrateObserver : public CongestionController::Observer,
@@ -1406,5 +1433,246 @@ void EventLogAnalyzer::CreateAudioEncoderNumChannelsGraph(Plot* plot) {
kBottomMargin, kTopMargin);
plot->SetTitle("Reported audio encoder number of channels");
}
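// Adapter that exposes the RTP packets and audio playout events from the
// parsed log through the test::NetEqInput interface, so that the logged
// stream can be replayed through NetEq offline.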
class NetEqStreamInput : public test::NetEqInput {
public:
// Does not take ownership of anything; all pointers must refer to valid
// objects that outlive the constructed NetEqStreamInput.
NetEqStreamInput(const std::vector<LoggedRtpPacket>* packet_stream,
const std::vector<uint64_t>* output_events_us,
rtc::Optional<uint64_t> end_time_us)
: packet_stream_(*packet_stream),
packet_stream_it_(packet_stream_.begin()),
output_events_us_it_(output_events_us->begin()),
output_events_us_end_(output_events_us->end()),
end_time_us_(end_time_us) {
RTC_DCHECK(packet_stream);
RTC_DCHECK(output_events_us);
}
rtc::Optional<int64_t> NextPacketTime() const override {
if (packet_stream_it_ == packet_stream_.end()) {
return rtc::Optional<int64_t>();
}
if (end_time_us_ && packet_stream_it_->timestamp > *end_time_us_) {
return rtc::Optional<int64_t>();
}
// Convert from us to ms.
return rtc::Optional<int64_t>(packet_stream_it_->timestamp / 1000);
}
rtc::Optional<int64_t> NextOutputEventTime() const override {
if (output_events_us_it_ == output_events_us_end_) {
return rtc::Optional<int64_t>();
}
if (end_time_us_ && *output_events_us_it_ > *end_time_us_) {
return rtc::Optional<int64_t>();
}
// Convert from us to ms.
return rtc::Optional<int64_t>(
rtc::checked_cast<int64_t>(*output_events_us_it_ / 1000));
}
std::unique_ptr<PacketData> PopPacket() override {
if (packet_stream_it_ == packet_stream_.end()) {
return std::unique_ptr<PacketData>();
}
std::unique_ptr<PacketData> packet_data(new PacketData());
packet_data->header = packet_stream_it_->header;
// Convert from us to ms.
packet_data->time_ms = packet_stream_it_->timestamp / 1000.0;
// This is a header-only "dummy" packet. Set the payload to all zeros, with
// length matching the total packet length recorded in the log.
packet_data->payload.SetSize(packet_stream_it_->total_length);
std::fill_n(packet_data->payload.data(), packet_data->payload.size(), 0);
++packet_stream_it_;
return packet_data;
}
void AdvanceOutputEvent() override {
if (output_events_us_it_ != output_events_us_end_) {
++output_events_us_it_;
}
}
bool ended() const override { return !NextEventTime(); }
rtc::Optional<RTPHeader> NextHeader() const override {
if (packet_stream_it_ == packet_stream_.end()) {
return rtc::Optional<RTPHeader>();
}
return rtc::Optional<RTPHeader>(packet_stream_it_->header);
}
private:
const std::vector<LoggedRtpPacket>& packet_stream_;
std::vector<LoggedRtpPacket>::const_iterator packet_stream_it_;
std::vector<uint64_t>::const_iterator output_events_us_it_;
const std::vector<uint64_t>::const_iterator output_events_us_end_;
const rtc::Optional<uint64_t> end_time_us_;
};
namespace {
// Creates a NetEq test object and all necessary input and output helpers. Runs
// the test and returns the NetEqDelayAnalyzer object that was used to
// instrument the test.
std::unique_ptr<test::NetEqDelayAnalyzer> CreateNetEqTestAndRun(
const std::vector<LoggedRtpPacket>* packet_stream,
const std::vector<uint64_t>* output_events_us,
rtc::Optional<uint64_t> end_time_us,
const std::string& replacement_file_name,
int file_sample_rate_hz) {
std::unique_ptr<test::NetEqInput> input(
new NetEqStreamInput(packet_stream, output_events_us, end_time_us));
constexpr int kReplacementPt = 127;
std::set<uint8_t> cn_types;
std::set<uint8_t> forbidden_types;
input.reset(new test::NetEqReplacementInput(std::move(input), kReplacementPt,
cn_types, forbidden_types));
NetEq::Config config;
config.max_packets_in_buffer = 200;
config.enable_fast_accelerate = true;
std::unique_ptr<test::VoidAudioSink> output(new test::VoidAudioSink());
test::NetEqTest::DecoderMap codecs;
// Create a "replacement decoder" that produces the decoded audio by reading
// from a file rather than from the encoded payloads.
std::unique_ptr<test::ResampleInputAudioFile> replacement_file(
new test::ResampleInputAudioFile(replacement_file_name,
file_sample_rate_hz));
replacement_file->set_output_rate_hz(48000);
std::unique_ptr<AudioDecoder> replacement_decoder(
new test::FakeDecodeFromFile(std::move(replacement_file), 48000, false));
test::NetEqTest::ExtDecoderMap ext_codecs;
ext_codecs[kReplacementPt] = {replacement_decoder.get(),
NetEqDecoder::kDecoderArbitrary,
"replacement codec"};
std::unique_ptr<test::NetEqDelayAnalyzer> delay_cb(
new test::NetEqDelayAnalyzer);
test::DefaultNetEqTestErrorCallback error_cb;
test::NetEqTest::Callbacks callbacks;
callbacks.error_callback = &error_cb;
callbacks.post_insert_packet = delay_cb.get();
callbacks.get_audio_callback = delay_cb.get();
test::NetEqTest test(config, codecs, ext_codecs, std::move(input),
std::move(output), callbacks);
test.Run();
return delay_cb;
}
} // namespace
// Plots the jitter buffer delay profile. This will plot only for the first
// incoming audio SSRC. If the log contains more than one incoming audio
// SSRC, all but the first will be ignored.
void EventLogAnalyzer::CreateAudioJitterBufferGraph(
const std::string& replacement_file_name,
int file_sample_rate_hz,
Plot* plot) {
const auto& incoming_audio_kv = std::find_if(
rtp_packets_.begin(), rtp_packets_.end(),
[this](std::pair<StreamId, std::vector<LoggedRtpPacket>> kv) {
return kv.first.GetDirection() == kIncomingPacket &&
this->IsAudioSsrc(kv.first);
});
if (incoming_audio_kv == rtp_packets_.end()) {
// No incoming audio stream found.
return;
}
const uint32_t ssrc = incoming_audio_kv->first.GetSsrc();
std::map<uint32_t, std::vector<uint64_t>>::const_iterator output_events_it =
audio_playout_events_.find(ssrc);
if (output_events_it == audio_playout_events_.end()) {
// Could not find output events with SSRC matching the input audio stream.
// Using the first available stream of output events.
output_events_it = audio_playout_events_.cbegin();
}
rtc::Optional<uint64_t> end_time_us =
log_segments_.empty()
? rtc::Optional<uint64_t>()
: rtc::Optional<uint64_t>(log_segments_.front().second);
auto delay_cb = CreateNetEqTestAndRun(
&incoming_audio_kv->second, &output_events_it->second, end_time_us,
replacement_file_name, file_sample_rate_hz);
std::vector<float> send_times_s;
std::vector<float> arrival_delay_ms;
std::vector<float> corrected_arrival_delay_ms;
std::vector<rtc::Optional<float>> playout_delay_ms;
std::vector<rtc::Optional<float>> target_delay_ms;
delay_cb->CreateGraphs(&send_times_s, &arrival_delay_ms,
&corrected_arrival_delay_ms, &playout_delay_ms,
&target_delay_ms);
RTC_DCHECK_EQ(send_times_s.size(), arrival_delay_ms.size());
RTC_DCHECK_EQ(send_times_s.size(), corrected_arrival_delay_ms.size());
RTC_DCHECK_EQ(send_times_s.size(), playout_delay_ms.size());
RTC_DCHECK_EQ(send_times_s.size(), target_delay_ms.size());
std::map<StreamId, TimeSeries> time_series_packet_arrival;
std::map<StreamId, TimeSeries> time_series_relative_packet_arrival;
std::map<StreamId, TimeSeries> time_series_play_time;
std::map<StreamId, TimeSeries> time_series_target_time;
float min_y_axis = 0.f;
float max_y_axis = 0.f;
const StreamId stream_id = incoming_audio_kv->first;
for (size_t i = 0; i < send_times_s.size(); ++i) {
time_series_packet_arrival[stream_id].points.emplace_back(
TimeSeriesPoint(send_times_s[i], arrival_delay_ms[i]));
time_series_relative_packet_arrival[stream_id].points.emplace_back(
TimeSeriesPoint(send_times_s[i], corrected_arrival_delay_ms[i]));
min_y_axis = std::min(min_y_axis, corrected_arrival_delay_ms[i]);
max_y_axis = std::max(max_y_axis, corrected_arrival_delay_ms[i]);
if (playout_delay_ms[i]) {
time_series_play_time[stream_id].points.emplace_back(
TimeSeriesPoint(send_times_s[i], *playout_delay_ms[i]));
min_y_axis = std::min(min_y_axis, *playout_delay_ms[i]);
max_y_axis = std::max(max_y_axis, *playout_delay_ms[i]);
}
if (target_delay_ms[i]) {
time_series_target_time[stream_id].points.emplace_back(
TimeSeriesPoint(send_times_s[i], *target_delay_ms[i]));
min_y_axis = std::min(min_y_axis, *target_delay_ms[i]);
max_y_axis = std::max(max_y_axis, *target_delay_ms[i]);
}
}
// This code is adapted for a single stream. The creation of the streams above
// guarantees that no more than one stream is included. If multiple streams are
// to be plotted, they should likely be given distinct labels below.
RTC_DCHECK_EQ(time_series_relative_packet_arrival.size(), 1);
for (auto& series : time_series_relative_packet_arrival) {
series.second.label = "Relative packet arrival delay";
series.second.style = LINE_GRAPH;
plot->AppendTimeSeries(std::move(series.second));
}
RTC_DCHECK_EQ(time_series_play_time.size(), 1);
for (auto& series : time_series_play_time) {
series.second.label = "Playout delay";
series.second.style = LINE_GRAPH;
plot->AppendTimeSeries(std::move(series.second));
}
RTC_DCHECK_EQ(time_series_target_time.size(), 1);
for (auto& series : time_series_target_time) {
series.second.label = "Target delay";
series.second.style = LINE_DOT_GRAPH;
plot->AppendTimeSeries(std::move(series.second));
}
plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
plot->SetYAxis(min_y_axis, max_y_axis, "Relative delay (ms)", kBottomMargin,
kTopMargin);
plot->SetTitle("NetEq timing");
}
} // namespace plotting
} // namespace webrtc

webrtc/tools/event_log_visualizer/analyzer.h

@@ -100,6 +100,9 @@ class EventLogAnalyzer {
void CreateAudioEncoderEnableFecGraph(Plot* plot);
void CreateAudioEncoderEnableDtxGraph(Plot* plot);
void CreateAudioEncoderNumChannelsGraph(Plot* plot);
void CreateAudioJitterBufferGraph(const std::string& replacement_file_name,
int file_sample_rate_hz,
Plot* plot);
// Returns a vector of capture and arrival timestamps for the video frames
// of the stream with the most number of frames.
@@ -163,6 +166,13 @@ class EventLogAnalyzer {
std::map<StreamId, std::vector<LoggedRtcpPacket>> rtcp_packets_;
// Maps an SSRC to the timestamps of parsed audio playout events.
std::map<uint32_t, std::vector<uint64_t>> audio_playout_events_;
// Stores the timestamps for all log segments, in the form of associated start
// and end events.
std::vector<std::pair<uint64_t, uint64_t>> log_segments_;
// A list of all updates from the send-side loss-based bandwidth estimator.
std::vector<LossBasedBweUpdate> bwe_loss_updates_;

webrtc/tools/event_log_visualizer/main.cc

@@ -13,6 +13,7 @@
#include "webrtc/base/flags.h"
#include "webrtc/logging/rtc_event_log/rtc_event_log_parser.h"
#include "webrtc/test/field_trial.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/tools/event_log_visualizer/analyzer.h"
#include "webrtc/tools/event_log_visualizer/plot_base.h"
#include "webrtc/tools/event_log_visualizer/plot_python.h"
@@ -77,6 +78,9 @@ DEFINE_bool(audio_encoder_dtx, false, "Plot the audio encoder DTX.");
DEFINE_bool(audio_encoder_num_channels,
false,
"Plot the audio encoder number of channels.");
DEFINE_bool(plot_audio_jitter_buffer,
false,
"Plot the audio jitter buffer delay profile.");
DEFINE_string(
force_fieldtrials,
"",
@@ -105,6 +109,7 @@ int main(int argc, char* argv[]) {
return 0;
}
webrtc::test::SetExecutablePath(argv[0]);
webrtc::test::InitFieldTrialsFromString(FLAG_force_fieldtrials);
std::string filename = argv[1];
@@ -231,6 +236,14 @@ int main(int argc, char* argv[]) {
analyzer.CreateAudioEncoderNumChannelsGraph(collection->AppendNewPlot());
}
if (FLAG_plot_all || FLAG_plot_audio_jitter_buffer) {
analyzer.CreateAudioJitterBufferGraph(
webrtc::test::ResourcePath(
"audio_processing/conversational_speech/EN_script2_F_sp2_B1",
"wav"),
48000, collection->AppendNewPlot());
}
collection->Draw();
return 0;
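}

With this change, the new plot can be produced with an invocation along the lines of: event_log_visualizer --plot_audio_jitter_buffer <rtc event log> | python. This is a sketch: it assumes the Python plot backend included above, which writes a matplotlib script to stdout, and that the log contains an incoming audio stream.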