From c186e1498be9e80d90265f1214755f23386bfbb4 Mon Sep 17 00:00:00 2001
From: Bjorn Terelius
Date: Fri, 5 Jun 2020 10:47:19 +0200
Subject: [PATCH] Move NetEq and ANA plotting to a separate file.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Bug: webrtc:11566
Change-Id: I6d6176ff72a158a1629e14b539de2e928e7d02a9
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/176510
Reviewed-by: Mirko Bonadei
Commit-Queue: Björn Terelius
Cr-Commit-Position: refs/heads/master@{#31472}
---
 rtc_tools/BUILD.gn                            |   3 +
 .../rtc_event_log_visualizer/analyze_audio.cc | 503 ++++
 .../rtc_event_log_visualizer/analyze_audio.h  |  75 +++
 .../rtc_event_log_visualizer/analyzer.cc      | 562 ------
 rtc_tools/rtc_event_log_visualizer/analyzer.h |  35 --
 .../analyzer_common.h                         | 103 ++++
 rtc_tools/rtc_event_log_visualizer/main.cc    |  67 ++-
 7 files changed, 718 insertions(+), 630 deletions(-)
 create mode 100644 rtc_tools/rtc_event_log_visualizer/analyze_audio.cc
 create mode 100644 rtc_tools/rtc_event_log_visualizer/analyze_audio.h

diff --git a/rtc_tools/BUILD.gn b/rtc_tools/BUILD.gn
index f193c51638..7d7ae99323 100644
--- a/rtc_tools/BUILD.gn
+++ b/rtc_tools/BUILD.gn
@@ -325,6 +325,8 @@ if (!build_with_chromium) {
     sources = [
       "rtc_event_log_visualizer/alerts.cc",
       "rtc_event_log_visualizer/alerts.h",
+      "rtc_event_log_visualizer/analyze_audio.cc",
+      "rtc_event_log_visualizer/analyze_audio.h",
       "rtc_event_log_visualizer/analyzer.cc",
       "rtc_event_log_visualizer/analyzer.h",
       "rtc_event_log_visualizer/analyzer_common.cc",
@@ -371,6 +373,7 @@ if (!build_with_chromium) {
     absl_deps = [
       "//third_party/abseil-cpp/absl/algorithm:container",
       "//third_party/abseil-cpp/absl/strings",
+      "//third_party/abseil-cpp/absl/types:optional",
     ]
   }
 }
diff --git a/rtc_tools/rtc_event_log_visualizer/analyze_audio.cc b/rtc_tools/rtc_event_log_visualizer/analyze_audio.cc
new file mode 100644
index 0000000000..becc0044ab
--- /dev/null
+++ b/rtc_tools/rtc_event_log_visualizer/analyze_audio.cc
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_tools/rtc_event_log_visualizer/analyze_audio.h"
+
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"
+#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
+#include "modules/audio_coding/neteq/tools/neteq_replacement_input.h"
+#include "modules/audio_coding/neteq/tools/neteq_test.h"
+#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
+#include "rtc_base/ref_counted_object.h"
+
+namespace webrtc {
+
+void CreateAudioEncoderTargetBitrateGraph(const ParsedRtcEventLog& parsed_log,
+                                          const AnalyzerConfig& config,
+                                          Plot* plot) {
+  TimeSeries time_series("Audio encoder target bitrate", LineStyle::kLine,
+                         PointStyle::kHighlight);
+  auto GetAnaBitrateBps = [](const LoggedAudioNetworkAdaptationEvent& ana_event)
+      -> absl::optional<float> {
+    if (ana_event.config.bitrate_bps)
+      return absl::optional<float>(
+          static_cast<float>(*ana_event.config.bitrate_bps));
+    return absl::nullopt;
+  };
+  auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) {
+    return config.GetCallTimeSec(packet.log_time_us());
+  };
+  ProcessPoints<LoggedAudioNetworkAdaptationEvent>(
+      ToCallTime, GetAnaBitrateBps,
+      parsed_log.audio_network_adaptation_events(), &time_series);
+  plot->AppendTimeSeries(std::move(time_series));
+  plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)",
+                 kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder target bitrate");
+}
+
+void CreateAudioEncoderFrameLengthGraph(const ParsedRtcEventLog& parsed_log,
+                                        const AnalyzerConfig& config,
+                                        Plot* plot) {
+  TimeSeries time_series("Audio encoder frame length", LineStyle::kLine,
+                         PointStyle::kHighlight);
+  auto GetAnaFrameLengthMs =
+      [](const LoggedAudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.frame_length_ms)
+          return absl::optional<float>(
+              static_cast<float>(*ana_event.config.frame_length_ms));
+        return absl::optional<float>();
+      };
+  auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) {
+    return config.GetCallTimeSec(packet.log_time_us());
+  };
+  ProcessPoints<LoggedAudioNetworkAdaptationEvent>(
+      ToCallTime, GetAnaFrameLengthMs,
+      parsed_log.audio_network_adaptation_events(), &time_series);
+  plot->AppendTimeSeries(std::move(time_series));
+  plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)",
+                 kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder frame length");
+}
+
+void CreateAudioEncoderPacketLossGraph(const ParsedRtcEventLog& parsed_log,
+                                       const AnalyzerConfig& config,
+                                       Plot* plot) {
+  TimeSeries time_series("Audio encoder uplink packet loss fraction",
+                         LineStyle::kLine, PointStyle::kHighlight);
+  auto GetAnaPacketLoss =
+      [](const LoggedAudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.uplink_packet_loss_fraction)
+          return absl::optional<float>(static_cast<float>(
+              *ana_event.config.uplink_packet_loss_fraction));
+        return absl::optional<float>();
+      };
+  auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) {
+    return config.GetCallTimeSec(packet.log_time_us());
+  };
+  ProcessPoints<LoggedAudioNetworkAdaptationEvent>(
+      ToCallTime, GetAnaPacketLoss,
+      parsed_log.audio_network_adaptation_events(), &time_series);
+  plot->AppendTimeSeries(std::move(time_series));
+  plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)",
+                 kLeftMargin, kRightMargin);
+
plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin, + kTopMargin); + plot->SetTitle("Reported audio encoder lost packets"); +} + +void CreateAudioEncoderEnableFecGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + Plot* plot) { + TimeSeries time_series("Audio encoder FEC", LineStyle::kLine, + PointStyle::kHighlight); + auto GetAnaFecEnabled = + [](const LoggedAudioNetworkAdaptationEvent& ana_event) { + if (ana_event.config.enable_fec) + return absl::optional( + static_cast(*ana_event.config.enable_fec)); + return absl::optional(); + }; + auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) { + return config.GetCallTimeSec(packet.log_time_us()); + }; + ProcessPoints( + ToCallTime, GetAnaFecEnabled, + parsed_log.audio_network_adaptation_events(), &time_series); + plot->AppendTimeSeries(std::move(time_series)); + plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)", + kLeftMargin, kRightMargin); + plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin, kTopMargin); + plot->SetTitle("Reported audio encoder FEC"); +} + +void CreateAudioEncoderEnableDtxGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + Plot* plot) { + TimeSeries time_series("Audio encoder DTX", LineStyle::kLine, + PointStyle::kHighlight); + auto GetAnaDtxEnabled = + [](const LoggedAudioNetworkAdaptationEvent& ana_event) { + if (ana_event.config.enable_dtx) + return absl::optional( + static_cast(*ana_event.config.enable_dtx)); + return absl::optional(); + }; + auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) { + return config.GetCallTimeSec(packet.log_time_us()); + }; + ProcessPoints( + ToCallTime, GetAnaDtxEnabled, + parsed_log.audio_network_adaptation_events(), &time_series); + plot->AppendTimeSeries(std::move(time_series)); + plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)", + kLeftMargin, kRightMargin); + plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin, kTopMargin); + plot->SetTitle("Reported audio encoder DTX"); +} + +void CreateAudioEncoderNumChannelsGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + Plot* plot) { + TimeSeries time_series("Audio encoder number of channels", LineStyle::kLine, + PointStyle::kHighlight); + auto GetAnaNumChannels = + [](const LoggedAudioNetworkAdaptationEvent& ana_event) { + if (ana_event.config.num_channels) + return absl::optional( + static_cast(*ana_event.config.num_channels)); + return absl::optional(); + }; + auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) { + return config.GetCallTimeSec(packet.log_time_us()); + }; + ProcessPoints( + ToCallTime, GetAnaNumChannels, + parsed_log.audio_network_adaptation_events(), &time_series); + plot->AppendTimeSeries(std::move(time_series)); + plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)", + kLeftMargin, kRightMargin); + plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))", + kBottomMargin, kTopMargin); + plot->SetTitle("Reported audio encoder number of channels"); +} + +class NetEqStreamInput : public test::NetEqInput { + public: + // Does not take any ownership, and all pointers must refer to valid objects + // that outlive the one constructed. 
+ NetEqStreamInput(const std::vector* packet_stream, + const std::vector* output_events, + absl::optional end_time_ms) + : packet_stream_(*packet_stream), + packet_stream_it_(packet_stream_.begin()), + output_events_it_(output_events->begin()), + output_events_end_(output_events->end()), + end_time_ms_(end_time_ms) { + RTC_DCHECK(packet_stream); + RTC_DCHECK(output_events); + } + + absl::optional NextPacketTime() const override { + if (packet_stream_it_ == packet_stream_.end()) { + return absl::nullopt; + } + if (end_time_ms_ && packet_stream_it_->rtp.log_time_ms() > *end_time_ms_) { + return absl::nullopt; + } + return packet_stream_it_->rtp.log_time_ms(); + } + + absl::optional NextOutputEventTime() const override { + if (output_events_it_ == output_events_end_) { + return absl::nullopt; + } + if (end_time_ms_ && output_events_it_->log_time_ms() > *end_time_ms_) { + return absl::nullopt; + } + return output_events_it_->log_time_ms(); + } + + std::unique_ptr PopPacket() override { + if (packet_stream_it_ == packet_stream_.end()) { + return std::unique_ptr(); + } + std::unique_ptr packet_data(new PacketData()); + packet_data->header = packet_stream_it_->rtp.header; + packet_data->time_ms = packet_stream_it_->rtp.log_time_ms(); + + // This is a header-only "dummy" packet. Set the payload to all zeros, with + // length according to the virtual length. + packet_data->payload.SetSize(packet_stream_it_->rtp.total_length - + packet_stream_it_->rtp.header_length); + std::fill_n(packet_data->payload.data(), packet_data->payload.size(), 0); + + ++packet_stream_it_; + return packet_data; + } + + void AdvanceOutputEvent() override { + if (output_events_it_ != output_events_end_) { + ++output_events_it_; + } + } + + bool ended() const override { return !NextEventTime(); } + + absl::optional NextHeader() const override { + if (packet_stream_it_ == packet_stream_.end()) { + return absl::nullopt; + } + return packet_stream_it_->rtp.header; + } + + private: + const std::vector& packet_stream_; + std::vector::const_iterator packet_stream_it_; + std::vector::const_iterator output_events_it_; + const std::vector::const_iterator output_events_end_; + const absl::optional end_time_ms_; +}; + +namespace { + +// Factory to create a "replacement decoder" that produces the decoded audio +// by reading from a file rather than from the encoded payloads. +class ReplacementAudioDecoderFactory : public AudioDecoderFactory { + public: + ReplacementAudioDecoderFactory(const absl::string_view replacement_file_name, + int file_sample_rate_hz) + : replacement_file_name_(replacement_file_name), + file_sample_rate_hz_(file_sample_rate_hz) {} + + std::vector GetSupportedDecoders() override { + RTC_NOTREACHED(); + return {}; + } + + bool IsSupportedDecoder(const SdpAudioFormat& format) override { + return true; + } + + std::unique_ptr MakeAudioDecoder( + const SdpAudioFormat& format, + absl::optional codec_pair_id) override { + auto replacement_file = std::make_unique( + replacement_file_name_, file_sample_rate_hz_); + replacement_file->set_output_rate_hz(48000); + return std::make_unique( + std::move(replacement_file), 48000, false); + } + + private: + const std::string replacement_file_name_; + const int file_sample_rate_hz_; +}; + +// Creates a NetEq test object and all necessary input and output helpers. Runs +// the test and returns the NetEqDelayAnalyzer object that was used to +// instrument the test. 
+std::unique_ptr CreateNetEqTestAndRun( + const std::vector* packet_stream, + const std::vector* output_events, + absl::optional end_time_ms, + const std::string& replacement_file_name, + int file_sample_rate_hz) { + std::unique_ptr input( + new NetEqStreamInput(packet_stream, output_events, end_time_ms)); + + constexpr int kReplacementPt = 127; + std::set cn_types; + std::set forbidden_types; + input.reset(new test::NetEqReplacementInput(std::move(input), kReplacementPt, + cn_types, forbidden_types)); + + NetEq::Config config; + config.max_packets_in_buffer = 200; + config.enable_fast_accelerate = true; + + std::unique_ptr output(new test::VoidAudioSink()); + + rtc::scoped_refptr decoder_factory = + new rtc::RefCountedObject( + replacement_file_name, file_sample_rate_hz); + + test::NetEqTest::DecoderMap codecs = { + {kReplacementPt, SdpAudioFormat("l16", 48000, 1)}}; + + std::unique_ptr delay_cb( + new test::NetEqDelayAnalyzer); + std::unique_ptr neteq_stats_getter( + new test::NetEqStatsGetter(std::move(delay_cb))); + test::DefaultNetEqTestErrorCallback error_cb; + test::NetEqTest::Callbacks callbacks; + callbacks.error_callback = &error_cb; + callbacks.post_insert_packet = neteq_stats_getter->delay_analyzer(); + callbacks.get_audio_callback = neteq_stats_getter.get(); + + test::NetEqTest test(config, decoder_factory, codecs, /*text_log=*/nullptr, + /*factory=*/nullptr, std::move(input), std::move(output), + callbacks); + test.Run(); + return neteq_stats_getter; +} +} // namespace + +NetEqStatsGetterMap SimulateNetEq(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + const std::string& replacement_file_name, + int file_sample_rate_hz) { + NetEqStatsGetterMap neteq_stats; + + for (const auto& stream : parsed_log.incoming_rtp_packets_by_ssrc()) { + const uint32_t ssrc = stream.ssrc; + if (!IsAudioSsrc(parsed_log, kIncomingPacket, ssrc)) + continue; + const std::vector* audio_packets = + &stream.incoming_packets; + if (audio_packets == nullptr) { + // No incoming audio stream found. + continue; + } + + RTC_DCHECK(neteq_stats.find(ssrc) == neteq_stats.end()); + + std::map>::const_iterator + output_events_it = parsed_log.audio_playout_events().find(ssrc); + if (output_events_it == parsed_log.audio_playout_events().end()) { + // Could not find output events with SSRC matching the input audio stream. + // Using the first available stream of output events. + output_events_it = parsed_log.audio_playout_events().cbegin(); + } + + int64_t end_time_ms = parsed_log.first_log_segment().stop_time_ms(); + + neteq_stats[ssrc] = CreateNetEqTestAndRun( + audio_packets, &output_events_it->second, end_time_ms, + replacement_file_name, file_sample_rate_hz); + } + + return neteq_stats; +} + +// Given a NetEqStatsGetter and the SSRC that the NetEqStatsGetter was created +// for, this method generates a plot for the jitter buffer delay profile. 
+void CreateAudioJitterBufferGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + uint32_t ssrc, + const test::NetEqStatsGetter* stats_getter, + Plot* plot) { + test::NetEqDelayAnalyzer::Delays arrival_delay_ms; + test::NetEqDelayAnalyzer::Delays corrected_arrival_delay_ms; + test::NetEqDelayAnalyzer::Delays playout_delay_ms; + test::NetEqDelayAnalyzer::Delays target_delay_ms; + + stats_getter->delay_analyzer()->CreateGraphs( + &arrival_delay_ms, &corrected_arrival_delay_ms, &playout_delay_ms, + &target_delay_ms); + + TimeSeries time_series_packet_arrival("packet arrival delay", + LineStyle::kLine); + TimeSeries time_series_relative_packet_arrival( + "Relative packet arrival delay", LineStyle::kLine); + TimeSeries time_series_play_time("Playout delay", LineStyle::kLine); + TimeSeries time_series_target_time("Target delay", LineStyle::kLine, + PointStyle::kHighlight); + + for (const auto& data : arrival_delay_ms) { + const float x = config.GetCallTimeSec(data.first * 1000); // ms to us. + const float y = data.second; + time_series_packet_arrival.points.emplace_back(TimeSeriesPoint(x, y)); + } + for (const auto& data : corrected_arrival_delay_ms) { + const float x = config.GetCallTimeSec(data.first * 1000); // ms to us. + const float y = data.second; + time_series_relative_packet_arrival.points.emplace_back( + TimeSeriesPoint(x, y)); + } + for (const auto& data : playout_delay_ms) { + const float x = config.GetCallTimeSec(data.first * 1000); // ms to us. + const float y = data.second; + time_series_play_time.points.emplace_back(TimeSeriesPoint(x, y)); + } + for (const auto& data : target_delay_ms) { + const float x = config.GetCallTimeSec(data.first * 1000); // ms to us. + const float y = data.second; + time_series_target_time.points.emplace_back(TimeSeriesPoint(x, y)); + } + + plot->AppendTimeSeries(std::move(time_series_packet_arrival)); + plot->AppendTimeSeries(std::move(time_series_relative_packet_arrival)); + plot->AppendTimeSeries(std::move(time_series_play_time)); + plot->AppendTimeSeries(std::move(time_series_target_time)); + + plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)", + kLeftMargin, kRightMargin); + plot->SetSuggestedYAxis(0, 1, "Relative delay (ms)", kBottomMargin, + kTopMargin); + plot->SetTitle("NetEq timing for " + + GetStreamName(parsed_log, kIncomingPacket, ssrc)); +} + +template +void CreateNetEqStatsGraphInternal( + const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + const NetEqStatsGetterMap& neteq_stats, + rtc::FunctionView>*( + const test::NetEqStatsGetter*)> data_extractor, + rtc::FunctionView stats_extractor, + const std::string& plot_name, + Plot* plot) { + std::map time_series; + + for (const auto& st : neteq_stats) { + const uint32_t ssrc = st.first; + const std::vector>* data_vector = + data_extractor(st.second.get()); + for (const auto& data : *data_vector) { + const float time = config.GetCallTimeSec(data.first * 1000); // ms to us. 
+ const float value = stats_extractor(data.second); + time_series[ssrc].points.emplace_back(TimeSeriesPoint(time, value)); + } + } + + for (auto& series : time_series) { + series.second.label = + GetStreamName(parsed_log, kIncomingPacket, series.first); + series.second.line_style = LineStyle::kLine; + plot->AppendTimeSeries(std::move(series.second)); + } + + plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)", + kLeftMargin, kRightMargin); + plot->SetSuggestedYAxis(0, 1, plot_name, kBottomMargin, kTopMargin); + plot->SetTitle(plot_name); +} + +void CreateNetEqNetworkStatsGraph( + const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + const NetEqStatsGetterMap& neteq_stats, + rtc::FunctionView stats_extractor, + const std::string& plot_name, + Plot* plot) { + CreateNetEqStatsGraphInternal( + parsed_log, config, neteq_stats, + [](const test::NetEqStatsGetter* stats_getter) { + return stats_getter->stats(); + }, + stats_extractor, plot_name, plot); +} + +void CreateNetEqLifetimeStatsGraph( + const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + const NetEqStatsGetterMap& neteq_stats, + rtc::FunctionView stats_extractor, + const std::string& plot_name, + Plot* plot) { + CreateNetEqStatsGraphInternal( + parsed_log, config, neteq_stats, + [](const test::NetEqStatsGetter* stats_getter) { + return stats_getter->lifetime_stats(); + }, + stats_extractor, plot_name, plot); +} + +} // namespace webrtc diff --git a/rtc_tools/rtc_event_log_visualizer/analyze_audio.h b/rtc_tools/rtc_event_log_visualizer/analyze_audio.h new file mode 100644 index 0000000000..726e84492d --- /dev/null +++ b/rtc_tools/rtc_event_log_visualizer/analyze_audio.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#ifndef RTC_TOOLS_RTC_EVENT_LOG_VISUALIZER_ANALYZE_AUDIO_H_
+#define RTC_TOOLS_RTC_EVENT_LOG_VISUALIZER_ANALYZE_AUDIO_H_
+
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <string>
+
+#include "api/function_view.h"
+#include "logging/rtc_event_log/rtc_event_log_parser.h"
+#include "modules/audio_coding/neteq/tools/neteq_stats_getter.h"
+#include "rtc_tools/rtc_event_log_visualizer/analyzer_common.h"
+#include "rtc_tools/rtc_event_log_visualizer/plot_base.h"
+
+namespace webrtc {
+
+void CreateAudioEncoderTargetBitrateGraph(const ParsedRtcEventLog& parsed_log,
+                                          const AnalyzerConfig& config,
+                                          Plot* plot);
+void CreateAudioEncoderFrameLengthGraph(const ParsedRtcEventLog& parsed_log,
+                                        const AnalyzerConfig& config,
+                                        Plot* plot);
+void CreateAudioEncoderPacketLossGraph(const ParsedRtcEventLog& parsed_log,
+                                       const AnalyzerConfig& config,
+                                       Plot* plot);
+void CreateAudioEncoderEnableFecGraph(const ParsedRtcEventLog& parsed_log,
+                                      const AnalyzerConfig& config,
+                                      Plot* plot);
+void CreateAudioEncoderEnableDtxGraph(const ParsedRtcEventLog& parsed_log,
+                                      const AnalyzerConfig& config,
+                                      Plot* plot);
+void CreateAudioEncoderNumChannelsGraph(const ParsedRtcEventLog& parsed_log,
+                                        const AnalyzerConfig& config,
+                                        Plot* plot);
+
+using NetEqStatsGetterMap =
+    std::map<uint32_t, std::unique_ptr<test::NetEqStatsGetter>>;
+NetEqStatsGetterMap SimulateNetEq(const ParsedRtcEventLog& parsed_log,
+                                  const AnalyzerConfig& config,
+                                  const std::string& replacement_file_name,
+                                  int file_sample_rate_hz);
+
+void CreateAudioJitterBufferGraph(const ParsedRtcEventLog& parsed_log,
+                                  const AnalyzerConfig& config,
+                                  uint32_t ssrc,
+                                  const test::NetEqStatsGetter* stats_getter,
+                                  Plot* plot);
+void CreateNetEqNetworkStatsGraph(
+    const ParsedRtcEventLog& parsed_log,
+    const AnalyzerConfig& config,
+    const NetEqStatsGetterMap& neteq_stats_getters,
+    rtc::FunctionView<float(const NetEqNetworkStatistics&)> stats_extractor,
+    const std::string& plot_name,
+    Plot* plot);
+void CreateNetEqLifetimeStatsGraph(
+    const ParsedRtcEventLog& parsed_log,
+    const AnalyzerConfig& config,
+    const NetEqStatsGetterMap& neteq_stats_getters,
+    rtc::FunctionView<float(const NetEqLifetimeStatistics&)> stats_extractor,
+    const std::string& plot_name,
+    Plot* plot);
+
+}  // namespace webrtc
+
+#endif  // RTC_TOOLS_RTC_EVENT_LOG_VISUALIZER_ANALYZE_AUDIO_H_
diff --git a/rtc_tools/rtc_event_log_visualizer/analyzer.cc b/rtc_tools/rtc_event_log_visualizer/analyzer.cc
index 287fbe2eb8..8ca108e48f 100644
--- a/rtc_tools/rtc_event_log_visualizer/analyzer.cc
+++ b/rtc_tools/rtc_event_log_visualizer/analyzer.cc
@@ -31,12 +31,6 @@
 #include "logging/rtc_event_log/rtc_event_processor.h"
 #include "logging/rtc_event_log/rtc_stream_config.h"
 #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h"
-#include "modules/audio_coding/neteq/tools/audio_sink.h"
-#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"
-#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
-#include "modules/audio_coding/neteq/tools/neteq_replacement_input.h"
-#include "modules/audio_coding/neteq/tools/neteq_test.h"
-#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
 #include "modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.h"
 #include "modules/congestion_controller/goog_cc/bitrate_estimator.h"
 #include "modules/congestion_controller/goog_cc/delay_based_bwe.h"
@@ -71,8 +65,6 @@ namespace webrtc {
 
 namespace {
 
-const int kNumMicrosecsPerSec = 1000000;
-
 std::string SsrcToString(uint32_t ssrc) {
   rtc::StringBuilder ss;
   ss << "SSRC " << ssrc;
@@ -168,11 +160,6 @@ absl::optional EstimateRtpClockFrequency(
   return absl::nullopt;
 }
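For reference, the functions declared in the new analyze_audio.h above are plain free functions that only need the parsed log, an AnalyzerConfig, and a Plot to draw into, instead of going through EventLogAnalyzer. A minimal usage sketch follows; it is not part of the patch, and it assumes that parsed_log, config, wav_path, and the plot collection have been set up the same way rtc_event_log_visualizer/main.cc (updated at the end of this patch) sets them up.

// Sketch only, not part of the diff; parsed_log, config, wav_path and
// collection are assumed to be built as in rtc_event_log_visualizer/main.cc.
webrtc::CreateAudioEncoderTargetBitrateGraph(parsed_log, config,
                                             collection->AppendNewPlot());

// SimulateNetEq() is run once and the per-SSRC stats getters are then reused
// by every NetEq-based plot.
webrtc::NetEqStatsGetterMap neteq_stats =
    webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000);
for (const auto& entry : neteq_stats) {
  webrtc::CreateAudioJitterBufferGraph(parsed_log, config, entry.first,
                                       entry.second.get(),
                                       collection->AppendNewPlot());
}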
-constexpr float kLeftMargin = 0.01f; -constexpr float kRightMargin = 0.02f; -constexpr float kBottomMargin = 0.02f; -constexpr float kTopMargin = 0.05f; - absl::optional NetworkDelayDiff_AbsSendTime( const LoggedRtpPacketIncoming& old_packet, const LoggedRtpPacketIncoming& new_packet) { @@ -222,99 +209,6 @@ absl::optional NetworkDelayDiff_CaptureTime( return delay_change; } -// For each element in data_view, use |f()| to extract a y-coordinate and -// store the result in a TimeSeries. -template -void ProcessPoints(rtc::FunctionView fx, - rtc::FunctionView(const DataType&)> fy, - const IterableType& data_view, - TimeSeries* result) { - for (size_t i = 0; i < data_view.size(); i++) { - const DataType& elem = data_view[i]; - float x = fx(elem); - absl::optional y = fy(elem); - if (y) - result->points.emplace_back(x, *y); - } -} - -// For each pair of adjacent elements in |data|, use |f()| to extract a -// y-coordinate and store the result in a TimeSeries. Note that the x-coordinate -// will be the time of the second element in the pair. -template -void ProcessPairs( - rtc::FunctionView fx, - rtc::FunctionView(const DataType&, - const DataType&)> fy, - const IterableType& data, - TimeSeries* result) { - for (size_t i = 1; i < data.size(); i++) { - float x = fx(data[i]); - absl::optional y = fy(data[i - 1], data[i]); - if (y) - result->points.emplace_back(x, static_cast(*y)); - } -} - -// For each pair of adjacent elements in |data|, use |f()| to extract a -// y-coordinate and store the result in a TimeSeries. Note that the x-coordinate -// will be the time of the second element in the pair. -template -void AccumulatePairs( - rtc::FunctionView fx, - rtc::FunctionView(const DataType&, - const DataType&)> fy, - const IterableType& data, - TimeSeries* result) { - ResultType sum = 0; - for (size_t i = 1; i < data.size(); i++) { - float x = fx(data[i]); - absl::optional y = fy(data[i - 1], data[i]); - if (y) { - sum += *y; - result->points.emplace_back(x, static_cast(sum)); - } - } -} - -// Calculates a moving average of |data| and stores the result in a TimeSeries. -// A data point is generated every |step| microseconds from |begin_time| -// to |end_time|. The value of each data point is the average of the data -// during the preceding |window_duration_us| microseconds. 
-template -void MovingAverage( - rtc::FunctionView(const DataType&)> fy, - const IterableType& data_view, - AnalyzerConfig config, - TimeSeries* result) { - size_t window_index_begin = 0; - size_t window_index_end = 0; - ResultType sum_in_window = 0; - - for (int64_t t = config.begin_time_; t < config.end_time_ + config.step_; - t += config.step_) { - while (window_index_end < data_view.size() && - data_view[window_index_end].log_time_us() < t) { - absl::optional value = fy(data_view[window_index_end]); - if (value) - sum_in_window += *value; - ++window_index_end; - } - while (window_index_begin < data_view.size() && - data_view[window_index_begin].log_time_us() < - t - config.window_duration_) { - absl::optional value = fy(data_view[window_index_begin]); - if (value) - sum_in_window -= *value; - ++window_index_begin; - } - float window_duration_s = - static_cast(config.window_duration_) / kNumMicrosecsPerSec; - float x = config.GetCallTimeSec(t); - float y = sum_in_window / window_duration_s; - result->points.emplace_back(x, y); - } -} template TimeSeries CreateRtcpTypeTimeSeries(const std::vector& rtcp_list, @@ -1725,462 +1619,6 @@ void EventLogAnalyzer::CreateSenderAndReceiverReportPlot( plot->SetTitle(title); } -void EventLogAnalyzer::CreateAudioEncoderTargetBitrateGraph(Plot* plot) { - TimeSeries time_series("Audio encoder target bitrate", LineStyle::kLine, - PointStyle::kHighlight); - auto GetAnaBitrateBps = [](const LoggedAudioNetworkAdaptationEvent& ana_event) - -> absl::optional { - if (ana_event.config.bitrate_bps) - return absl::optional( - static_cast(*ana_event.config.bitrate_bps)); - return absl::nullopt; - }; - auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) { - return this->config_.GetCallTimeSec(packet.log_time_us()); - }; - ProcessPoints( - ToCallTime, GetAnaBitrateBps, - parsed_log_.audio_network_adaptation_events(), &time_series); - plot->AppendTimeSeries(std::move(time_series)); - plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(), - "Time (s)", kLeftMargin, kRightMargin); - plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin); - plot->SetTitle("Reported audio encoder target bitrate"); -} - -void EventLogAnalyzer::CreateAudioEncoderFrameLengthGraph(Plot* plot) { - TimeSeries time_series("Audio encoder frame length", LineStyle::kLine, - PointStyle::kHighlight); - auto GetAnaFrameLengthMs = - [](const LoggedAudioNetworkAdaptationEvent& ana_event) { - if (ana_event.config.frame_length_ms) - return absl::optional( - static_cast(*ana_event.config.frame_length_ms)); - return absl::optional(); - }; - auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) { - return this->config_.GetCallTimeSec(packet.log_time_us()); - }; - ProcessPoints( - ToCallTime, GetAnaFrameLengthMs, - parsed_log_.audio_network_adaptation_events(), &time_series); - plot->AppendTimeSeries(std::move(time_series)); - plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(), - "Time (s)", kLeftMargin, kRightMargin); - plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin); - plot->SetTitle("Reported audio encoder frame length"); -} - -void EventLogAnalyzer::CreateAudioEncoderPacketLossGraph(Plot* plot) { - TimeSeries time_series("Audio encoder uplink packet loss fraction", - LineStyle::kLine, PointStyle::kHighlight); - auto GetAnaPacketLoss = - [](const LoggedAudioNetworkAdaptationEvent& ana_event) { - if (ana_event.config.uplink_packet_loss_fraction) - return absl::optional(static_cast( - 
*ana_event.config.uplink_packet_loss_fraction)); - return absl::optional(); - }; - auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) { - return this->config_.GetCallTimeSec(packet.log_time_us()); - }; - ProcessPoints( - ToCallTime, GetAnaPacketLoss, - parsed_log_.audio_network_adaptation_events(), &time_series); - plot->AppendTimeSeries(std::move(time_series)); - plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(), - "Time (s)", kLeftMargin, kRightMargin); - plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin, - kTopMargin); - plot->SetTitle("Reported audio encoder lost packets"); -} - -void EventLogAnalyzer::CreateAudioEncoderEnableFecGraph(Plot* plot) { - TimeSeries time_series("Audio encoder FEC", LineStyle::kLine, - PointStyle::kHighlight); - auto GetAnaFecEnabled = - [](const LoggedAudioNetworkAdaptationEvent& ana_event) { - if (ana_event.config.enable_fec) - return absl::optional( - static_cast(*ana_event.config.enable_fec)); - return absl::optional(); - }; - auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) { - return this->config_.GetCallTimeSec(packet.log_time_us()); - }; - ProcessPoints( - ToCallTime, GetAnaFecEnabled, - parsed_log_.audio_network_adaptation_events(), &time_series); - plot->AppendTimeSeries(std::move(time_series)); - plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(), - "Time (s)", kLeftMargin, kRightMargin); - plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin, kTopMargin); - plot->SetTitle("Reported audio encoder FEC"); -} - -void EventLogAnalyzer::CreateAudioEncoderEnableDtxGraph(Plot* plot) { - TimeSeries time_series("Audio encoder DTX", LineStyle::kLine, - PointStyle::kHighlight); - auto GetAnaDtxEnabled = - [](const LoggedAudioNetworkAdaptationEvent& ana_event) { - if (ana_event.config.enable_dtx) - return absl::optional( - static_cast(*ana_event.config.enable_dtx)); - return absl::optional(); - }; - auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) { - return this->config_.GetCallTimeSec(packet.log_time_us()); - }; - ProcessPoints( - ToCallTime, GetAnaDtxEnabled, - parsed_log_.audio_network_adaptation_events(), &time_series); - plot->AppendTimeSeries(std::move(time_series)); - plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(), - "Time (s)", kLeftMargin, kRightMargin); - plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin, kTopMargin); - plot->SetTitle("Reported audio encoder DTX"); -} - -void EventLogAnalyzer::CreateAudioEncoderNumChannelsGraph(Plot* plot) { - TimeSeries time_series("Audio encoder number of channels", LineStyle::kLine, - PointStyle::kHighlight); - auto GetAnaNumChannels = - [](const LoggedAudioNetworkAdaptationEvent& ana_event) { - if (ana_event.config.num_channels) - return absl::optional( - static_cast(*ana_event.config.num_channels)); - return absl::optional(); - }; - auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) { - return this->config_.GetCallTimeSec(packet.log_time_us()); - }; - ProcessPoints( - ToCallTime, GetAnaNumChannels, - parsed_log_.audio_network_adaptation_events(), &time_series); - plot->AppendTimeSeries(std::move(time_series)); - plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(), - "Time (s)", kLeftMargin, kRightMargin); - plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))", - kBottomMargin, kTopMargin); - plot->SetTitle("Reported audio encoder number of channels"); -} - -class NetEqStreamInput : 
public test::NetEqInput { - public: - // Does not take any ownership, and all pointers must refer to valid objects - // that outlive the one constructed. - NetEqStreamInput(const std::vector* packet_stream, - const std::vector* output_events, - absl::optional end_time_ms) - : packet_stream_(*packet_stream), - packet_stream_it_(packet_stream_.begin()), - output_events_it_(output_events->begin()), - output_events_end_(output_events->end()), - end_time_ms_(end_time_ms) { - RTC_DCHECK(packet_stream); - RTC_DCHECK(output_events); - } - - absl::optional NextPacketTime() const override { - if (packet_stream_it_ == packet_stream_.end()) { - return absl::nullopt; - } - if (end_time_ms_ && packet_stream_it_->rtp.log_time_ms() > *end_time_ms_) { - return absl::nullopt; - } - return packet_stream_it_->rtp.log_time_ms(); - } - - absl::optional NextOutputEventTime() const override { - if (output_events_it_ == output_events_end_) { - return absl::nullopt; - } - if (end_time_ms_ && output_events_it_->log_time_ms() > *end_time_ms_) { - return absl::nullopt; - } - return output_events_it_->log_time_ms(); - } - - std::unique_ptr PopPacket() override { - if (packet_stream_it_ == packet_stream_.end()) { - return std::unique_ptr(); - } - std::unique_ptr packet_data(new PacketData()); - packet_data->header = packet_stream_it_->rtp.header; - packet_data->time_ms = packet_stream_it_->rtp.log_time_ms(); - - // This is a header-only "dummy" packet. Set the payload to all zeros, with - // length according to the virtual length. - packet_data->payload.SetSize(packet_stream_it_->rtp.total_length - - packet_stream_it_->rtp.header_length); - std::fill_n(packet_data->payload.data(), packet_data->payload.size(), 0); - - ++packet_stream_it_; - return packet_data; - } - - void AdvanceOutputEvent() override { - if (output_events_it_ != output_events_end_) { - ++output_events_it_; - } - } - - bool ended() const override { return !NextEventTime(); } - - absl::optional NextHeader() const override { - if (packet_stream_it_ == packet_stream_.end()) { - return absl::nullopt; - } - return packet_stream_it_->rtp.header; - } - - private: - const std::vector& packet_stream_; - std::vector::const_iterator packet_stream_it_; - std::vector::const_iterator output_events_it_; - const std::vector::const_iterator output_events_end_; - const absl::optional end_time_ms_; -}; - -namespace { - -// Factory to create a "replacement decoder" that produces the decoded audio -// by reading from a file rather than from the encoded payloads. -class ReplacementAudioDecoderFactory : public AudioDecoderFactory { - public: - ReplacementAudioDecoderFactory(const absl::string_view replacement_file_name, - int file_sample_rate_hz) - : replacement_file_name_(replacement_file_name), - file_sample_rate_hz_(file_sample_rate_hz) {} - - std::vector GetSupportedDecoders() override { - RTC_NOTREACHED(); - return {}; - } - - bool IsSupportedDecoder(const SdpAudioFormat& format) override { - return true; - } - - std::unique_ptr MakeAudioDecoder( - const SdpAudioFormat& format, - absl::optional codec_pair_id) override { - auto replacement_file = std::make_unique( - replacement_file_name_, file_sample_rate_hz_); - replacement_file->set_output_rate_hz(48000); - return std::make_unique( - std::move(replacement_file), 48000, false); - } - - private: - const std::string replacement_file_name_; - const int file_sample_rate_hz_; -}; - -// Creates a NetEq test object and all necessary input and output helpers. 
Runs -// the test and returns the NetEqDelayAnalyzer object that was used to -// instrument the test. -std::unique_ptr CreateNetEqTestAndRun( - const std::vector* packet_stream, - const std::vector* output_events, - absl::optional end_time_ms, - const std::string& replacement_file_name, - int file_sample_rate_hz) { - std::unique_ptr input( - new NetEqStreamInput(packet_stream, output_events, end_time_ms)); - - constexpr int kReplacementPt = 127; - std::set cn_types; - std::set forbidden_types; - input.reset(new test::NetEqReplacementInput(std::move(input), kReplacementPt, - cn_types, forbidden_types)); - - NetEq::Config config; - config.max_packets_in_buffer = 200; - config.enable_fast_accelerate = true; - - std::unique_ptr output(new test::VoidAudioSink()); - - rtc::scoped_refptr decoder_factory = - new rtc::RefCountedObject( - replacement_file_name, file_sample_rate_hz); - - test::NetEqTest::DecoderMap codecs = { - {kReplacementPt, SdpAudioFormat("l16", 48000, 1)}}; - - std::unique_ptr delay_cb( - new test::NetEqDelayAnalyzer); - std::unique_ptr neteq_stats_getter( - new test::NetEqStatsGetter(std::move(delay_cb))); - test::DefaultNetEqTestErrorCallback error_cb; - test::NetEqTest::Callbacks callbacks; - callbacks.error_callback = &error_cb; - callbacks.post_insert_packet = neteq_stats_getter->delay_analyzer(); - callbacks.get_audio_callback = neteq_stats_getter.get(); - - test::NetEqTest test(config, decoder_factory, codecs, /*text_log=*/nullptr, - /*factory=*/nullptr, std::move(input), std::move(output), - callbacks); - test.Run(); - return neteq_stats_getter; -} -} // namespace - -EventLogAnalyzer::NetEqStatsGetterMap EventLogAnalyzer::SimulateNetEq( - const std::string& replacement_file_name, - int file_sample_rate_hz) const { - NetEqStatsGetterMap neteq_stats; - - for (const auto& stream : parsed_log_.incoming_rtp_packets_by_ssrc()) { - const uint32_t ssrc = stream.ssrc; - if (!IsAudioSsrc(parsed_log_, kIncomingPacket, ssrc)) - continue; - const std::vector* audio_packets = - &stream.incoming_packets; - if (audio_packets == nullptr) { - // No incoming audio stream found. - continue; - } - - RTC_DCHECK(neteq_stats.find(ssrc) == neteq_stats.end()); - - std::map>::const_iterator - output_events_it = parsed_log_.audio_playout_events().find(ssrc); - if (output_events_it == parsed_log_.audio_playout_events().end()) { - // Could not find output events with SSRC matching the input audio stream. - // Using the first available stream of output events. - output_events_it = parsed_log_.audio_playout_events().cbegin(); - } - - int64_t end_time_ms = parsed_log_.first_log_segment().stop_time_ms(); - - neteq_stats[ssrc] = CreateNetEqTestAndRun( - audio_packets, &output_events_it->second, end_time_ms, - replacement_file_name, file_sample_rate_hz); - } - - return neteq_stats; -} - -// Given a NetEqStatsGetter and the SSRC that the NetEqStatsGetter was created -// for, this method generates a plot for the jitter buffer delay profile. 
-void EventLogAnalyzer::CreateAudioJitterBufferGraph( - uint32_t ssrc, - const test::NetEqStatsGetter* stats_getter, - Plot* plot) const { - test::NetEqDelayAnalyzer::Delays arrival_delay_ms; - test::NetEqDelayAnalyzer::Delays corrected_arrival_delay_ms; - test::NetEqDelayAnalyzer::Delays playout_delay_ms; - test::NetEqDelayAnalyzer::Delays target_delay_ms; - - stats_getter->delay_analyzer()->CreateGraphs( - &arrival_delay_ms, &corrected_arrival_delay_ms, &playout_delay_ms, - &target_delay_ms); - - TimeSeries time_series_packet_arrival("packet arrival delay", - LineStyle::kLine); - TimeSeries time_series_relative_packet_arrival( - "Relative packet arrival delay", LineStyle::kLine); - TimeSeries time_series_play_time("Playout delay", LineStyle::kLine); - TimeSeries time_series_target_time("Target delay", LineStyle::kLine, - PointStyle::kHighlight); - - for (const auto& data : arrival_delay_ms) { - const float x = config_.GetCallTimeSec(data.first * 1000); // ms to us. - const float y = data.second; - time_series_packet_arrival.points.emplace_back(TimeSeriesPoint(x, y)); - } - for (const auto& data : corrected_arrival_delay_ms) { - const float x = config_.GetCallTimeSec(data.first * 1000); // ms to us. - const float y = data.second; - time_series_relative_packet_arrival.points.emplace_back( - TimeSeriesPoint(x, y)); - } - for (const auto& data : playout_delay_ms) { - const float x = config_.GetCallTimeSec(data.first * 1000); // ms to us. - const float y = data.second; - time_series_play_time.points.emplace_back(TimeSeriesPoint(x, y)); - } - for (const auto& data : target_delay_ms) { - const float x = config_.GetCallTimeSec(data.first * 1000); // ms to us. - const float y = data.second; - time_series_target_time.points.emplace_back(TimeSeriesPoint(x, y)); - } - - plot->AppendTimeSeries(std::move(time_series_packet_arrival)); - plot->AppendTimeSeries(std::move(time_series_relative_packet_arrival)); - plot->AppendTimeSeries(std::move(time_series_play_time)); - plot->AppendTimeSeries(std::move(time_series_target_time)); - - plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(), - "Time (s)", kLeftMargin, kRightMargin); - plot->SetSuggestedYAxis(0, 1, "Relative delay (ms)", kBottomMargin, - kTopMargin); - plot->SetTitle("NetEq timing for " + - GetStreamName(parsed_log_, kIncomingPacket, ssrc)); -} - -template -void EventLogAnalyzer::CreateNetEqStatsGraphInternal( - const NetEqStatsGetterMap& neteq_stats, - rtc::FunctionView>*( - const test::NetEqStatsGetter*)> data_extractor, - rtc::FunctionView stats_extractor, - const std::string& plot_name, - Plot* plot) const { - std::map time_series; - - for (const auto& st : neteq_stats) { - const uint32_t ssrc = st.first; - const std::vector>* data_vector = - data_extractor(st.second.get()); - for (const auto& data : *data_vector) { - const float time = - config_.GetCallTimeSec(data.first * 1000); // ms to us. 
- const float value = stats_extractor(data.second); - time_series[ssrc].points.emplace_back(TimeSeriesPoint(time, value)); - } - } - - for (auto& series : time_series) { - series.second.label = - GetStreamName(parsed_log_, kIncomingPacket, series.first); - series.second.line_style = LineStyle::kLine; - plot->AppendTimeSeries(std::move(series.second)); - } - - plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(), - "Time (s)", kLeftMargin, kRightMargin); - plot->SetSuggestedYAxis(0, 1, plot_name, kBottomMargin, kTopMargin); - plot->SetTitle(plot_name); -} - -void EventLogAnalyzer::CreateNetEqNetworkStatsGraph( - const NetEqStatsGetterMap& neteq_stats, - rtc::FunctionView stats_extractor, - const std::string& plot_name, - Plot* plot) const { - CreateNetEqStatsGraphInternal( - neteq_stats, - [](const test::NetEqStatsGetter* stats_getter) { - return stats_getter->stats(); - }, - stats_extractor, plot_name, plot); -} - -void EventLogAnalyzer::CreateNetEqLifetimeStatsGraph( - const NetEqStatsGetterMap& neteq_stats, - rtc::FunctionView stats_extractor, - const std::string& plot_name, - Plot* plot) const { - CreateNetEqStatsGraphInternal( - neteq_stats, - [](const test::NetEqStatsGetter* stats_getter) { - return stats_getter->lifetime_stats(); - }, - stats_extractor, plot_name, plot); -} - void EventLogAnalyzer::CreateIceCandidatePairConfigGraph(Plot* plot) { std::map configs_by_cp_id; for (const auto& config : parsed_log_.ice_candidate_pair_configs()) { diff --git a/rtc_tools/rtc_event_log_visualizer/analyzer.h b/rtc_tools/rtc_event_log_visualizer/analyzer.h index ebdfdcc41c..4918cf48e1 100644 --- a/rtc_tools/rtc_event_log_visualizer/analyzer.h +++ b/rtc_tools/rtc_event_log_visualizer/analyzer.h @@ -79,32 +79,6 @@ class EventLogAnalyzer { std::string yaxis_label, Plot* plot); - void CreateAudioEncoderTargetBitrateGraph(Plot* plot); - void CreateAudioEncoderFrameLengthGraph(Plot* plot); - void CreateAudioEncoderPacketLossGraph(Plot* plot); - void CreateAudioEncoderEnableFecGraph(Plot* plot); - void CreateAudioEncoderEnableDtxGraph(Plot* plot); - void CreateAudioEncoderNumChannelsGraph(Plot* plot); - - using NetEqStatsGetterMap = - std::map>; - NetEqStatsGetterMap SimulateNetEq(const std::string& replacement_file_name, - int file_sample_rate_hz) const; - - void CreateAudioJitterBufferGraph(uint32_t ssrc, - const test::NetEqStatsGetter* stats_getter, - Plot* plot) const; - void CreateNetEqNetworkStatsGraph( - const NetEqStatsGetterMap& neteq_stats_getters, - rtc::FunctionView stats_extractor, - const std::string& plot_name, - Plot* plot) const; - void CreateNetEqLifetimeStatsGraph( - const NetEqStatsGetterMap& neteq_stats_getters, - rtc::FunctionView stats_extractor, - const std::string& plot_name, - Plot* plot) const; - void CreateIceCandidatePairConfigGraph(Plot* plot); void CreateIceConnectivityCheckGraph(Plot* plot); @@ -115,15 +89,6 @@ class EventLogAnalyzer { void PrintNotifications(FILE* file); private: - template - void CreateNetEqStatsGraphInternal( - const NetEqStatsGetterMap& neteq_stats, - rtc::FunctionView>*( - const test::NetEqStatsGetter*)> data_extractor, - rtc::FunctionView stats_extractor, - const std::string& plot_name, - Plot* plot) const; - template void CreateAccumulatedPacketsTimeSeries(Plot* plot, const IterableType& packets, diff --git a/rtc_tools/rtc_event_log_visualizer/analyzer_common.h b/rtc_tools/rtc_event_log_visualizer/analyzer_common.h index 3ac651e69a..d5776acf62 100644 --- a/rtc_tools/rtc_event_log_visualizer/analyzer_common.h +++ 
b/rtc_tools/rtc_event_log_visualizer/analyzer_common.h @@ -14,10 +14,19 @@ #include #include +#include "absl/types/optional.h" +#include "api/function_view.h" #include "logging/rtc_event_log/rtc_event_log_parser.h" +#include "rtc_tools/rtc_event_log_visualizer/plot_base.h" namespace webrtc { +constexpr int kNumMicrosecsPerSec = 1000000; +constexpr float kLeftMargin = 0.01f; +constexpr float kRightMargin = 0.02f; +constexpr float kBottomMargin = 0.02f; +constexpr float kTopMargin = 0.05f; + class AnalyzerConfig { public: float GetCallTimeSec(int64_t timestamp_us) const { @@ -74,6 +83,100 @@ std::string GetStreamName(const ParsedRtcEventLog& parsed_log, uint32_t ssrc); std::string GetLayerName(LayerDescription layer); +// For each element in data_view, use |f()| to extract a y-coordinate and +// store the result in a TimeSeries. +template +void ProcessPoints(rtc::FunctionView fx, + rtc::FunctionView(const DataType&)> fy, + const IterableType& data_view, + TimeSeries* result) { + for (size_t i = 0; i < data_view.size(); i++) { + const DataType& elem = data_view[i]; + float x = fx(elem); + absl::optional y = fy(elem); + if (y) + result->points.emplace_back(x, *y); + } +} + +// For each pair of adjacent elements in |data|, use |f()| to extract a +// y-coordinate and store the result in a TimeSeries. Note that the x-coordinate +// will be the time of the second element in the pair. +template +void ProcessPairs( + rtc::FunctionView fx, + rtc::FunctionView(const DataType&, + const DataType&)> fy, + const IterableType& data, + TimeSeries* result) { + for (size_t i = 1; i < data.size(); i++) { + float x = fx(data[i]); + absl::optional y = fy(data[i - 1], data[i]); + if (y) + result->points.emplace_back(x, static_cast(*y)); + } +} + +// For each pair of adjacent elements in |data|, use |f()| to extract a +// y-coordinate and store the result in a TimeSeries. Note that the x-coordinate +// will be the time of the second element in the pair. +template +void AccumulatePairs( + rtc::FunctionView fx, + rtc::FunctionView(const DataType&, + const DataType&)> fy, + const IterableType& data, + TimeSeries* result) { + ResultType sum = 0; + for (size_t i = 1; i < data.size(); i++) { + float x = fx(data[i]); + absl::optional y = fy(data[i - 1], data[i]); + if (y) { + sum += *y; + result->points.emplace_back(x, static_cast(sum)); + } + } +} + +// Calculates a moving average of |data| and stores the result in a TimeSeries. +// A data point is generated every |step| microseconds from |begin_time| +// to |end_time|. The value of each data point is the average of the data +// during the preceding |window_duration_us| microseconds. 
+template +void MovingAverage( + rtc::FunctionView(const DataType&)> fy, + const IterableType& data_view, + AnalyzerConfig config, + TimeSeries* result) { + size_t window_index_begin = 0; + size_t window_index_end = 0; + ResultType sum_in_window = 0; + + for (int64_t t = config.begin_time_; t < config.end_time_ + config.step_; + t += config.step_) { + while (window_index_end < data_view.size() && + data_view[window_index_end].log_time_us() < t) { + absl::optional value = fy(data_view[window_index_end]); + if (value) + sum_in_window += *value; + ++window_index_end; + } + while (window_index_begin < data_view.size() && + data_view[window_index_begin].log_time_us() < + t - config.window_duration_) { + absl::optional value = fy(data_view[window_index_begin]); + if (value) + sum_in_window -= *value; + ++window_index_begin; + } + float window_duration_s = + static_cast(config.window_duration_) / kNumMicrosecsPerSec; + float x = config.GetCallTimeSec(t); + float y = sum_in_window / window_duration_s; + result->points.emplace_back(x, y); + } +} + } // namespace webrtc #endif // RTC_TOOLS_RTC_EVENT_LOG_VISUALIZER_ANALYZER_COMMON_H_ diff --git a/rtc_tools/rtc_event_log_visualizer/main.cc b/rtc_tools/rtc_event_log_visualizer/main.cc index 42ee7e13dd..2aa1653a93 100644 --- a/rtc_tools/rtc_event_log_visualizer/main.cc +++ b/rtc_tools/rtc_event_log_visualizer/main.cc @@ -31,6 +31,7 @@ #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "rtc_tools/rtc_event_log_visualizer/alerts.h" +#include "rtc_tools/rtc_event_log_visualizer/analyze_audio.h" #include "rtc_tools/rtc_event_log_visualizer/analyzer.h" #include "rtc_tools/rtc_event_log_visualizer/plot_base.h" #include "rtc_tools/rtc_event_log_visualizer/plot_protobuf.h" @@ -436,22 +437,22 @@ int main(int argc, char* argv[]) { plots.RegisterPlot("pacer_delay", [&](Plot* plot) { analyzer.CreatePacerDelayGraph(plot); }); plots.RegisterPlot("audio_encoder_bitrate", [&](Plot* plot) { - analyzer.CreateAudioEncoderTargetBitrateGraph(plot); + CreateAudioEncoderTargetBitrateGraph(parsed_log, config, plot); }); plots.RegisterPlot("audio_encoder_frame_length", [&](Plot* plot) { - analyzer.CreateAudioEncoderFrameLengthGraph(plot); + CreateAudioEncoderFrameLengthGraph(parsed_log, config, plot); }); plots.RegisterPlot("audio_encoder_packet_loss", [&](Plot* plot) { - analyzer.CreateAudioEncoderPacketLossGraph(plot); + CreateAudioEncoderPacketLossGraph(parsed_log, config, plot); }); plots.RegisterPlot("audio_encoder_fec", [&](Plot* plot) { - analyzer.CreateAudioEncoderEnableFecGraph(plot); + CreateAudioEncoderEnableFecGraph(parsed_log, config, plot); }); plots.RegisterPlot("audio_encoder_dtx", [&](Plot* plot) { - analyzer.CreateAudioEncoderEnableDtxGraph(plot); + CreateAudioEncoderEnableDtxGraph(parsed_log, config, plot); }); plots.RegisterPlot("audio_encoder_num_channels", [&](Plot* plot) { - analyzer.CreateAudioEncoderNumChannelsGraph(plot); + CreateAudioEncoderNumChannelsGraph(parsed_log, config, plot); }); plots.RegisterPlot("ice_candidate_pair_config", [&](Plot* plot) { @@ -474,14 +475,14 @@ int main(int argc, char* argv[]) { wav_path = webrtc::test::ResourcePath( "audio_processing/conversational_speech/EN_script2_F_sp2_B1", "wav"); } - absl::optional neteq_stats; + absl::optional neteq_stats; plots.RegisterPlot("simulated_neteq_expand_rate", [&](Plot* plot) { if (!neteq_stats) { - neteq_stats = analyzer.SimulateNetEq(wav_path, 48000); + neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000); } - analyzer.CreateNetEqNetworkStatsGraph( 
- *neteq_stats, + webrtc::CreateNetEqNetworkStatsGraph( + parsed_log, config, *neteq_stats, [](const webrtc::NetEqNetworkStatistics& stats) { return stats.expand_rate / 16384.f; }, @@ -490,10 +491,10 @@ int main(int argc, char* argv[]) { plots.RegisterPlot("simulated_neteq_speech_expand_rate", [&](Plot* plot) { if (!neteq_stats) { - neteq_stats = analyzer.SimulateNetEq(wav_path, 48000); + neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000); } - analyzer.CreateNetEqNetworkStatsGraph( - *neteq_stats, + webrtc::CreateNetEqNetworkStatsGraph( + parsed_log, config, *neteq_stats, [](const webrtc::NetEqNetworkStatistics& stats) { return stats.speech_expand_rate / 16384.f; }, @@ -502,10 +503,10 @@ int main(int argc, char* argv[]) { plots.RegisterPlot("simulated_neteq_accelerate_rate", [&](Plot* plot) { if (!neteq_stats) { - neteq_stats = analyzer.SimulateNetEq(wav_path, 48000); + neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000); } - analyzer.CreateNetEqNetworkStatsGraph( - *neteq_stats, + webrtc::CreateNetEqNetworkStatsGraph( + parsed_log, config, *neteq_stats, [](const webrtc::NetEqNetworkStatistics& stats) { return stats.accelerate_rate / 16384.f; }, @@ -514,10 +515,10 @@ int main(int argc, char* argv[]) { plots.RegisterPlot("simulated_neteq_preemptive_rate", [&](Plot* plot) { if (!neteq_stats) { - neteq_stats = analyzer.SimulateNetEq(wav_path, 48000); + neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000); } - analyzer.CreateNetEqNetworkStatsGraph( - *neteq_stats, + webrtc::CreateNetEqNetworkStatsGraph( + parsed_log, config, *neteq_stats, [](const webrtc::NetEqNetworkStatistics& stats) { return stats.preemptive_rate / 16384.f; }, @@ -526,10 +527,10 @@ int main(int argc, char* argv[]) { plots.RegisterPlot("simulated_neteq_packet_loss_rate", [&](Plot* plot) { if (!neteq_stats) { - neteq_stats = analyzer.SimulateNetEq(wav_path, 48000); + neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000); } - analyzer.CreateNetEqNetworkStatsGraph( - *neteq_stats, + webrtc::CreateNetEqNetworkStatsGraph( + parsed_log, config, *neteq_stats, [](const webrtc::NetEqNetworkStatistics& stats) { return stats.packet_loss_rate / 16384.f; }, @@ -538,10 +539,10 @@ int main(int argc, char* argv[]) { plots.RegisterPlot("simulated_neteq_concealment_events", [&](Plot* plot) { if (!neteq_stats) { - neteq_stats = analyzer.SimulateNetEq(wav_path, 48000); + neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000); } - analyzer.CreateNetEqLifetimeStatsGraph( - *neteq_stats, + webrtc::CreateNetEqLifetimeStatsGraph( + parsed_log, config, *neteq_stats, [](const webrtc::NetEqLifetimeStatistics& stats) { return static_cast(stats.concealment_events); }, @@ -550,10 +551,10 @@ int main(int argc, char* argv[]) { plots.RegisterPlot("simulated_neteq_preferred_buffer_size", [&](Plot* plot) { if (!neteq_stats) { - neteq_stats = analyzer.SimulateNetEq(wav_path, 48000); + neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000); } - analyzer.CreateNetEqNetworkStatsGraph( - *neteq_stats, + webrtc::CreateNetEqNetworkStatsGraph( + parsed_log, config, *neteq_stats, [](const webrtc::NetEqNetworkStatistics& stats) { return stats.preferred_buffer_size_ms; }, @@ -614,13 +615,13 @@ int main(int argc, char* argv[]) { if (absl::c_find(plot_flags, "simulated_neteq_jitter_buffer_delay") != plot_flags.end()) { if (!neteq_stats) { - neteq_stats = analyzer.SimulateNetEq(wav_path, 48000); + neteq_stats = webrtc::SimulateNetEq(parsed_log, config, 
wav_path, 48000);
     }
-    for (webrtc::EventLogAnalyzer::NetEqStatsGetterMap::const_iterator it =
-             neteq_stats->cbegin();
+    for (webrtc::NetEqStatsGetterMap::const_iterator it = neteq_stats->cbegin();
          it != neteq_stats->cend(); ++it) {
-      analyzer.CreateAudioJitterBufferGraph(it->first, it->second.get(),
-                                            collection->AppendNewPlot());
+      webrtc::CreateAudioJitterBufferGraph(parsed_log, config, it->first,
+                                           it->second.get(),
+                                           collection->AppendNewPlot());
     }
   }
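Because the NetEq helpers are now free functions keyed only on the parsed log, the config, and the shared NetEqStatsGetterMap, adding another simulated-NetEq plot in main.cc keeps following the lazy-SimulateNetEq pattern shown above. The sketch below is illustrative only and not part of the patch: the plot name is made up, and it assumes NetEqNetworkStatistics exposes a current_buffer_size_ms field alongside the preferred_buffer_size_ms field used earlier.

// Sketch only, not part of the diff. Mirrors the registrations above;
// "simulated_neteq_current_buffer_size" is a made-up plot name and
// current_buffer_size_ms is assumed to be a NetEqNetworkStatistics field.
plots.RegisterPlot("simulated_neteq_current_buffer_size", [&](Plot* plot) {
  if (!neteq_stats) {
    neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000);
  }
  webrtc::CreateNetEqNetworkStatsGraph(
      parsed_log, config, *neteq_stats,
      [](const webrtc::NetEqNetworkStatistics& stats) {
        return stats.current_buffer_size_ms;
      },
      "Current buffer size (ms)", plot);
});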