Remove NetEq extra delay option.

Bug: b/156734419
Change-Id: I787e6961ad283990d633029c0cf296e10b825875
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/237403
Reviewed-by: Gustaf Ullberg <gustaf@webrtc.org>
Reviewed-by: Henrik Lundin <henrik.lundin@webrtc.org>
Commit-Queue: Jakob Ivarsson <jakobi@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#35326}
Jakob Ivarsson, 2021-11-08 17:22:51 +01:00
committed by WebRTC LUCI CQ
parent fa68ac0c4e
commit 4a97d7281f
12 changed files with 15 additions and 358 deletions
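For context, summarized from the diffs below: the removed option let NetEq append a fixed playout delay after its jitter buffer, configured either through NetEq::Config::extra_output_delay_ms or the WebRTC-Audio-NetEqExtraDelay field trial, and implemented in NetEqImpl as a circular chain of 10 ms AudioFrames. A minimal sketch of how the option used to be enabled before this change (only the config fields and the field trial string appear in the diffs; the helper function and chosen values are illustrative):

#include "api/neteq/neteq.h"  // NetEq::Config, as in the WebRTC source tree.

// Sketch of the pre-removal API. sample_rate_hz and extra_output_delay_ms
// are the fields visible in the diffs below; the rest is illustrative.
webrtc::NetEq::Config MakeConfigWithExtraDelay() {
  webrtc::NetEq::Config config;
  config.sample_rate_hz = 16000;
  // Extra delay added to NetEq's output, mainly for testing; had to be a
  // non-negative multiple of 10 ms. Removed by this change.
  config.extra_output_delay_ms = 100;
  return config;
}

// The same delay could instead be forced via a field trial string:
//   WebRTC-Audio-NetEqExtraDelay/Enabled-100/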

View File

@@ -11,8 +11,6 @@
#include "api/audio/audio_frame.h"
#include <string.h>
#include <algorithm>
#include <utility>
#include "rtc_base/checks.h"
#include "rtc_base/time_utils.h"
@@ -24,28 +22,6 @@ AudioFrame::AudioFrame() {
static_assert(sizeof(data_) == kMaxDataSizeBytes, "kMaxDataSizeBytes");
}
void swap(AudioFrame& a, AudioFrame& b) {
using std::swap;
swap(a.timestamp_, b.timestamp_);
swap(a.elapsed_time_ms_, b.elapsed_time_ms_);
swap(a.ntp_time_ms_, b.ntp_time_ms_);
swap(a.samples_per_channel_, b.samples_per_channel_);
swap(a.sample_rate_hz_, b.sample_rate_hz_);
swap(a.num_channels_, b.num_channels_);
swap(a.channel_layout_, b.channel_layout_);
swap(a.speech_type_, b.speech_type_);
swap(a.vad_activity_, b.vad_activity_);
swap(a.profile_timestamp_ms_, b.profile_timestamp_ms_);
swap(a.packet_infos_, b.packet_infos_);
const size_t length_a = a.samples_per_channel_ * a.num_channels_;
const size_t length_b = b.samples_per_channel_ * b.num_channels_;
RTC_DCHECK_LE(length_a, AudioFrame::kMaxDataSizeSamples);
RTC_DCHECK_LE(length_b, AudioFrame::kMaxDataSizeSamples);
std::swap_ranges(a.data_, a.data_ + std::max(length_a, length_b), b.data_);
swap(a.muted_, b.muted_);
swap(a.absolute_capture_timestamp_ms_, b.absolute_capture_timestamp_ms_);
}
void AudioFrame::Reset() {
ResetWithoutMuting();
muted_ = true;

View File

@@ -14,8 +14,6 @@
#include <stddef.h>
#include <stdint.h>
#include <utility>
#include "api/audio/channel_layout.h"
#include "api/rtp_packet_infos.h"
#include "rtc_base/constructor_magic.h"
@@ -60,8 +58,6 @@ class AudioFrame {
AudioFrame();
friend void swap(AudioFrame& a, AudioFrame& b);
// Resets all members to their default state.
void Reset();
// Same as Reset(), but leaves mute state unchanged. Muting a frame requires

View File

@@ -133,54 +133,4 @@ TEST(AudioFrameTest, CopyFrom) {
EXPECT_EQ(0, memcmp(frame2.data(), frame1.data(), sizeof(samples)));
}
TEST(AudioFrameTest, SwapFrames) {
AudioFrame frame1, frame2;
int16_t samples1[kNumChannelsMono * kSamplesPerChannel];
for (size_t i = 0; i < kNumChannelsMono * kSamplesPerChannel; ++i) {
samples1[i] = i;
}
frame1.UpdateFrame(kTimestamp, samples1, kSamplesPerChannel, kSampleRateHz,
AudioFrame::kPLC, AudioFrame::kVadActive,
kNumChannelsMono);
frame1.set_absolute_capture_timestamp_ms(12345678);
const auto frame1_channel_layout = frame1.channel_layout();
int16_t samples2[(kNumChannelsMono + 1) * (kSamplesPerChannel + 1)];
for (size_t i = 0; i < (kNumChannelsMono + 1) * (kSamplesPerChannel + 1);
++i) {
samples2[i] = 1000 + i;
}
frame2.UpdateFrame(kTimestamp + 1, samples2, kSamplesPerChannel + 1,
kSampleRateHz + 1, AudioFrame::kNormalSpeech,
AudioFrame::kVadPassive, kNumChannelsMono + 1);
const auto frame2_channel_layout = frame2.channel_layout();
swap(frame1, frame2);
EXPECT_EQ(kTimestamp + 1, frame1.timestamp_);
ASSERT_EQ(kSamplesPerChannel + 1, frame1.samples_per_channel_);
EXPECT_EQ(kSampleRateHz + 1, frame1.sample_rate_hz_);
EXPECT_EQ(AudioFrame::kNormalSpeech, frame1.speech_type_);
EXPECT_EQ(AudioFrame::kVadPassive, frame1.vad_activity_);
ASSERT_EQ(kNumChannelsMono + 1, frame1.num_channels_);
for (size_t i = 0; i < (kNumChannelsMono + 1) * (kSamplesPerChannel + 1);
++i) {
EXPECT_EQ(samples2[i], frame1.data()[i]);
}
EXPECT_FALSE(frame1.absolute_capture_timestamp_ms());
EXPECT_EQ(frame2_channel_layout, frame1.channel_layout());
EXPECT_EQ(kTimestamp, frame2.timestamp_);
ASSERT_EQ(kSamplesPerChannel, frame2.samples_per_channel_);
EXPECT_EQ(kSampleRateHz, frame2.sample_rate_hz_);
EXPECT_EQ(AudioFrame::kPLC, frame2.speech_type_);
EXPECT_EQ(AudioFrame::kVadActive, frame2.vad_activity_);
ASSERT_EQ(kNumChannelsMono, frame2.num_channels_);
for (size_t i = 0; i < kNumChannelsMono * kSamplesPerChannel; ++i) {
EXPECT_EQ(samples1[i], frame2.data()[i]);
}
EXPECT_EQ(12345678, frame2.absolute_capture_timestamp_ms());
EXPECT_EQ(frame1_channel_layout, frame2.channel_layout());
}
} // namespace webrtc

View File

@@ -30,8 +30,7 @@ std::string NetEq::Config::ToString() const {
<< ", min_delay_ms=" << min_delay_ms << ", enable_fast_accelerate="
<< (enable_fast_accelerate ? "true" : "false")
<< ", enable_muted_state=" << (enable_muted_state ? "true" : "false")
<< ", enable_rtx_handling=" << (enable_rtx_handling ? "true" : "false")
<< ", extra_output_delay_ms=" << extra_output_delay_ms;
<< ", enable_rtx_handling=" << (enable_rtx_handling ? "true" : "false");
return ss.str();
}

View File

@@ -136,10 +136,6 @@ class NetEq {
bool enable_rtx_handling = false;
absl::optional<AudioCodecPairId> codec_pair_id;
bool for_test_no_time_stretching = false; // Use only for testing.
// Adds extra delay to the output of NetEq, without affecting jitter or
// loss behavior. This is mainly for testing. Value must be a non-negative
// multiple of 10 ms.
int extra_output_delay_ms = 0;
};
enum ReturnCodes { kOK = 0, kFail = -1 };

View File

@@ -50,7 +50,6 @@
#include "rtc_base/strings/audio_format_to_string.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/clock.h"
#include "system_wrappers/include/field_trial.h"
namespace webrtc {
namespace {
@@ -71,24 +70,6 @@ std::unique_ptr<NetEqController> CreateNetEqController(
return controller_factory.CreateNetEqController(config);
}
int GetDelayChainLengthMs(int config_extra_delay_ms) {
constexpr char kExtraDelayFieldTrial[] = "WebRTC-Audio-NetEqExtraDelay";
if (webrtc::field_trial::IsEnabled(kExtraDelayFieldTrial)) {
const auto field_trial_string =
webrtc::field_trial::FindFullName(kExtraDelayFieldTrial);
int extra_delay_ms = -1;
if (sscanf(field_trial_string.c_str(), "Enabled-%d", &extra_delay_ms) ==
1 &&
extra_delay_ms >= 0 && extra_delay_ms <= 2000) {
RTC_LOG(LS_INFO) << "Delay chain length set to " << extra_delay_ms
<< " ms in field trial";
return (extra_delay_ms / 10) * 10; // Rounding down to multiple of 10.
}
}
// Field trial not set, or invalid value read. Use value from config.
return config_extra_delay_ms;
}
} // namespace
NetEqImpl::Dependencies::Dependencies(
@@ -154,10 +135,7 @@ NetEqImpl::NetEqImpl(const NetEq::Config& config,
speech_expand_uma_logger_("WebRTC.Audio.SpeechExpandRatePercent",
10, // Report once every 10 s.
tick_timer_.get()),
no_time_stretching_(config.for_test_no_time_stretching),
output_delay_chain_ms_(
GetDelayChainLengthMs(config.extra_output_delay_ms)),
output_delay_chain_(rtc::CheckedDivExact(output_delay_chain_ms_, 10)) {
no_time_stretching_(config.for_test_no_time_stretching) {
RTC_LOG(LS_INFO) << "NetEq config: " << config.ToString();
int fs = config.sample_rate_hz;
if (fs != 8000 && fs != 16000 && fs != 32000 && fs != 48000) {
@@ -275,27 +253,8 @@ int NetEqImpl::GetAudio(AudioFrame* audio_frame,
last_output_sample_rate_hz_ == 48000)
<< "Unexpected sample rate " << last_output_sample_rate_hz_;
if (!output_delay_chain_.empty()) {
if (output_delay_chain_empty_) {
for (auto& f : output_delay_chain_) {
f.CopyFrom(*audio_frame);
}
output_delay_chain_empty_ = false;
delayed_last_output_sample_rate_hz_ = last_output_sample_rate_hz_;
} else {
RTC_DCHECK_GE(output_delay_chain_ix_, 0);
RTC_DCHECK_LT(output_delay_chain_ix_, output_delay_chain_.size());
swap(output_delay_chain_[output_delay_chain_ix_], *audio_frame);
*muted = audio_frame->muted();
output_delay_chain_ix_ =
(output_delay_chain_ix_ + 1) % output_delay_chain_.size();
delayed_last_output_sample_rate_hz_ = audio_frame->sample_rate_hz();
}
}
if (current_sample_rate_hz) {
*current_sample_rate_hz = delayed_last_output_sample_rate_hz_.value_or(
last_output_sample_rate_hz_);
*current_sample_rate_hz = last_output_sample_rate_hz_;
}
return kOK;
@@ -340,8 +299,7 @@ bool NetEqImpl::SetMinimumDelay(int delay_ms) {
MutexLock lock(&mutex_);
if (delay_ms >= 0 && delay_ms <= 10000) {
RTC_DCHECK(controller_.get());
return controller_->SetMinimumDelay(
std::max(delay_ms - output_delay_chain_ms_, 0));
return controller_->SetMinimumDelay(delay_ms);
}
return false;
}
@@ -350,8 +308,7 @@ bool NetEqImpl::SetMaximumDelay(int delay_ms) {
MutexLock lock(&mutex_);
if (delay_ms >= 0 && delay_ms <= 10000) {
RTC_DCHECK(controller_.get());
return controller_->SetMaximumDelay(
std::max(delay_ms - output_delay_chain_ms_, 0));
return controller_->SetMaximumDelay(delay_ms);
}
return false;
}
@@ -372,7 +329,7 @@ int NetEqImpl::GetBaseMinimumDelayMs() const {
int NetEqImpl::TargetDelayMs() const {
MutexLock lock(&mutex_);
RTC_DCHECK(controller_.get());
return controller_->TargetLevelMs() + output_delay_chain_ms_;
return controller_->TargetLevelMs();
}
int NetEqImpl::FilteredCurrentDelayMs() const {
@@ -382,8 +339,7 @@ int NetEqImpl::FilteredCurrentDelayMs() const {
const int delay_samples =
controller_->GetFilteredBufferLevel() + sync_buffer_->FutureLength();
// The division below will truncate. The return value is in ms.
return delay_samples / rtc::CheckedDivExact(fs_hz_, 1000) +
output_delay_chain_ms_;
return delay_samples / rtc::CheckedDivExact(fs_hz_, 1000);
}
int NetEqImpl::NetworkStatistics(NetEqNetworkStatistics* stats) {
@@ -391,11 +347,6 @@ int NetEqImpl::NetworkStatistics(NetEqNetworkStatistics* stats) {
RTC_DCHECK(decoder_database_.get());
*stats = CurrentNetworkStatisticsInternal();
stats_->GetNetworkStatistics(decoder_frame_length_, stats);
// Compensate for output delay chain.
stats->mean_waiting_time_ms += output_delay_chain_ms_;
stats->median_waiting_time_ms += output_delay_chain_ms_;
stats->min_waiting_time_ms += output_delay_chain_ms_;
stats->max_waiting_time_ms += output_delay_chain_ms_;
return 0;
}
@@ -417,10 +368,6 @@ NetEqNetworkStatistics NetEqImpl::CurrentNetworkStatisticsInternal() const {
RTC_DCHECK_GT(fs_hz_, 0);
stats.current_buffer_size_ms =
static_cast<uint16_t>(total_samples_in_buffers * 1000 / fs_hz_);
// Compensate for output delay chain.
stats.current_buffer_size_ms += output_delay_chain_ms_;
stats.preferred_buffer_size_ms += output_delay_chain_ms_;
return stats;
}
@@ -464,19 +411,12 @@ absl::optional<uint32_t> NetEqImpl::GetPlayoutTimestamp() const {
// which is indicated by returning an empty value.
return absl::nullopt;
}
size_t sum_samples_in_output_delay_chain = 0;
for (const auto& audio_frame : output_delay_chain_) {
sum_samples_in_output_delay_chain += audio_frame.samples_per_channel();
}
return timestamp_scaler_->ToExternal(
playout_timestamp_ -
static_cast<uint32_t>(sum_samples_in_output_delay_chain));
return timestamp_scaler_->ToExternal(playout_timestamp_);
}
int NetEqImpl::last_output_sample_rate_hz() const {
MutexLock lock(&mutex_);
return delayed_last_output_sample_rate_hz_.value_or(
last_output_sample_rate_hz_);
return last_output_sample_rate_hz_;
}
absl::optional<NetEq::DecoderFormat> NetEqImpl::GetDecoderFormat(
@@ -2072,9 +2012,8 @@ int NetEqImpl::ExtractPackets(size_t required_samples,
extracted_samples = packet->timestamp - first_timestamp + packet_duration;
RTC_DCHECK(controller_);
stats_->JitterBufferDelay(
packet_duration, waiting_time_ms + output_delay_chain_ms_,
controller_->TargetLevelMs() + output_delay_chain_ms_);
stats_->JitterBufferDelay(packet_duration, waiting_time_ms,
controller_->TargetLevelMs());
packet_list->push_back(std::move(*packet)); // Store packet in list.
packet = absl::nullopt; // Ensure it's never used after the move.
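
The block removed from GetAudio above implemented the extra output delay as a circular buffer of AudioFrames, one 10 ms frame per slot, primed with copies of the first output frame and then swapping one frame in and out per call. A simplified standalone sketch of that mechanism (the real code used AudioFrame and also carried the mute flag and the delayed sample rate; this template is illustrative):

#include <cstddef>
#include <utility>
#include <vector>

// Delays each 10 ms frame by frames_.size() * 10 ms using a circular buffer.
// Frame must be default-constructible and swappable; in NetEq this was
// AudioFrame, and the chain was first filled with copies of the initial frame.
template <typename Frame>
class OutputDelayChain {
 public:
  explicit OutputDelayChain(int delay_ms) : frames_(delay_ms / 10) {}

  void Process(Frame* frame) {
    if (frames_.empty()) return;           // Zero delay: pass through.
    std::swap(frames_[next_], *frame);     // Oldest stored frame comes out.
    next_ = (next_ + 1) % frames_.size();  // Advance the circular index.
  }

 private:
  std::vector<Frame> frames_;
  std::size_t next_ = 0;
};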

View File

@@ -399,22 +399,6 @@ class NetEqImpl : public webrtc::NetEq {
ExpandUmaLogger speech_expand_uma_logger_ RTC_GUARDED_BY(mutex_);
bool no_time_stretching_ RTC_GUARDED_BY(mutex_); // Only used for test.
rtc::BufferT<int16_t> concealment_audio_ RTC_GUARDED_BY(mutex_);
// Data members used for adding extra delay to the output of NetEq.
// The delay in ms (which is 10 times the number of elements in
// output_delay_chain_).
const int output_delay_chain_ms_ RTC_GUARDED_BY(mutex_);
// Vector of AudioFrames which contains the delayed audio. Accessed as a
// circular buffer.
std::vector<AudioFrame> output_delay_chain_ RTC_GUARDED_BY(mutex_);
// Index into output_delay_chain_.
size_t output_delay_chain_ix_ RTC_GUARDED_BY(mutex_) = 0;
// Did output_delay_chain_ get populated yet?
bool output_delay_chain_empty_ RTC_GUARDED_BY(mutex_) = true;
// Contains the sample rate of the AudioFrame last emitted from the delay
// chain. If the extra output delay chain is not used, or if no audio has been
// emitted yet, the variable is empty.
absl::optional<int> delayed_last_output_sample_rate_hz_
RTC_GUARDED_BY(mutex_);
private:
RTC_DISALLOW_COPY_AND_ASSIGN(NetEqImpl);

View File

@@ -1105,186 +1105,5 @@ TEST(NetEqNoTimeStretchingMode, RunTest) {
EXPECT_EQ(0, stats.preemptive_rate);
}
namespace {
// Helper classes and data types and functions for NetEqOutputDelayTest.
class VectorAudioSink : public AudioSink {
public:
// Does not take ownership of the vector.
VectorAudioSink(std::vector<int16_t>* output_vector) : v_(output_vector) {}
virtual ~VectorAudioSink() = default;
bool WriteArray(const int16_t* audio, size_t num_samples) override {
v_->reserve(v_->size() + num_samples);
for (size_t i = 0; i < num_samples; ++i) {
v_->push_back(audio[i]);
}
return true;
}
private:
std::vector<int16_t>* const v_;
};
struct TestResult {
NetEqLifetimeStatistics lifetime_stats;
NetEqNetworkStatistics network_stats;
absl::optional<uint32_t> playout_timestamp;
int target_delay_ms;
int filtered_current_delay_ms;
int sample_rate_hz;
};
// This class is used as callback object to NetEqTest to collect some stats
// at the end of the simulation.
class SimEndStatsCollector : public NetEqSimulationEndedCallback {
public:
SimEndStatsCollector(TestResult& result) : result_(result) {}
void SimulationEnded(int64_t /*simulation_time_ms*/, NetEq* neteq) override {
result_.playout_timestamp = neteq->GetPlayoutTimestamp();
result_.target_delay_ms = neteq->TargetDelayMs();
result_.filtered_current_delay_ms = neteq->FilteredCurrentDelayMs();
result_.sample_rate_hz = neteq->last_output_sample_rate_hz();
}
private:
TestResult& result_;
};
TestResult DelayLineNetEqTest(int delay_ms,
std::vector<int16_t>* output_vector) {
NetEq::Config config;
config.for_test_no_time_stretching = true;
config.extra_output_delay_ms = delay_ms;
auto codecs = NetEqTest::StandardDecoderMap();
NetEqPacketSourceInput::RtpHeaderExtensionMap rtp_ext_map = {
{1, kRtpExtensionAudioLevel},
{3, kRtpExtensionAbsoluteSendTime},
{5, kRtpExtensionTransportSequenceNumber},
{7, kRtpExtensionVideoContentType},
{8, kRtpExtensionVideoTiming}};
std::unique_ptr<NetEqInput> input = std::make_unique<NetEqRtpDumpInput>(
webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp"),
rtp_ext_map, absl::nullopt /*No SSRC filter*/);
std::unique_ptr<TimeLimitedNetEqInput> input_time_limit(
new TimeLimitedNetEqInput(std::move(input), 10000));
std::unique_ptr<AudioSink> output =
std::make_unique<VectorAudioSink>(output_vector);
TestResult result;
SimEndStatsCollector stats_collector(result);
NetEqTest::Callbacks callbacks;
callbacks.simulation_ended_callback = &stats_collector;
NetEqTest test(config, CreateBuiltinAudioDecoderFactory(), codecs,
/*text_log=*/nullptr, /*neteq_factory=*/nullptr,
/*input=*/std::move(input_time_limit), std::move(output),
callbacks);
test.Run();
result.lifetime_stats = test.LifetimeStats();
result.network_stats = test.SimulationStats();
return result;
}
} // namespace
// Tests the extra output delay functionality of NetEq.
TEST(NetEqOutputDelayTest, RunTest) {
std::vector<int16_t> output;
const auto result_no_delay = DelayLineNetEqTest(0, &output);
std::vector<int16_t> output_delayed;
constexpr int kDelayMs = 100;
const auto result_delay = DelayLineNetEqTest(kDelayMs, &output_delayed);
// Verify that the loss concealment remains unchanged. The point of the delay
// is to not affect the jitter buffering behavior.
// First verify that there are concealments in the test.
EXPECT_GT(result_no_delay.lifetime_stats.concealed_samples, 0u);
// And that not all of the output is concealment.
EXPECT_GT(result_no_delay.lifetime_stats.total_samples_received,
result_no_delay.lifetime_stats.concealed_samples);
// Now verify that they remain unchanged by the delay.
EXPECT_EQ(result_no_delay.lifetime_stats.concealed_samples,
result_delay.lifetime_stats.concealed_samples);
// Accelerate and pre-emptive expand should also be unchanged.
EXPECT_EQ(result_no_delay.lifetime_stats.inserted_samples_for_deceleration,
result_delay.lifetime_stats.inserted_samples_for_deceleration);
EXPECT_EQ(result_no_delay.lifetime_stats.removed_samples_for_acceleration,
result_delay.lifetime_stats.removed_samples_for_acceleration);
// Verify that delay stats are increased with the delay chain.
EXPECT_EQ(
result_no_delay.lifetime_stats.jitter_buffer_delay_ms +
kDelayMs * result_no_delay.lifetime_stats.jitter_buffer_emitted_count,
result_delay.lifetime_stats.jitter_buffer_delay_ms);
EXPECT_EQ(
result_no_delay.lifetime_stats.jitter_buffer_target_delay_ms +
kDelayMs * result_no_delay.lifetime_stats.jitter_buffer_emitted_count,
result_delay.lifetime_stats.jitter_buffer_target_delay_ms);
EXPECT_EQ(result_no_delay.network_stats.current_buffer_size_ms + kDelayMs,
result_delay.network_stats.current_buffer_size_ms);
EXPECT_EQ(result_no_delay.network_stats.preferred_buffer_size_ms + kDelayMs,
result_delay.network_stats.preferred_buffer_size_ms);
EXPECT_EQ(result_no_delay.network_stats.mean_waiting_time_ms + kDelayMs,
result_delay.network_stats.mean_waiting_time_ms);
EXPECT_EQ(result_no_delay.network_stats.median_waiting_time_ms + kDelayMs,
result_delay.network_stats.median_waiting_time_ms);
EXPECT_EQ(result_no_delay.network_stats.min_waiting_time_ms + kDelayMs,
result_delay.network_stats.min_waiting_time_ms);
EXPECT_EQ(result_no_delay.network_stats.max_waiting_time_ms + kDelayMs,
result_delay.network_stats.max_waiting_time_ms);
ASSERT_TRUE(result_no_delay.playout_timestamp);
ASSERT_TRUE(result_delay.playout_timestamp);
EXPECT_EQ(*result_no_delay.playout_timestamp -
static_cast<uint32_t>(
kDelayMs *
rtc::CheckedDivExact(result_no_delay.sample_rate_hz, 1000)),
*result_delay.playout_timestamp);
EXPECT_EQ(result_no_delay.target_delay_ms + kDelayMs,
result_delay.target_delay_ms);
EXPECT_EQ(result_no_delay.filtered_current_delay_ms + kDelayMs,
result_delay.filtered_current_delay_ms);
// Verify expected delay in decoded signal. The test vector uses 8 kHz sample
// rate, so the delay will be 8 times the delay in ms.
constexpr size_t kExpectedDelaySamples = kDelayMs * 8;
for (size_t i = 0;
i < output.size() && i + kExpectedDelaySamples < output_delayed.size();
++i) {
EXPECT_EQ(output[i], output_delayed[i + kExpectedDelaySamples]);
}
}
// Tests the extra output delay functionality of NetEq when configured via
// field trial.
TEST(NetEqOutputDelayTest, RunTestWithFieldTrial) {
test::ScopedFieldTrials field_trial(
"WebRTC-Audio-NetEqExtraDelay/Enabled-50/");
constexpr int kExpectedDelayMs = 50;
std::vector<int16_t> output;
const auto result = DelayLineNetEqTest(0, &output);
// The base delay values are taken from the results of the non-delayed case in
// NetEqOutputDelayTest.RunTest above.
EXPECT_EQ(20 + kExpectedDelayMs, result.target_delay_ms);
EXPECT_EQ(60 + kExpectedDelayMs, result.filtered_current_delay_ms);
}
// Set a non-multiple-of-10 value in the field trial, and verify that we don't
// crash, and that the result is rounded down.
TEST(NetEqOutputDelayTest, RunTestWithFieldTrialOddValue) {
test::ScopedFieldTrials field_trial(
"WebRTC-Audio-NetEqExtraDelay/Enabled-103/");
constexpr int kRoundedDelayMs = 100;
std::vector<int16_t> output;
const auto result = DelayLineNetEqTest(0, &output);
// The base delay values are taken from the results of the non-delayed case in
// NetEqOutputDelayTest.RunTest above.
EXPECT_EQ(20 + kRoundedDelayMs, result.target_delay_ms);
EXPECT_EQ(60 + kRoundedDelayMs, result.filtered_current_delay_ms);
}
} // namespace test
} // namespace webrtc

View File

@@ -33,8 +33,7 @@ NetEqStatsPlotter::NetEqStatsPlotter(bool make_matlab_plot,
stats_getter_.reset(new NetEqStatsGetter(std::move(delay_analyzer)));
}
void NetEqStatsPlotter::SimulationEnded(int64_t simulation_time_ms,
NetEq* /*neteq*/) {
void NetEqStatsPlotter::SimulationEnded(int64_t simulation_time_ms) {
if (make_matlab_plot_) {
auto matlab_script_name = base_file_name_;
std::replace(matlab_script_name.begin(), matlab_script_name.end(), '.',

View File

@@ -28,7 +28,7 @@ class NetEqStatsPlotter : public NetEqSimulationEndedCallback {
bool show_concealment_events,
std::string base_file_name);
void SimulationEnded(int64_t simulation_time_ms, NetEq* neteq) override;
void SimulationEnded(int64_t simulation_time_ms) override;
NetEqStatsGetter* stats_getter() { return stats_getter_.get(); }

View File

@@ -91,8 +91,7 @@ int64_t NetEqTest::Run() {
simulation_time += step_result.simulation_step_ms;
} while (!step_result.is_simulation_finished);
if (callbacks_.simulation_ended_callback) {
callbacks_.simulation_ended_callback->SimulationEnded(simulation_time,
neteq_.get());
callbacks_.simulation_ended_callback->SimulationEnded(simulation_time);
}
return simulation_time;
}
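
Together with the interface change in neteq_test.h below, the call-site change above drops the NetEq* argument from SimulationEnded(), so end-of-simulation stats must now be read from NetEqTest itself (e.g. SimulationStats() or LifetimeStats() after Run()). A minimal sketch of a callback against the new one-argument signature (class name and body are illustrative; only the signature comes from this change, and the declarations from neteq_test.h are assumed to be in scope):

#include <cstdint>
#include <cstdio>

// Illustrative end-of-simulation callback using the new signature.
class PrintTimeCallback : public NetEqSimulationEndedCallback {
 public:
  void SimulationEnded(int64_t simulation_time_ms) override {
    std::printf("simulation ended after %lld ms\n",
                static_cast<long long>(simulation_time_ms));
  }
};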

View File

@@ -61,7 +61,7 @@ class NetEqGetAudioCallback {
class NetEqSimulationEndedCallback {
public:
virtual ~NetEqSimulationEndedCallback() = default;
virtual void SimulationEnded(int64_t simulation_time_ms, NetEq* neteq) = 0;
virtual void SimulationEnded(int64_t simulation_time_ms) = 0;
};
// Class that provides an input--output test for NetEq. The input (both packets