platform-external-webrtc/video/stream_synchronization_unittest.cc
Shyam Sadhwani 986e745106 Fix for unbounded increase in audio delay when no audio packets are flowing in
WebRTC's audio/video sync can enter an unbounded loop and keep increasing the audio delay if audio packets stop coming in.
The issue happens if StreamSynchronization::ComputeDelays sees all of the following:

1. relative_delay_ms is positive and large enough that avg_diff_ms_ > 30 ms,
2. current_audio_delay_ms < current_video_delay_ms, and
3. audio_delay_.extra_ms > 0 while video_delay_.extra_ms == 0.

To compensate for the relative delay, audio_delay_.extra_ms gets incremented every time StreamSynchronization::ComputeDelays is called by RtpStreamsSynchronizer::Process(), which happens once per second.

RtpStreamsSynchronizer::Process() then tries to apply the new delay to the audio stream by calling syncable_audio_->SetMinimumPlayoutDelay(target_audio_delay_ms).

This ends up calling DelayManager::SetMinimumDelay, which updates minimum_delay_ms_.

But that update has no effect on the value returned by NetEqImpl::FilteredCurrentDelayMs (since no audio packets are flowing in, NetEq is not running), which is what RtpStreamsSynchronizer::Process() reads the next time it runs to compute the new audio delay (audio_info->current_delay_ms).

As a result the audio delay is increased on every iteration and grows without bound. It should stop growing at 10 seconds, which is the hardcoded maximum delay in NetEq.
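
The runaway loop can be illustrated with a small standalone model. This is not the actual ComputeDelays() code, just a sketch; the smoothing and clamping constants mirror the unit test below:

  #include <algorithm>
  #include <cstdio>

  int main() {
    const int kSmoothingFilter = 8;   // Same smoothing factor as the sync code.
    const int kMaxChangeMs = 80;      // Per-iteration clamp on the change.
    const int kMaxDelayMs = 10000;    // NetEq's hardcoded maximum delay.
    const int kVideoDelayMs = 300;    // Video plays out 300 ms after capture.
    const int audio_delay_ms = 100;   // Reported audio delay; frozen because
                                      // NetEq is not running.
    int target_audio_delay_ms = audio_delay_ms;
    for (int sec = 0; sec < 600; ++sec) {  // Process() runs once per second.
      int diff_ms = (kVideoDelayMs - audio_delay_ms) / kSmoothingFilter;
      diff_ms = std::min(std::max(diff_ms, -kMaxChangeMs), kMaxChangeMs);
      target_audio_delay_ms =
          std::min(kMaxDelayMs, target_audio_delay_ms + diff_ms);
    }
    // Prints "target audio delay: 10000 ms": the requested delay has
    // ratcheted up to the cap even though no audio is being played out.
    printf("target audio delay: %d ms\n", target_audio_delay_ms);
    return 0;
  }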
To avoid this, add a check so that the delays are not adjusted when no new audio packets have come in (see the sketch below).
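
A minimal sketch of the shape of that check inside RtpStreamsSynchronizer::Process(); the member and field names below are placeholders for illustration, not necessarily the identifiers used in this change:

  // If no new audio packets have been received since the previous run, skip
  // the sync adjustment; otherwise the audio target only ratchets upwards.
  if (audio_info->latest_receive_time_ms == last_audio_receive_time_ms_) {
    return;
  }
  last_audio_receive_time_ms_ = audio_info->latest_receive_time_ms;
  // ... the existing ComputeRelativeDelay() / ComputeDelays() /
  // SetMinimumPlayoutDelay() logic runs as before ...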

Bug: webrtc:11894
Change-Id: If648f9227e43c351f887d054876cb119cc1a917e
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/183340
Reviewed-by: Åsa Persson <asapersson@webrtc.org>
Reviewed-by: Ivo Creusen <ivoc@webrtc.org>
Commit-Queue: Shyam Sadhwani <shyamsadhwani@fb.com>
Cr-Commit-Position: refs/heads/master@{#32106}
2020-09-15 15:54:54 +00:00


/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "video/stream_synchronization.h"

#include <algorithm>

#include "system_wrappers/include/clock.h"
#include "system_wrappers/include/ntp_time.h"
#include "test/gtest.h"

namespace webrtc {
namespace {
constexpr int kMaxChangeMs = 80;  // From stream_synchronization.cc
constexpr int kDefaultAudioFrequency = 8000;
constexpr int kDefaultVideoFrequency = 90000;
constexpr int kSmoothingFilter = 4 * 2;
}  // namespace

class StreamSynchronizationTest : public ::testing::Test {
 public:
  StreamSynchronizationTest()
      : sync_(0, 0), clock_sender_(98765000), clock_receiver_(43210000) {}

 protected:
  // Generates the necessary RTCP measurements and RTP timestamps and computes
  // the audio and video delays needed to get the two streams in sync.
  // |audio_delay_ms| and |video_delay_ms| are the number of milliseconds after
  // capture at which the frames are received.
  // |current_audio_delay_ms| is the number of milliseconds by which the
  // receiver is currently delaying audio.
  bool DelayedStreams(int audio_delay_ms,
                      int video_delay_ms,
                      int current_audio_delay_ms,
                      int* total_audio_delay_ms,
                      int* total_video_delay_ms) {
    int audio_frequency =
        static_cast<int>(kDefaultAudioFrequency * audio_clock_drift_ + 0.5);
    int video_frequency =
        static_cast<int>(kDefaultVideoFrequency * video_clock_drift_ + 0.5);

    // Generate NTP/RTP timestamp pair for both streams corresponding to RTCP.
    bool new_sr;
    StreamSynchronization::Measurements audio;
    StreamSynchronization::Measurements video;
    NtpTime ntp_time = clock_sender_.CurrentNtpTime();
    uint32_t rtp_timestamp =
        clock_sender_.CurrentTime().ms() * audio_frequency / 1000;
    EXPECT_TRUE(audio.rtp_to_ntp.UpdateMeasurements(
        ntp_time.seconds(), ntp_time.fractions(), rtp_timestamp, &new_sr));
    clock_sender_.AdvanceTimeMilliseconds(100);
    clock_receiver_.AdvanceTimeMilliseconds(100);
    ntp_time = clock_sender_.CurrentNtpTime();
    rtp_timestamp = clock_sender_.CurrentTime().ms() * video_frequency / 1000;
    EXPECT_TRUE(video.rtp_to_ntp.UpdateMeasurements(
        ntp_time.seconds(), ntp_time.fractions(), rtp_timestamp, &new_sr));
    clock_sender_.AdvanceTimeMilliseconds(900);
    clock_receiver_.AdvanceTimeMilliseconds(900);
    ntp_time = clock_sender_.CurrentNtpTime();
    rtp_timestamp = clock_sender_.CurrentTime().ms() * audio_frequency / 1000;
    EXPECT_TRUE(audio.rtp_to_ntp.UpdateMeasurements(
        ntp_time.seconds(), ntp_time.fractions(), rtp_timestamp, &new_sr));
    clock_sender_.AdvanceTimeMilliseconds(100);
    clock_receiver_.AdvanceTimeMilliseconds(100);
    ntp_time = clock_sender_.CurrentNtpTime();
    rtp_timestamp = clock_sender_.CurrentTime().ms() * video_frequency / 1000;
    EXPECT_TRUE(video.rtp_to_ntp.UpdateMeasurements(
        ntp_time.seconds(), ntp_time.fractions(), rtp_timestamp, &new_sr));
    clock_sender_.AdvanceTimeMilliseconds(900);
    clock_receiver_.AdvanceTimeMilliseconds(900);

    // Capture an audio and a video frame at the same time.
    audio.latest_timestamp =
        clock_sender_.CurrentTime().ms() * audio_frequency / 1000;
    video.latest_timestamp =
        clock_sender_.CurrentTime().ms() * video_frequency / 1000;

    if (audio_delay_ms > video_delay_ms) {
      // Audio later than video.
      clock_receiver_.AdvanceTimeMilliseconds(video_delay_ms);
      video.latest_receive_time_ms = clock_receiver_.CurrentTime().ms();
      clock_receiver_.AdvanceTimeMilliseconds(audio_delay_ms - video_delay_ms);
      audio.latest_receive_time_ms = clock_receiver_.CurrentTime().ms();
    } else {
      // Video later than audio.
      clock_receiver_.AdvanceTimeMilliseconds(audio_delay_ms);
      audio.latest_receive_time_ms = clock_receiver_.CurrentTime().ms();
      clock_receiver_.AdvanceTimeMilliseconds(video_delay_ms - audio_delay_ms);
      video.latest_receive_time_ms = clock_receiver_.CurrentTime().ms();
    }

    int relative_delay_ms;
    EXPECT_TRUE(StreamSynchronization::ComputeRelativeDelay(
        audio, video, &relative_delay_ms));
    EXPECT_EQ(video_delay_ms - audio_delay_ms, relative_delay_ms);

    return sync_.ComputeDelays(relative_delay_ms, current_audio_delay_ms,
                               total_audio_delay_ms, total_video_delay_ms);
  }

  // Simulate audio playback 300 ms after capture and video rendering 100 ms
  // after capture. Verify that the correct extra delays are calculated for
  // audio and video, and that they change correctly when we simulate that
  // NetEQ or the VCM adds more delay to the streams.
  void BothDelayedAudioLaterTest(int base_target_delay_ms) {
    const int kAudioDelayMs = base_target_delay_ms + 300;
    const int kVideoDelayMs = base_target_delay_ms + 100;
    int current_audio_delay_ms = base_target_delay_ms;
    int total_audio_delay_ms = 0;
    int total_video_delay_ms = base_target_delay_ms;
    int filtered_move = (kAudioDelayMs - kVideoDelayMs) / kSmoothingFilter;

    EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
                               current_audio_delay_ms, &total_audio_delay_ms,
                               &total_video_delay_ms));
    EXPECT_EQ(base_target_delay_ms + filtered_move, total_video_delay_ms);
    EXPECT_EQ(base_target_delay_ms, total_audio_delay_ms);

    // Set new current delay.
    current_audio_delay_ms = total_audio_delay_ms;
    clock_sender_.AdvanceTimeMilliseconds(1000);
    clock_receiver_.AdvanceTimeMilliseconds(
        1000 - std::max(kAudioDelayMs, kVideoDelayMs));
    // Simulate base_target_delay_ms minimum delay in the VCM.
    total_video_delay_ms = base_target_delay_ms;
    EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
                               current_audio_delay_ms, &total_audio_delay_ms,
                               &total_video_delay_ms));
    EXPECT_EQ(base_target_delay_ms + 2 * filtered_move, total_video_delay_ms);
    EXPECT_EQ(base_target_delay_ms, total_audio_delay_ms);

    // Set new current delay.
    current_audio_delay_ms = total_audio_delay_ms;
    clock_sender_.AdvanceTimeMilliseconds(1000);
    clock_receiver_.AdvanceTimeMilliseconds(
        1000 - std::max(kAudioDelayMs, kVideoDelayMs));
    // Simulate base_target_delay_ms minimum delay in the VCM.
    total_video_delay_ms = base_target_delay_ms;
    EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
                               current_audio_delay_ms, &total_audio_delay_ms,
                               &total_video_delay_ms));
    EXPECT_EQ(base_target_delay_ms + 3 * filtered_move, total_video_delay_ms);
    EXPECT_EQ(base_target_delay_ms, total_audio_delay_ms);

    // Simulate that NetEQ introduces some audio delay.
    const int kNeteqDelayIncrease = 50;
    current_audio_delay_ms = base_target_delay_ms + kNeteqDelayIncrease;
    clock_sender_.AdvanceTimeMilliseconds(1000);
    clock_receiver_.AdvanceTimeMilliseconds(
        1000 - std::max(kAudioDelayMs, kVideoDelayMs));
    // Simulate base_target_delay_ms minimum delay in the VCM.
    total_video_delay_ms = base_target_delay_ms;
    EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
                               current_audio_delay_ms, &total_audio_delay_ms,
                               &total_video_delay_ms));
    filtered_move = 3 * filtered_move +
                    (kNeteqDelayIncrease + kAudioDelayMs - kVideoDelayMs) /
                        kSmoothingFilter;
    EXPECT_EQ(base_target_delay_ms + filtered_move, total_video_delay_ms);
    EXPECT_EQ(base_target_delay_ms, total_audio_delay_ms);

    // Simulate that NetEQ reduces its delay.
    const int kNeteqDelayDecrease = 10;
    current_audio_delay_ms = base_target_delay_ms + kNeteqDelayDecrease;
    clock_sender_.AdvanceTimeMilliseconds(1000);
    clock_receiver_.AdvanceTimeMilliseconds(
        1000 - std::max(kAudioDelayMs, kVideoDelayMs));
    // Simulate base_target_delay_ms minimum delay in the VCM.
    total_video_delay_ms = base_target_delay_ms;
    EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
                               current_audio_delay_ms, &total_audio_delay_ms,
                               &total_video_delay_ms));
    filtered_move =
        filtered_move + (kNeteqDelayDecrease + kAudioDelayMs - kVideoDelayMs) /
                            kSmoothingFilter;
    EXPECT_EQ(base_target_delay_ms + filtered_move, total_video_delay_ms);
    EXPECT_EQ(base_target_delay_ms, total_audio_delay_ms);
  }

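  // Simulate audio playback 100 ms after capture and video rendering 300 ms
  // after capture, i.e. video is the later stream, so extra delay has to be
  // requested for audio. Verify that video gets no extra delay and that the
  // audio delay moves toward the target by the smoothed amount, clamped to
  // kMaxChangeMs, on every iteration, also when NetEQ changes its own delay
  // between iterations.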
  void BothDelayedVideoLaterTest(int base_target_delay_ms) {
    const int kAudioDelayMs = base_target_delay_ms + 100;
    const int kVideoDelayMs = base_target_delay_ms + 300;
    int current_audio_delay_ms = base_target_delay_ms;
    int total_audio_delay_ms = 0;
    int total_video_delay_ms = base_target_delay_ms;

    EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
                               current_audio_delay_ms, &total_audio_delay_ms,
                               &total_video_delay_ms));
    EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
    // The audio delay is not allowed to change more than this.
    EXPECT_GE(base_target_delay_ms + kMaxChangeMs, total_audio_delay_ms);
    int last_total_audio_delay_ms = total_audio_delay_ms;

    // Set new current audio delay.
    current_audio_delay_ms = total_audio_delay_ms;
    clock_sender_.AdvanceTimeMilliseconds(1000);
    clock_receiver_.AdvanceTimeMilliseconds(800);
    EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
                               current_audio_delay_ms, &total_audio_delay_ms,
                               &total_video_delay_ms));
    EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
    EXPECT_EQ(last_total_audio_delay_ms +
                  MaxAudioDelayChangeMs(
                      current_audio_delay_ms,
                      base_target_delay_ms + kVideoDelayMs - kAudioDelayMs),
              total_audio_delay_ms);
    last_total_audio_delay_ms = total_audio_delay_ms;

    // Set new current audio delay.
    current_audio_delay_ms = total_audio_delay_ms;
    clock_sender_.AdvanceTimeMilliseconds(1000);
    clock_receiver_.AdvanceTimeMilliseconds(800);
    EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
                               current_audio_delay_ms, &total_audio_delay_ms,
                               &total_video_delay_ms));
    EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
    EXPECT_EQ(last_total_audio_delay_ms +
                  MaxAudioDelayChangeMs(
                      current_audio_delay_ms,
                      base_target_delay_ms + kVideoDelayMs - kAudioDelayMs),
              total_audio_delay_ms);
    last_total_audio_delay_ms = total_audio_delay_ms;

    // Simulate that NetEQ for some reason reduced the delay.
    current_audio_delay_ms = base_target_delay_ms + 10;
    clock_sender_.AdvanceTimeMilliseconds(1000);
    clock_receiver_.AdvanceTimeMilliseconds(800);
    EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
                               current_audio_delay_ms, &total_audio_delay_ms,
                               &total_video_delay_ms));
    EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
    EXPECT_EQ(last_total_audio_delay_ms +
                  MaxAudioDelayChangeMs(
                      current_audio_delay_ms,
                      base_target_delay_ms + kVideoDelayMs - kAudioDelayMs),
              total_audio_delay_ms);
    last_total_audio_delay_ms = total_audio_delay_ms;

    // Simulate that NetEQ for some reason significantly increased the delay.
    current_audio_delay_ms = base_target_delay_ms + 350;
    clock_sender_.AdvanceTimeMilliseconds(1000);
    clock_receiver_.AdvanceTimeMilliseconds(800);
    EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
                               current_audio_delay_ms, &total_audio_delay_ms,
                               &total_video_delay_ms));
    EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
    EXPECT_EQ(last_total_audio_delay_ms +
                  MaxAudioDelayChangeMs(
                      current_audio_delay_ms,
                      base_target_delay_ms + kVideoDelayMs - kAudioDelayMs),
              total_audio_delay_ms);
  }

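  // Returns how much the audio delay is expected to change in one iteration:
  // the distance to |delay_ms| divided by the smoothing filter, clamped to
  // +/- kMaxChangeMs (mirroring the smoothing and clamping done in
  // stream_synchronization.cc).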
  int MaxAudioDelayChangeMs(int current_audio_delay_ms, int delay_ms) const {
    int diff_ms = (delay_ms - current_audio_delay_ms) / kSmoothingFilter;
    diff_ms = std::min(diff_ms, kMaxChangeMs);
    diff_ms = std::max(diff_ms, -kMaxChangeMs);
    return diff_ms;
  }

  StreamSynchronization sync_;
  SimulatedClock clock_sender_;
  SimulatedClock clock_receiver_;
  double audio_clock_drift_ = 1.0;
  double video_clock_drift_ = 1.0;
};

TEST_F(StreamSynchronizationTest, NoDelay) {
  int total_audio_delay_ms = 0;
  int total_video_delay_ms = 0;

  EXPECT_FALSE(DelayedStreams(/*audio_delay_ms=*/0, /*video_delay_ms=*/0,
                              /*current_audio_delay_ms=*/0,
                              &total_audio_delay_ms, &total_video_delay_ms));
  EXPECT_EQ(0, total_audio_delay_ms);
  EXPECT_EQ(0, total_video_delay_ms);
}

TEST_F(StreamSynchronizationTest, VideoDelayed) {
  const int kAudioDelayMs = 200;
  int total_audio_delay_ms = 0;
  int total_video_delay_ms = 0;

  EXPECT_TRUE(DelayedStreams(kAudioDelayMs, /*video_delay_ms=*/0,
                             /*current_audio_delay_ms=*/0,
                             &total_audio_delay_ms, &total_video_delay_ms));
  EXPECT_EQ(0, total_audio_delay_ms);
  // The delay is not allowed to change more than this.
  EXPECT_EQ(kAudioDelayMs / kSmoothingFilter, total_video_delay_ms);

  // Simulate 0 minimum delay in the VCM.
  total_video_delay_ms = 0;
  clock_sender_.AdvanceTimeMilliseconds(1000);
  clock_receiver_.AdvanceTimeMilliseconds(800);
  EXPECT_TRUE(DelayedStreams(kAudioDelayMs, /*video_delay_ms=*/0,
                             /*current_audio_delay_ms=*/0,
                             &total_audio_delay_ms, &total_video_delay_ms));
  EXPECT_EQ(0, total_audio_delay_ms);
  EXPECT_EQ(2 * kAudioDelayMs / kSmoothingFilter, total_video_delay_ms);

  // Simulate 0 minimum delay in the VCM.
  total_video_delay_ms = 0;
  clock_sender_.AdvanceTimeMilliseconds(1000);
  clock_receiver_.AdvanceTimeMilliseconds(800);
  EXPECT_TRUE(DelayedStreams(kAudioDelayMs, /*video_delay_ms=*/0,
                             /*current_audio_delay_ms=*/0,
                             &total_audio_delay_ms, &total_video_delay_ms));
  EXPECT_EQ(0, total_audio_delay_ms);
  EXPECT_EQ(3 * kAudioDelayMs / kSmoothingFilter, total_video_delay_ms);
}

TEST_F(StreamSynchronizationTest, AudioDelayed) {
  const int kVideoDelayMs = 200;
  int current_audio_delay_ms = 0;
  int total_audio_delay_ms = 0;
  int total_video_delay_ms = 0;

  EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
                             current_audio_delay_ms, &total_audio_delay_ms,
                             &total_video_delay_ms));
  EXPECT_EQ(0, total_video_delay_ms);
  // The delay is not allowed to change more than this.
  EXPECT_EQ(kVideoDelayMs / kSmoothingFilter, total_audio_delay_ms);
  int last_total_audio_delay_ms = total_audio_delay_ms;

  // Set new current audio delay.
  current_audio_delay_ms = total_audio_delay_ms;
  clock_sender_.AdvanceTimeMilliseconds(1000);
  clock_receiver_.AdvanceTimeMilliseconds(800);
  EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
                             current_audio_delay_ms, &total_audio_delay_ms,
                             &total_video_delay_ms));
  EXPECT_EQ(0, total_video_delay_ms);
  EXPECT_EQ(last_total_audio_delay_ms +
                MaxAudioDelayChangeMs(current_audio_delay_ms, kVideoDelayMs),
            total_audio_delay_ms);
  last_total_audio_delay_ms = total_audio_delay_ms;

  // Set new current audio delay.
  current_audio_delay_ms = total_audio_delay_ms;
  clock_sender_.AdvanceTimeMilliseconds(1000);
  clock_receiver_.AdvanceTimeMilliseconds(800);
  EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
                             current_audio_delay_ms, &total_audio_delay_ms,
                             &total_video_delay_ms));
  EXPECT_EQ(0, total_video_delay_ms);
  EXPECT_EQ(last_total_audio_delay_ms +
                MaxAudioDelayChangeMs(current_audio_delay_ms, kVideoDelayMs),
            total_audio_delay_ms);
  last_total_audio_delay_ms = total_audio_delay_ms;

  // Simulate that NetEQ for some reason reduced the delay.
  current_audio_delay_ms = 10;
  clock_sender_.AdvanceTimeMilliseconds(1000);
  clock_receiver_.AdvanceTimeMilliseconds(800);
  EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
                             current_audio_delay_ms, &total_audio_delay_ms,
                             &total_video_delay_ms));
  EXPECT_EQ(0, total_video_delay_ms);
  EXPECT_EQ(last_total_audio_delay_ms +
                MaxAudioDelayChangeMs(current_audio_delay_ms, kVideoDelayMs),
            total_audio_delay_ms);
  last_total_audio_delay_ms = total_audio_delay_ms;

  // Simulate that NetEQ for some reason significantly increased the delay.
  current_audio_delay_ms = 350;
  clock_sender_.AdvanceTimeMilliseconds(1000);
  clock_receiver_.AdvanceTimeMilliseconds(800);
  EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
                             current_audio_delay_ms, &total_audio_delay_ms,
                             &total_video_delay_ms));
  EXPECT_EQ(0, total_video_delay_ms);
  EXPECT_EQ(last_total_audio_delay_ms +
                MaxAudioDelayChangeMs(current_audio_delay_ms, kVideoDelayMs),
            total_audio_delay_ms);
}

TEST_F(StreamSynchronizationTest, NoAudioIncomingUnboundedIncrease) {
  // Test how the audio delay can grow unbounded when audio stops coming in.
  // This is handled in the caller of StreamSynchronization, for example in
  // RtpStreamsSynchronizer, by not updating the delays when audio samples stop
  // coming in.
  const int kVideoDelayMs = 300;
  const int kAudioDelayMs = 100;
  int current_audio_delay_ms = kAudioDelayMs;
  int total_audio_delay_ms = 0;
  int total_video_delay_ms = 0;

  EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
                             current_audio_delay_ms, &total_audio_delay_ms,
                             &total_video_delay_ms));
  EXPECT_EQ(0, total_video_delay_ms);
  // The delay is not allowed to change more than this.
  EXPECT_EQ((kVideoDelayMs - kAudioDelayMs) / kSmoothingFilter,
            total_audio_delay_ms);
  int last_total_audio_delay_ms = total_audio_delay_ms;

  // Set new current audio delay: simulate that audio samples are flowing in.
  current_audio_delay_ms = total_audio_delay_ms;
  clock_sender_.AdvanceTimeMilliseconds(1000);
  clock_receiver_.AdvanceTimeMilliseconds(1000);
  EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
                             current_audio_delay_ms, &total_audio_delay_ms,
                             &total_video_delay_ms));
  EXPECT_EQ(0, total_video_delay_ms);
  EXPECT_EQ(last_total_audio_delay_ms +
                MaxAudioDelayChangeMs(current_audio_delay_ms, kVideoDelayMs),
            total_audio_delay_ms);
  last_total_audio_delay_ms = total_audio_delay_ms;

  // Simulate no incoming audio by not updating the audio delay.
  const int kSimulationSecs = 300;     // 5 minutes.
  const int kMaxDeltaDelayMs = 10000;  // Maximum audio delay in WebRTC.
  for (auto time_secs = 0; time_secs < kSimulationSecs; time_secs++) {
    clock_sender_.AdvanceTimeMilliseconds(1000);
    clock_receiver_.AdvanceTimeMilliseconds(1000);
    EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
                               current_audio_delay_ms, &total_audio_delay_ms,
                               &total_video_delay_ms));
    EXPECT_EQ(0, total_video_delay_ms);

    // Audio delay does not go above kMaxDeltaDelayMs.
    EXPECT_EQ(std::min(kMaxDeltaDelayMs,
                       last_total_audio_delay_ms +
                           MaxAudioDelayChangeMs(current_audio_delay_ms,
                                                 kVideoDelayMs)),
              total_audio_delay_ms);
    last_total_audio_delay_ms = total_audio_delay_ms;
  }
  // By now the audio delay has grown all the way up to kMaxDeltaDelayMs.
  EXPECT_EQ(kMaxDeltaDelayMs, last_total_audio_delay_ms);
}

TEST_F(StreamSynchronizationTest, BothDelayedVideoLater) {
  BothDelayedVideoLaterTest(0);
}

TEST_F(StreamSynchronizationTest, BothDelayedVideoLaterAudioClockDrift) {
  audio_clock_drift_ = 1.05;
  BothDelayedVideoLaterTest(0);
}

TEST_F(StreamSynchronizationTest, BothDelayedVideoLaterVideoClockDrift) {
  video_clock_drift_ = 1.05;
  BothDelayedVideoLaterTest(0);
}

TEST_F(StreamSynchronizationTest, BothDelayedAudioLater) {
  BothDelayedAudioLaterTest(0);
}

TEST_F(StreamSynchronizationTest, BothDelayedAudioClockDrift) {
  audio_clock_drift_ = 1.05;
  BothDelayedAudioLaterTest(0);
}

TEST_F(StreamSynchronizationTest, BothDelayedVideoClockDrift) {
  video_clock_drift_ = 1.05;
  BothDelayedAudioLaterTest(0);
}

TEST_F(StreamSynchronizationTest, BothEquallyDelayed) {
  const int kDelayMs = 2000;
  int current_audio_delay_ms = kDelayMs;
  int total_audio_delay_ms = 0;
  int total_video_delay_ms = kDelayMs;
  // In sync, expect no change.
  EXPECT_FALSE(DelayedStreams(kDelayMs, kDelayMs, current_audio_delay_ms,
                              &total_audio_delay_ms, &total_video_delay_ms));

  // Trigger another call with the same values, delay should not be modified.
  total_video_delay_ms = kDelayMs;
  EXPECT_FALSE(DelayedStreams(kDelayMs, kDelayMs, current_audio_delay_ms,
                              &total_audio_delay_ms, &total_video_delay_ms));

  // Change delay value, delay should not be modified.
  const int kDelayMs2 = 5000;
  current_audio_delay_ms = kDelayMs2;
  total_video_delay_ms = kDelayMs2;
  EXPECT_FALSE(DelayedStreams(kDelayMs2, kDelayMs2, current_audio_delay_ms,
                              &total_audio_delay_ms, &total_video_delay_ms));
}

TEST_F(StreamSynchronizationTest, BothDelayedAudioLaterWithBaseDelay) {
  const int kBaseTargetDelayMs = 3000;
  sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
  BothDelayedAudioLaterTest(kBaseTargetDelayMs);
}

TEST_F(StreamSynchronizationTest, BothDelayedAudioClockDriftWithBaseDelay) {
  const int kBaseTargetDelayMs = 3000;
  sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
  audio_clock_drift_ = 1.05;
  BothDelayedAudioLaterTest(kBaseTargetDelayMs);
}

TEST_F(StreamSynchronizationTest, BothDelayedVideoClockDriftWithBaseDelay) {
  const int kBaseTargetDelayMs = 3000;
  sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
  video_clock_drift_ = 1.05;
  BothDelayedAudioLaterTest(kBaseTargetDelayMs);
}

TEST_F(StreamSynchronizationTest, BothDelayedVideoLaterWithBaseDelay) {
  const int kBaseTargetDelayMs = 2000;
  sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
  BothDelayedVideoLaterTest(kBaseTargetDelayMs);
}

TEST_F(StreamSynchronizationTest,
       BothDelayedVideoLaterAudioClockDriftWithBaseDelay) {
  const int kBaseTargetDelayMs = 2000;
  audio_clock_drift_ = 1.05;
  sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
  BothDelayedVideoLaterTest(kBaseTargetDelayMs);
}

TEST_F(StreamSynchronizationTest,
       BothDelayedVideoLaterVideoClockDriftWithBaseDelay) {
  const int kBaseTargetDelayMs = 2000;
  video_clock_drift_ = 1.05;
  sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
  BothDelayedVideoLaterTest(kBaseTargetDelayMs);
}

}  // namespace webrtc