Add support of negotiating Vp9 SVC in PC test framework.

SVC support is limited: during SVC testing there is no SFU, so the framework
will try to emulate SFU behavior in a regular p2p call. Because of this there
are the following limitations:

* If |target_spatial_index| is not equal to the highest spatial layer, then no
  packet/frame drops are allowed. If any drops affect the requested layer, the
  WebRTC SVC implementation will continue decoding only the highest available
  layer and won't restore the lower layers, so the analyzer won't receive the
  required data, which will cause wrong results or test failures.

Bug: webrtc:10138
Change-Id: I079566260ca9f1815935bce365d1bca10766663a
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/144882
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#28612}
@@ -113,6 +113,18 @@ class PeerConnectionE2EQualityTestFixture {
  enum VideoGeneratorType { kDefault, kI420A, kI010 };

  // Config for Vp8 simulcast or Vp9 SVC testing.
  //
  // SVC support is limited:
  // During SVC testing there is no SFU, so framework will try to emulate SFU
  // behavior in regular p2p call. Because of it there are such limitations:
  //  * if |target_spatial_index| is not equal to the highest spatial layer
  //    then no packet/frame drops are allowed.
  //
  //    If there will be any drops, that will affect requested layer, then
  //    WebRTC SVC implementation will continue decoding only the highest
  //    available layer and won't restore lower layers, so analyzer won't
  //    receive required data which will cause wrong results or test failures.
  struct VideoSimulcastConfig {
    VideoSimulcastConfig(int simulcast_streams_count, int target_spatial_index)
        : simulcast_streams_count(simulcast_streams_count),
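For orientation, a minimal sketch of how a test could use this config to request the highest of three Vp9 SVC spatial layers, which is what the limitation above recommends whenever the emulated network can drop packets. It mirrors the smoke test added later in this CL; the stream label and the surrounding PeerConfigurer lambda are illustrative, not part of the patch:

// Inside a configurer lambda such as [](PeerConfigurer* alice) { ... }.
VideoConfig video_config(1280, 720, 30);
video_config.stream_label = "alice-video";  // hypothetical label
// 3 spatial layers, analyze layer index 2 (the highest one).
video_config.simulcast_config = VideoSimulcastConfig(3, 2);
alice->AddVideoConfig(std::move(video_config));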
@@ -163,11 +163,13 @@ void DefaultVideoQualityAnalyzer::OnFrameEncoded(
  rtc::CritScope crit(&lock_);
  auto it = frame_stats_.find(frame_id);
  RTC_DCHECK(it != frame_stats_.end());
  RTC_DCHECK(it->second.encoded_time.IsInfinite())
      << "Received multiple spatial layers for stream_label="
      << it->second.stream_label;
  frame_counters_.encoded++;
  stream_frame_counters_[it->second.stream_label].encoded++;
  // For SVC we can receive multiple encoded images for one frame, so to cover
  // all cases we have to pick the last encode time.
  if (it->second.encoded_time.IsInfinite()) {
    // Increase counters only when we meet this frame first time.
    frame_counters_.encoded++;
    stream_frame_counters_[it->second.stream_label].encoded++;
  }
  it->second.encoded_time = Now();
}
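The intent of the new branch, shown in isolation: one SVC frame may produce several encoded images, so the analyzer keeps the encode time of the last one but bumps the counters only once. A standalone sketch with a hypothetical FrameStats type (not the framework's own struct):

#include <cstdint>
#include <optional>

struct FrameStats {
  std::optional<int64_t> encoded_time_us;  // unset until the first layer arrives
  int encoded_frames = 0;
};

// Called once per encoded spatial layer of the same frame.
void OnLayerEncoded(FrameStats& stats, int64_t now_us) {
  if (!stats.encoded_time_us.has_value()) {
    // First encoded image of this frame: count the frame exactly once.
    stats.encoded_frames++;
  }
  // Every layer overwrites the time, so the last layer's encode time wins.
  stats.encoded_time_us = now_us;
}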
@@ -290,12 +290,10 @@ bool QualityAnalyzingVideoEncoder::ShouldDiscard(
  absl::optional<int> required_spatial_index =
      stream_required_spatial_index_[stream_label];
  if (required_spatial_index) {
    RTC_CHECK(encoded_image.SpatialIndex())
        << "Specific spatial layer/simulcast stream requested for track, but "
           "now spatial layers/simulcast streams produced by encoder. "
           "stream_label="
        << stream_label
        << "; required_spatial_index=" << *required_spatial_index;
    absl::optional<int> cur_spatial_index = encoded_image.SpatialIndex();
    if (!cur_spatial_index) {
      cur_spatial_index = 0;
    }
    RTC_CHECK(mode_ != SimulcastMode::kNormal)
        << "Analyzing encoder is in kNormal "
           "mode, but spatial layer/simulcast "
@@ -303,21 +301,21 @@ bool QualityAnalyzingVideoEncoder::ShouldDiscard(
    if (mode_ == SimulcastMode::kSimulcast) {
      // In simulcast mode only encoded images with required spatial index are
      // interested, so all others have to be discarded.
      return *encoded_image.SpatialIndex() != *required_spatial_index;
      return *cur_spatial_index != *required_spatial_index;
    } else if (mode_ == SimulcastMode::kSVC) {
      // In SVC mode encoded images with spatial indexes that are equal or
      // less than required one are interesting, so all above have to be
      // discarded.
      return *encoded_image.SpatialIndex() > *required_spatial_index;
      return *cur_spatial_index > *required_spatial_index;
    } else if (mode_ == SimulcastMode::kKSVC) {
      // In KSVC mode for key frame encoded images with spatial indexes that
      // are equal or less than required one are interesting, so all above
      // have to be discarded. For other frames only required spatial index
      // is interesting, so all others have to be discarded.
      if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) {
        return *encoded_image.SpatialIndex() > *required_spatial_index;
        return *cur_spatial_index > *required_spatial_index;
      } else {
        return *encoded_image.SpatialIndex() != *required_spatial_index;
        return *cur_spatial_index != *required_spatial_index;
      }
    } else {
      RTC_NOTREACHED() << "Unsupported encoder mode";
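The three discard rules described in the comments above can be summarized as a standalone predicate. This is a sketch in plain C++ with simplified stand-ins for the WebRTC mode and frame-type enums, not the framework's actual signature:

enum class Mode { kSimulcast, kSVC, kKSVC };

// True if an encoded image with |spatial_index| should be dropped when the
// test asked to analyze |required_index|.
bool ShouldDiscardLayer(Mode mode, int spatial_index, int required_index,
                        bool is_key_frame) {
  switch (mode) {
    case Mode::kSimulcast:
      // Simulcast: only the exact requested stream is analyzed.
      return spatial_index != required_index;
    case Mode::kSVC:
      // SVC: the requested layer depends on all lower layers, keep those too.
      return spatial_index > required_index;
    case Mode::kKSVC:
      // K-SVC: key frames are layered like SVC, delta frames are independent.
      return is_key_frame ? spatial_index > required_index
                          : spatial_index != required_index;
  }
  return true;
}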
@@ -81,7 +81,7 @@ EncodedImageExtractionResult SingleProcessEncodedImageDataInjector::ExtractData(
  // Extract frame id from first 2 bytes starting from insertion pos.
  uint16_t next_id = buffer[insertion_pos] + (buffer[insertion_pos + 1] << 8);
  // Extract frame sub id from second 3 byte starting from insertion pos.
  uint16_t sub_id = buffer[insertion_pos + 2];
  uint8_t sub_id = buffer[insertion_pos + 2];
  RTC_CHECK(!id || *id == next_id)
      << "Different frames encoded into single encoded image: " << *id
      << " vs " << next_id;
@@ -102,7 +102,7 @@ EncodedImageExtractionResult SingleProcessEncodedImageDataInjector::ExtractData(
  extraction_infos.push_back(info);
  // We need to discard encoded image only if all concatenated encoded images
  // have to be discarded.
  discard = discard & info.discard;
  discard = discard && info.discard;
  if (pos < info.length) {
    break;
  }
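For clarity, the byte layout the injector relies on and the corrected discard aggregation, sketched independently of the injector class. The layout follows the comments above; the helper names are illustrative only:

#include <cstddef>
#include <cstdint>
#include <vector>

// A little-endian 2-byte frame id sits at |insertion_pos| inside each
// concatenated encoded image, followed by a 1-byte sub id.
uint16_t ReadFrameId(const std::vector<uint8_t>& buffer, size_t insertion_pos) {
  return static_cast<uint16_t>(buffer[insertion_pos] |
                               (buffer[insertion_pos + 1] << 8));
}

uint8_t ReadSubId(const std::vector<uint8_t>& buffer, size_t insertion_pos) {
  return buffer[insertion_pos + 2];
}

// The combined image is discarded only if every concatenated part is
// discarded, hence logical && rather than bitwise & in the loop above.
bool ShouldDiscardImage(const std::vector<bool>& part_discard_flags) {
  bool discard = true;
  for (bool part_discard : part_discard_flags) {
    discard = discard && part_discard;
  }
  return discard;
}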
@@ -211,5 +211,45 @@ TEST_F(PeerConnectionE2EQualityTestSmokeTest, MAYBE_Simulcast) {
      });
}

// IOS debug builds can be quite slow, disabling to avoid issues with timeouts.
#if defined(WEBRTC_IOS) && defined(WEBRTC_ARCH_ARM64) && !defined(NDEBUG)
#define MAYBE_Svc DISABLED_Svc
#else
#define MAYBE_Svc Svc
#endif
TEST_F(PeerConnectionE2EQualityTestSmokeTest, MAYBE_Svc) {
  RunParams run_params(TimeDelta::seconds(7));
  run_params.video_codec_name = cricket::kVp9CodecName;
  RunTest(
      "simulcast", run_params,
      [](PeerConfigurer* alice) {
        VideoConfig simulcast(1280, 720, 30);
        simulcast.stream_label = "alice-simulcast";
        // Because we have network with packets loss we can analyze only the
        // highest spatial layer in SVC mode.
        simulcast.simulcast_config = VideoSimulcastConfig(3, 2);
        alice->AddVideoConfig(std::move(simulcast));

        AudioConfig audio;
        audio.stream_label = "alice-audio";
        audio.mode = AudioConfig::Mode::kFile;
        audio.input_file_name =
            test::ResourcePath("pc_quality_smoke_test_alice_source", "wav");
        alice->SetAudioConfig(std::move(audio));
      },
      [](PeerConfigurer* bob) {
        VideoConfig video(640, 360, 30);
        video.stream_label = "bob-video";
        bob->AddVideoConfig(std::move(video));

        AudioConfig audio;
        audio.stream_label = "bob-audio";
        audio.mode = AudioConfig::Mode::kFile;
        audio.input_file_name =
            test::ResourcePath("pc_quality_smoke_test_bob_source", "wav");
        bob->SetAudioConfig(std::move(audio));
      });
}

} // namespace webrtc_pc_e2e
} // namespace webrtc
@@ -521,9 +521,7 @@ void PeerConnectionE2EQualityTest::ValidateParams(const RunParams& run_params,
      }
    }
    if (video_config.simulcast_config) {
      // We support simulcast only for Vp8 for now.
      // RTC_CHECK_EQ(run_params.video_codec_name, cricket::kVp8CodecName);
      // Also we support simulcast only from caller.
      // We support simulcast only from caller.
      RTC_CHECK_EQ(i, 0)
          << "Only simulcast stream from first peer is supported";
    }
@@ -616,13 +614,17 @@ void PeerConnectionE2EQualityTest::SetupCallOnSignalingThread(
    if (video_config.simulcast_config) {
      RtpTransceiverInit transceiver_params;
      transceiver_params.direction = RtpTransceiverDirection::kSendOnly;
      for (int i = 0;
           i < video_config.simulcast_config->simulcast_streams_count; ++i) {
        RtpEncodingParameters enc_params;
        // We need to be sure, that all rids will be unique with all mids.
        enc_params.rid = std::to_string(alice_transceivers_counter) + "000" +
                         std::to_string(i);
        transceiver_params.send_encodings.push_back(enc_params);
      if (run_params.video_codec_name == cricket::kVp8CodecName) {
        // For Vp8 simulcast we need to add as many RtpEncodingParameters to the
        // track as many simulcast streams requested.
        for (int i = 0;
             i < video_config.simulcast_config->simulcast_streams_count; ++i) {
          RtpEncodingParameters enc_params;
          // We need to be sure, that all rids will be unique with all mids.
          enc_params.rid = std::to_string(alice_transceivers_counter) + "000" +
                           std::to_string(i);
          transceiver_params.send_encodings.push_back(enc_params);
        }
      }
      RTCErrorOr<rtc::scoped_refptr<RtpTransceiverInterface>> result =
          alice_->AddTransceiver(cricket::MediaType::MEDIA_TYPE_VIDEO,
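As a concrete illustration of the rid scheme in the Vp8 branch above: with alice_transceivers_counter == 2 and three simulcast streams, the generated rids are "20000", "20001" and "20002", which is intended to keep rids unique across transceivers and distinct from the mids. A minimal sketch of the same construction (helper name is hypothetical):

#include <string>
#include <vector>

// Transceiver counter + "000" separator + stream index, matching the rid
// construction in the loop above.
std::vector<std::string> MakeRids(int transceiver_counter, int stream_count) {
  std::vector<std::string> rids;
  for (int i = 0; i < stream_count; ++i) {
    rids.push_back(std::to_string(transceiver_counter) + "000" +
                   std::to_string(i));
  }
  return rids;
}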
@@ -645,7 +647,7 @@ void PeerConnectionE2EQualityTest::SetupCallOnSignalingThread(
  SetPeerCodecPreferences(alice_.get(), run_params);
  SetPeerCodecPreferences(bob_.get(), run_params);

  SetupCall();
  SetupCall(run_params);
}

void PeerConnectionE2EQualityTest::TearDownCallOnSignalingThread() {
@@ -822,7 +824,7 @@ void PeerConnectionE2EQualityTest::SetPeerCodecPreferences(
  }
}

void PeerConnectionE2EQualityTest::SetupCall() {
void PeerConnectionE2EQualityTest::SetupCall(const RunParams& run_params) {
  std::map<std::string, VideoSimulcastConfig> stream_label_to_simulcast_config;
  // We add only Alice here, because simulcast/svc is supported only from the
  // first peer.
@@ -832,7 +834,8 @@ void PeerConnectionE2EQualityTest::SetupCall() {
          {*video_config.stream_label, *video_config.simulcast_config});
    }
  }
  PatchingParams patching_params(stream_label_to_simulcast_config);
  PatchingParams patching_params(run_params.video_codec_name,
                                 stream_label_to_simulcast_config);
  SignalingInterceptor signaling_interceptor(patching_params);
  // Connect peers.
  ExchangeOfferAnswer(&signaling_interceptor);
@@ -241,7 +241,7 @@ class PeerConnectionE2EQualityTest
                      const VideoConfig& video_config);
  void MaybeAddAudio(TestPeer* peer);
  void SetPeerCodecPreferences(TestPeer* peer, const RunParams& run_params);
  void SetupCall();
  void SetupCall(const RunParams& run_params);
  void ExchangeOfferAnswer(SignalingInterceptor* signaling_interceptor);
  void ExchangeIceCandidates(SignalingInterceptor* signaling_interceptor);
  void StartVideo(
@@ -90,7 +90,8 @@ std::vector<RtpCodecCapability> FilterVideoCodecCapabilities(
// If offer has simulcast video sections - for each section creates
// SimulcastSectionInfo and put it into |context_|. Also will set conference
// mode if requested.
void SignalingInterceptor::FillContext(SessionDescriptionInterface* offer) {
void SignalingInterceptor::FillSimulcastContext(
    SessionDescriptionInterface* offer) {
  for (auto& content : offer->description()->contents()) {
    context_.mids_order.push_back(content.mid());
    cricket::MediaContentDescription* media_desc = content.media_description();
@@ -160,7 +161,21 @@ void SignalingInterceptor::FillContext(SessionDescriptionInterface* offer) {

LocalAndRemoteSdp SignalingInterceptor::PatchOffer(
    std::unique_ptr<SessionDescriptionInterface> offer) {
  FillContext(offer.get());
  if (params_.video_codec_name == cricket::kVp8CodecName) {
    return PatchVp8Offer(std::move(offer));
  }

  if (params_.video_codec_name == cricket::kVp9CodecName) {
    return PatchVp9Offer(std::move(offer));
  }

  auto offer_for_remote = CloneSessionDescription(offer.get());
  return LocalAndRemoteSdp(std::move(offer), std::move(offer_for_remote));
}

LocalAndRemoteSdp SignalingInterceptor::PatchVp8Offer(
    std::unique_ptr<SessionDescriptionInterface> offer) {
  FillSimulcastContext(offer.get());
  if (!context_.HasSimulcast()) {
    auto offer_for_remote = CloneSessionDescription(offer.get());
    return LocalAndRemoteSdp(std::move(offer), std::move(offer_for_remote));
@@ -267,24 +282,77 @@ LocalAndRemoteSdp SignalingInterceptor::PatchOffer(
  return LocalAndRemoteSdp(std::move(offer), std::move(patched_offer));
}

std::unique_ptr<cricket::SessionDescription>
SignalingInterceptor::RestoreMediaSectionsOrder(
    std::unique_ptr<cricket::SessionDescription> source) {
  std::unique_ptr<cricket::SessionDescription> out = source->Clone();
  for (auto& mid : context_.mids_order) {
    RTC_CHECK(out->RemoveContentByName(mid));
LocalAndRemoteSdp SignalingInterceptor::PatchVp9Offer(
    std::unique_ptr<SessionDescriptionInterface> offer) {
  rtc::UniqueRandomIdGenerator ssrcs_generator;
  for (auto& content : offer->description()->contents()) {
    for (auto& stream : content.media_description()->streams()) {
      for (auto& ssrc : stream.ssrcs) {
        ssrcs_generator.AddKnownId(ssrc);
      }
    }
  }
  RTC_CHECK_EQ(out->contents().size(), 0);
  for (auto& mid : context_.mids_order) {
    cricket::ContentInfo* content = source->GetContentByName(mid);
    RTC_CHECK(content);
    out->AddContent(mid, content->type, content->media_description()->Clone());

  for (auto& content : offer->description()->contents()) {
    if (content.media_description()->type() !=
        cricket::MediaType::MEDIA_TYPE_VIDEO) {
      // We are interested in only video tracks
      continue;
    }
    if (content.media_description()->direction() ==
        RtpTransceiverDirection::kRecvOnly) {
      // If direction is receive only, then there is no media in this track from
      // sender side, so we needn't to do anything with this track.
      continue;
    }
    RTC_CHECK(content.media_description()->streams().size() == 1);
    cricket::StreamParams& stream =
        content.media_description()->mutable_streams()[0];
    RTC_CHECK(stream.stream_ids().size() == 1)
        << "Too many stream ids in video stream";
    std::string stream_id = stream.stream_ids()[0];

    auto it = params_.stream_label_to_simulcast_config.find(stream_id);
    if (it == params_.stream_label_to_simulcast_config.end()) {
      continue;
    }
    int svc_layers_count = it->second.simulcast_streams_count;

    RTC_CHECK(stream.has_ssrc_groups()) << "Only SVC with RTX is supported";
    RTC_CHECK(stream.ssrc_groups.size() == 1)
        << "Too many ssrc groups in the track";
    std::vector<uint32_t> primary_ssrcs;
    stream.GetPrimarySsrcs(&primary_ssrcs);
    RTC_CHECK(primary_ssrcs.size() == 1);
    for (int i = 1; i < svc_layers_count; ++i) {
      uint32_t ssrc = ssrcs_generator.GenerateId();
      primary_ssrcs.push_back(ssrc);
      stream.add_ssrc(ssrc);
      stream.AddFidSsrc(ssrc, ssrcs_generator.GenerateId());
    }
    stream.ssrc_groups.push_back(
        cricket::SsrcGroup(cricket::kSimSsrcGroupSemantics, primary_ssrcs));
  }
  return out;
  auto offer_for_remote = CloneSessionDescription(offer.get());
  return LocalAndRemoteSdp(std::move(offer), std::move(offer_for_remote));
}

LocalAndRemoteSdp SignalingInterceptor::PatchAnswer(
    std::unique_ptr<SessionDescriptionInterface> answer) {
  if (params_.video_codec_name == cricket::kVp8CodecName) {
    return PatchVp8Answer(std::move(answer));
  }

  if (params_.video_codec_name == cricket::kVp9CodecName) {
    return PatchVp9Answer(std::move(answer));
  }

  auto answer_for_remote = CloneSessionDescription(answer.get());
  return LocalAndRemoteSdp(std::move(answer), std::move(answer_for_remote));
}

LocalAndRemoteSdp SignalingInterceptor::PatchVp8Answer(
    std::unique_ptr<SessionDescriptionInterface> answer) {
  if (!context_.HasSimulcast()) {
    auto answer_for_remote = CloneSessionDescription(answer.get());
    return LocalAndRemoteSdp(std::move(answer), std::move(answer_for_remote));
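To make the effect of PatchVp9Offer above concrete: a sender stream that originally carries one primary SSRC with its RTX (FID) partner and is configured for three SVC layers ends up with three primary SSRCs, an RTX partner for each, and a SIM group over the primaries. A simplified, self-contained sketch of that transformation, with plain structs standing in for cricket::StreamParams/SsrcGroup and a counter standing in for rtc::UniqueRandomIdGenerator:

#include <cstdint>
#include <string>
#include <vector>

struct SsrcGroup {  // stand-in for cricket::SsrcGroup
  std::string semantics;  // "FID" or "SIM"
  std::vector<uint32_t> ssrcs;
};
struct Stream {  // stand-in for cricket::StreamParams
  std::vector<uint32_t> ssrcs;
  std::vector<SsrcGroup> ssrc_groups;
};

// Extends a single primary/RTX pair to |layers| primaries, each with its own
// RTX (FID) pair, plus a SIM group listing all primaries.
Stream PatchForSvc(uint32_t primary, uint32_t rtx, int layers,
                   uint32_t next_ssrc) {
  Stream stream{{primary, rtx}, {{"FID", {primary, rtx}}}};
  std::vector<uint32_t> primary_ssrcs{primary};
  for (int i = 1; i < layers; ++i) {
    uint32_t new_primary = next_ssrc++;
    uint32_t new_rtx = next_ssrc++;
    stream.ssrcs.push_back(new_primary);
    stream.ssrcs.push_back(new_rtx);
    stream.ssrc_groups.push_back({"FID", {new_primary, new_rtx}});
    primary_ssrcs.push_back(new_primary);
  }
  stream.ssrc_groups.push_back({"SIM", primary_ssrcs});
  return stream;
}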
@@ -398,6 +466,28 @@ LocalAndRemoteSdp SignalingInterceptor::PatchAnswer(
  return LocalAndRemoteSdp(std::move(answer), std::move(patched_answer));
}

std::unique_ptr<cricket::SessionDescription>
SignalingInterceptor::RestoreMediaSectionsOrder(
    std::unique_ptr<cricket::SessionDescription> source) {
  std::unique_ptr<cricket::SessionDescription> out = source->Clone();
  for (auto& mid : context_.mids_order) {
    RTC_CHECK(out->RemoveContentByName(mid));
  }
  RTC_CHECK_EQ(out->contents().size(), 0);
  for (auto& mid : context_.mids_order) {
    cricket::ContentInfo* content = source->GetContentByName(mid);
    RTC_CHECK(content);
    out->AddContent(mid, content->type, content->media_description()->Clone());
  }
  return out;
}

LocalAndRemoteSdp SignalingInterceptor::PatchVp9Answer(
    std::unique_ptr<SessionDescriptionInterface> answer) {
  auto answer_for_remote = CloneSessionDescription(answer.get());
  return LocalAndRemoteSdp(std::move(answer), std::move(answer_for_remote));
}

std::vector<std::unique_ptr<IceCandidateInterface>>
SignalingInterceptor::PatchOffererIceCandidates(
    rtc::ArrayView<const IceCandidateInterface* const> candidates) {
@@ -62,11 +62,14 @@ struct LocalAndRemoteSdp {

struct PatchingParams {
  PatchingParams(
      std::string video_codec_name,
      std::map<std::string,
               PeerConnectionE2EQualityTestFixture::VideoSimulcastConfig>
          stream_label_to_simulcast_config)
      : stream_label_to_simulcast_config(stream_label_to_simulcast_config) {}
      : video_codec_name(video_codec_name),
        stream_label_to_simulcast_config(stream_label_to_simulcast_config) {}

  std::string video_codec_name;
  std::map<std::string,
           PeerConnectionE2EQualityTestFixture::VideoSimulcastConfig>
      stream_label_to_simulcast_config;
@@ -125,7 +128,16 @@ class SignalingInterceptor {
    std::vector<std::string> mids_order;
  };

  void FillContext(SessionDescriptionInterface* offer);
  LocalAndRemoteSdp PatchVp8Offer(
      std::unique_ptr<SessionDescriptionInterface> offer);
  LocalAndRemoteSdp PatchVp9Offer(
      std::unique_ptr<SessionDescriptionInterface> offer);
  LocalAndRemoteSdp PatchVp8Answer(
      std::unique_ptr<SessionDescriptionInterface> offer);
  LocalAndRemoteSdp PatchVp9Answer(
      std::unique_ptr<SessionDescriptionInterface> offer);

  void FillSimulcastContext(SessionDescriptionInterface* offer);
  std::unique_ptr<cricket::SessionDescription> RestoreMediaSectionsOrder(
      std::unique_ptr<cricket::SessionDescription> source);