Remove RTPVideoHeader::vp9() accessors.
TBR=stefan@webrtc.org
Bug: none
Change-Id: Ia2f728ea3377754a16a0b081e25c4479fe211b3e
Reviewed-on: https://webrtc-review.googlesource.com/93024
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#24243}
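The pattern applied throughout the diff below replaces the deprecated RTPVideoHeader::vp9() accessor with direct use of the absl::variant member video_type_header. A minimal sketch of the write and read sides, assuming WebRTC of this era; the include paths and helper function names are illustrative only and not part of the change:

// Sketch only: mirrors the migration performed in this commit.
// Include paths are assumed; SetTemporalIdx/GetTemporalIdx are hypothetical helpers.
#include "absl/types/variant.h"
#include "modules/rtp_rtcp/source/rtp_video_header.h"

// Writer side: emplace the VP9 struct in the variant instead of rtp->vp9().
void SetTemporalIdx(webrtc::RTPVideoHeader* rtp, uint8_t tid) {
  auto& vp9_header =
      rtp->video_type_header.emplace<webrtc::RTPVideoHeaderVP9>();
  vp9_header.InitRTPVideoHeaderVP9();
  vp9_header.temporal_idx = tid;
}

// Reader side: fetch the struct with absl::get (or absl::get_if when the
// active alternative is not guaranteed) instead of header.vp9().
uint8_t GetTemporalIdx(const webrtc::RTPVideoHeader& header) {
  return absl::get<webrtc::RTPVideoHeaderVP9>(header.video_type_header)
      .temporal_idx;
}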
@@ -33,39 +33,40 @@ void PopulateRtpWithCodecSpecifics(const CodecSpecificInfo& info,
return;
}
case kVideoCodecVP9: {
rtp->vp9().InitRTPVideoHeaderVP9();
rtp->vp9().inter_pic_predicted =
auto& vp9_header = rtp->video_type_header.emplace<RTPVideoHeaderVP9>();
vp9_header.InitRTPVideoHeaderVP9();
vp9_header.inter_pic_predicted =
info.codecSpecific.VP9.inter_pic_predicted;
rtp->vp9().flexible_mode = info.codecSpecific.VP9.flexible_mode;
rtp->vp9().ss_data_available = info.codecSpecific.VP9.ss_data_available;
rtp->vp9().non_ref_for_inter_layer_pred =
vp9_header.flexible_mode = info.codecSpecific.VP9.flexible_mode;
vp9_header.ss_data_available = info.codecSpecific.VP9.ss_data_available;
vp9_header.non_ref_for_inter_layer_pred =
info.codecSpecific.VP9.non_ref_for_inter_layer_pred;
rtp->vp9().temporal_idx = info.codecSpecific.VP9.temporal_idx;
rtp->vp9().spatial_idx = info.codecSpecific.VP9.spatial_idx;
rtp->vp9().temporal_up_switch = info.codecSpecific.VP9.temporal_up_switch;
rtp->vp9().inter_layer_predicted =
vp9_header.temporal_idx = info.codecSpecific.VP9.temporal_idx;
vp9_header.spatial_idx = info.codecSpecific.VP9.spatial_idx;
vp9_header.temporal_up_switch = info.codecSpecific.VP9.temporal_up_switch;
vp9_header.inter_layer_predicted =
info.codecSpecific.VP9.inter_layer_predicted;
rtp->vp9().gof_idx = info.codecSpecific.VP9.gof_idx;
rtp->vp9().num_spatial_layers = info.codecSpecific.VP9.num_spatial_layers;
vp9_header.gof_idx = info.codecSpecific.VP9.gof_idx;
vp9_header.num_spatial_layers = info.codecSpecific.VP9.num_spatial_layers;

if (info.codecSpecific.VP9.ss_data_available) {
rtp->vp9().spatial_layer_resolution_present =
vp9_header.spatial_layer_resolution_present =
info.codecSpecific.VP9.spatial_layer_resolution_present;
if (info.codecSpecific.VP9.spatial_layer_resolution_present) {
for (size_t i = 0; i < info.codecSpecific.VP9.num_spatial_layers;
++i) {
rtp->vp9().width[i] = info.codecSpecific.VP9.width[i];
rtp->vp9().height[i] = info.codecSpecific.VP9.height[i];
vp9_header.width[i] = info.codecSpecific.VP9.width[i];
vp9_header.height[i] = info.codecSpecific.VP9.height[i];
}
}
rtp->vp9().gof.CopyGofInfoVP9(info.codecSpecific.VP9.gof);
vp9_header.gof.CopyGofInfoVP9(info.codecSpecific.VP9.gof);
}

rtp->vp9().num_ref_pics = info.codecSpecific.VP9.num_ref_pics;
vp9_header.num_ref_pics = info.codecSpecific.VP9.num_ref_pics;
for (int i = 0; i < info.codecSpecific.VP9.num_ref_pics; ++i) {
rtp->vp9().pid_diff[i] = info.codecSpecific.VP9.p_diff[i];
vp9_header.pid_diff[i] = info.codecSpecific.VP9.p_diff[i];
}
rtp->vp9().end_of_picture = info.codecSpecific.VP9.end_of_picture;
vp9_header.end_of_picture = info.codecSpecific.VP9.end_of_picture;
return;
}
case kVideoCodecH264: {
@@ -161,19 +162,21 @@ void RtpPayloadParams::Set(RTPVideoHeader* rtp_video_header,
}
}
if (rtp_video_header->codec == kVideoCodecVP9) {
rtp_video_header->vp9().picture_id = state_.picture_id;
auto& vp9_header =
absl::get<RTPVideoHeaderVP9>(rtp_video_header->video_type_header);
vp9_header.picture_id = state_.picture_id;

// Note that in the case that we have no temporal layers but we do have
// spatial layers, packets will carry layering info with a temporal_idx of
// zero, and we then have to set and increment tl0_pic_idx.
if (rtp_video_header->vp9().temporal_idx != kNoTemporalIdx ||
rtp_video_header->vp9().spatial_idx != kNoSpatialIdx) {
if (vp9_header.temporal_idx != kNoTemporalIdx ||
vp9_header.spatial_idx != kNoSpatialIdx) {
if (first_frame_in_picture &&
(rtp_video_header->vp9().temporal_idx == 0 ||
rtp_video_header->vp9().temporal_idx == kNoTemporalIdx)) {
(vp9_header.temporal_idx == 0 ||
vp9_header.temporal_idx == kNoTemporalIdx)) {
++state_.tl0_pic_idx;
}
rtp_video_header->vp9().tl0_pic_idx = state_.tl0_pic_idx;
vp9_header.tl0_pic_idx = state_.tl0_pic_idx;
}
}
}
@@ -83,14 +83,15 @@ TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_Vp9) {
EXPECT_EQ(kVideoRotation_90, header.rotation);
EXPECT_EQ(VideoContentType::SCREENSHARE, header.content_type);
EXPECT_EQ(kVideoCodecVP9, header.codec);
EXPECT_EQ(kPictureId + 1, header.vp9().picture_id);
EXPECT_EQ(kTl0PicIdx, header.vp9().tl0_pic_idx);
EXPECT_EQ(header.vp9().temporal_idx,
codec_info.codecSpecific.VP9.temporal_idx);
EXPECT_EQ(header.vp9().spatial_idx, codec_info.codecSpecific.VP9.spatial_idx);
EXPECT_EQ(header.vp9().num_spatial_layers,
const auto& vp9_header =
absl::get<RTPVideoHeaderVP9>(header.video_type_header);
EXPECT_EQ(kPictureId + 1, vp9_header.picture_id);
EXPECT_EQ(kTl0PicIdx, vp9_header.tl0_pic_idx);
EXPECT_EQ(vp9_header.temporal_idx, codec_info.codecSpecific.VP9.temporal_idx);
EXPECT_EQ(vp9_header.spatial_idx, codec_info.codecSpecific.VP9.spatial_idx);
EXPECT_EQ(vp9_header.num_spatial_layers,
codec_info.codecSpecific.VP9.num_spatial_layers);
EXPECT_EQ(header.vp9().end_of_picture,
EXPECT_EQ(vp9_header.end_of_picture,
codec_info.codecSpecific.VP9.end_of_picture);

// Next spatial layer.
@@ -103,14 +104,13 @@ TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_Vp9) {
EXPECT_EQ(kVideoRotation_90, header.rotation);
EXPECT_EQ(VideoContentType::SCREENSHARE, header.content_type);
EXPECT_EQ(kVideoCodecVP9, header.codec);
EXPECT_EQ(kPictureId + 1, header.vp9().picture_id);
EXPECT_EQ(kTl0PicIdx, header.vp9().tl0_pic_idx);
EXPECT_EQ(header.vp9().temporal_idx,
codec_info.codecSpecific.VP9.temporal_idx);
EXPECT_EQ(header.vp9().spatial_idx, codec_info.codecSpecific.VP9.spatial_idx);
EXPECT_EQ(header.vp9().num_spatial_layers,
EXPECT_EQ(kPictureId + 1, vp9_header.picture_id);
EXPECT_EQ(kTl0PicIdx, vp9_header.tl0_pic_idx);
EXPECT_EQ(vp9_header.temporal_idx, codec_info.codecSpecific.VP9.temporal_idx);
EXPECT_EQ(vp9_header.spatial_idx, codec_info.codecSpecific.VP9.spatial_idx);
EXPECT_EQ(vp9_header.num_spatial_layers,
codec_info.codecSpecific.VP9.num_spatial_layers);
EXPECT_EQ(header.vp9().end_of_picture,
EXPECT_EQ(vp9_header.end_of_picture,
codec_info.codecSpecific.VP9.end_of_picture);
}

@@ -226,8 +226,10 @@ TEST(RtpPayloadParamsTest, Tl0PicIdxUpdatedForVp9) {
RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info);

EXPECT_EQ(kVideoCodecVP9, header.codec);
EXPECT_EQ(kInitialPictureId1 + 1, header.vp9().picture_id);
EXPECT_EQ(kInitialTl0PicIdx1, header.vp9().tl0_pic_idx);
const auto& vp9_header =
absl::get<RTPVideoHeaderVP9>(header.video_type_header);
EXPECT_EQ(kInitialPictureId1 + 1, vp9_header.picture_id);
EXPECT_EQ(kInitialTl0PicIdx1, vp9_header.tl0_pic_idx);

// OnEncodedImage, temporalIdx: 0.
codec_info.codecSpecific.VP9.temporal_idx = 0;
@@ -235,8 +237,8 @@ TEST(RtpPayloadParamsTest, Tl0PicIdxUpdatedForVp9) {
header = params.GetRtpVideoHeader(encoded_image, &codec_info);

EXPECT_EQ(kVideoCodecVP9, header.codec);
EXPECT_EQ(kInitialPictureId1 + 2, header.vp9().picture_id);
EXPECT_EQ(kInitialTl0PicIdx1 + 1, header.vp9().tl0_pic_idx);
EXPECT_EQ(kInitialPictureId1 + 2, vp9_header.picture_id);
EXPECT_EQ(kInitialTl0PicIdx1 + 1, vp9_header.tl0_pic_idx);

// OnEncodedImage, first_frame_in_picture = false
codec_info.codecSpecific.VP9.first_frame_in_picture = false;
@@ -244,8 +246,8 @@ TEST(RtpPayloadParamsTest, Tl0PicIdxUpdatedForVp9) {
header = params.GetRtpVideoHeader(encoded_image, &codec_info);

EXPECT_EQ(kVideoCodecVP9, header.codec);
EXPECT_EQ(kInitialPictureId1 + 2, header.vp9().picture_id);
EXPECT_EQ(kInitialTl0PicIdx1 + 1, header.vp9().tl0_pic_idx);
EXPECT_EQ(kInitialPictureId1 + 2, vp9_header.picture_id);
EXPECT_EQ(kInitialTl0PicIdx1 + 1, vp9_header.tl0_pic_idx);

// State should hold latest used picture id and tl0_pic_idx.
EXPECT_EQ(kInitialPictureId1 + 2, params.state().picture_id);
@@ -34,9 +34,12 @@ RtpPacketizer* RtpPacketizer::Create(VideoCodecType type,
case kVideoCodecVP8:
return new RtpPacketizerVp8(rtp_video_header->vp8(), max_payload_len,
last_packet_reduction_len);
case kVideoCodecVP9:
return new RtpPacketizerVp9(rtp_video_header->vp9(), max_payload_len,
case kVideoCodecVP9: {
const auto& vp9 =
absl::get<RTPVideoHeaderVP9>(rtp_video_header->video_type_header);
return new RtpPacketizerVp9(vp9, max_payload_len,
last_packet_reduction_len);
}
case kVideoCodecGeneric:
return new RtpPacketizerGeneric(frame_type, max_payload_len,
last_packet_reduction_len);
@@ -719,41 +719,42 @@ bool RtpDepacketizerVp9::Parse(ParsedPayload* parsed_payload,

parsed_payload->frame_type = p_bit ? kVideoFrameDelta : kVideoFrameKey;

RTPVideoHeaderVP9* vp9 = &parsed_payload->video_header().vp9();
vp9->InitRTPVideoHeaderVP9();
vp9->inter_pic_predicted = p_bit ? true : false;
vp9->flexible_mode = f_bit ? true : false;
vp9->beginning_of_frame = b_bit ? true : false;
vp9->end_of_frame = e_bit ? true : false;
vp9->ss_data_available = v_bit ? true : false;
vp9->non_ref_for_inter_layer_pred = z_bit ? true : false;
auto& vp9_header = parsed_payload->video_header()
.video_type_header.emplace<RTPVideoHeaderVP9>();
vp9_header.InitRTPVideoHeaderVP9();
vp9_header.inter_pic_predicted = p_bit ? true : false;
vp9_header.flexible_mode = f_bit ? true : false;
vp9_header.beginning_of_frame = b_bit ? true : false;
vp9_header.end_of_frame = e_bit ? true : false;
vp9_header.ss_data_available = v_bit ? true : false;
vp9_header.non_ref_for_inter_layer_pred = z_bit ? true : false;

// Parse fields that are present.
if (i_bit && !ParsePictureId(&parser, vp9)) {
if (i_bit && !ParsePictureId(&parser, &vp9_header)) {
RTC_LOG(LS_ERROR) << "Failed parsing VP9 picture id.";
return false;
}
if (l_bit && !ParseLayerInfo(&parser, vp9)) {
if (l_bit && !ParseLayerInfo(&parser, &vp9_header)) {
RTC_LOG(LS_ERROR) << "Failed parsing VP9 layer info.";
return false;
}
if (p_bit && f_bit && !ParseRefIndices(&parser, vp9)) {
if (p_bit && f_bit && !ParseRefIndices(&parser, &vp9_header)) {
RTC_LOG(LS_ERROR) << "Failed parsing VP9 ref indices.";
return false;
}
if (v_bit) {
if (!ParseSsData(&parser, vp9)) {
if (!ParseSsData(&parser, &vp9_header)) {
RTC_LOG(LS_ERROR) << "Failed parsing VP9 SS data.";
return false;
}
if (vp9->spatial_layer_resolution_present) {
if (vp9_header.spatial_layer_resolution_present) {
// TODO(asapersson): Add support for spatial layers.
parsed_payload->video_header().width = vp9->width[0];
parsed_payload->video_header().height = vp9->height[0];
parsed_payload->video_header().width = vp9_header.width[0];
parsed_payload->video_header().height = vp9_header.height[0];
}
}
parsed_payload->video_header().is_first_packet_in_frame =
b_bit && (!l_bit || !vp9->inter_layer_predicted);
b_bit && (!l_bit || !vp9_header.inter_layer_predicted);

uint64_t rem_bits = parser.RemainingBitCount();
assert(rem_bits % 8 == 0);
@@ -82,7 +82,9 @@ void ParseAndCheckPacket(const uint8_t* packet,
RtpDepacketizer::ParsedPayload parsed;
ASSERT_TRUE(depacketizer->Parse(&parsed, packet, expected_length));
EXPECT_EQ(kVideoCodecVP9, parsed.video_header().codec);
VerifyHeader(expected, parsed.video_header().vp9());
auto& vp9_header =
absl::get<RTPVideoHeaderVP9>(parsed.video_header().video_type_header);
VerifyHeader(expected, vp9_header);
const size_t kExpectedPayloadLength = expected_length - expected_hdr_length;
VerifyPayload(parsed, packet + expected_hdr_length, kExpectedPayloadLength);
}
@@ -1891,8 +1891,9 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesVP9) {
RTPVideoHeader header;
header.codec = kVideoCodecVP9;

auto& vp9_header = header.video_type_header.emplace<RTPVideoHeaderVP9>();
for (int tid = 1; tid <= kMaxTemporalStreams; ++tid) {
header.vp9().temporal_idx = tid;
vp9_header.temporal_idx = tid;

EXPECT_EQ(kDontRetransmit, rtp_sender_video_->GetStorageType(
header, kRetransmitOff,
@@ -471,7 +471,8 @@ uint8_t RTPSenderVideo::GetTemporalId(const RTPVideoHeader& header) {
case kVideoCodecVP8:
return header.vp8().temporalIdx;
case kVideoCodecVP9:
return header.vp9().temporal_idx;
return absl::get<RTPVideoHeaderVP9>(header.video_type_header)
.temporal_idx;
default:
return kNoTemporalIdx;
}
@@ -44,20 +44,6 @@ struct RTPVideoHeader {

return absl::get<RTPVideoHeaderVP8>(video_type_header);
}
// TODO(philipel): Remove when downstream projects have been updated.
RTPVideoHeaderVP9& vp9() {
if (!absl::holds_alternative<RTPVideoHeaderVP9>(video_type_header))
video_type_header.emplace<RTPVideoHeaderVP9>();

return absl::get<RTPVideoHeaderVP9>(video_type_header);
}
// TODO(philipel): Remove when downstream projects have been updated.
const RTPVideoHeaderVP9& vp9() const {
if (!absl::holds_alternative<RTPVideoHeaderVP9>(video_type_header))
video_type_header.emplace<RTPVideoHeaderVP9>();

return absl::get<RTPVideoHeaderVP9>(video_type_header);
}

// Information for generic codec descriptor.
int64_t frame_id = 0;
@@ -458,7 +458,8 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeKeyFrame) {
packet.dataPtr = data;
packet.video_header.codec = kVideoCodecVP9;

RTPVideoHeaderVP9& vp9_hdr = packet.video_header.vp9();
auto& vp9_hdr =
packet.video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
vp9_hdr.picture_id = 10;
vp9_hdr.flexible_mode = true;

@@ -501,7 +502,8 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeOutOfOrderFrames) {
packet.dataPtr = data;
packet.video_header.codec = kVideoCodecVP9;

RTPVideoHeaderVP9& vp9_hdr = packet.video_header.vp9();
auto& vp9_hdr =
packet.video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
vp9_hdr.picture_id = 10;
vp9_hdr.flexible_mode = true;

@@ -556,7 +558,8 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {
packet.dataPtr = data;
packet.video_header.codec = kVideoCodecVP9;

RTPVideoHeaderVP9& vp9_hdr = packet.video_header.vp9();
auto& vp9_hdr =
packet.video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
vp9_hdr.picture_id = 10;
vp9_hdr.flexible_mode = true;
@@ -77,6 +77,8 @@ void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
break;
}
case kVideoCodecVP9: {
const auto& vp9_header =
absl::get<RTPVideoHeaderVP9>(header->video_type_header);
if (_codecSpecificInfo.codecType != kVideoCodecVP9) {
// This is the first packet for this frame.
_codecSpecificInfo.codecSpecific.VP9.temporal_idx = 0;
@@ -86,48 +88,48 @@ void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
_codecSpecificInfo.codecType = kVideoCodecVP9;
}
_codecSpecificInfo.codecSpecific.VP9.inter_pic_predicted =
header->vp9().inter_pic_predicted;
vp9_header.inter_pic_predicted;
_codecSpecificInfo.codecSpecific.VP9.flexible_mode =
header->vp9().flexible_mode;
vp9_header.flexible_mode;
_codecSpecificInfo.codecSpecific.VP9.num_ref_pics =
header->vp9().num_ref_pics;
for (uint8_t r = 0; r < header->vp9().num_ref_pics; ++r) {
vp9_header.num_ref_pics;
for (uint8_t r = 0; r < vp9_header.num_ref_pics; ++r) {
_codecSpecificInfo.codecSpecific.VP9.p_diff[r] =
header->vp9().pid_diff[r];
vp9_header.pid_diff[r];
}
_codecSpecificInfo.codecSpecific.VP9.ss_data_available =
header->vp9().ss_data_available;
if (header->vp9().temporal_idx != kNoTemporalIdx) {
vp9_header.ss_data_available;
if (vp9_header.temporal_idx != kNoTemporalIdx) {
_codecSpecificInfo.codecSpecific.VP9.temporal_idx =
header->vp9().temporal_idx;
vp9_header.temporal_idx;
_codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
header->vp9().temporal_up_switch;
vp9_header.temporal_up_switch;
}
if (header->vp9().spatial_idx != kNoSpatialIdx) {
if (vp9_header.spatial_idx != kNoSpatialIdx) {
_codecSpecificInfo.codecSpecific.VP9.spatial_idx =
header->vp9().spatial_idx;
vp9_header.spatial_idx;
_codecSpecificInfo.codecSpecific.VP9.inter_layer_predicted =
header->vp9().inter_layer_predicted;
vp9_header.inter_layer_predicted;
}
if (header->vp9().gof_idx != kNoGofIdx) {
_codecSpecificInfo.codecSpecific.VP9.gof_idx = header->vp9().gof_idx;
if (vp9_header.gof_idx != kNoGofIdx) {
_codecSpecificInfo.codecSpecific.VP9.gof_idx = vp9_header.gof_idx;
}
if (header->vp9().ss_data_available) {
if (vp9_header.ss_data_available) {
_codecSpecificInfo.codecSpecific.VP9.num_spatial_layers =
header->vp9().num_spatial_layers;
vp9_header.num_spatial_layers;
_codecSpecificInfo.codecSpecific.VP9
.spatial_layer_resolution_present =
header->vp9().spatial_layer_resolution_present;
if (header->vp9().spatial_layer_resolution_present) {
for (size_t i = 0; i < header->vp9().num_spatial_layers; ++i) {
vp9_header.spatial_layer_resolution_present;
if (vp9_header.spatial_layer_resolution_present) {
for (size_t i = 0; i < vp9_header.num_spatial_layers; ++i) {
_codecSpecificInfo.codecSpecific.VP9.width[i] =
header->vp9().width[i];
vp9_header.width[i];
_codecSpecificInfo.codecSpecific.VP9.height[i] =
header->vp9().height[i];
vp9_header.height[i];
}
}
_codecSpecificInfo.codecSpecific.VP9.gof.CopyGofInfoVP9(
header->vp9().gof);
vp9_header.gof);
}
break;
}
@@ -127,10 +127,12 @@ Vp9SsMap::Vp9SsMap() {}
Vp9SsMap::~Vp9SsMap() {}

bool Vp9SsMap::Insert(const VCMPacket& packet) {
if (!packet.video_header.vp9().ss_data_available)
const auto& vp9_header =
absl::get<RTPVideoHeaderVP9>(packet.video_header.video_type_header);
if (!vp9_header.ss_data_available)
return false;

ss_map_[packet.timestamp] = packet.video_header.vp9().gof;
ss_map_[packet.timestamp] = vp9_header.gof;
return true;
}

@@ -178,7 +180,9 @@ void Vp9SsMap::AdvanceFront(uint32_t timestamp) {

// TODO(asapersson): Update according to updates in RTP payload profile.
bool Vp9SsMap::UpdatePacket(VCMPacket* packet) {
uint8_t gof_idx = packet->video_header.vp9().gof_idx;
auto& vp9_header =
absl::get<RTPVideoHeaderVP9>(packet->video_header.video_type_header);
uint8_t gof_idx = vp9_header.gof_idx;
if (gof_idx == kNoGofIdx)
return false; // No update needed.

@@ -189,14 +193,13 @@ bool Vp9SsMap::UpdatePacket(VCMPacket* packet) {
if (gof_idx >= it->second.num_frames_in_gof)
return false; // Assume corresponding SS not yet received.

RTPVideoHeaderVP9* vp9 = &packet->video_header.vp9();
vp9->temporal_idx = it->second.temporal_idx[gof_idx];
vp9->temporal_up_switch = it->second.temporal_up_switch[gof_idx];
vp9_header.temporal_idx = it->second.temporal_idx[gof_idx];
vp9_header.temporal_up_switch = it->second.temporal_up_switch[gof_idx];

// TODO(asapersson): Set vp9.ref_picture_id[i] and add usage.
vp9->num_ref_pics = it->second.num_ref_pics[gof_idx];
vp9_header.num_ref_pics = it->second.num_ref_pics[gof_idx];
for (uint8_t i = 0; i < it->second.num_ref_pics[gof_idx]; ++i) {
vp9->pid_diff[i] = it->second.pid_diff[gof_idx][i];
vp9_header.pid_diff[i] = it->second.pid_diff[gof_idx][i];
}
return true;
}
@@ -41,6 +41,8 @@ class Vp9SsMapTest : public ::testing::Test {
Vp9SsMapTest() : packet_() {}

virtual void SetUp() {
auto& vp9_header =
packet_.video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
packet_.is_first_packet_in_frame = true;
packet_.dataPtr = data_;
packet_.sizeBytes = 1400;
@@ -50,12 +52,12 @@ class Vp9SsMapTest : public ::testing::Test {
packet_.frameType = kVideoFrameKey;
packet_.codec = kVideoCodecVP9;
packet_.video_header.codec = kVideoCodecVP9;
packet_.video_header.vp9().flexible_mode = false;
packet_.video_header.vp9().gof_idx = 0;
packet_.video_header.vp9().temporal_idx = kNoTemporalIdx;
packet_.video_header.vp9().temporal_up_switch = false;
packet_.video_header.vp9().ss_data_available = true;
packet_.video_header.vp9().gof.SetGofInfoVP9(
vp9_header.flexible_mode = false;
vp9_header.gof_idx = 0;
vp9_header.temporal_idx = kNoTemporalIdx;
vp9_header.temporal_up_switch = false;
vp9_header.ss_data_available = true;
vp9_header.gof.SetGofInfoVP9(
kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
}

@@ -69,7 +71,8 @@ TEST_F(Vp9SsMapTest, Insert) {
}

TEST_F(Vp9SsMapTest, Insert_NoSsData) {
packet_.video_header.vp9().ss_data_available = false;
absl::get<RTPVideoHeaderVP9>(packet_.video_header.video_type_header)
.ss_data_available = false;
EXPECT_FALSE(map_.Insert(packet_));
}

@@ -146,52 +149,57 @@ TEST_F(Vp9SsMapTest, RemoveOld_WithWrap) {
}

TEST_F(Vp9SsMapTest, UpdatePacket_NoSsData) {
packet_.video_header.vp9().gof_idx = 0;
absl::get<RTPVideoHeaderVP9>(packet_.video_header.video_type_header).gof_idx =
0;
EXPECT_FALSE(map_.UpdatePacket(&packet_));
}

TEST_F(Vp9SsMapTest, UpdatePacket_NoGofIdx) {
EXPECT_TRUE(map_.Insert(packet_));
packet_.video_header.vp9().gof_idx = kNoGofIdx;
absl::get<RTPVideoHeaderVP9>(packet_.video_header.video_type_header).gof_idx =
kNoGofIdx;
EXPECT_FALSE(map_.UpdatePacket(&packet_));
}

TEST_F(Vp9SsMapTest, UpdatePacket_InvalidGofIdx) {
EXPECT_TRUE(map_.Insert(packet_));
packet_.video_header.vp9().gof_idx = 4;
absl::get<RTPVideoHeaderVP9>(packet_.video_header.video_type_header).gof_idx =
4;
EXPECT_FALSE(map_.UpdatePacket(&packet_));
}

TEST_F(Vp9SsMapTest, UpdatePacket) {
auto& vp9_header =
absl::get<RTPVideoHeaderVP9>(packet_.video_header.video_type_header);
EXPECT_TRUE(map_.Insert(packet_)); // kTemporalStructureMode3: 0-2-1-2..

packet_.video_header.vp9().gof_idx = 0;
vp9_header.gof_idx = 0;
EXPECT_TRUE(map_.UpdatePacket(&packet_));
EXPECT_EQ(0, packet_.video_header.vp9().temporal_idx);
EXPECT_FALSE(packet_.video_header.vp9().temporal_up_switch);
EXPECT_EQ(1U, packet_.video_header.vp9().num_ref_pics);
EXPECT_EQ(4, packet_.video_header.vp9().pid_diff[0]);
EXPECT_EQ(0, vp9_header.temporal_idx);
EXPECT_FALSE(vp9_header.temporal_up_switch);
EXPECT_EQ(1U, vp9_header.num_ref_pics);
EXPECT_EQ(4, vp9_header.pid_diff[0]);

packet_.video_header.vp9().gof_idx = 1;
vp9_header.gof_idx = 1;
EXPECT_TRUE(map_.UpdatePacket(&packet_));
EXPECT_EQ(2, packet_.video_header.vp9().temporal_idx);
EXPECT_TRUE(packet_.video_header.vp9().temporal_up_switch);
EXPECT_EQ(1U, packet_.video_header.vp9().num_ref_pics);
EXPECT_EQ(1, packet_.video_header.vp9().pid_diff[0]);
EXPECT_EQ(2, vp9_header.temporal_idx);
EXPECT_TRUE(vp9_header.temporal_up_switch);
EXPECT_EQ(1U, vp9_header.num_ref_pics);
EXPECT_EQ(1, vp9_header.pid_diff[0]);

packet_.video_header.vp9().gof_idx = 2;
vp9_header.gof_idx = 2;
EXPECT_TRUE(map_.UpdatePacket(&packet_));
EXPECT_EQ(1, packet_.video_header.vp9().temporal_idx);
EXPECT_TRUE(packet_.video_header.vp9().temporal_up_switch);
EXPECT_EQ(1U, packet_.video_header.vp9().num_ref_pics);
EXPECT_EQ(2, packet_.video_header.vp9().pid_diff[0]);
EXPECT_EQ(1, vp9_header.temporal_idx);
EXPECT_TRUE(vp9_header.temporal_up_switch);
EXPECT_EQ(1U, vp9_header.num_ref_pics);
EXPECT_EQ(2, vp9_header.pid_diff[0]);

packet_.video_header.vp9().gof_idx = 3;
vp9_header.gof_idx = 3;
EXPECT_TRUE(map_.UpdatePacket(&packet_));
EXPECT_EQ(2, packet_.video_header.vp9().temporal_idx);
EXPECT_TRUE(packet_.video_header.vp9().temporal_up_switch);
EXPECT_EQ(1U, packet_.video_header.vp9().num_ref_pics);
EXPECT_EQ(1, packet_.video_header.vp9().pid_diff[0]);
EXPECT_EQ(2, vp9_header.temporal_idx);
EXPECT_TRUE(vp9_header.temporal_up_switch);
EXPECT_EQ(1U, vp9_header.num_ref_pics);
EXPECT_EQ(1, vp9_header.pid_diff[0]);
}

class TestBasicJitterBuffer : public ::testing::TestWithParam<std::string>,
@@ -920,25 +928,28 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
// -------------------------------------------------
// |<----------tl0idx:200--------->|<---tl0idx:201---

auto& vp9_header =
packet_->video_header.video_type_header.emplace<RTPVideoHeaderVP9>();

bool re = false;
packet_->codec = kVideoCodecVP9;
packet_->video_header.codec = kVideoCodecVP9;
packet_->is_first_packet_in_frame = true;
packet_->markerBit = true;
packet_->video_header.vp9().flexible_mode = false;
packet_->video_header.vp9().spatial_idx = 0;
packet_->video_header.vp9().beginning_of_frame = true;
packet_->video_header.vp9().end_of_frame = true;
packet_->video_header.vp9().temporal_up_switch = false;
vp9_header.flexible_mode = false;
vp9_header.spatial_idx = 0;
vp9_header.beginning_of_frame = true;
vp9_header.end_of_frame = true;
vp9_header.temporal_up_switch = false;

packet_->seqNum = 65485;
packet_->timestamp = 1000;
packet_->frameType = kVideoFrameKey;
packet_->video_header.vp9().picture_id = 5;
packet_->video_header.vp9().tl0_pic_idx = 200;
packet_->video_header.vp9().temporal_idx = 0;
packet_->video_header.vp9().ss_data_available = true;
packet_->video_header.vp9().gof.SetGofInfoVP9(
vp9_header.picture_id = 5;
vp9_header.tl0_pic_idx = 200;
vp9_header.temporal_idx = 0;
vp9_header.ss_data_available = true;
vp9_header.gof.SetGofInfoVP9(
kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));

@@ -946,10 +957,10 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
packet_->seqNum = 65489;
packet_->timestamp = 13000;
packet_->frameType = kVideoFrameDelta;
packet_->video_header.vp9().picture_id = 9;
packet_->video_header.vp9().tl0_pic_idx = 201;
packet_->video_header.vp9().temporal_idx = 0;
packet_->video_header.vp9().ss_data_available = false;
vp9_header.picture_id = 9;
vp9_header.tl0_pic_idx = 201;
vp9_header.temporal_idx = 0;
vp9_header.ss_data_available = false;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));

VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@@ -973,31 +984,34 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
// --------------------------------
// |<--------tl0idx:200--------->|

auto& vp9_header =
packet_->video_header.video_type_header.emplace<RTPVideoHeaderVP9>();

bool re = false;
packet_->codec = kVideoCodecVP9;
packet_->video_header.codec = kVideoCodecVP9;
packet_->is_first_packet_in_frame = true;
packet_->markerBit = true;
packet_->video_header.vp9().flexible_mode = false;
packet_->video_header.vp9().spatial_idx = 0;
packet_->video_header.vp9().beginning_of_frame = true;
packet_->video_header.vp9().end_of_frame = true;
packet_->video_header.vp9().tl0_pic_idx = 200;
vp9_header.flexible_mode = false;
vp9_header.spatial_idx = 0;
vp9_header.beginning_of_frame = true;
vp9_header.end_of_frame = true;
vp9_header.tl0_pic_idx = 200;

packet_->seqNum = 65486;
packet_->timestamp = 6000;
packet_->frameType = kVideoFrameDelta;
packet_->video_header.vp9().picture_id = 6;
packet_->video_header.vp9().temporal_idx = 2;
packet_->video_header.vp9().temporal_up_switch = true;
vp9_header.picture_id = 6;
vp9_header.temporal_idx = 2;
vp9_header.temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));

packet_->seqNum = 65487;
packet_->timestamp = 9000;
packet_->frameType = kVideoFrameDelta;
packet_->video_header.vp9().picture_id = 7;
packet_->video_header.vp9().temporal_idx = 1;
packet_->video_header.vp9().temporal_up_switch = true;
vp9_header.picture_id = 7;
vp9_header.temporal_idx = 1;
vp9_header.temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));

// Insert first frame with SS data.
@@ -1006,11 +1020,11 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
packet_->frameType = kVideoFrameKey;
packet_->width = 352;
packet_->height = 288;
packet_->video_header.vp9().picture_id = 5;
packet_->video_header.vp9().temporal_idx = 0;
packet_->video_header.vp9().temporal_up_switch = false;
packet_->video_header.vp9().ss_data_available = true;
packet_->video_header.vp9().gof.SetGofInfoVP9(
vp9_header.picture_id = 5;
vp9_header.temporal_idx = 0;
vp9_header.temporal_up_switch = false;
vp9_header.ss_data_available = true;
vp9_header.gof.SetGofInfoVP9(
kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));

@@ -1049,33 +1063,36 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
// -----------------------------------------
// |<-----------tl0idx:200------------>|

auto& vp9_header =
packet_->video_header.video_type_header.emplace<RTPVideoHeaderVP9>();

bool re = false;
packet_->codec = kVideoCodecVP9;
packet_->video_header.codec = kVideoCodecVP9;
packet_->video_header.vp9().flexible_mode = false;
packet_->video_header.vp9().beginning_of_frame = true;
packet_->video_header.vp9().end_of_frame = true;
packet_->video_header.vp9().tl0_pic_idx = 200;
vp9_header.flexible_mode = false;
vp9_header.beginning_of_frame = true;
vp9_header.end_of_frame = true;
vp9_header.tl0_pic_idx = 200;

packet_->is_first_packet_in_frame = true;
packet_->markerBit = false;
packet_->seqNum = 65486;
packet_->timestamp = 6000;
packet_->frameType = kVideoFrameDelta;
packet_->video_header.vp9().spatial_idx = 0;
packet_->video_header.vp9().picture_id = 6;
packet_->video_header.vp9().temporal_idx = 1;
packet_->video_header.vp9().temporal_up_switch = true;
vp9_header.spatial_idx = 0;
vp9_header.picture_id = 6;
vp9_header.temporal_idx = 1;
vp9_header.temporal_up_switch = true;
EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));

packet_->is_first_packet_in_frame = false;
packet_->markerBit = true;
packet_->seqNum = 65487;
packet_->frameType = kVideoFrameDelta;
packet_->video_header.vp9().spatial_idx = 1;
packet_->video_header.vp9().picture_id = 6;
packet_->video_header.vp9().temporal_idx = 1;
packet_->video_header.vp9().temporal_up_switch = true;
vp9_header.spatial_idx = 1;
vp9_header.picture_id = 6;
vp9_header.temporal_idx = 1;
vp9_header.temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));

packet_->is_first_packet_in_frame = false;
@@ -1083,10 +1100,10 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->seqNum = 65485;
packet_->timestamp = 3000;
packet_->frameType = kVideoFrameKey;
packet_->video_header.vp9().spatial_idx = 1;
packet_->video_header.vp9().picture_id = 5;
packet_->video_header.vp9().temporal_idx = 0;
packet_->video_header.vp9().temporal_up_switch = false;
vp9_header.spatial_idx = 1;
vp9_header.picture_id = 5;
vp9_header.temporal_idx = 0;
vp9_header.temporal_up_switch = false;
EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));

// Insert first frame with SS data.
@@ -1096,12 +1113,12 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->frameType = kVideoFrameKey;
packet_->width = 352;
packet_->height = 288;
packet_->video_header.vp9().spatial_idx = 0;
packet_->video_header.vp9().picture_id = 5;
packet_->video_header.vp9().temporal_idx = 0;
packet_->video_header.vp9().temporal_up_switch = false;
packet_->video_header.vp9().ss_data_available = true;
packet_->video_header.vp9().gof.SetGofInfoVP9(
vp9_header.spatial_idx = 0;
vp9_header.picture_id = 5;
vp9_header.temporal_idx = 0;
vp9_header.temporal_up_switch = false;
vp9_header.ss_data_available = true;
vp9_header.gof.SetGofInfoVP9(
kTemporalStructureMode2); // kTemporalStructureMode3: 0-1-0-1..
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
@@ -135,26 +135,28 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
bool up_switch = false,
GofInfoVP9* ss = nullptr) {
VCMPacket packet;
auto& vp9_header =
packet.video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
packet.timestamp = pid;
packet.codec = kVideoCodecVP9;
packet.seqNum = seq_num_start;
packet.markerBit = (seq_num_start == seq_num_end);
packet.frameType = keyframe ? kVideoFrameKey : kVideoFrameDelta;
packet.video_header.vp9().flexible_mode = false;
packet.video_header.vp9().picture_id = pid % (1 << 15);
packet.video_header.vp9().temporal_idx = tid;
packet.video_header.vp9().spatial_idx = sid;
packet.video_header.vp9().tl0_pic_idx = tl0;
packet.video_header.vp9().temporal_up_switch = up_switch;
vp9_header.flexible_mode = false;
vp9_header.picture_id = pid % (1 << 15);
vp9_header.temporal_idx = tid;
vp9_header.spatial_idx = sid;
vp9_header.tl0_pic_idx = tl0;
vp9_header.temporal_up_switch = up_switch;
if (ss != nullptr) {
packet.video_header.vp9().ss_data_available = true;
packet.video_header.vp9().gof = *ss;
vp9_header.ss_data_available = true;
vp9_header.gof = *ss;
}
ref_packet_buffer_->InsertPacket(&packet);

if (seq_num_start != seq_num_end) {
packet.markerBit = true;
packet.video_header.vp9().ss_data_available = false;
vp9_header.ss_data_available = false;
packet.seqNum = seq_num_end;
ref_packet_buffer_->InsertPacket(&packet);
}
@@ -174,20 +176,22 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
bool inter = false,
std::vector<uint8_t> refs = std::vector<uint8_t>()) {
VCMPacket packet;
auto& vp9_header =
packet.video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
packet.timestamp = pid;
packet.codec = kVideoCodecVP9;
packet.seqNum = seq_num_start;
packet.markerBit = (seq_num_start == seq_num_end);
packet.frameType = keyframe ? kVideoFrameKey : kVideoFrameDelta;
packet.video_header.vp9().inter_layer_predicted = inter;
packet.video_header.vp9().flexible_mode = true;
packet.video_header.vp9().picture_id = pid % (1 << 15);
packet.video_header.vp9().temporal_idx = tid;
packet.video_header.vp9().spatial_idx = sid;
packet.video_header.vp9().tl0_pic_idx = tl0;
packet.video_header.vp9().num_ref_pics = refs.size();
vp9_header.inter_layer_predicted = inter;
vp9_header.flexible_mode = true;
vp9_header.picture_id = pid % (1 << 15);
vp9_header.temporal_idx = tid;
vp9_header.spatial_idx = sid;
vp9_header.tl0_pic_idx = tl0;
vp9_header.num_ref_pics = refs.size();
for (size_t i = 0; i < refs.size(); ++i)
packet.video_header.vp9().pid_diff[i] = refs[i];
vp9_header.pid_diff[i] = refs[i];
ref_packet_buffer_->InsertPacket(&packet);

if (seq_num_start != seq_num_end) {
@@ -64,7 +64,9 @@ int VCMSessionInfo::PictureId() const {
if (packets_.front().video_header.codec == kVideoCodecVP8) {
return packets_.front().video_header.vp8().pictureId;
} else if (packets_.front().video_header.codec == kVideoCodecVP9) {
return packets_.front().video_header.vp9().picture_id;
return absl::get<RTPVideoHeaderVP9>(
packets_.front().video_header.video_type_header)
.picture_id;
} else {
return kNoPictureId;
}
@@ -76,7 +78,9 @@ int VCMSessionInfo::TemporalId() const {
if (packets_.front().video_header.codec == kVideoCodecVP8) {
return packets_.front().video_header.vp8().temporalIdx;
} else if (packets_.front().video_header.codec == kVideoCodecVP9) {
return packets_.front().video_header.vp9().temporal_idx;
return absl::get<RTPVideoHeaderVP9>(
packets_.front().video_header.video_type_header)
.temporal_idx;
} else {
return kNoTemporalIdx;
}
@@ -88,7 +92,9 @@ bool VCMSessionInfo::LayerSync() const {
if (packets_.front().video_header.codec == kVideoCodecVP8) {
return packets_.front().video_header.vp8().layerSync;
} else if (packets_.front().video_header.codec == kVideoCodecVP9) {
return packets_.front().video_header.vp9().temporal_up_switch;
return absl::get<RTPVideoHeaderVP9>(
packets_.front().video_header.video_type_header)
.temporal_up_switch;
} else {
return false;
}
@@ -100,7 +106,9 @@ int VCMSessionInfo::Tl0PicId() const {
if (packets_.front().video_header.codec == kVideoCodecVP8) {
return packets_.front().video_header.vp8().tl0PicIdx;
} else if (packets_.front().video_header.codec == kVideoCodecVP9) {
return packets_.front().video_header.vp9().tl0_pic_idx;
return absl::get<RTPVideoHeaderVP9>(
packets_.front().video_header.video_type_header)
.tl0_pic_idx;
} else {
return kNoTl0PicIdx;
}
@@ -122,17 +130,19 @@ std::vector<NaluInfo> VCMSessionInfo::GetNaluInfos() const {
}

void VCMSessionInfo::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
if (packets_.empty() ||
packets_.front().video_header.codec != kVideoCodecVP9 ||
packets_.front().video_header.vp9().flexible_mode) {
if (packets_.empty())
return;
}
packets_.front().video_header.vp9().temporal_idx = gof_info.temporal_idx[idx];
packets_.front().video_header.vp9().temporal_up_switch =
gof_info.temporal_up_switch[idx];
packets_.front().video_header.vp9().num_ref_pics = gof_info.num_ref_pics[idx];

auto* vp9_header = absl::get_if<RTPVideoHeaderVP9>(
&packets_.front().video_header.video_type_header);
if (!vp9_header || vp9_header->flexible_mode)
return;

vp9_header->temporal_idx = gof_info.temporal_idx[idx];
vp9_header->temporal_up_switch = gof_info.temporal_up_switch[idx];
vp9_header->num_ref_pics = gof_info.num_ref_pics[idx];
for (uint8_t i = 0; i < gof_info.num_ref_pics[idx]; ++i) {
packets_.front().video_header.vp9().pid_diff[i] = gof_info.pid_diff[idx][i];
vp9_header->pid_diff[i] = gof_info.pid_diff[idx][i];
}
}
@@ -135,32 +135,42 @@ bool LayerFilteringTransport::SendRtp(const uint8_t* packet,
RtpDepacketizer::Create(is_vp8 ? kVideoCodecVP8 : kVideoCodecVP9));
RtpDepacketizer::ParsedPayload parsed_payload;
if (depacketizer->Parse(&parsed_payload, payload, payload_data_length)) {
const int temporal_idx = static_cast<int>(
is_vp8 ? parsed_payload.video_header().vp8().temporalIdx
: parsed_payload.video_header().vp9().temporal_idx);
const int spatial_idx = static_cast<int>(
is_vp8 ? kNoSpatialIdx
: parsed_payload.video_header().vp9().spatial_idx);
const bool non_ref_for_inter_layer_pred =
is_vp8 ? false
: parsed_payload.video_header()
.vp9()
.non_ref_for_inter_layer_pred;
// The number of spatial layers is sent in ssData, which is included only
// in the first packet of the first spatial layer of a key frame.
if (!parsed_payload.video_header().vp9().inter_pic_predicted &&
parsed_payload.video_header().vp9().beginning_of_frame == 1 &&
spatial_idx == 0) {
num_active_spatial_layers_ =
parsed_payload.video_header().vp9().num_spatial_layers;
} else if (spatial_idx == kNoSpatialIdx)
int temporal_idx;
int spatial_idx;
bool non_ref_for_inter_layer_pred;
bool end_of_frame;

if (is_vp8) {
temporal_idx = parsed_payload.video_header().vp8().temporalIdx;
spatial_idx = kNoSpatialIdx;
num_active_spatial_layers_ = 1;
non_ref_for_inter_layer_pred = false;
end_of_frame = true;
} else {
const auto& vp9_header = absl::get<RTPVideoHeaderVP9>(
parsed_payload.video_header().video_type_header);
temporal_idx = vp9_header.temporal_idx;
spatial_idx = vp9_header.spatial_idx;
non_ref_for_inter_layer_pred = vp9_header.non_ref_for_inter_layer_pred;
end_of_frame = vp9_header.end_of_frame;

// The number of spatial layers is sent in ssData, which is included
// only in the first packet of the first spatial layer of a key frame.
if (!vp9_header.inter_pic_predicted &&
vp9_header.beginning_of_frame == 1 && spatial_idx == 0) {
num_active_spatial_layers_ = vp9_header.num_spatial_layers;
}
}

if (spatial_idx == kNoSpatialIdx)
num_active_spatial_layers_ = 1;

RTC_CHECK_GT(num_active_spatial_layers_, 0);

if (selected_sl_ >= 0 &&
spatial_idx ==
std::min(num_active_spatial_layers_ - 1, selected_sl_) &&
parsed_payload.video_header().vp9().end_of_frame) {
end_of_frame) {
// This layer is now the last in the superframe.
set_marker_bit = true;
} else {
@@ -101,11 +101,14 @@ class PictureIdObserver : public test::RtpRtcpObserver {
parsed->tl0_pic_idx = parsed_payload.video_header().vp8().tl0PicIdx;
parsed->temporal_idx = parsed_payload.video_header().vp8().temporalIdx;
break;
case kVideoCodecVP9:
parsed->picture_id = parsed_payload.video_header().vp9().picture_id;
parsed->tl0_pic_idx = parsed_payload.video_header().vp9().tl0_pic_idx;
parsed->temporal_idx = parsed_payload.video_header().vp9().temporal_idx;
case kVideoCodecVP9: {
const auto& vp9_header = absl::get<RTPVideoHeaderVP9>(
parsed_payload.video_header().video_type_header);
parsed->picture_id = vp9_header.picture_id;
parsed->tl0_pic_idx = vp9_header.tl0_pic_idx;
parsed->temporal_idx = vp9_header.temporal_idx;
break;
}
default:
RTC_NOTREACHED();
break;
@@ -416,12 +416,19 @@ bool VideoAnalyzer::IsInSelectedSpatialAndTemporalLayer(
bool result =
depacketizer->Parse(&parsed_payload, payload, payload_data_length);
RTC_DCHECK(result);
const int temporal_idx = static_cast<int>(
is_vp8 ? parsed_payload.video_header().vp8().temporalIdx
: parsed_payload.video_header().vp9().temporal_idx);
const int spatial_idx = static_cast<int>(
is_vp8 ? kNoSpatialIdx
: parsed_payload.video_header().vp9().spatial_idx);

int temporal_idx;
int spatial_idx;
if (is_vp8) {
temporal_idx = parsed_payload.video_header().vp8().temporalIdx;
spatial_idx = kNoTemporalIdx;
} else {
const auto& vp9_header = absl::get<RTPVideoHeaderVP9>(
parsed_payload.video_header().video_type_header);
temporal_idx = vp9_header.temporal_idx;
spatial_idx = vp9_header.spatial_idx;
}

return (selected_tl_ < 0 || temporal_idx == kNoTemporalIdx ||
temporal_idx <= selected_tl_) &&
(selected_sl_ < 0 || spatial_idx == kNoSpatialIdx ||
@@ -3176,17 +3176,19 @@ class Vp9HeaderObserver : public test::SendTest {
EXPECT_TRUE(depacketizer.Parse(&parsed, payload, payload_length));
EXPECT_EQ(VideoCodecType::kVideoCodecVP9, parsed.video_header().codec);
// Verify common fields for all configurations.
VerifyCommonHeader(parsed.video_header().vp9());
const auto& vp9_header =
absl::get<RTPVideoHeaderVP9>(parsed.video_header().video_type_header);
VerifyCommonHeader(vp9_header);
CompareConsecutiveFrames(header, parsed.video_header());
// Verify configuration specific settings.
InspectHeader(parsed.video_header().vp9());
InspectHeader(vp9_header);

++packets_sent_;
if (header.markerBit) {
++frames_sent_;
}
last_header_ = header;
last_vp9_ = parsed.video_header().vp9();
last_vp9_ = vp9_header;
}
return SEND_PACKET;
}
@@ -3371,7 +3373,8 @@ class Vp9HeaderObserver : public test::SendTest {

void CompareConsecutiveFrames(const RTPHeader& header,
const RTPVideoHeader& video) const {
const RTPVideoHeaderVP9& vp9 = video.vp9();
const auto& vp9_header =
absl::get<RTPVideoHeaderVP9>(video.video_type_header);

bool new_frame = packets_sent_ == 0 ||
IsNewerTimestamp(header.timestamp, last_header_.timestamp);
@@ -3379,22 +3382,22 @@ class Vp9HeaderObserver : public test::SendTest {
if (!new_frame) {
EXPECT_FALSE(last_header_.markerBit);
EXPECT_EQ(last_header_.timestamp, header.timestamp);
EXPECT_EQ(last_vp9_.picture_id, vp9.picture_id);
EXPECT_EQ(last_vp9_.temporal_idx, vp9.temporal_idx);
EXPECT_EQ(last_vp9_.tl0_pic_idx, vp9.tl0_pic_idx);
VerifySpatialIdxWithinFrame(vp9);
EXPECT_EQ(last_vp9_.picture_id, vp9_header.picture_id);
EXPECT_EQ(last_vp9_.temporal_idx, vp9_header.temporal_idx);
EXPECT_EQ(last_vp9_.tl0_pic_idx, vp9_header.tl0_pic_idx);
VerifySpatialIdxWithinFrame(vp9_header);
return;
}
// New frame.
EXPECT_TRUE(vp9.beginning_of_frame);
EXPECT_TRUE(vp9_header.beginning_of_frame);

// Compare with last packet in previous frame.
if (frames_sent_ == 0)
return;
EXPECT_TRUE(last_vp9_.end_of_frame);
EXPECT_TRUE(last_header_.markerBit);
EXPECT_TRUE(ContinuousPictureId(vp9));
VerifyTl0Idx(vp9);
EXPECT_TRUE(ContinuousPictureId(vp9_header));
VerifyTl0Idx(vp9_header);
}

test::FunctionVideoEncoderFactory encoder_factory_;