Remove framemarking RTP extension.

BUG=webrtc:11637

Change-Id: I47f8e22473429c9762956444e27cfbafb201b208
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/176442
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Tommi <tommi@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#31522}
Author: philipel
Date: 2020-06-15 12:26:39 +02:00
Committed by: Commit Bot
Parent: ef377ec6d5
Commit: 9465978a3b
32 changed files with 49 additions and 805 deletions

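For context: the code removed below reads per-frame metadata out of a FrameMarking struct that is populated from the frame marking RTP header extension. A rough sketch limited to the fields this CL actually touches (an orientation aid only; the real WebRTC struct carries additional fields):

// Sketch, not the full WebRTC definition; only the fields referenced in this diff.
struct FrameMarking {
  bool independent_frame;  // IDR indication; copied into the H264 idr_frame field below.
  bool base_layer_sync;    // Frame references only the base temporal layer.
  uint8_t temporal_id;     // Temporal layer index, kNoTemporalIdx when unset.
  uint8_t tl0_pic_idx;     // Running index of the last base-layer frame.
};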

@@ -135,20 +135,6 @@ void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
}
case kVideoCodecH264: {
_codecSpecificInfo.codecType = kVideoCodecH264;
// The following H264 codec specific data are not used elsewhere.
// Instead they are read directly from the frame marking extension.
// These codec specific data structures should be removed
// when frame marking is used.
_codecSpecificInfo.codecSpecific.H264.temporal_idx = kNoTemporalIdx;
if (header->frame_marking.temporal_id != kNoTemporalIdx) {
_codecSpecificInfo.codecSpecific.H264.temporal_idx =
header->frame_marking.temporal_id;
_codecSpecificInfo.codecSpecific.H264.base_layer_sync =
header->frame_marking.base_layer_sync;
_codecSpecificInfo.codecSpecific.H264.idr_frame =
header->frame_marking.independent_frame;
}
break;
}
default: {


@@ -129,9 +129,5 @@ const RTPVideoHeader& RtpFrameObject::GetRtpVideoHeader() const {
return rtp_video_header_;
}
const FrameMarking& RtpFrameObject::GetFrameMarking() const {
return rtp_video_header_.frame_marking;
}
} // namespace video_coding
} // namespace webrtc


@@ -47,7 +47,6 @@ class RtpFrameObject : public EncodedFrame {
int64_t RenderTime() const override;
bool delayed_by_retransmission() const override;
const RTPVideoHeader& GetRtpVideoHeader() const;
const FrameMarking& GetFrameMarking() const;
private:
RTPVideoHeader rtp_video_header_;


@@ -363,15 +363,10 @@ std::vector<std::unique_ptr<PacketBuffer::Packet>> PacketBuffer::FindFrames(
VideoFrameType::kVideoFrameDelta;
}
// With IPPP, if this is not a keyframe, make sure there are no gaps
// in the packet sequence numbers up until this point.
const uint8_t h264tid =
buffer_[start_index] != nullptr
? buffer_[start_index]->video_header.frame_marking.temporal_id
: kNoTemporalIdx;
if (h264tid == kNoTemporalIdx && !is_h264_keyframe &&
missing_packets_.upper_bound(start_seq_num) !=
missing_packets_.begin()) {
// If this is not a keyframe, make sure there are no gaps in the packet
// sequence numbers up until this point.
if (!is_h264_keyframe && missing_packets_.upper_bound(start_seq_num) !=
missing_packets_.begin()) {
return found_frames;
}
}

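The simplified guard above hands off a non-key H264 frame only when missing_packets_.upper_bound(start_seq_num) == missing_packets_.begin(), i.e. no tracked missing packet sorts at or before start_seq_num in the set's ordering (the buffer keys this set with a wraparound-aware sequence-number comparator), which is how the "no gaps in the packet sequence numbers up until this point" comment is enforced. A minimal standalone illustration of the upper_bound idiom, using a plain ascending std::set rather than the buffer's actual comparator:

#include <cassert>
#include <cstdint>
#include <set>

int main() {
  // Stand-in for missing_packets_: sequence numbers that have not arrived yet.
  std::set<uint16_t> missing = {7, 12};

  // upper_bound(x) != begin() <=> at least one element does not sort strictly
  // after x, i.e. some missing packet precedes x in this set's ordering.
  assert(missing.upper_bound(10) != missing.begin());  // 7 precedes 10: gap, keep waiting.
  assert(missing.upper_bound(3) == missing.begin());   // Nothing precedes 3: no gap.
  return 0;
}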

@@ -108,8 +108,6 @@ RtpFrameReferenceFinder::ManageFrameInternal(RtpFrameObject* frame) {
return ManageFrameVp8(frame);
case kVideoCodecVP9:
return ManageFrameVp9(frame);
case kVideoCodecH264:
return ManageFrameH264(frame);
case kVideoCodecGeneric:
if (auto* generic_header = absl::get_if<RTPVideoHeaderLegacyGeneric>(
&frame->GetRtpVideoHeader().video_type_header)) {
@@ -715,130 +713,6 @@ void RtpFrameReferenceFinder::UnwrapPictureIds(RtpFrameObject* frame) {
frame->id.picture_id = unwrapper_.Unwrap(frame->id.picture_id);
}
RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameH264(
RtpFrameObject* frame) {
const FrameMarking& rtp_frame_marking = frame->GetFrameMarking();
uint8_t tid = rtp_frame_marking.temporal_id;
bool blSync = rtp_frame_marking.base_layer_sync;
if (tid == kNoTemporalIdx)
return ManageFramePidOrSeqNum(std::move(frame), kNoPictureId);
// Protect against corrupted packets with arbitrary large temporal idx.
if (tid >= kMaxTemporalLayers)
return kDrop;
frame->id.picture_id = frame->last_seq_num();
if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
// For H264, use last_seq_num_gop_ to simply store last picture id
// as a pair of unpadded and padded sequence numbers.
if (last_seq_num_gop_.empty()) {
last_seq_num_gop_.insert(std::make_pair(
0, std::make_pair(frame->id.picture_id, frame->id.picture_id)));
}
}
// Stash if we have no keyframe yet.
if (last_seq_num_gop_.empty())
return kStash;
// Check for gap in sequence numbers. Store in |not_yet_received_seq_num_|.
if (frame->frame_type() == VideoFrameType::kVideoFrameDelta) {
uint16_t last_pic_id_padded = last_seq_num_gop_.begin()->second.second;
if (AheadOf<uint16_t>(frame->id.picture_id, last_pic_id_padded)) {
do {
last_pic_id_padded = last_pic_id_padded + 1;
not_yet_received_seq_num_.insert(last_pic_id_padded);
} while (last_pic_id_padded != frame->id.picture_id);
}
}
int64_t unwrapped_tl0 = tl0_unwrapper_.Unwrap(rtp_frame_marking.tl0_pic_idx);
// Clean up info for base layers that are too old.
int64_t old_tl0_pic_idx = unwrapped_tl0 - kMaxLayerInfo;
auto clean_layer_info_to = layer_info_.lower_bound(old_tl0_pic_idx);
layer_info_.erase(layer_info_.begin(), clean_layer_info_to);
// Clean up info about not yet received frames that are too old.
uint16_t old_picture_id = frame->id.picture_id - kMaxNotYetReceivedFrames * 2;
auto clean_frames_to = not_yet_received_seq_num_.lower_bound(old_picture_id);
not_yet_received_seq_num_.erase(not_yet_received_seq_num_.begin(),
clean_frames_to);
if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
frame->num_references = 0;
layer_info_[unwrapped_tl0].fill(-1);
UpdateDataH264(frame, unwrapped_tl0, tid);
return kHandOff;
}
auto layer_info_it =
layer_info_.find(tid == 0 ? unwrapped_tl0 - 1 : unwrapped_tl0);
// Stash if we have no base layer frame yet.
if (layer_info_it == layer_info_.end())
return kStash;
// Base layer frame. Copy layer info from previous base layer frame.
if (tid == 0) {
layer_info_it =
layer_info_.insert(std::make_pair(unwrapped_tl0, layer_info_it->second))
.first;
frame->num_references = 1;
frame->references[0] = layer_info_it->second[0];
UpdateDataH264(frame, unwrapped_tl0, tid);
return kHandOff;
}
// This frame only references its base layer frame.
if (blSync) {
frame->num_references = 1;
frame->references[0] = layer_info_it->second[0];
UpdateDataH264(frame, unwrapped_tl0, tid);
return kHandOff;
}
// Find all references for general frame.
frame->num_references = 0;
for (uint8_t layer = 0; layer <= tid; ++layer) {
// Stash if we have not yet received frames on this temporal layer.
if (layer_info_it->second[layer] == -1)
return kStash;
// Drop if the last frame on this layer is ahead of this frame. A layer sync
// frame was received after this frame for the same base layer frame.
uint16_t last_frame_in_layer = layer_info_it->second[layer];
if (AheadOf<uint16_t>(last_frame_in_layer, frame->id.picture_id))
return kDrop;
// Stash and wait for missing frame between this frame and the reference
auto not_received_seq_num_it =
not_yet_received_seq_num_.upper_bound(last_frame_in_layer);
if (not_received_seq_num_it != not_yet_received_seq_num_.end() &&
AheadOf<uint16_t>(frame->id.picture_id, *not_received_seq_num_it)) {
return kStash;
}
if (!(AheadOf<uint16_t>(frame->id.picture_id, last_frame_in_layer))) {
RTC_LOG(LS_WARNING) << "Frame with picture id " << frame->id.picture_id
<< " and packet range [" << frame->first_seq_num()
<< ", " << frame->last_seq_num()
<< "] already received, "
" dropping frame.";
return kDrop;
}
++frame->num_references;
frame->references[layer] = last_frame_in_layer;
}
UpdateDataH264(frame, unwrapped_tl0, tid);
return kHandOff;
}
void RtpFrameReferenceFinder::UpdateLastPictureIdWithPaddingH264() {
auto seq_num_it = last_seq_num_gop_.begin();

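The removed ManageFrameH264 logic above leans on AheadOf<uint16_t>() for wraparound-aware "newer than" comparisons of picture ids and sequence numbers. As a point of reference, here is a self-contained sketch of the usual half-range rule; this is an approximation of the default 16-bit comparison, not WebRTC's actual implementation:

// Half-range rule for 16-bit ids: a counts as newer than b when stepping
// forward from b reaches a in fewer than 2^15 increments.
#include <cassert>
#include <cstdint>

bool AheadOf16(uint16_t a, uint16_t b) {
  return a != b && static_cast<uint16_t>(a - b) < 0x8000;
}

int main() {
  assert(AheadOf16(0x0000, 0xFFFF));  // 0 is newer than 65535 across the wrap.
  assert(!AheadOf16(0xFFFF, 0x0000));
  assert(AheadOf16(10, 5));           // No wrap involved.
  return 0;
}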

@@ -32,13 +32,11 @@ std::unique_ptr<RtpFrameObject> CreateFrame(
uint16_t seq_num_end,
bool keyframe,
VideoCodecType codec,
const RTPVideoTypeHeader& video_type_header,
const FrameMarking& frame_markings) {
const RTPVideoTypeHeader& video_type_header) {
RTPVideoHeader video_header;
video_header.frame_type = keyframe ? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
video_header.video_type_header = video_type_header;
video_header.frame_marking = frame_markings;
// clang-format off
return std::make_unique<RtpFrameObject>(
@@ -92,7 +90,7 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
bool keyframe) {
std::unique_ptr<RtpFrameObject> frame =
CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecGeneric,
RTPVideoTypeHeader(), FrameMarking());
RTPVideoTypeHeader());
reference_finder_->ManageFrame(std::move(frame));
}
@@ -110,9 +108,8 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
vp8_header.tl0PicIdx = tl0;
vp8_header.layerSync = sync;
std::unique_ptr<RtpFrameObject> frame =
CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecVP8,
vp8_header, FrameMarking());
std::unique_ptr<RtpFrameObject> frame = CreateFrame(
seq_num_start, seq_num_end, keyframe, kVideoCodecVP8, vp8_header);
reference_finder_->ManageFrame(std::move(frame));
}
@@ -140,9 +137,8 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
vp9_header.gof = *ss;
}
std::unique_ptr<RtpFrameObject> frame =
CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecVP9,
vp9_header, FrameMarking());
std::unique_ptr<RtpFrameObject> frame = CreateFrame(
seq_num_start, seq_num_end, keyframe, kVideoCodecVP9, vp9_header);
reference_finder_->ManageFrame(std::move(frame));
}
@@ -166,26 +162,15 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
for (size_t i = 0; i < refs.size(); ++i)
vp9_header.pid_diff[i] = refs[i];
std::unique_ptr<RtpFrameObject> frame =
CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecVP9,
vp9_header, FrameMarking());
std::unique_ptr<RtpFrameObject> frame = CreateFrame(
seq_num_start, seq_num_end, keyframe, kVideoCodecVP9, vp9_header);
reference_finder_->ManageFrame(std::move(frame));
}
void InsertH264(uint16_t seq_num_start,
uint16_t seq_num_end,
bool keyframe,
uint8_t tid = kNoTemporalIdx,
int32_t tl0 = kNoTl0PicIdx,
bool sync = false) {
FrameMarking frame_marking{};
frame_marking.temporal_id = tid;
frame_marking.tl0_pic_idx = tl0;
frame_marking.base_layer_sync = sync;
void InsertH264(uint16_t seq_num_start, uint16_t seq_num_end, bool keyframe) {
std::unique_ptr<RtpFrameObject> frame =
CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecH264,
RTPVideoTypeHeader(), frame_marking);
RTPVideoTypeHeader());
reference_finder_->ManageFrame(std::move(frame));
}
@@ -1440,14 +1425,28 @@ TEST_F(TestRtpFrameReferenceFinder, H264KeyFrameReferences) {
CheckReferencesH264(sn);
}
// Test with 1 temporal layer.
TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayers_0) {
TEST_F(TestRtpFrameReferenceFinder, H264SequenceNumberWrap) {
uint16_t sn = 0xFFFF;
InsertH264(sn - 1, sn - 1, true);
InsertH264(sn, sn, false);
InsertH264(sn + 1, sn + 1, false);
InsertH264(sn + 2, sn + 2, false);
ASSERT_EQ(4UL, frames_from_callback_.size());
CheckReferencesH264(sn - 1);
CheckReferencesH264(sn, sn - 1);
CheckReferencesH264(sn + 1, sn);
CheckReferencesH264(sn + 2, sn + 1);
}
TEST_F(TestRtpFrameReferenceFinder, H264Frames) {
uint16_t sn = Rand();
InsertH264(sn, sn, true, 0, 1);
InsertH264(sn + 1, sn + 1, false, 0, 2);
InsertH264(sn + 2, sn + 2, false, 0, 3);
InsertH264(sn + 3, sn + 3, false, 0, 4);
InsertH264(sn, sn, true);
InsertH264(sn + 1, sn + 1, false);
InsertH264(sn + 2, sn + 2, false);
InsertH264(sn + 3, sn + 3, false);
ASSERT_EQ(4UL, frames_from_callback_.size());
CheckReferencesH264(sn);
@@ -1456,37 +1455,16 @@ TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayers_0) {
CheckReferencesH264(sn + 3, sn + 2);
}
TEST_F(TestRtpFrameReferenceFinder, H264DuplicateTl1Frames) {
TEST_F(TestRtpFrameReferenceFinder, H264Reordering) {
uint16_t sn = Rand();
InsertH264(sn, sn, true, 0, 0);
InsertH264(sn + 1, sn + 1, false, 1, 0, true);
InsertH264(sn + 2, sn + 2, false, 0, 1);
InsertH264(sn + 3, sn + 3, false, 1, 1);
InsertH264(sn + 3, sn + 3, false, 1, 1);
InsertH264(sn + 4, sn + 4, false, 0, 2);
InsertH264(sn + 5, sn + 5, false, 1, 2);
ASSERT_EQ(6UL, frames_from_callback_.size());
CheckReferencesH264(sn);
CheckReferencesH264(sn + 1, sn);
CheckReferencesH264(sn + 2, sn);
CheckReferencesH264(sn + 3, sn + 1, sn + 2);
CheckReferencesH264(sn + 4, sn + 2);
CheckReferencesH264(sn + 5, sn + 3, sn + 4);
}
// Test with 1 temporal layer.
TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayersReordering_0) {
uint16_t sn = Rand();
InsertH264(sn, sn, true, 0, 1);
InsertH264(sn + 1, sn + 1, false, 0, 2);
InsertH264(sn + 3, sn + 3, false, 0, 4);
InsertH264(sn + 2, sn + 2, false, 0, 3);
InsertH264(sn + 5, sn + 5, false, 0, 6);
InsertH264(sn + 6, sn + 6, false, 0, 7);
InsertH264(sn + 4, sn + 4, false, 0, 5);
InsertH264(sn, sn, true);
InsertH264(sn + 1, sn + 1, false);
InsertH264(sn + 3, sn + 3, false);
InsertH264(sn + 2, sn + 2, false);
InsertH264(sn + 5, sn + 5, false);
InsertH264(sn + 6, sn + 6, false);
InsertH264(sn + 4, sn + 4, false);
ASSERT_EQ(7UL, frames_from_callback_.size());
CheckReferencesH264(sn);
@@ -1498,258 +1476,13 @@ TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayersReordering_0) {
CheckReferencesH264(sn + 6, sn + 5);
}
// Test with 2 temporal layers in a 01 pattern.
TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayers_01) {
uint16_t sn = Rand();
InsertH264(sn, sn, true, 0, 255);
InsertH264(sn + 1, sn + 1, false, 1, 255, true);
InsertH264(sn + 2, sn + 2, false, 0, 0);
InsertH264(sn + 3, sn + 3, false, 1, 0);
ASSERT_EQ(4UL, frames_from_callback_.size());
CheckReferencesH264(sn);
CheckReferencesH264(sn + 1, sn);
CheckReferencesH264(sn + 2, sn);
CheckReferencesH264(sn + 3, sn + 1, sn + 2);
}
TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayersMultiSn_01) {
uint16_t sn = Rand();
InsertH264(sn, sn + 3, true, 0, 255);
InsertH264(sn + 4, sn + 5, false, 1, 255, true);
InsertH264(sn + 6, sn + 8, false, 0, 0);
InsertH264(sn + 9, sn + 9, false, 1, 0);
ASSERT_EQ(4UL, frames_from_callback_.size());
CheckReferencesH264(sn + 3);
CheckReferencesH264(sn + 5, sn + 3);
CheckReferencesH264(sn + 8, sn + 3);
CheckReferencesH264(sn + 9, sn + 5, sn + 8);
}
// Test with 2 temporal layers in a 01 pattern.
TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayersReordering_01) {
uint16_t sn = Rand();
InsertH264(sn + 1, sn + 1, false, 1, 255, true);
InsertH264(sn, sn, true, 0, 255);
InsertH264(sn + 3, sn + 3, false, 1, 0);
InsertH264(sn + 5, sn + 5, false, 1, 1);
InsertH264(sn + 2, sn + 2, false, 0, 0);
InsertH264(sn + 4, sn + 4, false, 0, 1);
InsertH264(sn + 6, sn + 6, false, 0, 2);
InsertH264(sn + 7, sn + 7, false, 1, 2);
ASSERT_EQ(8UL, frames_from_callback_.size());
CheckReferencesH264(sn);
CheckReferencesH264(sn + 1, sn);
CheckReferencesH264(sn + 2, sn);
CheckReferencesH264(sn + 3, sn + 1, sn + 2);
CheckReferencesH264(sn + 4, sn + 2);
CheckReferencesH264(sn + 5, sn + 3, sn + 4);
CheckReferencesH264(sn + 6, sn + 4);
CheckReferencesH264(sn + 7, sn + 5, sn + 6);
}
// Test with 3 temporal layers in a 0212 pattern.
TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayers_0212) {
uint16_t sn = Rand();
InsertH264(sn, sn, true, 0, 55);
InsertH264(sn + 1, sn + 1, false, 2, 55, true);
InsertH264(sn + 2, sn + 2, false, 1, 55, true);
InsertH264(sn + 3, sn + 3, false, 2, 55);
InsertH264(sn + 4, sn + 4, false, 0, 56);
InsertH264(sn + 5, sn + 5, false, 2, 56, true);
InsertH264(sn + 6, sn + 6, false, 1, 56, true);
InsertH264(sn + 7, sn + 7, false, 2, 56);
InsertH264(sn + 8, sn + 8, false, 0, 57);
InsertH264(sn + 9, sn + 9, false, 2, 57, true);
InsertH264(sn + 10, sn + 10, false, 1, 57, true);
InsertH264(sn + 11, sn + 11, false, 2, 57);
ASSERT_EQ(12UL, frames_from_callback_.size());
CheckReferencesH264(sn);
CheckReferencesH264(sn + 1, sn);
CheckReferencesH264(sn + 2, sn);
CheckReferencesH264(sn + 3, sn, sn + 1, sn + 2);
CheckReferencesH264(sn + 4, sn);
CheckReferencesH264(sn + 5, sn + 4);
CheckReferencesH264(sn + 6, sn + 4);
CheckReferencesH264(sn + 7, sn + 4, sn + 5, sn + 6);
CheckReferencesH264(sn + 8, sn + 4);
CheckReferencesH264(sn + 9, sn + 8);
CheckReferencesH264(sn + 10, sn + 8);
CheckReferencesH264(sn + 11, sn + 8, sn + 9, sn + 10);
}
// Test with 3 temporal layers in a 0212 pattern.
TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayersMissingFrame_0212) {
uint16_t sn = Rand();
InsertH264(sn, sn, true, 0, 55, false);
InsertH264(sn + 2, sn + 2, false, 1, 55, true);
InsertH264(sn + 3, sn + 3, false, 2, 55, false);
ASSERT_EQ(2UL, frames_from_callback_.size());
CheckReferencesH264(sn);
CheckReferencesH264(sn + 2, sn);
}
// Test with 3 temporal layers in a 0212 pattern.
TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayersReordering_0212) {
uint16_t sn = Rand();
InsertH264(sn + 1, sn + 1, false, 2, 55, true);
InsertH264(sn, sn, true, 0, 55, false);
InsertH264(sn + 2, sn + 2, false, 1, 55, true);
InsertH264(sn + 4, sn + 4, false, 0, 56, false);
InsertH264(sn + 5, sn + 5, false, 2, 56, false);
InsertH264(sn + 3, sn + 3, false, 2, 55, false);
InsertH264(sn + 7, sn + 7, false, 2, 56, false);
InsertH264(sn + 9, sn + 9, false, 2, 57, true);
InsertH264(sn + 6, sn + 6, false, 1, 56, false);
InsertH264(sn + 8, sn + 8, false, 0, 57, false);
InsertH264(sn + 11, sn + 11, false, 2, 57, false);
InsertH264(sn + 10, sn + 10, false, 1, 57, true);
ASSERT_EQ(12UL, frames_from_callback_.size());
CheckReferencesH264(sn);
CheckReferencesH264(sn + 1, sn);
CheckReferencesH264(sn + 2, sn);
CheckReferencesH264(sn + 3, sn, sn + 1, sn + 2);
CheckReferencesH264(sn + 4, sn);
CheckReferencesH264(sn + 5, sn + 2, sn + 3, sn + 4);
CheckReferencesH264(sn + 6, sn + 2, sn + 4);
CheckReferencesH264(sn + 7, sn + 4, sn + 5, sn + 6);
CheckReferencesH264(sn + 8, sn + 4);
CheckReferencesH264(sn + 9, sn + 8);
CheckReferencesH264(sn + 10, sn + 8);
CheckReferencesH264(sn + 11, sn + 8, sn + 9, sn + 10);
}
TEST_F(TestRtpFrameReferenceFinder, H264InsertManyFrames_0212) {
uint16_t sn = Rand();
const int keyframes_to_insert = 50;
const int frames_per_keyframe = 120; // Should be a multiple of 4.
uint8_t tl0 = 128;
for (int k = 0; k < keyframes_to_insert; ++k) {
InsertH264(sn, sn, true, 0, tl0, false);
InsertH264(sn + 1, sn + 1, false, 2, tl0, true);
InsertH264(sn + 2, sn + 2, false, 1, tl0, true);
InsertH264(sn + 3, sn + 3, false, 2, tl0, false);
CheckReferencesH264(sn);
CheckReferencesH264(sn + 1, sn);
CheckReferencesH264(sn + 2, sn);
CheckReferencesH264(sn + 3, sn, sn + 1, sn + 2);
frames_from_callback_.clear();
++tl0;
for (int f = 4; f < frames_per_keyframe; f += 4) {
uint16_t sf = sn + f;
InsertH264(sf, sf, false, 0, tl0, false);
InsertH264(sf + 1, sf + 1, false, 2, tl0, false);
InsertH264(sf + 2, sf + 2, false, 1, tl0, false);
InsertH264(sf + 3, sf + 3, false, 2, tl0, false);
CheckReferencesH264(sf, sf - 4);
CheckReferencesH264(sf + 1, sf, sf - 1, sf - 2);
CheckReferencesH264(sf + 2, sf, sf - 2);
CheckReferencesH264(sf + 3, sf, sf + 1, sf + 2);
frames_from_callback_.clear();
++tl0;
}
sn += frames_per_keyframe;
}
}
TEST_F(TestRtpFrameReferenceFinder, H264LayerSync) {
uint16_t sn = Rand();
InsertH264(sn, sn, true, 0, 0, false);
InsertH264(sn + 1, sn + 1, false, 1, 0, true);
InsertH264(sn + 2, sn + 2, false, 0, 1, false);
ASSERT_EQ(3UL, frames_from_callback_.size());
InsertH264(sn + 4, sn + 4, false, 0, 2, false);
InsertH264(sn + 5, sn + 5, false, 1, 2, true);
InsertH264(sn + 6, sn + 6, false, 0, 3, false);
InsertH264(sn + 7, sn + 7, false, 1, 3, false);
ASSERT_EQ(7UL, frames_from_callback_.size());
CheckReferencesH264(sn);
CheckReferencesH264(sn + 1, sn);
CheckReferencesH264(sn + 2, sn);
CheckReferencesH264(sn + 4, sn + 2);
CheckReferencesH264(sn + 5, sn + 4);
CheckReferencesH264(sn + 6, sn + 4);
CheckReferencesH264(sn + 7, sn + 6, sn + 5);
}
TEST_F(TestRtpFrameReferenceFinder, H264Tl1SyncFrameAfterTl1Frame) {
InsertH264(1000, 1000, true, 0, 247, true);
InsertH264(1001, 1001, false, 0, 248, false);
InsertH264(1002, 1002, false, 1, 248, false); // Will be dropped
InsertH264(1003, 1003, false, 1, 248, true); // due to this frame.
ASSERT_EQ(3UL, frames_from_callback_.size());
CheckReferencesH264(1000);
CheckReferencesH264(1001, 1000);
CheckReferencesH264(1003, 1001);
}
TEST_F(TestRtpFrameReferenceFinder, H264DetectMissingFrame_0212) {
InsertH264(1, 1, true, 0, 1, false);
InsertH264(2, 2, false, 2, 1, true);
InsertH264(3, 3, false, 1, 1, true);
InsertH264(4, 4, false, 2, 1, false);
InsertH264(6, 6, false, 2, 2, false);
InsertH264(7, 7, false, 1, 2, false);
InsertH264(8, 8, false, 2, 2, false);
ASSERT_EQ(4UL, frames_from_callback_.size());
InsertH264(5, 5, false, 0, 2, false);
ASSERT_EQ(8UL, frames_from_callback_.size());
CheckReferencesH264(1);
CheckReferencesH264(2, 1);
CheckReferencesH264(3, 1);
CheckReferencesH264(4, 3, 2, 1);
CheckReferencesH264(5, 1);
CheckReferencesH264(6, 5, 4, 3);
CheckReferencesH264(7, 5, 3);
CheckReferencesH264(8, 7, 6, 5);
}
TEST_F(TestRtpFrameReferenceFinder, H264SequenceNumberWrap) {
uint16_t sn = 0xFFFF;
InsertH264(sn - 1, sn - 1, true, 0, 1);
InsertH264(sn, sn, false, 0, 2);
InsertH264(sn + 1, sn + 1, false, 0, 3);
InsertH264(sn + 2, sn + 2, false, 0, 4);
ASSERT_EQ(4UL, frames_from_callback_.size());
CheckReferencesH264(sn - 1);
CheckReferencesH264(sn, sn - 1);
CheckReferencesH264(sn + 1, sn);
CheckReferencesH264(sn + 2, sn + 1);
}
TEST_F(TestRtpFrameReferenceFinder, H264SequenceNumberWrapMulti) {
uint16_t sn = 0xFFFF;
InsertH264(sn - 3, sn - 2, true, 0, 1);
InsertH264(sn - 1, sn + 1, false, 0, 2);
InsertH264(sn + 2, sn + 3, false, 0, 3);
InsertH264(sn + 4, sn + 7, false, 0, 4);
InsertH264(sn - 3, sn - 2, true);
InsertH264(sn - 1, sn + 1, false);
InsertH264(sn + 2, sn + 3, false);
InsertH264(sn + 4, sn + 7, false);
ASSERT_EQ(4UL, frames_from_callback_.size());
CheckReferencesH264(sn - 2);
@@ -1758,35 +1491,5 @@ TEST_F(TestRtpFrameReferenceFinder, H264SequenceNumberWrapMulti) {
CheckReferencesH264(sn + 7, sn + 3);
}
TEST_F(TestRtpFrameReferenceFinder, H264Tl0PicIdxWrap) {
int numTl0Wraps = 1000;
int64_t sn = Rand();
for (int i = 0; i < numTl0Wraps; i++) {
for (int tl0 = 0; tl0 < 256; tl0 += 16, sn += 16) {
InsertH264(sn, sn, true, 0, tl0);
reference_finder_->ClearTo(sn); // Too many stashed frames cause errors.
for (int k = 1; k < 8; k++) {
InsertH264(sn + k, sn + k, false, 0, tl0 + k);
}
// Skip a TL0 index.
for (int k = 9; k < 16; k++) {
InsertH264(sn + k, sn + k, false, 0, tl0 + k);
}
ASSERT_EQ(8UL, frames_from_callback_.size());
CheckReferencesH264(sn);
for (int k = 1; k < 8; k++) {
CheckReferencesH264(sn + k, sn + k - 1);
}
frames_from_callback_.clear();
}
}
}
} // namespace video_coding
} // namespace webrtc


@@ -95,8 +95,6 @@ int VCMSessionInfo::TemporalId() const {
return absl::get<RTPVideoHeaderVP9>(
packets_.front().video_header.video_type_header)
.temporal_idx;
} else if (packets_.front().video_header.codec == kVideoCodecH264) {
return packets_.front().video_header.frame_marking.temporal_id;
} else {
return kNoTemporalIdx;
}
@@ -113,8 +111,6 @@ bool VCMSessionInfo::LayerSync() const {
return absl::get<RTPVideoHeaderVP9>(
packets_.front().video_header.video_type_header)
.temporal_up_switch;
} else if (packets_.front().video_header.codec == kVideoCodecH264) {
return packets_.front().video_header.frame_marking.base_layer_sync;
} else {
return false;
}
@@ -131,8 +127,6 @@ int VCMSessionInfo::Tl0PicId() const {
return absl::get<RTPVideoHeaderVP9>(
packets_.front().video_header.video_type_header)
.tl0_pic_idx;
} else if (packets_.front().video_header.codec == kVideoCodecH264) {
return packets_.front().video_header.frame_marking.tl0_pic_idx;
} else {
return kNoTl0PicIdx;
}