Add sender controlled playout delay limits

This CL adds support for an extension on RTP frames to allow the sender
to specify the minimum and maximum playout delay limits.

The receiver makes a best-effort attempt to keep the capture-to-render delay
within this range. This allows different types of applications to specify
different end-to-end delay goals. For example, gaming can support rendering
of frames as soon as they are received on the receiver to minimize delay. A
movie playback application can specify a minimum playout delay to allow fixed
buffering in the presence of network jitter.

There are no tests at this time and most of the testing has been done with the
Chromium WebRTC prototype.

In Chromoting performance tests, this extension helps bring down end-to-end
delay by about 150 ms on small frames.

BUG=webrtc:5895

Review-Url: https://codereview.webrtc.org/2007743003
Cr-Commit-Position: refs/heads/master@{#13059}
This commit is contained in:
isheriff
2016-06-08 00:24:21 -07:00
committed by Commit bot
parent 5d910286e1
commit 6b4b5f3770
41 changed files with 859 additions and 407 deletions

View File

@ -36,8 +36,8 @@ TEST(TestDecodingState, FrameContinuity) {
packet.timestamp = 1;
packet.seqNum = 0xffff;
packet.frameType = kVideoFrameDelta;
packet.codecSpecificHeader.codec = kRtpVideoVp8;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0x007F;
packet.video_header.codec = kRtpVideoVp8;
packet.video_header.codecHeader.VP8.pictureId = 0x007F;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@ -53,17 +53,17 @@ TEST(TestDecodingState, FrameContinuity) {
packet.frameType = kVideoFrameDelta;
// Use pictureId
packet.isFirstPacket = false;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0x0002;
packet.video_header.codecHeader.VP8.pictureId = 0x0002;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
frame.Reset();
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
packet.video_header.codecHeader.VP8.pictureId = 0;
packet.seqNum = 10;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
// Use sequence numbers.
packet.codecSpecificHeader.codecHeader.VP8.pictureId = kNoPictureId;
packet.video_header.codecHeader.VP8.pictureId = kNoPictureId;
frame.Reset();
packet.seqNum = dec_state.sequence_num() - 1u;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@ -82,9 +82,9 @@ TEST(TestDecodingState, FrameContinuity) {
// Insert packet with temporal info.
dec_state.Reset();
frame.Reset();
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 0;
packet.seqNum = 1;
packet.timestamp = 1;
EXPECT_TRUE(dec_state.full_sync());
@ -93,9 +93,9 @@ TEST(TestDecodingState, FrameContinuity) {
EXPECT_TRUE(dec_state.full_sync());
frame.Reset();
// 1 layer up - still good.
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 1;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 1;
packet.video_header.codecHeader.VP8.pictureId = 1;
packet.seqNum = 2;
packet.timestamp = 2;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@ -104,18 +104,18 @@ TEST(TestDecodingState, FrameContinuity) {
EXPECT_TRUE(dec_state.full_sync());
frame.Reset();
// Lost non-base layer packet => should update sync parameter.
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 3;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 3;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 3;
packet.video_header.codecHeader.VP8.pictureId = 3;
packet.seqNum = 4;
packet.timestamp = 4;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
// Now insert the next non-base layer (belonging to a next tl0PicId).
frame.Reset();
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 2;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 4;
packet.video_header.codecHeader.VP8.tl0PicIdx = 1;
packet.video_header.codecHeader.VP8.temporalIdx = 2;
packet.video_header.codecHeader.VP8.pictureId = 4;
packet.seqNum = 5;
packet.timestamp = 5;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@ -125,9 +125,9 @@ TEST(TestDecodingState, FrameContinuity) {
EXPECT_TRUE(dec_state.full_sync());
// Next base layer (dropped interim non-base layers) - should update sync.
frame.Reset();
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 5;
packet.video_header.codecHeader.VP8.tl0PicIdx = 1;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 5;
packet.seqNum = 6;
packet.timestamp = 6;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@ -137,18 +137,18 @@ TEST(TestDecodingState, FrameContinuity) {
// Check wrap for temporal layers.
frame.Reset();
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0x00FF;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 6;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0x00FF;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 6;
packet.seqNum = 7;
packet.timestamp = 7;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
dec_state.SetState(&frame);
EXPECT_FALSE(dec_state.full_sync());
frame.Reset();
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0x0000;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 7;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0x0000;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 7;
packet.seqNum = 8;
packet.timestamp = 8;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@ -211,12 +211,12 @@ TEST(TestDecodingState, MultiLayerBehavior) {
VCMFrameBuffer frame;
VCMPacket packet;
packet.frameType = kVideoFrameDelta;
packet.codecSpecificHeader.codec = kRtpVideoVp8;
packet.video_header.codec = kRtpVideoVp8;
packet.timestamp = 0;
packet.seqNum = 0;
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 0;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@ -226,9 +226,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
frame.Reset();
packet.timestamp = 1;
packet.seqNum = 1;
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 1;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 1;
packet.video_header.codecHeader.VP8.pictureId = 1;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@ -238,9 +238,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
frame.Reset();
packet.timestamp = 3;
packet.seqNum = 3;
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 3;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 3;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 3;
packet.video_header.codecHeader.VP8.pictureId = 3;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@ -249,9 +249,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
frame.Reset();
packet.timestamp = 4;
packet.seqNum = 4;
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 4;
packet.video_header.codecHeader.VP8.tl0PicIdx = 1;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 4;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@ -263,9 +263,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.isFirstPacket = 1;
packet.timestamp = 5;
packet.seqNum = 5;
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 2;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 5;
packet.video_header.codecHeader.VP8.tl0PicIdx = 2;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 5;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@ -276,9 +276,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.frameType = kVideoFrameDelta;
packet.timestamp = 6;
packet.seqNum = 6;
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 3;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 6;
packet.video_header.codecHeader.VP8.tl0PicIdx = 3;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 6;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
EXPECT_TRUE(dec_state.full_sync());
@ -287,9 +287,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.isFirstPacket = 1;
packet.timestamp = 8;
packet.seqNum = 8;
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 4;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 8;
packet.video_header.codecHeader.VP8.tl0PicIdx = 4;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 8;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
EXPECT_TRUE(dec_state.full_sync());
@ -302,10 +302,10 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.isFirstPacket = 1;
packet.timestamp = 9;
packet.seqNum = 9;
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 4;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 2;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 9;
packet.codecSpecificHeader.codecHeader.VP8.layerSync = true;
packet.video_header.codecHeader.VP8.tl0PicIdx = 4;
packet.video_header.codecHeader.VP8.temporalIdx = 2;
packet.video_header.codecHeader.VP8.pictureId = 9;
packet.video_header.codecHeader.VP8.layerSync = true;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
dec_state.SetState(&frame);
EXPECT_TRUE(dec_state.full_sync());
@ -323,10 +323,10 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.markerBit = 1;
packet.timestamp = 0;
packet.seqNum = 0;
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
packet.codecSpecificHeader.codecHeader.VP8.layerSync = false;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 0;
packet.video_header.codecHeader.VP8.layerSync = false;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
dec_state.SetState(&frame);
EXPECT_TRUE(dec_state.full_sync());
@ -337,10 +337,10 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.markerBit = 0;
packet.timestamp = 1;
packet.seqNum = 1;
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 2;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 1;
packet.codecSpecificHeader.codecHeader.VP8.layerSync = true;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 2;
packet.video_header.codecHeader.VP8.pictureId = 1;
packet.video_header.codecHeader.VP8.layerSync = true;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
// Layer 1
@ -350,10 +350,10 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.markerBit = 1;
packet.timestamp = 2;
packet.seqNum = 3;
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 2;
packet.codecSpecificHeader.codecHeader.VP8.layerSync = true;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 1;
packet.video_header.codecHeader.VP8.pictureId = 2;
packet.video_header.codecHeader.VP8.layerSync = true;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
EXPECT_TRUE(dec_state.full_sync());
@ -365,12 +365,12 @@ TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) {
VCMPacket packet;
frame.Reset();
packet.frameType = kVideoFrameKey;
packet.codecSpecificHeader.codec = kRtpVideoVp8;
packet.video_header.codec = kRtpVideoVp8;
packet.timestamp = 0;
packet.seqNum = 0;
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 0;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@ -384,8 +384,8 @@ TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) {
packet.frameType = kVideoFrameDelta;
packet.timestamp += 3000;
++packet.seqNum;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 2;
packet.video_header.codecHeader.VP8.temporalIdx = 1;
packet.video_header.codecHeader.VP8.pictureId = 2;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@ -418,12 +418,12 @@ TEST(TestDecodingState, PictureIdRepeat) {
VCMFrameBuffer frame;
VCMPacket packet;
packet.frameType = kVideoFrameDelta;
packet.codecSpecificHeader.codec = kRtpVideoVp8;
packet.video_header.codec = kRtpVideoVp8;
packet.timestamp = 0;
packet.seqNum = 0;
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 0;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@ -433,15 +433,15 @@ TEST(TestDecodingState, PictureIdRepeat) {
frame.Reset();
++packet.timestamp;
++packet.seqNum;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx++;
packet.codecSpecificHeader.codecHeader.VP8.pictureId++;
packet.video_header.codecHeader.VP8.temporalIdx++;
packet.video_header.codecHeader.VP8.pictureId++;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
frame.Reset();
// Testing only gap in tl0PicIdx when tl0PicIdx in continuous.
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx += 3;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx++;
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1;
packet.video_header.codecHeader.VP8.tl0PicIdx += 3;
packet.video_header.codecHeader.VP8.temporalIdx++;
packet.video_header.codecHeader.VP8.tl0PicIdx = 1;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
}
@ -456,9 +456,9 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeKeyFrame) {
uint8_t data[] = "I need a data pointer for this test!";
packet.sizeBytes = sizeof(data);
packet.dataPtr = data;
packet.codecSpecificHeader.codec = kRtpVideoVp9;
packet.video_header.codec = kRtpVideoVp9;
RTPVideoHeaderVP9& vp9_hdr = packet.codecSpecificHeader.codecHeader.VP9;
RTPVideoHeaderVP9& vp9_hdr = packet.video_header.codecHeader.VP9;
vp9_hdr.picture_id = 10;
vp9_hdr.flexible_mode = true;
@ -499,9 +499,9 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeOutOfOrderFrames) {
uint8_t data[] = "I need a data pointer for this test!";
packet.sizeBytes = sizeof(data);
packet.dataPtr = data;
packet.codecSpecificHeader.codec = kRtpVideoVp9;
packet.video_header.codec = kRtpVideoVp9;
RTPVideoHeaderVP9& vp9_hdr = packet.codecSpecificHeader.codecHeader.VP9;
RTPVideoHeaderVP9& vp9_hdr = packet.video_header.codecHeader.VP9;
vp9_hdr.picture_id = 10;
vp9_hdr.flexible_mode = true;
@ -554,9 +554,9 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {
uint8_t data[] = "I need a data pointer for this test!";
packet.sizeBytes = sizeof(data);
packet.dataPtr = data;
packet.codecSpecificHeader.codec = kRtpVideoVp9;
packet.video_header.codec = kRtpVideoVp9;
RTPVideoHeaderVP9& vp9_hdr = packet.codecSpecificHeader.codecHeader.VP9;
RTPVideoHeaderVP9& vp9_hdr = packet.video_header.codecHeader.VP9;
vp9_hdr.picture_id = 10;
vp9_hdr.flexible_mode = true;

View File

@ -129,7 +129,7 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
// Don't copy payload specific data for empty packets (e.g padding packets).
if (packet.sizeBytes > 0)
CopyCodecSpecific(&packet.codecSpecificHeader);
CopyCodecSpecific(&packet.video_header);
int retVal =
_sessionInfo.InsertPacket(packet, _buffer, decode_error_mode, frame_data);
@ -153,10 +153,14 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
// (HEVC)).
if (packet.markerBit) {
RTC_DCHECK(!_rotation_set);
_rotation = packet.codecSpecificHeader.rotation;
_rotation = packet.video_header.rotation;
_rotation_set = true;
}
if (packet.isFirstPacket) {
playout_delay_ = packet.video_header.playout_delay;
}
if (_sessionInfo.complete()) {
SetState(kStateComplete);
return kCompleteSession;

View File

@ -71,7 +71,7 @@ RTPVideoTypeHeader* RtpFrameObject::GetCodecHeader() const {
VCMPacket* packet = packet_buffer_->GetPacket(first_seq_num_);
if (!packet)
return nullptr;
return &packet->codecSpecificHeader.codecHeader;
return &packet->video_header.codecHeader;
}
} // namespace video_coding

View File

@ -124,10 +124,10 @@ void FrameList::Reset(UnorderedFrameList* free_frames) {
}
bool Vp9SsMap::Insert(const VCMPacket& packet) {
if (!packet.codecSpecificHeader.codecHeader.VP9.ss_data_available)
if (!packet.video_header.codecHeader.VP9.ss_data_available)
return false;
ss_map_[packet.timestamp] = packet.codecSpecificHeader.codecHeader.VP9.gof;
ss_map_[packet.timestamp] = packet.video_header.codecHeader.VP9.gof;
return true;
}
@ -175,7 +175,7 @@ void Vp9SsMap::AdvanceFront(uint32_t timestamp) {
// TODO(asapersson): Update according to updates in RTP payload profile.
bool Vp9SsMap::UpdatePacket(VCMPacket* packet) {
uint8_t gof_idx = packet->codecSpecificHeader.codecHeader.VP9.gof_idx;
uint8_t gof_idx = packet->video_header.codecHeader.VP9.gof_idx;
if (gof_idx == kNoGofIdx)
return false; // No update needed.
@ -186,7 +186,7 @@ bool Vp9SsMap::UpdatePacket(VCMPacket* packet) {
if (gof_idx >= it->second.num_frames_in_gof)
return false; // Assume corresponding SS not yet received.
RTPVideoHeaderVP9* vp9 = &packet->codecSpecificHeader.codecHeader.VP9;
RTPVideoHeaderVP9* vp9 = &packet->video_header.codecHeader.VP9;
vp9->temporal_idx = it->second.temporal_idx[gof_idx];
vp9->temporal_up_switch = it->second.temporal_up_switch[gof_idx];
@ -497,12 +497,11 @@ bool VCMJitterBuffer::CompleteSequenceWithNextFrame() {
// Returns immediately or a |max_wait_time_ms| ms event hang waiting for a
// complete frame, |max_wait_time_ms| decided by caller.
bool VCMJitterBuffer::NextCompleteTimestamp(uint32_t max_wait_time_ms,
uint32_t* timestamp) {
VCMEncodedFrame* VCMJitterBuffer::NextCompleteFrame(uint32_t max_wait_time_ms) {
crit_sect_->Enter();
if (!running_) {
crit_sect_->Leave();
return false;
return nullptr;
}
CleanUpOldOrEmptyFrames();
@ -520,7 +519,7 @@ bool VCMJitterBuffer::NextCompleteTimestamp(uint32_t max_wait_time_ms,
// Are we shutting down the jitter buffer?
if (!running_) {
crit_sect_->Leave();
return false;
return nullptr;
}
// Finding oldest frame ready for decoder.
CleanUpOldOrEmptyFrames();
@ -538,11 +537,11 @@ bool VCMJitterBuffer::NextCompleteTimestamp(uint32_t max_wait_time_ms,
if (decodable_frames_.empty() ||
decodable_frames_.Front()->GetState() != kStateComplete) {
crit_sect_->Leave();
return false;
return nullptr;
}
*timestamp = decodable_frames_.Front()->TimeStamp();
VCMEncodedFrame* encoded_frame = decodable_frames_.Front();
crit_sect_->Leave();
return true;
return encoded_frame;
}
bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(uint32_t* timestamp) {

View File

@ -152,9 +152,8 @@ class VCMJitterBuffer {
bool CompleteSequenceWithNextFrame();
// Wait |max_wait_time_ms| for a complete frame to arrive.
// The function returns true once such a frame is found, its corresponding
// timestamp is returned. Otherwise, returns false.
bool NextCompleteTimestamp(uint32_t max_wait_time_ms, uint32_t* timestamp);
// If found, a pointer to the frame is returned. Returns nullptr otherwise.
VCMEncodedFrame* NextCompleteFrame(uint32_t max_wait_time_ms);
// Locates a frame for decoding (even an incomplete) without delay.
// The function returns true once such a frame is found, its corresponding

View File

@ -42,13 +42,13 @@ class Vp9SsMapTest : public ::testing::Test {
packet_.markerBit = true;
packet_.frameType = kVideoFrameKey;
packet_.codec = kVideoCodecVP9;
packet_.codecSpecificHeader.codec = kRtpVideoVp9;
packet_.codecSpecificHeader.codecHeader.VP9.flexible_mode = false;
packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
packet_.codecSpecificHeader.codecHeader.VP9.temporal_idx = kNoTemporalIdx;
packet_.codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
packet_.codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
packet_.codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
packet_.video_header.codec = kRtpVideoVp9;
packet_.video_header.codecHeader.VP9.flexible_mode = false;
packet_.video_header.codecHeader.VP9.gof_idx = 0;
packet_.video_header.codecHeader.VP9.temporal_idx = kNoTemporalIdx;
packet_.video_header.codecHeader.VP9.temporal_up_switch = false;
packet_.video_header.codecHeader.VP9.ss_data_available = true;
packet_.video_header.codecHeader.VP9.gof.SetGofInfoVP9(
kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
}
@ -62,7 +62,7 @@ TEST_F(Vp9SsMapTest, Insert) {
}
TEST_F(Vp9SsMapTest, Insert_NoSsData) {
packet_.codecSpecificHeader.codecHeader.VP9.ss_data_available = false;
packet_.video_header.codecHeader.VP9.ss_data_available = false;
EXPECT_FALSE(map_.Insert(packet_));
}
@ -139,53 +139,53 @@ TEST_F(Vp9SsMapTest, RemoveOld_WithWrap) {
}
TEST_F(Vp9SsMapTest, UpdatePacket_NoSsData) {
packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
packet_.video_header.codecHeader.VP9.gof_idx = 0;
EXPECT_FALSE(map_.UpdatePacket(&packet_));
}
TEST_F(Vp9SsMapTest, UpdatePacket_NoGofIdx) {
EXPECT_TRUE(map_.Insert(packet_));
packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = kNoGofIdx;
packet_.video_header.codecHeader.VP9.gof_idx = kNoGofIdx;
EXPECT_FALSE(map_.UpdatePacket(&packet_));
}
TEST_F(Vp9SsMapTest, UpdatePacket_InvalidGofIdx) {
EXPECT_TRUE(map_.Insert(packet_));
packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 4;
packet_.video_header.codecHeader.VP9.gof_idx = 4;
EXPECT_FALSE(map_.UpdatePacket(&packet_));
}
TEST_F(Vp9SsMapTest, UpdatePacket) {
EXPECT_TRUE(map_.Insert(packet_)); // kTemporalStructureMode3: 0-2-1-2..
packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
packet_.video_header.codecHeader.VP9.gof_idx = 0;
EXPECT_TRUE(map_.UpdatePacket(&packet_));
EXPECT_EQ(0, packet_.codecSpecificHeader.codecHeader.VP9.temporal_idx);
EXPECT_FALSE(packet_.codecSpecificHeader.codecHeader.VP9.temporal_up_switch);
EXPECT_EQ(1U, packet_.codecSpecificHeader.codecHeader.VP9.num_ref_pics);
EXPECT_EQ(4, packet_.codecSpecificHeader.codecHeader.VP9.pid_diff[0]);
EXPECT_EQ(0, packet_.video_header.codecHeader.VP9.temporal_idx);
EXPECT_FALSE(packet_.video_header.codecHeader.VP9.temporal_up_switch);
EXPECT_EQ(1U, packet_.video_header.codecHeader.VP9.num_ref_pics);
EXPECT_EQ(4, packet_.video_header.codecHeader.VP9.pid_diff[0]);
packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 1;
packet_.video_header.codecHeader.VP9.gof_idx = 1;
EXPECT_TRUE(map_.UpdatePacket(&packet_));
EXPECT_EQ(2, packet_.codecSpecificHeader.codecHeader.VP9.temporal_idx);
EXPECT_TRUE(packet_.codecSpecificHeader.codecHeader.VP9.temporal_up_switch);
EXPECT_EQ(1U, packet_.codecSpecificHeader.codecHeader.VP9.num_ref_pics);
EXPECT_EQ(1, packet_.codecSpecificHeader.codecHeader.VP9.pid_diff[0]);
EXPECT_EQ(2, packet_.video_header.codecHeader.VP9.temporal_idx);
EXPECT_TRUE(packet_.video_header.codecHeader.VP9.temporal_up_switch);
EXPECT_EQ(1U, packet_.video_header.codecHeader.VP9.num_ref_pics);
EXPECT_EQ(1, packet_.video_header.codecHeader.VP9.pid_diff[0]);
packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 2;
packet_.video_header.codecHeader.VP9.gof_idx = 2;
EXPECT_TRUE(map_.UpdatePacket(&packet_));
EXPECT_EQ(1, packet_.codecSpecificHeader.codecHeader.VP9.temporal_idx);
EXPECT_TRUE(packet_.codecSpecificHeader.codecHeader.VP9.temporal_up_switch);
EXPECT_EQ(1U, packet_.codecSpecificHeader.codecHeader.VP9.num_ref_pics);
EXPECT_EQ(2, packet_.codecSpecificHeader.codecHeader.VP9.pid_diff[0]);
EXPECT_EQ(1, packet_.video_header.codecHeader.VP9.temporal_idx);
EXPECT_TRUE(packet_.video_header.codecHeader.VP9.temporal_up_switch);
EXPECT_EQ(1U, packet_.video_header.codecHeader.VP9.num_ref_pics);
EXPECT_EQ(2, packet_.video_header.codecHeader.VP9.pid_diff[0]);
packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 3;
packet_.video_header.codecHeader.VP9.gof_idx = 3;
EXPECT_TRUE(map_.UpdatePacket(&packet_));
EXPECT_EQ(2, packet_.codecSpecificHeader.codecHeader.VP9.temporal_idx);
EXPECT_FALSE(packet_.codecSpecificHeader.codecHeader.VP9.temporal_up_switch);
EXPECT_EQ(2U, packet_.codecSpecificHeader.codecHeader.VP9.num_ref_pics);
EXPECT_EQ(1, packet_.codecSpecificHeader.codecHeader.VP9.pid_diff[0]);
EXPECT_EQ(2, packet_.codecSpecificHeader.codecHeader.VP9.pid_diff[1]);
EXPECT_EQ(2, packet_.video_header.codecHeader.VP9.temporal_idx);
EXPECT_FALSE(packet_.video_header.codecHeader.VP9.temporal_up_switch);
EXPECT_EQ(2U, packet_.video_header.codecHeader.VP9.num_ref_pics);
EXPECT_EQ(1, packet_.video_header.codecHeader.VP9.pid_diff[0]);
EXPECT_EQ(2, packet_.video_header.codecHeader.VP9.pid_diff[1]);
}
class ProcessThreadMock : public ProcessThread {
@ -246,12 +246,10 @@ class TestBasicJitterBuffer : public ::testing::TestWithParam<std::string>,
}
VCMEncodedFrame* DecodeCompleteFrame() {
uint32_t timestamp = 0;
bool found_frame = jitter_buffer_->NextCompleteTimestamp(10, &timestamp);
VCMEncodedFrame* found_frame = jitter_buffer_->NextCompleteFrame(10);
if (!found_frame)
return NULL;
VCMEncodedFrame* frame = jitter_buffer_->ExtractAndSetDecode(timestamp);
return frame;
return nullptr;
return jitter_buffer_->ExtractAndSetDecode(found_frame->TimeStamp());
}
VCMEncodedFrame* DecodeIncompleteFrame() {
@ -409,12 +407,12 @@ class TestRunningJitterBuffer : public ::testing::TestWithParam<std::string>,
}
bool DecodeCompleteFrame() {
uint32_t timestamp = 0;
bool found_frame = jitter_buffer_->NextCompleteTimestamp(0, &timestamp);
VCMEncodedFrame* found_frame = jitter_buffer_->NextCompleteFrame(0);
if (!found_frame)
return false;
VCMEncodedFrame* frame = jitter_buffer_->ExtractAndSetDecode(timestamp);
VCMEncodedFrame* frame =
jitter_buffer_->ExtractAndSetDecode(found_frame->TimeStamp());
bool ret = (frame != NULL);
jitter_buffer_->ReleaseFrame(frame);
return ret;
@ -932,23 +930,23 @@ TEST_P(TestBasicJitterBuffer, TestSkipForwardVp9) {
bool re = false;
packet_->codec = kVideoCodecVP9;
packet_->codecSpecificHeader.codec = kRtpVideoVp9;
packet_->video_header.codec = kRtpVideoVp9;
packet_->isFirstPacket = true;
packet_->markerBit = true;
packet_->codecSpecificHeader.codecHeader.VP9.flexible_mode = false;
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.beginning_of_frame = true;
packet_->codecSpecificHeader.codecHeader.VP9.end_of_frame = true;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
packet_->video_header.codecHeader.VP9.flexible_mode = false;
packet_->video_header.codecHeader.VP9.spatial_idx = 0;
packet_->video_header.codecHeader.VP9.beginning_of_frame = true;
packet_->video_header.codecHeader.VP9.end_of_frame = true;
packet_->video_header.codecHeader.VP9.temporal_up_switch = false;
packet_->seqNum = 65485;
packet_->timestamp = 1000;
packet_->frameType = kVideoFrameKey;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 200;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
packet_->codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
packet_->video_header.codecHeader.VP9.picture_id = 5;
packet_->video_header.codecHeader.VP9.tl0_pic_idx = 200;
packet_->video_header.codecHeader.VP9.temporal_idx = 0;
packet_->video_header.codecHeader.VP9.ss_data_available = true;
packet_->video_header.codecHeader.VP9.gof.SetGofInfoVP9(
kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
@ -956,10 +954,10 @@ TEST_P(TestBasicJitterBuffer, TestSkipForwardVp9) {
packet_->seqNum = 65489;
packet_->timestamp = 13000;
packet_->frameType = kVideoFrameDelta;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 9;
packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 201;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = false;
packet_->video_header.codecHeader.VP9.picture_id = 9;
packet_->video_header.codecHeader.VP9.tl0_pic_idx = 201;
packet_->video_header.codecHeader.VP9.temporal_idx = 0;
packet_->video_header.codecHeader.VP9.ss_data_available = false;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@ -985,29 +983,29 @@ TEST_P(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
bool re = false;
packet_->codec = kVideoCodecVP9;
packet_->codecSpecificHeader.codec = kRtpVideoVp9;
packet_->video_header.codec = kRtpVideoVp9;
packet_->isFirstPacket = true;
packet_->markerBit = true;
packet_->codecSpecificHeader.codecHeader.VP9.flexible_mode = false;
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.beginning_of_frame = true;
packet_->codecSpecificHeader.codecHeader.VP9.end_of_frame = true;
packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 200;
packet_->video_header.codecHeader.VP9.flexible_mode = false;
packet_->video_header.codecHeader.VP9.spatial_idx = 0;
packet_->video_header.codecHeader.VP9.beginning_of_frame = true;
packet_->video_header.codecHeader.VP9.end_of_frame = true;
packet_->video_header.codecHeader.VP9.tl0_pic_idx = 200;
packet_->seqNum = 65486;
packet_->timestamp = 6000;
packet_->frameType = kVideoFrameDelta;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 6;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 2;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = true;
packet_->video_header.codecHeader.VP9.picture_id = 6;
packet_->video_header.codecHeader.VP9.temporal_idx = 2;
packet_->video_header.codecHeader.VP9.temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
packet_->seqNum = 65487;
packet_->timestamp = 9000;
packet_->frameType = kVideoFrameDelta;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 7;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 1;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = true;
packet_->video_header.codecHeader.VP9.picture_id = 7;
packet_->video_header.codecHeader.VP9.temporal_idx = 1;
packet_->video_header.codecHeader.VP9.temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
// Insert first frame with SS data.
@ -1016,11 +1014,11 @@ TEST_P(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
packet_->frameType = kVideoFrameKey;
packet_->width = 352;
packet_->height = 288;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
packet_->codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
packet_->video_header.codecHeader.VP9.picture_id = 5;
packet_->video_header.codecHeader.VP9.temporal_idx = 0;
packet_->video_header.codecHeader.VP9.temporal_up_switch = false;
packet_->video_header.codecHeader.VP9.ss_data_available = true;
packet_->video_header.codecHeader.VP9.gof.SetGofInfoVP9(
kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
@ -1061,31 +1059,31 @@ TEST_P(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
bool re = false;
packet_->codec = kVideoCodecVP9;
packet_->codecSpecificHeader.codec = kRtpVideoVp9;
packet_->codecSpecificHeader.codecHeader.VP9.flexible_mode = false;
packet_->codecSpecificHeader.codecHeader.VP9.beginning_of_frame = true;
packet_->codecSpecificHeader.codecHeader.VP9.end_of_frame = true;
packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 200;
packet_->video_header.codec = kRtpVideoVp9;
packet_->video_header.codecHeader.VP9.flexible_mode = false;
packet_->video_header.codecHeader.VP9.beginning_of_frame = true;
packet_->video_header.codecHeader.VP9.end_of_frame = true;
packet_->video_header.codecHeader.VP9.tl0_pic_idx = 200;
packet_->isFirstPacket = true;
packet_->markerBit = false;
packet_->seqNum = 65486;
packet_->timestamp = 6000;
packet_->frameType = kVideoFrameDelta;
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 6;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 1;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = true;
packet_->video_header.codecHeader.VP9.spatial_idx = 0;
packet_->video_header.codecHeader.VP9.picture_id = 6;
packet_->video_header.codecHeader.VP9.temporal_idx = 1;
packet_->video_header.codecHeader.VP9.temporal_up_switch = true;
EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));
packet_->isFirstPacket = false;
packet_->markerBit = true;
packet_->seqNum = 65487;
packet_->frameType = kVideoFrameDelta;
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 1;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 6;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 1;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = true;
packet_->video_header.codecHeader.VP9.spatial_idx = 1;
packet_->video_header.codecHeader.VP9.picture_id = 6;
packet_->video_header.codecHeader.VP9.temporal_idx = 1;
packet_->video_header.codecHeader.VP9.temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
packet_->isFirstPacket = false;
@ -1093,10 +1091,10 @@ TEST_P(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->seqNum = 65485;
packet_->timestamp = 3000;
packet_->frameType = kVideoFrameKey;
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 1;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
packet_->video_header.codecHeader.VP9.spatial_idx = 1;
packet_->video_header.codecHeader.VP9.picture_id = 5;
packet_->video_header.codecHeader.VP9.temporal_idx = 0;
packet_->video_header.codecHeader.VP9.temporal_up_switch = false;
EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));
// Insert first frame with SS data.
@ -1106,12 +1104,12 @@ TEST_P(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->frameType = kVideoFrameKey;
packet_->width = 352;
packet_->height = 288;
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
packet_->codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
packet_->video_header.codecHeader.VP9.spatial_idx = 0;
packet_->video_header.codecHeader.VP9.picture_id = 5;
packet_->video_header.codecHeader.VP9.temporal_idx = 0;
packet_->video_header.codecHeader.VP9.temporal_up_switch = false;
packet_->video_header.codecHeader.VP9.ss_data_available = true;
packet_->video_header.codecHeader.VP9.gof.SetGofInfoVP9(
kTemporalStructureMode2); // kTemporalStructureMode3: 0-1-0-1..
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
@ -1177,7 +1175,7 @@ TEST_P(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
uint32_t timestamp = 0;
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
packet_->isFirstPacket = false;
@ -1185,7 +1183,7 @@ TEST_P(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
packet_->seqNum++;
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
}
@ -1210,7 +1208,7 @@ TEST_P(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
EXPECT_EQ(kDecodableSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
// Insert second frame
@ -1219,7 +1217,7 @@ TEST_P(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
EXPECT_EQ(kDecodableSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
packet_->isFirstPacket = false;
@ -1227,14 +1225,14 @@ TEST_P(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
packet_->seqNum++;
EXPECT_EQ(kDecodableSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
}
packet_->seqNum++;
EXPECT_EQ(kDecodableSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
frame_out = DecodeIncompleteFrame();
@ -1276,7 +1274,7 @@ TEST_P(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsIncompleteKey) {
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
uint32_t timestamp;
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
// Insert second frame - an incomplete key frame.
@ -1287,7 +1285,7 @@ TEST_P(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsIncompleteKey) {
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
// Insert a few more packets. Make sure we're waiting for the key frame to be
@ -1297,7 +1295,7 @@ TEST_P(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsIncompleteKey) {
packet_->seqNum++;
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
}
@ -1340,7 +1338,7 @@ TEST_P(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsMissingFirstPacket) {
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
uint32_t timestamp;
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
// Insert second frame with the first packet missing. Make sure we're waiting
@ -1350,14 +1348,14 @@ TEST_P(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsMissingFirstPacket) {
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
for (int i = 0; i < 5; ++i) {
packet_->seqNum++;
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
}
@ -1366,7 +1364,7 @@ TEST_P(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsMissingFirstPacket) {
packet_->seqNum -= 6;
EXPECT_EQ(kDecodableSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
frame_out = DecodeIncompleteFrame();
@ -1387,9 +1385,10 @@ TEST_P(TestBasicJitterBuffer, DiscontinuousStreamWhenDecodingWithErrors) {
EXPECT_EQ(kCompleteSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
uint32_t next_timestamp;
EXPECT_TRUE(jitter_buffer_->NextCompleteTimestamp(0, &next_timestamp));
EXPECT_EQ(packet_->timestamp, next_timestamp);
VCMEncodedFrame* frame = jitter_buffer_->ExtractAndSetDecode(next_timestamp);
VCMEncodedFrame* frame = jitter_buffer_->NextCompleteFrame(0);
EXPECT_NE(frame, nullptr);
EXPECT_EQ(packet_->timestamp, frame->TimeStamp());
frame = jitter_buffer_->ExtractAndSetDecode(frame->TimeStamp());
EXPECT_TRUE(frame != NULL);
jitter_buffer_->ReleaseFrame(frame);
@ -1413,7 +1412,7 @@ TEST_P(TestBasicJitterBuffer, DiscontinuousStreamWhenDecodingWithErrors) {
packet_->timestamp = timestamp_;
EXPECT_EQ(kDecodableSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &next_timestamp));
EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&next_timestamp));
EXPECT_EQ(packet_->timestamp - 33 * 90, next_timestamp);
}

View File

@ -32,7 +32,9 @@ VCMPacket::VCMPacket()
insertStartCode(false),
width(0),
height(0),
codecSpecificHeader() {}
video_header() {
video_header.playout_delay = {-1, -1};
}
VCMPacket::VCMPacket(const uint8_t* ptr,
const size_t size,
@ -45,7 +47,6 @@ VCMPacket::VCMPacket(const uint8_t* ptr,
sizeBytes(size),
markerBit(rtpHeader.header.markerBit),
timesNacked(-1),
frameType(rtpHeader.frameType),
codec(kVideoCodecUnknown),
isFirstPacket(rtpHeader.type.Video.isFirstPacket),
@ -53,8 +54,18 @@ VCMPacket::VCMPacket(const uint8_t* ptr,
insertStartCode(false),
width(rtpHeader.type.Video.width),
height(rtpHeader.type.Video.height),
codecSpecificHeader(rtpHeader.type.Video) {
video_header(rtpHeader.type.Video) {
CopyCodecSpecifics(rtpHeader.type.Video);
if (markerBit) {
video_header.rotation = rtpHeader.type.Video.rotation;
}
// Playout decisions are made entirely based on first packet in a frame.
if (isFirstPacket) {
video_header.playout_delay = rtpHeader.type.Video.playout_delay;
} else {
video_header.playout_delay = {-1, -1};
}
}
VCMPacket::VCMPacket(const uint8_t* ptr,
@ -70,7 +81,6 @@ VCMPacket::VCMPacket(const uint8_t* ptr,
sizeBytes(size),
markerBit(mBit),
timesNacked(-1),
frameType(kVideoFrameDelta),
codec(kVideoCodecUnknown),
isFirstPacket(false),
@ -78,7 +88,7 @@ VCMPacket::VCMPacket(const uint8_t* ptr,
insertStartCode(false),
width(0),
height(0),
codecSpecificHeader() {}
video_header() {}
void VCMPacket::Reset() {
payloadType = 0;
@ -96,13 +106,10 @@ void VCMPacket::Reset() {
insertStartCode = false;
width = 0;
height = 0;
memset(&codecSpecificHeader, 0, sizeof(RTPVideoHeader));
memset(&video_header, 0, sizeof(RTPVideoHeader));
}
void VCMPacket::CopyCodecSpecifics(const RTPVideoHeader& videoHeader) {
if (markerBit) {
codecSpecificHeader.rotation = videoHeader.rotation;
}
switch (videoHeader.codec) {
case kRtpVideoVp8:
// Handle all packets within a frame as depending on the previous packet

View File

@ -50,7 +50,7 @@ class VCMPacket {
// packet.
int width;
int height;
RTPVideoHeader codecSpecificHeader;
RTPVideoHeader video_header;
protected:
void CopyCodecSpecifics(const RTPVideoHeader& videoHeader);

View File

@ -98,10 +98,10 @@ class TestPacketBuffer : public ::testing::Test,
packet.markerBit = last;
packet.sizeBytes = data_size;
packet.dataPtr = data;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = pid % (1 << 15);
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = tid;
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = tl0;
packet.codecSpecificHeader.codecHeader.VP8.layerSync = sync;
packet.video_header.codecHeader.VP8.pictureId = pid % (1 << 15);
packet.video_header.codecHeader.VP8.temporalIdx = tid;
packet.video_header.codecHeader.VP8.tl0PicIdx = tl0;
packet.video_header.codecHeader.VP8.layerSync = sync;
EXPECT_TRUE(packet_buffer_->InsertPacket(packet));
}
@ -127,15 +127,15 @@ class TestPacketBuffer : public ::testing::Test,
packet.markerBit = last;
packet.sizeBytes = data_size;
packet.dataPtr = data;
packet.codecSpecificHeader.codecHeader.VP9.flexible_mode = false;
packet.codecSpecificHeader.codecHeader.VP9.picture_id = pid % (1 << 15);
packet.codecSpecificHeader.codecHeader.VP9.temporal_idx = tid;
packet.codecSpecificHeader.codecHeader.VP9.spatial_idx = sid;
packet.codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = tl0;
packet.codecSpecificHeader.codecHeader.VP9.temporal_up_switch = up;
packet.video_header.codecHeader.VP9.flexible_mode = false;
packet.video_header.codecHeader.VP9.picture_id = pid % (1 << 15);
packet.video_header.codecHeader.VP9.temporal_idx = tid;
packet.video_header.codecHeader.VP9.spatial_idx = sid;
packet.video_header.codecHeader.VP9.tl0_pic_idx = tl0;
packet.video_header.codecHeader.VP9.temporal_up_switch = up;
if (ss != nullptr) {
packet.codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
packet.codecSpecificHeader.codecHeader.VP9.gof = *ss;
packet.video_header.codecHeader.VP9.ss_data_available = true;
packet.video_header.codecHeader.VP9.gof = *ss;
}
EXPECT_TRUE(packet_buffer_->InsertPacket(packet));
@ -163,15 +163,15 @@ class TestPacketBuffer : public ::testing::Test,
packet.markerBit = last;
packet.sizeBytes = data_size;
packet.dataPtr = data;
packet.codecSpecificHeader.codecHeader.VP9.inter_layer_predicted = inter;
packet.codecSpecificHeader.codecHeader.VP9.flexible_mode = true;
packet.codecSpecificHeader.codecHeader.VP9.picture_id = pid % (1 << 15);
packet.codecSpecificHeader.codecHeader.VP9.temporal_idx = tid;
packet.codecSpecificHeader.codecHeader.VP9.spatial_idx = sid;
packet.codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = tl0;
packet.codecSpecificHeader.codecHeader.VP9.num_ref_pics = refs.size();
packet.video_header.codecHeader.VP9.inter_layer_predicted = inter;
packet.video_header.codecHeader.VP9.flexible_mode = true;
packet.video_header.codecHeader.VP9.picture_id = pid % (1 << 15);
packet.video_header.codecHeader.VP9.temporal_idx = tid;
packet.video_header.codecHeader.VP9.spatial_idx = sid;
packet.video_header.codecHeader.VP9.tl0_pic_idx = tl0;
packet.video_header.codecHeader.VP9.num_ref_pics = refs.size();
for (size_t i = 0; i < refs.size(); ++i)
packet.codecSpecificHeader.codecHeader.VP9.pid_diff[i] = refs[i];
packet.video_header.codecHeader.VP9.pid_diff[i] = refs[i];
EXPECT_TRUE(packet_buffer_->InsertPacket(packet));
}

View File

@ -144,15 +144,26 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
bool prefer_late_decoding) {
const int64_t start_time_ms = clock_->TimeInMilliseconds();
uint32_t frame_timestamp = 0;
int min_playout_delay_ms = -1;
int max_playout_delay_ms = -1;
// Exhaust wait time to get a complete frame for decoding.
bool found_frame =
jitter_buffer_.NextCompleteTimestamp(max_wait_time_ms, &frame_timestamp);
VCMEncodedFrame* found_frame =
jitter_buffer_.NextCompleteFrame(max_wait_time_ms);
if (!found_frame)
found_frame = jitter_buffer_.NextMaybeIncompleteTimestamp(&frame_timestamp);
if (found_frame) {
frame_timestamp = found_frame->TimeStamp();
min_playout_delay_ms = found_frame->EncodedImage().playout_delay_.min_ms;
max_playout_delay_ms = found_frame->EncodedImage().playout_delay_.max_ms;
} else {
if (!jitter_buffer_.NextMaybeIncompleteTimestamp(&frame_timestamp))
return nullptr;
}
if (!found_frame)
return NULL;
if (min_playout_delay_ms >= 0)
timing_->set_min_playout_delay(min_playout_delay_ms);
if (max_playout_delay_ms >= 0)
timing_->set_max_playout_delay(max_playout_delay_ms);
// We have a frame - Set timing and render timestamp.
timing_->SetJitterDelay(jitter_buffer_.EstimatedJitterMs());

View File

@ -60,10 +60,10 @@ int VCMSessionInfo::HighSequenceNumber() const {
int VCMSessionInfo::PictureId() const {
if (packets_.empty())
return kNoPictureId;
if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
return packets_.front().codecSpecificHeader.codecHeader.VP8.pictureId;
} else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
return packets_.front().codecSpecificHeader.codecHeader.VP9.picture_id;
if (packets_.front().video_header.codec == kRtpVideoVp8) {
return packets_.front().video_header.codecHeader.VP8.pictureId;
} else if (packets_.front().video_header.codec == kRtpVideoVp9) {
return packets_.front().video_header.codecHeader.VP9.picture_id;
} else {
return kNoPictureId;
}
@ -72,10 +72,10 @@ int VCMSessionInfo::PictureId() const {
int VCMSessionInfo::TemporalId() const {
if (packets_.empty())
return kNoTemporalIdx;
if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
return packets_.front().codecSpecificHeader.codecHeader.VP8.temporalIdx;
} else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
return packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_idx;
if (packets_.front().video_header.codec == kRtpVideoVp8) {
return packets_.front().video_header.codecHeader.VP8.temporalIdx;
} else if (packets_.front().video_header.codec == kRtpVideoVp9) {
return packets_.front().video_header.codecHeader.VP9.temporal_idx;
} else {
return kNoTemporalIdx;
}
@ -84,11 +84,10 @@ int VCMSessionInfo::TemporalId() const {
bool VCMSessionInfo::LayerSync() const {
if (packets_.empty())
return false;
if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
return packets_.front().codecSpecificHeader.codecHeader.VP8.layerSync;
} else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
return packets_.front()
.codecSpecificHeader.codecHeader.VP9.temporal_up_switch;
if (packets_.front().video_header.codec == kRtpVideoVp8) {
return packets_.front().video_header.codecHeader.VP8.layerSync;
} else if (packets_.front().video_header.codec == kRtpVideoVp9) {
return packets_.front().video_header.codecHeader.VP9.temporal_up_switch;
} else {
return false;
}
@ -97,36 +96,34 @@ bool VCMSessionInfo::LayerSync() const {
int VCMSessionInfo::Tl0PicId() const {
if (packets_.empty())
return kNoTl0PicIdx;
if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
return packets_.front().codecSpecificHeader.codecHeader.VP8.tl0PicIdx;
} else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
return packets_.front().codecSpecificHeader.codecHeader.VP9.tl0_pic_idx;
if (packets_.front().video_header.codec == kRtpVideoVp8) {
return packets_.front().video_header.codecHeader.VP8.tl0PicIdx;
} else if (packets_.front().video_header.codec == kRtpVideoVp9) {
return packets_.front().video_header.codecHeader.VP9.tl0_pic_idx;
} else {
return kNoTl0PicIdx;
}
}
bool VCMSessionInfo::NonReference() const {
if (packets_.empty() ||
packets_.front().codecSpecificHeader.codec != kRtpVideoVp8)
if (packets_.empty() || packets_.front().video_header.codec != kRtpVideoVp8)
return false;
return packets_.front().codecSpecificHeader.codecHeader.VP8.nonReference;
return packets_.front().video_header.codecHeader.VP8.nonReference;
}
void VCMSessionInfo::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
if (packets_.empty() ||
packets_.front().codecSpecificHeader.codec != kRtpVideoVp9 ||
packets_.front().codecSpecificHeader.codecHeader.VP9.flexible_mode) {
if (packets_.empty() || packets_.front().video_header.codec != kRtpVideoVp9 ||
packets_.front().video_header.codecHeader.VP9.flexible_mode) {
return;
}
packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_idx =
packets_.front().video_header.codecHeader.VP9.temporal_idx =
gof_info.temporal_idx[idx];
packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_up_switch =
packets_.front().video_header.codecHeader.VP9.temporal_up_switch =
gof_info.temporal_up_switch[idx];
packets_.front().codecSpecificHeader.codecHeader.VP9.num_ref_pics =
packets_.front().video_header.codecHeader.VP9.num_ref_pics =
gof_info.num_ref_pics[idx];
for (uint8_t i = 0; i < gof_info.num_ref_pics[idx]; ++i) {
packets_.front().codecSpecificHeader.codecHeader.VP9.pid_diff[i] =
packets_.front().video_header.codecHeader.VP9.pid_diff[i] =
gof_info.pid_diff[idx][i];
}
}
@ -175,9 +172,8 @@ size_t VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
// header supplied by the H264 depacketizer.
const size_t kH264NALHeaderLengthInBytes = 1;
const size_t kLengthFieldLength = 2;
if (packet.codecSpecificHeader.codec == kRtpVideoH264 &&
packet.codecSpecificHeader.codecHeader.H264.packetization_type ==
kH264StapA) {
if (packet.video_header.codec == kRtpVideoH264 &&
packet.video_header.codecHeader.H264.packetization_type == kH264StapA) {
size_t required_length = 0;
const uint8_t* nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
while (nalu_ptr < packet_buffer + packet.sizeBytes) {
@ -344,8 +340,7 @@ size_t VCMSessionInfo::BuildVP8FragmentationHeader(
return new_length;
PacketIterator it = FindNextPartitionBeginning(packets_.begin());
while (it != packets_.end()) {
const int partition_id =
(*it).codecSpecificHeader.codecHeader.VP8.partitionId;
const int partition_id = (*it).video_header.codecHeader.VP8.partitionId;
PacketIterator partition_end = FindPartitionEnd(it);
fragmentation->fragmentationOffset[partition_id] =
(*it).dataPtr - frame_buffer;
@ -381,7 +376,7 @@ size_t VCMSessionInfo::BuildVP8FragmentationHeader(
VCMSessionInfo::PacketIterator VCMSessionInfo::FindNextPartitionBeginning(
PacketIterator it) const {
while (it != packets_.end()) {
if ((*it).codecSpecificHeader.codecHeader.VP8.beginningOfPartition) {
if ((*it).video_header.codecHeader.VP8.beginningOfPartition) {
return it;
}
++it;
@ -393,13 +388,10 @@ VCMSessionInfo::PacketIterator VCMSessionInfo::FindPartitionEnd(
PacketIterator it) const {
assert((*it).codec == kVideoCodecVP8);
PacketIterator prev_it = it;
const int partition_id =
(*it).codecSpecificHeader.codecHeader.VP8.partitionId;
const int partition_id = (*it).video_header.codecHeader.VP8.partitionId;
while (it != packets_.end()) {
bool beginning =
(*it).codecSpecificHeader.codecHeader.VP8.beginningOfPartition;
int current_partition_id =
(*it).codecSpecificHeader.codecHeader.VP8.partitionId;
bool beginning = (*it).video_header.codecHeader.VP8.beginningOfPartition;
int current_partition_id = (*it).video_header.codecHeader.VP8.partitionId;
bool packet_loss_found = (!beginning && !InSequence(it, prev_it));
if (packet_loss_found ||
(beginning && current_partition_id != partition_id)) {

View File

@ -28,6 +28,7 @@ VCMTiming::VCMTiming(Clock* clock, VCMTiming* master_timing)
codec_timer_(new VCMCodecTimer()),
render_delay_ms_(kDefaultRenderDelayMs),
min_playout_delay_ms_(0),
max_playout_delay_ms_(10000),
jitter_delay_ms_(0),
current_delay_ms_(0),
last_decode_ms_(0),
@ -91,17 +92,32 @@ void VCMTiming::ResetDecodeTime() {
codec_timer_.reset(new VCMCodecTimer());
}
void VCMTiming::set_render_delay(uint32_t render_delay_ms) {
void VCMTiming::set_render_delay(int render_delay_ms) {
CriticalSectionScoped cs(crit_sect_);
render_delay_ms_ = render_delay_ms;
}
void VCMTiming::set_min_playout_delay(uint32_t min_playout_delay_ms) {
void VCMTiming::set_min_playout_delay(int min_playout_delay_ms) {
CriticalSectionScoped cs(crit_sect_);
min_playout_delay_ms_ = min_playout_delay_ms;
}
void VCMTiming::SetJitterDelay(uint32_t jitter_delay_ms) {
int VCMTiming::min_playout_delay() {
CriticalSectionScoped cs(crit_sect_);
return min_playout_delay_ms_;
}
void VCMTiming::set_max_playout_delay(int max_playout_delay_ms) {
CriticalSectionScoped cs(crit_sect_);
max_playout_delay_ms_ = max_playout_delay_ms;
}
int VCMTiming::max_playout_delay() {
CriticalSectionScoped cs(crit_sect_);
return max_playout_delay_ms_;
}
void VCMTiming::SetJitterDelay(int jitter_delay_ms) {
CriticalSectionScoped cs(crit_sect_);
if (jitter_delay_ms != jitter_delay_ms_) {
jitter_delay_ms_ = jitter_delay_ms;
@ -114,7 +130,7 @@ void VCMTiming::SetJitterDelay(uint32_t jitter_delay_ms) {
void VCMTiming::UpdateCurrentDelay(uint32_t frame_timestamp) {
CriticalSectionScoped cs(crit_sect_);
uint32_t target_delay_ms = TargetDelayInternal();
int target_delay_ms = TargetDelayInternal();
if (current_delay_ms_ == 0) {
// Not initialized, set current delay to target.
@ -147,7 +163,7 @@ void VCMTiming::UpdateCurrentDelay(uint32_t frame_timestamp) {
delay_diff_ms = std::max(delay_diff_ms, -max_change_ms);
delay_diff_ms = std::min(delay_diff_ms, max_change_ms);
current_delay_ms_ = current_delay_ms_ + static_cast<int32_t>(delay_diff_ms);
current_delay_ms_ = current_delay_ms_ + delay_diff_ms;
}
prev_frame_timestamp_ = frame_timestamp;
}
@ -163,7 +179,7 @@ void VCMTiming::UpdateCurrentDelay(int64_t render_time_ms,
return;
}
if (current_delay_ms_ + delayed_ms <= target_delay_ms) {
current_delay_ms_ += static_cast<uint32_t>(delayed_ms);
current_delay_ms_ += delayed_ms;
} else {
current_delay_ms_ = target_delay_ms;
}
@ -211,14 +227,21 @@ int64_t VCMTiming::RenderTimeMsInternal(uint32_t frame_timestamp,
estimated_complete_time_ms = now_ms;
}
// Make sure that we have at least the playout delay.
uint32_t actual_delay = std::max(current_delay_ms_, min_playout_delay_ms_);
if (min_playout_delay_ms_ == 0 && max_playout_delay_ms_ == 0) {
// Render as soon as possible
return now_ms;
}
// Make sure the actual delay stays in the range of |min_playout_delay_ms_|
// and |max_playout_delay_ms_|.
int actual_delay = std::max(current_delay_ms_, min_playout_delay_ms_);
actual_delay = std::min(actual_delay, max_playout_delay_ms_);
return estimated_complete_time_ms + actual_delay;
}
// Must be called from inside a critical section.
int64_t VCMTiming::RequiredDecodeTimeMs() const {
const int64_t decode_time_ms = codec_timer_->RequiredDecodeTimeMs();
int VCMTiming::RequiredDecodeTimeMs() const {
const int decode_time_ms = codec_timer_->RequiredDecodeTimeMs();
assert(decode_time_ms >= 0);
return decode_time_ms;
}
@ -254,16 +277,14 @@ bool VCMTiming::EnoughTimeToDecode(
0;
}
uint32_t VCMTiming::TargetVideoDelay() const {
int VCMTiming::TargetVideoDelay() const {
CriticalSectionScoped cs(crit_sect_);
return TargetDelayInternal();
}
uint32_t VCMTiming::TargetDelayInternal() const {
int VCMTiming::TargetDelayInternal() const {
return std::max(min_playout_delay_ms_,
jitter_delay_ms_ +
static_cast<uint32_t>(RequiredDecodeTimeMs()) +
render_delay_ms_);
jitter_delay_ms_ + RequiredDecodeTimeMs() + render_delay_ms_);
}
void VCMTiming::GetTimings(int* decode_ms,
@ -275,7 +296,7 @@ void VCMTiming::GetTimings(int* decode_ms,
int* render_delay_ms) const {
CriticalSectionScoped cs(crit_sect_);
*decode_ms = last_decode_ms_;
*max_decode_ms = static_cast<int>(RequiredDecodeTimeMs());
*max_decode_ms = RequiredDecodeTimeMs();
*current_delay_ms = current_delay_ms_;
*target_delay_ms = TargetDelayInternal();
*jitter_buffer_ms = jitter_delay_ms_;

View File

@ -35,14 +35,23 @@ class VCMTiming {
void ResetDecodeTime();
// Set the amount of time needed to render an image. Defaults to 10 ms.
void set_render_delay(uint32_t render_delay_ms);
void set_render_delay(int render_delay_ms);
// Set the minimum time the video must be delayed on the receiver to
// get the desired jitter buffer level.
void SetJitterDelay(uint32_t required_delay_ms);
void SetJitterDelay(int required_delay_ms);
// Set the minimum playout delay required to sync video with audio.
void set_min_playout_delay(uint32_t min_playout_delay);
// Set the minimum playout delay from capture to render in ms.
void set_min_playout_delay(int min_playout_delay_ms);
// Returns the minimum playout delay from capture to render in ms.
int min_playout_delay();
// Set the maximum playout delay from capture to render in ms.
void set_max_playout_delay(int max_playout_delay_ms);
// Returns the maximum playout delay from capture to render in ms.
int max_playout_delay();
// Increases or decreases the current delay to get closer to the target delay.
// Calculates how long it has been since the previous call to this function,
@ -77,7 +86,7 @@ class VCMTiming {
// Returns the current target delay which is required delay + decode time +
// render delay.
uint32_t TargetVideoDelay() const;
int TargetVideoDelay() const;
// Calculates whether or not there is enough time to decode a frame given a
// certain amount of processing time.
@ -96,11 +105,10 @@ class VCMTiming {
enum { kDelayMaxChangeMsPerS = 100 };
protected:
int64_t RequiredDecodeTimeMs() const
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
int RequiredDecodeTimeMs() const EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
int64_t RenderTimeMsInternal(uint32_t frame_timestamp, int64_t now_ms) const
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
uint32_t TargetDelayInternal() const EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
int TargetDelayInternal() const EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
private:
void UpdateHistograms() const;
@ -110,10 +118,16 @@ class VCMTiming {
bool master_ GUARDED_BY(crit_sect_);
TimestampExtrapolator* ts_extrapolator_ GUARDED_BY(crit_sect_);
std::unique_ptr<VCMCodecTimer> codec_timer_ GUARDED_BY(crit_sect_);
uint32_t render_delay_ms_ GUARDED_BY(crit_sect_);
uint32_t min_playout_delay_ms_ GUARDED_BY(crit_sect_);
uint32_t jitter_delay_ms_ GUARDED_BY(crit_sect_);
uint32_t current_delay_ms_ GUARDED_BY(crit_sect_);
int render_delay_ms_ GUARDED_BY(crit_sect_);
// Best-effort playout delay range for frames from capture to render.
// The receiver tries to keep the delay between |min_playout_delay_ms_|
// and |max_playout_delay_ms_| taking the network jitter into account.
// A special case is where min_playout_delay_ms_ = max_playout_delay_ms_ = 0,
// in which case the receiver tries to play the frames as they arrive.
int min_playout_delay_ms_ GUARDED_BY(crit_sect_);
int max_playout_delay_ms_ GUARDED_BY(crit_sect_);
int jitter_delay_ms_ GUARDED_BY(crit_sect_);
int current_delay_ms_ GUARDED_BY(crit_sect_);
int last_decode_ms_ GUARDED_BY(crit_sect_);
uint32_t prev_frame_timestamp_ GUARDED_BY(crit_sect_);

View File

@ -104,7 +104,7 @@ TEST(ReceiverTiming, Tests) {
clock.TimeInMilliseconds());
EXPECT_EQ(waitTime, jitterDelayMs);
uint32_t minTotalDelayMs = 200;
int minTotalDelayMs = 200;
timing.set_min_playout_delay(minTotalDelayMs);
clock.AdvanceTimeMilliseconds(5000);
timeStamp += 5 * 90000;