Reland of Work on flexible mode and screen sharing. (patchset #1 id:1 of https://codereview.webrtc.org/1438543002/ )
Reason for revert:
Failed test not related to this CL (test fails on master at an earlier date), re-landing original CL. (This time from my @webrtc account.)

Original issue's description:
> Revert of Work on flexible mode and screen sharing. (patchset #28 id:520001 of https://codereview.webrtc.org/1328113004/ )
>
> Reason for revert:
> Seems to break VideoSendStreamTest.ReconfigureBitratesSetsEncoderBitratesCorrectly on Linux Memcheck buildbot.
>
> Original issue's description:
> > Work on flexible mode and screen sharing.
> >
> > Implement VP8 style screensharing but with spatial layers.
> > Implement flexible mode.
> >
> > Files from other patches:
> > generic_encoder.cc
> > layer_filtering_transport.cc
> >
> > BUG=webrtc:4914
> >
> > Committed: https://crrev.com/77ccfb4d16c148e61a316746bb5d9705e8b39f4a
> > Cr-Commit-Position: refs/heads/master@{#10572}
>
> TBR=sprang@webrtc.org,stefan@webrtc.org,philipel@google.com,asapersson@webrtc.org,mflodman@webrtc.org,philipel@webrtc.org
> NOPRESUBMIT=true
> NOTREECHECKS=true
> NOTRY=true
> BUG=webrtc:4914
>
> Committed: https://crrev.com/0be8f1d347bdb171462df89c2a4c69b3f3eb7519
> Cr-Commit-Position: refs/heads/master@{#10578}

TBR=sprang@webrtc.org,stefan@webrtc.org,philipel@google.com,asapersson@webrtc.org,mflodman@webrtc.org,terelius@webrtc.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=webrtc:4914

Review URL: https://codereview.webrtc.org/1431283002

Cr-Commit-Position: refs/heads/master@{#10581}
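For orientation, the core of the decoding_state change in the diff below is a reference-continuity check for VP9 flexible mode: decoded picture ids are marked in a ring buffer of length 1 << 7, and a delta frame is only treated as continuous if every reference it lists via p_diff maps to an entry already marked as decoded. The following is a minimal standalone sketch of that idea, not the WebRTC API; the names mirror the diff, and it deliberately omits the AheadOfFramesDecodedClearedTo wrap heuristic that the real VCMDecodingState adds.

// Standalone sketch only; VCMDecodingState in the diff below is the real code.
#include <cstdint>
#include <iostream>

namespace {

constexpr uint16_t kNumRefBits = 7;
constexpr uint16_t kFrameDecodedLength = 1 << kNumRefBits;

struct DecodedFrameHistory {
  bool frame_decoded[kFrameDecodedLength] = {};
  uint16_t cleared_to = 0;

  // Mark |picture_id| as decoded, clearing stale entries between the
  // previously cleared position and the new index (ring-buffer wrap).
  void MarkDecoded(uint16_t picture_id) {
    const uint16_t index = picture_id % kFrameDecodedLength;
    while (cleared_to != index) {
      cleared_to = (cleared_to + 1) % kFrameDecodedLength;
      frame_decoded[cleared_to] = false;
    }
    frame_decoded[index] = true;
  }

  // A frame is continuous if every referenced picture (picture_id - p_diff)
  // maps to an index already marked as decoded.
  bool ReferencesDecoded(uint16_t picture_id,
                         const uint8_t* p_diff,
                         uint8_t num_refs) const {
    for (uint8_t r = 0; r < num_refs; ++r) {
      const uint16_t ref_index =
          static_cast<uint16_t>(picture_id - p_diff[r]) % kFrameDecodedLength;
      if (!frame_decoded[ref_index])
        return false;
    }
    return true;
  }
};

}  // namespace

int main() {
  DecodedFrameHistory history;
  history.MarkDecoded(10);  // Key frame.
  history.MarkDecoded(15);  // Delta frame referencing 10.

  const uint8_t good_refs[] = {5, 10};  // Picture 20 referencing 15 and 10.
  const uint8_t bad_refs[] = {9};       // Picture 20 referencing 11 (never decoded).
  std::cout << history.ReferencesDecoded(20, good_refs, 2) << "\n";  // 1
  std::cout << history.ReferencesDecoded(20, bad_refs, 1) << "\n";   // 0
  return 0;
}

The continuity scenarios exercised in main() mirror the new decoding_state unit tests added by this CL.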
@@ -24,7 +24,9 @@ VCMDecodingState::VCMDecodingState()
       temporal_id_(kNoTemporalIdx),
       tl0_pic_id_(kNoTl0PicIdx),
       full_sync_(true),
-      in_initial_state_(true) {}
+      in_initial_state_(true) {
+  memset(frame_decoded_, 0, sizeof(frame_decoded_));
+}
 
 VCMDecodingState::~VCMDecodingState() {}
 
@@ -37,6 +39,7 @@ void VCMDecodingState::Reset() {
   tl0_pic_id_ = kNoTl0PicIdx;
   full_sync_ = true;
   in_initial_state_ = true;
+  memset(frame_decoded_, 0, sizeof(frame_decoded_));
 }
 
 uint32_t VCMDecodingState::time_stamp() const {
@@ -63,12 +66,33 @@ bool VCMDecodingState::IsOldPacket(const VCMPacket* packet) const {
 
 void VCMDecodingState::SetState(const VCMFrameBuffer* frame) {
   assert(frame != NULL && frame->GetHighSeqNum() >= 0);
-  UpdateSyncState(frame);
+  if (!UsingFlexibleMode(frame))
+    UpdateSyncState(frame);
   sequence_num_ = static_cast<uint16_t>(frame->GetHighSeqNum());
   time_stamp_ = frame->TimeStamp();
   picture_id_ = frame->PictureId();
   temporal_id_ = frame->TemporalId();
   tl0_pic_id_ = frame->Tl0PicId();
+
+  if (UsingFlexibleMode(frame)) {
+    uint16_t frame_index = picture_id_ % kFrameDecodedLength;
+    if (in_initial_state_) {
+      frame_decoded_cleared_to_ = frame_index;
+    } else if (frame->FrameType() == kVideoFrameKey) {
+      memset(frame_decoded_, 0, sizeof(frame_decoded_));
+      frame_decoded_cleared_to_ = frame_index;
+    } else {
+      if (AheadOfFramesDecodedClearedTo(frame_index)) {
+        while (frame_decoded_cleared_to_ != frame_index) {
+          frame_decoded_cleared_to_ =
+              (frame_decoded_cleared_to_ + 1) % kFrameDecodedLength;
+          frame_decoded_[frame_decoded_cleared_to_] = false;
+        }
+      }
+    }
+    frame_decoded_[frame_index] = true;
+  }
+
   in_initial_state_ = false;
 }
 
@@ -80,6 +104,8 @@ void VCMDecodingState::CopyFrom(const VCMDecodingState& state) {
   tl0_pic_id_ = state.tl0_pic_id_;
   full_sync_ = state.full_sync_;
   in_initial_state_ = state.in_initial_state_;
+  frame_decoded_cleared_to_ = state.frame_decoded_cleared_to_;
+  memcpy(frame_decoded_, state.frame_decoded_, sizeof(frame_decoded_));
 }
 
 bool VCMDecodingState::UpdateEmptyFrame(const VCMFrameBuffer* frame) {
@@ -173,7 +199,11 @@ bool VCMDecodingState::ContinuousFrame(const VCMFrameBuffer* frame) const {
   if (!full_sync_ && !frame->LayerSync())
     return false;
   if (UsingPictureId(frame)) {
-    return ContinuousPictureId(frame->PictureId());
+    if (UsingFlexibleMode(frame)) {
+      return ContinuousFrameRefs(frame);
+    } else {
+      return ContinuousPictureId(frame->PictureId());
+    }
   } else {
     return ContinuousSeqNum(static_cast<uint16_t>(frame->GetLowSeqNum()));
   }
@@ -216,8 +246,41 @@ bool VCMDecodingState::ContinuousLayer(int temporal_id,
   return (static_cast<uint8_t>(tl0_pic_id_ + 1) == tl0_pic_id);
 }
 
+bool VCMDecodingState::ContinuousFrameRefs(const VCMFrameBuffer* frame) const {
+  uint8_t num_refs = frame->CodecSpecific()->codecSpecific.VP9.num_ref_pics;
+  for (uint8_t r = 0; r < num_refs; ++r) {
+    uint16_t frame_ref = frame->PictureId() -
+                         frame->CodecSpecific()->codecSpecific.VP9.p_diff[r];
+    uint16_t frame_index = frame_ref % kFrameDecodedLength;
+    if (AheadOfFramesDecodedClearedTo(frame_index) ||
+        !frame_decoded_[frame_index]) {
+      return false;
+    }
+  }
+  return true;
+}
+
 bool VCMDecodingState::UsingPictureId(const VCMFrameBuffer* frame) const {
   return (frame->PictureId() != kNoPictureId && picture_id_ != kNoPictureId);
 }
 
+bool VCMDecodingState::UsingFlexibleMode(const VCMFrameBuffer* frame) const {
+  return frame->CodecSpecific()->codecType == kVideoCodecVP9 &&
+         frame->CodecSpecific()->codecSpecific.VP9.flexible_mode;
+}
+
+// TODO(philipel): Change how this check works; it currently
+// limits the max p_diff to 64.
+bool VCMDecodingState::AheadOfFramesDecodedClearedTo(uint16_t index) const {
+  // No way of knowing for sure if we are actually ahead of
+  // frame_decoded_cleared_to_. We just make the assumption
+  // that we are not trying to reference back to a very old
+  // index, but instead are referencing a newer index.
+  uint16_t diff =
+      index > frame_decoded_cleared_to_
+          ? kFrameDecodedLength - (index - frame_decoded_cleared_to_)
+          : frame_decoded_cleared_to_ - index;
+  return diff > kFrameDecodedLength / 2;
+}
+
 }  // namespace webrtc
@@ -21,6 +21,11 @@ class VCMPacket;
 
 class VCMDecodingState {
  public:
+  // The max number of bits used to reference back
+  // to a previous frame when using flexible mode.
+  static const uint16_t kNumRefBits = 7;
+  static const uint16_t kFrameDecodedLength = 1 << kNumRefBits;
+
   VCMDecodingState();
   ~VCMDecodingState();
   // Check for old frame
@@ -52,7 +57,10 @@ class VCMDecodingState {
   bool ContinuousPictureId(int picture_id) const;
   bool ContinuousSeqNum(uint16_t seq_num) const;
   bool ContinuousLayer(int temporal_id, int tl0_pic_id) const;
+  bool ContinuousFrameRefs(const VCMFrameBuffer* frame) const;
   bool UsingPictureId(const VCMFrameBuffer* frame) const;
+  bool UsingFlexibleMode(const VCMFrameBuffer* frame) const;
+  bool AheadOfFramesDecodedClearedTo(uint16_t index) const;
 
   // Keep state of last decoded frame.
   // TODO(mikhal/stefan): create designated classes to handle these types.
@@ -63,6 +71,10 @@
   int tl0_pic_id_;
   bool full_sync_;  // Sync flag when temporal layers are used.
   bool in_initial_state_;
+
+  // Used to check references in flexible mode.
+  bool frame_decoded_[kFrameDecodedLength];
+  uint16_t frame_decoded_cleared_to_;
 };
 
 }  // namespace webrtc
@@ -446,4 +446,254 @@ TEST(TestDecodingState, PictureIdRepeat) {
   EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
 }
 
+TEST(TestDecodingState, FrameContinuityFlexibleModeKeyFrame) {
+  VCMDecodingState dec_state;
+  VCMFrameBuffer frame;
+  VCMPacket packet;
+  packet.isFirstPacket = true;
+  packet.timestamp = 1;
+  packet.seqNum = 0xffff;
+  uint8_t data[] = "I need a data pointer for this test!";
+  packet.sizeBytes = sizeof(data);
+  packet.dataPtr = data;
+  packet.codecSpecificHeader.codec = kRtpVideoVp9;
+
+  RTPVideoHeaderVP9& vp9_hdr = packet.codecSpecificHeader.codecHeader.VP9;
+  vp9_hdr.picture_id = 10;
+  vp9_hdr.flexible_mode = true;
+
+  FrameData frame_data;
+  frame_data.rtt_ms = 0;
+  frame_data.rolling_average_packets_per_frame = -1;
+
+  // Key frame as first frame
+  packet.frameType = kVideoFrameKey;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+  dec_state.SetState(&frame);
+
+  // Key frame again
+  vp9_hdr.picture_id = 11;
+  frame.Reset();
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+  dec_state.SetState(&frame);
+
+  // Ref to 11, continuous
+  frame.Reset();
+  packet.frameType = kVideoFrameDelta;
+  vp9_hdr.picture_id = 12;
+  vp9_hdr.num_ref_pics = 1;
+  vp9_hdr.pid_diff[0] = 1;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+}
+
+TEST(TestDecodingState, FrameContinuityFlexibleModeOutOfOrderFrames) {
+  VCMDecodingState dec_state;
+  VCMFrameBuffer frame;
+  VCMPacket packet;
+  packet.isFirstPacket = true;
+  packet.timestamp = 1;
+  packet.seqNum = 0xffff;
+  uint8_t data[] = "I need a data pointer for this test!";
+  packet.sizeBytes = sizeof(data);
+  packet.dataPtr = data;
+  packet.codecSpecificHeader.codec = kRtpVideoVp9;
+
+  RTPVideoHeaderVP9& vp9_hdr = packet.codecSpecificHeader.codecHeader.VP9;
+  vp9_hdr.picture_id = 10;
+  vp9_hdr.flexible_mode = true;
+
+  FrameData frame_data;
+  frame_data.rtt_ms = 0;
+  frame_data.rolling_average_packets_per_frame = -1;
+
+  // Key frame as first frame
+  packet.frameType = kVideoFrameKey;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+  dec_state.SetState(&frame);
+
+  // Ref to 10, continuous
+  frame.Reset();
+  packet.frameType = kVideoFrameDelta;
+  vp9_hdr.picture_id = 15;
+  vp9_hdr.num_ref_pics = 1;
+  vp9_hdr.pid_diff[0] = 5;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+  dec_state.SetState(&frame);
+
+  // Out of order, last id 15, this id 12, ref to 10, continuous
+  frame.Reset();
+  vp9_hdr.picture_id = 12;
+  vp9_hdr.pid_diff[0] = 2;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+  dec_state.SetState(&frame);
+
+  // Ref 10, 12, 15, continuous
+  frame.Reset();
+  vp9_hdr.picture_id = 20;
+  vp9_hdr.num_ref_pics = 3;
+  vp9_hdr.pid_diff[0] = 10;
+  vp9_hdr.pid_diff[1] = 8;
+  vp9_hdr.pid_diff[2] = 5;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+}
+
+TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {
+  VCMDecodingState dec_state;
+  VCMFrameBuffer frame;
+  VCMPacket packet;
+  packet.isFirstPacket = true;
+  packet.timestamp = 1;
+  packet.seqNum = 0xffff;
+  uint8_t data[] = "I need a data pointer for this test!";
+  packet.sizeBytes = sizeof(data);
+  packet.dataPtr = data;
+  packet.codecSpecificHeader.codec = kRtpVideoVp9;
+
+  RTPVideoHeaderVP9& vp9_hdr = packet.codecSpecificHeader.codecHeader.VP9;
+  vp9_hdr.picture_id = 10;
+  vp9_hdr.flexible_mode = true;
+
+  FrameData frame_data;
+  frame_data.rtt_ms = 0;
+  frame_data.rolling_average_packets_per_frame = -1;
+
+  // Key frame as first frame
+  packet.frameType = kVideoFrameKey;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+
+  // Delta frame as first frame
+  frame.Reset();
+  packet.frameType = kVideoFrameDelta;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+  // Key frame then delta frame
+  frame.Reset();
+  packet.frameType = kVideoFrameKey;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  dec_state.SetState(&frame);
+  frame.Reset();
+  packet.frameType = kVideoFrameDelta;
+  vp9_hdr.num_ref_pics = 1;
+  vp9_hdr.picture_id = 15;
+  vp9_hdr.pid_diff[0] = 5;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+  dec_state.SetState(&frame);
+
+  // Ref to 11, not continuous
+  frame.Reset();
+  vp9_hdr.picture_id = 16;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+  // Ref to 15, continuous
+  frame.Reset();
+  vp9_hdr.picture_id = 16;
+  vp9_hdr.pid_diff[0] = 1;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+  dec_state.SetState(&frame);
+
+  // Ref to 11 and 15, not continuous
+  frame.Reset();
+  vp9_hdr.picture_id = 20;
+  vp9_hdr.num_ref_pics = 2;
+  vp9_hdr.pid_diff[0] = 9;
+  vp9_hdr.pid_diff[1] = 5;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+  // Ref to 10, 15 and 16, continuous
+  frame.Reset();
+  vp9_hdr.picture_id = 22;
+  vp9_hdr.num_ref_pics = 3;
+  vp9_hdr.pid_diff[0] = 12;
+  vp9_hdr.pid_diff[1] = 7;
+  vp9_hdr.pid_diff[2] = 6;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+  dec_state.SetState(&frame);
+
+  // Key Frame, continuous
+  frame.Reset();
+  packet.frameType = kVideoFrameKey;
+  vp9_hdr.picture_id = VCMDecodingState::kFrameDecodedLength - 2;
+  vp9_hdr.num_ref_pics = 0;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+  dec_state.SetState(&frame);
+
+  // Frame at last index, ref to KF, continuous
+  frame.Reset();
+  packet.frameType = kVideoFrameDelta;
+  vp9_hdr.picture_id = VCMDecodingState::kFrameDecodedLength - 1;
+  vp9_hdr.num_ref_pics = 1;
+  vp9_hdr.pid_diff[0] = 1;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+  dec_state.SetState(&frame);
+
+  // Frame after wrapping buffer length, ref to last index, continuous
+  frame.Reset();
+  vp9_hdr.picture_id = 0;
+  vp9_hdr.num_ref_pics = 1;
+  vp9_hdr.pid_diff[0] = 1;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+  dec_state.SetState(&frame);
+
+  // Frame after wrapping start frame, ref to 0, continuous
+  frame.Reset();
+  vp9_hdr.picture_id = 20;
+  vp9_hdr.num_ref_pics = 1;
+  vp9_hdr.pid_diff[0] = 20;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+  dec_state.SetState(&frame);
+
+  // Frame after wrapping start frame, ref to 10, not continuous
+  frame.Reset();
+  vp9_hdr.picture_id = 23;
+  vp9_hdr.num_ref_pics = 1;
+  vp9_hdr.pid_diff[0] = 13;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+  // Key frame, continuous
+  frame.Reset();
+  packet.frameType = kVideoFrameKey;
+  vp9_hdr.picture_id = 25;
+  vp9_hdr.num_ref_pics = 0;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+  dec_state.SetState(&frame);
+
+  // Ref to KF, continuous
+  frame.Reset();
+  packet.frameType = kVideoFrameDelta;
+  vp9_hdr.picture_id = 26;
+  vp9_hdr.num_ref_pics = 1;
+  vp9_hdr.pid_diff[0] = 1;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+  dec_state.SetState(&frame);
+
+  // Ref to frame previous to KF, not continuous
+  frame.Reset();
+  vp9_hdr.picture_id = 30;
+  vp9_hdr.num_ref_pics = 1;
+  vp9_hdr.pid_diff[0] = 30;
+  EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+  EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+}
+
 }  // namespace webrtc
@@ -147,6 +147,12 @@ void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header)
             header->codecHeader.VP9.inter_pic_predicted;
         _codecSpecificInfo.codecSpecific.VP9.flexible_mode =
             header->codecHeader.VP9.flexible_mode;
+        _codecSpecificInfo.codecSpecific.VP9.num_ref_pics =
+            header->codecHeader.VP9.num_ref_pics;
+        for (uint8_t r = 0; r < header->codecHeader.VP9.num_ref_pics; ++r) {
+          _codecSpecificInfo.codecSpecific.VP9.p_diff[r] =
+              header->codecHeader.VP9.pid_diff[r];
+        }
         _codecSpecificInfo.codecSpecific.VP9.ss_data_available =
             header->codecHeader.VP9.ss_data_available;
         if (header->codecHeader.VP9.picture_id != kNoPictureId) {
@@ -54,11 +54,9 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
       rtp->codecHeader.VP9.inter_layer_predicted =
          info->codecSpecific.VP9.inter_layer_predicted;
       rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx;
-
-      // Packetizer needs to know the number of spatial layers to correctly set
-      // the marker bit, even when the number won't be written in the packet.
       rtp->codecHeader.VP9.num_spatial_layers =
          info->codecSpecific.VP9.num_spatial_layers;
+
       if (info->codecSpecific.VP9.ss_data_available) {
         rtp->codecHeader.VP9.spatial_layer_resolution_present =
             info->codecSpecific.VP9.spatial_layer_resolution_present;
@@ -71,6 +69,10 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
         }
         rtp->codecHeader.VP9.gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
       }
+
+      rtp->codecHeader.VP9.num_ref_pics = info->codecSpecific.VP9.num_ref_pics;
+      for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i)
+        rtp->codecHeader.VP9.pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
       return;
     }
     case kVideoCodecH264:
@@ -686,12 +686,6 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
 
   num_consecutive_old_packets_ = 0;
 
-  if (packet.codec == kVideoCodecVP9 &&
-      packet.codecSpecificHeader.codecHeader.VP9.flexible_mode) {
-    // TODO(asapersson): Add support for flexible mode.
-    return kGeneralError;
-  }
-
   VCMFrameBuffer* frame;
   FrameList* frame_list;
   const VCMFrameBufferEnum error = GetFrame(packet, &frame, &frame_list);