Set marker bit on last encoded spatial layer.

To handle per-layer frame dropping, both the VP9 encoder wrapper and the RTP
packetizer were modified.

- The encoder wrapper buffers the last encoded frame and passes it to the
  packetizer only after a frame of the next layer has been encoded or
  encoding of the superframe has finished (see the sketch after this list).
- The encoder wrapper sets the end_of_superframe flag on the last encoded
  frame of a superframe before passing it to the packetizer.
- If end_of_superframe is true, the packetizer sets the marker bit on the
  last packet of the frame.
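
In outline, the encoder-side change is a one-frame delay line. Below is a
minimal sketch of the pattern under simplified, hypothetical types; only the
end_of_superframe name and the flush-then-buffer logic come from the actual
change, while LayerFrame, BufferingEncoder, and the deliver callback are
illustrative stand-ins:

#include <cstddef>
#include <functional>
#include <utility>

// Stand-in for the wrapper's buffered output. Only the end_of_superframe
// flag matches the real change; everything else is simplified.
struct LayerFrame {
  size_t length = 0;  // 0 means "nothing buffered".
  bool end_of_superframe = false;
};

class BufferingEncoder {
 public:
  explicit BufferingEncoder(std::function<void(const LayerFrame&)> deliver)
      : deliver_(std::move(deliver)) {}

  // Called for each encoded spatial layer. The previously buffered frame is
  // now known not to end the superframe, so flush it first, then buffer the
  // new one.
  void OnLayerEncoded(const LayerFrame& frame) {
    DeliverBufferedFrame(/*end_of_superframe=*/false);
    buffered_ = frame;
  }

  // Called when the superframe is finished. Whatever is still buffered is
  // the last non-dropped layer frame and gets the flag set.
  void OnSuperframeComplete() {
    DeliverBufferedFrame(/*end_of_superframe=*/true);
  }

 private:
  void DeliverBufferedFrame(bool end_of_superframe) {
    if (buffered_.length > 0) {
      buffered_.end_of_superframe = end_of_superframe;
      deliver_(buffered_);
      buffered_.length = 0;  // Mark the buffer as consumed.
    }
  }

  LayerFrame buffered_;
  std::function<void(const LayerFrame&)> deliver_;
};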

Bug: webrtc:9066
Change-Id: I1d45319fbe6bc63d01721ea67bfb7440d4c29275
Reviewed-on: https://webrtc-review.googlesource.com/65540
Commit-Queue: Sergey Silkin <ssilkin@webrtc.org>
Reviewed-by: Åsa Persson <asapersson@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#22722}
Author: Sergey Silkin, 2018-04-04 11:45:41 +02:00 (committed by Commit Bot)
commit 2a1f183e99, parent e803dbe210
9 changed files with 216 additions and 61 deletions


@@ -172,6 +172,7 @@ struct RTPVideoHeaderVP9 {
     gof_idx = kNoGofIdx;
     num_ref_pics = 0;
     num_spatial_layers = 1;
+    end_of_superframe = true;
   }
 
   bool inter_pic_predicted;  // This layer frame is dependent on previously
@@ -208,6 +209,8 @@ struct RTPVideoHeaderVP9 {
   uint16_t width[kMaxVp9NumberOfSpatialLayers];
   uint16_t height[kMaxVp9NumberOfSpatialLayers];
   GofInfoVP9 gof;
+
+  bool end_of_superframe;  // This frame is last frame in superframe.
 };
 
 }  // namespace webrtc
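
The matching packetizer change is among the commit's 9 files but its hunk is
not shown on this page. Below is a minimal sketch of the rule stated in the
commit message, using hypothetical stand-in types (Vp9HeaderInfo,
RtpPacketSketch) rather than the real RTP packetizer classes. Note that the
struct above initializes end_of_superframe to true, so single-layer frames
keep the existing marker-bit behavior:

// Hypothetical stand-ins; the real logic lives in the VP9 RTP packetizer.
struct Vp9HeaderInfo {
  bool end_of_superframe = true;  // Mirrors the default set above.
};

struct RtpPacketSketch {
  bool marker_bit = false;
};

// Previously the marker bit was set on the last packet of every frame. With
// per-layer dropping it must also wait for the end of the superframe.
void SetMarkerBit(RtpPacketSketch* packet,
                  bool last_packet_of_frame,
                  const Vp9HeaderInfo& vp9) {
  packet->marker_bit = last_packet_of_frame && vp9.end_of_superframe;
}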


@@ -262,4 +262,60 @@ TEST_F(TestVp9Impl, EnableDisableSpatialLayers) {
   }
 }
 
+TEST_F(TestVp9Impl, EndOfSuperframe) {
+  const size_t num_spatial_layers = 2;
+  const size_t num_temporal_layers = 1;
+  codec_settings_.VP9()->numberOfSpatialLayers =
+      static_cast<unsigned char>(num_spatial_layers);
+  codec_settings_.VP9()->numberOfTemporalLayers =
+      static_cast<unsigned char>(num_temporal_layers);
+
+  std::vector<SpatialLayer> layers =
+      GetSvcConfig(codec_settings_.width, codec_settings_.height,
+                   num_spatial_layers, num_temporal_layers);
+  for (size_t i = 0; i < layers.size(); ++i) {
+    codec_settings_.spatialLayers[i] = layers[i];
+  }
+
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
+                                 0 /* max payload size (unused) */));
+
+  // Encode both base and upper layers. Check that end-of-superframe flag is
+  // set on upper layer frame but not on base layer frame.
+  BitrateAllocation bitrate_allocation;
+  bitrate_allocation.SetBitrate(0, 0, layers[0].targetBitrate * 1000);
+  bitrate_allocation.SetBitrate(1, 0, layers[1].targetBitrate * 1000);
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            encoder_->SetRateAllocation(bitrate_allocation,
+                                        codec_settings_.maxFramerate));
+  SetWaitForEncodedFramesThreshold(2);
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
+
+  std::vector<EncodedImage> frames;
+  std::vector<CodecSpecificInfo> codec_specific;
+  ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific));
+  EXPECT_FALSE(codec_specific[0].codecSpecific.VP9.end_of_superframe);
+  EXPECT_TRUE(codec_specific[1].codecSpecific.VP9.end_of_superframe);
+
+  // Encode only base layer. Check that end-of-superframe flag is
+  // set on base layer frame.
+  bitrate_allocation.SetBitrate(1, 0, 0);
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            encoder_->SetRateAllocation(bitrate_allocation,
+                                        codec_settings_.maxFramerate));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
+                                 0 /* max payload size (unused) */));
+
+  SetWaitForEncodedFramesThreshold(1);
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
+
+  ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific));
+  EXPECT_EQ(codec_specific[0].codecSpecific.VP9.spatial_idx, 0);
+  EXPECT_TRUE(codec_specific[0].codecSpecific.VP9.end_of_superframe);
+}
+
 }  // namespace webrtc


@@ -586,6 +586,9 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
   }
   timestamp_ += duration;
 
+  const bool end_of_superframe = true;
+  DeliverBufferedFrame(end_of_superframe);
+
   return WEBRTC_VIDEO_CODEC_OK;
 }
@@ -688,6 +691,14 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
 
 int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   RTC_DCHECK_EQ(pkt->kind, VPX_CODEC_CX_FRAME_PKT);
+
+  if (pkt->data.frame.sz == 0) {
+    // Ignore dropped frame.
+    return WEBRTC_VIDEO_CODEC_OK;
+  }
+
+  const bool end_of_superframe = false;
+  DeliverBufferedFrame(end_of_superframe);
+
   if (pkt->data.frame.sz > encoded_image_._size) {
     delete[] encoded_image_._buffer;
     encoded_image_._size = pkt->data.frame.sz;
@@ -696,15 +707,6 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   memcpy(encoded_image_._buffer, pkt->data.frame.buf, pkt->data.frame.sz);
   encoded_image_._length = pkt->data.frame.sz;
 
-  // No data partitioning in VP9, so 1 partition only.
-  int part_idx = 0;
-  RTPFragmentationHeader frag_info;
-  frag_info.VerifyAndAllocateFragmentationHeader(1);
-  frag_info.fragmentationOffset[part_idx] = 0;
-  frag_info.fragmentationLength[part_idx] = pkt->data.frame.sz;
-  frag_info.fragmentationPlType[part_idx] = 0;
-  frag_info.fragmentationTimeDiff[part_idx] = 0;
-
   vpx_svc_layer_id_t layer_id = {0};
   vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
   if (is_flexible_mode_ && codec_.mode == kScreensharing)
@@ -720,32 +722,47 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   }
   RTC_DCHECK_LE(encoded_image_._length, encoded_image_._size);
 
-  CodecSpecificInfo codec_specific;
-  PopulateCodecSpecific(&codec_specific, *pkt, input_image_->timestamp());
+  memset(&codec_specific_, 0, sizeof(codec_specific_));
+  PopulateCodecSpecific(&codec_specific_, *pkt, input_image_->timestamp());
 
-  if (encoded_image_._length > 0) {
-    TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length);
-    encoded_image_._timeStamp = input_image_->timestamp();
-    encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
-    encoded_image_.rotation_ = input_image_->rotation();
-    encoded_image_.content_type_ = (codec_.mode == kScreensharing)
-                                       ? VideoContentType::SCREENSHARE
-                                       : VideoContentType::UNSPECIFIED;
-    encoded_image_._encodedHeight =
-        pkt->data.frame.height[layer_id.spatial_layer_id];
-    encoded_image_._encodedWidth =
-        pkt->data.frame.width[layer_id.spatial_layer_id];
-    encoded_image_.timing_.flags = TimingFrameFlags::kInvalid;
-    int qp = -1;
-    vpx_codec_control(encoder_, VP8E_GET_LAST_QUANTIZER, &qp);
-    encoded_image_.qp_ = qp;
-    encoded_complete_callback_->OnEncodedImage(encoded_image_, &codec_specific,
-                                               &frag_info);
-  }
+  TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length);
+  encoded_image_._timeStamp = input_image_->timestamp();
+  encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
+  encoded_image_.rotation_ = input_image_->rotation();
+  encoded_image_.content_type_ = (codec_.mode == kScreensharing)
+                                     ? VideoContentType::SCREENSHARE
+                                     : VideoContentType::UNSPECIFIED;
+  encoded_image_._encodedHeight =
+      pkt->data.frame.height[layer_id.spatial_layer_id];
+  encoded_image_._encodedWidth =
+      pkt->data.frame.width[layer_id.spatial_layer_id];
+  encoded_image_.timing_.flags = TimingFrameFlags::kInvalid;
+  int qp = -1;
+  vpx_codec_control(encoder_, VP8E_GET_LAST_QUANTIZER, &qp);
+  encoded_image_.qp_ = qp;
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
+void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_superframe) {
+  if (encoded_image_._length > 0) {
+    codec_specific_.codecSpecific.VP9.end_of_superframe = end_of_superframe;
+
+    // No data partitioning in VP9, so 1 partition only.
+    int part_idx = 0;
+    RTPFragmentationHeader frag_info;
+    frag_info.VerifyAndAllocateFragmentationHeader(1);
+    frag_info.fragmentationOffset[part_idx] = 0;
+    frag_info.fragmentationLength[part_idx] = encoded_image_._length;
+    frag_info.fragmentationPlType[part_idx] = 0;
+    frag_info.fragmentationTimeDiff[part_idx] = 0;
+
+    encoded_complete_callback_->OnEncodedImage(encoded_image_, &codec_specific_,
+                                               &frag_info);
+
+    encoded_image_._length = 0;
+  }
+}
+
 vpx_svc_ref_frame_config VP9EncoderImpl::GenerateRefsAndFlags(
     const SuperFrameRefSettings& settings) {
   static const vpx_enc_frame_flags_t kAllFlags =
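
Taken together, the hunks above yield the following delivery order for a
two-layer superframe; the trace below is illustrative, not actual encoder
output:

// Illustrative sequence, not real logs:
//
//   GetEncodedLayerFrame(SL0)  ->  DeliverBufferedFrame(false)  // buffer empty: no-op
//                              ->  SL0 buffered in encoded_image_
//   GetEncodedLayerFrame(SL1)  ->  DeliverBufferedFrame(false)  // emits SL0, end_of_superframe = false
//                              ->  SL1 buffered in encoded_image_
//   Encode() epilogue          ->  DeliverBufferedFrame(true)   // emits SL1, end_of_superframe = true
//
// If SL1 is dropped (pkt->data.frame.sz == 0), GetEncodedLayerFrame returns
// early, SL0 stays buffered, and the Encode() epilogue emits it with
// end_of_superframe = true, which is the case covered by the second half of
// the EndOfSuperframe unit test.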


@@ -94,6 +94,8 @@ class VP9EncoderImpl : public VP9Encoder {
   static void EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
                                                void* user_data);
 
+  void DeliverBufferedFrame(bool end_of_superframe);
+
   // Determine maximum target for Intra frames
   //
   // Input:
@@ -103,6 +105,7 @@ class VP9EncoderImpl : public VP9Encoder {
   uint32_t MaxIntraTarget(uint32_t optimal_buffer_size);
 
   EncodedImage encoded_image_;
+  CodecSpecificInfo codec_specific_;
   EncodedImageCallback* encoded_complete_callback_;
   VideoCodec codec_;
   bool inited_;