Revert of Work on flexible mode and screen sharing. (patchset #28 id:520001 of https://codereview.webrtc.org/1328113004/ )

Reason for revert:
Seems to break VideoSendStreamTest.ReconfigureBitratesSetsEncoderBitratesCorrectly on the Linux Memcheck buildbot.

Original issue's description:
> Work on flexible mode and screen sharing.
>
> Implement VP8 style screensharing but with spatial layers.
> Implement flexible mode.
>
> Files from other patches:
> generic_encoder.cc
> layer_filtering_transport.cc
>
> BUG=webrtc:4914
>
> Committed: https://crrev.com/77ccfb4d16c148e61a316746bb5d9705e8b39f4a
> Cr-Commit-Position: refs/heads/master@{#10572}

TBR=sprang@webrtc.org,stefan@webrtc.org,philipel@google.com,asapersson@webrtc.org,mflodman@webrtc.org,philipel@webrtc.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=webrtc:4914

Review URL: https://codereview.webrtc.org/1438543002

Cr-Commit-Position: refs/heads/master@{#10578}
terelius
2015-11-10 05:31:18 -08:00
committed by Commit bot
parent 3ed348707e
commit 0be8f1d347
20 changed files with 45 additions and 1120 deletions

webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc

@@ -27,7 +27,6 @@
 #include "webrtc/common.h"
 #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
 #include "webrtc/modules/include/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"
 #include "webrtc/system_wrappers/include/logging.h"
 #include "webrtc/system_wrappers/include/tick_util.h"
@@ -77,12 +76,9 @@ VP9EncoderImpl::VP9EncoderImpl()
       raw_(NULL),
       input_image_(NULL),
       tl0_pic_idx_(0),
-      frames_since_kf_(0),
+      gof_idx_(0),
       num_temporal_layers_(0),
-      num_spatial_layers_(0),
-      frames_encoded_(0),
-      // Use two spatial layers when screensharing with flexible mode.
-      spatial_layer_(new ScreenshareLayersVP9(2)) {
+      num_spatial_layers_(0) {
   memset(&codec_, 0, sizeof(codec_));
   uint32_t seed = static_cast<uint32_t>(TickTime::MillisecondTimestamp());
   srand(seed);
@@ -212,7 +208,6 @@ int VP9EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
   }
   config_->rc_target_bitrate = new_bitrate_kbit;
   codec_.maxFramerate = new_framerate;
-  spatial_layer_->ConfigureBitrate(new_bitrate_kbit, 0);
   if (!SetSvcRates()) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
@ -251,7 +246,6 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
if (inst->codecSpecific.VP9.numberOfSpatialLayers > 2) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
int retVal = Release();
if (retVal < 0) {
return retVal;
@@ -330,13 +324,7 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
   // TODO(asapersson): Check configuration of temporal switch up and increase
   // pattern length.
-  is_flexible_mode_ = inst->codecSpecific.VP9.flexibleMode;
-  if (is_flexible_mode_) {
-    config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
-    config_->ts_number_layers = num_temporal_layers_;
-    if (codec_.mode == kScreensharing)
-      spatial_layer_->ConfigureBitrate(inst->startBitrate, 0);
-  } else if (num_temporal_layers_ == 1) {
+  if (num_temporal_layers_ == 1) {
     gof_.SetGofInfoVP9(kTemporalStructureMode1);
     config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING;
     config_->ts_number_layers = 1;
@@ -407,8 +395,7 @@ int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
       // 1:2 scaling in each dimension.
       svc_internal_.svc_params.scaling_factor_num[i] = scaling_factor_num;
       svc_internal_.svc_params.scaling_factor_den[i] = 256;
-      if (codec_.mode != kScreensharing)
-        scaling_factor_num /= 2;
+      scaling_factor_num /= 2;
     }
   }
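For reference, libvpx derives each spatial layer's resolution from the scaling_factor_num / scaling_factor_den pair set above. A standalone sketch of the arithmetic, not part of the diff (the 1280x720 input and two-layer setup are invented for illustration):

    #include <cstdio>

    int main() {
      // Hypothetical input resolution and layer count.
      const int kInputWidth = 1280;
      const int kInputHeight = 720;
      const int kNumSpatialLayers = 2;

      // Mirrors the loop above: the top layer is full size (256/256) and
      // each lower layer is scaled by an extra 1:2 in each dimension.
      int scaling_factor_num = 256;
      for (int i = kNumSpatialLayers - 1; i >= 0; --i) {
        printf("layer %d: %dx%d\n", i,
               kInputWidth * scaling_factor_num / 256,
               kInputHeight * scaling_factor_num / 256);
        scaling_factor_num /= 2;
      }
      return 0;  // prints: layer 1: 1280x720, layer 0: 640x360
    }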
@@ -508,35 +495,12 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
   raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane);
   raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane);
-  vpx_enc_frame_flags_t flags = 0;
+  int flags = 0;
   bool send_keyframe = (frame_type == kVideoFrameKey);
   if (send_keyframe) {
     // Key frame request from caller.
     flags = VPX_EFLAG_FORCE_KF;
   }
-  if (is_flexible_mode_) {
-    SuperFrameRefSettings settings;
-
-    // These structs are copied when calling vpx_codec_control,
-    // therefore it is ok for them to go out of scope.
-    vpx_svc_ref_frame_config enc_layer_conf;
-    vpx_svc_layer_id layer_id;
-
-    if (codec_.mode == kRealtimeVideo) {
-      // Real time video not yet implemented in flexible mode.
-      RTC_NOTREACHED();
-    } else {
-      settings = spatial_layer_->GetSuperFrameSettings(input_image.timestamp(),
-                                                       send_keyframe);
-    }
-    enc_layer_conf = GenerateRefsAndFlags(settings);
-    layer_id.temporal_layer_id = 0;
-    layer_id.spatial_layer_id = settings.start_layer;
-    vpx_codec_control(encoder_, VP9E_SET_SVC_LAYER_ID, &layer_id);
-    vpx_codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG, &enc_layer_conf);
-  }
-
   assert(codec_.maxFramerate > 0);
   uint32_t duration = 90000 / codec_.maxFramerate;
   if (vpx_codec_encode(encoder_, raw_, timestamp_, duration, flags,
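The block removed above is the heart of flexible mode: with VP9E_TEMPORAL_LAYERING_MODE_BYPASS set at init, the application picks every super frame's reference and update buffers itself and hands them to libvpx before each encode call. A minimal sketch of that control sequence, separate from the diff (the buffer indices are invented; struct and control names follow the vp8cx.h header this file already uses):

    #include "vpx/vp8cx.h"
    #include "vpx/vpx_encoder.h"

    // Assumes |encoder| was initialized with
    // config.temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS.
    void ConfigureSuperFrame(vpx_codec_ctx_t* encoder) {
      vpx_svc_layer_id_t layer_id = {0};
      layer_id.spatial_layer_id = 0;   // first spatial layer of this super frame
      layer_id.temporal_layer_id = 0;  // flexible mode keeps this at 0

      vpx_svc_ref_frame_config enc_layer_conf = {};
      enc_layer_conf.lst_fb_idx[0] = 0;  // hypothetical: reference buffer 0 as LAST
      enc_layer_conf.alt_fb_idx[0] = 1;  // hypothetical: update buffer 1 via ARF
      enc_layer_conf.frame_flags[0] =
          VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF |  // reference LAST only
          VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF;  // update only the ARF slot

      // Both structs are copied by vpx_codec_control, so stack lifetime is fine.
      vpx_codec_control(encoder, VP9E_SET_SVC_LAYER_ID, &layer_id);
      vpx_codec_control(encoder, VP9E_SET_SVC_REF_FRAME_CONFIG, &enc_layer_conf);
    }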
@@ -562,8 +526,9 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
                                  !codec_.codecSpecific.VP9.flexibleMode)
                                     ? true
                                     : false;
-  if (pkt.data.frame.flags & VPX_FRAME_IS_KEY)
-    frames_since_kf_ = 0;
+  if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) {
+    gof_idx_ = 0;
+  }
   vpx_svc_layer_id_t layer_id = {0};
   vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
@@ -586,18 +551,17 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
     vp9_info->ss_data_available = false;
   }
+  if (vp9_info->flexible_mode) {
+    vp9_info->gof_idx = kNoGofIdx;
+  } else {
+    vp9_info->gof_idx =
+        static_cast<uint8_t>(gof_idx_++ % gof_.num_frames_in_gof);
+  }
+
   // TODO(asapersson): this info has to be obtained from the encoder.
   vp9_info->temporal_up_switch = true;
-  bool is_first_frame = false;
-  if (is_flexible_mode_) {
-    is_first_frame =
-        layer_id.spatial_layer_id == spatial_layer_->GetStartLayer();
-  } else {
-    is_first_frame = layer_id.spatial_layer_id == 0;
-  }
-
-  if (is_first_frame) {
+  if (layer_id.spatial_layer_id == 0) {
     picture_id_ = (picture_id_ + 1) & 0x7FFF;
     // TODO(asapersson): this info has to be obtained from the encoder.
     vp9_info->inter_layer_predicted = false;
@@ -618,20 +582,6 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
   // Always populate this, so that the packetizer can properly set the marker
   // bit.
   vp9_info->num_spatial_layers = num_spatial_layers_;
-
-  vp9_info->num_ref_pics = 0;
-  if (vp9_info->flexible_mode) {
-    vp9_info->gof_idx = kNoGofIdx;
-    vp9_info->num_ref_pics = num_ref_pics_[layer_id.spatial_layer_id];
-    for (int i = 0; i < num_ref_pics_[layer_id.spatial_layer_id]; ++i) {
-      vp9_info->p_diff[i] = p_diff_[layer_id.spatial_layer_id][i];
-    }
-  } else {
-    vp9_info->gof_idx =
-        static_cast<uint8_t>(frames_since_kf_ % gof_.num_frames_in_gof);
-  }
-  ++frames_since_kf_;
-
   if (vp9_info->ss_data_available) {
     vp9_info->spatial_layer_resolution_present = true;
     for (size_t i = 0; i < vp9_info->num_spatial_layers; ++i) {
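As the removed else-branch shows, the non-flexible path signals a position inside a fixed group of frames (GOF) by counting frames since the last keyframe, while flexible mode sends kNoGofIdx plus explicit reference-picture diffs (p_diff). A tiny standalone sketch of the index arithmetic, not from the codebase (the 4-frame GOF is invented):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint8_t kNumFramesInGof = 4;  // hypothetical GOF length
      uint8_t frames_since_kf = 0;        // reset to 0 on every keyframe

      // Mirrors: gof_idx = frames_since_kf_ % gof_.num_frames_in_gof.
      for (int frame = 0; frame < 6; ++frame) {
        printf("frame %d -> gof_idx %d\n", frame,
               frames_since_kf % kNumFramesInGof);
        ++frames_since_kf;
      }
      return 0;  // prints gof_idx 0,1,2,3,0,1
    }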
@@ -667,14 +617,6 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   frag_info.fragmentationPlType[part_idx] = 0;
   frag_info.fragmentationTimeDiff[part_idx] = 0;
   encoded_image_._length += static_cast<uint32_t>(pkt->data.frame.sz);
-
-  vpx_svc_layer_id_t layer_id = {0};
-  vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
-  if (is_flexible_mode_ && codec_.mode == kScreensharing)
-    spatial_layer_->LayerFrameEncoded(
-        static_cast<unsigned int>(encoded_image_._length),
-        layer_id.spatial_layer_id);
-
   assert(encoded_image_._length <= encoded_image_._size);
@@ -696,108 +638,6 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   return WEBRTC_VIDEO_CODEC_OK;
 }
-
-vpx_svc_ref_frame_config VP9EncoderImpl::GenerateRefsAndFlags(
-    const SuperFrameRefSettings& settings) {
-  static const vpx_enc_frame_flags_t kAllFlags =
-      VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST |
-      VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_GF;
-  vpx_svc_ref_frame_config sf_conf = {};
-  if (settings.is_keyframe) {
-    // Used later on to make sure we don't make any invalid references.
-    memset(buffer_updated_at_frame_, -1, sizeof(buffer_updated_at_frame_));
-    for (int layer = settings.start_layer; layer <= settings.stop_layer;
-         ++layer) {
-      num_ref_pics_[layer] = 0;
-      buffer_updated_at_frame_[settings.layer[layer].upd_buf] =
-          frames_encoded_;
-      // When encoding a keyframe only the alt_fb_idx is used
-      // to specify which layer ends up in which buffer.
-      sf_conf.alt_fb_idx[layer] = settings.layer[layer].upd_buf;
-    }
-  } else {
-    for (int layer_idx = settings.start_layer;
-         layer_idx <= settings.stop_layer; ++layer_idx) {
-      vpx_enc_frame_flags_t layer_flags = kAllFlags;
-      num_ref_pics_[layer_idx] = 0;
-      int8_t refs[3] = {settings.layer[layer_idx].ref_buf1,
-                        settings.layer[layer_idx].ref_buf2,
-                        settings.layer[layer_idx].ref_buf3};
-
-      for (unsigned int ref_idx = 0; ref_idx < kMaxVp9RefPics; ++ref_idx) {
-        if (refs[ref_idx] == -1)
-          continue;
-
-        RTC_DCHECK_GE(refs[ref_idx], 0);
-        RTC_DCHECK_LE(refs[ref_idx], 7);
-        // Easier to remove flags from all flags rather than having to
-        // build the flags from 0.
-        switch (num_ref_pics_[layer_idx]) {
-          case 0: {
-            sf_conf.lst_fb_idx[layer_idx] = refs[ref_idx];
-            layer_flags &= ~VP8_EFLAG_NO_REF_LAST;
-            break;
-          }
-          case 1: {
-            sf_conf.gld_fb_idx[layer_idx] = refs[ref_idx];
-            layer_flags &= ~VP8_EFLAG_NO_REF_GF;
-            break;
-          }
-          case 2: {
-            sf_conf.alt_fb_idx[layer_idx] = refs[ref_idx];
-            layer_flags &= ~VP8_EFLAG_NO_REF_ARF;
-            break;
-          }
-        }
-        // Make sure we don't reference a buffer that hasn't been
-        // used at all or hasn't been used since a keyframe.
-        RTC_DCHECK_NE(buffer_updated_at_frame_[refs[ref_idx]], -1);
-
-        p_diff_[layer_idx][num_ref_pics_[layer_idx]] =
-            frames_encoded_ - buffer_updated_at_frame_[refs[ref_idx]];
-        num_ref_pics_[layer_idx]++;
-      }
-
-      bool upd_buf_same_as_a_ref = false;
-      if (settings.layer[layer_idx].upd_buf != -1) {
-        for (unsigned int ref_idx = 0; ref_idx < kMaxVp9RefPics; ++ref_idx) {
-          if (settings.layer[layer_idx].upd_buf == refs[ref_idx]) {
-            switch (ref_idx) {
-              case 0: {
-                layer_flags &= ~VP8_EFLAG_NO_UPD_LAST;
-                break;
-              }
-              case 1: {
-                layer_flags &= ~VP8_EFLAG_NO_UPD_GF;
-                break;
-              }
-              case 2: {
-                layer_flags &= ~VP8_EFLAG_NO_UPD_ARF;
-                break;
-              }
-            }
-            upd_buf_same_as_a_ref = true;
-            break;
-          }
-        }
-        if (!upd_buf_same_as_a_ref) {
-          // If we have three references and a buffer is specified to be
-          // updated, then that buffer must be the same as one of the
-          // three references.
-          RTC_CHECK_LT(num_ref_pics_[layer_idx], kMaxVp9RefPics);
-
-          sf_conf.alt_fb_idx[layer_idx] = settings.layer[layer_idx].upd_buf;
-          layer_flags ^= VP8_EFLAG_NO_UPD_ARF;
-        }
-
-        int updated_buffer = settings.layer[layer_idx].upd_buf;
-        buffer_updated_at_frame_[updated_buffer] = frames_encoded_;
-        sf_conf.frame_flags[layer_idx] = layer_flags;
-      }
-    }
-  }
-  ++frames_encoded_;
-  return sf_conf;
-}
-
 int VP9EncoderImpl::SetChannelParameters(uint32_t packet_loss, int64_t rtt) {
   return WEBRTC_VIDEO_CODEC_OK;
 }
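The deleted GenerateRefsAndFlags built each layer's encode flags by starting from kAllFlags, a mask that forbids every reference and update, and clearing bits as buffers were assigned; each reference also recorded p_diff = frames_encoded_ - buffer_updated_at_frame_[ref], the picture distance later signaled to the packetizer. A standalone sketch of the masking technique, separate from the revert (the chosen buffers are invented):

    #include <cstdio>
    #include "vpx/vp8cx.h"

    int main() {
      // Start from "reference nothing, update nothing", as kAllFlags does above.
      vpx_enc_frame_flags_t layer_flags =
          VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST |
          VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_GF;

      // Hypothetical frame: the first reference is mapped to LAST and the
      // updated buffer to ARF, mirroring the switch statements above.
      layer_flags &= ~VP8_EFLAG_NO_REF_LAST;  // allow referencing lst_fb_idx
      layer_flags &= ~VP8_EFLAG_NO_UPD_ARF;   // allow updating alt_fb_idx

      printf("frame_flags = 0x%lx\n", static_cast<unsigned long>(layer_flags));
      return 0;
    }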