Copy video frame metadata between encoded and plain frames in one place

Currently, some video frame metadata, such as rotation or NTP
timestamps, is copied in every encoder and decoder separately. This CL
makes the copying happen in a single place on the send or receive side,
which will make it easier to add new metadata in the future.

Also added some missing tests.

Bug: webrtc:10460
Change-Id: Ia49072c3041e75433f125a61050d2982b2bec1da
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/133346
Commit-Queue: Ilya Nikolaevskiy <ilnik@webrtc.org>
Reviewed-by: Johannes Kron <kron@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27719}
23 changed files with 381 additions and 365 deletions
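To see the shape of the change on the send side: encoders stop stamping rotation, NTP time, and content type themselves, and a single hook in the send pipeline matches each encoded image back to the input frame's metadata. Below is a minimal sketch of that pattern only; all names (FrameMetadata, MetadataCopier, the simplified fields) are illustrative stand-ins, not the actual WebRTC types.

#include <cstdint>
#include <deque>

// Simplified stand-ins for webrtc::VideoFrame / webrtc::EncodedImage;
// the fields mirror the ones this CL stops setting inside encoders.
struct FrameMetadata {
  uint32_t rtp_timestamp;
  int rotation;
  int64_t ntp_time_ms;
  bool is_screenshare;
};

struct EncodedImage {
  uint32_t rtp_timestamp;
  int rotation_ = 0;
  int64_t ntp_time_ms_ = -1;
  bool is_screenshare_ = false;
};

// Single send-side hook: remember the metadata of every frame handed to
// the encoder, then stamp it onto whatever the encoder emits. Adding a
// new metadata field now means touching this class once, not N codecs.
class MetadataCopier {
 public:
  void OnFrameToEncode(const FrameMetadata& md) { pending_.push_back(md); }

  void OnEncodedImage(EncodedImage* image) {
    // Discard entries for input frames the encoder dropped. The match
    // is not popped, so several simulcast/spatial layers sharing one
    // timestamp all get stamped with the same metadata.
    while (!pending_.empty() &&
           pending_.front().rtp_timestamp != image->rtp_timestamp) {
      pending_.pop_front();
    }
    if (pending_.empty()) return;  // Unknown frame; leave defaults.
    const FrameMetadata& md = pending_.front();
    image->rotation_ = md.rotation;
    image->ntp_time_ms_ = md.ntp_time_ms;
    image->is_screenshare_ = md.is_screenshare;
  }

 private:
  std::deque<FrameMetadata> pending_;
};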


@@ -1416,20 +1416,14 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_.size());
   encoded_image_.SetTimestamp(input_image_->timestamp());
   encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
-  encoded_image_.rotation_ = input_image_->rotation();
-  encoded_image_.content_type_ = (codec_.mode == VideoCodecMode::kScreensharing)
-                                     ? VideoContentType::SCREENSHARE
-                                     : VideoContentType::UNSPECIFIED;
   encoded_image_._encodedHeight =
       pkt->data.frame.height[layer_id.spatial_layer_id];
   encoded_image_._encodedWidth =
       pkt->data.frame.width[layer_id.spatial_layer_id];
-  encoded_image_.timing_.flags = VideoSendTiming::kInvalid;
   int qp = -1;
   vpx_codec_control(encoder_, VP8E_GET_LAST_QUANTIZER, &qp);
   encoded_image_.qp_ = qp;
-  encoded_image_.SetColorSpace(input_image_->color_space());
   if (full_superframe_drop_) {
     const bool end_of_picture = encoded_image_.SpatialIndex().value_or(0) + 1 ==
@@ -1643,20 +1637,16 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
   vpx_codec_err_t vpx_ret =
       vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
   RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
-  int ret = ReturnFrame(img, input_image.Timestamp(), input_image.ntp_time_ms_,
-                        qp, input_image.ColorSpace());
+  int ret = ReturnFrame(img, input_image.Timestamp(), qp);
   if (ret != 0) {
     return ret;
   }
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
-int VP9DecoderImpl::ReturnFrame(
-    const vpx_image_t* img,
-    uint32_t timestamp,
-    int64_t ntp_time_ms,
-    int qp,
-    const webrtc::ColorSpace* explicit_color_space) {
+int VP9DecoderImpl::ReturnFrame(const vpx_image_t* img,
+                                uint32_t timestamp,
+                                int qp) {
   if (img == nullptr) {
     // Decoder OK and nullptr image => No show frame.
     return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
@@ -1700,16 +1690,9 @@ int VP9DecoderImpl::ReturnFrame(
   auto builder = VideoFrame::Builder()
                      .set_video_frame_buffer(img_wrapped_buffer)
-                     .set_timestamp_ms(0)
                      .set_timestamp_rtp(timestamp)
-                     .set_ntp_time_ms(ntp_time_ms)
-                     .set_rotation(webrtc::kVideoRotation_0);
-  if (explicit_color_space) {
-    builder.set_color_space(*explicit_color_space);
-  } else {
-    builder.set_color_space(
-        ExtractVP9ColorSpace(img->cs, img->range, img->bit_depth));
-  }
+                     .set_color_space(ExtractVP9ColorSpace(img->cs, img->range,
+                                                           img->bit_depth));
   VideoFrame decoded_image = builder.build();
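The receive side mirrors the send side: ReturnFrame now builds a bare frame, and one wrapper around the decode callback copies the NTP timestamp, rotation, and any signaled color space from the EncodedImage onto it. Below is a hedged sketch of that pattern with simplified stand-in types; MetadataStampingCallback and OnFrameToDecode are illustrative names, not the actual WebRTC API.

#include <cstdint>
#include <optional>

struct ColorSpace {};

struct EncodedImage {
  int64_t ntp_time_ms_ = -1;
  int rotation_ = 0;
  std::optional<ColorSpace> color_space;  // Signaled out-of-band, if any.
};

struct VideoFrame {
  int64_t ntp_time_ms = -1;
  int rotation = 0;
  std::optional<ColorSpace> color_space;  // Decoder may pre-fill (e.g. from
                                          // the VP9 bitstream).
};

class DecodedImageCallback {
 public:
  virtual ~DecodedImageCallback() = default;
  virtual void Decoded(VideoFrame& frame) = 0;
};

// The one receive-side place where metadata is copied onto decoded
// frames; individual decoders no longer need the extra parameters.
// Simplification: assumes one frame in flight; a real pipeline would
// match input and output by RTP timestamp instead.
class MetadataStampingCallback : public DecodedImageCallback {
 public:
  explicit MetadataStampingCallback(DecodedImageCallback* sink)
      : sink_(sink) {}

  void OnFrameToDecode(const EncodedImage& image) { last_input_ = image; }

  void Decoded(VideoFrame& frame) override {
    frame.ntp_time_ms = last_input_.ntp_time_ms_;
    frame.rotation = last_input_.rotation_;
    // A signaled color space wins over whatever the decoder extracted
    // from the bitstream, which remains only a fallback.
    if (last_input_.color_space.has_value()) {
      frame.color_space = last_input_.color_space;
    }
    sink_->Decoded(frame);
  }

 private:
  DecodedImageCallback* sink_ = nullptr;
  EncodedImage last_input_;
};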