Don't copy video frame metadata in each encoder/decoder
As this is handled higher up the pipeline in a single place for all
encoders/decoders.

Bug: webrtc:10460
Change-Id: I95b0a69aecaf07283c8776ac0d7e85d097e3576b
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/139882
Commit-Queue: Ilya Nikolaevskiy <ilnik@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#28172}
committed by Commit Bot
parent 9930929303
commit ab62b2ee51
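For context, the "single place" the message refers to can be sketched as a
thin wrapper around the codec-specific decode callback: it remembers the
metadata of the EncodedImage being decoded and stamps it onto every decoded
VideoFrame in one spot, so individual decoders no longer copy it by hand.
The class below is an illustrative assumption (its name, members, and wiring
are invented for this sketch), not the actual WebRTC call site changed here:

    // Hypothetical sketch only; not the real call site of this CL.
    #include "absl/types/optional.h"
    #include "api/video/encoded_image.h"
    #include "api/video/video_frame.h"
    #include "api/video_codecs/video_decoder.h"

    namespace webrtc {

    class MetadataStampingCallback : public DecodedImageCallback {
     public:
      explicit MetadataStampingCallback(DecodedImageCallback* forward)
          : forward_(forward) {}

      // Called right before Decode(); assumes frames are decoded and
      // delivered one at a time, in order.
      void OnDecodeStarted(const EncodedImage& input) {
        ntp_time_ms_ = input.ntp_time_ms_;
        rotation_ = input.rotation_;
      }

      int32_t Decoded(VideoFrame& frame) override {
        Stamp(&frame);
        return forward_->Decoded(frame);
      }

      void Decoded(VideoFrame& frame,
                   absl::optional<int32_t> decode_time_ms,
                   absl::optional<uint8_t> qp) override {
        Stamp(&frame);
        forward_->Decoded(frame, decode_time_ms, qp);
      }

     private:
      // The one place where per-frame metadata is applied.
      void Stamp(VideoFrame* frame) {
        frame->set_ntp_time_ms(ntp_time_ms_);
        frame->set_rotation(rotation_);
      }

      DecodedImageCallback* const forward_;
      int64_t ntp_time_ms_ = -1;
      VideoRotation rotation_ = kVideoRotation_0;
    };

    }  // namespace webrtc

Centralizing the copy this way means a new codec implementation cannot forget
a field, and adding or removing a metadata field later touches one place
instead of every encoder and decoder.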
@@ -341,8 +341,6 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
   VideoFrame decoded_frame = VideoFrame::Builder()
                                  .set_video_frame_buffer(decoded_buffer)
                                  .set_timestamp_rtp(input_image.Timestamp())
-                                 .set_rotation(input_image.rotation_)
-                                 .set_ntp_time_ms(input_image.ntp_time_ms_)
                                  .set_color_space(color_space)
                                  .build();
@@ -501,15 +501,6 @@ int32_t H264EncoderImpl::Encode(
       encoded_images_[i]._encodedWidth = configurations_[i].width;
       encoded_images_[i]._encodedHeight = configurations_[i].height;
       encoded_images_[i].SetTimestamp(input_frame.timestamp());
-      encoded_images_[i].ntp_time_ms_ = input_frame.ntp_time_ms();
-      encoded_images_[i].capture_time_ms_ = input_frame.render_time_ms();
-      encoded_images_[i].rotation_ = input_frame.rotation();
-      encoded_images_[i].SetColorSpace(input_frame.color_space());
-      encoded_images_[i].content_type_ =
-          (codec_.mode == VideoCodecMode::kScreensharing)
-              ? VideoContentType::SCREENSHARE
-              : VideoContentType::UNSPECIFIED;
-      encoded_images_[i].timing_.flags = VideoSendTiming::kInvalid;
       encoded_images_[i]._frameType = ConvertToVideoFrameType(info.eFrameType);
       encoded_images_[i].SetSpatialIndex(configurations_[i].simulcast_idx);
@@ -265,8 +265,7 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
   vpx_codec_err_t vpx_ret =
       vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
   RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
-  ret = ReturnFrame(img, input_image.Timestamp(), input_image.ntp_time_ms_, qp,
-                    input_image.ColorSpace());
+  ret = ReturnFrame(img, input_image.Timestamp(), qp);
   if (ret != 0) {
     // Reset to avoid requesting key frames too often.
     if (ret < 0 && propagation_cnt_ > 0)
@@ -282,12 +281,9 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
   return WEBRTC_VIDEO_CODEC_OK;
 }

-int LibvpxVp8Decoder::ReturnFrame(
-    const vpx_image_t* img,
-    uint32_t timestamp,
-    int64_t ntp_time_ms,
-    int qp,
-    const webrtc::ColorSpace* explicit_color_space) {
+int LibvpxVp8Decoder::ReturnFrame(const vpx_image_t* img,
+                                  uint32_t timestamp,
+                                  int qp) {
   if (img == NULL) {
     // Decoder OK and NULL image => No show frame
     return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
@@ -322,8 +318,6 @@ int LibvpxVp8Decoder::ReturnFrame(
   VideoFrame decoded_image = VideoFrame::Builder()
                                  .set_video_frame_buffer(buffer)
                                  .set_timestamp_rtp(timestamp)
-                                 .set_ntp_time_ms(ntp_time_ms)
-                                 .set_color_space(explicit_color_space)
                                  .build();
   decode_complete_callback_->Decoded(decoded_image, absl::nullopt, qp);
@@ -48,11 +48,7 @@ class LibvpxVp8Decoder : public VideoDecoder {

  private:
   class QpSmoother;
-  int ReturnFrame(const vpx_image_t* img,
-                  uint32_t timeStamp,
-                  int64_t ntp_time_ms,
-                  int qp,
-                  const webrtc::ColorSpace* explicit_color_space);
+  int ReturnFrame(const vpx_image_t* img, uint32_t timeStamp, int qp);
   const bool use_postproc_arm_;

   I420BufferPool buffer_pool_;
@@ -1135,15 +1135,6 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image,
       }
     }
     encoded_images_[encoder_idx].SetTimestamp(input_image.timestamp());
-    encoded_images_[encoder_idx].capture_time_ms_ =
-        input_image.render_time_ms();
-    encoded_images_[encoder_idx].rotation_ = input_image.rotation();
-    encoded_images_[encoder_idx].content_type_ =
-        (codec_.mode == VideoCodecMode::kScreensharing)
-            ? VideoContentType::SCREENSHARE
-            : VideoContentType::UNSPECIFIED;
-    encoded_images_[encoder_idx].timing_.flags = VideoSendTiming::kInvalid;
-    encoded_images_[encoder_idx].SetColorSpace(input_image.color_space());
     encoded_images_[encoder_idx].SetRetransmissionAllowed(
         retransmission_allowed);
@@ -1446,20 +1446,13 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {

   TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_.size());
   encoded_image_.SetTimestamp(input_image_->timestamp());
-  encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
-  encoded_image_.rotation_ = input_image_->rotation();
-  encoded_image_.content_type_ = (codec_.mode == VideoCodecMode::kScreensharing)
-                                     ? VideoContentType::SCREENSHARE
-                                     : VideoContentType::UNSPECIFIED;
   encoded_image_._encodedHeight =
       pkt->data.frame.height[layer_id.spatial_layer_id];
   encoded_image_._encodedWidth =
       pkt->data.frame.width[layer_id.spatial_layer_id];
-  encoded_image_.timing_.flags = VideoSendTiming::kInvalid;
   int qp = -1;
   vpx_codec_control(encoder_, VP8E_GET_LAST_QUANTIZER, &qp);
   encoded_image_.qp_ = qp;
-  encoded_image_.SetColorSpace(input_image_->color_space());

   if (full_superframe_drop_) {
     const bool end_of_picture = encoded_image_.SpatialIndex().value_or(0) + 1 ==
@@ -1682,20 +1675,16 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
   vpx_codec_err_t vpx_ret =
       vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
   RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
-  int ret = ReturnFrame(img, input_image.Timestamp(), input_image.ntp_time_ms_,
-                        qp, input_image.ColorSpace());
+  int ret = ReturnFrame(img, input_image.Timestamp(), qp);
   if (ret != 0) {
     return ret;
   }
   return WEBRTC_VIDEO_CODEC_OK;
 }

-int VP9DecoderImpl::ReturnFrame(
-    const vpx_image_t* img,
-    uint32_t timestamp,
-    int64_t ntp_time_ms,
-    int qp,
-    const webrtc::ColorSpace* explicit_color_space) {
+int VP9DecoderImpl::ReturnFrame(const vpx_image_t* img,
+                                uint32_t timestamp,
+                                int qp) {
   if (img == nullptr) {
     // Decoder OK and nullptr image => No show frame.
     return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
@@ -1739,16 +1728,9 @@ int VP9DecoderImpl::ReturnFrame(

   auto builder = VideoFrame::Builder()
                      .set_video_frame_buffer(img_wrapped_buffer)
                      .set_timestamp_ms(0)
                      .set_timestamp_rtp(timestamp)
-                     .set_ntp_time_ms(ntp_time_ms)
-                     .set_rotation(webrtc::kVideoRotation_0);
-  if (explicit_color_space) {
-    builder.set_color_space(*explicit_color_space);
-  } else {
-    builder.set_color_space(
-        ExtractVP9ColorSpace(img->cs, img->range, img->bit_depth));
-  }
+                     .set_color_space(ExtractVP9ColorSpace(img->cs, img->range,
+                                                           img->bit_depth));

   VideoFrame decoded_image = builder.build();
@@ -194,11 +194,7 @@ class VP9DecoderImpl : public VP9Decoder {
   const char* ImplementationName() const override;

  private:
-  int ReturnFrame(const vpx_image_t* img,
-                  uint32_t timestamp,
-                  int64_t ntp_time_ms,
-                  int qp,
-                  const webrtc::ColorSpace* explicit_color_space);
+  int ReturnFrame(const vpx_image_t* img, uint32_t timestamp, int qp);

   // Memory pool used to share buffers between libvpx and webrtc.
   Vp9FrameBufferPool frame_buffer_pool_;
@@ -126,15 +126,10 @@ int32_t FakeEncoder::Encode(const VideoFrame& input_image,
     // Write a counter to the image to make each frame unique.
     WriteCounter(encoded.data() + frame_info.layers[i].size - 4, counter);
     encoded.SetTimestamp(input_image.timestamp());
-    encoded.capture_time_ms_ = input_image.render_time_ms();
     encoded._frameType = frame_info.keyframe ? VideoFrameType::kVideoFrameKey
                                              : VideoFrameType::kVideoFrameDelta;
     encoded._encodedWidth = simulcast_streams[i].width;
     encoded._encodedHeight = simulcast_streams[i].height;
-    encoded.rotation_ = input_image.rotation();
-    encoded.content_type_ = (mode == VideoCodecMode::kScreensharing)
-                                ? VideoContentType::SCREENSHARE
-                                : VideoContentType::UNSPECIFIED;
     encoded.SetSpatialIndex(i);
     CodecSpecificInfo codec_specific;
     std::unique_ptr<RTPFragmentationHeader> fragmentation =