Revert "Copy video frames metadata between encoded and plain frames in one place"
This reverts commit 00d0a0a1a9520fb4323d7e3a1c02133b7b942978.

Reason for revert: Breaks downstream tests

Original change's description:
> Copy video frames metadata between encoded and plain frames in one place
>
> Currently some video frames metadata like rotation or ntp timestamps are
> copied in every encoder and decoder separately. This CL makes copying to
> happen at a single place for send or receive side. This will make it
> easier to add new metadata in the future.
>
> Also, added some missing tests.
>
> Bug: webrtc:10460
> Change-Id: Ia49072c3041e75433f125a61050d2982b2bec1da
> Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/133346
> Commit-Queue: Ilya Nikolaevskiy <ilnik@webrtc.org>
> Reviewed-by: Johannes Kron <kron@webrtc.org>
> Reviewed-by: Erik Språng <sprang@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#27719}

TBR=ilnik@webrtc.org,sprang@webrtc.org,kron@webrtc.org

Change-Id: I8960a6cc15e552925129ba0037f197ff3fd93c25
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: webrtc:10460
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/134100
Reviewed-by: Artem Titarenko <artit@webrtc.org>
Commit-Queue: Artem Titarenko <artit@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27737}
committed by: Commit Bot
parent: eb415cd482
commit: 84ae2b6efd
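For context, a minimal sketch of the design difference involved, using simplified stand-in types (FrameMetadata, InputVideoFrame, SomeEncoder are illustrative names only, not WebRTC classes):

// Hypothetical sketch, not WebRTC source.
#include <cstdint>

struct FrameMetadata {
  uint32_t rtp_timestamp = 0;
  int64_t ntp_time_ms = 0;
  int64_t capture_time_ms = 0;
  int rotation_degrees = 0;
};

struct InputVideoFrame {
  FrameMetadata metadata;
};

struct EncodedImage {
  FrameMetadata metadata;
};

// Centralized approach (the reverted CL): one helper on the send path copies
// the metadata for every codec, so new fields are added in a single place.
void CopyInputFrameMetadata(const InputVideoFrame& in, EncodedImage* out) {
  out->metadata = in.metadata;
}

// Per-codec approach (restored by this revert): each encoder copies the
// fields itself inside its own Encode() implementation.
struct SomeEncoder {
  EncodedImage encoded;
  void Encode(const InputVideoFrame& in) {
    encoded.metadata.rtp_timestamp = in.metadata.rtp_timestamp;
    encoded.metadata.ntp_time_ms = in.metadata.ntp_time_ms;
    encoded.metadata.capture_time_ms = in.metadata.capture_time_ms;
    encoded.metadata.rotation_degrees = in.metadata.rotation_degrees;
    // ... actual encoding work would go here ...
  }
};

Accordingly, the diff below re-adds the per-codec copying in the H264, VP8 and VP9 encoders and decoders, and removes the shared copying (and its supporting VCMFrameInformation fields) from the generic decode path.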
@@ -125,6 +125,8 @@ int H264DecoderImpl::AVGetBuffer2(
       static_cast<void*>(absl::make_unique<VideoFrame>(
                              VideoFrame::Builder()
                                  .set_video_frame_buffer(frame_buffer)
+                                 .set_rotation(kVideoRotation_0)
+                                 .set_timestamp_us(0)
                                  .build())
                              .release()),
       0);
@@ -283,11 +285,16 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
   RTC_CHECK_EQ(av_frame_->data[kUPlaneIndex], i420_buffer->DataU());
   RTC_CHECK_EQ(av_frame_->data[kVPlaneIndex], i420_buffer->DataV());

-  const ColorSpace& color_space = ExtractH264ColorSpace(av_context_.get());
+  // Pass on color space from input frame if explicitly specified.
+  const ColorSpace& color_space =
+      input_image.ColorSpace() ? *input_image.ColorSpace()
+                               : ExtractH264ColorSpace(av_context_.get());
   VideoFrame decoded_frame =
       VideoFrame::Builder()
           .set_video_frame_buffer(input_frame->video_frame_buffer())
           .set_timestamp_us(input_frame->timestamp_us())
+          .set_timestamp_rtp(input_image.Timestamp())
+          .set_rotation(input_frame->rotation())
           .set_color_space(color_space)
           .build();

@@ -312,7 +319,9 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
     VideoFrame cropped_frame =
         VideoFrame::Builder()
             .set_video_frame_buffer(cropped_buf)
            .set_timestamp_ms(decoded_frame.render_time_ms())
+            .set_timestamp_rtp(decoded_frame.timestamp())
+            .set_rotation(decoded_frame.rotation())
             .set_color_space(color_space)
             .build();
     // TODO(nisse): Timestamp and rotation are all zero here. Change decoder
@@ -494,6 +494,15 @@ int32_t H264EncoderImpl::Encode(
     encoded_images_[i]._encodedWidth = configurations_[i].width;
     encoded_images_[i]._encodedHeight = configurations_[i].height;
     encoded_images_[i].SetTimestamp(input_frame.timestamp());
+    encoded_images_[i].ntp_time_ms_ = input_frame.ntp_time_ms();
+    encoded_images_[i].capture_time_ms_ = input_frame.render_time_ms();
+    encoded_images_[i].rotation_ = input_frame.rotation();
+    encoded_images_[i].SetColorSpace(input_frame.color_space());
+    encoded_images_[i].content_type_ =
+        (codec_.mode == VideoCodecMode::kScreensharing)
+            ? VideoContentType::SCREENSHARE
+            : VideoContentType::UNSPECIFIED;
+    encoded_images_[i].timing_.flags = VideoSendTiming::kInvalid;
     encoded_images_[i]._frameType = ConvertToVideoFrameType(info.eFrameType);
     encoded_images_[i].SetSpatialIndex(configurations_[i].simulcast_idx);

@@ -19,6 +19,7 @@
 #include "api/video_codecs/video_decoder.h"
 #include "api/video_codecs/video_encoder.h"
 #include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "common_video/test/utilities.h"
 #include "media/base/codec.h"
 #include "media/base/media_constants.h"
 #include "modules/video_coding/codecs/h264/include/h264.h"
@@ -48,9 +49,17 @@ class TestH264Impl : public VideoCodecUnitTest {
 #ifdef WEBRTC_USE_H264
 #define MAYBE_EncodeDecode EncodeDecode
 #define MAYBE_DecodedQpEqualsEncodedQp DecodedQpEqualsEncodedQp
+#define MAYBE_EncodedColorSpaceEqualsInputColorSpace \
+  EncodedColorSpaceEqualsInputColorSpace
+#define MAYBE_DecodedColorSpaceEqualsEncodedColorSpace \
+  DecodedColorSpaceEqualsEncodedColorSpace
 #else
 #define MAYBE_EncodeDecode DISABLED_EncodeDecode
 #define MAYBE_DecodedQpEqualsEncodedQp DISABLED_DecodedQpEqualsEncodedQp
+#define MAYBE_EncodedColorSpaceEqualsInputColorSpace \
+  DISABLED_EncodedColorSpaceEqualsInputColorSpace
+#define MAYBE_DecodedColorSpaceEqualsEncodedColorSpace \
+  DISABLED_DecodedColorSpaceEqualsEncodedColorSpace
 #endif

 TEST_F(TestH264Impl, MAYBE_EncodeDecode) {
@@ -96,4 +105,45 @@ TEST_F(TestH264Impl, MAYBE_DecodedQpEqualsEncodedQp) {
   EXPECT_EQ(encoded_frame.qp_, *decoded_qp);
 }

+TEST_F(TestH264Impl, MAYBE_EncodedColorSpaceEqualsInputColorSpace) {
+  VideoFrame* input_frame = NextInputFrame();
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  EXPECT_FALSE(encoded_frame.ColorSpace());
+
+  // Video frame with explicit color space information.
+  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/false);
+  VideoFrame input_frame_w_color_space =
+      VideoFrame::Builder()
+          .set_video_frame_buffer(input_frame->video_frame_buffer())
+          .set_color_space(color_space)
+          .build();
+
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            encoder_->Encode(input_frame_w_color_space, nullptr));
+  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  ASSERT_TRUE(encoded_frame.ColorSpace());
+  EXPECT_EQ(*encoded_frame.ColorSpace(), color_space);
+}
+
+TEST_F(TestH264Impl, MAYBE_DecodedColorSpaceEqualsEncodedColorSpace) {
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            encoder_->Encode(*NextInputFrame(), nullptr));
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  // Add color space to encoded frame.
+  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/false);
+  encoded_frame.SetColorSpace(color_space);
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+  std::unique_ptr<VideoFrame> decoded_frame;
+  absl::optional<uint8_t> decoded_qp;
+  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+  ASSERT_TRUE(decoded_frame);
+  ASSERT_TRUE(decoded_frame->color_space());
+  EXPECT_EQ(color_space, *decoded_frame->color_space());
+}
+
 }  // namespace webrtc
@@ -265,7 +265,8 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
   vpx_codec_err_t vpx_ret =
       vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
   RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
-  ret = ReturnFrame(img, input_image.Timestamp(), qp);
+  ret = ReturnFrame(img, input_image.Timestamp(), input_image.ntp_time_ms_, qp,
+                    input_image.ColorSpace());
   if (ret != 0) {
     // Reset to avoid requesting key frames too often.
     if (ret < 0 && propagation_cnt_ > 0)
@@ -281,9 +282,12 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
   return WEBRTC_VIDEO_CODEC_OK;
 }

-int LibvpxVp8Decoder::ReturnFrame(const vpx_image_t* img,
-                                  uint32_t timestamp,
-                                  int qp) {
+int LibvpxVp8Decoder::ReturnFrame(
+    const vpx_image_t* img,
+    uint32_t timestamp,
+    int64_t ntp_time_ms,
+    int qp,
+    const webrtc::ColorSpace* explicit_color_space) {
   if (img == NULL) {
     // Decoder OK and NULL image => No show frame
     return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
@@ -318,6 +322,8 @@ int LibvpxVp8Decoder::ReturnFrame(const vpx_image_t* img,
   VideoFrame decoded_image = VideoFrame::Builder()
                                  .set_video_frame_buffer(buffer)
                                  .set_timestamp_rtp(timestamp)
+                                 .set_ntp_time_ms(ntp_time_ms)
+                                 .set_color_space(explicit_color_space)
                                  .build();
   decode_complete_callback_->Decoded(decoded_image, absl::nullopt, qp);

@@ -48,7 +48,11 @@ class LibvpxVp8Decoder : public VideoDecoder {

  private:
   class QpSmoother;
-  int ReturnFrame(const vpx_image_t* img, uint32_t timeStamp, int qp);
+  int ReturnFrame(const vpx_image_t* img,
+                  uint32_t timeStamp,
+                  int64_t ntp_time_ms,
+                  int qp,
+                  const webrtc::ColorSpace* explicit_color_space);
   const bool use_postproc_arm_;

   I420BufferPool buffer_pool_;
@@ -1073,6 +1073,15 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
       }
     }
     encoded_images_[encoder_idx].SetTimestamp(input_image.timestamp());
+    encoded_images_[encoder_idx].capture_time_ms_ =
+        input_image.render_time_ms();
+    encoded_images_[encoder_idx].rotation_ = input_image.rotation();
+    encoded_images_[encoder_idx].content_type_ =
+        (codec_.mode == VideoCodecMode::kScreensharing)
+            ? VideoContentType::SCREENSHARE
+            : VideoContentType::UNSPECIFIED;
+    encoded_images_[encoder_idx].timing_.flags = VideoSendTiming::kInvalid;
+    encoded_images_[encoder_idx].SetColorSpace(input_image.color_space());

     if (send_stream_[stream_idx]) {
       if (encoded_images_[encoder_idx].size() > 0) {
@@ -227,10 +227,51 @@ TEST_F(TestVp8Impl, OnEncodedImageReportsInfo) {
   EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);

   EXPECT_EQ(kInitialTimestampRtp, encoded_frame.Timestamp());
+  EXPECT_EQ(kInitialTimestampMs, encoded_frame.capture_time_ms_);
   EXPECT_EQ(kWidth, static_cast<int>(encoded_frame._encodedWidth));
   EXPECT_EQ(kHeight, static_cast<int>(encoded_frame._encodedHeight));
 }

+// We only test the encoder here, since the decoded frame rotation is set based
+// on the CVO RTP header extension in VCMDecodedFrameCallback::Decoded.
+// TODO(brandtr): Consider passing through the rotation flag through the decoder
+// in the same way as done in the encoder.
+TEST_F(TestVp8Impl, EncodedRotationEqualsInputRotation) {
+  VideoFrame* input_frame = NextInputFrame();
+  input_frame->set_rotation(kVideoRotation_0);
+
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
+  EXPECT_EQ(kVideoRotation_0, encoded_frame.rotation_);
+
+  input_frame->set_rotation(kVideoRotation_90);
+  EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
+  EXPECT_EQ(kVideoRotation_90, encoded_frame.rotation_);
+}
+
+TEST_F(TestVp8Impl, EncodedColorSpaceEqualsInputColorSpace) {
+  // Video frame without explicit color space information.
+  VideoFrame* input_frame = NextInputFrame();
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
+  EXPECT_FALSE(encoded_frame.ColorSpace());
+
+  // Video frame with explicit color space information.
+  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/false);
+  VideoFrame input_frame_w_color_space =
+      VideoFrame::Builder()
+          .set_video_frame_buffer(input_frame->video_frame_buffer())
+          .set_color_space(color_space)
+          .build();
+
+  EncodeAndWaitForFrame(input_frame_w_color_space, &encoded_frame,
+                        &codec_specific_info);
+  ASSERT_TRUE(encoded_frame.ColorSpace());
+  EXPECT_EQ(*encoded_frame.ColorSpace(), color_space);
+}
+
 TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
   VideoFrame* input_frame = NextInputFrame();
   EncodedImage encoded_frame;
@@ -249,6 +290,24 @@ TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
   EXPECT_EQ(encoded_frame.qp_, *decoded_qp);
 }

+TEST_F(TestVp8Impl, DecodedColorSpaceEqualsEncodedColorSpace) {
+  VideoFrame* input_frame = NextInputFrame();
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
+
+  // Encoded frame with explicit color space information.
+  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/false);
+  encoded_frame.SetColorSpace(color_space);
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
+  std::unique_ptr<VideoFrame> decoded_frame;
+  absl::optional<uint8_t> decoded_qp;
+  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+  ASSERT_TRUE(decoded_frame);
+  ASSERT_TRUE(decoded_frame->color_space());
+  EXPECT_EQ(color_space, *decoded_frame->color_space());
+}
+
 TEST_F(TestVp8Impl, ChecksSimulcastSettings) {
   codec_settings_.numberOfSimulcastStreams = 2;
   // Resolutions are not in ascending order, temporal layers do not match.
@@ -343,6 +402,7 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
   // Compute PSNR on all planes (faster than SSIM).
   EXPECT_GT(I420PSNR(input_frame, decoded_frame.get()), 36);
   EXPECT_EQ(kInitialTimestampRtp, decoded_frame->timestamp());
+  EXPECT_EQ(kTestNtpTimeMs, decoded_frame->ntp_time_ms());
 }

 #if defined(WEBRTC_ANDROID)
@@ -11,6 +11,7 @@
 #include "api/video/color_space.h"
 #include "api/video/i420_buffer.h"
 #include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "common_video/test/utilities.h"
 #include "media/base/vp9_profile.h"
 #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
 #include "modules/video_coding/codecs/test/video_codec_unittest.h"
@@ -145,7 +146,50 @@ TEST_F(TestVp9Impl, EncodeDecode) {
             color_space.chroma_siting_vertical());
 }

-TEST_F(TestVp9Impl, DecodedColorSpaceFromBitstream) {
+// We only test the encoder here, since the decoded frame rotation is set based
+// on the CVO RTP header extension in VCMDecodedFrameCallback::Decoded.
+// TODO(brandtr): Consider passing through the rotation flag through the decoder
+// in the same way as done in the encoder.
+TEST_F(TestVp9Impl, EncodedRotationEqualsInputRotation) {
+  VideoFrame* input_frame = NextInputFrame();
+  input_frame->set_rotation(kVideoRotation_0);
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  EXPECT_EQ(kVideoRotation_0, encoded_frame.rotation_);
+
+  input_frame = NextInputFrame();
+  input_frame->set_rotation(kVideoRotation_90);
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
+  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  EXPECT_EQ(kVideoRotation_90, encoded_frame.rotation_);
+}
+
+TEST_F(TestVp9Impl, EncodedColorSpaceEqualsInputColorSpace) {
+  // Video frame without explicit color space information.
+  VideoFrame* input_frame = NextInputFrame();
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  EXPECT_FALSE(encoded_frame.ColorSpace());
+
+  // Video frame with explicit color space information.
+  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/true);
+  VideoFrame input_frame_w_hdr =
+      VideoFrame::Builder()
+          .set_video_frame_buffer(input_frame->video_frame_buffer())
+          .set_color_space(color_space)
+          .build();
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            encoder_->Encode(input_frame_w_hdr, nullptr));
+  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  ASSERT_TRUE(encoded_frame.ColorSpace());
+  EXPECT_EQ(*encoded_frame.ColorSpace(), color_space);
+}
+
+TEST_F(TestVp9Impl, DecodedColorSpaceEqualsEncodedColorSpace) {
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             encoder_->Encode(*NextInputFrame(), nullptr));
   EncodedImage encoded_frame;
@@ -162,6 +206,15 @@ TEST_F(TestVp9Impl, DecodedColorSpaceFromBitstream)
   ASSERT_TRUE(decoded_frame->color_space());
   // No HDR metadata present.
   EXPECT_FALSE(decoded_frame->color_space()->hdr_metadata());
+
+  // Encoded frame with explicit color space information.
+  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/true);
+  encoded_frame.SetColorSpace(color_space);
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+  ASSERT_TRUE(decoded_frame);
+  ASSERT_TRUE(decoded_frame->color_space());
+  EXPECT_EQ(color_space, *decoded_frame->color_space());
 }

 TEST_F(TestVp9Impl, DecodedQpEqualsEncodedQp) {
@@ -1416,14 +1416,20 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {

   TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_.size());
   encoded_image_.SetTimestamp(input_image_->timestamp());

+  encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
+  encoded_image_.rotation_ = input_image_->rotation();
+  encoded_image_.content_type_ = (codec_.mode == VideoCodecMode::kScreensharing)
+                                     ? VideoContentType::SCREENSHARE
+                                     : VideoContentType::UNSPECIFIED;
   encoded_image_._encodedHeight =
       pkt->data.frame.height[layer_id.spatial_layer_id];
   encoded_image_._encodedWidth =
       pkt->data.frame.width[layer_id.spatial_layer_id];
   encoded_image_.timing_.flags = VideoSendTiming::kInvalid;
   int qp = -1;
   vpx_codec_control(encoder_, VP8E_GET_LAST_QUANTIZER, &qp);
   encoded_image_.qp_ = qp;
+  encoded_image_.SetColorSpace(input_image_->color_space());

   if (full_superframe_drop_) {
     const bool end_of_picture = encoded_image_.SpatialIndex().value_or(0) + 1 ==
@@ -1637,16 +1643,20 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
   vpx_codec_err_t vpx_ret =
       vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
   RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
-  int ret = ReturnFrame(img, input_image.Timestamp(), qp);
+  int ret = ReturnFrame(img, input_image.Timestamp(), input_image.ntp_time_ms_,
+                        qp, input_image.ColorSpace());
   if (ret != 0) {
     return ret;
   }
   return WEBRTC_VIDEO_CODEC_OK;
 }

-int VP9DecoderImpl::ReturnFrame(const vpx_image_t* img,
-                                uint32_t timestamp,
-                                int qp) {
+int VP9DecoderImpl::ReturnFrame(
+    const vpx_image_t* img,
+    uint32_t timestamp,
+    int64_t ntp_time_ms,
+    int qp,
+    const webrtc::ColorSpace* explicit_color_space) {
   if (img == nullptr) {
     // Decoder OK and nullptr image => No show frame.
     return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
@@ -1690,9 +1700,16 @@ int VP9DecoderImpl::ReturnFrame(const vpx_image_t* img,

   auto builder = VideoFrame::Builder()
                      .set_video_frame_buffer(img_wrapped_buffer)
-                     .set_timestamp_ms(0)
                      .set_timestamp_rtp(timestamp)
-                     .set_color_space(ExtractVP9ColorSpace(img->cs, img->range,
-                                                           img->bit_depth));
+                     .set_ntp_time_ms(ntp_time_ms)
+                     .set_rotation(webrtc::kVideoRotation_0);
+  if (explicit_color_space) {
+    builder.set_color_space(*explicit_color_space);
+  } else {
+    builder.set_color_space(
+        ExtractVP9ColorSpace(img->cs, img->range, img->bit_depth));
+  }
+
+  VideoFrame decoded_image = builder.build();

@@ -193,7 +193,11 @@ class VP9DecoderImpl : public VP9Decoder {
   const char* ImplementationName() const override;

  private:
-  int ReturnFrame(const vpx_image_t* img, uint32_t timestamp, int qp);
+  int ReturnFrame(const vpx_image_t* img,
+                  uint32_t timestamp,
+                  int64_t ntp_time_ms,
+                  int qp,
+                  const webrtc::ColorSpace* explicit_color_space);

   // Memory pool used to share buffers between libvpx and webrtc.
   Vp9FrameBufferPool frame_buffer_pool_;
@@ -52,10 +52,8 @@ class VCMEncodedFrame : protected EncodedImage {
     return static_cast<const webrtc::EncodedImage&>(*this);
   }

-  using EncodedImage::ColorSpace;
   using EncodedImage::data;
   using EncodedImage::set_size;
-  using EncodedImage::SetColorSpace;
   using EncodedImage::SetSpatialIndex;
   using EncodedImage::SetTimestamp;
   using EncodedImage::size;
@@ -77,12 +77,6 @@ void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
     frameInfo = _timestampMap.Pop(decodedImage.timestamp());
   }

-  decodedImage.set_ntp_time_ms(frameInfo->ntp_time_ms);
-  if (frameInfo->color_space) {
-    decodedImage.set_color_space(*frameInfo->color_space);
-  }
-  decodedImage.set_rotation(frameInfo->rotation);
-
   if (frameInfo == NULL) {
     RTC_LOG(LS_WARNING) << "Too many frames backed up in the decoder, dropping "
                            "this one.";
@@ -149,6 +143,7 @@ void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,

   decodedImage.set_timestamp_us(frameInfo->renderTimeMs *
                                 rtc::kNumMicrosecsPerMillisec);
+  decodedImage.set_rotation(frameInfo->rotation);
   _receiveCallback->FrameToRender(decodedImage, qp, frameInfo->content_type);
 }

@@ -217,9 +212,6 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
   _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
   _frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
   _frameInfos[_nextFrameInfoIdx].timing = frame.video_timing();
-  _frameInfos[_nextFrameInfoIdx].ntp_time_ms =
-      frame.EncodedImage().ntp_time_ms_;
-  _frameInfos[_nextFrameInfoIdx].color_space = frame.ColorSpace();
   // Set correctly only for key frames. Thus, use latest key frame
   // content type. If the corresponding key frame was lost, decode will fail
   // and content type will be ignored.
@@ -34,8 +34,6 @@ struct VCMFrameInformation {
   VideoRotation rotation;
   VideoContentType content_type;
   EncodedImage::Timing timing;
-  int64_t ntp_time_ms;
-  const ColorSpace* color_space;
 };

 class VCMDecodedFrameCallback : public DecodedImageCallback {