Copy video frame metadata between encoded and plain frames in one place
Currently, some video frame metadata, such as rotation or NTP timestamps, is copied separately in every encoder and decoder. This CL makes the copying happen in a single place for the send and receive sides, which will make it easier to add new metadata in the future. Some missing tests are also added.

Bug: webrtc:10460
Change-Id: Ia49072c3041e75433f125a61050d2982b2bec1da
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/133346
Commit-Queue: Ilya Nikolaevskiy <ilnik@webrtc.org>
Reviewed-by: Johannes Kron <kron@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27719}
committed by Commit Bot
parent 59b64d32fc
commit 00d0a0a1a9
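Before the diff, a note on what "a single place" amounts to in practice: each encoder below stops writing the shared per-frame fields itself. The following send-side sketch is for orientation only; the helper name, signature, and placement are illustrative assumptions rather than the API this CL actually adds, but every field it touches is one the diff removes from the individual encoders.

#include "api/video/encoded_image.h"
#include "api/video/video_content_type.h"
#include "api/video/video_frame.h"
#include "api/video/video_timing.h"

namespace webrtc {

// Hypothetical helper (illustrative name): copy per-frame metadata from the
// raw input frame onto the encoded image once, so H264EncoderImpl,
// LibvpxVp8Encoder and VP9EncoderImpl can drop their per-codec copies.
void CopyEncoderMetadata(const VideoFrame& input_frame,
                         bool is_screensharing,
                         EncodedImage* encoded_image) {
  encoded_image->ntp_time_ms_ = input_frame.ntp_time_ms();
  encoded_image->capture_time_ms_ = input_frame.render_time_ms();
  encoded_image->rotation_ = input_frame.rotation();
  encoded_image->SetColorSpace(input_frame.color_space());
  encoded_image->content_type_ = is_screensharing
                                     ? VideoContentType::SCREENSHARE
                                     : VideoContentType::UNSPECIFIED;
  encoded_image->timing_.flags = VideoSendTiming::kInvalid;
}

}  // namespace webrtc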
@@ -125,8 +125,6 @@ int H264DecoderImpl::AVGetBuffer2(
       static_cast<void*>(absl::make_unique<VideoFrame>(
                              VideoFrame::Builder()
                                  .set_video_frame_buffer(frame_buffer)
-                                 .set_rotation(kVideoRotation_0)
-                                 .set_timestamp_us(0)
                                  .build())
                              .release()),
       0);
@@ -285,16 +283,11 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
   RTC_CHECK_EQ(av_frame_->data[kUPlaneIndex], i420_buffer->DataU());
   RTC_CHECK_EQ(av_frame_->data[kVPlaneIndex], i420_buffer->DataV());
 
-  // Pass on color space from input frame if explicitly specified.
-  const ColorSpace& color_space =
-      input_image.ColorSpace() ? *input_image.ColorSpace()
-                               : ExtractH264ColorSpace(av_context_.get());
+  const ColorSpace& color_space = ExtractH264ColorSpace(av_context_.get());
   VideoFrame decoded_frame =
       VideoFrame::Builder()
           .set_video_frame_buffer(input_frame->video_frame_buffer())
-          .set_timestamp_us(input_frame->timestamp_us())
           .set_timestamp_rtp(input_image.Timestamp())
-          .set_rotation(input_frame->rotation())
           .set_color_space(color_space)
           .build();
 
@@ -319,9 +312,7 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
     VideoFrame cropped_frame =
         VideoFrame::Builder()
             .set_video_frame_buffer(cropped_buf)
-            .set_timestamp_ms(decoded_frame.render_time_ms())
             .set_timestamp_rtp(decoded_frame.timestamp())
-            .set_rotation(decoded_frame.rotation())
             .set_color_space(color_space)
             .build();
     // TODO(nisse): Timestamp and rotation are all zero here. Change decoder
 
@@ -494,15 +494,6 @@ int32_t H264EncoderImpl::Encode(
     encoded_images_[i]._encodedWidth = configurations_[i].width;
     encoded_images_[i]._encodedHeight = configurations_[i].height;
     encoded_images_[i].SetTimestamp(input_frame.timestamp());
-    encoded_images_[i].ntp_time_ms_ = input_frame.ntp_time_ms();
-    encoded_images_[i].capture_time_ms_ = input_frame.render_time_ms();
-    encoded_images_[i].rotation_ = input_frame.rotation();
-    encoded_images_[i].SetColorSpace(input_frame.color_space());
-    encoded_images_[i].content_type_ =
-        (codec_.mode == VideoCodecMode::kScreensharing)
-            ? VideoContentType::SCREENSHARE
-            : VideoContentType::UNSPECIFIED;
-    encoded_images_[i].timing_.flags = VideoSendTiming::kInvalid;
     encoded_images_[i]._frameType = ConvertToVideoFrameType(info.eFrameType);
     encoded_images_[i].SetSpatialIndex(configurations_[i].simulcast_idx);
 
@@ -19,7 +19,6 @@
 #include "api/video_codecs/video_decoder.h"
 #include "api/video_codecs/video_encoder.h"
 #include "common_video/libyuv/include/webrtc_libyuv.h"
-#include "common_video/test/utilities.h"
 #include "media/base/codec.h"
 #include "media/base/media_constants.h"
 #include "modules/video_coding/codecs/h264/include/h264.h"
@@ -49,17 +48,9 @@ class TestH264Impl : public VideoCodecUnitTest {
 #ifdef WEBRTC_USE_H264
 #define MAYBE_EncodeDecode EncodeDecode
 #define MAYBE_DecodedQpEqualsEncodedQp DecodedQpEqualsEncodedQp
-#define MAYBE_EncodedColorSpaceEqualsInputColorSpace \
-  EncodedColorSpaceEqualsInputColorSpace
-#define MAYBE_DecodedColorSpaceEqualsEncodedColorSpace \
-  DecodedColorSpaceEqualsEncodedColorSpace
 #else
 #define MAYBE_EncodeDecode DISABLED_EncodeDecode
 #define MAYBE_DecodedQpEqualsEncodedQp DISABLED_DecodedQpEqualsEncodedQp
-#define MAYBE_EncodedColorSpaceEqualsInputColorSpace \
-  DISABLED_EncodedColorSpaceEqualsInputColorSpace
-#define MAYBE_DecodedColorSpaceEqualsEncodedColorSpace \
-  DISABLED_DecodedColorSpaceEqualsEncodedColorSpace
 #endif
 
 TEST_F(TestH264Impl, MAYBE_EncodeDecode) {
@@ -105,45 +96,4 @@ TEST_F(TestH264Impl, MAYBE_DecodedQpEqualsEncodedQp) {
   EXPECT_EQ(encoded_frame.qp_, *decoded_qp);
 }
 
-TEST_F(TestH264Impl, MAYBE_EncodedColorSpaceEqualsInputColorSpace) {
-  VideoFrame* input_frame = NextInputFrame();
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
-  EncodedImage encoded_frame;
-  CodecSpecificInfo codec_specific_info;
-  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
-  EXPECT_FALSE(encoded_frame.ColorSpace());
-
-  // Video frame with explicit color space information.
-  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/false);
-  VideoFrame input_frame_w_color_space =
-      VideoFrame::Builder()
-          .set_video_frame_buffer(input_frame->video_frame_buffer())
-          .set_color_space(color_space)
-          .build();
-
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            encoder_->Encode(input_frame_w_color_space, nullptr));
-  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
-  ASSERT_TRUE(encoded_frame.ColorSpace());
-  EXPECT_EQ(*encoded_frame.ColorSpace(), color_space);
-}
-
-TEST_F(TestH264Impl, MAYBE_DecodedColorSpaceEqualsEncodedColorSpace) {
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            encoder_->Encode(*NextInputFrame(), nullptr));
-  EncodedImage encoded_frame;
-  CodecSpecificInfo codec_specific_info;
-  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
-  // Add color space to encoded frame.
-  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/false);
-  encoded_frame.SetColorSpace(color_space);
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
-  std::unique_ptr<VideoFrame> decoded_frame;
-  absl::optional<uint8_t> decoded_qp;
-  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
-  ASSERT_TRUE(decoded_frame);
-  ASSERT_TRUE(decoded_frame->color_space());
-  EXPECT_EQ(color_space, *decoded_frame->color_space());
-}
-
 }  // namespace webrtc
@@ -265,8 +265,7 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
   vpx_codec_err_t vpx_ret =
       vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
   RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
-  ret = ReturnFrame(img, input_image.Timestamp(), input_image.ntp_time_ms_, qp,
-                    input_image.ColorSpace());
+  ret = ReturnFrame(img, input_image.Timestamp(), qp);
   if (ret != 0) {
     // Reset to avoid requesting key frames too often.
     if (ret < 0 && propagation_cnt_ > 0)
@@ -282,12 +281,9 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
-int LibvpxVp8Decoder::ReturnFrame(
-    const vpx_image_t* img,
-    uint32_t timestamp,
-    int64_t ntp_time_ms,
-    int qp,
-    const webrtc::ColorSpace* explicit_color_space) {
+int LibvpxVp8Decoder::ReturnFrame(const vpx_image_t* img,
+                                  uint32_t timestamp,
+                                  int qp) {
   if (img == NULL) {
     // Decoder OK and NULL image => No show frame
     return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
@@ -322,8 +318,6 @@ int LibvpxVp8Decoder::ReturnFrame(
   VideoFrame decoded_image = VideoFrame::Builder()
                                  .set_video_frame_buffer(buffer)
                                  .set_timestamp_rtp(timestamp)
-                                 .set_ntp_time_ms(ntp_time_ms)
-                                 .set_color_space(explicit_color_space)
                                  .build();
   decode_complete_callback_->Decoded(decoded_image, absl::nullopt, qp);
 
@@ -48,11 +48,7 @@ class LibvpxVp8Decoder : public VideoDecoder {
 
  private:
   class QpSmoother;
-  int ReturnFrame(const vpx_image_t* img,
-                  uint32_t timeStamp,
-                  int64_t ntp_time_ms,
-                  int qp,
-                  const webrtc::ColorSpace* explicit_color_space);
+  int ReturnFrame(const vpx_image_t* img, uint32_t timeStamp, int qp);
   const bool use_postproc_arm_;
 
   I420BufferPool buffer_pool_;
@@ -1068,15 +1068,6 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
       }
     }
     encoded_images_[encoder_idx].SetTimestamp(input_image.timestamp());
-    encoded_images_[encoder_idx].capture_time_ms_ =
-        input_image.render_time_ms();
-    encoded_images_[encoder_idx].rotation_ = input_image.rotation();
-    encoded_images_[encoder_idx].content_type_ =
-        (codec_.mode == VideoCodecMode::kScreensharing)
-            ? VideoContentType::SCREENSHARE
-            : VideoContentType::UNSPECIFIED;
-    encoded_images_[encoder_idx].timing_.flags = VideoSendTiming::kInvalid;
-    encoded_images_[encoder_idx].SetColorSpace(input_image.color_space());
 
     if (send_stream_[stream_idx]) {
       if (encoded_images_[encoder_idx].size() > 0) {
@@ -227,51 +227,10 @@ TEST_F(TestVp8Impl, OnEncodedImageReportsInfo) {
   EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
 
   EXPECT_EQ(kInitialTimestampRtp, encoded_frame.Timestamp());
   EXPECT_EQ(kInitialTimestampMs, encoded_frame.capture_time_ms_);
   EXPECT_EQ(kWidth, static_cast<int>(encoded_frame._encodedWidth));
   EXPECT_EQ(kHeight, static_cast<int>(encoded_frame._encodedHeight));
 }
 
-// We only test the encoder here, since the decoded frame rotation is set based
-// on the CVO RTP header extension in VCMDecodedFrameCallback::Decoded.
-// TODO(brandtr): Consider passing through the rotation flag through the decoder
-// in the same way as done in the encoder.
-TEST_F(TestVp8Impl, EncodedRotationEqualsInputRotation) {
-  VideoFrame* input_frame = NextInputFrame();
-  input_frame->set_rotation(kVideoRotation_0);
-
-  EncodedImage encoded_frame;
-  CodecSpecificInfo codec_specific_info;
-  EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
-  EXPECT_EQ(kVideoRotation_0, encoded_frame.rotation_);
-
-  input_frame->set_rotation(kVideoRotation_90);
-  EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
-  EXPECT_EQ(kVideoRotation_90, encoded_frame.rotation_);
-}
-
-TEST_F(TestVp8Impl, EncodedColorSpaceEqualsInputColorSpace) {
-  // Video frame without explicit color space information.
-  VideoFrame* input_frame = NextInputFrame();
-  EncodedImage encoded_frame;
-  CodecSpecificInfo codec_specific_info;
-  EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
-  EXPECT_FALSE(encoded_frame.ColorSpace());
-
-  // Video frame with explicit color space information.
-  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/false);
-  VideoFrame input_frame_w_color_space =
-      VideoFrame::Builder()
-          .set_video_frame_buffer(input_frame->video_frame_buffer())
-          .set_color_space(color_space)
-          .build();
-
-  EncodeAndWaitForFrame(input_frame_w_color_space, &encoded_frame,
-                        &codec_specific_info);
-  ASSERT_TRUE(encoded_frame.ColorSpace());
-  EXPECT_EQ(*encoded_frame.ColorSpace(), color_space);
-}
-
 TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
   VideoFrame* input_frame = NextInputFrame();
   EncodedImage encoded_frame;
@@ -290,24 +249,6 @@ TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
   EXPECT_EQ(encoded_frame.qp_, *decoded_qp);
 }
 
-TEST_F(TestVp8Impl, DecodedColorSpaceEqualsEncodedColorSpace) {
-  VideoFrame* input_frame = NextInputFrame();
-  EncodedImage encoded_frame;
-  CodecSpecificInfo codec_specific_info;
-  EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
-
-  // Encoded frame with explicit color space information.
-  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/false);
-  encoded_frame.SetColorSpace(color_space);
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
-  std::unique_ptr<VideoFrame> decoded_frame;
-  absl::optional<uint8_t> decoded_qp;
-  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
-  ASSERT_TRUE(decoded_frame);
-  ASSERT_TRUE(decoded_frame->color_space());
-  EXPECT_EQ(color_space, *decoded_frame->color_space());
-}
-
 TEST_F(TestVp8Impl, ChecksSimulcastSettings) {
   codec_settings_.numberOfSimulcastStreams = 2;
   // Resolutions are not in ascending order, temporal layers do not match.
@@ -402,7 +343,6 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
   // Compute PSNR on all planes (faster than SSIM).
   EXPECT_GT(I420PSNR(input_frame, decoded_frame.get()), 36);
   EXPECT_EQ(kInitialTimestampRtp, decoded_frame->timestamp());
-  EXPECT_EQ(kTestNtpTimeMs, decoded_frame->ntp_time_ms());
 }
 
 #if defined(WEBRTC_ANDROID)
@@ -11,7 +11,6 @@
 #include "api/video/color_space.h"
 #include "api/video/i420_buffer.h"
 #include "common_video/libyuv/include/webrtc_libyuv.h"
-#include "common_video/test/utilities.h"
 #include "media/base/vp9_profile.h"
 #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
 #include "modules/video_coding/codecs/test/video_codec_unittest.h"
@@ -146,50 +145,7 @@ TEST_F(TestVp9Impl, EncodeDecode) {
             color_space.chroma_siting_vertical());
 }
 
-// We only test the encoder here, since the decoded frame rotation is set based
-// on the CVO RTP header extension in VCMDecodedFrameCallback::Decoded.
-// TODO(brandtr): Consider passing through the rotation flag through the decoder
-// in the same way as done in the encoder.
-TEST_F(TestVp9Impl, EncodedRotationEqualsInputRotation) {
-  VideoFrame* input_frame = NextInputFrame();
-  input_frame->set_rotation(kVideoRotation_0);
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
-  EncodedImage encoded_frame;
-  CodecSpecificInfo codec_specific_info;
-  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
-  EXPECT_EQ(kVideoRotation_0, encoded_frame.rotation_);
-
-  input_frame = NextInputFrame();
-  input_frame->set_rotation(kVideoRotation_90);
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
-  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
-  EXPECT_EQ(kVideoRotation_90, encoded_frame.rotation_);
-}
-
-TEST_F(TestVp9Impl, EncodedColorSpaceEqualsInputColorSpace) {
-  // Video frame without explicit color space information.
-  VideoFrame* input_frame = NextInputFrame();
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
-  EncodedImage encoded_frame;
-  CodecSpecificInfo codec_specific_info;
-  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
-  EXPECT_FALSE(encoded_frame.ColorSpace());
-
-  // Video frame with explicit color space information.
-  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/true);
-  VideoFrame input_frame_w_hdr =
-      VideoFrame::Builder()
-          .set_video_frame_buffer(input_frame->video_frame_buffer())
-          .set_color_space(color_space)
-          .build();
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            encoder_->Encode(input_frame_w_hdr, nullptr));
-  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
-  ASSERT_TRUE(encoded_frame.ColorSpace());
-  EXPECT_EQ(*encoded_frame.ColorSpace(), color_space);
-}
-
-TEST_F(TestVp9Impl, DecodedColorSpaceEqualsEncodedColorSpace) {
+TEST_F(TestVp9Impl, DecodedColorSpaceFromBitstream) {
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             encoder_->Encode(*NextInputFrame(), nullptr));
   EncodedImage encoded_frame;
@@ -206,15 +162,6 @@ TEST_F(TestVp9Impl, DecodedColorSpaceEqualsEncodedColorSpace) {
   ASSERT_TRUE(decoded_frame->color_space());
   // No HDR metadata present.
   EXPECT_FALSE(decoded_frame->color_space()->hdr_metadata());
-
-  // Encoded frame with explicit color space information.
-  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/true);
-  encoded_frame.SetColorSpace(color_space);
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
-  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
-  ASSERT_TRUE(decoded_frame);
-  ASSERT_TRUE(decoded_frame->color_space());
-  EXPECT_EQ(color_space, *decoded_frame->color_space());
 }
 
 TEST_F(TestVp9Impl, DecodedQpEqualsEncodedQp) {
@@ -1416,20 +1416,14 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
 
   TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_.size());
   encoded_image_.SetTimestamp(input_image_->timestamp());
-  encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
-  encoded_image_.rotation_ = input_image_->rotation();
-  encoded_image_.content_type_ = (codec_.mode == VideoCodecMode::kScreensharing)
-                                     ? VideoContentType::SCREENSHARE
-                                     : VideoContentType::UNSPECIFIED;
 
   encoded_image_._encodedHeight =
       pkt->data.frame.height[layer_id.spatial_layer_id];
   encoded_image_._encodedWidth =
       pkt->data.frame.width[layer_id.spatial_layer_id];
   encoded_image_.timing_.flags = VideoSendTiming::kInvalid;
   int qp = -1;
   vpx_codec_control(encoder_, VP8E_GET_LAST_QUANTIZER, &qp);
   encoded_image_.qp_ = qp;
-  encoded_image_.SetColorSpace(input_image_->color_space());
 
   if (full_superframe_drop_) {
     const bool end_of_picture = encoded_image_.SpatialIndex().value_or(0) + 1 ==
@@ -1643,20 +1637,16 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
   vpx_codec_err_t vpx_ret =
       vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
   RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
-  int ret = ReturnFrame(img, input_image.Timestamp(), input_image.ntp_time_ms_,
-                        qp, input_image.ColorSpace());
+  int ret = ReturnFrame(img, input_image.Timestamp(), qp);
   if (ret != 0) {
     return ret;
   }
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
-int VP9DecoderImpl::ReturnFrame(
-    const vpx_image_t* img,
-    uint32_t timestamp,
-    int64_t ntp_time_ms,
-    int qp,
-    const webrtc::ColorSpace* explicit_color_space) {
+int VP9DecoderImpl::ReturnFrame(const vpx_image_t* img,
+                                uint32_t timestamp,
+                                int qp) {
   if (img == nullptr) {
     // Decoder OK and nullptr image => No show frame.
     return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
@@ -1700,16 +1690,9 @@ int VP9DecoderImpl::ReturnFrame(
 
   auto builder = VideoFrame::Builder()
                      .set_video_frame_buffer(img_wrapped_buffer)
-                     .set_timestamp_ms(0)
                      .set_timestamp_rtp(timestamp)
-                     .set_ntp_time_ms(ntp_time_ms)
-                     .set_rotation(webrtc::kVideoRotation_0);
-  if (explicit_color_space) {
-    builder.set_color_space(*explicit_color_space);
-  } else {
-    builder.set_color_space(
-        ExtractVP9ColorSpace(img->cs, img->range, img->bit_depth));
-  }
+                     .set_color_space(ExtractVP9ColorSpace(img->cs, img->range,
+                                                           img->bit_depth));
 
   VideoFrame decoded_image = builder.build();
 
@@ -193,11 +193,7 @@ class VP9DecoderImpl : public VP9Decoder {
   const char* ImplementationName() const override;
 
  private:
-  int ReturnFrame(const vpx_image_t* img,
-                  uint32_t timestamp,
-                  int64_t ntp_time_ms,
-                  int qp,
-                  const webrtc::ColorSpace* explicit_color_space);
+  int ReturnFrame(const vpx_image_t* img, uint32_t timestamp, int qp);
 
   // Memory pool used to share buffers between libvpx and webrtc.
   Vp9FrameBufferPool frame_buffer_pool_;
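A closing note on the receive side: with the ReturnFrame() signatures above slimmed down, the decoders no longer forward ntp_time_ms or an explicit color space themselves; per the commit message, this now happens at a single place on the receive path (the removed test comments point at VCMDecodedFrameCallback::Decoded). A rough sketch of that counterpart follows, with an illustrative name and placement rather than the CL's actual code; the fields it reads are exactly the EncodedImage members appearing in the diff.

#include "api/video/encoded_image.h"
#include "api/video/video_frame.h"

namespace webrtc {

// Hypothetical receive-side counterpart (illustrative name): attach metadata
// carried on the EncodedImage to the decoded frame once, instead of inside
// each decoder.
void CopyDecoderMetadata(const EncodedImage& input_image,
                         VideoFrame* decoded_frame) {
  decoded_frame->set_ntp_time_ms(input_image.ntp_time_ms_);
  decoded_frame->set_rotation(input_image.rotation_);
  // Prefer a color space signaled on the encoded image over whatever the
  // decoder extracted from the bitstream, mirroring the per-decoder logic
  // removed in this CL.
  if (input_image.ColorSpace()) {
    decoded_frame->set_color_space(*input_image.ColorSpace());
  }
}

}  // namespace webrtc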