Delete CodecSpecificInfo argument from VideoDecoder::Decode
Bug: webrtc:10379
Change-Id: I079b419604bf4e9c1994fe203d7db131a0ccddb6
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/125920
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Reviewed-by: Kári Helgason <kthelgason@webrtc.org>
Reviewed-by: Rasmus Brandt <brandtr@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27022}
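For readers skimming the hunks below, the net effect on the VideoDecoder::Decode surface is sketched here in a self-contained form. This is illustrative only and not code from this change: DecoderInterface, FakeDecoder, kOk, and main are stand-in names, and only the three-argument Decode() shape (encoded image, missing_frames, render_time_ms) is taken from the CL.

// Sketch only, not part of this commit: a self-contained illustration of the
// VideoDecoder::Decode shape after the CodecSpecificInfo argument is removed.
// All names below (DecoderInterface, FakeDecoder, kOk) are stand-ins; only the
// three-argument Decode() signature is taken from the change itself.
#include <cstdint>

struct EncodedImage {};     // Stand-in for webrtc::EncodedImage.
constexpr int32_t kOk = 0;  // Stand-in for WEBRTC_VIDEO_CODEC_OK.

// Implementer side: decoders now override the three-argument form.
class DecoderInterface {
 public:
  virtual ~DecoderInterface() = default;
  virtual int32_t Decode(const EncodedImage& input_image,
                         bool missing_frames,
                         int64_t render_time_ms) = 0;
};

class FakeDecoder : public DecoderInterface {
 public:
  int32_t Decode(const EncodedImage& input_image,
                 bool missing_frames,
                 int64_t render_time_ms) override {
    (void)input_image;
    (void)missing_frames;
    (void)render_time_ms;
    return kOk;  // A real decoder would consume the bitstream here.
  }
};

int main() {
  FakeDecoder decoder;
  EncodedImage encoded_frame;
  // Caller side. Old call shape: Decode(encoded_frame, false, nullptr, 0);
  // the new call shape simply drops the CodecSpecificInfo* argument:
  return decoder.Decode(encoded_frame, /*missing_frames=*/false,
                        /*render_time_ms=*/0) == kOk ? 0 : 1;
}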
@@ -228,7 +228,6 @@ int32_t H264DecoderImpl::RegisterDecodeCompleteCallback(
 
 int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
                                 bool /*missing_frames*/,
-                                const CodecSpecificInfo* codec_specific_info,
                                 int64_t /*render_time_ms*/) {
   if (!IsInitialized()) {
     ReportError();
@@ -245,11 +244,6 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
     ReportError();
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
-  if (codec_specific_info &&
-      codec_specific_info->codecType != kVideoCodecH264) {
-    ReportError();
-    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
-  }
 
   // FFmpeg requires padding due to some optimized bitstream readers reading 32
   // or 64 bits at once and could read over the end. See avcodec_decode_video2.
@@ -49,7 +49,6 @@ class H264DecoderImpl : public H264Decoder {
   // |missing_frames|, |fragmentation| and |render_time_ms| are ignored.
   int32_t Decode(const EncodedImage& input_image,
                  bool /*missing_frames*/,
-                 const CodecSpecificInfo* codec_specific_info = nullptr,
                  int64_t render_time_ms = -1) override;
 
   const char* ImplementationName() const override;
@@ -71,8 +71,7 @@ TEST_F(TestH264Impl, MAYBE_EncodeDecode) {
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
   encoded_frame._frameType = kVideoFrameKey;
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr, 0));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
   absl::optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -98,8 +97,7 @@ TEST_F(TestH264Impl, MAYBE_DecodedQpEqualsEncodedQp) {
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
   encoded_frame._frameType = kVideoFrameKey;
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr, 0));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
   absl::optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -140,8 +138,7 @@ TEST_F(TestH264Impl, MAYBE_DecodedColorSpaceEqualsEncodedColorSpace) {
   // Add color space to encoded frame.
   ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/false);
   encoded_frame.SetColorSpace(color_space);
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr, 0));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
   absl::optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -35,7 +35,6 @@ class MultiplexDecoderAdapter : public VideoDecoder {
                      int32_t number_of_cores) override;
   int32_t Decode(const EncodedImage& input_image,
                  bool missing_frames,
-                 const CodecSpecificInfo* codec_specific_info,
                  int64_t render_time_ms) override;
   int32_t RegisterDecodeCompleteCallback(
       DecodedImageCallback* callback) override;
@@ -130,7 +130,6 @@ int32_t MultiplexDecoderAdapter::InitDecode(const VideoCodec* codec_settings,
 int32_t MultiplexDecoderAdapter::Decode(
     const EncodedImage& input_image,
     bool missing_frames,
-    const CodecSpecificInfo* codec_specific_info,
     int64_t render_time_ms) {
   MultiplexImage image = MultiplexEncodedImagePacker::Unpack(input_image);
 
@@ -154,7 +153,7 @@ int32_t MultiplexDecoderAdapter::Decode(
   int32_t rv = 0;
   for (size_t i = 0; i < image.image_components.size(); i++) {
     rv = decoders_[image.image_components[i].component_index]->Decode(
-        image.image_components[i].encoded_image, missing_frames, nullptr,
+        image.image_components[i].encoded_image, missing_frames,
         render_time_ms);
     if (rv != WEBRTC_VIDEO_CODEC_OK)
       return rv;
@@ -225,8 +225,7 @@ TEST_P(TestMultiplexAdapter, EncodeDecodeI420Frame) {
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
 
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, &codec_specific_info, -1));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
   std::unique_ptr<VideoFrame> decoded_frame;
   absl::optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -243,8 +242,7 @@ TEST_P(TestMultiplexAdapter, EncodeDecodeI420AFrame) {
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
 
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr, 0));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
   absl::optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -529,7 +529,7 @@ void VideoProcessor::DecodeFrame(const EncodedImage& encoded_image,
 
   frame_stat->decode_start_ns = rtc::TimeNanos();
   frame_stat->decode_return_code =
-      decoders_->at(spatial_idx)->Decode(encoded_image, false, nullptr, 0);
+      decoders_->at(spatial_idx)->Decode(encoded_image, false, 0);
 }
 
 const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
@@ -153,7 +153,6 @@ int LibvpxVp8Decoder::InitDecode(const VideoCodec* inst, int number_of_cores) {
 
 int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
                              bool missing_frames,
-                             const CodecSpecificInfo* codec_specific_info,
                              int64_t /*render_time_ms*/) {
   if (!inited_) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
@@ -34,7 +34,6 @@ class LibvpxVp8Decoder : public VideoDecoder {
 
   int Decode(const EncodedImage& input_image,
              bool missing_frames,
-             const CodecSpecificInfo* codec_specific_info,
             int64_t /*render_time_ms*/) override;
 
   int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
@@ -210,8 +210,7 @@ TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
 
   // First frame should be a key frame.
   encoded_frame._frameType = kVideoFrameKey;
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr, -1));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
   std::unique_ptr<VideoFrame> decoded_frame;
   absl::optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -230,8 +229,7 @@ TEST_F(TestVp8Impl, DecodedColorSpaceEqualsEncodedColorSpace) {
   // Encoded frame with explicit color space information.
   ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/false);
   encoded_frame.SetColorSpace(color_space);
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr, -1));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
   std::unique_ptr<VideoFrame> decoded_frame;
   absl::optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -325,8 +323,7 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
   // First frame should be a key frame.
   encoded_frame._frameType = kVideoFrameKey;
   encoded_frame.ntp_time_ms_ = kTestNtpTimeMs;
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr, -1));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
 
   std::unique_ptr<VideoFrame> decoded_frame;
   absl::optional<uint8_t> decoded_qp;
@@ -352,16 +349,15 @@ TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) {
   // Setting complete to false -> should return an error.
   encoded_frame._completeFrame = false;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
-            decoder_->Decode(encoded_frame, false, nullptr, -1));
+            decoder_->Decode(encoded_frame, false, -1));
   // Setting complete back to true. Forcing a delta frame.
   encoded_frame._frameType = kVideoFrameDelta;
   encoded_frame._completeFrame = true;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
-            decoder_->Decode(encoded_frame, false, nullptr, -1));
+            decoder_->Decode(encoded_frame, false, -1));
   // Now setting a key frame.
   encoded_frame._frameType = kVideoFrameKey;
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr, -1));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
   std::unique_ptr<VideoFrame> decoded_frame;
   absl::optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -128,8 +128,7 @@ TEST_F(TestVp9Impl, EncodeDecode) {
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
   encoded_frame._frameType = kVideoFrameKey;
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr, 0));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
   absl::optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -198,8 +197,7 @@ TEST_F(TestVp9Impl, DecodedColorSpaceEqualsEncodedColorSpace) {
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
 
   // Encoded frame without explicit color space information.
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr, 0));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
   absl::optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -212,8 +210,7 @@ TEST_F(TestVp9Impl, DecodedColorSpaceEqualsEncodedColorSpace) {
   // Encoded frame with explicit color space information.
   ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/true);
   encoded_frame.SetColorSpace(color_space);
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr, 0));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
   ASSERT_TRUE(decoded_frame);
   ASSERT_TRUE(decoded_frame->color_space());
@@ -228,8 +225,7 @@ TEST_F(TestVp9Impl, DecodedQpEqualsEncodedQp) {
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
   encoded_frame._frameType = kVideoFrameKey;
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr, 0));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
   absl::optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -1431,8 +1427,7 @@ TEST_F(TestVp9ImplProfile2, EncodeDecode) {
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
   encoded_frame._frameType = kVideoFrameKey;
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr, 0));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
   absl::optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -1507,7 +1507,6 @@ int VP9DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
 
 int VP9DecoderImpl::Decode(const EncodedImage& input_image,
                            bool missing_frames,
-                           const CodecSpecificInfo* codec_specific_info,
                            int64_t /*render_time_ms*/) {
   if (!inited_) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
@@ -184,7 +184,6 @@ class VP9DecoderImpl : public VP9Decoder {
 
   int Decode(const EncodedImage& input_image,
              bool missing_frames,
-             const CodecSpecificInfo* codec_specific_info,
              int64_t /*render_time_ms*/) override;
 
   int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;