Relanding 4597 - Don't force key frame when decoding with errors.
Makes sure that incomplete key frames or delta frames will be released from the jitter buffer (JB) when decoding with errors. The decoder in turn will trigger a PLI until a complete key frame is received, so that a decoding session can start.

TBR=stefan@webrtc.org
BUG=
Review URL: https://webrtc-codereview.appspot.com/2097004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@4607 4adac7df-926f-26a2-2b94-8c16560cd09d
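For illustration, below is a minimal, standalone sketch of the gating behaviour this change introduces in the VP8 decoder (see the key_frame_required_ logic in the diff that follows). The class, type, and enum names here are simplified stand-ins invented for this example, not the actual WebRTC API; the real logic lives in VP8DecoderImpl::Decode() and returns WEBRTC_VIDEO_CODEC_* codes that the caller turns into a key frame request (PLI).

// --- Illustrative sketch only; not part of the commit ---
#include <cassert>

// Simplified stand-ins for webrtc::EncodedImage and the WEBRTC_VIDEO_CODEC_*
// return codes used by the real decoder.
enum FrameType { kSketchKeyFrame, kSketchDeltaFrame };

struct EncodedImageStub {
  FrameType frame_type;
  bool complete_frame;
};

enum DecodeResult { kOk, kError };

class KeyFrameGate {
 public:
  // Mirrors the new key_frame_required_ check: after InitDecode()/Reset() the
  // decoder rejects everything until it has seen one complete key frame.
  // Returning an error lets the caller drop the frame and keep requesting a
  // key frame instead of forcing an errored decode.
  DecodeResult Decode(const EncodedImageStub& image) {
    if (key_frame_required_) {
      if (image.frame_type != kSketchKeyFrame)
        return kError;  // Delta frame before any complete key frame.
      if (!image.complete_frame)
        return kError;  // Incomplete key frame: keep asking for one.
      key_frame_required_ = false;  // Complete key frame: session can start.
    }
    // ... actual bitstream decoding would happen here ...
    return kOk;
  }

 private:
  bool key_frame_required_ = true;  // Always start with a complete key frame.
};

int main() {
  KeyFrameGate gate;
  assert(gate.Decode({kSketchDeltaFrame, true}) == kError);  // No key frame yet.
  assert(gate.Decode({kSketchKeyFrame, false}) == kError);   // Incomplete key frame.
  assert(gate.Decode({kSketchKeyFrame, true}) == kOk);       // Session started.
  assert(gate.Decode({kSketchDeltaFrame, true}) == kOk);     // Deltas accepted now.
  return 0;
}

The DecodeWithACompleteKeyFrame test added in this CL exercises the same three cases against the real decoder.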
@@ -150,8 +150,8 @@ CodecTest::VideoEncodedBufferToEncodedImage(VideoFrame& videoBuffer,
image._buffer = videoBuffer.Buffer();
image._length = videoBuffer.Length();
image._size = videoBuffer.Size();
//image._frameType = static_cast<VideoFrameType>
// (videoBuffer.GetFrameType());
// image._frameType = static_cast<VideoFrameType>
// (videoBuffer.GetFrameType());
image._timeStamp = videoBuffer.TimeStamp();
image._encodedWidth = videoBuffer.Width();
image._encodedHeight = videoBuffer.Height();
@@ -32,6 +32,7 @@ _refEncFrame(NULL),
_refDecFrame(NULL),
_refEncFrameLength(0),
_sourceFile(NULL),
is_key_frame_(false),
_encodeCompleteCallback(NULL),
_decodeCompleteCallback(NULL)
{
@@ -48,6 +49,7 @@ _refEncFrame(NULL),
_refDecFrame(NULL),
_refEncFrameLength(0),
_sourceFile(NULL),
is_key_frame_(false),
_encodeCompleteCallback(NULL),
_decodeCompleteCallback(NULL)
{
@@ -254,23 +256,27 @@ UnitTest::Setup()
ASSERT_FALSE(SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK);

unsigned int frameLength = 0;
int i=0;
int i = 0;
_inputVideoBuffer.CreateEmptyFrame(_inst.width, _inst.height, _inst.width,
(_inst.width + 1) / 2,
(_inst.width + 1) / 2);
while (frameLength == 0)
{
EncodedImage encodedImage;
if (i > 0)
{
// Insert yet another frame
// Insert yet another frame.
ASSERT_TRUE(fread(_refFrame, 1, _lengthSourceFrame,
_sourceFile) == _lengthSourceFrame);
EXPECT_EQ(0, ConvertToI420(kI420, _refFrame, 0, 0, _width, _height,
0, kRotateNone, &_inputVideoBuffer));
_encoder->Encode(_inputVideoBuffer, NULL, NULL);
ASSERT_TRUE(WaitForEncodedFrame() > 0);
} else {
// The first frame is always a key frame.
encodedImage._frameType = kKeyFrame;
}
EncodedImage encodedImage;

VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
ASSERT_TRUE(_decoder->Decode(encodedImage, 0, NULL)
== WEBRTC_VIDEO_CODEC_OK);
@@ -332,6 +338,10 @@ UnitTest::Decode()
{
return WEBRTC_VIDEO_CODEC_OK;
}
if (is_key_frame_) {
encodedImage._frameType = kKeyFrame;
}

int ret = _decoder->Decode(encodedImage, 0, NULL);
unsigned int frameLength = WaitForDecodedFrame();
assert(ret == WEBRTC_VIDEO_CODEC_OK && (frameLength == 0 || frameLength
@@ -526,6 +536,10 @@ UnitTest::Perform()
memset(tmpBuf, 0, _refEncFrameLength);
_encodedVideoBuffer.CopyFrame(_refEncFrameLength, tmpBuf);
VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
if (i == 0) {
// First frame is a key frame.
is_key_frame_ = true;
}
ret = _decoder->Decode(encodedImage, false, NULL);
EXPECT_TRUE(ret <= 0);
if (ret == 0)
@@ -543,6 +557,8 @@ UnitTest::Perform()
ASSERT_FALSE(SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK);
frameLength = 0;
VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
// first frame is a key frame.
encodedImage._frameType = kKeyFrame;
while (frameLength == 0)
{
_decoder->Decode(encodedImage, false, NULL);
@@ -686,6 +702,10 @@ UnitTest::Perform()
encTimeStamp = _encodedVideoBuffer.TimeStamp();
EXPECT_TRUE(_inputVideoBuffer.timestamp() ==
static_cast<unsigned>(encTimeStamp));
if (frames == 0) {
// First frame is always a key frame.
is_key_frame_ = true;
}

frameLength = Decode();
if (frameLength == 0)
@@ -63,6 +63,7 @@ protected:
unsigned char* _refDecFrame;
unsigned int _refEncFrameLength;
FILE* _sourceFile;
bool is_key_frame_;

UnitTestEncodeCompleteCallback* _encodeCompleteCallback;
UnitTestDecodeCompleteCallback* _decodeCompleteCallback;
@@ -106,6 +106,43 @@ class TestVp8Impl : public ::testing::Test {
Vp8UnitTestDecodeCompleteCallback(&decoded_video_frame_));
encoder_->RegisterEncodeCompleteCallback(encode_complete_callback_.get());
decoder_->RegisterDecodeCompleteCallback(decode_complete_callback_.get());
// Using a QCIF image (aligned stride (u,v planes) > width).
// Processing only one frame.
const VideoSource source(test::ResourcePath("paris_qcif", "yuv"), kQCIF);
length_source_frame_ = source.GetFrameLength();
source_buffer_.reset(new uint8_t[length_source_frame_]);
source_file_ = fopen(source.GetFileName().c_str(), "rb");
ASSERT_TRUE(source_file_ != NULL);
// Set input frame.
ASSERT_EQ(fread(source_buffer_.get(), 1, length_source_frame_,
source_file_), length_source_frame_);
codec_inst_.width = source.GetWidth();
codec_inst_.height = source.GetHeight();
codec_inst_.maxFramerate = source.GetFrameRate();
// Setting aligned stride values.
int stride_uv = 0;
int stride_y = 0;
Calc16ByteAlignedStride(codec_inst_.width, &stride_y, &stride_uv);
EXPECT_EQ(stride_y, 176);
EXPECT_EQ(stride_uv, 96);

input_frame_.CreateEmptyFrame(codec_inst_.width, codec_inst_.height,
stride_y, stride_uv, stride_uv);
// Using ConvertToI420 to add stride to the image.
EXPECT_EQ(0, ConvertToI420(kI420, source_buffer_.get(), 0, 0,
codec_inst_.width, codec_inst_.height,
0, kRotateNone, &input_frame_));
}

void SetUpEncodeDecode() {
codec_inst_.startBitrate = 300;
codec_inst_.maxBitrate = 4000;
codec_inst_.qpMax = 56;
codec_inst_.codecSpecific.VP8.denoisingOn = true;

EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_inst_, 1, 1440));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->InitDecode(&codec_inst_, 1));
}

int WaitForEncodedFrame() const {
@@ -143,6 +180,7 @@ class TestVp8Impl : public ::testing::Test {
scoped_ptr<Vp8UnitTestDecodeCompleteCallback> decode_complete_callback_;
scoped_array<uint8_t> source_buffer_;
FILE* source_file_;
I420VideoFrame input_frame_;
scoped_ptr<VideoEncoder> encoder_;
scoped_ptr<VideoDecoder> decoder_;
VideoFrame encoded_video_frame_;
@@ -190,49 +228,39 @@ TEST_F(TestVp8Impl, EncoderParameterTest) {
}

TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(AlignedStrideEncodeDecode)) {
// Using a QCIF image (aligned stride (u,v planse) > width).
// Processing only one frame.
const VideoSource source(test::ResourcePath("paris_qcif", "yuv"), kQCIF);
length_source_frame_ = source.GetFrameLength();
source_buffer_.reset(new uint8_t[length_source_frame_]);
source_file_ = fopen(source.GetFileName().c_str(), "rb");
ASSERT_TRUE(source_file_ != NULL);
codec_inst_.maxFramerate = source.GetFrameRate();
codec_inst_.startBitrate = 300;
codec_inst_.maxBitrate = 4000;
codec_inst_.qpMax = 56;
codec_inst_.width = source.GetWidth();
codec_inst_.height = source.GetHeight();
codec_inst_.codecSpecific.VP8.denoisingOn = true;

// Get input frame.
ASSERT_EQ(fread(source_buffer_.get(), 1, length_source_frame_, source_file_),
length_source_frame_);
// Setting aligned stride values.
int stride_uv = 0;
int stride_y = 0;
Calc16ByteAlignedStride(codec_inst_.width, &stride_y, &stride_uv);
EXPECT_EQ(stride_y, 176);
EXPECT_EQ(stride_uv, 96);

I420VideoFrame input_frame;
input_frame.CreateEmptyFrame(codec_inst_.width, codec_inst_.height,
stride_y, stride_uv, stride_uv);
// Using ConvertToI420 to add stride to the image.
EXPECT_EQ(0, ConvertToI420(kI420, source_buffer_.get(), 0, 0,
codec_inst_.width, codec_inst_.height,
0, kRotateNone, &input_frame));

EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->InitEncode(&codec_inst_, 1, 1440));
encoder_->Encode(input_frame, NULL, NULL);
SetUpEncodeDecode();
encoder_->Encode(input_frame_, NULL, NULL);
EXPECT_GT(WaitForEncodedFrame(), 0);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->InitDecode(&codec_inst_, 1));
EncodedImage encodedImage;
VideoFrameToEncodedImage(encoded_video_frame_, encodedImage);
// First frame should be a key frame.
encodedImage._frameType = kKeyFrame;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encodedImage, false, NULL));
EXPECT_GT(WaitForDecodedFrame(), 0);
// Compute PSNR on all planes (faster than SSIM).
EXPECT_GT(I420PSNR(&input_frame, &decoded_video_frame_), 36);
EXPECT_GT(I420PSNR(&input_frame_, &decoded_video_frame_), 36);
}

TEST_F(TestVp8Impl, DecodeWithACompleteKeyFrame) {
SetUpEncodeDecode();
encoder_->Encode(input_frame_, NULL, NULL);
EXPECT_GT(WaitForEncodedFrame(), 0);
EncodedImage encodedImage;
VideoFrameToEncodedImage(encoded_video_frame_, encodedImage);
// Setting complete to false -> should return an error.
encodedImage._completeFrame = false;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
decoder_->Decode(encodedImage, false, NULL));
// Setting complete back to true. Forcing a delta frame.
encodedImage._frameType = kDeltaFrame;
encodedImage._completeFrame = true;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
decoder_->Decode(encodedImage, false, NULL));
// Now setting a key frame.
encodedImage._frameType = kKeyFrame;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encodedImage, false, NULL));
EXPECT_GT(I420PSNR(&input_frame_, &decoded_video_frame_), 36);
}

} // namespace webrtc
@@ -502,8 +502,8 @@ VP8DecoderImpl::VP8DecoderImpl()
image_format_(VPX_IMG_FMT_NONE),
ref_frame_(NULL),
propagation_cnt_(-1),
latest_keyframe_complete_(false),
mfqe_enabled_(false) {
mfqe_enabled_(false),
key_frame_required_(true) {
memset(&codec_, 0, sizeof(codec_));
}
@@ -518,7 +518,6 @@ int VP8DecoderImpl::Reset() {
}
InitDecode(&codec_, 1);
propagation_cnt_ = -1;
latest_keyframe_complete_ = false;
mfqe_enabled_ = false;
return WEBRTC_VIDEO_CODEC_OK;
}
@@ -571,9 +570,12 @@ int VP8DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
}

propagation_cnt_ = -1;
latest_keyframe_complete_ = false;

inited_ = true;

// Always start with a complete key frame.
key_frame_required_ = true;

return WEBRTC_VIDEO_CODEC_OK;
}
@@ -615,6 +617,18 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
}
#endif

// Always start with a complete key frame.
if (key_frame_required_) {
if (input_image._frameType != kKeyFrame)
return WEBRTC_VIDEO_CODEC_ERROR;
// We have a key frame - is it complete?
if (input_image._completeFrame) {
key_frame_required_ = false;
} else {
return WEBRTC_VIDEO_CODEC_ERROR;
}
}
// Restrict error propagation using key frame requests. Disabled when
// the feedback mode is enabled (RPS).
// Reset on a key frame refresh.
@@ -708,9 +722,7 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
// Whenever we receive an incomplete key frame all reference buffers will
// be corrupt. If that happens we must request new key frames until we
// decode a complete.
if (input_image._frameType == kKeyFrame)
latest_keyframe_complete_ = input_image._completeFrame;
if (!latest_keyframe_complete_)
if (input_image._frameType == kKeyFrame && !input_image._completeFrame)
return WEBRTC_VIDEO_CODEC_ERROR;

// Check for reference updates and last reference buffer corruption and
@@ -226,8 +226,8 @@ class VP8DecoderImpl : public VP8Decoder {
int image_format_;
vpx_ref_frame_t* ref_frame_;
int propagation_cnt_;
bool latest_keyframe_complete_;
bool mfqe_enabled_;
bool key_frame_required_;
}; // end of VP8Decoder class
} // namespace webrtc
@@ -597,8 +597,7 @@ VCMGenericDecoder* VCMCodecDataBase::CreateAndInitDecoder(
}

if (ptr_decoder->InitDecode(decoder_item->settings.get(),
decoder_item->number_of_cores,
decoder_item->require_key_frame) < 0) {
decoder_item->number_of_cores) < 0) {
ReleaseDecoder(ptr_decoder);
return NULL;
}
@@ -133,9 +133,7 @@ _frameInfos(),
_nextFrameInfoIdx(0),
_decoder(decoder),
_codecType(kVideoCodecUnknown),
_isExternal(isExternal),
_requireKeyFrame(false),
_keyFrameDecoded(false)
_isExternal(isExternal)
{
}
@@ -144,11 +142,8 @@ VCMGenericDecoder::~VCMGenericDecoder()
}

int32_t VCMGenericDecoder::InitDecode(const VideoCodec* settings,
int32_t numberOfCores,
bool requireKeyFrame)
int32_t numberOfCores)
{
_requireKeyFrame = requireKeyFrame;
_keyFrameDecoded = false;
_codecType = settings->codecType;

return _decoder.InitDecode(settings, numberOfCores);
@@ -157,15 +152,6 @@ int32_t VCMGenericDecoder::InitDecode(const VideoCodec* settings,
int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame,
int64_t nowMs)
{
if (_requireKeyFrame &&
!_keyFrameDecoded &&
frame.FrameType() != kVideoFrameKey &&
frame.FrameType() != kVideoFrameGolden)
{
// Require key frame is enabled, meaning that one key frame must be decoded
// before we can decode delta frames.
return VCM_CODEC_ERROR;
}
_frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
_frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
_callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);
@@ -194,22 +180,17 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame,
// No output
_callback->Pop(frame.TimeStamp());
}
// Update the key frame decoded variable so that we know whether or not we've decoded a key frame since reset.
_keyFrameDecoded = (_keyFrameDecoded ||
frame.FrameType() == kVideoFrameKey);
return ret;
}

int32_t
VCMGenericDecoder::Release()
{
_keyFrameDecoded = false;
return _decoder.Release();
}

int32_t VCMGenericDecoder::Reset()
{
_keyFrameDecoded = false;
return _decoder.Reset();
}
@@ -70,8 +70,7 @@ public:
* Initialize the decoder with the information from the VideoCodec
*/
int32_t InitDecode(const VideoCodec* settings,
int32_t numberOfCores,
bool requireKeyFrame);
int32_t numberOfCores);

/**
* Decode to a raw I420 frame,
@@ -115,7 +114,6 @@ protected:
VideoDecoder& _decoder;
VideoCodecType _codecType;
bool _isExternal;
bool _requireKeyFrame;
bool _keyFrameDecoded;

};
@@ -508,19 +508,13 @@ bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(uint32_t* timestamp) {
return false;
}
VCMFrameBuffer* oldest_frame = decodable_frames_.Front();

// If we have exactly one frame in the buffer, release it only if it is
// complete. We know decodable_frames_ is not empty due to the prevoius
// complete. We know decodable_frames_ is not empty due to the previous
// check.
if (decodable_frames_.size() == 1 && incomplete_frames_.empty()
&& oldest_frame->GetState() != kStateComplete) {
return false;
}
// Always start with a complete key frame.
if (last_decoded_state_.in_initial_state() &&
oldest_frame->FrameType() != kVideoFrameKey) {
return false;
}

*timestamp = oldest_frame->TimeStamp();
return true;
@@ -260,12 +260,19 @@ int32_t TbI420Decoder::Decode(
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}

// Only send complete frames.
if (static_cast<int>(inputImage._length) !=
webrtc::CalcBufferSize(webrtc::kI420,_width,_height)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}

int ret = ConvertToI420(webrtc::kI420, inputImage._buffer, 0, 0,
_width, _height,
0, webrtc::kRotateNone, &_decodedImage);

if (ret < 0)
return WEBRTC_VIDEO_CODEC_ERROR;

_decodedImage.set_timestamp(inputImage._timeStamp);

_decodeCompleteCallback->Decoded(_decodedImage);