Don't re-randomize picture_id/tl0_pic_idx when re-initializing internal encoders.

TESTED=video_loopback and AppRTCMobile with forced encoder reinits every 30 frames.
BUG=webrtc:7475

Review-Url: https://codereview.webrtc.org/2833493003
Cr-Commit-Position: refs/heads/master@{#17984}
This commit is contained in:
brandtr
2017-05-03 03:25:53 -07:00
committed by Commit bot
parent 4ed18da990
commit 080830c513
16 changed files with 329 additions and 95 deletions

View File

@ -44,7 +44,8 @@ TEST_F(TestH264Impl, MAYBE_EncodeDecode) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr)); encoder_->Encode(*input_frame_, nullptr, nullptr));
EncodedImage encoded_frame; EncodedImage encoded_frame;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame)); CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame. // First frame should be a key frame.
encoded_frame._frameType = kVideoFrameKey; encoded_frame._frameType = kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
@ -60,7 +61,8 @@ TEST_F(TestH264Impl, MAYBE_DecodedQpEqualsEncodedQp) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr)); encoder_->Encode(*input_frame_, nullptr, nullptr));
EncodedImage encoded_frame; EncodedImage encoded_frame;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame)); CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame. // First frame should be a key frame.
encoded_frame._frameType = kVideoFrameKey; encoded_frame._frameType = kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,

View File

@ -33,6 +33,11 @@ VideoCodecTest::FakeEncodeCompleteCallback::OnEncodedImage(
const RTPFragmentationHeader* fragmentation) { const RTPFragmentationHeader* fragmentation) {
rtc::CritScope lock(&test_->encoded_frame_section_); rtc::CritScope lock(&test_->encoded_frame_section_);
test_->encoded_frame_.emplace(frame); test_->encoded_frame_.emplace(frame);
RTC_DCHECK(codec_specific_info);
test_->codec_specific_info_.codecType = codec_specific_info->codecType;
// Skip |codec_name|, to avoid allocating.
test_->codec_specific_info_.codecSpecific =
codec_specific_info->codecSpecific;
test_->encoded_frame_event_.Set(); test_->encoded_frame_event_.Set();
return Result(Result::OK); return Result(Result::OK);
} }
@ -65,7 +70,9 @@ void VideoCodecTest::SetUp() {
InitCodecs(); InitCodecs();
} }
bool VideoCodecTest::WaitForEncodedFrame(EncodedImage* frame) { bool VideoCodecTest::WaitForEncodedFrame(
EncodedImage* frame,
CodecSpecificInfo* codec_specific_info) {
bool ret = encoded_frame_event_.Wait(kEncodeTimeoutMs); bool ret = encoded_frame_event_.Wait(kEncodeTimeoutMs);
EXPECT_TRUE(ret) << "Timed out while waiting for an encoded frame."; EXPECT_TRUE(ret) << "Timed out while waiting for an encoded frame.";
// This becomes unsafe if there are multiple threads waiting for frames. // This becomes unsafe if there are multiple threads waiting for frames.
@ -74,6 +81,9 @@ bool VideoCodecTest::WaitForEncodedFrame(EncodedImage* frame) {
if (encoded_frame_) { if (encoded_frame_) {
*frame = std::move(*encoded_frame_); *frame = std::move(*encoded_frame_);
encoded_frame_.reset(); encoded_frame_.reset();
RTC_DCHECK(codec_specific_info);
codec_specific_info->codecType = codec_specific_info_.codecType;
codec_specific_info->codecSpecific = codec_specific_info_.codecSpecific;
return true; return true;
} else { } else {
return false; return false;
@ -98,18 +108,18 @@ bool VideoCodecTest::WaitForDecodedFrame(std::unique_ptr<VideoFrame>* frame,
} }
void VideoCodecTest::InitCodecs() { void VideoCodecTest::InitCodecs() {
VideoCodec codec_inst = codec_settings(); codec_settings_ = codec_settings();
codec_inst.startBitrate = kStartBitrate; codec_settings_.startBitrate = kStartBitrate;
codec_inst.targetBitrate = kTargetBitrate; codec_settings_.targetBitrate = kTargetBitrate;
codec_inst.maxBitrate = kMaxBitrate; codec_settings_.maxBitrate = kMaxBitrate;
codec_inst.maxFramerate = kMaxFramerate; codec_settings_.maxFramerate = kMaxFramerate;
codec_inst.width = kWidth; codec_settings_.width = kWidth;
codec_inst.height = kHeight; codec_settings_.height = kHeight;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_inst, 1 /* number of cores */, encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */)); 0 /* max payload size (unused) */));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->InitDecode(&codec_inst, 1 /* number of cores */)); decoder_->InitDecode(&codec_settings_, 1 /* number of cores */));
} }
} // namespace webrtc } // namespace webrtc

View File

@ -18,6 +18,7 @@
#include "webrtc/base/criticalsection.h" #include "webrtc/base/criticalsection.h"
#include "webrtc/base/event.h" #include "webrtc/base/event.h"
#include "webrtc/base/thread_annotations.h" #include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/test/gtest.h" #include "webrtc/test/gtest.h"
namespace webrtc { namespace webrtc {
@ -71,10 +72,14 @@ class VideoCodecTest : public ::testing::Test {
void SetUp() override; void SetUp() override;
bool WaitForEncodedFrame(EncodedImage* frame); bool WaitForEncodedFrame(EncodedImage* frame,
CodecSpecificInfo* codec_specific_info);
bool WaitForDecodedFrame(std::unique_ptr<VideoFrame>* frame, bool WaitForDecodedFrame(std::unique_ptr<VideoFrame>* frame,
rtc::Optional<uint8_t>* qp); rtc::Optional<uint8_t>* qp);
// Populated by InitCodecs().
VideoCodec codec_settings_;
std::unique_ptr<VideoFrame> input_frame_; std::unique_ptr<VideoFrame> input_frame_;
std::unique_ptr<VideoEncoder> encoder_; std::unique_ptr<VideoEncoder> encoder_;
@ -89,6 +94,7 @@ class VideoCodecTest : public ::testing::Test {
rtc::Event encoded_frame_event_; rtc::Event encoded_frame_event_;
rtc::CriticalSection encoded_frame_section_; rtc::CriticalSection encoded_frame_section_;
rtc::Optional<EncodedImage> encoded_frame_ GUARDED_BY(encoded_frame_section_); rtc::Optional<EncodedImage> encoded_frame_ GUARDED_BY(encoded_frame_section_);
CodecSpecificInfo codec_specific_info_ GUARDED_BY(encoded_frame_section_);
rtc::Event decoded_frame_event_; rtc::Event decoded_frame_event_;
rtc::CriticalSection decoded_frame_section_; rtc::CriticalSection decoded_frame_section_;

View File

@ -194,6 +194,10 @@ int DefaultTemporalLayers::CurrentLayerId() const {
return temporal_ids_[pattern_idx_ % temporal_ids_.size()]; return temporal_ids_[pattern_idx_ % temporal_ids_.size()];
} }
uint8_t DefaultTemporalLayers::Tl0PicIdx() const {
return tl0_pic_idx_;
}
std::vector<uint32_t> DefaultTemporalLayers::OnRatesUpdated( std::vector<uint32_t> DefaultTemporalLayers::OnRatesUpdated(
int bitrate_kbps, int bitrate_kbps,
int max_bitrate_kbps, int max_bitrate_kbps,

View File

@ -46,6 +46,8 @@ class DefaultTemporalLayers : public TemporalLayers {
int CurrentLayerId() const override; int CurrentLayerId() const override;
uint8_t Tl0PicIdx() const override;
private: private:
const size_t num_layers_; const size_t num_layers_;
const std::vector<unsigned int> temporal_ids_; const std::vector<unsigned int> temporal_ids_;

View File

@ -83,6 +83,10 @@ int ScreenshareLayers::CurrentLayerId() const {
return 0; return 0;
} }
uint8_t ScreenshareLayers::Tl0PicIdx() const {
return tl0_pic_idx_;
}
TemporalReferences ScreenshareLayers::UpdateLayerConfig(uint32_t timestamp) { TemporalReferences ScreenshareLayers::UpdateLayerConfig(uint32_t timestamp) {
if (number_of_temporal_layers_ <= 1) { if (number_of_temporal_layers_ <= 1) {
// No flags needed for 1 layer screenshare. // No flags needed for 1 layer screenshare.

View File

@ -55,6 +55,8 @@ class ScreenshareLayers : public TemporalLayers {
int CurrentLayerId() const override; int CurrentLayerId() const override;
uint8_t Tl0PicIdx() const override;
private: private:
bool TimeToSync(int64_t timestamp) const; bool TimeToSync(int64_t timestamp) const;
uint32_t GetCodecTargetBitrateKbps() const; uint32_t GetCodecTargetBitrateKbps() const;

View File

@ -88,6 +88,10 @@ class TemporalLayers {
virtual void FrameEncoded(unsigned int size, int qp) = 0; virtual void FrameEncoded(unsigned int size, int qp) = 0;
virtual int CurrentLayerId() const = 0; virtual int CurrentLayerId() const = 0;
// Returns the current tl0_pic_idx, so it can be reused in future
// instantiations.
virtual uint8_t Tl0PicIdx() const = 0;
}; };
class TemporalLayersListener; class TemporalLayersListener;

View File

@ -19,6 +19,7 @@
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h" #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h" #include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h" #include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/test/frame_utils.h" #include "webrtc/test/frame_utils.h"
#include "webrtc/test/gtest.h" #include "webrtc/test/gtest.h"
#include "webrtc/test/testsupport/fileutils.h" #include "webrtc/test/testsupport/fileutils.h"
@ -26,26 +27,31 @@
namespace webrtc { namespace webrtc {
namespace { namespace {
void Calc16ByteAlignedStride(int width, int* stride_y, int* stride_uv) { void Calc16ByteAlignedStride(int width, int* stride_y, int* stride_uv) {
*stride_y = 16 * ((width + 15) / 16); *stride_y = 16 * ((width + 15) / 16);
*stride_uv = 16 * ((width + 31) / 32); *stride_uv = 16 * ((width + 31) / 32);
} }
} // Anonymous namespace
enum { kMaxWaitEncTimeMs = 100 }; enum { kMaxWaitEncTimeMs = 100 };
enum { kMaxWaitDecTimeMs = 25 }; enum { kMaxWaitDecTimeMs = 25 };
static const uint32_t kTestTimestamp = 123; constexpr uint32_t kTestTimestamp = 123;
static const int64_t kTestNtpTimeMs = 456; constexpr int64_t kTestNtpTimeMs = 456;
constexpr uint32_t kTimestampIncrementPerFrame = 3000;
} // namespace
// TODO(mikhal): Replace these with mocks. // TODO(mikhal): Replace these with mocks.
class Vp8UnitTestEncodeCompleteCallback : public webrtc::EncodedImageCallback { class Vp8UnitTestEncodeCompleteCallback : public webrtc::EncodedImageCallback {
public: public:
Vp8UnitTestEncodeCompleteCallback(EncodedImage* frame, Vp8UnitTestEncodeCompleteCallback(EncodedImage* frame,
CodecSpecificInfo* codec_specific_info,
unsigned int decoderSpecificSize, unsigned int decoderSpecificSize,
void* decoderSpecificInfo) void* decoderSpecificInfo)
: encoded_frame_(frame), encode_complete_(false) {} : encoded_frame_(frame),
codec_specific_info_(codec_specific_info),
encode_complete_(false) {}
Result OnEncodedImage(const EncodedImage& encoded_frame_, Result OnEncodedImage(const EncodedImage& encoded_frame_,
const CodecSpecificInfo* codec_specific_info, const CodecSpecificInfo* codec_specific_info,
@ -54,6 +60,7 @@ class Vp8UnitTestEncodeCompleteCallback : public webrtc::EncodedImageCallback {
private: private:
EncodedImage* const encoded_frame_; EncodedImage* const encoded_frame_;
CodecSpecificInfo* const codec_specific_info_;
std::unique_ptr<uint8_t[]> frame_buffer_; std::unique_ptr<uint8_t[]> frame_buffer_;
bool encode_complete_; bool encode_complete_;
}; };
@ -77,6 +84,9 @@ Vp8UnitTestEncodeCompleteCallback::OnEncodedImage(
encoded_frame_->_frameType = encoded_frame._frameType; encoded_frame_->_frameType = encoded_frame._frameType;
encoded_frame_->_completeFrame = encoded_frame._completeFrame; encoded_frame_->_completeFrame = encoded_frame._completeFrame;
encoded_frame_->qp_ = encoded_frame.qp_; encoded_frame_->qp_ = encoded_frame.qp_;
codec_specific_info_->codecType = codec_specific_info->codecType;
// Skip |codec_name|, to avoid allocating.
codec_specific_info_->codecSpecific = codec_specific_info->codecSpecific;
encode_complete_ = true; encode_complete_ = true;
return Result(Result::OK, 0); return Result(Result::OK, 0);
} }
@ -135,9 +145,9 @@ class TestVp8Impl : public ::testing::Test {
virtual void SetUp() { virtual void SetUp() {
encoder_.reset(VP8Encoder::Create()); encoder_.reset(VP8Encoder::Create());
decoder_.reset(VP8Decoder::Create()); decoder_.reset(VP8Decoder::Create());
memset(&codec_inst_, 0, sizeof(codec_inst_)); memset(&codec_settings_, 0, sizeof(codec_settings_));
encode_complete_callback_.reset( encode_complete_callback_.reset(new Vp8UnitTestEncodeCompleteCallback(
new Vp8UnitTestEncodeCompleteCallback(&encoded_frame_, 0, NULL)); &encoded_frame_, &codec_specific_info_, 0, nullptr));
decode_complete_callback_.reset( decode_complete_callback_.reset(
new Vp8UnitTestDecodeCompleteCallback(&decoded_frame_, &decoded_qp_)); new Vp8UnitTestDecodeCompleteCallback(&decoded_frame_, &decoded_qp_));
encoder_->RegisterEncodeCompleteCallback(encode_complete_callback_.get()); encoder_->RegisterEncodeCompleteCallback(encode_complete_callback_.get());
@ -145,18 +155,18 @@ class TestVp8Impl : public ::testing::Test {
// Using a QCIF image (aligned stride (u,v planes) > width). // Using a QCIF image (aligned stride (u,v planes) > width).
// Processing only one frame. // Processing only one frame.
source_file_ = fopen(test::ResourcePath("paris_qcif", "yuv").c_str(), "rb"); source_file_ = fopen(test::ResourcePath("paris_qcif", "yuv").c_str(), "rb");
ASSERT_TRUE(source_file_ != NULL); ASSERT_TRUE(source_file_ != nullptr);
rtc::scoped_refptr<VideoFrameBuffer> compact_buffer( rtc::scoped_refptr<VideoFrameBuffer> compact_buffer(
test::ReadI420Buffer(kWidth, kHeight, source_file_)); test::ReadI420Buffer(kWidth, kHeight, source_file_));
ASSERT_TRUE(compact_buffer); ASSERT_TRUE(compact_buffer);
codec_inst_.width = kWidth; codec_settings_.width = kWidth;
codec_inst_.height = kHeight; codec_settings_.height = kHeight;
const int kFramerate = 30; const int kFramerate = 30;
codec_inst_.maxFramerate = kFramerate; codec_settings_.maxFramerate = kFramerate;
// Setting aligned stride values. // Setting aligned stride values.
int stride_uv; int stride_uv;
int stride_y; int stride_y;
Calc16ByteAlignedStride(codec_inst_.width, &stride_y, &stride_uv); Calc16ByteAlignedStride(codec_settings_.width, &stride_y, &stride_uv);
EXPECT_EQ(stride_y, 176); EXPECT_EQ(stride_y, 176);
EXPECT_EQ(stride_uv, 96); EXPECT_EQ(stride_uv, 96);
@ -172,16 +182,16 @@ class TestVp8Impl : public ::testing::Test {
} }
void SetUpEncodeDecode() { void SetUpEncodeDecode() {
codec_inst_.startBitrate = 300; codec_settings_.startBitrate = 300;
codec_inst_.maxBitrate = 4000; codec_settings_.maxBitrate = 4000;
codec_inst_.qpMax = 56; codec_settings_.qpMax = 56;
codec_inst_.VP8()->denoisingOn = true; codec_settings_.VP8()->denoisingOn = true;
codec_inst_.VP8()->tl_factory = &tl_factory_; codec_settings_.VP8()->tl_factory = &tl_factory_;
codec_inst_.VP8()->numberOfTemporalLayers = 1; codec_settings_.VP8()->numberOfTemporalLayers = 1;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_inst_, 1, 1440)); encoder_->InitEncode(&codec_settings_, 1, 1440));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->InitDecode(&codec_inst_, 1)); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->InitDecode(&codec_settings_, 1));
} }
size_t WaitForEncodedFrame() const { size_t WaitForEncodedFrame() const {
@ -205,6 +215,15 @@ class TestVp8Impl : public ::testing::Test {
return 0; return 0;
} }
void ExpectFrameWith(int16_t picture_id,
int tl0_pic_idx,
uint8_t temporal_idx) {
ASSERT_TRUE(WaitForEncodedFrame());
EXPECT_EQ(picture_id, codec_specific_info_.codecSpecific.VP8.pictureId);
EXPECT_EQ(tl0_pic_idx, codec_specific_info_.codecSpecific.VP8.tl0PicIdx);
EXPECT_EQ(temporal_idx, codec_specific_info_.codecSpecific.VP8.temporalIdx);
}
const int kWidth = 172; const int kWidth = 172;
const int kHeight = 144; const int kHeight = 144;
@ -216,25 +235,26 @@ class TestVp8Impl : public ::testing::Test {
std::unique_ptr<VideoEncoder> encoder_; std::unique_ptr<VideoEncoder> encoder_;
std::unique_ptr<VideoDecoder> decoder_; std::unique_ptr<VideoDecoder> decoder_;
EncodedImage encoded_frame_; EncodedImage encoded_frame_;
CodecSpecificInfo codec_specific_info_;
rtc::Optional<VideoFrame> decoded_frame_; rtc::Optional<VideoFrame> decoded_frame_;
rtc::Optional<uint8_t> decoded_qp_; rtc::Optional<uint8_t> decoded_qp_;
VideoCodec codec_inst_; VideoCodec codec_settings_;
TemporalLayersFactory tl_factory_; TemporalLayersFactory tl_factory_;
}; };
TEST_F(TestVp8Impl, EncoderParameterTest) { TEST_F(TestVp8Impl, EncoderParameterTest) {
strncpy(codec_inst_.plName, "VP8", 31); strncpy(codec_settings_.plName, "VP8", 31);
codec_inst_.plType = 126; codec_settings_.plType = 126;
codec_inst_.maxBitrate = 0; codec_settings_.maxBitrate = 0;
codec_inst_.minBitrate = 0; codec_settings_.minBitrate = 0;
codec_inst_.width = 1440; codec_settings_.width = 1440;
codec_inst_.height = 1080; codec_settings_.height = 1080;
codec_inst_.maxFramerate = 30; codec_settings_.maxFramerate = 30;
codec_inst_.startBitrate = 300; codec_settings_.startBitrate = 300;
codec_inst_.qpMax = 56; codec_settings_.qpMax = 56;
codec_inst_.VP8()->complexity = kComplexityNormal; codec_settings_.VP8()->complexity = kComplexityNormal;
codec_inst_.VP8()->numberOfTemporalLayers = 1; codec_settings_.VP8()->numberOfTemporalLayers = 1;
codec_inst_.VP8()->tl_factory = &tl_factory_; codec_settings_.VP8()->tl_factory = &tl_factory_;
// Calls before InitEncode(). // Calls before InitEncode().
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release()); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
int bit_rate = 300; int bit_rate = 300;
@ -242,14 +262,15 @@ TEST_F(TestVp8Impl, EncoderParameterTest) {
bitrate_allocation.SetBitrate(0, 0, bit_rate * 1000); bitrate_allocation.SetBitrate(0, 0, bit_rate * 1000);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED, EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED,
encoder_->SetRateAllocation(bitrate_allocation, encoder_->SetRateAllocation(bitrate_allocation,
codec_inst_.maxFramerate)); codec_settings_.maxFramerate));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->InitEncode(&codec_inst_, 1, 1440)); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1, 1440));
// Decoder parameter tests. // Decoder parameter tests.
// Calls before InitDecode(). // Calls before InitDecode().
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Release()); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Release());
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->InitDecode(&codec_inst_, 1)); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->InitDecode(&codec_settings_, 1));
} }
TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) { TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
@ -274,13 +295,13 @@ TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
#endif #endif
TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) { TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
SetUpEncodeDecode(); SetUpEncodeDecode();
encoder_->Encode(*input_frame_, NULL, NULL); encoder_->Encode(*input_frame_, nullptr, nullptr);
EXPECT_GT(WaitForEncodedFrame(), 0u); EXPECT_GT(WaitForEncodedFrame(), 0u);
// First frame should be a key frame. // First frame should be a key frame.
encoded_frame_._frameType = kVideoFrameKey; encoded_frame_._frameType = kVideoFrameKey;
encoded_frame_.ntp_time_ms_ = kTestNtpTimeMs; encoded_frame_.ntp_time_ms_ = kTestNtpTimeMs;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame_, false, NULL)); decoder_->Decode(encoded_frame_, false, nullptr));
EXPECT_GT(WaitForDecodedFrame(), 0u); EXPECT_GT(WaitForDecodedFrame(), 0u);
ASSERT_TRUE(decoded_frame_); ASSERT_TRUE(decoded_frame_);
// Compute PSNR on all planes (faster than SSIM). // Compute PSNR on all planes (faster than SSIM).
@ -296,23 +317,99 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
#endif #endif
TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) { TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) {
SetUpEncodeDecode(); SetUpEncodeDecode();
encoder_->Encode(*input_frame_, NULL, NULL); encoder_->Encode(*input_frame_, nullptr, nullptr);
EXPECT_GT(WaitForEncodedFrame(), 0u); EXPECT_GT(WaitForEncodedFrame(), 0u);
// Setting complete to false -> should return an error. // Setting complete to false -> should return an error.
encoded_frame_._completeFrame = false; encoded_frame_._completeFrame = false;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR, EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
decoder_->Decode(encoded_frame_, false, NULL)); decoder_->Decode(encoded_frame_, false, nullptr));
// Setting complete back to true. Forcing a delta frame. // Setting complete back to true. Forcing a delta frame.
encoded_frame_._frameType = kVideoFrameDelta; encoded_frame_._frameType = kVideoFrameDelta;
encoded_frame_._completeFrame = true; encoded_frame_._completeFrame = true;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR, EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
decoder_->Decode(encoded_frame_, false, NULL)); decoder_->Decode(encoded_frame_, false, nullptr));
// Now setting a key frame. // Now setting a key frame.
encoded_frame_._frameType = kVideoFrameKey; encoded_frame_._frameType = kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame_, false, NULL)); decoder_->Decode(encoded_frame_, false, nullptr));
ASSERT_TRUE(decoded_frame_); ASSERT_TRUE(decoded_frame_);
EXPECT_GT(I420PSNR(input_frame_.get(), &*decoded_frame_), 36); EXPECT_GT(I420PSNR(input_frame_.get(), &*decoded_frame_), 36);
} }
TEST_F(TestVp8Impl, EncoderRetainsRtpStateAfterRelease) {
SetUpEncodeDecode();
// Override default settings.
codec_settings_.VP8()->numberOfTemporalLayers = 2;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1, 1440));
// Temporal layer 0.
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr));
ASSERT_TRUE(WaitForEncodedFrame());
EXPECT_EQ(0, codec_specific_info_.codecSpecific.VP8.temporalIdx);
int16_t picture_id = codec_specific_info_.codecSpecific.VP8.pictureId;
int tl0_pic_idx = codec_specific_info_.codecSpecific.VP8.tl0PicIdx;
// Temporal layer 1.
input_frame_->set_timestamp(input_frame_->timestamp() +
kTimestampIncrementPerFrame);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr));
ExpectFrameWith((picture_id + 1) % (1 << 15), tl0_pic_idx, 1);
// Temporal layer 0.
input_frame_->set_timestamp(input_frame_->timestamp() +
kTimestampIncrementPerFrame);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr));
ExpectFrameWith((picture_id + 2) % (1 << 15), (tl0_pic_idx + 1) % (1 << 8),
0);
// Temporal layer 1.
input_frame_->set_timestamp(input_frame_->timestamp() +
kTimestampIncrementPerFrame);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr));
ExpectFrameWith((picture_id + 3) % (1 << 15), (tl0_pic_idx + 1) % (1 << 8),
1);
// Reinit.
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1, 1440));
// Temporal layer 0.
input_frame_->set_timestamp(input_frame_->timestamp() +
kTimestampIncrementPerFrame);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr));
ExpectFrameWith((picture_id + 4) % (1 << 15), (tl0_pic_idx + 2) % (1 << 8),
0);
// Temporal layer 1.
input_frame_->set_timestamp(input_frame_->timestamp() +
kTimestampIncrementPerFrame);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr));
ExpectFrameWith((picture_id + 5) % (1 << 15), (tl0_pic_idx + 2) % (1 << 8),
1);
// Temporal layer 0.
input_frame_->set_timestamp(input_frame_->timestamp() +
kTimestampIncrementPerFrame);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr));
ExpectFrameWith((picture_id + 6) % (1 << 15), (tl0_pic_idx + 3) % (1 << 8),
0);
// Temporal layer 1.
input_frame_->set_timestamp(input_frame_->timestamp() +
kTimestampIncrementPerFrame);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr));
ExpectFrameWith((picture_id + 7) % (1 << 15), (tl0_pic_idx + 3) % (1 << 8),
1);
}
} // namespace webrtc } // namespace webrtc

View File

@ -21,16 +21,17 @@
#include "libyuv/convert.h" // NOLINT #include "libyuv/convert.h" // NOLINT
#include "webrtc/base/checks.h" #include "webrtc/base/checks.h"
#include "webrtc/base/random.h"
#include "webrtc/base/timeutils.h" #include "webrtc/base/timeutils.h"
#include "webrtc/base/trace_event.h" #include "webrtc/base/trace_event.h"
#include "webrtc/common_types.h" #include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h" #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/include/module_common_types.h" #include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h" #include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h" #include "webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h"
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_rate_allocator.h" #include "webrtc/modules/video_coding/codecs/vp8/simulcast_rate_allocator.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h" #include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/system_wrappers/include/clock.h" #include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/field_trial.h" #include "webrtc/system_wrappers/include/field_trial.h"
#include "webrtc/system_wrappers/include/metrics.h" #include "webrtc/system_wrappers/include/metrics.h"
@ -165,10 +166,12 @@ VP8EncoderImpl::VP8EncoderImpl()
number_of_cores_(0), number_of_cores_(0),
rc_max_intra_target_(0), rc_max_intra_target_(0),
key_frame_request_(kMaxSimulcastStreams, false) { key_frame_request_(kMaxSimulcastStreams, false) {
uint32_t seed = rtc::Time32(); Random random(rtc::TimeMicros());
srand(seed);
picture_id_.reserve(kMaxSimulcastStreams); picture_id_.reserve(kMaxSimulcastStreams);
for (int i = 0; i < kMaxSimulcastStreams; ++i) {
picture_id_.push_back(random.Rand<uint16_t>() & 0x7FFF);
tl0_pic_idx_.push_back(random.Rand<uint8_t>());
}
temporal_layers_.reserve(kMaxSimulcastStreams); temporal_layers_.reserve(kMaxSimulcastStreams);
raw_images_.reserve(kMaxSimulcastStreams); raw_images_.reserve(kMaxSimulcastStreams);
encoded_images_.reserve(kMaxSimulcastStreams); encoded_images_.reserve(kMaxSimulcastStreams);
@ -205,10 +208,10 @@ int VP8EncoderImpl::Release() {
vpx_img_free(&raw_images_.back()); vpx_img_free(&raw_images_.back());
raw_images_.pop_back(); raw_images_.pop_back();
} }
while (!temporal_layers_.empty()) { for (size_t i = 0; i < temporal_layers_.size(); ++i) {
delete temporal_layers_.back(); tl0_pic_idx_[i] = temporal_layers_[i]->Tl0PicIdx();
temporal_layers_.pop_back();
} }
temporal_layers_.clear();
inited_ = false; inited_ = false;
return ret_val; return ret_val;
} }
@ -293,14 +296,15 @@ void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
RTC_DCHECK(codec.VP8().tl_factory != nullptr); RTC_DCHECK(codec.VP8().tl_factory != nullptr);
const TemporalLayersFactory* tl_factory = codec.VP8().tl_factory; const TemporalLayersFactory* tl_factory = codec.VP8().tl_factory;
if (num_streams == 1) { if (num_streams == 1) {
temporal_layers_.push_back( temporal_layers_.emplace_back(
tl_factory->Create(0, num_temporal_layers, rand())); tl_factory->Create(0, num_temporal_layers, tl0_pic_idx_[0]));
} else { } else {
for (int i = 0; i < num_streams; ++i) { for (int i = 0; i < num_streams; ++i) {
RTC_CHECK_GT(num_temporal_layers, 0); RTC_CHECK_GT(num_temporal_layers, 0);
int layers = std::max(static_cast<uint8_t>(1), int layers = std::max(static_cast<uint8_t>(1),
codec.simulcastStream[i].numberOfTemporalLayers); codec.simulcastStream[i].numberOfTemporalLayers);
temporal_layers_.push_back(tl_factory->Create(i, layers, rand())); temporal_layers_.emplace_back(
tl_factory->Create(i, layers, tl0_pic_idx_[i]));
} }
} }
} }
@ -357,7 +361,6 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
codec_.simulcastStream[0].height = codec_.height; codec_.simulcastStream[0].height = codec_.height;
} }
picture_id_.resize(number_of_streams);
encoded_images_.resize(number_of_streams); encoded_images_.resize(number_of_streams);
encoders_.resize(number_of_streams); encoders_.resize(number_of_streams);
configurations_.resize(number_of_streams); configurations_.resize(number_of_streams);
@ -382,8 +385,6 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
downsampling_factors_[number_of_streams - 1].den = 1; downsampling_factors_[number_of_streams - 1].den = 1;
} }
for (int i = 0; i < number_of_streams; ++i) { for (int i = 0; i < number_of_streams; ++i) {
// Random start, 16 bits is enough.
picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF; // NOLINT
// allocate memory for encoded image // allocate memory for encoded image
if (encoded_images_[i]._buffer != NULL) { if (encoded_images_[i]._buffer != NULL) {
delete[] encoded_images_[i]._buffer; delete[] encoded_images_[i]._buffer;

View File

@ -100,8 +100,9 @@ class VP8EncoderImpl : public VP8Encoder {
int cpu_speed_default_; int cpu_speed_default_;
int number_of_cores_; int number_of_cores_;
uint32_t rc_max_intra_target_; uint32_t rc_max_intra_target_;
std::vector<TemporalLayers*> temporal_layers_; std::vector<std::unique_ptr<TemporalLayers>> temporal_layers_;
std::vector<uint16_t> picture_id_; std::vector<uint16_t> picture_id_;
std::vector<uint8_t> tl0_pic_idx_;
std::vector<bool> key_frame_request_; std::vector<bool> key_frame_request_;
std::vector<bool> send_stream_; std::vector<bool> send_stream_;
std::vector<int> cpu_speed_; std::vector<int> cpu_speed_;

View File

@ -14,6 +14,10 @@
namespace webrtc { namespace webrtc {
namespace {
constexpr uint32_t kTimestampIncrementPerFrame = 3000;
} // namespace
class TestVp9Impl : public VideoCodecTest { class TestVp9Impl : public VideoCodecTest {
protected: protected:
VideoEncoder* CreateEncoder() override { return VP9Encoder::Create(); } VideoEncoder* CreateEncoder() override { return VP9Encoder::Create(); }
@ -21,11 +25,22 @@ class TestVp9Impl : public VideoCodecTest {
VideoDecoder* CreateDecoder() override { return VP9Decoder::Create(); } VideoDecoder* CreateDecoder() override { return VP9Decoder::Create(); }
VideoCodec codec_settings() override { VideoCodec codec_settings() override {
VideoCodec codec_inst; VideoCodec codec_settings;
codec_inst.codecType = webrtc::kVideoCodecVP9; codec_settings.codecType = webrtc::kVideoCodecVP9;
codec_inst.VP9()->numberOfTemporalLayers = 1; codec_settings.VP9()->numberOfTemporalLayers = 1;
codec_inst.VP9()->numberOfSpatialLayers = 1; codec_settings.VP9()->numberOfSpatialLayers = 1;
return codec_inst; return codec_settings;
}
void ExpectFrameWith(int16_t picture_id,
int tl0_pic_idx,
uint8_t temporal_idx) {
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
EXPECT_EQ(picture_id, codec_specific_info.codecSpecific.VP9.picture_id);
EXPECT_EQ(tl0_pic_idx, codec_specific_info.codecSpecific.VP9.tl0_pic_idx);
EXPECT_EQ(temporal_idx, codec_specific_info.codecSpecific.VP9.temporal_idx);
} }
}; };
@ -38,7 +53,8 @@ TEST_F(TestVp9Impl, EncodeDecode) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr)); encoder_->Encode(*input_frame_, nullptr, nullptr));
EncodedImage encoded_frame; EncodedImage encoded_frame;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame)); CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame. // First frame should be a key frame.
encoded_frame._frameType = kVideoFrameKey; encoded_frame._frameType = kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
@ -54,7 +70,8 @@ TEST_F(TestVp9Impl, DecodedQpEqualsEncodedQp) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr)); encoder_->Encode(*input_frame_, nullptr, nullptr));
EncodedImage encoded_frame; EncodedImage encoded_frame;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame)); CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame. // First frame should be a key frame.
encoded_frame._frameType = kVideoFrameKey; encoded_frame._frameType = kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
@ -67,4 +84,85 @@ TEST_F(TestVp9Impl, DecodedQpEqualsEncodedQp) {
EXPECT_EQ(encoded_frame.qp_, *decoded_qp); EXPECT_EQ(encoded_frame.qp_, *decoded_qp);
} }
// Verifies that Release() + InitEncode() does not re-randomize the RTP state
// (picture_id / tl0_pic_idx): the sequence must continue without a jump.
TEST_F(TestVp9Impl, EncoderRetainsRtpStateAfterRelease) {
  // Override default settings: two temporal layers, non-flexible mode, so
  // that both picture_id and tl0_pic_idx are exercised.
  codec_settings_.VP9()->numberOfTemporalLayers = 2;
  // Tl0PidIdx is only used in non-flexible mode.
  codec_settings_.VP9()->flexibleMode = false;
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
                                 0 /* max payload size (unused) */));

  // Frame 0 (temporal layer 0): capture the baseline RTP state.
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            encoder_->Encode(*input_frame_, nullptr, nullptr));
  EncodedImage encoded_frame;
  CodecSpecificInfo codec_specific_info;
  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
  int16_t picture_id = codec_specific_info.codecSpecific.VP9.picture_id;
  int tl0_pic_idx = codec_specific_info.codecSpecific.VP9.tl0_pic_idx;
  EXPECT_EQ(0, codec_specific_info.codecSpecific.VP9.temporal_idx);

  // Encodes frame |i| (1-based, counted from the baseline frame above) and
  // verifies its RTP state. With the alternating 0,1,0,1,... temporal
  // pattern, frame |i| is in temporal layer i % 2, and tl0_pic_idx has been
  // incremented i / 2 times. picture_id wraps at 2^15 (15-bit field),
  // tl0_pic_idx at 2^8 (8-bit field).
  auto encode_and_verify_frame = [&](int i) {
    input_frame_->set_timestamp(input_frame_->timestamp() +
                                kTimestampIncrementPerFrame);
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              encoder_->Encode(*input_frame_, nullptr, nullptr));
    ExpectFrameWith((picture_id + i) % (1 << 15),
                    (tl0_pic_idx + i / 2) % (1 << 8), i % 2);
  };

  // Frames 1-3 before the reinit.
  for (int i = 1; i <= 3; ++i)
    encode_and_verify_frame(i);

  // Reinit. The encoder must carry the RTP state across Release()/InitEncode()
  // instead of re-randomizing picture_id/tl0_pic_idx mid-stream.
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
                                 0 /* max payload size (unused) */));

  // Frames 4-7 after the reinit: the sequence continues without a jump.
  for (int i = 4; i <= 7; ++i)
    encode_and_verify_frame(i);
}
} // namespace webrtc } // namespace webrtc

View File

@ -22,9 +22,10 @@
#include "vpx/vp8dx.h" #include "vpx/vp8dx.h"
#include "webrtc/base/checks.h" #include "webrtc/base/checks.h"
#include "webrtc/base/timeutils.h"
#include "webrtc/base/keep_ref_until_done.h" #include "webrtc/base/keep_ref_until_done.h"
#include "webrtc/base/logging.h" #include "webrtc/base/logging.h"
#include "webrtc/base/random.h"
#include "webrtc/base/timeutils.h"
#include "webrtc/base/trace_event.h" #include "webrtc/base/trace_event.h"
#include "webrtc/common_video/include/video_frame_buffer.h" #include "webrtc/common_video/include/video_frame_buffer.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h" #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
@ -67,14 +68,12 @@ VP9EncoderImpl::VP9EncoderImpl()
encoded_complete_callback_(NULL), encoded_complete_callback_(NULL),
inited_(false), inited_(false),
timestamp_(0), timestamp_(0),
picture_id_(0),
cpu_speed_(3), cpu_speed_(3),
rc_max_intra_target_(0), rc_max_intra_target_(0),
encoder_(NULL), encoder_(NULL),
config_(NULL), config_(NULL),
raw_(NULL), raw_(NULL),
input_image_(NULL), input_image_(NULL),
tl0_pic_idx_(0),
frames_since_kf_(0), frames_since_kf_(0),
num_temporal_layers_(0), num_temporal_layers_(0),
num_spatial_layers_(0), num_spatial_layers_(0),
@ -84,8 +83,10 @@ VP9EncoderImpl::VP9EncoderImpl()
spatial_layer_(new ScreenshareLayersVP9(2)) { spatial_layer_(new ScreenshareLayersVP9(2)) {
memset(&codec_, 0, sizeof(codec_)); memset(&codec_, 0, sizeof(codec_));
memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t)); memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t));
uint32_t seed = rtc::Time32();
srand(seed); Random random(rtc::TimeMicros());
picture_id_ = random.Rand<uint16_t>() & 0x7FFF;
tl0_pic_idx_ = random.Rand<uint8_t>();
} }
VP9EncoderImpl::~VP9EncoderImpl() { VP9EncoderImpl::~VP9EncoderImpl() {
@ -262,10 +263,6 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
} }
if (encoder_ == NULL) { if (encoder_ == NULL) {
encoder_ = new vpx_codec_ctx_t; encoder_ = new vpx_codec_ctx_t;
// Only randomize pid/tl0 the first time the encoder is initialized
// in order to not make random jumps mid-stream.
picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF; // NOLINT
tl0_pic_idx_ = static_cast<uint8_t>(rand()); // NOLINT
} }
if (config_ == NULL) { if (config_ == NULL) {
config_ = new vpx_codec_enc_cfg_t; config_ = new vpx_codec_enc_cfg_t;

View File

@ -107,7 +107,6 @@ class VP9EncoderImpl : public VP9Encoder {
VideoCodec codec_; VideoCodec codec_;
bool inited_; bool inited_;
int64_t timestamp_; int64_t timestamp_;
uint16_t picture_id_;
int cpu_speed_; int cpu_speed_;
uint32_t rc_max_intra_target_; uint32_t rc_max_intra_target_;
vpx_codec_ctx_t* encoder_; vpx_codec_ctx_t* encoder_;
@ -117,7 +116,6 @@ class VP9EncoderImpl : public VP9Encoder {
const VideoFrame* input_image_; const VideoFrame* input_image_;
GofInfoVP9 gof_; // Contains each frame's temporal information for GofInfoVP9 gof_; // Contains each frame's temporal information for
// non-flexible mode. // non-flexible mode.
uint8_t tl0_pic_idx_; // Only used in non-flexible mode.
size_t frames_since_kf_; size_t frames_since_kf_;
uint8_t num_temporal_layers_; uint8_t num_temporal_layers_;
uint8_t num_spatial_layers_; uint8_t num_spatial_layers_;
@ -129,6 +127,10 @@ class VP9EncoderImpl : public VP9Encoder {
uint8_t num_ref_pics_[kMaxVp9NumberOfSpatialLayers]; uint8_t num_ref_pics_[kMaxVp9NumberOfSpatialLayers];
uint8_t p_diff_[kMaxVp9NumberOfSpatialLayers][kMaxVp9RefPics]; uint8_t p_diff_[kMaxVp9NumberOfSpatialLayers][kMaxVp9RefPics];
std::unique_ptr<ScreenshareLayersVP9> spatial_layer_; std::unique_ptr<ScreenshareLayersVP9> spatial_layer_;
// RTP state.
uint16_t picture_id_;
uint8_t tl0_pic_idx_; // Only used in non-flexible mode.
}; };
class VP9DecoderImpl : public VP9Decoder { class VP9DecoderImpl : public VP9Decoder {

View File

@ -36,6 +36,7 @@ class MockTemporalLayers : public TemporalLayers {
void(bool, CodecSpecificInfoVP8*, uint32_t)); void(bool, CodecSpecificInfoVP8*, uint32_t));
MOCK_METHOD2(FrameEncoded, void(unsigned int, int)); MOCK_METHOD2(FrameEncoded, void(unsigned int, int));
MOCK_CONST_METHOD0(CurrentLayerId, int()); MOCK_CONST_METHOD0(CurrentLayerId, int());
MOCK_CONST_METHOD0(Tl0PicIdx, uint8_t());
}; };
} // namespace } // namespace

View File

@ -25,6 +25,7 @@
#include "webrtc/base/bind.h" #include "webrtc/base/bind.h"
#include "webrtc/base/checks.h" #include "webrtc/base/checks.h"
#include "webrtc/base/logging.h" #include "webrtc/base/logging.h"
#include "webrtc/base/random.h"
#include "webrtc/base/sequenced_task_checker.h" #include "webrtc/base/sequenced_task_checker.h"
#include "webrtc/base/task_queue.h" #include "webrtc/base/task_queue.h"
#include "webrtc/base/thread.h" #include "webrtc/base/thread.h"
@ -222,7 +223,6 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder {
int height_; // Frame height in pixels. int height_; // Frame height in pixels.
bool inited_; bool inited_;
bool use_surface_; bool use_surface_;
uint16_t picture_id_;
enum libyuv::FourCC encoder_fourcc_; // Encoder color space format. enum libyuv::FourCC encoder_fourcc_; // Encoder color space format.
int last_set_bitrate_kbps_; // Last-requested bitrate in kbps. int last_set_bitrate_kbps_; // Last-requested bitrate in kbps.
int last_set_fps_; // Last-requested frame rate. int last_set_fps_; // Last-requested frame rate.
@ -266,6 +266,7 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder {
// |input_frame_infos_|. // |input_frame_infos_|.
webrtc::VideoRotation output_rotation_; // Last output frame rotation from webrtc::VideoRotation output_rotation_; // Last output frame rotation from
// |input_frame_infos_|. // |input_frame_infos_|.
// Frame size in bytes fed to MediaCodec. // Frame size in bytes fed to MediaCodec.
int yuv_size_; int yuv_size_;
// True only when between a callback_->OnEncodedImage() call return a positive // True only when between a callback_->OnEncodedImage() call return a positive
@ -279,7 +280,6 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder {
// VP9 variables to populate codec specific structure. // VP9 variables to populate codec specific structure.
webrtc::GofInfoVP9 gof_; // Contains each frame's temporal information for webrtc::GofInfoVP9 gof_; // Contains each frame's temporal information for
// non-flexible VP9 mode. // non-flexible VP9 mode.
uint8_t tl0_pic_idx_;
size_t gof_idx_; size_t gof_idx_;
// EGL context - owned by factory, should not be allocated/destroyed // EGL context - owned by factory, should not be allocated/destroyed
@ -293,6 +293,10 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder {
int frames_received_since_last_key_; int frames_received_since_last_key_;
webrtc::VideoCodecMode codec_mode_; webrtc::VideoCodecMode codec_mode_;
// RTP state.
uint16_t picture_id_;
uint8_t tl0_pic_idx_;
bool sw_fallback_required_; bool sw_fallback_required_;
// All other member variables should be before WeakPtrFactory. Valid only from // All other member variables should be before WeakPtrFactory. Valid only from
@ -324,7 +328,6 @@ MediaCodecVideoEncoder::MediaCodecVideoEncoder(JNIEnv* jni,
"()V"))), "()V"))),
inited_(false), inited_(false),
use_surface_(false), use_surface_(false),
picture_id_(0),
egl_context_(egl_context), egl_context_(egl_context),
sw_fallback_required_(false) { sw_fallback_required_(false) {
encoder_queue_checker_.Detach(); encoder_queue_checker_.Detach();
@ -375,7 +378,10 @@ MediaCodecVideoEncoder::MediaCodecVideoEncoder(JNIEnv* jni,
ALOGW << "MediaCodecVideoEncoder ctor failed."; ALOGW << "MediaCodecVideoEncoder ctor failed.";
ProcessHWError(true /* reset_if_fallback_unavailable */); ProcessHWError(true /* reset_if_fallback_unavailable */);
} }
srand(rtc::Time32());
webrtc::Random random(rtc::TimeMicros());
picture_id_ = random.Rand<uint16_t>() & 0x7FFF;
tl0_pic_idx_ = random.Rand<uint8_t>();
} }
int32_t MediaCodecVideoEncoder::InitEncode( int32_t MediaCodecVideoEncoder::InitEncode(
@ -552,10 +558,7 @@ int32_t MediaCodecVideoEncoder::InitEncodeInternal(int width,
input_frame_infos_.clear(); input_frame_infos_.clear();
drop_next_input_frame_ = false; drop_next_input_frame_ = false;
use_surface_ = use_surface; use_surface_ = use_surface;
// TODO(ilnik): Use rand_r() instead to avoid LINT warnings below.
picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF; // NOLINT
gof_.SetGofInfoVP9(webrtc::TemporalStructureMode::kTemporalStructureMode1); gof_.SetGofInfoVP9(webrtc::TemporalStructureMode::kTemporalStructureMode1);
tl0_pic_idx_ = static_cast<uint8_t>(rand()); // NOLINT
gof_idx_ = 0; gof_idx_ = 0;
last_frame_received_ms_ = -1; last_frame_received_ms_ = -1;
frames_received_since_last_key_ = kMinKeyFrameInterval; frames_received_since_last_key_ = kMinKeyFrameInterval;