Make VideoFrameType an enum class, and move to separate file and target
Bug: webrtc:5876, webrtc:6883
Change-Id: I1435cfa9e8e54c4ba2978261048ff3fbb993ce0e
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/126225
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27239}
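For reference, a minimal sketch of what the relocated type could look like as a scoped enum. Only the kVideoFrameKey and kVideoFrameDelta enumerators are visible in the diff below; the file name, any additional enumerators, and explicit values are assumptions.

// Sketch only: enumerators other than kVideoFrameKey/kVideoFrameDelta and any
// explicit values are assumed rather than taken from this change.
namespace webrtc {

enum class VideoFrameType {
  kEmptyFrame,       // assumed enumerator, not referenced in this diff
  kVideoFrameKey,    // set/checked by the VP9 encoder, decoder and tests below
  kVideoFrameDelta,  // set/checked by the VP9 encoder and tests below
};

}  // namespace webrtc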
@@ -127,7 +127,7 @@ TEST_F(TestVp9Impl, EncodeDecode) {
   CodecSpecificInfo codec_specific_info;
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
@@ -227,7 +227,7 @@ TEST_F(TestVp9Impl, DecodedQpEqualsEncodedQp) {
   CodecSpecificInfo codec_specific_info;
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
@@ -566,15 +566,19 @@ TEST_F(TestVp9Impl,
       const bool is_first_upper_layer_frame = (sl_idx > 0 && frame_num == 0);
       if (is_first_upper_layer_frame) {
         if (inter_layer_pred == InterLayerPredMode::kOn) {
-          EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
+          EXPECT_EQ(encoded_frame[0]._frameType,
+                    VideoFrameType::kVideoFrameDelta);
         } else {
-          EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameKey);
+          EXPECT_EQ(encoded_frame[0]._frameType,
+                    VideoFrameType::kVideoFrameKey);
         }
       } else if (sl_idx == 0 && frame_num == 0) {
-        EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameKey);
+        EXPECT_EQ(encoded_frame[0]._frameType,
+                  VideoFrameType::kVideoFrameKey);
       } else {
         for (size_t i = 0; i <= sl_idx; ++i) {
-          EXPECT_EQ(encoded_frame[i]._frameType, kVideoFrameDelta);
+          EXPECT_EQ(encoded_frame[i]._frameType,
+                    VideoFrameType::kVideoFrameDelta);
         }
       }
     }
@@ -623,7 +627,7 @@ TEST_F(TestVp9Impl,
 
       for (size_t i = 0; i <= sl_idx; ++i) {
         const bool is_keyframe =
-            encoded_frame[0]._frameType == kVideoFrameKey;
+            encoded_frame[0]._frameType == VideoFrameType::kVideoFrameKey;
         const bool is_first_upper_layer_frame =
             (i == sl_idx && frame_num == 0);
         // Interframe references are there, unless it's a keyframe,
@@ -693,7 +697,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerInTheSameGof) {
             encoder_->Encode(*NextInputFrame(), nullptr));
   ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
   ASSERT_EQ(codec_specific_info.size(), 1u);
-  EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
+  EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 1);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
 
@@ -712,7 +716,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerInTheSameGof) {
            encoder_->Encode(*NextInputFrame(), nullptr));
   ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
   ASSERT_EQ(codec_specific_info.size(), 2u);
-  EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
+  EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
   EXPECT_EQ(codec_specific_info[1].codecSpecific.VP9.inter_pic_predicted, true);
@@ -772,7 +776,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerAccrossGof) {
              encoder_->Encode(*NextInputFrame(), nullptr));
     ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
     ASSERT_EQ(codec_specific_info.size(), 1u);
-    EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
+    EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
     EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 1 - i % 2);
     EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted,
               true);
@@ -793,7 +797,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerAccrossGof) {
            encoder_->Encode(*NextInputFrame(), nullptr));
   ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
   ASSERT_EQ(codec_specific_info.size(), 2u);
-  EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
+  EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
   EXPECT_EQ(codec_specific_info[1].codecSpecific.VP9.inter_pic_predicted,
@@ -1442,7 +1446,7 @@ TEST_F(TestVp9ImplProfile2, EncodeDecode) {
   CodecSpecificInfo codec_specific_info;
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
@@ -727,7 +727,7 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
 
   // We only support one stream at the moment.
   if (frame_types && !frame_types->empty()) {
-    if ((*frame_types)[0] == kVideoFrameKey) {
+    if ((*frame_types)[0] == VideoFrameType::kVideoFrameKey) {
      force_key_frame_ = true;
    }
  }
@@ -1324,9 +1324,9 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   RTC_DCHECK(is_key_frame || !force_key_frame_);
 
   // Check if encoded frame is a key frame.
-  encoded_image_._frameType = kVideoFrameDelta;
+  encoded_image_._frameType = VideoFrameType::kVideoFrameDelta;
   if (is_key_frame) {
-    encoded_image_._frameType = kVideoFrameKey;
+    encoded_image_._frameType = VideoFrameType::kVideoFrameKey;
     force_key_frame_ = false;
   }
   RTC_DCHECK_LE(encoded_image_.size(), encoded_image_.capacity());
@@ -1539,7 +1539,7 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
   }
   // Always start with a complete key frame.
   if (key_frame_required_) {
-    if (input_image._frameType != kVideoFrameKey)
+    if (input_image._frameType != VideoFrameType::kVideoFrameKey)
       return WEBRTC_VIDEO_CODEC_ERROR;
     // We have a key frame - is it complete?
     if (input_image._completeFrame) {
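Why every call site above needs the VideoFrameType:: qualifier: with an enum class, enumerators are scoped to the type and no longer convert implicitly to int, so the old unqualified kVideoFrameKey / kVideoFrameDelta names stop compiling. A standalone illustration of that language rule (not WebRTC code; all names below are made up):

// Unscoped enumerators leak into the enclosing scope; scoped ones do not.
enum OldStyle { kOldKey };
enum class NewStyle { kNewKey };

int main() {
  OldStyle a = kOldKey;            // OK: unqualified name is visible
  // NewStyle b = kNewKey;         // would not compile: name is scoped
  NewStyle b = NewStyle::kNewKey;  // OK: qualified access required
  return (a == kOldKey && b == NewStyle::kNewKey) ? 0 : 1;
}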