Make VideoFrameType an enum class, and move to separate file and target

Bug: webrtc:5876, webrtc:6883
Change-Id: I1435cfa9e8e54c4ba2978261048ff3fbb993ce0e
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/126225
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27239}
This commit is contained in:
Niels Möller
2019-03-21 15:43:58 +01:00
committed by Commit Bot
parent 3198fa4956
commit 8f7ce222e7
85 changed files with 685 additions and 589 deletions

View File

@ -67,17 +67,17 @@ int NumberOfThreads(int width, int height, int number_of_cores) {
// Maps a codec-level frame type (presumably OpenH264's EVideoFrameType —
// confirm against the encoder wrapper's includes) to the webrtc
// VideoFrameType enum class.
//
// IDR frames map to key frames; skip/I/P/IP-mixed frames map to delta
// frames. videoFrameTypeInvalid falls through to RTC_NOTREACHED and, in
// non-debug builds, returns kEmptyFrame as a defensive fallback.
//
// Note: the rendered diff contained both the pre-change (unscoped
// kVideoFrameKey) and post-change lines; this is the coherent post-change
// version using the scoped enum class values.
VideoFrameType ConvertToVideoFrameType(EVideoFrameType type) {
  switch (type) {
    case videoFrameTypeIDR:
      return VideoFrameType::kVideoFrameKey;
    case videoFrameTypeSkip:
    case videoFrameTypeI:
    case videoFrameTypeP:
    case videoFrameTypeIPMixed:
      return VideoFrameType::kVideoFrameDelta;
    case videoFrameTypeInvalid:
      break;
  }
  RTC_NOTREACHED() << "Unexpected/invalid frame type: " << type;
  return VideoFrameType::kEmptyFrame;
}
} // namespace
@ -409,7 +409,8 @@ int32_t H264EncoderImpl::Encode(
if (!send_key_frame && frame_types) {
for (size_t i = 0; i < frame_types->size() && i < configurations_.size();
++i) {
if ((*frame_types)[i] == kVideoFrameKey && configurations_[i].sending) {
if ((*frame_types)[i] == VideoFrameType::kVideoFrameKey &&
configurations_[i].sending) {
send_key_frame = true;
break;
}
@ -462,7 +463,7 @@ int32_t H264EncoderImpl::Encode(
}
if (frame_types != nullptr) {
// Skip frame?
if ((*frame_types)[i] == kEmptyFrame) {
if ((*frame_types)[i] == VideoFrameType::kEmptyFrame) {
continue;
}
}

View File

@ -70,7 +70,7 @@ TEST_F(TestH264Impl, MAYBE_EncodeDecode) {
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame.
encoded_frame._frameType = kVideoFrameKey;
encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr, 0));
std::unique_ptr<VideoFrame> decoded_frame;
@ -97,7 +97,7 @@ TEST_F(TestH264Impl, MAYBE_DecodedQpEqualsEncodedQp) {
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame.
encoded_frame._frameType = kVideoFrameKey;
encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr, 0));
std::unique_ptr<VideoFrame> decoded_frame;

View File

@ -88,7 +88,8 @@ int PackFrameHeader(uint8_t* buffer,
ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, frame_header.codec_type);
offset += sizeof(uint8_t);
ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, frame_header.frame_type);
ByteWriter<uint8_t>::WriteBigEndian(
buffer + offset, static_cast<uint8_t>(frame_header.frame_type));
offset += sizeof(uint8_t);
RTC_DCHECK_EQ(offset, kMultiplexImageComponentHeaderSize);

View File

@ -145,9 +145,9 @@ int MultiplexEncoderAdapter::Encode(
std::vector<VideoFrameType> adjusted_frame_types;
if (key_frame_interval_ > 0 && picture_index_ % key_frame_interval_ == 0) {
adjusted_frame_types.push_back(kVideoFrameKey);
adjusted_frame_types.push_back(VideoFrameType::kVideoFrameKey);
} else {
adjusted_frame_types.push_back(kVideoFrameDelta);
adjusted_frame_types.push_back(VideoFrameType::kVideoFrameDelta);
}
const bool has_alpha = input_image.video_frame_buffer()->type() ==
VideoFrameBuffer::Type::kI420A;

View File

@ -276,7 +276,7 @@ TEST_P(TestMultiplexAdapter, CheckSingleFrameEncodedBitstream) {
const MultiplexImageComponent& component = unpacked_frame.image_components[0];
EXPECT_EQ(0, component.component_index);
EXPECT_NE(nullptr, component.encoded_image.data());
EXPECT_EQ(kVideoFrameKey, component.encoded_image._frameType);
EXPECT_EQ(VideoFrameType::kVideoFrameKey, component.encoded_image._frameType);
}
TEST_P(TestMultiplexAdapter, CheckDoubleFramesEncodedBitstream) {
@ -299,7 +299,8 @@ TEST_P(TestMultiplexAdapter, CheckDoubleFramesEncodedBitstream) {
unpacked_frame.image_components[i];
EXPECT_EQ(i, component.component_index);
EXPECT_NE(nullptr, component.encoded_image.data());
EXPECT_EQ(kVideoFrameKey, component.encoded_image._frameType);
EXPECT_EQ(VideoFrameType::kVideoFrameKey,
component.encoded_image._frameType);
}
}
@ -314,7 +315,9 @@ TEST_P(TestMultiplexAdapter, ImageIndexIncreases) {
const MultiplexImage& unpacked_frame =
MultiplexEncodedImagePacker::Unpack(encoded_frame);
EXPECT_EQ(i, unpacked_frame.image_index);
EXPECT_EQ(i ? kVideoFrameDelta : kVideoFrameKey, encoded_frame._frameType);
EXPECT_EQ(
i ? VideoFrameType::kVideoFrameDelta : VideoFrameType::kVideoFrameKey,
encoded_frame._frameType);
}
}

View File

@ -332,11 +332,11 @@ void VideoCodecTestFixtureImpl::H264KeyframeChecker::CheckEncodedFrame(
contains_idr = true;
}
}
if (encoded_frame._frameType == kVideoFrameKey) {
if (encoded_frame._frameType == VideoFrameType::kVideoFrameKey) {
EXPECT_TRUE(contains_sps) << "Keyframe should contain SPS.";
EXPECT_TRUE(contains_pps) << "Keyframe should contain PPS.";
EXPECT_TRUE(contains_idr) << "Keyframe should contain IDR.";
} else if (encoded_frame._frameType == kVideoFrameDelta) {
} else if (encoded_frame._frameType == VideoFrameType::kVideoFrameDelta) {
EXPECT_FALSE(contains_sps) << "Delta frame should not contain SPS.";
EXPECT_FALSE(contains_pps) << "Delta frame should not contain PPS.";
EXPECT_FALSE(contains_idr) << "Delta frame should not contain IDR.";

View File

@ -222,7 +222,7 @@ VideoStatistics VideoCodecTestStatsImpl::SliceAndCalcVideoStatistic(
if (frame_stat.encoding_successful) {
++video_stat.num_encoded_frames;
if (frame_stat.frame_type == kVideoFrameKey) {
if (frame_stat.frame_type == VideoFrameType::kVideoFrameKey) {
key_frame_size_bytes.AddSample(frame_stat.length_bytes);
++video_stat.num_key_frames;
} else {

View File

@ -287,8 +287,9 @@ void VideoProcessor::ProcessFrame() {
// Encode.
const std::vector<VideoFrameType> frame_types =
(frame_number == 0) ? std::vector<VideoFrameType>{kVideoFrameKey}
: std::vector<VideoFrameType>{kVideoFrameDelta};
(frame_number == 0)
? std::vector<VideoFrameType>{VideoFrameType::kVideoFrameKey}
: std::vector<VideoFrameType>{VideoFrameType::kVideoFrameDelta};
const int encode_return_code = encoder_->Encode(input_frame, &frame_types);
for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
FrameStatistics* frame_stat = stats_->GetFrame(frame_number, i);

View File

@ -209,7 +209,7 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
// Always start with a complete key frame.
if (key_frame_required_) {
if (input_image._frameType != kVideoFrameKey)
if (input_image._frameType != VideoFrameType::kVideoFrameKey)
return WEBRTC_VIDEO_CODEC_ERROR;
// We have a key frame - is it complete?
if (input_image._completeFrame) {
@ -220,7 +220,8 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
}
// Restrict error propagation using key frame requests.
// Reset on a key frame refresh.
if (input_image._frameType == kVideoFrameKey && input_image._completeFrame) {
if (input_image._frameType == VideoFrameType::kVideoFrameKey &&
input_image._completeFrame) {
propagation_cnt_ = -1;
// Start count on first loss.
} else if ((!input_image._completeFrame || missing_frames) &&

View File

@ -756,7 +756,8 @@ int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
if (!send_key_frame && frame_types) {
for (size_t i = 0; i < frame_types->size() && i < send_stream_.size();
++i) {
if ((*frame_types)[i] == kVideoFrameKey && send_stream_[i]) {
if ((*frame_types)[i] == VideoFrameType::kVideoFrameKey &&
send_stream_[i]) {
send_key_frame = true;
break;
}
@ -925,7 +926,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
++encoder_idx, --stream_idx) {
vpx_codec_iter_t iter = NULL;
encoded_images_[encoder_idx].set_size(0);
encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
encoded_images_[encoder_idx]._frameType = VideoFrameType::kVideoFrameDelta;
CodecSpecificInfo codec_specific;
const vpx_codec_cx_pkt_t* pkt = NULL;
while ((pkt = libvpx_->codec_get_cx_data(&encoders_[encoder_idx], &iter)) !=
@ -947,7 +948,8 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
if ((pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT) == 0) {
// check if encoded frame is a key frame
if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
encoded_images_[encoder_idx]._frameType = kVideoFrameKey;
encoded_images_[encoder_idx]._frameType =
VideoFrameType::kVideoFrameKey;
}
encoded_images_[encoder_idx].SetSpatialIndex(stream_idx);
PopulateCodecSpecific(&codec_specific, *pkt, stream_idx, encoder_idx,

View File

@ -209,7 +209,7 @@ TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
// First frame should be a key frame.
encoded_frame._frameType = kVideoFrameKey;
encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr, -1));
std::unique_ptr<VideoFrame> decoded_frame;
@ -323,7 +323,7 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
// First frame should be a key frame.
encoded_frame._frameType = kVideoFrameKey;
encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
encoded_frame.ntp_time_ms_ = kTestNtpTimeMs;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr, -1));
@ -354,12 +354,12 @@ TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
decoder_->Decode(encoded_frame, false, nullptr, -1));
// Setting complete back to true. Forcing a delta frame.
encoded_frame._frameType = kVideoFrameDelta;
encoded_frame._frameType = VideoFrameType::kVideoFrameDelta;
encoded_frame._completeFrame = true;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
decoder_->Decode(encoded_frame, false, nullptr, -1));
// Now setting a key frame.
encoded_frame._frameType = kVideoFrameKey;
encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr, -1));
std::unique_ptr<VideoFrame> decoded_frame;
@ -484,7 +484,8 @@ TEST_F(TestVp8Impl, KeepsTimestampOnReencode) {
.Times(2)
.WillRepeatedly(Return(vpx_codec_err_t::VPX_CODEC_OK));
auto delta_frame = std::vector<VideoFrameType>{kVideoFrameDelta};
auto delta_frame =
std::vector<VideoFrameType>{VideoFrameType::kVideoFrameDelta};
encoder.Encode(*NextInputFrame(), &delta_frame);
}

View File

@ -127,7 +127,7 @@ TEST_F(TestVp9Impl, EncodeDecode) {
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame.
encoded_frame._frameType = kVideoFrameKey;
encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr, 0));
std::unique_ptr<VideoFrame> decoded_frame;
@ -227,7 +227,7 @@ TEST_F(TestVp9Impl, DecodedQpEqualsEncodedQp) {
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame.
encoded_frame._frameType = kVideoFrameKey;
encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr, 0));
std::unique_ptr<VideoFrame> decoded_frame;
@ -566,15 +566,19 @@ TEST_F(TestVp9Impl,
const bool is_first_upper_layer_frame = (sl_idx > 0 && frame_num == 0);
if (is_first_upper_layer_frame) {
if (inter_layer_pred == InterLayerPredMode::kOn) {
EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
EXPECT_EQ(encoded_frame[0]._frameType,
VideoFrameType::kVideoFrameDelta);
} else {
EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameKey);
EXPECT_EQ(encoded_frame[0]._frameType,
VideoFrameType::kVideoFrameKey);
}
} else if (sl_idx == 0 && frame_num == 0) {
EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameKey);
EXPECT_EQ(encoded_frame[0]._frameType,
VideoFrameType::kVideoFrameKey);
} else {
for (size_t i = 0; i <= sl_idx; ++i) {
EXPECT_EQ(encoded_frame[i]._frameType, kVideoFrameDelta);
EXPECT_EQ(encoded_frame[i]._frameType,
VideoFrameType::kVideoFrameDelta);
}
}
}
@ -623,7 +627,7 @@ TEST_F(TestVp9Impl,
for (size_t i = 0; i <= sl_idx; ++i) {
const bool is_keyframe =
encoded_frame[0]._frameType == kVideoFrameKey;
encoded_frame[0]._frameType == VideoFrameType::kVideoFrameKey;
const bool is_first_upper_layer_frame =
(i == sl_idx && frame_num == 0);
// Interframe references are there, unless it's a keyframe,
@ -693,7 +697,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerInTheSameGof) {
encoder_->Encode(*NextInputFrame(), nullptr));
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
ASSERT_EQ(codec_specific_info.size(), 1u);
EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 1);
EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
@ -712,7 +716,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerInTheSameGof) {
encoder_->Encode(*NextInputFrame(), nullptr));
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
ASSERT_EQ(codec_specific_info.size(), 2u);
EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0);
EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
EXPECT_EQ(codec_specific_info[1].codecSpecific.VP9.inter_pic_predicted, true);
@ -772,7 +776,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerAccrossGof) {
encoder_->Encode(*NextInputFrame(), nullptr));
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
ASSERT_EQ(codec_specific_info.size(), 1u);
EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 1 - i % 2);
EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted,
true);
@ -793,7 +797,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerAccrossGof) {
encoder_->Encode(*NextInputFrame(), nullptr));
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
ASSERT_EQ(codec_specific_info.size(), 2u);
EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0);
EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
EXPECT_EQ(codec_specific_info[1].codecSpecific.VP9.inter_pic_predicted,
@ -1442,7 +1446,7 @@ TEST_F(TestVp9ImplProfile2, EncodeDecode) {
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame.
encoded_frame._frameType = kVideoFrameKey;
encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr, 0));
std::unique_ptr<VideoFrame> decoded_frame;

View File

@ -727,7 +727,7 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
// We only support one stream at the moment.
if (frame_types && !frame_types->empty()) {
if ((*frame_types)[0] == kVideoFrameKey) {
if ((*frame_types)[0] == VideoFrameType::kVideoFrameKey) {
force_key_frame_ = true;
}
}
@ -1324,9 +1324,9 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
RTC_DCHECK(is_key_frame || !force_key_frame_);
// Check if encoded frame is a key frame.
encoded_image_._frameType = kVideoFrameDelta;
encoded_image_._frameType = VideoFrameType::kVideoFrameDelta;
if (is_key_frame) {
encoded_image_._frameType = kVideoFrameKey;
encoded_image_._frameType = VideoFrameType::kVideoFrameKey;
force_key_frame_ = false;
}
RTC_DCHECK_LE(encoded_image_.size(), encoded_image_.capacity());
@ -1539,7 +1539,7 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
}
// Always start with a complete key frame.
if (key_frame_required_) {
if (input_image._frameType != kVideoFrameKey)
if (input_image._frameType != VideoFrameType::kVideoFrameKey)
return WEBRTC_VIDEO_CODEC_ERROR;
// We have a key frame - is it complete?
if (input_image._completeFrame) {