Rename end_of_superframe to end_of_picture.
For consistency with the VP9 RTP spec, which uses the term "picture" for
the set of frames that belong to the same time instance.

Bug: none
Change-Id: I30e92d5debb008feb58f770b63fe10c2e0029267
Reviewed-on: https://webrtc-review.googlesource.com/72180
Reviewed-by: Sami Kalliomäki <sakal@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Reviewed-by: Åsa Persson <asapersson@webrtc.org>
Commit-Queue: Sergey Silkin <ssilkin@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23040}
parent 0cb4a25e43
commit bc0f0d3ded
committed by Commit Bot
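
In VP9, the encoder bundles the frames of all spatial layers for one time instance into a single bitstream superframe; the VP9 RTP payload spec calls that same set of frames a "picture", which is the terminology the diff below adopts. As a minimal sketch of the renamed flag, assuming a trimmed-down, hypothetical stand-in for the real RTPVideoHeaderVP9:

    #include <cstdint>

    // Illustrative only: the real RTPVideoHeaderVP9 has many more members.
    struct RTPVideoHeaderVP9Sketch {
      uint8_t spatial_idx = 0;
      uint8_t num_spatial_layers = 1;
      // True for the last (highest non-dropped) spatial-layer frame of a
      // picture, i.e. the set of frames sharing one capture time instance.
      // This is the field renamed from end_of_superframe.
      bool end_of_picture = true;
    };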
@@ -577,12 +577,12 @@ bool RtpPacketizerVp9::NextPacket(RtpPacketToSend* packet) {
     return false;
   }

-  // Ensure end_of_superframe is always set on top spatial layer when it is not
+  // Ensure end_of_picture is always set on top spatial layer when it is not
   // dropped.
   RTC_DCHECK(hdr_.spatial_idx < hdr_.num_spatial_layers - 1 ||
-             hdr_.end_of_superframe);
+             hdr_.end_of_picture);

-  packet->SetMarker(packets_.empty() && hdr_.end_of_superframe);
+  packet->SetMarker(packets_.empty() && hdr_.end_of_picture);
   return true;
 }
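
The rule this hunk encodes: the RTP marker bit is set only on the final packet of the final non-dropped spatial layer of a picture, which tells the receiver the whole picture has arrived. A hypothetical helper (not part of the patch) restating it:

    // packets_.empty() in the hunk above means "this is the frame's last
    // packet"; end_of_picture means "this frame is the picture's last".
    inline bool ShouldSetMarker(bool is_last_packet_of_frame,
                                bool end_of_picture) {
      return is_last_packet_of_frame && end_of_picture;
    }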
@@ -480,7 +480,7 @@ TEST_F(RtpPacketizerVp9Test, TestSsDataDoesNotFitInAveragePacket) {
   CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
 }

-TEST_F(RtpPacketizerVp9Test, EndOfSuperframeSetsSetMarker) {
+TEST_F(RtpPacketizerVp9Test, EndOfPictureSetsSetMarker) {
   const size_t kFrameSize = 10;
   const size_t kPacketSize = 8;
   const size_t kLastPacketReductionLen = 0;
@@ -497,17 +497,17 @@ TEST_F(RtpPacketizerVp9Test, EndOfSuperframeSetsSetMarker) {
   // Drop top layer and ensure that marker bit is set on last encoded layer.
   for (size_t spatial_idx = 0; spatial_idx < vp9_header.num_spatial_layers - 1;
        ++spatial_idx) {
-    const bool end_of_superframe =
+    const bool end_of_picture =
         spatial_idx + 1 == vp9_header.num_spatial_layers - 1;
     vp9_header.spatial_idx = spatial_idx;
-    vp9_header.end_of_superframe = end_of_superframe;
+    vp9_header.end_of_picture = end_of_picture;
     RtpPacketizerVp9 packetizer(vp9_header, kPacketSize,
                                 kLastPacketReductionLen);
     packetizer.SetPayloadData(kFrame, sizeof(kFrame), kNoFragmentation);
     ASSERT_TRUE(packetizer.NextPacket(&packet));
     EXPECT_FALSE(packet.Marker());
     ASSERT_TRUE(packetizer.NextPacket(&packet));
-    EXPECT_EQ(packet.Marker(), end_of_superframe);
+    EXPECT_EQ(packet.Marker(), end_of_picture);
   }
 }
@@ -357,11 +357,11 @@ void VideoProcessor::FrameEncoded(
   // TODO(ssilkin): Get actual value. For now assume inter-layer prediction
   // is enabled for all frames.
   const bool inter_layer_prediction = num_spatial_layers > 1;
-  bool end_of_superframe = false;
+  bool end_of_picture = false;
   if (codec_type == kVideoCodecVP9) {
     const CodecSpecificInfoVP9& vp9_info = codec_specific.codecSpecific.VP9;
     frame_stat->inter_layer_predicted = vp9_info.inter_layer_predicted;
-    end_of_superframe = vp9_info.end_of_superframe;
+    end_of_picture = vp9_info.end_of_picture;
   }

   const webrtc::EncodedImage* encoded_image_for_decode = &encoded_image;
@@ -376,7 +376,7 @@ void VideoProcessor::FrameEncoded(
   if (config_.decode) {
     DecodeFrame(*encoded_image_for_decode, spatial_idx);

-    if (end_of_superframe && inter_layer_prediction) {
+    if (end_of_picture && inter_layer_prediction) {
       // If inter-layer prediction is enabled and upper layer was dropped then
       // base layer should be passed to upper layer decoder. Otherwise decoder
       // won't be able to decode next superframe.
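
The comment in this hunk carries the key decoding invariant: with inter-layer prediction enabled, a dropped upper layer's decoder still needs the lower-layer frame, or it cannot decode the next superframe. A sketch of that rule under assumed, simplified types (the function and its callback are hypothetical):

    #include <cstddef>
    #include <functional>

    // After the last delivered frame of a picture, feed the same bitstream
    // to the decoders of any dropped upper layers.
    void FeedDroppedLayerDecoders(size_t spatial_idx,
                                  size_t num_spatial_layers,
                                  bool end_of_picture,
                                  bool inter_layer_prediction,
                                  const std::function<void(size_t)>& decode) {
      if (!(end_of_picture && inter_layer_prediction))
        return;
      for (size_t sid = spatial_idx + 1; sid < num_spatial_layers; ++sid)
        decode(sid);  // Decode the lower-layer frame with decoder |sid|.
    }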
@@ -173,7 +173,7 @@ struct RTPVideoHeaderVP9 {
     gof_idx = kNoGofIdx;
     num_ref_pics = 0;
     num_spatial_layers = 1;
-    end_of_superframe = true;
+    end_of_picture = true;
   }

   bool inter_pic_predicted;  // This layer frame is dependent on previously
@@ -213,7 +213,7 @@ struct RTPVideoHeaderVP9 {
   uint16_t height[kMaxVp9NumberOfSpatialLayers];
   GofInfoVP9 gof;

-  bool end_of_superframe;  // This frame is last frame in superframe.
+  bool end_of_picture;  // This frame is the last frame in picture.
 };

 }  // namespace webrtc
@@ -262,7 +262,7 @@ TEST_F(TestVp9Impl, EnableDisableSpatialLayers) {
   }
 }

-TEST_F(TestVp9Impl, EndOfSuperframe) {
+TEST_F(TestVp9Impl, EndOfPicture) {
   const size_t num_spatial_layers = 2;
   const size_t num_temporal_layers = 1;
   codec_settings_.VP9()->numberOfSpatialLayers =
@@ -296,8 +296,8 @@ TEST_F(TestVp9Impl, EndOfSuperframe) {
   std::vector<EncodedImage> frames;
   std::vector<CodecSpecificInfo> codec_specific;
   ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific));
-  EXPECT_FALSE(codec_specific[0].codecSpecific.VP9.end_of_superframe);
-  EXPECT_TRUE(codec_specific[1].codecSpecific.VP9.end_of_superframe);
+  EXPECT_FALSE(codec_specific[0].codecSpecific.VP9.end_of_picture);
+  EXPECT_TRUE(codec_specific[1].codecSpecific.VP9.end_of_picture);

   // Encode only base layer. Check that end-of-superframe flag is
   // set on base layer frame.
@@ -315,7 +315,7 @@ TEST_F(TestVp9Impl, EndOfSuperframe) {

   ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific));
   EXPECT_EQ(codec_specific[0].codecSpecific.VP9.spatial_idx, 0);
-  EXPECT_TRUE(codec_specific[0].codecSpecific.VP9.end_of_superframe);
+  EXPECT_TRUE(codec_specific[0].codecSpecific.VP9.end_of_picture);
 }

 }  // namespace webrtc
@@ -580,8 +580,8 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
   }
   timestamp_ += duration;

-  const bool end_of_superframe = true;
-  DeliverBufferedFrame(end_of_superframe);
+  const bool end_of_picture = true;
+  DeliverBufferedFrame(end_of_picture);

   return WEBRTC_VIDEO_CODEC_OK;
 }
@@ -691,8 +691,8 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   // Ensure we don't buffer layers of previous picture (superframe).
   RTC_DCHECK(first_frame_in_picture || layer_id.spatial_layer_id > 0);

-  const bool end_of_superframe = false;
-  DeliverBufferedFrame(end_of_superframe);
+  const bool end_of_picture = false;
+  DeliverBufferedFrame(end_of_picture);

   if (pkt->data.frame.sz > encoded_image_._size) {
     delete[] encoded_image_._buffer;
@@ -738,9 +738,9 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   return WEBRTC_VIDEO_CODEC_OK;
 }

-void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_superframe) {
+void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_picture) {
   if (encoded_image_._length > 0) {
-    codec_specific_.codecSpecific.VP9.end_of_superframe = end_of_superframe;
+    codec_specific_.codecSpecific.VP9.end_of_picture = end_of_picture;

     // No data partitioning in VP9, so 1 partition only.
     int part_idx = 0;
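
These hunks rely on a one-layer delay: each encoded spatial layer is buffered until either the next layer arrives (so the buffered one was not the picture's last) or Encode() finishes (so it was). A simplified sketch of that scheme, with hypothetical names:

    #include <functional>

    // Illustrative: a layer's end_of_picture value is only known once the
    // next layer arrives or the picture is finalized.
    class Vp9LayerBufferSketch {
     public:
      // Called for each encoded spatial layer, in ascending order.
      void OnEncodedLayer(const std::function<void(bool)>& deliver) {
        if (has_buffered_frame_)
          deliver(/*end_of_picture=*/false);  // A later layer follows it.
        has_buffered_frame_ = true;  // Buffer the layer just encoded.
      }
      // Called once every spatial layer of the picture has been encoded.
      void OnPictureComplete(const std::function<void(bool)>& deliver) {
        if (has_buffered_frame_)
          deliver(/*end_of_picture=*/true);  // Last layer of the picture.
        has_buffered_frame_ = false;
      }

     private:
      bool has_buffered_frame_ = false;
    };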
@@ -95,7 +95,7 @@ class VP9EncoderImpl : public VP9Encoder {
   static void EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
                                                void* user_data);

-  void DeliverBufferedFrame(bool end_of_superframe);
+  void DeliverBufferedFrame(bool end_of_picture);

   // Determine maximum target for Intra frames
   //
@@ -70,7 +70,7 @@ struct CodecSpecificInfoVP9 {
   uint8_t num_ref_pics;
   uint8_t p_diff[kMaxVp9RefPics];

-  bool end_of_superframe;
+  bool end_of_picture;
 };

 struct CodecSpecificInfoGeneric {
@@ -1034,7 +1034,7 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
       static_cast<uint8_t>(gof_idx_++ % gof_.num_frames_in_gof);
   info.codecSpecific.VP9.num_spatial_layers = 1;
   info.codecSpecific.VP9.first_frame_in_picture = true;
-  info.codecSpecific.VP9.end_of_superframe = true;
+  info.codecSpecific.VP9.end_of_picture = true;
   info.codecSpecific.VP9.spatial_layer_resolution_present = false;
   if (info.codecSpecific.VP9.ss_data_available) {
     info.codecSpecific.VP9.spatial_layer_resolution_present = true;
@@ -417,7 +417,7 @@ CodecSpecificInfo VideoEncoderWrapper::ParseCodecSpecificInfo(
       static_cast<uint8_t>(gof_idx_++ % gof_.num_frames_in_gof);
   info.codecSpecific.VP9.num_spatial_layers = 1;
   info.codecSpecific.VP9.first_frame_in_picture = true;
-  info.codecSpecific.VP9.end_of_superframe = true;
+  info.codecSpecific.VP9.end_of_picture = true;
   info.codecSpecific.VP9.spatial_layer_resolution_present = false;
   if (info.codecSpecific.VP9.ss_data_available) {
     info.codecSpecific.VP9.spatial_layer_resolution_present = true;
@@ -72,8 +72,8 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
       for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i) {
         rtp->codecHeader.VP9.pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
       }
-      rtp->codecHeader.VP9.end_of_superframe =
-          info->codecSpecific.VP9.end_of_superframe;
+      rtp->codecHeader.VP9.end_of_picture =
+          info->codecSpecific.VP9.end_of_picture;
       return;
     }
     case kVideoCodecH264:
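
Taken together, the hunks trace the flag's full path: the encoder stamps it on CodecSpecificInfoVP9, and the payload router copies it into the RTP video header the packetizer reads. A compilable sketch of that hop, using trimmed-down, hypothetical stand-ins for the real structs:

    struct CodecSpecificInfoVP9Sketch {
      bool end_of_picture = false;
    };
    struct RtpVp9HeaderSketch {
      bool end_of_picture = false;
    };

    // Mirrors the CopyCodecSpecific() change above.
    void CopyCodecSpecificSketch(const CodecSpecificInfoVP9Sketch& info,
                                 RtpVp9HeaderSketch* rtp) {
      rtp->end_of_picture = info.end_of_picture;
    }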
@@ -374,7 +374,7 @@ TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_Vp9) {
   codec_info.codecSpecific.VP9.first_frame_in_picture = true;
   codec_info.codecSpecific.VP9.spatial_idx = 0;
   codec_info.codecSpecific.VP9.temporal_idx = 2;
-  codec_info.codecSpecific.VP9.end_of_superframe = false;
+  codec_info.codecSpecific.VP9.end_of_picture = false;

   EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
       .WillOnce(
@@ -391,8 +391,8 @@ TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_Vp9) {
                   codec_info.codecSpecific.VP9.spatial_idx);
         EXPECT_EQ(header->codecHeader.VP9.num_spatial_layers,
                   codec_info.codecSpecific.VP9.num_spatial_layers);
-        EXPECT_EQ(header->codecHeader.VP9.end_of_superframe,
-                  codec_info.codecSpecific.VP9.end_of_superframe);
+        EXPECT_EQ(header->codecHeader.VP9.end_of_picture,
+                  codec_info.codecSpecific.VP9.end_of_picture);
         return true;
       }));
   EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));
@@ -403,7 +403,7 @@ TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_Vp9) {
   // Next spatial layer.
   codec_info.codecSpecific.VP9.first_frame_in_picture = false;
   codec_info.codecSpecific.VP9.spatial_idx += 1;
-  codec_info.codecSpecific.VP9.end_of_superframe = true;
+  codec_info.codecSpecific.VP9.end_of_picture = true;

   EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
       .WillOnce(
@@ -420,8 +420,8 @@ TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_Vp9) {
                   codec_info.codecSpecific.VP9.spatial_idx);
         EXPECT_EQ(header->codecHeader.VP9.num_spatial_layers,
                   codec_info.codecSpecific.VP9.num_spatial_layers);
-        EXPECT_EQ(header->codecHeader.VP9.end_of_superframe,
-                  codec_info.codecSpecific.VP9.end_of_superframe);
+        EXPECT_EQ(header->codecHeader.VP9.end_of_picture,
+                  codec_info.codecSpecific.VP9.end_of_picture);
         return true;
       }));
   EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));