Rename end_of_superframe to end_of_picture.

For consistency with the VP9 RTP spec, which uses the term "picture" for a set
of frames that belong to the same time instance.

Bug: none
Change-Id: I30e92d5debb008feb58f770b63fe10c2e0029267
Reviewed-on: https://webrtc-review.googlesource.com/72180
Reviewed-by: Sami Kalliomäki <sakal@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Reviewed-by: Åsa Persson <asapersson@webrtc.org>
Commit-Queue: Sergey Silkin <ssilkin@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23040}
This commit is contained in:
Sergey Silkin
2018-04-24 21:29:14 +02:00
committed by Commit Bot
parent 0cb4a25e43
commit bc0f0d3ded
12 changed files with 34 additions and 34 deletions

View File

@ -357,11 +357,11 @@ void VideoProcessor::FrameEncoded(
// TODO(ssilkin): Get actual value. For now assume inter-layer prediction
// is enabled for all frames.
const bool inter_layer_prediction = num_spatial_layers > 1;
bool end_of_superframe = false;
bool end_of_picture = false;
if (codec_type == kVideoCodecVP9) {
const CodecSpecificInfoVP9& vp9_info = codec_specific.codecSpecific.VP9;
frame_stat->inter_layer_predicted = vp9_info.inter_layer_predicted;
end_of_superframe = vp9_info.end_of_superframe;
end_of_picture = vp9_info.end_of_picture;
}
const webrtc::EncodedImage* encoded_image_for_decode = &encoded_image;
@ -376,7 +376,7 @@ void VideoProcessor::FrameEncoded(
if (config_.decode) {
DecodeFrame(*encoded_image_for_decode, spatial_idx);
if (end_of_superframe && inter_layer_prediction) {
if (end_of_picture && inter_layer_prediction) {
// If inter-layer prediction is enabled and upper layer was dropped then
// base layer should be passed to upper layer decoder. Otherwise decoder
// won't be able to decode next superframe.

View File

@ -173,7 +173,7 @@ struct RTPVideoHeaderVP9 {
gof_idx = kNoGofIdx;
num_ref_pics = 0;
num_spatial_layers = 1;
end_of_superframe = true;
end_of_picture = true;
}
bool inter_pic_predicted; // This layer frame is dependent on previously
@ -213,7 +213,7 @@ struct RTPVideoHeaderVP9 {
uint16_t height[kMaxVp9NumberOfSpatialLayers];
GofInfoVP9 gof;
bool end_of_superframe; // This frame is last frame in superframe.
bool end_of_picture; // This frame is the last frame in picture.
};
} // namespace webrtc

View File

@ -262,7 +262,7 @@ TEST_F(TestVp9Impl, EnableDisableSpatialLayers) {
}
}
TEST_F(TestVp9Impl, EndOfSuperframe) {
TEST_F(TestVp9Impl, EndOfPicture) {
const size_t num_spatial_layers = 2;
const size_t num_temporal_layers = 1;
codec_settings_.VP9()->numberOfSpatialLayers =
@ -296,8 +296,8 @@ TEST_F(TestVp9Impl, EndOfSuperframe) {
std::vector<EncodedImage> frames;
std::vector<CodecSpecificInfo> codec_specific;
ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific));
EXPECT_FALSE(codec_specific[0].codecSpecific.VP9.end_of_superframe);
EXPECT_TRUE(codec_specific[1].codecSpecific.VP9.end_of_superframe);
EXPECT_FALSE(codec_specific[0].codecSpecific.VP9.end_of_picture);
EXPECT_TRUE(codec_specific[1].codecSpecific.VP9.end_of_picture);
// Encode only base layer. Check that end-of-superframe flag is
// set on base layer frame.
@ -315,7 +315,7 @@ TEST_F(TestVp9Impl, EndOfSuperframe) {
ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific));
EXPECT_EQ(codec_specific[0].codecSpecific.VP9.spatial_idx, 0);
EXPECT_TRUE(codec_specific[0].codecSpecific.VP9.end_of_superframe);
EXPECT_TRUE(codec_specific[0].codecSpecific.VP9.end_of_picture);
}
} // namespace webrtc

View File

@ -580,8 +580,8 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
}
timestamp_ += duration;
const bool end_of_superframe = true;
DeliverBufferedFrame(end_of_superframe);
const bool end_of_picture = true;
DeliverBufferedFrame(end_of_picture);
return WEBRTC_VIDEO_CODEC_OK;
}
@ -691,8 +691,8 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
// Ensure we don't buffer layers of previous picture (superframe).
RTC_DCHECK(first_frame_in_picture || layer_id.spatial_layer_id > 0);
const bool end_of_superframe = false;
DeliverBufferedFrame(end_of_superframe);
const bool end_of_picture = false;
DeliverBufferedFrame(end_of_picture);
if (pkt->data.frame.sz > encoded_image_._size) {
delete[] encoded_image_._buffer;
@ -738,9 +738,9 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
return WEBRTC_VIDEO_CODEC_OK;
}
void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_superframe) {
void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_picture) {
if (encoded_image_._length > 0) {
codec_specific_.codecSpecific.VP9.end_of_superframe = end_of_superframe;
codec_specific_.codecSpecific.VP9.end_of_picture = end_of_picture;
// No data partitioning in VP9, so 1 partition only.
int part_idx = 0;

View File

@ -95,7 +95,7 @@ class VP9EncoderImpl : public VP9Encoder {
static void EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
void* user_data);
void DeliverBufferedFrame(bool end_of_superframe);
void DeliverBufferedFrame(bool end_of_picture);
// Determine maximum target for Intra frames
//