Removed _completeFrame since we never allow incomplete frames.

In the old jitter buffer, the two VCMVideoProtection modes |kProtectionNone| and |kProtectionFEC| could be set on the jitter buffer so that it would not wait for NACKed retransmissions and would instead produce incomplete frames. This has not been possible for a long time.
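
A minimal before/after sketch of the decoder-side simplification (a fragment
reusing the identifiers from the VP8/VP9 hunks below, not a standalone
program): since every frame handed to the decoder is now complete by
construction, the completeness branch in the key-frame gate is dead code.

    // Before: a key frame also had to be complete before decoding resumed.
    if (key_frame_required_) {
      if (input_image._frameType != VideoFrameType::kVideoFrameKey)
        return WEBRTC_VIDEO_CODEC_ERROR;
      if (input_image._completeFrame) {
        key_frame_required_ = false;
      } else {
        return WEBRTC_VIDEO_CODEC_ERROR;
      }
    }

    // After: any key frame clears the gate, since frames are always complete.
    if (key_frame_required_) {
      if (input_image._frameType != VideoFrameType::kVideoFrameKey)
        return WEBRTC_VIDEO_CODEC_ERROR;
      key_frame_required_ = false;
    }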

Bug: webrtc:9378, webrtc:7408
Change-Id: I0a2d3ec34d721126c1128306d5fad88314f8d59f
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/190680
Reviewed-by: Kári Helgason <kthelgason@webrtc.org>
Reviewed-by: Sami Kalliomäki <sakal@webrtc.org>
Reviewed-by: Niels Möller <nisse@webrtc.org>
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#32513}
Author: philipel
Date: 2020-10-28 15:50:15 +01:00
Committed-by: Commit Bot
Parent: 2e08ca50cd
Commit: 1b0d5437c9

26 changed files with 15 additions and 109 deletions

@@ -165,6 +165,7 @@ class RTC_EXPORT EncodedImage {
   VideoFrameType _frameType = VideoFrameType::kVideoFrameDelta;
   VideoRotation rotation_ = kVideoRotation_0;
   VideoContentType content_type_ = VideoContentType::UNSPECIFIED;
+  // TODO(philipel): Remove when downstream has been updated.
   bool _completeFrame = false;
   int qp_ = -1;  // Quantizer value.

@@ -482,7 +482,6 @@ int32_t LibaomAv1Encoder::Encode(
   // Get encoded image data.
   EncodedImage encoded_image;
-  encoded_image._completeFrame = true;
   aom_codec_iter_t iter = nullptr;
   int data_pkt_count = 0;
   while (const aom_codec_cx_pkt_t* pkt =

@@ -275,7 +275,6 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* inst,
         CalcBufferSize(VideoType::kI420, codec_.simulcastStream[idx].width,
                        codec_.simulcastStream[idx].height);
     encoded_images_[i].SetEncodedData(EncodedImageBuffer::Create(new_capacity));
-    encoded_images_[i]._completeFrame = true;
     encoded_images_[i]._encodedWidth = codec_.simulcastStream[idx].width;
     encoded_images_[i]._encodedHeight = codec_.simulcastStream[idx].height;
     encoded_images_[i].set_size(0);

@@ -240,21 +240,14 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
   if (key_frame_required_) {
     if (input_image._frameType != VideoFrameType::kVideoFrameKey)
       return WEBRTC_VIDEO_CODEC_ERROR;
-    // We have a key frame - is it complete?
-    if (input_image._completeFrame) {
-      key_frame_required_ = false;
-    } else {
-      return WEBRTC_VIDEO_CODEC_ERROR;
-    }
+    key_frame_required_ = false;
   }
   // Restrict error propagation using key frame requests.
   // Reset on a key frame refresh.
-  if (input_image._frameType == VideoFrameType::kVideoFrameKey &&
-      input_image._completeFrame) {
+  if (input_image._frameType == VideoFrameType::kVideoFrameKey) {
     propagation_cnt_ = -1;
     // Start count on first loss.
-  } else if ((!input_image._completeFrame || missing_frames) &&
-             propagation_cnt_ == -1) {
+  } else if (missing_frames && propagation_cnt_ == -1) {
     propagation_cnt_ = 0;
   }
   if (propagation_cnt_ >= 0) {

@@ -488,9 +488,7 @@ int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst,
     downsampling_factors_[number_of_streams - 1].num = 1;
     downsampling_factors_[number_of_streams - 1].den = 1;
   }
-  for (int i = 0; i < number_of_streams; ++i) {
-    encoded_images_[i]._completeFrame = true;
-  }
   // populate encoder configuration with default values
   if (libvpx_->codec_enc_config_default(vpx_codec_vp8_cx(), &vpx_configs_[0],
                                         0)) {

@@ -376,36 +376,6 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
   EXPECT_EQ(kInitialTimestampRtp, decoded_frame->timestamp());
 }
-#if defined(WEBRTC_ANDROID)
-#define MAYBE_DecodeWithACompleteKeyFrame DISABLED_DecodeWithACompleteKeyFrame
-#else
-#define MAYBE_DecodeWithACompleteKeyFrame DecodeWithACompleteKeyFrame
-#endif
-TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) {
-  VideoFrame input_frame = NextInputFrame();
-  EncodedImage encoded_frame;
-  CodecSpecificInfo codec_specific_info;
-  EncodeAndWaitForFrame(input_frame, &encoded_frame, &codec_specific_info);
-  // Setting complete to false -> should return an error.
-  encoded_frame._completeFrame = false;
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
-            decoder_->Decode(encoded_frame, false, -1));
-  // Setting complete back to true. Forcing a delta frame.
-  encoded_frame._frameType = VideoFrameType::kVideoFrameDelta;
-  encoded_frame._completeFrame = true;
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
-            decoder_->Decode(encoded_frame, false, -1));
-  // Now setting a key frame.
-  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
-  std::unique_ptr<VideoFrame> decoded_frame;
-  absl::optional<uint8_t> decoded_qp;
-  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
-  ASSERT_TRUE(decoded_frame);
-  EXPECT_GT(I420PSNR(&input_frame, decoded_frame.get()), 36);
-}
 TEST_F(TestVp8Impl, EncoderWith2TemporalLayers) {
   codec_settings_.VP8()->numberOfTemporalLayers = 2;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,

@@ -589,7 +589,6 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
   is_svc_ = (num_spatial_layers_ > 1 || num_temporal_layers_ > 1);
-  encoded_image_._completeFrame = true;
   // Populate encoder configuration with default values.
   if (vpx_codec_enc_config_default(vpx_codec_vp9_cx(), config_, 0)) {
     return WEBRTC_VIDEO_CODEC_ERROR;
@@ -1989,12 +1988,7 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
   if (key_frame_required_) {
     if (input_image._frameType != VideoFrameType::kVideoFrameKey)
       return WEBRTC_VIDEO_CODEC_ERROR;
-    // We have a key frame - is it complete?
-    if (input_image._completeFrame) {
-      key_frame_required_ = false;
-    } else {
-      return WEBRTC_VIDEO_CODEC_ERROR;
-    }
+    key_frame_required_ = false;
   }
   vpx_codec_iter_t iter = nullptr;
   vpx_image_t* img;

@@ -43,7 +43,6 @@ void VCMEncodedFrame::Reset() {
   _frameType = VideoFrameType::kVideoFrameDelta;
   _encodedWidth = 0;
   _encodedHeight = 0;
-  _completeFrame = false;
   _missingFrame = false;
   set_size(0);
   _codecSpecificInfo.codecType = kVideoCodecGeneric;

@@ -92,10 +92,6 @@ class RTC_EXPORT VCMEncodedFrame : protected EncodedImage {
    */
   EncodedImage::Timing video_timing() const { return timing_; }
   EncodedImage::Timing* video_timing_mutable() { return &timing_; }
-  /**
-   * True if this frame is complete, false otherwise
-   */
-  bool Complete() const { return _completeFrame; }
   /**
    * True if there's a frame missing before this frame
    */

@@ -70,11 +70,6 @@ void VCMFrameBuffer::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
       gof_info.temporal_up_switch[idx];
 }
-bool VCMFrameBuffer::IsSessionComplete() const {
-  TRACE_EVENT0("webrtc", "VCMFrameBuffer::IsSessionComplete");
-  return _sessionInfo.complete();
-}
 // Insert packet
 VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(const VCMPacket& packet,
                                                 int64_t timeInMs,
@@ -265,7 +260,6 @@ void VCMFrameBuffer::PrepareForDecode(bool continuous) {
   // Transfer frame information to EncodedFrame and create any codec
   // specific information.
   _frameType = _sessionInfo.FrameType();
-  _completeFrame = _sessionInfo.complete();
   _missingFrame = !continuous;
 }

@@ -51,7 +51,6 @@ RtpFrameObject::RtpFrameObject(
   // TODO(philipel): Remove when encoded image is replaced by EncodedFrame.
   // VCMEncodedFrame members
   CopyCodecSpecific(&rtp_video_header_);
-  _completeFrame = true;
   _payloadType = payload_type;
   SetTimestamp(rtp_timestamp);
   ntp_time_ms_ = ntp_time_ms;

@@ -41,9 +41,7 @@ enum {
 };
 enum VCMVideoProtection {
-  kProtectionNone,
   kProtectionNack,
-  kProtectionFEC,
   kProtectionNackFEC,
 };

@@ -298,8 +298,7 @@ VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
   last_decoded_state_.SetState(frame);
   DropPacketsFromNackList(last_decoded_state_.sequence_num());
-  if ((*frame).IsSessionComplete())
-    UpdateAveragePacketsPerFrame(frame->NumPackets());
+  UpdateAveragePacketsPerFrame(frame->NumPackets());
   return frame;
 }

@@ -161,18 +161,6 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
   frame->SetRenderTime(render_time_ms);
   TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->Timestamp(), "SetRenderTS",
                           "render_time", frame->RenderTimeMs());
-  if (!frame->Complete()) {
-    // Update stats for incomplete frames.
-    bool retransmitted = false;
-    const int64_t last_packet_time_ms =
-        jitter_buffer_.LastPacketTime(frame, &retransmitted);
-    if (last_packet_time_ms >= 0 && !retransmitted) {
-      // We don't want to include timestamps which have suffered from
-      // retransmission here, since we compensate with extra retransmission
-      // delay within the jitter estimate.
-      timing_->IncomingTimestamp(frame_timestamp, last_packet_time_ms);
-    }
-  }
   return frame;
 }

@@ -171,7 +171,6 @@ absl::optional<EncodedImage> IvfFileReader::NextFrame() {
   if (is_first_frame) {
     image._frameType = VideoFrameType::kVideoFrameKey;
   }
-  image._completeFrame = true;
   return image;
 }

@@ -84,7 +84,6 @@ class SimulcastTestFixtureImpl::TestEncodedImageCallback
       encoded_key_frame_.SetEncodedData(EncodedImageBuffer::Create(
           encoded_image.data(), encoded_image.size()));
       encoded_key_frame_._frameType = VideoFrameType::kVideoFrameKey;
-      encoded_key_frame_._completeFrame = encoded_image._completeFrame;
     } else {
       encoded_frame_.SetEncodedData(EncodedImageBuffer::Create(
           encoded_image.data(), encoded_image.size()));
@@ -869,7 +868,6 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
         encoded_frame[index].SetEncodedData(EncodedImageBuffer::Create(
             encoded_image.data(), encoded_image.size()));
         encoded_frame[index]._frameType = encoded_image._frameType;
-        encoded_frame[index]._completeFrame = encoded_image._completeFrame;
         return EncodedImageCallback::Result(
             EncodedImageCallback::Result::OK, 0);
       }));

@@ -208,9 +208,7 @@ int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
                                 clock_->TimeInMilliseconds());
   if (first_frame_received_()) {
-    RTC_LOG(LS_INFO) << "Received first "
-                     << (frame->Complete() ? "complete" : "incomplete")
-                     << " decodable video frame";
+    RTC_LOG(LS_INFO) << "Received first complete decodable video frame";
   }
   const int32_t ret = Decode(*frame);

@@ -54,6 +54,7 @@ public class EncodedImage implements RefCounted {
   public final long captureTimeNs;
   public final FrameType frameType;
   public final int rotation;
+  // TODO(philipel): Remove when downstream has been updated.
   public final boolean completeFrame;
   public final @Nullable Integer qp;
@@ -71,7 +72,7 @@
   @CalledByNative
   private EncodedImage(ByteBuffer buffer, @Nullable Runnable releaseCallback, int encodedWidth,
       int encodedHeight, long captureTimeNs, FrameType frameType, int rotation,
-      boolean completeFrame, @Nullable Integer qp) {
+      @Nullable Integer qp) {
     this.buffer = buffer;
     this.encodedWidth = encodedWidth;
     this.encodedHeight = encodedHeight;
@@ -79,7 +80,7 @@
     this.captureTimeNs = captureTimeNs;
     this.frameType = frameType;
     this.rotation = rotation;
-    this.completeFrame = completeFrame;
+    this.completeFrame = true;
     this.qp = qp;
     this.refCountDelegate = new RefCountDelegate(releaseCallback);
   }
@@ -114,11 +115,6 @@
     return rotation;
   }
-  @CalledByNative
-  private boolean getCompleteFrame() {
-    return completeFrame;
-  }
   @CalledByNative
   private @Nullable Integer getQp() {
     return qp;
@@ -136,7 +132,6 @@
     private long captureTimeNs;
     private EncodedImage.FrameType frameType;
     private int rotation;
-    private boolean completeFrame;
     private @Nullable Integer qp;
     private Builder() {}
@@ -178,8 +173,8 @@
       return this;
     }
+    // TODO(philipel): Remove when downstream has been updated.
     public Builder setCompleteFrame(boolean completeFrame) {
-      this.completeFrame = completeFrame;
       return this;
     }
@@ -190,7 +185,7 @@
     public EncodedImage createEncodedImage() {
       return new EncodedImage(buffer, releaseCallback, encodedWidth, encodedHeight, captureTimeNs,
-          frameType, rotation, completeFrame, qp);
+          frameType, rotation, qp);
     }
   }
 }

@@ -101,7 +101,6 @@ public class HardwareVideoEncoderTest {
             .setCaptureTimeNs(frame.captureTimeNs)
             .setFrameType(frame.frameType)
             .setRotation(frame.rotation)
-            .setCompleteFrame(frame.completeFrame)
             .setQp(frame.qp)
             .createEncodedImage());
   }

@@ -246,10 +246,6 @@ class AndroidVideoDecoder implements VideoDecoder, VideoSink {
       Logging.e(TAG, "decode() - key frame required first");
       return VideoCodecStatus.NO_OUTPUT;
     }
-    if (!frame.completeFrame) {
-      Logging.e(TAG, "decode() - complete frame required first");
-      return VideoCodecStatus.NO_OUTPUT;
-    }
   }
   int index;

@@ -70,7 +70,7 @@ ScopedJavaLocalRef<jobject> NativeToJavaEncodedImage(
       static_cast<int>(image._encodedWidth),
       static_cast<int>(image._encodedHeight),
       image.capture_time_ms_ * rtc::kNumNanosecsPerMillisec, frame_type,
-      static_cast<jint>(image.rotation_), image._completeFrame, qp);
+      static_cast<jint>(image.rotation_), qp);
 }
 ScopedJavaLocalRef<jobjectArray> NativeToJavaFrameTypeArray(
@@ -98,8 +98,6 @@ EncodedImage JavaToNativeEncodedImage(JNIEnv* env,
       Java_EncodedImage_getEncodedHeight(env, j_encoded_image);
   frame.rotation_ =
       (VideoRotation)Java_EncodedImage_getRotation(env, j_encoded_image);
-  frame._completeFrame =
-      Java_EncodedImage_getCompleteFrame(env, j_encoded_image);
   frame.qp_ = JavaToNativeOptionalInt(
                   env, Java_EncodedImage_getQp(env, j_encoded_image))

@@ -233,7 +233,6 @@ public class HardwareVideoEncoderTest {
     assertThat(videoFrame.encodedHeight).isEqualTo(TEST_ENCODER_SETTINGS.height);
     assertThat(videoFrame.rotation).isEqualTo(0);
     assertThat(videoFrame.captureTimeNs).isEqualTo(42);
-    assertThat(videoFrame.completeFrame).isTrue();
     assertThat(videoFrame.frameType).isEqualTo(FrameType.VideoFrameKey);
     CodecTestHelper.assertEqualContents(
         outputData, videoFrame.buffer, /* offset= */ 0, videoFrame.buffer.capacity());

@@ -92,7 +92,6 @@ class ObjCEncodedImageBuffer : public webrtc::EncodedImageBufferInterface {
     self.encodeFinishMs = encodedImage.timing_.encode_finish_ms;
     self.frameType = static_cast<RTCFrameType>(encodedImage._frameType);
     self.rotation = static_cast<RTCVideoRotation>(encodedImage.rotation_);
-    self.completeFrame = encodedImage._completeFrame;
     self.qp = @(encodedImage.qp_);
     self.contentType = (encodedImage.content_type_ == webrtc::VideoContentType::SCREENSHARE) ?
         RTCVideoContentTypeScreenshare :
@@ -121,7 +120,6 @@
   encodedImage.timing_.encode_finish_ms = self.encodeFinishMs;
   encodedImage._frameType = webrtc::VideoFrameType(self.frameType);
   encodedImage.rotation_ = webrtc::VideoRotation(self.rotation);
-  encodedImage._completeFrame = self.completeFrame;
   encodedImage.qp_ = self.qp ? self.qp.intValue : -1;
   encodedImage.content_type_ = (self.contentType == RTCVideoContentTypeScreenshare) ?
       webrtc::VideoContentType::SCREENSHARE :

@@ -44,6 +44,7 @@ RTC_OBJC_EXPORT
 @property(nonatomic, assign) int64_t encodeFinishMs;
 @property(nonatomic, assign) RTCFrameType frameType;
 @property(nonatomic, assign) RTCVideoRotation rotation;
+// TODO(philipel): Remove when downstream has been updated.
 @property(nonatomic, assign) BOOL completeFrame;
 @property(nonatomic, strong) NSNumber *qp;
 @property(nonatomic, assign) RTCVideoContentType contentType;

@@ -794,7 +794,6 @@ NSUInteger GetMaxSampleRate(const webrtc::H264::ProfileLevelId &profile_level_id
   }];
   frame.encodedWidth = width;
   frame.encodedHeight = height;
-  frame.completeFrame = YES;
   frame.frameType = isKeyframe ? RTCFrameTypeVideoFrameKey : RTCFrameTypeVideoFrameDelta;
   frame.captureTimeMs = renderTimeMs;
   frame.timeStamp = timestamp;

@@ -50,7 +50,6 @@ int32_t ConfigurableFrameSizeEncoder::Encode(
   auto buffer = EncodedImageBuffer::Create(current_frame_size_);
   memset(buffer->data(), 0, current_frame_size_);
   encodedImage.SetEncodedData(buffer);
-  encodedImage._completeFrame = true;
   encodedImage._encodedHeight = inputImage.height();
   encodedImage._encodedWidth = inputImage.width();
   encodedImage._frameType = VideoFrameType::kVideoFrameKey;