Delete VP8 feedback mode.

It depends on RTCP RPSI and SLI messages, which are being deleted.

TBR=stefan@webrtc.org  # TODO comments added to common_types.h
BUG=webrtc:7338

Review-Url: https://codereview.webrtc.org/2753783002
Cr-Commit-Position: refs/heads/master@{#17314}
nisse
2017-03-21 01:54:13 -07:00
committed by Commit bot
parent abbacbf489
commit 3257b16156
13 changed files with 15 additions and 608 deletions

View File

@@ -461,7 +461,9 @@ enum VP8ResilienceMode {
 class TemporalLayersFactory;
 // VP8 specific
 struct VideoCodecVP8 {
+  // TODO(nisse): Unused, delete?
   bool pictureLossIndicationOn;
+  // TODO(nisse): Delete, as soon as downstream applications are updated.
   bool feedbackModeOn;
   VideoCodecComplexity complexity;
   VP8ResilienceMode resilience;

View File

@@ -202,8 +202,6 @@ rtc_static_library("webrtc_vp8") {
     "codecs/vp8/default_temporal_layers.h",
     "codecs/vp8/include/vp8.h",
     "codecs/vp8/include/vp8_common_types.h",
-    "codecs/vp8/reference_picture_selection.cc",
-    "codecs/vp8/reference_picture_selection.h",
     "codecs/vp8/screenshare_layers.cc",
     "codecs/vp8/screenshare_layers.h",
     "codecs/vp8/simulcast_encoder_adapter.cc",
@@ -483,7 +481,6 @@ if (rtc_include_tests) {
     "codecs/test/stats_unittest.cc",
     "codecs/test/videoprocessor_unittest.cc",
     "codecs/vp8/default_temporal_layers_unittest.cc",
-    "codecs/vp8/reference_picture_selection_unittest.cc",
     "codecs/vp8/screenshare_layers_unittest.cc",
     "codecs/vp8/simulcast_encoder_adapter_unittest.cc",
     "codecs/vp8/simulcast_unittest.cc",

View File

@@ -1,132 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.h"
-
-#include "vpx/vpx_encoder.h"
-#include "vpx/vp8cx.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-
-ReferencePictureSelection::ReferencePictureSelection()
-    : kRttConfidence(1.33),
-      update_golden_next_(true),
-      established_golden_(false),
-      received_ack_(false),
-      last_sent_ref_picture_id_(0),
-      last_sent_ref_update_time_(0),
-      established_ref_picture_id_(0),
-      last_refresh_time_(0),
-      rtt_(0) {}
-
-void ReferencePictureSelection::Init() {
-  update_golden_next_ = true;
-  established_golden_ = false;
-  received_ack_ = false;
-  last_sent_ref_picture_id_ = 0;
-  last_sent_ref_update_time_ = 0;
-  established_ref_picture_id_ = 0;
-  last_refresh_time_ = 0;
-  rtt_ = 0;
-}
-
-void ReferencePictureSelection::ReceivedRPSI(int rpsi_picture_id) {
-  // Assume RPSI is signaled with 14 bits.
-  if ((rpsi_picture_id & 0x3fff) == (last_sent_ref_picture_id_ & 0x3fff)) {
-    // Remote peer has received our last reference frame, switch frame type.
-    received_ack_ = true;
-    established_golden_ = update_golden_next_;
-    update_golden_next_ = !update_golden_next_;
-    established_ref_picture_id_ = last_sent_ref_picture_id_;
-  }
-}
-
-bool ReferencePictureSelection::ReceivedSLI(uint32_t now_ts) {
-  bool send_refresh = false;
-  // Don't send a refresh more than once per round-trip time.
-  // This is to avoid too frequent refreshes, since the receiver
-  // will signal an SLI for every corrupt frame.
-  if (TimestampDiff(now_ts, last_refresh_time_) > rtt_) {
-    send_refresh = true;
-    last_refresh_time_ = now_ts;
-  }
-  return send_refresh;
-}
-
-int ReferencePictureSelection::EncodeFlags(int picture_id,
-                                           bool send_refresh,
-                                           uint32_t now_ts) {
-  int flags = 0;
-  // We can't refresh the decoder until we have established the key frame.
-  if (send_refresh && received_ack_) {
-    flags |= VP8_EFLAG_NO_REF_LAST;  // Don't reference the last frame.
-    if (established_golden_)
-      flags |= VP8_EFLAG_NO_REF_ARF;  // Don't reference the alt-ref frame.
-    else
-      flags |= VP8_EFLAG_NO_REF_GF;  // Don't reference the golden frame.
-  }
-  // Make sure we don't update the reference frames too often. We must wait long
-  // enough for an RPSI to arrive after the decoder decoded the reference frame.
-  // Ideally that should happen after one round-trip time.
-  // Add a margin defined by |kRttConfidence|.
-  int64_t update_interval = static_cast<int64_t>(kRttConfidence * rtt_);
-  const int64_t kMinUpdateInterval = 90 * 10;  // Timestamp frequency.
-  if (update_interval < kMinUpdateInterval)
-    update_interval = kMinUpdateInterval;
-  // Don't send reference frame updates until we have an established reference.
-  if (TimestampDiff(now_ts, last_sent_ref_update_time_) > update_interval &&
-      received_ack_) {
-    flags |= VP8_EFLAG_NO_REF_LAST;  // Don't reference the last frame.
-    if (update_golden_next_) {
-      flags |= VP8_EFLAG_FORCE_GF;    // Update the golden reference.
-      flags |= VP8_EFLAG_NO_UPD_ARF;  // Don't update alt-ref.
-      flags |= VP8_EFLAG_NO_REF_GF;   // Don't reference the golden frame.
-    } else {
-      flags |= VP8_EFLAG_FORCE_ARF;   // Update the alt-ref reference.
-      flags |= VP8_EFLAG_NO_UPD_GF;   // Don't update the golden frame.
-      flags |= VP8_EFLAG_NO_REF_ARF;  // Don't reference the alt-ref frame.
-    }
-    last_sent_ref_picture_id_ = picture_id;
-    last_sent_ref_update_time_ = now_ts;
-  } else {
-    // No update of golden or alt-ref. We can therefore freely reference the
-    // established reference frame and the last frame.
-    if (established_golden_)
-      flags |= VP8_EFLAG_NO_REF_ARF;  // Don't reference the alt-ref frame.
-    else
-      flags |= VP8_EFLAG_NO_REF_GF;  // Don't reference the golden frame.
-    flags |= VP8_EFLAG_NO_UPD_GF;   // Don't update the golden frame.
-    flags |= VP8_EFLAG_NO_UPD_ARF;  // Don't update the alt-ref frame.
-  }
-  return flags;
-}
-
-void ReferencePictureSelection::EncodedKeyFrame(int picture_id) {
-  last_sent_ref_picture_id_ = picture_id;
-  received_ack_ = false;
-}
-
-void ReferencePictureSelection::SetRtt(int64_t rtt) {
-  // Convert from milliseconds to timestamp frequency.
-  rtt_ = 90 * rtt;
-}
-
-int64_t ReferencePictureSelection::TimestampDiff(uint32_t new_ts,
-                                                 uint32_t old_ts) {
-  if (old_ts > new_ts) {
-    // Assuming this is a wrap, doing a compensated subtraction.
-    return (new_ts + (static_cast<int64_t>(1) << 32)) - old_ts;
-  }
-  return new_ts - old_ts;
-}
-
-}  // namespace webrtc
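A note on the deleted wrap handling above: TimestampDiff() compensates for 32-bit RTP timestamp wrap-around by adding 2^32 before subtracting, so for new_ts = 1 and old_ts = 0xffffffff (where a naive subtraction would underflow) the result is (1 + 4294967296) - 4294967295 = 2 ticks; the TestWrap case in the deleted unittest below exercises exactly this boundary. Similarly, a hypothetical SetRtt(100) stores rtt_ = 9000 ticks, so the reference-update interval becomes 1.33 * 9000 ≈ 11970 ticks, about 133 ms at the 90 kHz clock.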

View File

@@ -1,76 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-/*
- * This file defines classes for doing reference picture selection, primarily
- * with VP8.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_REFERENCE_PICTURE_SELECTION_H_
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_REFERENCE_PICTURE_SELECTION_H_
-
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-
-class ReferencePictureSelection {
- public:
-  ReferencePictureSelection();
-  void Init();
-
-  // Report a received reference picture selection indication. This will
-  // introduce a new established reference if the received RPSI isn't too late.
-  void ReceivedRPSI(int rpsi_picture_id);
-
-  // Report a received slice loss indication. Returns true if a refresh frame
-  // must be sent to the receiver, which is accomplished by only predicting
-  // from the established reference.
-  // |now_ts| is the RTP timestamp corresponding to the current time. Typically
-  // the capture timestamp of the frame currently being processed.
-  // Returns true if it's time to encode a decoder refresh, otherwise false.
-  bool ReceivedSLI(uint32_t now_ts);
-
-  // Returns the recommended VP8 encode flags needed. May refresh the decoder
-  // and/or update the reference buffers.
-  // |picture_id| picture id of the frame to be encoded.
-  // |send_refresh| should be set to true if a decoder refresh should be
-  // encoded, otherwise false.
-  // |now_ts| is the RTP timestamp corresponding to the current time. Typically
-  // the capture timestamp of the frame currently being processed.
-  // Returns the flags to be given to the libvpx encoder when encoding the next
-  // frame.
-  int EncodeFlags(int picture_id, bool send_refresh, uint32_t now_ts);
-
-  // Notify the RPS that the frame with picture id |picture_id| was encoded as
-  // a key frame, effectively updating all reference buffers.
-  void EncodedKeyFrame(int picture_id);
-
-  // Set the round-trip time between the sender and the receiver to |rtt|
-  // milliseconds.
-  void SetRtt(int64_t rtt);
-
- private:
-  static int64_t TimestampDiff(uint32_t new_ts, uint32_t old_ts);
-
-  const double kRttConfidence;
-
-  bool update_golden_next_;
-  bool established_golden_;
-  bool received_ack_;
-  int last_sent_ref_picture_id_;
-  uint32_t last_sent_ref_update_time_;
-  int established_ref_picture_id_;
-  uint32_t last_refresh_time_;
-  int64_t rtt_;
-};
-
-}  // namespace webrtc
-
-#endif  // WEBRTC_MODULES_VIDEO_CODING_CODECS_REFERENCE_PICTURE_SELECTION_H_
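For reviewers unfamiliar with the class being removed, a minimal sketch of how an encoder was expected to drive it, mirroring the feedback_mode_ branch deleted from vp8_impl.cc further down. The helper function and its parameter names are hypothetical; the API calls are from the header above, which no longer exists after this change:

// Hypothetical helper (not part of the deleted code): map incoming RTCP
// feedback to libvpx encode flags for the next frame.
#include "webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.h"

int FlagsForNextFrame(webrtc::ReferencePictureSelection* rps,
                      int picture_id,       // Picture id of the frame to encode.
                      uint32_t now_rtp_ts,  // 90 kHz RTP timestamp for "now".
                      bool got_sli,         // RTCP SLI arrived since last frame.
                      bool got_rpsi,        // RTCP RPSI arrived since last frame.
                      int rpsi_picture_id) {
  if (got_rpsi) {
    // May establish a new golden/alt-ref reference if the id matches the
    // last reference frame we sent.
    rps->ReceivedRPSI(rpsi_picture_id);
  }
  // ReceivedSLI() rate-limits refreshes to one per round-trip time.
  bool send_refresh = got_sli && rps->ReceivedSLI(now_rtp_ts);
  // Returns VP8_EFLAG_* bits to pass to vpx_codec_encode().
  return rps->EncodeFlags(picture_id, send_refresh, now_rtp_ts);
}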

View File

@@ -1,100 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "vpx/vp8cx.h"
-#include "vpx/vpx_encoder.h"
-#include "webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.h"
-#include "webrtc/test/gtest.h"
-
-using webrtc::ReferencePictureSelection;
-
-// The minimum time between reference frame updates. Should match the value
-// set in reference_picture_selection.h.
-static const uint32_t kMinUpdateInterval = 10;
-// The minimum time between decoder refreshes through restricted prediction.
-// Should match the value set in reference_picture_selection.h.
-static const int kRtt = 10;
-
-static const int kNoPropagationGolden =
-    VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
-static const int kNoPropagationAltRef =
-    VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
-static const int kPropagateGolden = VP8_EFLAG_FORCE_GF | VP8_EFLAG_NO_UPD_ARF |
-                                    VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST;
-static const int kPropagateAltRef = VP8_EFLAG_FORCE_ARF | VP8_EFLAG_NO_UPD_GF |
-                                    VP8_EFLAG_NO_REF_ARF |
-                                    VP8_EFLAG_NO_REF_LAST;
-static const int kRefreshFromGolden =
-    VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_ARF;
-static const int kRefreshFromAltRef =
-    VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF;
-
-class TestRPS : public ::testing::Test {
- protected:
-  virtual void SetUp() {
-    rps_.Init();
-    // Initialize with sending a key frame and acknowledging it.
-    rps_.EncodedKeyFrame(0);
-    rps_.ReceivedRPSI(0);
-    rps_.SetRtt(kRtt);
-  }
-
-  ReferencePictureSelection rps_;
-};
-
-TEST_F(TestRPS, TestPropagateReferenceFrames) {
-  // Should propagate the alt-ref reference.
-  uint32_t time = (4 * kMinUpdateInterval) / 3 + 1;
-  EXPECT_EQ(rps_.EncodeFlags(1, false, 90 * time), kPropagateAltRef);
-  rps_.ReceivedRPSI(1);
-  time += (4 * (time + kMinUpdateInterval)) / 3 + 1;
-  // Should propagate the golden reference.
-  EXPECT_EQ(rps_.EncodeFlags(2, false, 90 * time), kPropagateGolden);
-  rps_.ReceivedRPSI(2);
-  // Should propagate the alt-ref reference.
-  time = (4 * (time + kMinUpdateInterval)) / 3 + 1;
-  EXPECT_EQ(rps_.EncodeFlags(3, false, 90 * time), kPropagateAltRef);
-  rps_.ReceivedRPSI(3);
-  // Shouldn't propagate any reference frames (except last), and the
-  // established reference is alt-ref.
-  time = time + kMinUpdateInterval;
-  EXPECT_EQ(rps_.EncodeFlags(4, false, 90 * time), kNoPropagationAltRef);
-}
-
-TEST_F(TestRPS, TestDecoderRefresh) {
-  uint32_t time = kRtt + 1;
-  // No more than one refresh per RTT.
-  EXPECT_EQ(rps_.ReceivedSLI(90 * time), true);
-  time += 5;
-  EXPECT_EQ(rps_.ReceivedSLI(90 * time), false);
-  time += kRtt - 4;
-  EXPECT_EQ(rps_.ReceivedSLI(90 * time), true);
-  // Enough time has elapsed since the previous reference propagation, so we
-  // will get both a refresh from golden and a propagation of alt-ref.
-  EXPECT_EQ(rps_.EncodeFlags(5, true, 90 * time),
-            kRefreshFromGolden | kPropagateAltRef);
-  rps_.ReceivedRPSI(5);
-  time += kRtt + 1;
-  // Enough time for a new refresh, but not enough time for a reference
-  // propagation.
-  EXPECT_EQ(rps_.ReceivedSLI(90 * time), true);
-  EXPECT_EQ(rps_.EncodeFlags(6, true, 90 * time),
-            kRefreshFromAltRef | kNoPropagationAltRef);
-}
-
-TEST_F(TestRPS, TestWrap) {
-  EXPECT_EQ(rps_.ReceivedSLI(0xffffffff), true);
-  EXPECT_EQ(rps_.ReceivedSLI(1), false);
-  EXPECT_EQ(rps_.ReceivedSLI(90 * 100), true);
-  EXPECT_EQ(rps_.EncodeFlags(7, false, 0xffffffff), kPropagateAltRef);
-  EXPECT_EQ(rps_.EncodeFlags(8, false, 1), kNoPropagationGolden);
-  EXPECT_EQ(rps_.EncodeFlags(10, false, 90 * 100), kPropagateAltRef);
-}

View File

@@ -75,9 +75,6 @@ int VerifyCodec(const webrtc::VideoCodec* inst) {
   if (inst->width <= 1 || inst->height <= 1) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
-  if (inst->VP8().feedbackModeOn && inst->numberOfSimulcastStreams > 1) {
-    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
-  }
   if (inst->VP8().automaticResizeOn && inst->numberOfSimulcastStreams > 1) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }

View File

@@ -80,10 +80,6 @@ TEST_F(TestSimulcastEncoderAdapter, TestSwitchingToOneOddStream) {
   TestVp8Simulcast::TestSwitchingToOneOddStream();
 }
 
-TEST_F(TestSimulcastEncoderAdapter, TestRPSIEncodeDecode) {
-  TestVp8Simulcast::TestRPSIEncodeDecode();
-}
-
 TEST_F(TestSimulcastEncoderAdapter, TestStrideEncodeDecode) {
   TestVp8Simulcast::TestStrideEncodeDecode();
 }
@@ -96,10 +92,6 @@ TEST_F(TestSimulcastEncoderAdapter, TestSpatioTemporalLayers321PatternEncoder) {
   TestVp8Simulcast::TestSpatioTemporalLayers321PatternEncoder();
 }
 
-TEST_F(TestSimulcastEncoderAdapter, DISABLED_TestRPSIEncoder) {
-  TestVp8Simulcast::TestRPSIEncoder();
-}
-
 class MockVideoEncoder : public VideoEncoder {
  public:
   // TODO(nisse): Valid overrides commented out, because the gmock
@@ -301,7 +293,6 @@ class TestSimulcastEncoderAdapterFake : public ::testing::Test,
     EXPECT_EQ(ref.maxFramerate, target.maxFramerate);
     EXPECT_EQ(ref.VP8().pictureLossIndicationOn,
              target.VP8().pictureLossIndicationOn);
-    EXPECT_EQ(ref.VP8().feedbackModeOn, target.VP8().feedbackModeOn);
     EXPECT_EQ(ref.VP8().complexity, target.VP8().complexity);
     EXPECT_EQ(ref.VP8().resilience, target.VP8().resilience);
     EXPECT_EQ(ref.VP8().numberOfTemporalLayers,

View File

@@ -67,14 +67,6 @@ TEST_F(TestVp8Impl, TestSwitchingToOneSmallStream) {
   TestVp8Simulcast::TestSwitchingToOneSmallStream();
 }
 
-TEST_F(TestVp8Impl, TestRPSIEncoder) {
-  TestVp8Simulcast::TestRPSIEncoder();
-}
-
-TEST_F(TestVp8Impl, TestRPSIEncodeDecode) {
-  TestVp8Simulcast::TestRPSIEncodeDecode();
-}
-
 TEST_F(TestVp8Impl, TestSaptioTemporalLayers333PatternEncoder) {
   TestVp8Simulcast::TestSaptioTemporalLayers333PatternEncoder();
 }

View File

@@ -224,7 +224,6 @@ class TestVp8Simulcast : public ::testing::Test {
     settings->VP8()->denoisingOn = true;
     settings->VP8()->errorConcealmentOn = false;
     settings->VP8()->automaticResizeOn = false;
-    settings->VP8()->feedbackModeOn = false;
     settings->VP8()->frameDroppingOn = true;
     settings->VP8()->keyFrameInterval = 3000;
   }
@@ -572,147 +571,6 @@ class TestVp8Simulcast : public ::testing::Test {
   void TestSwitchingToOneSmallStream() { SwitchingToOneStream(4, 4); }
 
-  void TestRPSIEncoder() {
-    Vp8TestEncodedImageCallback encoder_callback;
-    encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
-
-    SetRates(kMaxBitrates[2], 30);  // To get all three streams.
-
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
-    int picture_id = -1;
-    int temporal_layer = -1;
-    bool layer_sync = false;
-    encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
-                                             &layer_sync, 0);
-    EXPECT_EQ(0, temporal_layer);
-    EXPECT_TRUE(layer_sync);
-    int key_frame_picture_id = picture_id;
-
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
-    encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
-                                             &layer_sync, 0);
-    EXPECT_EQ(2, temporal_layer);
-    EXPECT_TRUE(layer_sync);
-
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
-    encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
-                                             &layer_sync, 0);
-    EXPECT_EQ(1, temporal_layer);
-    EXPECT_TRUE(layer_sync);
-
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
-    encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
-                                             &layer_sync, 0);
-    EXPECT_EQ(2, temporal_layer);
-    EXPECT_FALSE(layer_sync);
-
-    CodecSpecificInfo codec_specific;
-    codec_specific.codecType = kVideoCodecVP8;
-    codec_specific.codecSpecific.VP8.hasReceivedRPSI = true;
-    // Must match last key frame to trigger.
-    codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id;
-
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, &codec_specific, NULL));
-    encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
-                                             &layer_sync, 0);
-    EXPECT_EQ(0, temporal_layer);
-    EXPECT_TRUE(layer_sync);
-
-    // Must match last key frame to trigger, test bad id.
-    codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id + 17;
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, &codec_specific, NULL));
-    encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
-                                             &layer_sync, 0);
-    EXPECT_EQ(2, temporal_layer);
-    // The previous frame was a base layer sync (since it was a frame that
-    // only predicts from key frame and hence resets the temporal pattern),
-    // so this frame (the next one) must have |layer_sync| set to true.
-    EXPECT_TRUE(layer_sync);
-  }
-
-  void TestRPSIEncodeDecode() {
-    Vp8TestEncodedImageCallback encoder_callback;
-    Vp8TestDecodedImageCallback decoder_callback;
-    encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
-    decoder_->RegisterDecodeCompleteCallback(&decoder_callback);
-
-    SetRates(kMaxBitrates[2], 30);  // To get all three streams.
-
-    // Set color.
-    int plane_offset[kNumOfPlanes];
-    plane_offset[kYPlane] = kColorY;
-    plane_offset[kUPlane] = kColorU;
-    plane_offset[kVPlane] = kColorV;
-    CreateImage(input_buffer_, plane_offset);
-
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
-    int picture_id = -1;
-    int temporal_layer = -1;
-    bool layer_sync = false;
-    encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
-                                             &layer_sync, 0);
-    EXPECT_EQ(0, temporal_layer);
-    EXPECT_TRUE(layer_sync);
-    int key_frame_picture_id = picture_id;
-
-    // Change color.
-    plane_offset[kYPlane] += 1;
-    plane_offset[kUPlane] += 1;
-    plane_offset[kVPlane] += 1;
-    CreateImage(input_buffer_, plane_offset);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
-
-    // Change color.
-    plane_offset[kYPlane] += 1;
-    plane_offset[kUPlane] += 1;
-    plane_offset[kVPlane] += 1;
-    CreateImage(input_buffer_, plane_offset);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
-
-    // Change color.
-    plane_offset[kYPlane] += 1;
-    plane_offset[kUPlane] += 1;
-    plane_offset[kVPlane] += 1;
-    CreateImage(input_buffer_, plane_offset);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
-
-    CodecSpecificInfo codec_specific;
-    codec_specific.codecType = kVideoCodecVP8;
-    codec_specific.codecSpecific.VP8.hasReceivedRPSI = true;
-    // Must match last key frame to trigger.
-    codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id;
-
-    // Change color back to original.
-    plane_offset[kYPlane] = kColorY;
-    plane_offset[kUPlane] = kColorU;
-    plane_offset[kVPlane] = kColorV;
-    CreateImage(input_buffer_, plane_offset);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, &codec_specific, NULL));
-
-    EncodedImage encoded_frame;
-    encoder_callback.GetLastEncodedKeyFrame(&encoded_frame);
-    decoder_->Decode(encoded_frame, false, NULL);
-    encoder_callback.GetLastEncodedFrame(&encoded_frame);
-    decoder_->Decode(encoded_frame, false, NULL);
-    EXPECT_EQ(2, decoder_callback.DecodedFrames());
-  }
-
   // Test the layer pattern and sync flag for various spatial-temporal patterns.
   // 3-3-3 pattern: 3 temporal layers for all spatial streams, so same
   // temporal_layer id and layer_sync is expected for all streams.

View File

@@ -134,7 +134,6 @@ VP8EncoderImpl::VP8EncoderImpl()
     : encoded_complete_callback_(nullptr),
       inited_(false),
       timestamp_(0),
-      feedback_mode_(false),
       qp_max_(56),  // Setting for max quantizer.
       cpu_speed_default_(-6),
       number_of_cores_(0),
@@ -350,8 +349,6 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
   SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst);
 
-  feedback_mode_ = inst->VP8().feedbackModeOn;
-
   number_of_cores_ = number_of_cores;
   timestamp_ = 0;
   codec_ = *inst;
@@ -452,11 +449,7 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
   // Set the maximum target size of any key-frame.
   rc_max_intra_target_ = MaxIntraTarget(configurations_[0].rc_buf_optimal_sz);
-  if (feedback_mode_) {
-    // Disable periodic key frames if we get feedback from the decoder
-    // through SLI and RPSI.
-    configurations_[0].kf_mode = VPX_KF_DISABLED;
-  } else if (inst->VP8().keyFrameInterval > 0) {
+  if (inst->VP8().keyFrameInterval > 0) {
     configurations_[0].kf_mode = VPX_KF_AUTO;
     configurations_[0].kf_max_dist = inst->VP8().keyFrameInterval;
   } else {
@@ -540,7 +533,6 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
     temporal_layers_[stream_idx]->UpdateConfiguration(&configurations_[i]);
   }
 
-  rps_.Init();
   return InitAndSetControlSettings();
 }
@@ -770,47 +762,8 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
       flags[i] = VPX_EFLAG_FORCE_KF;
     }
     std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);
-  } else if (codec_specific_info &&
-             codec_specific_info->codecType == kVideoCodecVP8) {
-    if (feedback_mode_) {
-      // Handle RPSI and SLI messages and set up the appropriate encode flags.
-      bool sendRefresh = false;
-      if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) {
-        rps_.ReceivedRPSI(codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
-      }
-      if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) {
-        sendRefresh = rps_.ReceivedSLI(frame.timestamp());
-      }
-      for (size_t i = 0; i < encoders_.size(); ++i) {
-        flags[i] = rps_.EncodeFlags(picture_id_[i], sendRefresh,
-                                    frame.timestamp());
-      }
-    } else {
-      if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) {
-        // Is this our last key frame? If not ignore.
-        // |picture_id_| is defined per spatial stream/layer, so check that
-        // |RPSI| matches the last key frame from any of the spatial streams.
-        // If so, then all spatial streams for this encoding will predict from
-        // its long-term reference (last key frame).
-        int RPSI = codec_specific_info->codecSpecific.VP8.pictureIdRPSI;
-        for (size_t i = 0; i < encoders_.size(); ++i) {
-          if (last_key_frame_picture_id_[i] == RPSI) {
-            // Request for a long term reference frame.
-            // Note 1: overwrites any temporal settings.
-            // Note 2: VP8_EFLAG_NO_UPD_ENTROPY is not needed as that flag is
-            //         set by error_resilient mode.
-            for (size_t j = 0; j < encoders_.size(); ++j) {
-              flags[j] = VP8_EFLAG_NO_UPD_ARF;
-              flags[j] |= VP8_EFLAG_NO_REF_GF;
-              flags[j] |= VP8_EFLAG_NO_REF_LAST;
-            }
-            only_predict_from_key_frame = true;
-            break;
-          }
-        }
-      }
-    }
   }
 
   // Set the encoder frame flags and temporal layer_id for each spatial stream.
   // Note that |temporal_layers_| are defined starting from lowest resolution at
   // position 0 to highest resolution at position |encoders_.size() - 1|,
@@ -960,7 +913,6 @@ int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
        // check if encoded frame is a key frame
        if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
          encoded_images_[encoder_idx]._frameType = kVideoFrameKey;
-          rps_.EncodedKeyFrame(picture_id_[stream_idx]);
        }
        PopulateCodecSpecific(&codec_specific, *pkt, stream_idx,
                              input_image.timestamp(),
@@ -1011,7 +963,6 @@ VideoEncoder::ScalingSettings VP8EncoderImpl::GetScalingSettings() const {
 }
 
 int VP8EncoderImpl::SetChannelParameters(uint32_t packetLoss, int64_t rtt) {
-  rps_.SetRtt(rtt);
   return WEBRTC_VIDEO_CODEC_OK;
 }
@@ -1025,7 +976,6 @@ VP8DecoderImpl::VP8DecoderImpl()
     : buffer_pool_(false, 300 /* max_number_of_buffers*/),
      decode_complete_callback_(NULL),
      inited_(false),
-      feedback_mode_(false),
      decoder_(NULL),
      image_format_(VPX_IMG_FMT_NONE),
      ref_frame_(NULL),
@@ -1050,9 +1000,6 @@ int VP8DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
     decoder_ = new vpx_codec_ctx_t;
     memset(decoder_, 0, sizeof(*decoder_));
   }
-  if (inst && inst->codecType == kVideoCodecVP8) {
-    feedback_mode_ = inst->VP8().feedbackModeOn;
-  }
   vpx_codec_dec_cfg_t cfg;
   // Setting number of threads to a constant value (1)
   cfg.threads = 1;
@@ -1141,21 +1088,18 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
  }
-  // Restrict error propagation using key frame requests. Disabled when
-  // the feedback mode is enabled (RPS).
-  // Reset on a key frame refresh.
-  if (!feedback_mode_) {
-    if (input_image._frameType == kVideoFrameKey &&
-        input_image._completeFrame) {
-      propagation_cnt_ = -1;
-      // Start count on first loss.
-    } else if ((!input_image._completeFrame || missing_frames) &&
-               propagation_cnt_ == -1) {
-      propagation_cnt_ = 0;
-    }
-    if (propagation_cnt_ >= 0) {
-      propagation_cnt_++;
-    }
-  }
+  // Restrict error propagation using key frame requests.
+  // Reset on a key frame refresh.
+  if (input_image._frameType == kVideoFrameKey &&
+      input_image._completeFrame) {
+    propagation_cnt_ = -1;
+    // Start count on first loss.
+  } else if ((!input_image._completeFrame || missing_frames) &&
+             propagation_cnt_ == -1) {
+    propagation_cnt_ = 0;
+  }
+  if (propagation_cnt_ >= 0) {
+    propagation_cnt_++;
+  }
 
  vpx_codec_iter_t iter = NULL;
@@ -1200,48 +1144,6 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
      propagation_cnt_ = 0;
    return ret;
  }
-  if (feedback_mode_) {
-    // Whenever we receive an incomplete key frame all reference buffers will
-    // be corrupt. If that happens we must request new key frames until we
-    // decode a complete key frame.
-    if (input_image._frameType == kVideoFrameKey && !input_image._completeFrame)
-      return WEBRTC_VIDEO_CODEC_ERROR;
-    // Check for reference updates and last reference buffer corruption and
-    // signal successful reference propagation or frame corruption to the
-    // encoder.
-    int reference_updates = 0;
-    if (vpx_codec_control(decoder_, VP8D_GET_LAST_REF_UPDATES,
-                          &reference_updates)) {
-      // Reset to avoid requesting key frames too often.
-      if (propagation_cnt_ > 0) {
-        propagation_cnt_ = 0;
-      }
-      return WEBRTC_VIDEO_CODEC_ERROR;
-    }
-    int corrupted = 0;
-    if (vpx_codec_control(decoder_, VP8D_GET_FRAME_CORRUPTED, &corrupted)) {
-      // Reset to avoid requesting key frames too often.
-      if (propagation_cnt_ > 0)
-        propagation_cnt_ = 0;
-      return WEBRTC_VIDEO_CODEC_ERROR;
-    }
-    int16_t picture_id = -1;
-    if (codec_specific_info) {
-      picture_id = codec_specific_info->codecSpecific.VP8.pictureId;
-    }
-    if (picture_id > -1) {
-      if (((reference_updates & VP8_GOLD_FRAME) ||
-           (reference_updates & VP8_ALTR_FRAME)) &&
-          !corrupted) {
-        decode_complete_callback_->ReceivedDecodedReferenceFrame(picture_id);
-      }
-      decode_complete_callback_->ReceivedDecodedFrame(picture_id);
-    }
-    if (corrupted) {
-      // we can decode but with artifacts
-      return WEBRTC_VIDEO_CODEC_REQUEST_SLI;
-    }
-  }
 
  // Check Vs. threshold
  if (propagation_cnt_ > kVp8ErrorPropagationTh) {
    // Reset to avoid requesting key frames too often.

View File

@@ -27,7 +27,6 @@
 #include "webrtc/common_video/include/i420_buffer_pool.h"
 #include "webrtc/modules/video_coding/include/video_codec_interface.h"
 #include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
-#include "webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.h"
 #include "webrtc/modules/video_coding/utility/quality_scaler.h"
 #include "webrtc/video_frame.h"
@@ -97,13 +96,11 @@ class VP8EncoderImpl : public VP8Encoder {
   VideoCodec codec_;
   bool inited_;
   int64_t timestamp_;
-  bool feedback_mode_;
   int qp_max_;
   int cpu_speed_default_;
   int number_of_cores_;
   uint32_t rc_max_intra_target_;
   int token_partitions_;
-  ReferencePictureSelection rps_;
   std::vector<TemporalLayers*> temporal_layers_;
   bool down_scale_requested_;
   uint32_t down_scale_bitrate_;
@@ -156,7 +153,6 @@ class VP8DecoderImpl : public VP8Decoder {
   I420BufferPool buffer_pool_;
   DecodedImageCallback* decode_complete_callback_;
   bool inited_;
-  bool feedback_mode_;
   vpx_codec_ctx_t* decoder_;
   VideoCodec codec_;
   int image_format_;

View File

@@ -28,10 +28,6 @@ class RTPFragmentationHeader;  // forward declaration
 // Note: if any pointers are added to this struct, it must be fitted
 // with a copy-constructor. See below.
 struct CodecSpecificInfoVP8 {
-  bool hasReceivedSLI;
-  uint8_t pictureIdSLI;
-  bool hasReceivedRPSI;
-  uint64_t pictureIdRPSI;
   int16_t pictureId;  // Negative value to skip pictureId.
   bool nonReference;
   uint8_t simulcastIdx;
@@ -42,10 +38,6 @@ struct CodecSpecificInfoVP8 {
 };
 
 struct CodecSpecificInfoVP9 {
-  bool has_received_sli;
-  uint8_t picture_id_sli;
-  bool has_received_rpsi;
-  uint64_t picture_id_rpsi;
   int16_t picture_id;  // Negative value to skip pictureId.
   bool inter_pic_predicted;  // This layer frame is dependent on previously

View File

@@ -608,20 +608,6 @@ void ViEEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
   overuse_detector_.FrameCaptured(video_frame, time_when_posted_us);
 
-  if (codec_type_ == webrtc::kVideoCodecVP8) {
-    webrtc::CodecSpecificInfo codec_specific_info;
-    codec_specific_info.codecType = webrtc::kVideoCodecVP8;
-    codec_specific_info.codecSpecific.VP8.hasReceivedRPSI = has_received_rpsi_;
-    codec_specific_info.codecSpecific.VP8.hasReceivedSLI = has_received_sli_;
-    codec_specific_info.codecSpecific.VP8.pictureIdRPSI = picture_id_rpsi_;
-    codec_specific_info.codecSpecific.VP8.pictureIdSLI = picture_id_sli_;
-    has_received_sli_ = false;
-    has_received_rpsi_ = false;
-
-    video_sender_.AddVideoFrame(video_frame, &codec_specific_info);
-    return;
-  }
-
   video_sender_.AddVideoFrame(video_frame, nullptr);
 }
@@ -672,6 +658,7 @@ void ViEEncoder::SendStatistics(uint32_t bit_rate, uint32_t frame_rate) {
     stats_proxy_->OnEncoderStatsUpdate(frame_rate, bit_rate);
 }
 
+// TODO(nisse): Delete.
 void ViEEncoder::OnReceivedSLI(uint8_t picture_id) {
   if (!encoder_queue_.IsCurrent()) {
     encoder_queue_.PostTask([this, picture_id] { OnReceivedSLI(picture_id); });
@@ -682,6 +669,7 @@ void ViEEncoder::OnReceivedSLI(uint8_t picture_id) {
   has_received_sli_ = true;
 }
 
+// TODO(nisse): Delete.
 void ViEEncoder::OnReceivedRPSI(uint64_t picture_id) {
   if (!encoder_queue_.IsCurrent()) {
     encoder_queue_.PostTask([this, picture_id] { OnReceivedRPSI(picture_id); });