Change PacketBuffer to return its result rather than use a callback

Bug: None
Change-Id: I8cc05dd46e811d6db37af520d2106af21c671def
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/157893
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Commit-Queue: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#29589}
Danil Chapovalov
2019-10-22 17:12:42 +02:00
committed by Commit Bot
parent 2522b25f19
commit ce1ffcdc06
8 changed files with 173 additions and 191 deletions
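
In short, InsertPacket() and the new InsertPadding() now hand any frames they complete back to the caller in an InsertResult, instead of pushing them through OnAssembledFrameCallback, and an overflowed/cleared buffer is reported via InsertResult::buffer_cleared rather than a false return value. The resulting call pattern on the receiving side looks roughly like this (a condensed sketch of the RtpVideoStreamReceiver changes further down; it is not a standalone translation unit):

  video_coding::PacketBuffer::InsertResult result =
      packet_buffer_.InsertPacket(&packet);
  // Frames completed by this packet are returned, not delivered via callback.
  for (std::unique_ptr<video_coding::RtpFrameObject>& frame : result.frames) {
    OnAssembledFrame(std::move(frame));
  }
  // A cleared (overflowed) buffer now surfaces as a flag instead of a false return.
  if (result.buffer_cleared) {
    RequestKeyFrame();
  }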

modules/video_coding/BUILD.gn

@@ -84,6 +84,7 @@ rtc_library("video_coding") {
     "../../system_wrappers:field_trial",
     "../../system_wrappers:metrics",
     "../rtp_rtcp:rtp_video_header",
+    "//third_party/abseil-cpp/absl/base:core_headers",
     "//third_party/abseil-cpp/absl/memory",
   ]

modules/video_coding/packet_buffer.cc

@@ -33,15 +33,13 @@ namespace video_coding {
 PacketBuffer::PacketBuffer(Clock* clock,
                            size_t start_buffer_size,
-                           size_t max_buffer_size,
-                           OnAssembledFrameCallback* assembled_frame_callback)
+                           size_t max_buffer_size)
     : clock_(clock),
       max_size_(max_buffer_size),
       first_seq_num_(0),
       first_packet_received_(false),
       is_cleared_to_first_seq_num_(false),
       buffer_(start_buffer_size),
-      assembled_frame_callback_(assembled_frame_callback),
       unique_frames_seen_(0),
       sps_pps_idr_is_h264_keyframe_(
           field_trial::IsEnabled("WebRTC-SpsPpsIdrIsH264Keyframe")) {
@@ -55,76 +53,70 @@ PacketBuffer::~PacketBuffer() {
   Clear();
 }
 
-bool PacketBuffer::InsertPacket(VCMPacket* packet) {
-  std::vector<std::unique_ptr<RtpFrameObject>> found_frames;
-  {
-    rtc::CritScope lock(&crit_);
-
-    OnTimestampReceived(packet->timestamp);
-
-    uint16_t seq_num = packet->seqNum;
-    size_t index = seq_num % buffer_.size();
-
-    if (!first_packet_received_) {
-      first_seq_num_ = seq_num;
-      first_packet_received_ = true;
-    } else if (AheadOf(first_seq_num_, seq_num)) {
-      // If we have explicitly cleared past this packet then it's old,
-      // don't insert it, just silently ignore it.
-      if (is_cleared_to_first_seq_num_) {
-        delete[] packet->dataPtr;
-        packet->dataPtr = nullptr;
-        return true;
-      }
-
-      first_seq_num_ = seq_num;
-    }
-
-    if (buffer_[index].used) {
-      // Duplicate packet, just delete the payload.
-      if (buffer_[index].seq_num() == packet->seqNum) {
-        delete[] packet->dataPtr;
-        packet->dataPtr = nullptr;
-        return true;
-      }
-
-      // The packet buffer is full, try to expand the buffer.
-      while (ExpandBufferSize() && buffer_[seq_num % buffer_.size()].used) {
-      }
-      index = seq_num % buffer_.size();
-
-      // Packet buffer is still full since we were unable to expand the buffer.
-      if (buffer_[index].used) {
-        // Clear the buffer, delete payload, and return false to signal that a
-        // new keyframe is needed.
-        RTC_LOG(LS_WARNING) << "Clear PacketBuffer and request key frame.";
-        Clear();
-        delete[] packet->dataPtr;
-        packet->dataPtr = nullptr;
-        return false;
-      }
-    }
-
-    StoredPacket& new_entry = buffer_[index];
-    new_entry.continuous = false;
-    new_entry.used = true;
-    new_entry.data = *packet;
-    packet->dataPtr = nullptr;
-
-    UpdateMissingPackets(packet->seqNum);
-
-    int64_t now_ms = clock_->TimeInMilliseconds();
-    last_received_packet_ms_ = now_ms;
-    if (packet->video_header.frame_type == VideoFrameType::kVideoFrameKey)
-      last_received_keyframe_packet_ms_ = now_ms;
-
-    found_frames = FindFrames(seq_num);
-  }
-
-  for (std::unique_ptr<RtpFrameObject>& frame : found_frames)
-    assembled_frame_callback_->OnAssembledFrame(std::move(frame));
-
-  return true;
+PacketBuffer::InsertResult PacketBuffer::InsertPacket(VCMPacket* packet) {
+  PacketBuffer::InsertResult result;
+  rtc::CritScope lock(&crit_);
+
+  OnTimestampReceived(packet->timestamp);
+
+  uint16_t seq_num = packet->seqNum;
+  size_t index = seq_num % buffer_.size();
+
+  if (!first_packet_received_) {
+    first_seq_num_ = seq_num;
+    first_packet_received_ = true;
+  } else if (AheadOf(first_seq_num_, seq_num)) {
+    // If we have explicitly cleared past this packet then it's old,
+    // don't insert it, just silently ignore it.
+    if (is_cleared_to_first_seq_num_) {
+      delete[] packet->dataPtr;
+      packet->dataPtr = nullptr;
+      return result;
+    }
+
+    first_seq_num_ = seq_num;
+  }
+
+  if (buffer_[index].used) {
+    // Duplicate packet, just delete the payload.
+    if (buffer_[index].seq_num() == packet->seqNum) {
+      delete[] packet->dataPtr;
+      packet->dataPtr = nullptr;
+      return result;
+    }
+
+    // The packet buffer is full, try to expand the buffer.
+    while (ExpandBufferSize() && buffer_[seq_num % buffer_.size()].used) {
+    }
+    index = seq_num % buffer_.size();
+
+    // Packet buffer is still full since we were unable to expand the buffer.
+    if (buffer_[index].used) {
+      // Clear the buffer, delete payload, and return false to signal that a
+      // new keyframe is needed.
+      RTC_LOG(LS_WARNING) << "Clear PacketBuffer and request key frame.";
+      Clear();
+      delete[] packet->dataPtr;
+      packet->dataPtr = nullptr;
+      result.buffer_cleared = true;
+      return result;
+    }
+  }
+
+  StoredPacket& new_entry = buffer_[index];
+  new_entry.continuous = false;
+  new_entry.used = true;
+  new_entry.data = *packet;
+  packet->dataPtr = nullptr;
+
+  UpdateMissingPackets(packet->seqNum);
+
+  int64_t now_ms = clock_->TimeInMilliseconds();
+  last_received_packet_ms_ = now_ms;
+  if (packet->video_header.frame_type == VideoFrameType::kVideoFrameKey)
+    last_received_keyframe_packet_ms_ = now_ms;
+
+  result.frames = FindFrames(seq_num);
+  return result;
 }
@@ -198,16 +190,12 @@ void PacketBuffer::Clear() {
   missing_packets_.clear();
 }
 
-void PacketBuffer::PaddingReceived(uint16_t seq_num) {
-  std::vector<std::unique_ptr<RtpFrameObject>> found_frames;
-  {
-    rtc::CritScope lock(&crit_);
-    UpdateMissingPackets(seq_num);
-    found_frames = FindFrames(static_cast<uint16_t>(seq_num + 1));
-  }
-
-  for (std::unique_ptr<RtpFrameObject>& frame : found_frames)
-    assembled_frame_callback_->OnAssembledFrame(std::move(frame));
+PacketBuffer::InsertResult PacketBuffer::InsertPadding(uint16_t seq_num) {
+  PacketBuffer::InsertResult result;
+  rtc::CritScope lock(&crit_);
+  UpdateMissingPackets(seq_num);
+  result.frames = FindFrames(static_cast<uint16_t>(seq_num + 1));
+  return result;
 }
 
 absl::optional<int64_t> PacketBuffer::LastReceivedPacketMs() const {

modules/video_coding/packet_buffer.h

@@ -16,43 +16,37 @@
 #include <set>
 #include <vector>
 
+#include "absl/base/attributes.h"
 #include "api/video/encoded_image.h"
 #include "modules/video_coding/frame_object.h"
 #include "modules/video_coding/packet.h"
 #include "rtc_base/critical_section.h"
 #include "rtc_base/numerics/sequence_number_util.h"
 #include "rtc_base/thread_annotations.h"
 #include "system_wrappers/include/clock.h"
 
 namespace webrtc {
 class Clock;
 
 namespace video_coding {
 
 class RtpFrameObject;
 
-// A frame is assembled when all of its packets have been received.
-class OnAssembledFrameCallback {
- public:
-  virtual ~OnAssembledFrameCallback() {}
-  virtual void OnAssembledFrame(std::unique_ptr<RtpFrameObject> frame) = 0;
-};
-
 class PacketBuffer {
  public:
+  struct InsertResult {
+    std::vector<std::unique_ptr<RtpFrameObject>> frames;
+    // Indicates if the packet buffer was cleared, which means that a key
+    // frame request should be sent.
+    bool buffer_cleared = false;
+  };
+
   // Both |start_buffer_size| and |max_buffer_size| must be a power of 2.
-  PacketBuffer(Clock* clock,
-               size_t start_buffer_size,
-               size_t max_buffer_size,
-               OnAssembledFrameCallback* frame_callback);
+  PacketBuffer(Clock* clock, size_t start_buffer_size, size_t max_buffer_size);
   ~PacketBuffer();
 
-  // Returns true unless the packet buffer is cleared, which means that a key
-  // frame request should be sent. The PacketBuffer will always take ownership
-  // of the |packet.dataPtr| when this function is called.
-  bool InsertPacket(VCMPacket* packet);
+  // The PacketBuffer will always take ownership of the |packet.dataPtr| when
+  // this function is called.
+  InsertResult InsertPacket(VCMPacket* packet) ABSL_MUST_USE_RESULT;
+  InsertResult InsertPadding(uint16_t seq_num) ABSL_MUST_USE_RESULT;
   void ClearTo(uint16_t seq_num);
   void Clear();
-  void PaddingReceived(uint16_t seq_num);
 
   // Timestamp (not RTP timestamp) of the last received packet/keyframe packet.
   absl::optional<int64_t> LastReceivedPacketMs() const;
@@ -132,10 +126,6 @@ class PacketBuffer {
   // determine continuity between them.
   std::vector<StoredPacket> buffer_ RTC_GUARDED_BY(crit_);
 
-  // Called when all packets in a frame are received, allowing the frame
-  // to be assembled.
-  OnAssembledFrameCallback* const assembled_frame_callback_;
-
   // Timestamp (not RTP timestamp) of the last received packet/keyframe packet.
   absl::optional<int64_t> last_received_packet_ms_ RTC_GUARDED_BY(crit_);
   absl::optional<int64_t> last_received_keyframe_packet_ms_
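
Because the result is returned, tests can assert on it directly with gmock matchers; the updated unit tests below do exactly that, along these lines (condensed from the unittest diff; the KeyFrame() matcher and FirstSeqNums() helper are defined in that file):

  EXPECT_THAT(packet_buffer_.InsertPacket(&packet_).frames,
              ElementsAre(KeyFrame()));
  EXPECT_THAT(FirstSeqNums(packet_buffer_.InsertPadding(1).frames),
              ElementsAre(2));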

modules/video_coding/packet_buffer_unittest.cc

@@ -21,23 +21,47 @@
 #include "rtc_base/random.h"
 #include "system_wrappers/include/clock.h"
 #include "test/field_trial.h"
+#include "test/gmock.h"
 #include "test/gtest.h"
 
 namespace webrtc {
 namespace video_coding {
 namespace {
 
-class PacketBufferTest : public ::testing::Test,
-                         public OnAssembledFrameCallback {
+using ::testing::ElementsAre;
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+void IgnoreResult(PacketBuffer::InsertResult /*result*/) {}
+
+std::vector<uint16_t> FirstSeqNums(
+    rtc::ArrayView<const std::unique_ptr<RtpFrameObject>> frames) {
+  std::vector<uint16_t> result;
+  for (const auto& frame : frames) {
+    result.push_back(frame->first_seq_num());
+  }
+  return result;
+}
+
+MATCHER(KeyFrame, "") {
+  return arg->frame_type() == VideoFrameType::kVideoFrameKey;
+}
+
+MATCHER(DeltaFrame, "") {
+  return arg->frame_type() == VideoFrameType::kVideoFrameDelta;
+}
+
+class PacketBufferTest : public ::testing::Test {
  protected:
   explicit PacketBufferTest(std::string field_trials = "")
       : scoped_field_trials_(field_trials),
         rand_(0x7732213),
         clock_(new SimulatedClock(0)),
-        packet_buffer_(clock_.get(), kStartSize, kMaxSize, this) {}
+        packet_buffer_(clock_.get(), kStartSize, kMaxSize) {}
 
   uint16_t Rand() { return rand_.Rand<uint16_t>(); }
 
-  void OnAssembledFrame(std::unique_ptr<RtpFrameObject> frame) override {
+  void OnAssembledFrame(std::unique_ptr<RtpFrameObject> frame) {
     uint16_t first_seq_num = frame->first_seq_num();
     if (frames_from_callback_.find(first_seq_num) !=
         frames_from_callback_.end()) {
@@ -73,7 +97,17 @@ class PacketBufferTest : public ::testing::Test,
     packet.sizeBytes = data_size;
     packet.dataPtr = data;
 
-    return packet_buffer_.InsertPacket(&packet);
+    return HandleResult(packet_buffer_.InsertPacket(&packet));
+  }
+
+  // TODO(danilchap): Instead of using this helper, update all tests to validate
+  // result of the InsertPacket/InsertPadding directly for cleaner expectations
+  // and error messages when test fail.
+  bool HandleResult(PacketBuffer::InsertResult result) {
+    for (auto& frame : result.frames) {
+      OnAssembledFrame(std::move(frame));
+    }
+    return !result.buffer_cleared;
   }
 
   void CheckFrame(uint16_t first_seq_num) {
@@ -170,25 +204,24 @@ TEST_F(PacketBufferTest, NackCount) {
   packet.video_header.is_last_packet_in_frame = false;
   packet.timesNacked = 0;
 
-  packet_buffer_.InsertPacket(&packet);
+  IgnoreResult(packet_buffer_.InsertPacket(&packet));
 
   packet.seqNum++;
   packet.video_header.is_first_packet_in_frame = false;
   packet.timesNacked = 1;
-  packet_buffer_.InsertPacket(&packet);
+  IgnoreResult(packet_buffer_.InsertPacket(&packet));
 
   packet.seqNum++;
   packet.timesNacked = 3;
-  packet_buffer_.InsertPacket(&packet);
+  IgnoreResult(packet_buffer_.InsertPacket(&packet));
 
   packet.seqNum++;
   packet.video_header.is_last_packet_in_frame = true;
   packet.timesNacked = 1;
-  packet_buffer_.InsertPacket(&packet);
-
-  ASSERT_EQ(1UL, frames_from_callback_.size());
-  RtpFrameObject* frame = frames_from_callback_.begin()->second.get();
-  EXPECT_EQ(3, frame->times_nacked());
+  auto frames = packet_buffer_.InsertPacket(&packet).frames;
+
+  ASSERT_THAT(frames, SizeIs(1));
+  EXPECT_EQ(frames.front()->times_nacked(), 3);
 }
 
 TEST_F(PacketBufferTest, FrameSize) {
@@ -581,7 +614,7 @@ class PacketBufferH264Test : public PacketBufferTest {
     packet.sizeBytes = data_size;
     packet.dataPtr = data;
 
-    return packet_buffer_.InsertPacket(&packet);
+    return HandleResult(packet_buffer_.InsertPacket(&packet));
   }
 
   bool InsertH264KeyFrameWithAud(
@@ -613,7 +646,7 @@ class PacketBufferH264Test : public PacketBufferTest {
     packet.video_header.is_last_packet_in_frame = false;
     packet.sizeBytes = 0;
     packet.dataPtr = nullptr;
-    if (packet_buffer_.InsertPacket(&packet)) {
+    if (HandleResult(packet_buffer_.InsertPacket(&packet))) {
       // insert IDR
       return InsertH264(seq_num + 1, keyframe, kNotFirst, last, timestamp,
                         data_size, data, width, height);
@@ -690,16 +723,13 @@ TEST_P(PacketBufferH264ParameterizedTest, GetBitstreamBufferPadding) {
   packet.sizeBytes = sizeof(data_data);
   packet.video_header.is_first_packet_in_frame = true;
   packet.video_header.is_last_packet_in_frame = true;
-  packet_buffer_.InsertPacket(&packet);
-
-  ASSERT_EQ(1UL, frames_from_callback_.size());
-  EXPECT_EQ(frames_from_callback_[seq_num]->EncodedImage().size(),
-            sizeof(data_data));
-  EXPECT_EQ(frames_from_callback_[seq_num]->EncodedImage().capacity(),
-            sizeof(data_data));
-  EXPECT_EQ(memcmp(frames_from_callback_[seq_num]->data(), data_data,
-                   sizeof(data_data)),
-            0);
+  auto frames = packet_buffer_.InsertPacket(&packet).frames;
+
+  ASSERT_THAT(frames, SizeIs(1));
+  EXPECT_EQ(frames[0]->first_seq_num(), seq_num);
+  EXPECT_EQ(frames[0]->EncodedImage().size(), sizeof(data_data));
+  EXPECT_EQ(frames[0]->EncodedImage().capacity(), sizeof(data_data));
+  EXPECT_EQ(memcmp(frames[0]->data(), data_data, sizeof(data_data)), 0);
 }
 
 TEST_P(PacketBufferH264ParameterizedTest, FrameResolution) {
@@ -894,7 +924,7 @@ TEST_F(PacketBufferTest, IncomingCodecChange) {
   packet.timestamp = 1;
   packet.seqNum = 1;
   packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
-  EXPECT_TRUE(packet_buffer_.InsertPacket(&packet));
+  EXPECT_THAT(packet_buffer_.InsertPacket(&packet).frames, SizeIs(1));
 
   packet.video_header.codec = kVideoCodecH264;
   auto& h264_header =
@@ -902,17 +932,14 @@ TEST_F(PacketBufferTest, IncomingCodecChange) {
   h264_header.nalus_length = 1;
   packet.timestamp = 3;
   packet.seqNum = 3;
-  EXPECT_TRUE(packet_buffer_.InsertPacket(&packet));
+  EXPECT_THAT(packet_buffer_.InsertPacket(&packet).frames, IsEmpty());
 
   packet.video_header.codec = kVideoCodecVP8;
   packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
   packet.timestamp = 2;
   packet.seqNum = 2;
   packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
 
-  EXPECT_TRUE(packet_buffer_.InsertPacket(&packet));
-
-  EXPECT_EQ(3UL, frames_from_callback_.size());
+  EXPECT_THAT(packet_buffer_.InsertPacket(&packet).frames, SizeIs(2));
 }
 
 TEST_F(PacketBufferTest, TooManyNalusInPacket) {
@@ -928,9 +955,7 @@ TEST_F(PacketBufferTest, TooManyNalusInPacket) {
   h264_header.nalus_length = kMaxNalusPerPacket;
   packet.sizeBytes = 0;
   packet.dataPtr = nullptr;
-  EXPECT_TRUE(packet_buffer_.InsertPacket(&packet));
-
-  EXPECT_EQ(0UL, frames_from_callback_.size());
+  EXPECT_THAT(packet_buffer_.InsertPacket(&packet).frames, IsEmpty());
 }
 
 TEST_P(PacketBufferH264ParameterizedTest, OneFrameFillBuffer) {
@@ -990,10 +1015,10 @@ TEST_P(PacketBufferH264ParameterizedTest, FindFramesOnPadding) {
   InsertH264(2, kDeltaFrame, kFirst, kLast, 1000);
   ASSERT_EQ(1UL, frames_from_callback_.size());
 
-  packet_buffer_.PaddingReceived(1);
-  ASSERT_EQ(2UL, frames_from_callback_.size());
-
   CheckFrame(0);
-  CheckFrame(2);
+  EXPECT_THAT(FirstSeqNums(packet_buffer_.InsertPadding(1).frames),
+              ElementsAre(2));
 }
 
 class PacketBufferH264XIsKeyframeTest : public PacketBufferH264Test {
@@ -1024,11 +1049,8 @@ TEST_F(PacketBufferH264IdrIsKeyframeTest, IdrIsKeyframe) {
   packet_.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus[0].type = H264::NaluType::kIdr;
   h264_header.nalus_length = 1;
-  packet_buffer_.InsertPacket(&packet_);
-
-  ASSERT_EQ(1u, frames_from_callback_.size());
-  EXPECT_EQ(VideoFrameType::kVideoFrameKey,
-            frames_from_callback_[kSeqNum]->frame_type());
+  EXPECT_THAT(packet_buffer_.InsertPacket(&packet_).frames,
+              ElementsAre(KeyFrame()));
 }
 
 TEST_F(PacketBufferH264IdrIsKeyframeTest, SpsPpsIdrIsKeyframe) {
@@ -1039,11 +1061,8 @@ TEST_F(PacketBufferH264IdrIsKeyframeTest, SpsPpsIdrIsKeyframe) {
   h264_header.nalus[2].type = H264::NaluType::kIdr;
   h264_header.nalus_length = 3;
-  packet_buffer_.InsertPacket(&packet_);
-
-  ASSERT_EQ(1u, frames_from_callback_.size());
-  EXPECT_EQ(VideoFrameType::kVideoFrameKey,
-            frames_from_callback_[kSeqNum]->frame_type());
+  EXPECT_THAT(packet_buffer_.InsertPacket(&packet_).frames,
+              ElementsAre(KeyFrame()));
 }
 
 class PacketBufferH264SpsPpsIdrIsKeyframeTest
@@ -1059,11 +1078,8 @@ TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, IdrIsNotKeyframe) {
   h264_header.nalus[0].type = H264::NaluType::kIdr;
   h264_header.nalus_length = 1;
-  packet_buffer_.InsertPacket(&packet_);
-
-  ASSERT_EQ(1u, frames_from_callback_.size());
-  EXPECT_EQ(VideoFrameType::kVideoFrameDelta,
-            frames_from_callback_[5]->frame_type());
+  EXPECT_THAT(packet_buffer_.InsertPacket(&packet_).frames,
+              ElementsAre(DeltaFrame()));
 }
 
 TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, SpsPpsIsNotKeyframe) {
@@ -1073,11 +1089,8 @@ TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, SpsPpsIsNotKeyframe) {
   h264_header.nalus[1].type = H264::NaluType::kPps;
   h264_header.nalus_length = 2;
-  packet_buffer_.InsertPacket(&packet_);
-
-  ASSERT_EQ(1u, frames_from_callback_.size());
-  EXPECT_EQ(VideoFrameType::kVideoFrameDelta,
-            frames_from_callback_[kSeqNum]->frame_type());
+  EXPECT_THAT(packet_buffer_.InsertPacket(&packet_).frames,
+              ElementsAre(DeltaFrame()));
 }
 
 TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, SpsPpsIdrIsKeyframe) {
@@ -1088,12 +1101,10 @@ TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, SpsPpsIdrIsKeyframe) {
   h264_header.nalus[2].type = H264::NaluType::kIdr;
   h264_header.nalus_length = 3;
-  packet_buffer_.InsertPacket(&packet_);
-
-  ASSERT_EQ(1u, frames_from_callback_.size());
-  EXPECT_EQ(VideoFrameType::kVideoFrameKey,
-            frames_from_callback_[kSeqNum]->frame_type());
+  EXPECT_THAT(packet_buffer_.InsertPacket(&packet_).frames,
+              ElementsAre(KeyFrame()));
 }
 
 }  // namespace
 }  // namespace video_coding
 }  // namespace webrtc

test/fuzzers/packet_buffer_fuzzer.cc

@@ -14,20 +14,16 @@
 #include "test/fuzzers/fuzz_data_helper.h"
 
 namespace webrtc {
-namespace {
-class NullCallback : public video_coding::OnAssembledFrameCallback {
-  void OnAssembledFrame(std::unique_ptr<video_coding::RtpFrameObject> frame) {}
-};
-}  // namespace
+
+void IgnoreResult(video_coding::PacketBuffer::InsertResult result) {}
 
 void FuzzOneInput(const uint8_t* data, size_t size) {
   if (size > 200000) {
     return;
   }
   VCMPacket packet;
-  NullCallback callback;
   SimulatedClock clock(0);
-  video_coding::PacketBuffer packet_buffer(&clock, 8, 1024, &callback);
+  video_coding::PacketBuffer packet_buffer(&clock, 8, 1024);
   test::FuzzDataHelper helper(rtc::ArrayView<const uint8_t>(data, size));
 
   while (helper.BytesLeft()) {
@@ -59,7 +55,7 @@ void FuzzOneInput(const uint8_t* data, size_t size) {
     packet.sizeBytes = payload_size;
     packet.dataPtr = new uint8_t[payload_size];
 
-    packet_buffer.InsertPacket(&packet);
+    IgnoreResult(packet_buffer.InsertPacket(&packet));
   }
 }
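
Both insert methods are annotated ABSL_MUST_USE_RESULT (hence the new absl/base:core_headers dependency in BUILD.gn), so silently discarding the return value becomes a compiler warning where the attribute is supported. Callers that genuinely do not care about the outcome, like the fuzzer above, route it through a no-op helper instead:

  // Consumes the result explicitly so the must-use-result annotation is satisfied.
  IgnoreResult(packet_buffer.InsertPacket(&packet));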

video/buffered_frame_decryptor_unittest.cc

@@ -38,11 +38,9 @@ FrameDecryptorInterface::Result DecryptFail() {
 
 }  // namespace
 
-class BufferedFrameDecryptorTest
-    : public ::testing::Test,
-      public OnDecryptedFrameCallback,
-      public OnDecryptionStatusChangeCallback,
-      public video_coding::OnAssembledFrameCallback {
+class BufferedFrameDecryptorTest : public ::testing::Test,
+                                   public OnDecryptedFrameCallback,
+                                   public OnDecryptionStatusChangeCallback {
  public:
   // Implements the OnDecryptedFrameCallbackInterface
   void OnDecryptedFrame(
@@ -54,10 +52,6 @@ class BufferedFrameDecryptorTest
     ++decryption_status_change_count_;
   }
 
-  // Implements the OnAssembledFrameCallback interface.
-  void OnAssembledFrame(
-      std::unique_ptr<video_coding::RtpFrameObject> frame) override {}
-
   // Returns a new fake RtpFrameObject it abstracts the difficult construction
   // of the RtpFrameObject to simplify testing.
   std::unique_ptr<video_coding::RtpFrameObject> CreateRtpFrameObject(

video/rtp_video_stream_receiver.cc

@@ -212,10 +212,7 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver(
       // TODO(bugs.webrtc.org/10336): Let |rtcp_feedback_buffer_| communicate
       // directly with |rtp_rtcp_|.
       rtcp_feedback_buffer_(this, nack_sender, this),
-      packet_buffer_(clock_,
-                     kPacketBufferStartSize,
-                     PacketBufferMaxSize(),
-                     this),
+      packet_buffer_(clock_, kPacketBufferStartSize, PacketBufferMaxSize()),
       has_received_frame_(false),
       frames_decryptable_(false) {
   constexpr bool remb_candidate = true;
@@ -464,9 +461,7 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
   }
 
   rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
-  if (!packet_buffer_.InsertPacket(&packet)) {
-    RequestKeyFrame();
-  }
+  OnInsertedPacket(packet_buffer_.InsertPacket(&packet));
 }
 
 void RtpVideoStreamReceiver::OnRecoveredPacket(const uint8_t* rtp_packet,
@@ -580,6 +575,16 @@ bool RtpVideoStreamReceiver::IsDecryptable() const {
   return frames_decryptable_.load();
 }
 
+void RtpVideoStreamReceiver::OnInsertedPacket(
+    video_coding::PacketBuffer::InsertResult result) {
+  for (std::unique_ptr<video_coding::RtpFrameObject>& frame : result.frames) {
+    OnAssembledFrame(std::move(frame));
+  }
+  if (result.buffer_cleared) {
+    RequestKeyFrame();
+  }
+}
+
 void RtpVideoStreamReceiver::OnAssembledFrame(
     std::unique_ptr<video_coding::RtpFrameObject> frame) {
   RTC_DCHECK_RUN_ON(&network_tc_);
@@ -780,7 +785,7 @@ void RtpVideoStreamReceiver::NotifyReceiverOfEmptyPacket(uint16_t seq_num) {
     rtc::CritScope lock(&reference_finder_lock_);
     reference_finder_->PaddingReceived(seq_num);
   }
-  packet_buffer_.PaddingReceived(seq_num);
+  OnInsertedPacket(packet_buffer_.InsertPadding(seq_num));
   if (nack_module_) {
     nack_module_->OnReceivedPacket(seq_num, /* is_keyframe = */ false,
                                    /* is _recovered = */ false);

video/rtp_video_stream_receiver.h

@@ -61,7 +61,6 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
                                public RecoveredPacketReceiver,
                                public RtpPacketSinkInterface,
                                public KeyFrameRequestSender,
-                               public video_coding::OnAssembledFrameCallback,
                                public video_coding::OnCompleteFrameCallback,
                                public OnDecryptedFrameCallback,
                                public OnDecryptionStatusChangeCallback {
@@ -139,10 +138,6 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
   // Don't use, still experimental.
   void RequestPacketRetransmit(const std::vector<uint16_t>& sequence_numbers);
 
-  // Implements OnAssembledFrameCallback.
-  void OnAssembledFrame(
-      std::unique_ptr<video_coding::RtpFrameObject> frame) override;
-
   // Implements OnCompleteFrameCallback.
   void OnCompleteFrame(
       std::unique_ptr<video_coding::EncodedFrame> frame) override;
@@ -246,6 +241,8 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
   void UpdateHistograms();
   bool IsRedEnabled() const;
   void InsertSpsPpsIntoTracker(uint8_t payload_type);
+  void OnInsertedPacket(video_coding::PacketBuffer::InsertResult result);
+  void OnAssembledFrame(std::unique_ptr<video_coding::RtpFrameObject> frame);
 
   Clock* const clock_;
   // Ownership of this object lies with VideoReceiveStream, which owns |this|.