Pass and store PacketBuffer::Packet by unique_ptr

to avoid an expensive move of the Packet and to prepare PacketBuffer to
return a list of packets as a frame.

Bug: None
Change-Id: I19f0452c52238228bbe28284ebb197491eb2bf4e
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/167063
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Commit-Queue: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#30404}
commit 97ffbefdab (parent d7fade5738), committed by Commit Bot
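The gist of the change, as a minimal sketch (not part of this CL; ExampleInsert is a hypothetical helper, and the snippet assumes it is built inside the WebRTC tree so the usual headers are available): the caller now heap-allocates the Packet, fills it in, and hands ownership to the buffer with std::move(), so PacketBuffer can keep a unique_ptr in its StoredPacket slot instead of moving the whole Packet struct.

#include <cstddef>
#include <memory>
#include <utility>

#include "modules/video_coding/packet_buffer.h"

namespace webrtc {
namespace video_coding {

// Hypothetical helper, for illustration only: inserts one single-packet
// generic key frame and reports how many complete frames came out.
size_t ExampleInsert(PacketBuffer& packet_buffer) {
  auto packet = std::make_unique<PacketBuffer::Packet>();
  packet->seq_num = 1;
  packet->video_header.codec = kVideoCodecGeneric;
  packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
  packet->video_header.is_first_packet_in_frame = true;
  packet->video_header.is_last_packet_in_frame = true;
  // Ownership transfers into the buffer; |packet| is null afterwards.
  PacketBuffer::InsertResult result =
      packet_buffer.InsertPacket(std::move(packet));
  return result.frames.size();
}

}  // namespace video_coding
}  // namespace webrtc

Because InsertPacket is annotated ABSL_MUST_USE_RESULT, the sketch keeps the returned InsertResult and reads its frames member.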
@@ -79,7 +79,7 @@ PacketBuffer::~PacketBuffer() {
 }

 PacketBuffer::InsertResult PacketBuffer::InsertPacket(
-    PacketBuffer::Packet* packet) {
+    std::unique_ptr<PacketBuffer::Packet> packet) {
   PacketBuffer::InsertResult result;
   rtc::CritScope lock(&crit_);

@@ -99,19 +99,19 @@ PacketBuffer::InsertResult PacketBuffer::InsertPacket(
     first_seq_num_ = seq_num;
   }

-  if (buffer_[index].used) {
+  if (buffer_[index].used()) {
     // Duplicate packet, just delete the payload.
     if (buffer_[index].seq_num() == packet->seq_num) {
       return result;
     }

     // The packet buffer is full, try to expand the buffer.
-    while (ExpandBufferSize() && buffer_[seq_num % buffer_.size()].used) {
+    while (ExpandBufferSize() && buffer_[seq_num % buffer_.size()].used()) {
     }
     index = seq_num % buffer_.size();

     // Packet buffer is still full since we were unable to expand the buffer.
-    if (buffer_[index].used) {
+    if (buffer_[index].used()) {
       // Clear the buffer, delete payload, and return false to signal that a
       // new keyframe is needed.
       RTC_LOG(LS_WARNING) << "Clear PacketBuffer and request key frame.";
@@ -131,8 +131,7 @@ PacketBuffer::InsertResult PacketBuffer::InsertPacket(

   StoredPacket& new_entry = buffer_[index];
   new_entry.continuous = false;
-  new_entry.used = true;
-  new_entry.data = std::move(*packet);
+  new_entry.packet = std::move(packet);

   UpdateMissingPackets(seq_num);

@@ -158,10 +157,9 @@ void PacketBuffer::ClearTo(uint16_t seq_num) {
   size_t diff = ForwardDiff<uint16_t>(first_seq_num_, seq_num);
   size_t iterations = std::min(diff, buffer_.size());
   for (size_t i = 0; i < iterations; ++i) {
-    size_t index = first_seq_num_ % buffer_.size();
-    if (AheadOf<uint16_t>(seq_num, buffer_[index].seq_num())) {
-      buffer_[index].data.video_payload = {};
-      buffer_[index].used = false;
+    StoredPacket& stored = buffer_[first_seq_num_ % buffer_.size()];
+    if (stored.used() && AheadOf<uint16_t>(seq_num, stored.seq_num())) {
+      stored.packet = nullptr;
     }
     ++first_seq_num_;
   }
@@ -186,8 +184,7 @@ void PacketBuffer::ClearInterval(uint16_t start_seq_num,
   for (size_t i = 0; i < iterations; ++i) {
     size_t index = seq_num % buffer_.size();
     RTC_DCHECK_EQ(buffer_[index].seq_num(), seq_num);
-    buffer_[index].data.video_payload = {};
-    buffer_[index].used = false;
+    buffer_[index].packet = nullptr;

     ++seq_num;
   }
@@ -196,8 +193,7 @@ void PacketBuffer::ClearInterval(uint16_t start_seq_num,
 void PacketBuffer::Clear() {
   rtc::CritScope lock(&crit_);
   for (StoredPacket& entry : buffer_) {
-    entry.data.video_payload = {};
-    entry.used = false;
+    entry.packet = nullptr;
   }

   first_packet_received_ = false;
@@ -236,7 +232,7 @@ bool PacketBuffer::ExpandBufferSize() {
   size_t new_size = std::min(max_size_, 2 * buffer_.size());
   std::vector<StoredPacket> new_buffer(new_size);
   for (StoredPacket& entry : buffer_) {
-    if (entry.used) {
+    if (entry.used()) {
       new_buffer[entry.seq_num() % new_size] = std::move(entry);
     }
   }
@@ -251,17 +247,17 @@ bool PacketBuffer::PotentialNewFrame(uint16_t seq_num) const {
   const StoredPacket& entry = buffer_[index];
   const StoredPacket& prev_entry = buffer_[prev_index];

-  if (!entry.used)
+  if (!entry.used())
     return false;
   if (entry.seq_num() != seq_num)
     return false;
   if (entry.frame_begin())
     return true;
-  if (!prev_entry.used)
+  if (!prev_entry.used())
     return false;
   if (prev_entry.seq_num() != static_cast<uint16_t>(entry.seq_num() - 1))
     return false;
-  if (prev_entry.data.timestamp != entry.data.timestamp)
+  if (prev_entry.packet->timestamp != entry.packet->timestamp)
     return false;
   if (prev_entry.continuous)
     return true;
@@ -285,10 +281,10 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
       // the |frame_begin| flag is set.
       int start_index = index;
       size_t tested_packets = 0;
-      int64_t frame_timestamp = buffer_[start_index].data.timestamp;
+      int64_t frame_timestamp = buffer_[start_index].packet->timestamp;

       // Identify H.264 keyframes by means of SPS, PPS, and IDR.
-      bool is_h264 = buffer_[start_index].data.codec() == kVideoCodecH264;
+      bool is_h264 = buffer_[start_index].packet->codec() == kVideoCodecH264;
       bool has_h264_sps = false;
       bool has_h264_pps = false;
       bool has_h264_idr = false;
@@ -303,7 +299,7 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(

        if (is_h264) {
          const auto* h264_header = absl::get_if<RTPVideoHeaderH264>(
-              &buffer_[start_index].data.video_header.video_type_header);
+              &buffer_[start_index].packet->video_header.video_type_header);
          if (!h264_header || h264_header->nalus_length >= kMaxNalusPerPacket)
            return found_frames;

@@ -324,10 +320,10 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
            // smallest index and valid resolution; typically its IDR or SPS
            // packet; there may be packet preceeding this packet, IDR's
            // resolution will be applied to them.
-            if (buffer_[start_index].data.width() > 0 &&
-                buffer_[start_index].data.height() > 0) {
-              idr_width = buffer_[start_index].data.width();
-              idr_height = buffer_[start_index].data.height();
+            if (buffer_[start_index].packet->width() > 0 &&
+                buffer_[start_index].packet->height() > 0) {
+              idr_width = buffer_[start_index].packet->width();
+              idr_height = buffer_[start_index].packet->height();
             }
           }
         }
@@ -344,8 +340,8 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
        // the PacketBuffer to hand out incomplete frames.
        // See: https://bugs.chromium.org/p/webrtc/issues/detail?id=7106
        if (is_h264 &&
-            (!buffer_[start_index].used ||
-             buffer_[start_index].data.timestamp != frame_timestamp)) {
+            (!buffer_[start_index].used() ||
+             buffer_[start_index].packet->timestamp != frame_timestamp)) {
          break;
        }

@@ -369,23 +365,27 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
        // determines if the RtpFrameObject is a key frame or delta frame.
        const size_t first_packet_index = start_seq_num % buffer_.size();
        if (is_h264_keyframe) {
-          buffer_[first_packet_index].data.video_header.frame_type =
+          buffer_[first_packet_index].packet->video_header.frame_type =
              VideoFrameType::kVideoFrameKey;
          if (idr_width > 0 && idr_height > 0) {
            // IDR frame was finalized and we have the correct resolution for
            // IDR; update first packet to have same resolution as IDR.
-            buffer_[first_packet_index].data.video_header.width = idr_width;
-            buffer_[first_packet_index].data.video_header.height = idr_height;
+            buffer_[first_packet_index].packet->video_header.width = idr_width;
+            buffer_[first_packet_index].packet->video_header.height =
+                idr_height;
          }
        } else {
-          buffer_[first_packet_index].data.video_header.frame_type =
+          buffer_[first_packet_index].packet->video_header.frame_type =
              VideoFrameType::kVideoFrameDelta;
        }

        // With IPPP, if this is not a keyframe, make sure there are no gaps
        // in the packet sequence numbers up until this point.
        const uint8_t h264tid =
-            buffer_[start_index].data.video_header.frame_marking.temporal_id;
+            buffer_[start_index].used()
+                ? buffer_[start_index]
+                      .packet->video_header.frame_marking.temporal_id
+                : kNoTemporalIdx;
        if (h264tid == kNoTemporalIdx && !is_h264_keyframe &&
            missing_packets_.upper_bound(start_seq_num) !=
                missing_packets_.begin()) {
@@ -480,9 +480,9 @@ std::unique_ptr<RtpFrameObject> PacketBuffer::AssembleFrame(

 const PacketBuffer::Packet& PacketBuffer::GetPacket(uint16_t seq_num) const {
   const StoredPacket& entry = buffer_[seq_num % buffer_.size()];
-  RTC_DCHECK(entry.used);
+  RTC_DCHECK(entry.used());
   RTC_DCHECK_EQ(seq_num, entry.seq_num());
-  return entry.data;
+  return *entry.packet;
 }

 void PacketBuffer::UpdateMissingPackets(uint16_t seq_num) {

@@ -41,9 +41,9 @@ class PacketBuffer {
            int64_t ntp_time_ms,
            int64_t receive_time_ms);
     Packet(const Packet&) = delete;
-    Packet(Packet&&) = default;
+    Packet(Packet&&) = delete;
     Packet& operator=(const Packet&) = delete;
-    Packet& operator=(Packet&&) = default;
+    Packet& operator=(Packet&&) = delete;
     ~Packet() = default;

     VideoCodecType codec() const { return video_header.codec; }
@@ -82,9 +82,8 @@ class PacketBuffer {
   PacketBuffer(Clock* clock, size_t start_buffer_size, size_t max_buffer_size);
   ~PacketBuffer();

-  // The PacketBuffer will always take ownership of the |packet.dataPtr| when
-  // this function is called.
-  InsertResult InsertPacket(Packet* packet) ABSL_MUST_USE_RESULT;
+  InsertResult InsertPacket(std::unique_ptr<Packet> packet)
+      ABSL_MUST_USE_RESULT;
   InsertResult InsertPadding(uint16_t seq_num) ABSL_MUST_USE_RESULT;
   void ClearTo(uint16_t seq_num);
   void Clear();
@@ -95,21 +94,21 @@ class PacketBuffer {

  private:
   struct StoredPacket {
-    uint16_t seq_num() const { return data.seq_num; }
+    uint16_t seq_num() const { return packet->seq_num; }

     // If this is the first packet of the frame.
-    bool frame_begin() const { return data.is_first_packet_in_frame(); }
+    bool frame_begin() const { return packet->is_first_packet_in_frame(); }

     // If this is the last packet of the frame.
-    bool frame_end() const { return data.is_last_packet_in_frame(); }
+    bool frame_end() const { return packet->is_last_packet_in_frame(); }

     // If this slot is currently used.
-    bool used = false;
+    bool used() const { return packet != nullptr; }

     // If all its previous packets have been inserted into the packet buffer.
     bool continuous = false;

-    Packet data;
+    std::unique_ptr<Packet> packet;
   };

   Clock* const clock_;

@@ -111,18 +111,19 @@ class PacketBufferTest : public ::testing::Test {
                                  IsLast last,     // is last packet of frame
                                  rtc::ArrayView<const uint8_t> data = {},
                                  uint32_t timestamp = 123u) {  // rtp timestamp
-    PacketBuffer::Packet packet;
-    packet.video_header.codec = kVideoCodecGeneric;
-    packet.timestamp = timestamp;
-    packet.seq_num = seq_num;
-    packet.video_header.frame_type = keyframe == kKeyFrame
-                                         ? VideoFrameType::kVideoFrameKey
-                                         : VideoFrameType::kVideoFrameDelta;
-    packet.video_header.is_first_packet_in_frame = first == kFirst;
-    packet.video_header.is_last_packet_in_frame = last == kLast;
-    packet.video_payload.SetData(data.data(), data.size());
+    auto packet = std::make_unique<PacketBuffer::Packet>();
+    packet->video_header.codec = kVideoCodecGeneric;
+    packet->timestamp = timestamp;
+    packet->seq_num = seq_num;
+    packet->video_header.frame_type = keyframe == kKeyFrame
+                                          ? VideoFrameType::kVideoFrameKey
+                                          : VideoFrameType::kVideoFrameDelta;
+    packet->video_header.is_first_packet_in_frame = first == kFirst;
+    packet->video_header.is_last_packet_in_frame = last == kLast;
+    packet->video_payload.SetData(data.data(), data.size());

-    return PacketBufferInsertResult(packet_buffer_.InsertPacket(&packet));
+    return PacketBufferInsertResult(
+        packet_buffer_.InsertPacket(std::move(packet)));
   }

   const test::ScopedFieldTrials scoped_field_trials_;
@@ -181,29 +182,38 @@ TEST_F(PacketBufferTest, InsertOldPackets) {
 TEST_F(PacketBufferTest, NackCount) {
   const uint16_t seq_num = Rand();

-  PacketBuffer::Packet packet;
-  packet.video_header.codec = kVideoCodecGeneric;
-  packet.seq_num = seq_num;
-  packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
-  packet.video_header.is_first_packet_in_frame = true;
-  packet.video_header.is_last_packet_in_frame = false;
-  packet.times_nacked = 0;
-
-  IgnoreResult(packet_buffer_.InsertPacket(&packet));
-
-  packet.seq_num++;
-  packet.video_header.is_first_packet_in_frame = false;
-  packet.times_nacked = 1;
-  IgnoreResult(packet_buffer_.InsertPacket(&packet));
-
-  packet.seq_num++;
-  packet.times_nacked = 3;
-  IgnoreResult(packet_buffer_.InsertPacket(&packet));
-
-  packet.seq_num++;
-  packet.video_header.is_last_packet_in_frame = true;
-  packet.times_nacked = 1;
-  auto frames = packet_buffer_.InsertPacket(&packet).frames;
+  auto packet = std::make_unique<PacketBuffer::Packet>();
+  packet->video_header.codec = kVideoCodecGeneric;
+  packet->seq_num = seq_num;
+  packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+  packet->video_header.is_first_packet_in_frame = true;
+  packet->video_header.is_last_packet_in_frame = false;
+  packet->times_nacked = 0;
+  IgnoreResult(packet_buffer_.InsertPacket(std::move(packet)));
+
+  packet = std::make_unique<PacketBuffer::Packet>();
+  packet->seq_num = seq_num + 1;
+  packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+  packet->video_header.is_first_packet_in_frame = false;
+  packet->video_header.is_last_packet_in_frame = false;
+  packet->times_nacked = 1;
+  IgnoreResult(packet_buffer_.InsertPacket(std::move(packet)));
+
+  packet = std::make_unique<PacketBuffer::Packet>();
+  packet->seq_num = seq_num + 2;
+  packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+  packet->video_header.is_first_packet_in_frame = false;
+  packet->video_header.is_last_packet_in_frame = false;
+  packet->times_nacked = 3;
+  IgnoreResult(packet_buffer_.InsertPacket(std::move(packet)));
+
+  packet = std::make_unique<PacketBuffer::Packet>();
+  packet->seq_num = seq_num + 3;
+  packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+  packet->video_header.is_first_packet_in_frame = false;
+  packet->video_header.is_last_packet_in_frame = true;
+  packet->times_nacked = 1;
+  auto frames = packet_buffer_.InsertPacket(std::move(packet)).frames;

   ASSERT_THAT(frames, SizeIs(1));
   EXPECT_EQ(frames.front()->times_nacked(), 3);
@@ -428,22 +438,22 @@ TEST_F(PacketBufferTest, GetBitstreamAv1) {
   const uint8_t data1[] = {0b01'01'0000, 0b0'0100'000, 'm', 'a', 'n', 'y', ' '};
   const uint8_t data2[] = {0b10'01'0000, 'b', 'i', 't', 's', 0};

-  PacketBuffer::Packet packet1;
-  packet1.video_header.codec = kVideoCodecAV1;
-  packet1.seq_num = 13;
-  packet1.video_header.is_first_packet_in_frame = true;
-  packet1.video_header.is_last_packet_in_frame = false;
-  packet1.video_payload = data1;
-  auto frames = packet_buffer_.InsertPacket(&packet1).frames;
+  auto packet1 = std::make_unique<PacketBuffer::Packet>();
+  packet1->video_header.codec = kVideoCodecAV1;
+  packet1->seq_num = 13;
+  packet1->video_header.is_first_packet_in_frame = true;
+  packet1->video_header.is_last_packet_in_frame = false;
+  packet1->video_payload = data1;
+  auto frames = packet_buffer_.InsertPacket(std::move(packet1)).frames;
   EXPECT_THAT(frames, IsEmpty());

-  PacketBuffer::Packet packet2;
-  packet2.video_header.codec = kVideoCodecAV1;
-  packet2.seq_num = 14;
-  packet2.video_header.is_first_packet_in_frame = false;
-  packet2.video_header.is_last_packet_in_frame = true;
-  packet2.video_payload = data2;
-  frames = packet_buffer_.InsertPacket(&packet2).frames;
+  auto packet2 = std::make_unique<PacketBuffer::Packet>();
+  packet2->video_header.codec = kVideoCodecAV1;
+  packet2->seq_num = 14;
+  packet2->video_header.is_first_packet_in_frame = false;
+  packet2->video_header.is_last_packet_in_frame = true;
+  packet2->video_payload = data2;
+  frames = packet_buffer_.InsertPacket(std::move(packet2)).frames;

   ASSERT_THAT(frames, SizeIs(1));
   EXPECT_EQ(frames[0]->first_seq_num(), 13);
@@ -458,22 +468,22 @@ TEST_F(PacketBufferTest, GetBitstreamInvalidAv1) {
   const uint8_t data1[] = {0b01'01'0000, 0b0'0100'000, 'm', 'a', 'n', 'y', ' '};
   const uint8_t data2[] = {0b00'01'0000, 'b', 'i', 't', 's', 0};

-  PacketBuffer::Packet packet1;
-  packet1.video_header.codec = kVideoCodecAV1;
-  packet1.seq_num = 13;
-  packet1.video_header.is_first_packet_in_frame = true;
-  packet1.video_header.is_last_packet_in_frame = false;
-  packet1.video_payload = data1;
-  auto frames = packet_buffer_.InsertPacket(&packet1).frames;
+  auto packet1 = std::make_unique<PacketBuffer::Packet>();
+  packet1->video_header.codec = kVideoCodecAV1;
+  packet1->seq_num = 13;
+  packet1->video_header.is_first_packet_in_frame = true;
+  packet1->video_header.is_last_packet_in_frame = false;
+  packet1->video_payload = data1;
+  auto frames = packet_buffer_.InsertPacket(std::move(packet1)).frames;
   EXPECT_THAT(frames, IsEmpty());

-  PacketBuffer::Packet packet2;
-  packet2.video_header.codec = kVideoCodecAV1;
-  packet2.seq_num = 14;
-  packet2.video_header.is_first_packet_in_frame = false;
-  packet2.video_header.is_last_packet_in_frame = true;
-  packet2.video_payload = data2;
-  frames = packet_buffer_.InsertPacket(&packet2).frames;
+  auto packet2 = std::make_unique<PacketBuffer::Packet>();
+  packet2->video_header.codec = kVideoCodecAV1;
+  packet2->seq_num = 14;
+  packet2->video_header.is_first_packet_in_frame = false;
+  packet2->video_header.is_last_packet_in_frame = true;
+  packet2->video_payload = data2;
+  frames = packet_buffer_.InsertPacket(std::move(packet2)).frames;

   EXPECT_THAT(frames, IsEmpty());
 }
@@ -526,12 +536,12 @@ class PacketBufferH264Test : public PacketBufferTest {
                              rtc::ArrayView<const uint8_t> data = {},
                              uint32_t width = 0,     // width of frame (SPS/IDR)
                              uint32_t height = 0) {  // height of frame (SPS/IDR)
-    PacketBuffer::Packet packet;
-    packet.video_header.codec = kVideoCodecH264;
+    auto packet = std::make_unique<PacketBuffer::Packet>();
+    packet->video_header.codec = kVideoCodecH264;
     auto& h264_header =
-        packet.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
-    packet.seq_num = seq_num;
-    packet.timestamp = timestamp;
+        packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+    packet->seq_num = seq_num;
+    packet->timestamp = timestamp;
     if (keyframe == kKeyFrame) {
       if (sps_pps_idr_is_keyframe_) {
         h264_header.nalus[0].type = H264::NaluType::kSps;
@@ -543,13 +553,14 @@ class PacketBufferH264Test : public PacketBufferTest {
         h264_header.nalus_length = 1;
       }
     }
-    packet.video_header.width = width;
-    packet.video_header.height = height;
-    packet.video_header.is_first_packet_in_frame = first == kFirst;
-    packet.video_header.is_last_packet_in_frame = last == kLast;
-    packet.video_payload.SetData(data.data(), data.size());
+    packet->video_header.width = width;
+    packet->video_header.height = height;
+    packet->video_header.is_first_packet_in_frame = first == kFirst;
+    packet->video_header.is_last_packet_in_frame = last == kLast;
+    packet->video_payload.SetData(data.data(), data.size());

-    return PacketBufferInsertResult(packet_buffer_.InsertPacket(&packet));
+    return PacketBufferInsertResult(
+        packet_buffer_.InsertPacket(std::move(packet)));
   }

   PacketBufferInsertResult InsertH264KeyFrameWithAud(
@@ -561,12 +572,12 @@ class PacketBufferH264Test : public PacketBufferTest {
       rtc::ArrayView<const uint8_t> data = {},
       uint32_t width = 0,     // width of frame (SPS/IDR)
       uint32_t height = 0) {  // height of frame (SPS/IDR)
-    PacketBuffer::Packet packet;
-    packet.video_header.codec = kVideoCodecH264;
+    auto packet = std::make_unique<PacketBuffer::Packet>();
+    packet->video_header.codec = kVideoCodecH264;
     auto& h264_header =
-        packet.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
-    packet.seq_num = seq_num;
-    packet.timestamp = timestamp;
+        packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+    packet->seq_num = seq_num;
+    packet->timestamp = timestamp;

     // this should be the start of frame.
     RTC_CHECK(first == kFirst);
@@ -574,9 +585,9 @@ class PacketBufferH264Test : public PacketBufferTest {
     // Insert a AUD NALU / packet without width/height.
     h264_header.nalus[0].type = H264::NaluType::kAud;
     h264_header.nalus_length = 1;
-    packet.video_header.is_first_packet_in_frame = true;
-    packet.video_header.is_last_packet_in_frame = false;
-    IgnoreResult(packet_buffer_.InsertPacket(&packet));
+    packet->video_header.is_first_packet_in_frame = true;
+    packet->video_header.is_last_packet_in_frame = false;
+    IgnoreResult(packet_buffer_.InsertPacket(std::move(packet)));
     // insert IDR
     return InsertH264(seq_num + 1, keyframe, kNotFirst, last, timestamp, data,
                       width, height);
@@ -633,18 +644,18 @@ TEST_P(PacketBufferH264ParameterizedTest, GetBitstreamBufferPadding) {
   uint16_t seq_num = Rand();
   uint8_t data[] = "some plain old data";

-  PacketBuffer::Packet packet;
+  auto packet = std::make_unique<PacketBuffer::Packet>();
   auto& h264_header =
-      packet.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+      packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus_length = 1;
   h264_header.nalus[0].type = H264::NaluType::kIdr;
   h264_header.packetization_type = kH264SingleNalu;
-  packet.seq_num = seq_num;
-  packet.video_header.codec = kVideoCodecH264;
-  packet.video_payload = data;
-  packet.video_header.is_first_packet_in_frame = true;
-  packet.video_header.is_last_packet_in_frame = true;
-  auto frames = packet_buffer_.InsertPacket(&packet).frames;
+  packet->seq_num = seq_num;
+  packet->video_header.codec = kVideoCodecH264;
+  packet->video_payload = data;
+  packet->video_header.is_first_packet_in_frame = true;
+  packet->video_header.is_last_packet_in_frame = true;
+  auto frames = packet_buffer_.InsertPacket(std::move(packet)).frames;

   ASSERT_THAT(frames, SizeIs(1));
   EXPECT_EQ(frames[0]->first_seq_num(), seq_num);
@@ -807,45 +818,51 @@ TEST_F(PacketBufferTest,
 }

 TEST_F(PacketBufferTest, IncomingCodecChange) {
-  PacketBuffer::Packet packet;
-  packet.video_header.is_first_packet_in_frame = true;
-  packet.video_header.is_last_packet_in_frame = true;
-
-  packet.video_header.codec = kVideoCodecVP8;
-  packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
-  packet.timestamp = 1;
-  packet.seq_num = 1;
-  packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet).frames, SizeIs(1));
+  auto packet = std::make_unique<PacketBuffer::Packet>();
+  packet->video_header.is_first_packet_in_frame = true;
+  packet->video_header.is_last_packet_in_frame = true;
+  packet->video_header.codec = kVideoCodecVP8;
+  packet->video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+  packet->timestamp = 1;
+  packet->seq_num = 1;
+  packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames, SizeIs(1));

-  packet.video_header.codec = kVideoCodecH264;
+  packet = std::make_unique<PacketBuffer::Packet>();
+  packet->video_header.is_first_packet_in_frame = true;
+  packet->video_header.is_last_packet_in_frame = true;
+  packet->video_header.codec = kVideoCodecH264;
   auto& h264_header =
-      packet.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+      packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus_length = 1;
-  packet.timestamp = 3;
-  packet.seq_num = 3;
-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet).frames, IsEmpty());
+  packet->timestamp = 3;
+  packet->seq_num = 3;
+  packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames, IsEmpty());

-  packet.video_header.codec = kVideoCodecVP8;
-  packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
-  packet.timestamp = 2;
-  packet.seq_num = 2;
-  packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet).frames, SizeIs(2));
+  packet = std::make_unique<PacketBuffer::Packet>();
+  packet->video_header.is_first_packet_in_frame = true;
+  packet->video_header.is_last_packet_in_frame = true;
+  packet->video_header.codec = kVideoCodecVP8;
+  packet->video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+  packet->timestamp = 2;
+  packet->seq_num = 2;
+  packet->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames, SizeIs(2));
 }

 TEST_F(PacketBufferTest, TooManyNalusInPacket) {
-  PacketBuffer::Packet packet;
-  packet.video_header.codec = kVideoCodecH264;
-  packet.timestamp = 1;
-  packet.seq_num = 1;
-  packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
-  packet.video_header.is_first_packet_in_frame = true;
-  packet.video_header.is_last_packet_in_frame = true;
+  auto packet = std::make_unique<PacketBuffer::Packet>();
+  packet->video_header.codec = kVideoCodecH264;
+  packet->timestamp = 1;
+  packet->seq_num = 1;
+  packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+  packet->video_header.is_first_packet_in_frame = true;
+  packet->video_header.is_last_packet_in_frame = true;
   auto& h264_header =
-      packet.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+      packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus_length = kMaxNalusPerPacket;
-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet).frames, IsEmpty());
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames, IsEmpty());
 }

 TEST_P(PacketBufferH264ParameterizedTest, OneFrameFillBuffer) {
@@ -902,15 +919,17 @@ class PacketBufferH264XIsKeyframeTest : public PacketBufferH264Test {
   const uint16_t kSeqNum = 5;

   explicit PacketBufferH264XIsKeyframeTest(bool sps_pps_idr_is_keyframe)
-      : PacketBufferH264Test(sps_pps_idr_is_keyframe) {
-    packet_.video_header.codec = kVideoCodecH264;
-    packet_.seq_num = kSeqNum;
+      : PacketBufferH264Test(sps_pps_idr_is_keyframe) {}
+
+  std::unique_ptr<PacketBuffer::Packet> CreatePacket() {
+    auto packet = std::make_unique<PacketBuffer::Packet>();
+    packet->video_header.codec = kVideoCodecH264;
+    packet->seq_num = kSeqNum;

-    packet_.video_header.is_first_packet_in_frame = true;
-    packet_.video_header.is_last_packet_in_frame = true;
+    packet->video_header.is_first_packet_in_frame = true;
+    packet->video_header.is_last_packet_in_frame = true;
+    return packet;
   }
-
-  PacketBuffer::Packet packet_;
 };

 class PacketBufferH264IdrIsKeyframeTest
@@ -921,23 +940,25 @@ class PacketBufferH264IdrIsKeyframeTest
 };

 TEST_F(PacketBufferH264IdrIsKeyframeTest, IdrIsKeyframe) {
+  auto packet = CreatePacket();
   auto& h264_header =
-      packet_.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+      packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus[0].type = H264::NaluType::kIdr;
   h264_header.nalus_length = 1;
-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet_).frames,
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames,
               ElementsAre(KeyFrame()));
 }

 TEST_F(PacketBufferH264IdrIsKeyframeTest, SpsPpsIdrIsKeyframe) {
+  auto packet = CreatePacket();
   auto& h264_header =
-      packet_.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+      packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus[0].type = H264::NaluType::kSps;
   h264_header.nalus[1].type = H264::NaluType::kPps;
   h264_header.nalus[2].type = H264::NaluType::kIdr;
   h264_header.nalus_length = 3;

-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet_).frames,
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames,
               ElementsAre(KeyFrame()));
 }

@@ -949,35 +970,38 @@ class PacketBufferH264SpsPpsIdrIsKeyframeTest
 };

 TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, IdrIsNotKeyframe) {
+  auto packet = CreatePacket();
   auto& h264_header =
-      packet_.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+      packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus[0].type = H264::NaluType::kIdr;
   h264_header.nalus_length = 1;

-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet_).frames,
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames,
               ElementsAre(DeltaFrame()));
 }

 TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, SpsPpsIsNotKeyframe) {
+  auto packet = CreatePacket();
   auto& h264_header =
-      packet_.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+      packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus[0].type = H264::NaluType::kSps;
   h264_header.nalus[1].type = H264::NaluType::kPps;
   h264_header.nalus_length = 2;

-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet_).frames,
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames,
               ElementsAre(DeltaFrame()));
 }

 TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, SpsPpsIdrIsKeyframe) {
+  auto packet = CreatePacket();
   auto& h264_header =
-      packet_.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+      packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus[0].type = H264::NaluType::kSps;
   h264_header.nalus[1].type = H264::NaluType::kPps;
   h264_header.nalus[2].type = H264::NaluType::kIdr;
   h264_header.nalus_length = 3;

-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet_).frames,
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames,
               ElementsAre(KeyFrame()));
 }

@@ -8,6 +8,9 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */

+#include <memory>
+#include <utility>
+
 #include "modules/video_coding/frame_object.h"
 #include "modules/video_coding/packet_buffer.h"
 #include "system_wrappers/include/clock.h"
@@ -26,20 +29,20 @@ void FuzzOneInput(const uint8_t* data, size_t size) {
   test::FuzzDataHelper helper(rtc::ArrayView<const uint8_t>(data, size));

   while (helper.BytesLeft()) {
-    video_coding::PacketBuffer::Packet packet;
+    auto packet = std::make_unique<video_coding::PacketBuffer::Packet>();
     // Fuzz POD members of the packet.
-    helper.CopyTo(&packet.marker_bit);
-    helper.CopyTo(&packet.payload_type);
-    helper.CopyTo(&packet.seq_num);
-    helper.CopyTo(&packet.timestamp);
-    helper.CopyTo(&packet.ntp_time_ms);
-    helper.CopyTo(&packet.times_nacked);
+    helper.CopyTo(&packet->marker_bit);
+    helper.CopyTo(&packet->payload_type);
+    helper.CopyTo(&packet->seq_num);
+    helper.CopyTo(&packet->timestamp);
+    helper.CopyTo(&packet->ntp_time_ms);
+    helper.CopyTo(&packet->times_nacked);

     // Fuzz non-POD member of the packet.
-    packet.video_payload.SetSize(helper.ReadOrDefaultValue<uint8_t>(0));
+    packet->video_payload.SetSize(helper.ReadOrDefaultValue<uint8_t>(0));
     // TODO(danilchap): Fuzz other non-POD members of the |packet|.

-    IgnoreResult(packet_buffer.InsertPacket(&packet));
+    IgnoreResult(packet_buffer.InsertPacket(std::move(packet)));
   }
 }

@@ -331,23 +331,23 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
     const RtpPacketReceived& rtp_packet,
     const RTPVideoHeader& video) {
   RTC_DCHECK_RUN_ON(&worker_task_checker_);
-  video_coding::PacketBuffer::Packet packet(
+  auto packet = std::make_unique<video_coding::PacketBuffer::Packet>(
       rtp_packet, video, ntp_estimator_.Estimate(rtp_packet.Timestamp()),
       clock_->TimeInMilliseconds());

   // Try to extrapolate absolute capture time if it is missing.
   // TODO(bugs.webrtc.org/10739): Add support for estimated capture clock
   // offset.
-  packet.packet_info.set_absolute_capture_time(
+  packet->packet_info.set_absolute_capture_time(
       absolute_capture_time_receiver_.OnReceivePacket(
-          AbsoluteCaptureTimeReceiver::GetSource(packet.packet_info.ssrc(),
-                                                 packet.packet_info.csrcs()),
-          packet.packet_info.rtp_timestamp(),
+          AbsoluteCaptureTimeReceiver::GetSource(packet->packet_info.ssrc(),
+                                                 packet->packet_info.csrcs()),
+          packet->packet_info.rtp_timestamp(),
           // Assume frequency is the same one for all video frames.
           kVideoPayloadTypeFrequency,
-          packet.packet_info.absolute_capture_time()));
+          packet->packet_info.absolute_capture_time()));

-  RTPVideoHeader& video_header = packet.video_header;
+  RTPVideoHeader& video_header = packet->video_header;
   video_header.rotation = kVideoRotation_0;
   video_header.content_type = VideoContentType::UNSPECIFIED;
   video_header.video_timing.flags = VideoSendTiming::kInvalid;
@@ -368,7 +368,7 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
   rtp_packet.GetExtension<FrameMarkingExtension>(&video_header.frame_marking);

   RtpGenericFrameDescriptor& generic_descriptor =
-      packet.generic_descriptor.emplace();
+      packet->generic_descriptor.emplace();
   if (rtp_packet.GetExtension<RtpGenericFrameDescriptorExtension01>(
           &generic_descriptor)) {
     if (rtp_packet.HasExtension<RtpGenericFrameDescriptorExtension00>()) {
@@ -382,36 +382,36 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
     generic_descriptor.SetByteRepresentation(
         rtp_packet.GetRawExtension<RtpGenericFrameDescriptorExtension00>());
   } else {
-    packet.generic_descriptor = absl::nullopt;
+    packet->generic_descriptor = absl::nullopt;
   }
-  if (packet.generic_descriptor != absl::nullopt) {
+  if (packet->generic_descriptor != absl::nullopt) {
     video_header.is_first_packet_in_frame =
-        packet.generic_descriptor->FirstPacketInSubFrame();
+        packet->generic_descriptor->FirstPacketInSubFrame();
     video_header.is_last_packet_in_frame =
-        packet.generic_descriptor->LastPacketInSubFrame();
+        packet->generic_descriptor->LastPacketInSubFrame();

-    if (packet.generic_descriptor->FirstPacketInSubFrame()) {
+    if (packet->generic_descriptor->FirstPacketInSubFrame()) {
       video_header.frame_type =
-          packet.generic_descriptor->FrameDependenciesDiffs().empty()
+          packet->generic_descriptor->FrameDependenciesDiffs().empty()
               ? VideoFrameType::kVideoFrameKey
               : VideoFrameType::kVideoFrameDelta;

       auto& descriptor = video_header.generic.emplace();
       int64_t frame_id =
-          frame_id_unwrapper_.Unwrap(packet.generic_descriptor->FrameId());
+          frame_id_unwrapper_.Unwrap(packet->generic_descriptor->FrameId());
       descriptor.frame_id = frame_id;
-      descriptor.spatial_index = packet.generic_descriptor->SpatialLayer();
-      descriptor.temporal_index = packet.generic_descriptor->TemporalLayer();
+      descriptor.spatial_index = packet->generic_descriptor->SpatialLayer();
+      descriptor.temporal_index = packet->generic_descriptor->TemporalLayer();
       descriptor.discardable =
-          packet.generic_descriptor->Discardable().value_or(false);
+          packet->generic_descriptor->Discardable().value_or(false);
       for (uint16_t fdiff :
-           packet.generic_descriptor->FrameDependenciesDiffs()) {
+           packet->generic_descriptor->FrameDependenciesDiffs()) {
         descriptor.dependencies.push_back(frame_id - fdiff);
       }
     }

-    video_header.width = packet.generic_descriptor->Width();
-    video_header.height = packet.generic_descriptor->Height();
+    video_header.width = packet->generic_descriptor->Width();
+    video_header.height = packet->generic_descriptor->Height();
   }

   // Color space should only be transmitted in the last packet of a frame,
@@ -435,7 +435,7 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
     // TODO(bugs.webrtc.org/10336): Implement support for reordering.
     RTC_LOG(LS_INFO)
        << "LossNotificationController does not support reordering.";
-  } else if (!packet.generic_descriptor) {
+  } else if (!packet->generic_descriptor) {
     RTC_LOG(LS_WARNING) << "LossNotificationController requires generic "
                            "frame descriptor, but it is missing.";
   } else {
@@ -460,31 +460,31 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
         video_header.is_first_packet_in_frame &&
         video_header.frame_type == VideoFrameType::kVideoFrameKey;

-    packet.times_nacked = nack_module_->OnReceivedPacket(
+    packet->times_nacked = nack_module_->OnReceivedPacket(
         rtp_packet.SequenceNumber(), is_keyframe, rtp_packet.recovered());
   } else {
-    packet.times_nacked = -1;
+    packet->times_nacked = -1;
   }

   if (codec_payload.size() == 0) {
-    NotifyReceiverOfEmptyPacket(packet.seq_num);
+    NotifyReceiverOfEmptyPacket(packet->seq_num);
     rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
     return;
   }

-  if (packet.codec() == kVideoCodecH264) {
+  if (packet->codec() == kVideoCodecH264) {
     // Only when we start to receive packets will we know what payload type
     // that will be used. When we know the payload type insert the correct
     // sps/pps into the tracker.
-    if (packet.payload_type != last_payload_type_) {
-      last_payload_type_ = packet.payload_type;
-      InsertSpsPpsIntoTracker(packet.payload_type);
+    if (packet->payload_type != last_payload_type_) {
+      last_payload_type_ = packet->payload_type;
+      InsertSpsPpsIntoTracker(packet->payload_type);
     }

     video_coding::H264SpsPpsTracker::FixedBitstream fixed =
         tracker_.CopyAndFixBitstream(
             rtc::MakeArrayView(codec_payload.cdata(), codec_payload.size()),
-            &packet.video_header);
+            &packet->video_header);

     switch (fixed.action) {
       case video_coding::H264SpsPpsTracker::kRequestKeyframe:
@@ -494,17 +494,17 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
       case video_coding::H264SpsPpsTracker::kDrop:
         return;
       case video_coding::H264SpsPpsTracker::kInsert:
-        packet.video_payload = std::move(fixed.bitstream);
+        packet->video_payload = std::move(fixed.bitstream);
         break;
     }

   } else {
-    packet.video_payload = std::move(codec_payload);
+    packet->video_payload = std::move(codec_payload);
   }

   rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
-  frame_counter_.Add(packet.timestamp);
-  OnInsertedPacket(packet_buffer_.InsertPacket(&packet));
+  frame_counter_.Add(packet->timestamp);
+  OnInsertedPacket(packet_buffer_.InsertPacket(std::move(packet)));
 }

 void RtpVideoStreamReceiver::OnRecoveredPacket(const uint8_t* rtp_packet,