Pass and store PacketBuffer::Packet by unique_ptr
to avoid an expensive move of the Packet and to prepare PacketBuffer to
return a list of packets as a frame.

Bug: None
Change-Id: I19f0452c52238228bbe28284ebb197491eb2bf4e
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/167063
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Commit-Queue: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#30404}
committed by Commit Bot
parent d7fade5738
commit 97ffbefdab
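
To make the new calling convention concrete, here is a minimal standalone sketch (not WebRTC code; every name in it is invented for the example) of the ownership model the change introduces: each buffer slot owns its packet through a std::unique_ptr, a slot counts as used exactly while that pointer is non-null, and InsertPacket takes the packet by value and consumes it.

// Standalone illustration only -- not part of the WebRTC change below.
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

// Invented stand-in for PacketBuffer::Packet.
struct Packet {
  uint16_t seq_num = 0;
};

class MiniPacketBuffer {
 public:
  explicit MiniPacketBuffer(std::size_t size) : slots_(size) {}

  // Takes ownership; after the call the caller's unique_ptr is null.
  void InsertPacket(std::unique_ptr<Packet> packet) {
    StoredPacket& slot = slots_[packet->seq_num % slots_.size()];
    slot.packet = std::move(packet);
  }

  // A slot is "used" exactly while it holds a packet.
  bool used(uint16_t seq_num) const {
    return slots_[seq_num % slots_.size()].packet != nullptr;
  }

  // Clearing a slot is now just dropping the pointer.
  void Clear(uint16_t seq_num) {
    slots_[seq_num % slots_.size()].packet = nullptr;
  }

 private:
  struct StoredPacket {
    std::unique_ptr<Packet> packet;  // nullptr means the slot is free.
  };
  std::vector<StoredPacket> slots_;
};

int main() {
  MiniPacketBuffer buffer(16);
  auto packet = std::make_unique<Packet>();
  packet->seq_num = 3;
  buffer.InsertPacket(std::move(packet));  // Ownership moves into the buffer.
  return buffer.used(3) ? 0 : 1;
}

The diff below applies the same pattern throughout: StoredPacket holds a std::unique_ptr<Packet> instead of a Packet by value, the separate used flag becomes a used() accessor that checks the pointer, and every call site builds the packet with std::make_unique and hands it to InsertPacket with std::move.
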
@@ -79,7 +79,7 @@ PacketBuffer::~PacketBuffer() {
 }
 
 PacketBuffer::InsertResult PacketBuffer::InsertPacket(
-    PacketBuffer::Packet* packet) {
+    std::unique_ptr<PacketBuffer::Packet> packet) {
   PacketBuffer::InsertResult result;
   rtc::CritScope lock(&crit_);
 
@@ -99,19 +99,19 @@ PacketBuffer::InsertResult PacketBuffer::InsertPacket(
     first_seq_num_ = seq_num;
   }
 
-  if (buffer_[index].used) {
+  if (buffer_[index].used()) {
     // Duplicate packet, just delete the payload.
     if (buffer_[index].seq_num() == packet->seq_num) {
       return result;
     }
 
     // The packet buffer is full, try to expand the buffer.
-    while (ExpandBufferSize() && buffer_[seq_num % buffer_.size()].used) {
+    while (ExpandBufferSize() && buffer_[seq_num % buffer_.size()].used()) {
    }
     index = seq_num % buffer_.size();
 
     // Packet buffer is still full since we were unable to expand the buffer.
-    if (buffer_[index].used) {
+    if (buffer_[index].used()) {
       // Clear the buffer, delete payload, and return false to signal that a
       // new keyframe is needed.
       RTC_LOG(LS_WARNING) << "Clear PacketBuffer and request key frame.";
@@ -131,8 +131,7 @@ PacketBuffer::InsertResult PacketBuffer::InsertPacket(
 
   StoredPacket& new_entry = buffer_[index];
   new_entry.continuous = false;
-  new_entry.used = true;
-  new_entry.data = std::move(*packet);
+  new_entry.packet = std::move(packet);
 
   UpdateMissingPackets(seq_num);
 
@@ -158,10 +157,9 @@ void PacketBuffer::ClearTo(uint16_t seq_num) {
   size_t diff = ForwardDiff<uint16_t>(first_seq_num_, seq_num);
   size_t iterations = std::min(diff, buffer_.size());
   for (size_t i = 0; i < iterations; ++i) {
-    size_t index = first_seq_num_ % buffer_.size();
-    if (AheadOf<uint16_t>(seq_num, buffer_[index].seq_num())) {
-      buffer_[index].data.video_payload = {};
-      buffer_[index].used = false;
+    StoredPacket& stored = buffer_[first_seq_num_ % buffer_.size()];
+    if (stored.used() && AheadOf<uint16_t>(seq_num, stored.seq_num())) {
+      stored.packet = nullptr;
     }
     ++first_seq_num_;
   }
@@ -186,8 +184,7 @@ void PacketBuffer::ClearInterval(uint16_t start_seq_num,
   for (size_t i = 0; i < iterations; ++i) {
     size_t index = seq_num % buffer_.size();
     RTC_DCHECK_EQ(buffer_[index].seq_num(), seq_num);
-    buffer_[index].data.video_payload = {};
-    buffer_[index].used = false;
+    buffer_[index].packet = nullptr;
 
     ++seq_num;
   }
@@ -196,8 +193,7 @@ void PacketBuffer::ClearInterval(uint16_t start_seq_num,
 void PacketBuffer::Clear() {
   rtc::CritScope lock(&crit_);
   for (StoredPacket& entry : buffer_) {
-    entry.data.video_payload = {};
-    entry.used = false;
+    entry.packet = nullptr;
   }
 
   first_packet_received_ = false;
@@ -236,7 +232,7 @@ bool PacketBuffer::ExpandBufferSize() {
   size_t new_size = std::min(max_size_, 2 * buffer_.size());
   std::vector<StoredPacket> new_buffer(new_size);
   for (StoredPacket& entry : buffer_) {
-    if (entry.used) {
+    if (entry.used()) {
       new_buffer[entry.seq_num() % new_size] = std::move(entry);
     }
   }
@@ -251,17 +247,17 @@ bool PacketBuffer::PotentialNewFrame(uint16_t seq_num) const {
   const StoredPacket& entry = buffer_[index];
   const StoredPacket& prev_entry = buffer_[prev_index];
 
-  if (!entry.used)
+  if (!entry.used())
     return false;
   if (entry.seq_num() != seq_num)
     return false;
   if (entry.frame_begin())
     return true;
-  if (!prev_entry.used)
+  if (!prev_entry.used())
     return false;
   if (prev_entry.seq_num() != static_cast<uint16_t>(entry.seq_num() - 1))
     return false;
-  if (prev_entry.data.timestamp != entry.data.timestamp)
+  if (prev_entry.packet->timestamp != entry.packet->timestamp)
     return false;
   if (prev_entry.continuous)
     return true;
@@ -285,10 +281,10 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
       // the |frame_begin| flag is set.
       int start_index = index;
       size_t tested_packets = 0;
-      int64_t frame_timestamp = buffer_[start_index].data.timestamp;
+      int64_t frame_timestamp = buffer_[start_index].packet->timestamp;
 
       // Identify H.264 keyframes by means of SPS, PPS, and IDR.
-      bool is_h264 = buffer_[start_index].data.codec() == kVideoCodecH264;
+      bool is_h264 = buffer_[start_index].packet->codec() == kVideoCodecH264;
       bool has_h264_sps = false;
       bool has_h264_pps = false;
       bool has_h264_idr = false;
@@ -303,7 +299,7 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
 
         if (is_h264) {
           const auto* h264_header = absl::get_if<RTPVideoHeaderH264>(
-              &buffer_[start_index].data.video_header.video_type_header);
+              &buffer_[start_index].packet->video_header.video_type_header);
           if (!h264_header || h264_header->nalus_length >= kMaxNalusPerPacket)
             return found_frames;
 
@@ -324,10 +320,10 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
               // smallest index and valid resolution; typically its IDR or SPS
               // packet; there may be packet preceeding this packet, IDR's
               // resolution will be applied to them.
-              if (buffer_[start_index].data.width() > 0 &&
-                  buffer_[start_index].data.height() > 0) {
-                idr_width = buffer_[start_index].data.width();
-                idr_height = buffer_[start_index].data.height();
+              if (buffer_[start_index].packet->width() > 0 &&
+                  buffer_[start_index].packet->height() > 0) {
+                idr_width = buffer_[start_index].packet->width();
+                idr_height = buffer_[start_index].packet->height();
               }
             }
           }
@@ -344,8 +340,8 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
         // the PacketBuffer to hand out incomplete frames.
         // See: https://bugs.chromium.org/p/webrtc/issues/detail?id=7106
         if (is_h264 &&
-            (!buffer_[start_index].used ||
-             buffer_[start_index].data.timestamp != frame_timestamp)) {
+            (!buffer_[start_index].used() ||
+             buffer_[start_index].packet->timestamp != frame_timestamp)) {
           break;
         }
 
@@ -369,23 +365,27 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
       // determines if the RtpFrameObject is a key frame or delta frame.
       const size_t first_packet_index = start_seq_num % buffer_.size();
       if (is_h264_keyframe) {
-        buffer_[first_packet_index].data.video_header.frame_type =
+        buffer_[first_packet_index].packet->video_header.frame_type =
            VideoFrameType::kVideoFrameKey;
         if (idr_width > 0 && idr_height > 0) {
           // IDR frame was finalized and we have the correct resolution for
           // IDR; update first packet to have same resolution as IDR.
-          buffer_[first_packet_index].data.video_header.width = idr_width;
-          buffer_[first_packet_index].data.video_header.height = idr_height;
+          buffer_[first_packet_index].packet->video_header.width = idr_width;
+          buffer_[first_packet_index].packet->video_header.height =
+              idr_height;
         }
       } else {
-        buffer_[first_packet_index].data.video_header.frame_type =
+        buffer_[first_packet_index].packet->video_header.frame_type =
            VideoFrameType::kVideoFrameDelta;
       }
 
       // With IPPP, if this is not a keyframe, make sure there are no gaps
       // in the packet sequence numbers up until this point.
       const uint8_t h264tid =
-          buffer_[start_index].data.video_header.frame_marking.temporal_id;
+          buffer_[start_index].used()
+              ? buffer_[start_index]
+                    .packet->video_header.frame_marking.temporal_id
+              : kNoTemporalIdx;
       if (h264tid == kNoTemporalIdx && !is_h264_keyframe &&
           missing_packets_.upper_bound(start_seq_num) !=
               missing_packets_.begin()) {
@@ -480,9 +480,9 @@ std::unique_ptr<RtpFrameObject> PacketBuffer::AssembleFrame(
 
 const PacketBuffer::Packet& PacketBuffer::GetPacket(uint16_t seq_num) const {
   const StoredPacket& entry = buffer_[seq_num % buffer_.size()];
-  RTC_DCHECK(entry.used);
+  RTC_DCHECK(entry.used());
   RTC_DCHECK_EQ(seq_num, entry.seq_num());
-  return entry.data;
+  return *entry.packet;
 }
 
 void PacketBuffer::UpdateMissingPackets(uint16_t seq_num) {

@@ -41,9 +41,9 @@ class PacketBuffer {
            int64_t ntp_time_ms,
            int64_t receive_time_ms);
     Packet(const Packet&) = delete;
-    Packet(Packet&&) = default;
+    Packet(Packet&&) = delete;
     Packet& operator=(const Packet&) = delete;
-    Packet& operator=(Packet&&) = default;
+    Packet& operator=(Packet&&) = delete;
     ~Packet() = default;
 
     VideoCodecType codec() const { return video_header.codec; }
@@ -82,9 +82,8 @@ class PacketBuffer {
   PacketBuffer(Clock* clock, size_t start_buffer_size, size_t max_buffer_size);
   ~PacketBuffer();
 
-  // The PacketBuffer will always take ownership of the |packet.dataPtr| when
-  // this function is called.
-  InsertResult InsertPacket(Packet* packet) ABSL_MUST_USE_RESULT;
+  InsertResult InsertPacket(std::unique_ptr<Packet> packet)
+      ABSL_MUST_USE_RESULT;
   InsertResult InsertPadding(uint16_t seq_num) ABSL_MUST_USE_RESULT;
   void ClearTo(uint16_t seq_num);
   void Clear();
@@ -95,21 +94,21 @@ class PacketBuffer {
 
  private:
   struct StoredPacket {
-    uint16_t seq_num() const { return data.seq_num; }
+    uint16_t seq_num() const { return packet->seq_num; }
 
     // If this is the first packet of the frame.
-    bool frame_begin() const { return data.is_first_packet_in_frame(); }
+    bool frame_begin() const { return packet->is_first_packet_in_frame(); }
 
     // If this is the last packet of the frame.
-    bool frame_end() const { return data.is_last_packet_in_frame(); }
+    bool frame_end() const { return packet->is_last_packet_in_frame(); }
 
     // If this slot is currently used.
-    bool used = false;
+    bool used() const { return packet != nullptr; }
 
     // If all its previous packets have been inserted into the packet buffer.
     bool continuous = false;
 
-    Packet data;
+    std::unique_ptr<Packet> packet;
   };
 
   Clock* const clock_;

@@ -111,18 +111,19 @@ class PacketBufferTest : public ::testing::Test {
       IsLast last,     // is last packet of frame
       rtc::ArrayView<const uint8_t> data = {},
       uint32_t timestamp = 123u) {  // rtp timestamp
-    PacketBuffer::Packet packet;
-    packet.video_header.codec = kVideoCodecGeneric;
-    packet.timestamp = timestamp;
-    packet.seq_num = seq_num;
-    packet.video_header.frame_type = keyframe == kKeyFrame
+    auto packet = std::make_unique<PacketBuffer::Packet>();
+    packet->video_header.codec = kVideoCodecGeneric;
+    packet->timestamp = timestamp;
+    packet->seq_num = seq_num;
+    packet->video_header.frame_type = keyframe == kKeyFrame
                                           ? VideoFrameType::kVideoFrameKey
                                           : VideoFrameType::kVideoFrameDelta;
-    packet.video_header.is_first_packet_in_frame = first == kFirst;
-    packet.video_header.is_last_packet_in_frame = last == kLast;
-    packet.video_payload.SetData(data.data(), data.size());
+    packet->video_header.is_first_packet_in_frame = first == kFirst;
+    packet->video_header.is_last_packet_in_frame = last == kLast;
+    packet->video_payload.SetData(data.data(), data.size());
 
-    return PacketBufferInsertResult(packet_buffer_.InsertPacket(&packet));
+    return PacketBufferInsertResult(
+        packet_buffer_.InsertPacket(std::move(packet)));
   }
 
   const test::ScopedFieldTrials scoped_field_trials_;
@@ -181,29 +182,38 @@ TEST_F(PacketBufferTest, InsertOldPackets) {
 TEST_F(PacketBufferTest, NackCount) {
   const uint16_t seq_num = Rand();
 
-  PacketBuffer::Packet packet;
-  packet.video_header.codec = kVideoCodecGeneric;
-  packet.seq_num = seq_num;
-  packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
-  packet.video_header.is_first_packet_in_frame = true;
-  packet.video_header.is_last_packet_in_frame = false;
-  packet.times_nacked = 0;
-
-  IgnoreResult(packet_buffer_.InsertPacket(&packet));
+  auto packet = std::make_unique<PacketBuffer::Packet>();
+  packet->video_header.codec = kVideoCodecGeneric;
+  packet->seq_num = seq_num;
+  packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+  packet->video_header.is_first_packet_in_frame = true;
+  packet->video_header.is_last_packet_in_frame = false;
+  packet->times_nacked = 0;
+  IgnoreResult(packet_buffer_.InsertPacket(std::move(packet)));
 
-  packet.seq_num++;
-  packet.video_header.is_first_packet_in_frame = false;
-  packet.times_nacked = 1;
-  IgnoreResult(packet_buffer_.InsertPacket(&packet));
+  packet = std::make_unique<PacketBuffer::Packet>();
+  packet->seq_num = seq_num + 1;
+  packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+  packet->video_header.is_first_packet_in_frame = false;
+  packet->video_header.is_last_packet_in_frame = false;
+  packet->times_nacked = 1;
+  IgnoreResult(packet_buffer_.InsertPacket(std::move(packet)));
 
-  packet.seq_num++;
-  packet.times_nacked = 3;
-  IgnoreResult(packet_buffer_.InsertPacket(&packet));
+  packet = std::make_unique<PacketBuffer::Packet>();
+  packet->seq_num = seq_num + 2;
+  packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+  packet->video_header.is_first_packet_in_frame = false;
+  packet->video_header.is_last_packet_in_frame = false;
+  packet->times_nacked = 3;
+  IgnoreResult(packet_buffer_.InsertPacket(std::move(packet)));
 
-  packet.seq_num++;
-  packet.video_header.is_last_packet_in_frame = true;
-  packet.times_nacked = 1;
-  auto frames = packet_buffer_.InsertPacket(&packet).frames;
+  packet = std::make_unique<PacketBuffer::Packet>();
+  packet->seq_num = seq_num + 3;
+  packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+  packet->video_header.is_first_packet_in_frame = false;
+  packet->video_header.is_last_packet_in_frame = true;
+  packet->times_nacked = 1;
+  auto frames = packet_buffer_.InsertPacket(std::move(packet)).frames;
 
   ASSERT_THAT(frames, SizeIs(1));
   EXPECT_EQ(frames.front()->times_nacked(), 3);
@@ -428,22 +438,22 @@ TEST_F(PacketBufferTest, GetBitstreamAv1) {
   const uint8_t data1[] = {0b01'01'0000, 0b0'0100'000, 'm', 'a', 'n', 'y', ' '};
   const uint8_t data2[] = {0b10'01'0000, 'b', 'i', 't', 's', 0};
 
-  PacketBuffer::Packet packet1;
-  packet1.video_header.codec = kVideoCodecAV1;
-  packet1.seq_num = 13;
-  packet1.video_header.is_first_packet_in_frame = true;
-  packet1.video_header.is_last_packet_in_frame = false;
-  packet1.video_payload = data1;
-  auto frames = packet_buffer_.InsertPacket(&packet1).frames;
+  auto packet1 = std::make_unique<PacketBuffer::Packet>();
+  packet1->video_header.codec = kVideoCodecAV1;
+  packet1->seq_num = 13;
+  packet1->video_header.is_first_packet_in_frame = true;
+  packet1->video_header.is_last_packet_in_frame = false;
+  packet1->video_payload = data1;
+  auto frames = packet_buffer_.InsertPacket(std::move(packet1)).frames;
   EXPECT_THAT(frames, IsEmpty());
 
-  PacketBuffer::Packet packet2;
-  packet2.video_header.codec = kVideoCodecAV1;
-  packet2.seq_num = 14;
-  packet2.video_header.is_first_packet_in_frame = false;
-  packet2.video_header.is_last_packet_in_frame = true;
-  packet2.video_payload = data2;
-  frames = packet_buffer_.InsertPacket(&packet2).frames;
+  auto packet2 = std::make_unique<PacketBuffer::Packet>();
+  packet2->video_header.codec = kVideoCodecAV1;
+  packet2->seq_num = 14;
+  packet2->video_header.is_first_packet_in_frame = false;
+  packet2->video_header.is_last_packet_in_frame = true;
+  packet2->video_payload = data2;
+  frames = packet_buffer_.InsertPacket(std::move(packet2)).frames;
 
   ASSERT_THAT(frames, SizeIs(1));
   EXPECT_EQ(frames[0]->first_seq_num(), 13);
@@ -458,22 +468,22 @@ TEST_F(PacketBufferTest, GetBitstreamInvalidAv1) {
   const uint8_t data1[] = {0b01'01'0000, 0b0'0100'000, 'm', 'a', 'n', 'y', ' '};
   const uint8_t data2[] = {0b00'01'0000, 'b', 'i', 't', 's', 0};
 
-  PacketBuffer::Packet packet1;
-  packet1.video_header.codec = kVideoCodecAV1;
-  packet1.seq_num = 13;
-  packet1.video_header.is_first_packet_in_frame = true;
-  packet1.video_header.is_last_packet_in_frame = false;
-  packet1.video_payload = data1;
-  auto frames = packet_buffer_.InsertPacket(&packet1).frames;
+  auto packet1 = std::make_unique<PacketBuffer::Packet>();
+  packet1->video_header.codec = kVideoCodecAV1;
+  packet1->seq_num = 13;
+  packet1->video_header.is_first_packet_in_frame = true;
+  packet1->video_header.is_last_packet_in_frame = false;
+  packet1->video_payload = data1;
+  auto frames = packet_buffer_.InsertPacket(std::move(packet1)).frames;
   EXPECT_THAT(frames, IsEmpty());
 
-  PacketBuffer::Packet packet2;
-  packet2.video_header.codec = kVideoCodecAV1;
-  packet2.seq_num = 14;
-  packet2.video_header.is_first_packet_in_frame = false;
-  packet2.video_header.is_last_packet_in_frame = true;
-  packet2.video_payload = data2;
-  frames = packet_buffer_.InsertPacket(&packet2).frames;
+  auto packet2 = std::make_unique<PacketBuffer::Packet>();
+  packet2->video_header.codec = kVideoCodecAV1;
+  packet2->seq_num = 14;
+  packet2->video_header.is_first_packet_in_frame = false;
+  packet2->video_header.is_last_packet_in_frame = true;
+  packet2->video_payload = data2;
+  frames = packet_buffer_.InsertPacket(std::move(packet2)).frames;
 
   EXPECT_THAT(frames, IsEmpty());
 }
@@ -526,12 +536,12 @@ class PacketBufferH264Test : public PacketBufferTest {
       rtc::ArrayView<const uint8_t> data = {},
       uint32_t width = 0,     // width of frame (SPS/IDR)
       uint32_t height = 0) {  // height of frame (SPS/IDR)
-    PacketBuffer::Packet packet;
-    packet.video_header.codec = kVideoCodecH264;
+    auto packet = std::make_unique<PacketBuffer::Packet>();
+    packet->video_header.codec = kVideoCodecH264;
     auto& h264_header =
-        packet.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
-    packet.seq_num = seq_num;
-    packet.timestamp = timestamp;
+        packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+    packet->seq_num = seq_num;
+    packet->timestamp = timestamp;
     if (keyframe == kKeyFrame) {
       if (sps_pps_idr_is_keyframe_) {
         h264_header.nalus[0].type = H264::NaluType::kSps;
@@ -543,13 +553,14 @@ class PacketBufferH264Test : public PacketBufferTest {
         h264_header.nalus_length = 1;
       }
     }
-    packet.video_header.width = width;
-    packet.video_header.height = height;
-    packet.video_header.is_first_packet_in_frame = first == kFirst;
-    packet.video_header.is_last_packet_in_frame = last == kLast;
-    packet.video_payload.SetData(data.data(), data.size());
+    packet->video_header.width = width;
+    packet->video_header.height = height;
+    packet->video_header.is_first_packet_in_frame = first == kFirst;
+    packet->video_header.is_last_packet_in_frame = last == kLast;
+    packet->video_payload.SetData(data.data(), data.size());
 
-    return PacketBufferInsertResult(packet_buffer_.InsertPacket(&packet));
+    return PacketBufferInsertResult(
+        packet_buffer_.InsertPacket(std::move(packet)));
   }
 
   PacketBufferInsertResult InsertH264KeyFrameWithAud(
@@ -561,12 +572,12 @@ class PacketBufferH264Test : public PacketBufferTest {
       rtc::ArrayView<const uint8_t> data = {},
       uint32_t width = 0,     // width of frame (SPS/IDR)
       uint32_t height = 0) {  // height of frame (SPS/IDR)
-    PacketBuffer::Packet packet;
-    packet.video_header.codec = kVideoCodecH264;
+    auto packet = std::make_unique<PacketBuffer::Packet>();
+    packet->video_header.codec = kVideoCodecH264;
     auto& h264_header =
-        packet.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
-    packet.seq_num = seq_num;
-    packet.timestamp = timestamp;
+        packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+    packet->seq_num = seq_num;
+    packet->timestamp = timestamp;
 
     // this should be the start of frame.
     RTC_CHECK(first == kFirst);
@@ -574,9 +585,9 @@ class PacketBufferH264Test : public PacketBufferTest {
     // Insert a AUD NALU / packet without width/height.
     h264_header.nalus[0].type = H264::NaluType::kAud;
     h264_header.nalus_length = 1;
-    packet.video_header.is_first_packet_in_frame = true;
-    packet.video_header.is_last_packet_in_frame = false;
-    IgnoreResult(packet_buffer_.InsertPacket(&packet));
+    packet->video_header.is_first_packet_in_frame = true;
+    packet->video_header.is_last_packet_in_frame = false;
+    IgnoreResult(packet_buffer_.InsertPacket(std::move(packet)));
     // insert IDR
     return InsertH264(seq_num + 1, keyframe, kNotFirst, last, timestamp, data,
                       width, height);
@@ -633,18 +644,18 @@ TEST_P(PacketBufferH264ParameterizedTest, GetBitstreamBufferPadding) {
   uint16_t seq_num = Rand();
   uint8_t data[] = "some plain old data";
 
-  PacketBuffer::Packet packet;
+  auto packet = std::make_unique<PacketBuffer::Packet>();
   auto& h264_header =
-      packet.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+      packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus_length = 1;
   h264_header.nalus[0].type = H264::NaluType::kIdr;
   h264_header.packetization_type = kH264SingleNalu;
-  packet.seq_num = seq_num;
-  packet.video_header.codec = kVideoCodecH264;
-  packet.video_payload = data;
-  packet.video_header.is_first_packet_in_frame = true;
-  packet.video_header.is_last_packet_in_frame = true;
-  auto frames = packet_buffer_.InsertPacket(&packet).frames;
+  packet->seq_num = seq_num;
+  packet->video_header.codec = kVideoCodecH264;
+  packet->video_payload = data;
+  packet->video_header.is_first_packet_in_frame = true;
+  packet->video_header.is_last_packet_in_frame = true;
+  auto frames = packet_buffer_.InsertPacket(std::move(packet)).frames;
 
   ASSERT_THAT(frames, SizeIs(1));
   EXPECT_EQ(frames[0]->first_seq_num(), seq_num);
@@ -807,45 +818,51 @@ TEST_F(PacketBufferTest,
 }
 
 TEST_F(PacketBufferTest, IncomingCodecChange) {
-  PacketBuffer::Packet packet;
-  packet.video_header.is_first_packet_in_frame = true;
-  packet.video_header.is_last_packet_in_frame = true;
+  auto packet = std::make_unique<PacketBuffer::Packet>();
+  packet->video_header.is_first_packet_in_frame = true;
+  packet->video_header.is_last_packet_in_frame = true;
+  packet->video_header.codec = kVideoCodecVP8;
+  packet->video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+  packet->timestamp = 1;
+  packet->seq_num = 1;
+  packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames, SizeIs(1));
 
-  packet.video_header.codec = kVideoCodecVP8;
-  packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
-  packet.timestamp = 1;
-  packet.seq_num = 1;
-  packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet).frames, SizeIs(1));
-
-  packet.video_header.codec = kVideoCodecH264;
+  packet = std::make_unique<PacketBuffer::Packet>();
+  packet->video_header.is_first_packet_in_frame = true;
+  packet->video_header.is_last_packet_in_frame = true;
+  packet->video_header.codec = kVideoCodecH264;
   auto& h264_header =
-      packet.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+      packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus_length = 1;
-  packet.timestamp = 3;
-  packet.seq_num = 3;
-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet).frames, IsEmpty());
+  packet->timestamp = 3;
+  packet->seq_num = 3;
+  packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames, IsEmpty());
 
-  packet.video_header.codec = kVideoCodecVP8;
-  packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
-  packet.timestamp = 2;
-  packet.seq_num = 2;
-  packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet).frames, SizeIs(2));
+  packet = std::make_unique<PacketBuffer::Packet>();
+  packet->video_header.is_first_packet_in_frame = true;
+  packet->video_header.is_last_packet_in_frame = true;
+  packet->video_header.codec = kVideoCodecVP8;
+  packet->video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+  packet->timestamp = 2;
+  packet->seq_num = 2;
+  packet->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames, SizeIs(2));
 }
 
 TEST_F(PacketBufferTest, TooManyNalusInPacket) {
-  PacketBuffer::Packet packet;
-  packet.video_header.codec = kVideoCodecH264;
-  packet.timestamp = 1;
-  packet.seq_num = 1;
-  packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
-  packet.video_header.is_first_packet_in_frame = true;
-  packet.video_header.is_last_packet_in_frame = true;
+  auto packet = std::make_unique<PacketBuffer::Packet>();
+  packet->video_header.codec = kVideoCodecH264;
+  packet->timestamp = 1;
+  packet->seq_num = 1;
+  packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+  packet->video_header.is_first_packet_in_frame = true;
+  packet->video_header.is_last_packet_in_frame = true;
   auto& h264_header =
-      packet.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+      packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus_length = kMaxNalusPerPacket;
-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet).frames, IsEmpty());
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames, IsEmpty());
 }
 
 TEST_P(PacketBufferH264ParameterizedTest, OneFrameFillBuffer) {
@@ -902,15 +919,17 @@ class PacketBufferH264XIsKeyframeTest : public PacketBufferH264Test {
   const uint16_t kSeqNum = 5;
 
   explicit PacketBufferH264XIsKeyframeTest(bool sps_pps_idr_is_keyframe)
-      : PacketBufferH264Test(sps_pps_idr_is_keyframe) {
-    packet_.video_header.codec = kVideoCodecH264;
-    packet_.seq_num = kSeqNum;
+      : PacketBufferH264Test(sps_pps_idr_is_keyframe) {}
 
-    packet_.video_header.is_first_packet_in_frame = true;
-    packet_.video_header.is_last_packet_in_frame = true;
+  std::unique_ptr<PacketBuffer::Packet> CreatePacket() {
+    auto packet = std::make_unique<PacketBuffer::Packet>();
+    packet->video_header.codec = kVideoCodecH264;
+    packet->seq_num = kSeqNum;
+
+    packet->video_header.is_first_packet_in_frame = true;
+    packet->video_header.is_last_packet_in_frame = true;
+    return packet;
   }
-
-  PacketBuffer::Packet packet_;
 };
 
 class PacketBufferH264IdrIsKeyframeTest
@@ -921,23 +940,25 @@ class PacketBufferH264IdrIsKeyframeTest
 };
 
 TEST_F(PacketBufferH264IdrIsKeyframeTest, IdrIsKeyframe) {
+  auto packet = CreatePacket();
   auto& h264_header =
-      packet_.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+      packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus[0].type = H264::NaluType::kIdr;
   h264_header.nalus_length = 1;
-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet_).frames,
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames,
               ElementsAre(KeyFrame()));
 }
 
 TEST_F(PacketBufferH264IdrIsKeyframeTest, SpsPpsIdrIsKeyframe) {
+  auto packet = CreatePacket();
   auto& h264_header =
-      packet_.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+      packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus[0].type = H264::NaluType::kSps;
   h264_header.nalus[1].type = H264::NaluType::kPps;
   h264_header.nalus[2].type = H264::NaluType::kIdr;
   h264_header.nalus_length = 3;
 
-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet_).frames,
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames,
               ElementsAre(KeyFrame()));
 }
 
@@ -949,35 +970,38 @@ class PacketBufferH264SpsPpsIdrIsKeyframeTest
 };
 
 TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, IdrIsNotKeyframe) {
+  auto packet = CreatePacket();
   auto& h264_header =
-      packet_.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+      packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus[0].type = H264::NaluType::kIdr;
   h264_header.nalus_length = 1;
 
-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet_).frames,
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames,
              ElementsAre(DeltaFrame()));
 }
 
 TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, SpsPpsIsNotKeyframe) {
+  auto packet = CreatePacket();
   auto& h264_header =
-      packet_.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+      packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus[0].type = H264::NaluType::kSps;
   h264_header.nalus[1].type = H264::NaluType::kPps;
   h264_header.nalus_length = 2;
 
-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet_).frames,
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames,
              ElementsAre(DeltaFrame()));
 }
 
 TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, SpsPpsIdrIsKeyframe) {
+  auto packet = CreatePacket();
   auto& h264_header =
-      packet_.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+      packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   h264_header.nalus[0].type = H264::NaluType::kSps;
   h264_header.nalus[1].type = H264::NaluType::kPps;
   h264_header.nalus[2].type = H264::NaluType::kIdr;
   h264_header.nalus_length = 3;
 
-  EXPECT_THAT(packet_buffer_.InsertPacket(&packet_).frames,
+  EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).frames,
              ElementsAre(KeyFrame()));
 }
 

@@ -8,6 +8,9 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <memory>
+#include <utility>
+
 #include "modules/video_coding/frame_object.h"
 #include "modules/video_coding/packet_buffer.h"
 #include "system_wrappers/include/clock.h"
@@ -26,20 +29,20 @@ void FuzzOneInput(const uint8_t* data, size_t size) {
   test::FuzzDataHelper helper(rtc::ArrayView<const uint8_t>(data, size));
 
   while (helper.BytesLeft()) {
-    video_coding::PacketBuffer::Packet packet;
+    auto packet = std::make_unique<video_coding::PacketBuffer::Packet>();
     // Fuzz POD members of the packet.
-    helper.CopyTo(&packet.marker_bit);
-    helper.CopyTo(&packet.payload_type);
-    helper.CopyTo(&packet.seq_num);
-    helper.CopyTo(&packet.timestamp);
-    helper.CopyTo(&packet.ntp_time_ms);
-    helper.CopyTo(&packet.times_nacked);
+    helper.CopyTo(&packet->marker_bit);
+    helper.CopyTo(&packet->payload_type);
+    helper.CopyTo(&packet->seq_num);
+    helper.CopyTo(&packet->timestamp);
+    helper.CopyTo(&packet->ntp_time_ms);
+    helper.CopyTo(&packet->times_nacked);
 
     // Fuzz non-POD member of the packet.
-    packet.video_payload.SetSize(helper.ReadOrDefaultValue<uint8_t>(0));
+    packet->video_payload.SetSize(helper.ReadOrDefaultValue<uint8_t>(0));
     // TODO(danilchap): Fuzz other non-POD members of the |packet|.
 
-    IgnoreResult(packet_buffer.InsertPacket(&packet));
+    IgnoreResult(packet_buffer.InsertPacket(std::move(packet)));
   }
 }
 

@@ -331,23 +331,23 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
     const RtpPacketReceived& rtp_packet,
     const RTPVideoHeader& video) {
   RTC_DCHECK_RUN_ON(&worker_task_checker_);
-  video_coding::PacketBuffer::Packet packet(
+  auto packet = std::make_unique<video_coding::PacketBuffer::Packet>(
       rtp_packet, video, ntp_estimator_.Estimate(rtp_packet.Timestamp()),
       clock_->TimeInMilliseconds());
 
   // Try to extrapolate absolute capture time if it is missing.
   // TODO(bugs.webrtc.org/10739): Add support for estimated capture clock
   // offset.
-  packet.packet_info.set_absolute_capture_time(
+  packet->packet_info.set_absolute_capture_time(
       absolute_capture_time_receiver_.OnReceivePacket(
-          AbsoluteCaptureTimeReceiver::GetSource(packet.packet_info.ssrc(),
-                                                 packet.packet_info.csrcs()),
-          packet.packet_info.rtp_timestamp(),
+          AbsoluteCaptureTimeReceiver::GetSource(packet->packet_info.ssrc(),
+                                                 packet->packet_info.csrcs()),
+          packet->packet_info.rtp_timestamp(),
           // Assume frequency is the same one for all video frames.
           kVideoPayloadTypeFrequency,
-          packet.packet_info.absolute_capture_time()));
+          packet->packet_info.absolute_capture_time()));
 
-  RTPVideoHeader& video_header = packet.video_header;
+  RTPVideoHeader& video_header = packet->video_header;
   video_header.rotation = kVideoRotation_0;
   video_header.content_type = VideoContentType::UNSPECIFIED;
   video_header.video_timing.flags = VideoSendTiming::kInvalid;
@@ -368,7 +368,7 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
   rtp_packet.GetExtension<FrameMarkingExtension>(&video_header.frame_marking);
 
   RtpGenericFrameDescriptor& generic_descriptor =
-      packet.generic_descriptor.emplace();
+      packet->generic_descriptor.emplace();
   if (rtp_packet.GetExtension<RtpGenericFrameDescriptorExtension01>(
           &generic_descriptor)) {
     if (rtp_packet.HasExtension<RtpGenericFrameDescriptorExtension00>()) {
@@ -382,36 +382,36 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
     generic_descriptor.SetByteRepresentation(
         rtp_packet.GetRawExtension<RtpGenericFrameDescriptorExtension00>());
   } else {
-    packet.generic_descriptor = absl::nullopt;
+    packet->generic_descriptor = absl::nullopt;
   }
-  if (packet.generic_descriptor != absl::nullopt) {
+  if (packet->generic_descriptor != absl::nullopt) {
     video_header.is_first_packet_in_frame =
-        packet.generic_descriptor->FirstPacketInSubFrame();
+        packet->generic_descriptor->FirstPacketInSubFrame();
     video_header.is_last_packet_in_frame =
-        packet.generic_descriptor->LastPacketInSubFrame();
+        packet->generic_descriptor->LastPacketInSubFrame();
 
-    if (packet.generic_descriptor->FirstPacketInSubFrame()) {
+    if (packet->generic_descriptor->FirstPacketInSubFrame()) {
       video_header.frame_type =
-          packet.generic_descriptor->FrameDependenciesDiffs().empty()
+          packet->generic_descriptor->FrameDependenciesDiffs().empty()
              ? VideoFrameType::kVideoFrameKey
              : VideoFrameType::kVideoFrameDelta;
 
       auto& descriptor = video_header.generic.emplace();
       int64_t frame_id =
-          frame_id_unwrapper_.Unwrap(packet.generic_descriptor->FrameId());
+          frame_id_unwrapper_.Unwrap(packet->generic_descriptor->FrameId());
       descriptor.frame_id = frame_id;
-      descriptor.spatial_index = packet.generic_descriptor->SpatialLayer();
-      descriptor.temporal_index = packet.generic_descriptor->TemporalLayer();
+      descriptor.spatial_index = packet->generic_descriptor->SpatialLayer();
+      descriptor.temporal_index = packet->generic_descriptor->TemporalLayer();
       descriptor.discardable =
-          packet.generic_descriptor->Discardable().value_or(false);
+          packet->generic_descriptor->Discardable().value_or(false);
       for (uint16_t fdiff :
-           packet.generic_descriptor->FrameDependenciesDiffs()) {
+           packet->generic_descriptor->FrameDependenciesDiffs()) {
         descriptor.dependencies.push_back(frame_id - fdiff);
       }
     }
 
-    video_header.width = packet.generic_descriptor->Width();
-    video_header.height = packet.generic_descriptor->Height();
+    video_header.width = packet->generic_descriptor->Width();
+    video_header.height = packet->generic_descriptor->Height();
   }
 
   // Color space should only be transmitted in the last packet of a frame,
@@ -435,7 +435,7 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
       // TODO(bugs.webrtc.org/10336): Implement support for reordering.
       RTC_LOG(LS_INFO)
          << "LossNotificationController does not support reordering.";
-    } else if (!packet.generic_descriptor) {
+    } else if (!packet->generic_descriptor) {
       RTC_LOG(LS_WARNING) << "LossNotificationController requires generic "
                              "frame descriptor, but it is missing.";
     } else {
@@ -460,31 +460,31 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
         video_header.is_first_packet_in_frame &&
         video_header.frame_type == VideoFrameType::kVideoFrameKey;
 
-    packet.times_nacked = nack_module_->OnReceivedPacket(
+    packet->times_nacked = nack_module_->OnReceivedPacket(
         rtp_packet.SequenceNumber(), is_keyframe, rtp_packet.recovered());
   } else {
-    packet.times_nacked = -1;
+    packet->times_nacked = -1;
   }
 
   if (codec_payload.size() == 0) {
-    NotifyReceiverOfEmptyPacket(packet.seq_num);
+    NotifyReceiverOfEmptyPacket(packet->seq_num);
     rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
     return;
   }
 
-  if (packet.codec() == kVideoCodecH264) {
+  if (packet->codec() == kVideoCodecH264) {
     // Only when we start to receive packets will we know what payload type
     // that will be used. When we know the payload type insert the correct
     // sps/pps into the tracker.
-    if (packet.payload_type != last_payload_type_) {
-      last_payload_type_ = packet.payload_type;
-      InsertSpsPpsIntoTracker(packet.payload_type);
+    if (packet->payload_type != last_payload_type_) {
+      last_payload_type_ = packet->payload_type;
+      InsertSpsPpsIntoTracker(packet->payload_type);
     }
 
     video_coding::H264SpsPpsTracker::FixedBitstream fixed =
        tracker_.CopyAndFixBitstream(
            rtc::MakeArrayView(codec_payload.cdata(), codec_payload.size()),
-            &packet.video_header);
+            &packet->video_header);
 
     switch (fixed.action) {
       case video_coding::H264SpsPpsTracker::kRequestKeyframe:
@@ -494,17 +494,17 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
       case video_coding::H264SpsPpsTracker::kDrop:
         return;
       case video_coding::H264SpsPpsTracker::kInsert:
-        packet.video_payload = std::move(fixed.bitstream);
+        packet->video_payload = std::move(fixed.bitstream);
         break;
     }
 
   } else {
-    packet.video_payload = std::move(codec_payload);
+    packet->video_payload = std::move(codec_payload);
   }
 
   rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
-  frame_counter_.Add(packet.timestamp);
-  OnInsertedPacket(packet_buffer_.InsertPacket(&packet));
+  frame_counter_.Add(packet->timestamp);
+  OnInsertedPacket(packet_buffer_.InsertPacket(std::move(packet)));
 }
 
 void RtpVideoStreamReceiver::OnRecoveredPacket(const uint8_t* rtp_packet,