Don't store RtpPacketInfo in the PacketBuffer.

Historically the PacketBuffer used a callback for assembled frames, and because of that RtpPacketInfos were piped through it even though they didn't have anything to do with the PacketBuffer.

With this CL RtpPacketInfos are stored in the RtpVideoStreamReceiver(2) instead.

Bug: webrtc:12579
Change-Id: Ia6285b59e135910eee7234b89b23425bb0fc0d2b
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/215320
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#33980}
This commit is contained in:
philipel
2021-05-11 11:30:52 +02:00
committed by WebRTC LUCI CQ
parent ae1f8f59ac
commit 9599b3c582
6 changed files with 158 additions and 101 deletions

View File

@ -35,20 +35,13 @@ namespace webrtc {
namespace video_coding {
PacketBuffer::Packet::Packet(const RtpPacketReceived& rtp_packet,
const RTPVideoHeader& video_header,
Timestamp receive_time)
const RTPVideoHeader& video_header)
: marker_bit(rtp_packet.Marker()),
payload_type(rtp_packet.PayloadType()),
seq_num(rtp_packet.SequenceNumber()),
timestamp(rtp_packet.Timestamp()),
times_nacked(-1),
video_header(video_header),
packet_info(rtp_packet.Ssrc(),
rtp_packet.Csrcs(),
rtp_packet.Timestamp(),
/*audio_level=*/absl::nullopt,
rtp_packet.GetExtension<AbsoluteCaptureTimeExtension>(),
receive_time) {}
video_header(video_header) {}
PacketBuffer::PacketBuffer(size_t start_buffer_size, size_t max_buffer_size)
: max_size_(max_buffer_size),

View File

@ -34,8 +34,7 @@ class PacketBuffer {
struct Packet {
Packet() = default;
Packet(const RtpPacketReceived& rtp_packet,
const RTPVideoHeader& video_header,
Timestamp receive_time);
const RTPVideoHeader& video_header);
Packet(const Packet&) = delete;
Packet(Packet&&) = delete;
Packet& operator=(const Packet&) = delete;
@ -64,8 +63,6 @@ class PacketBuffer {
rtc::CopyOnWriteBuffer video_payload;
RTPVideoHeader video_header;
RtpPacketInfo packet_info;
};
struct InsertResult {
std::vector<std::unique_ptr<Packet>> packets;

View File

@ -506,18 +506,9 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
const RtpPacketReceived& rtp_packet,
const RTPVideoHeader& video) {
RTC_DCHECK_RUN_ON(&worker_task_checker_);
auto packet = std::make_unique<video_coding::PacketBuffer::Packet>(
rtp_packet, video, clock_->CurrentTime());
// Try to extrapolate absolute capture time if it is missing.
packet->packet_info.set_absolute_capture_time(
absolute_capture_time_receiver_.OnReceivePacket(
AbsoluteCaptureTimeReceiver::GetSource(packet->packet_info.ssrc(),
packet->packet_info.csrcs()),
packet->packet_info.rtp_timestamp(),
// Assume frequency is the same one for all video frames.
kVideoPayloadTypeFrequency,
packet->packet_info.absolute_capture_time()));
auto packet =
std::make_unique<video_coding::PacketBuffer::Packet>(rtp_packet, video);
RTPVideoHeader& video_header = packet->video_header;
video_header.rotation = kVideoRotation_0;
@ -648,6 +639,29 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
video_coding::PacketBuffer::InsertResult insert_result;
{
MutexLock lock(&packet_buffer_lock_);
int64_t unwrapped_rtp_seq_num =
rtp_seq_num_unwrapper_.Unwrap(rtp_packet.SequenceNumber());
auto& packet_info =
packet_infos_
.emplace(
unwrapped_rtp_seq_num,
RtpPacketInfo(
rtp_packet.Ssrc(), rtp_packet.Csrcs(),
rtp_packet.Timestamp(),
/*audio_level=*/absl::nullopt,
rtp_packet.GetExtension<AbsoluteCaptureTimeExtension>(),
/*receive_time_ms=*/clock_->TimeInMilliseconds()))
.first->second;
// Try to extrapolate absolute capture time if it is missing.
packet_info.set_absolute_capture_time(
absolute_capture_time_receiver_.OnReceivePacket(
AbsoluteCaptureTimeReceiver::GetSource(packet_info.ssrc(),
packet_info.csrcs()),
packet_info.rtp_timestamp(),
// Assume frequency is the same one for all video frames.
kVideoPayloadTypeFrequency, packet_info.absolute_capture_time()));
insert_result = packet_buffer_.InsertPacket(std::move(packet));
}
OnInsertedPacket(std::move(insert_result));
@ -737,6 +751,9 @@ bool RtpVideoStreamReceiver::IsDecryptable() const {
void RtpVideoStreamReceiver::OnInsertedPacket(
video_coding::PacketBuffer::InsertResult result) {
std::vector<std::unique_ptr<RtpFrameObject>> assembled_frames;
{
MutexLock lock(&packet_buffer_lock_);
video_coding::PacketBuffer::Packet* first_packet = nullptr;
int max_nack_count;
int64_t min_recv_time;
@ -749,26 +766,31 @@ void RtpVideoStreamReceiver::OnInsertedPacket(
// PacketBuffer promises frame boundaries are correctly set on each
// packet. Document that assumption with the DCHECKs.
RTC_DCHECK_EQ(frame_boundary, packet->is_first_packet_in_frame());
int64_t unwrapped_rtp_seq_num =
rtp_seq_num_unwrapper_.Unwrap(packet->seq_num);
RTC_DCHECK(packet_infos_.count(unwrapped_rtp_seq_num) > 0);
RtpPacketInfo& packet_info = packet_infos_[unwrapped_rtp_seq_num];
if (packet->is_first_packet_in_frame()) {
first_packet = packet.get();
max_nack_count = packet->times_nacked;
min_recv_time = packet->packet_info.receive_time().ms();
max_recv_time = packet->packet_info.receive_time().ms();
min_recv_time = packet_info.receive_time().ms();
max_recv_time = packet_info.receive_time().ms();
payloads.clear();
packet_infos.clear();
} else {
max_nack_count = std::max(max_nack_count, packet->times_nacked);
min_recv_time =
std::min(min_recv_time, packet->packet_info.receive_time().ms());
std::min(min_recv_time, packet_info.receive_time().ms());
max_recv_time =
std::max(max_recv_time, packet->packet_info.receive_time().ms());
std::max(max_recv_time, packet_info.receive_time().ms());
}
payloads.emplace_back(packet->video_payload);
packet_infos.push_back(packet->packet_info);
packet_infos.push_back(packet_info);
frame_boundary = packet->is_last_packet_in_frame();
if (packet->is_last_packet_in_frame()) {
auto depacketizer_it = payload_type_map_.find(first_packet->payload_type);
auto depacketizer_it =
payload_type_map_.find(first_packet->payload_type);
RTC_CHECK(depacketizer_it != payload_type_map_.end());
rtc::scoped_refptr<EncodedImageBuffer> bitstream =
@ -779,7 +801,7 @@ void RtpVideoStreamReceiver::OnInsertedPacket(
}
const video_coding::PacketBuffer::Packet& last_packet = *packet;
OnAssembledFrame(std::make_unique<RtpFrameObject>(
assembled_frames.push_back(std::make_unique<RtpFrameObject>(
first_packet->seq_num, //
last_packet.seq_num, //
last_packet.marker_bit, //
@ -800,6 +822,12 @@ void RtpVideoStreamReceiver::OnInsertedPacket(
}
}
RTC_DCHECK(frame_boundary);
if (result.buffer_cleared) {
packet_infos_.clear();
}
} // packet_buffer_lock_
if (result.buffer_cleared) {
{
MutexLock lock(&sync_info_lock_);
@ -809,6 +837,10 @@ void RtpVideoStreamReceiver::OnInsertedPacket(
}
RequestKeyFrame();
}
for (auto& frame : assembled_frames) {
OnAssembledFrame(std::move(frame));
}
}
void RtpVideoStreamReceiver::OnAssembledFrame(
@ -851,8 +883,8 @@ void RtpVideoStreamReceiver::OnAssembledFrame(
if (frame_is_newer) {
// When we reset the |reference_finder_| we don't want new picture ids
// to overlap with old picture ids. To ensure that doesn't happen we
// start from the |last_completed_picture_id_| and add an offset in case
// of reordering.
// start from the |last_completed_picture_id_| and add an offset in
// case of reordering.
reference_finder_ = std::make_unique<RtpFrameReferenceFinder>(
this,
last_completed_picture_id_ + std::numeric_limits<uint16_t>::max());
@ -1028,12 +1060,14 @@ void RtpVideoStreamReceiver::NotifyReceiverOfEmptyPacket(uint16_t seq_num) {
MutexLock lock(&reference_finder_lock_);
reference_finder_->PaddingReceived(seq_num);
}
video_coding::PacketBuffer::InsertResult insert_result;
{
MutexLock lock(&packet_buffer_lock_);
insert_result = packet_buffer_.InsertPadding(seq_num);
}
OnInsertedPacket(std::move(insert_result));
if (nack_module_) {
nack_module_->OnReceivedPacket(seq_num, /* is_keyframe = */ false,
/* is_recovered = */ false);
@ -1118,6 +1152,9 @@ void RtpVideoStreamReceiver::FrameDecoded(int64_t picture_id) {
{
MutexLock lock(&packet_buffer_lock_);
packet_buffer_.ClearTo(seq_num);
int64_t unwrapped_rtp_seq_num = rtp_seq_num_unwrapper_.Unwrap(seq_num);
packet_infos_.erase(packet_infos_.begin(),
packet_infos_.upper_bound(unwrapped_rtp_seq_num));
}
MutexLock lock(&reference_finder_lock_);
reference_finder_->ClearTo(seq_num);

View File

@ -303,7 +303,8 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
ParseGenericDependenciesResult ParseGenericDependenciesExtension(
const RtpPacketReceived& rtp_packet,
RTPVideoHeader* video_header) RTC_RUN_ON(worker_task_checker_);
void OnAssembledFrame(std::unique_ptr<RtpFrameObject> frame);
void OnAssembledFrame(std::unique_ptr<RtpFrameObject> frame)
RTC_LOCKS_EXCLUDED(packet_buffer_lock_);
void UpdatePacketReceiveTimestamps(const RtpPacketReceived& packet,
bool is_keyframe)
RTC_RUN_ON(worker_task_checker_);
@ -407,6 +408,11 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate>
frame_transformer_delegate_;
SeqNumUnwrapper<uint16_t> rtp_seq_num_unwrapper_
RTC_GUARDED_BY(packet_buffer_lock_);
std::map<int64_t, RtpPacketInfo> packet_infos_
RTC_GUARDED_BY(packet_buffer_lock_);
};
} // namespace webrtc

View File

@ -473,18 +473,31 @@ void RtpVideoStreamReceiver2::OnReceivedPayloadData(
const RtpPacketReceived& rtp_packet,
const RTPVideoHeader& video) {
RTC_DCHECK_RUN_ON(&worker_task_checker_);
auto packet = std::make_unique<video_coding::PacketBuffer::Packet>(
rtp_packet, video, clock_->CurrentTime());
auto packet =
std::make_unique<video_coding::PacketBuffer::Packet>(rtp_packet, video);
int64_t unwrapped_rtp_seq_num =
rtp_seq_num_unwrapper_.Unwrap(rtp_packet.SequenceNumber());
auto& packet_info =
packet_infos_
.emplace(
unwrapped_rtp_seq_num,
RtpPacketInfo(
rtp_packet.Ssrc(), rtp_packet.Csrcs(), rtp_packet.Timestamp(),
/*audio_level=*/absl::nullopt,
rtp_packet.GetExtension<AbsoluteCaptureTimeExtension>(),
/*receive_time_ms=*/clock_->CurrentTime()))
.first->second;
// Try to extrapolate absolute capture time if it is missing.
packet->packet_info.set_absolute_capture_time(
packet_info.set_absolute_capture_time(
absolute_capture_time_receiver_.OnReceivePacket(
AbsoluteCaptureTimeReceiver::GetSource(packet->packet_info.ssrc(),
packet->packet_info.csrcs()),
packet->packet_info.rtp_timestamp(),
AbsoluteCaptureTimeReceiver::GetSource(packet_info.ssrc(),
packet_info.csrcs()),
packet_info.rtp_timestamp(),
// Assume frequency is the same one for all video frames.
kVideoPayloadTypeFrequency,
packet->packet_info.absolute_capture_time()));
kVideoPayloadTypeFrequency, packet_info.absolute_capture_time()));
RTPVideoHeader& video_header = packet->video_header;
video_header.rotation = kVideoRotation_0;
@ -715,22 +728,24 @@ void RtpVideoStreamReceiver2::OnInsertedPacket(
// PacketBuffer promises frame boundaries are correctly set on each
// packet. Document that assumption with the DCHECKs.
RTC_DCHECK_EQ(frame_boundary, packet->is_first_packet_in_frame());
int64_t unwrapped_rtp_seq_num =
rtp_seq_num_unwrapper_.Unwrap(packet->seq_num);
RTC_DCHECK(packet_infos_.count(unwrapped_rtp_seq_num) > 0);
RtpPacketInfo& packet_info = packet_infos_[unwrapped_rtp_seq_num];
if (packet->is_first_packet_in_frame()) {
first_packet = packet.get();
max_nack_count = packet->times_nacked;
min_recv_time = packet->packet_info.receive_time().ms();
max_recv_time = packet->packet_info.receive_time().ms();
min_recv_time = packet_info.receive_time().ms();
max_recv_time = packet_info.receive_time().ms();
payloads.clear();
packet_infos.clear();
} else {
max_nack_count = std::max(max_nack_count, packet->times_nacked);
min_recv_time =
std::min(min_recv_time, packet->packet_info.receive_time().ms());
max_recv_time =
std::max(max_recv_time, packet->packet_info.receive_time().ms());
min_recv_time = std::min(min_recv_time, packet_info.receive_time().ms());
max_recv_time = std::max(max_recv_time, packet_info.receive_time().ms());
}
payloads.emplace_back(packet->video_payload);
packet_infos.push_back(packet->packet_info);
packet_infos.push_back(packet_info);
frame_boundary = packet->is_last_packet_in_frame();
if (packet->is_last_packet_in_frame()) {
@ -770,6 +785,7 @@ void RtpVideoStreamReceiver2::OnInsertedPacket(
last_received_rtp_system_time_.reset();
last_received_keyframe_rtp_system_time_.reset();
last_received_keyframe_rtp_timestamp_.reset();
packet_infos_.clear();
RequestKeyFrame();
}
}
@ -1053,6 +1069,9 @@ void RtpVideoStreamReceiver2::FrameDecoded(int64_t picture_id) {
}
if (seq_num != -1) {
int64_t unwrapped_rtp_seq_num = rtp_seq_num_unwrapper_.Unwrap(seq_num);
packet_infos_.erase(packet_infos_.begin(),
packet_infos_.upper_bound(unwrapped_rtp_seq_num));
packet_buffer_.ClearTo(seq_num);
reference_finder_->ClearTo(seq_num);
}

View File

@ -357,6 +357,11 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender,
rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate>
frame_transformer_delegate_;
SeqNumUnwrapper<uint16_t> rtp_seq_num_unwrapper_
RTC_GUARDED_BY(worker_task_checker_);
std::map<int64_t, RtpPacketInfo> packet_infos_
RTC_GUARDED_BY(worker_task_checker_);
};
} // namespace webrtc