Prepare to replace VideoLayerFrameId with int64_t.

Bug: webrtc:12206
Change-Id: I10bfdefbc95a79e0595956c1a0e688051da6d2b9
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/207180
Reviewed-by: Sam Zackrisson <saza@webrtc.org>
Reviewed-by: Niels Moller <nisse@webrtc.org>
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#33265}
Author: philipel
Date: 2021-02-15 13:31:29 +01:00
Committed by: Commit Bot
Parent: 563fbc1dc5
Commit: 9aa9b8dbbe
29 changed files with 233 additions and 271 deletions
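
For orientation, every hunk below applies the same mechanical pattern: direct access to the picture_id field of VideoLayerFrameId becomes a call to the plain int64_t accessors on EncodedFrame (frame->Id() / frame->SetId(...)), and containers keyed on VideoLayerFrameId become containers keyed on int64_t. A minimal before/after sketch of that pattern; the class bodies are simplified stand-ins (the title says "prepare to replace", so the real EncodedFrame presumably still carries the old id member at this point):

#include <cstdint>

// Before: frames were identified by a small struct, and containers were
// keyed on it (simplified; not the full WebRTC definition).
struct VideoLayerFrameId {
  int64_t picture_id = -1;
  bool operator<(const VideoLayerFrameId& rhs) const {
    return picture_id < rhs.picture_id;
  }
};
// Old access pattern:  frame->id.picture_id
// Old map type:        std::map<VideoLayerFrameId, FrameInfo>

// After: the id is a plain int64_t behind accessors (simplified sketch).
class EncodedFrame {
 public:
  int64_t Id() const { return id_; }
  void SetId(int64_t id) { id_ = id; }

 private:
  int64_t id_ = -1;
};
// New access pattern:  frame->Id() / frame->SetId(...)
// New map type:        std::map<int64_t, FrameInfo>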

@ -179,8 +179,7 @@ int64_t FrameBuffer::FindNextFrame(int64_t now_ms) {
for (size_t i = 0; i < EncodedFrame::kMaxFrameReferences &&
i < next_frame_it->second.frame->num_references;
++i) {
if (next_frame_it->second.frame->references[i] >=
frame_it->first.picture_id) {
if (next_frame_it->second.frame->references[i] >= frame_it->first) {
has_inter_layer_dependency = true;
break;
}
@ -262,11 +261,11 @@ EncodedFrame* FrameBuffer::GetNextFrame() {
// Remove decoded frame and all undecoded frames before it.
if (stats_callback_) {
unsigned int dropped_frames = std::count_if(
frames_.begin(), frame_it,
[](const std::pair<const VideoLayerFrameId, FrameInfo>& frame) {
return frame.second.frame != nullptr;
});
unsigned int dropped_frames =
std::count_if(frames_.begin(), frame_it,
[](const std::pair<const int64_t, FrameInfo>& frame) {
return frame.second.frame != nullptr;
});
if (dropped_frames > 0) {
stats_callback_->OnDroppedFrames(dropped_frames);
}
@ -371,7 +370,7 @@ void FrameBuffer::UpdateRtt(int64_t rtt_ms) {
bool FrameBuffer::ValidReferences(const EncodedFrame& frame) const {
for (size_t i = 0; i < frame.num_references; ++i) {
if (frame.references[i] >= frame.id.picture_id)
if (frame.references[i] >= frame.Id())
return false;
for (size_t j = i + 1; j < frame.num_references; ++j) {
@ -397,73 +396,69 @@ int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame) {
MutexLock lock(&mutex_);
const VideoLayerFrameId& id = frame->id;
int64_t last_continuous_picture_id =
!last_continuous_frame_ ? -1 : last_continuous_frame_->picture_id;
int64_t last_continuous_frame_id = last_continuous_frame_.value_or(-1);
if (!ValidReferences(*frame)) {
RTC_LOG(LS_WARNING) << "Frame " << id.picture_id
RTC_LOG(LS_WARNING) << "Frame " << frame->Id()
<< " has invalid frame references, dropping frame.";
return last_continuous_picture_id;
return last_continuous_frame_id;
}
if (frames_.size() >= kMaxFramesBuffered) {
if (frame->is_keyframe()) {
RTC_LOG(LS_WARNING) << "Inserting keyframe " << id.picture_id
RTC_LOG(LS_WARNING) << "Inserting keyframe " << frame->Id()
<< " but buffer is full, clearing"
" buffer and inserting the frame.";
ClearFramesAndHistory();
} else {
RTC_LOG(LS_WARNING) << "Frame " << id.picture_id
RTC_LOG(LS_WARNING) << "Frame " << frame->Id()
<< " could not be inserted due to the frame "
"buffer being full, dropping frame.";
return last_continuous_picture_id;
return last_continuous_frame_id;
}
}
auto last_decoded_frame = decoded_frames_history_.GetLastDecodedFrameId();
auto last_decoded_frame_timestamp =
decoded_frames_history_.GetLastDecodedFrameTimestamp();
if (last_decoded_frame && id <= *last_decoded_frame) {
if (last_decoded_frame && frame->Id() <= *last_decoded_frame) {
if (AheadOf(frame->Timestamp(), *last_decoded_frame_timestamp) &&
frame->is_keyframe()) {
// If this frame has a newer timestamp but an earlier picture id then we
// assume there has been a jump in the picture id due to some encoder
// If this frame has a newer timestamp but an earlier frame id then we
// assume there has been a jump in the frame id due to some encoder
// reconfiguration or some other reason. Even though this is not according
// to spec we can still continue to decode from this frame if it is a
// keyframe.
RTC_LOG(LS_WARNING)
<< "A jump in picture id was detected, clearing buffer.";
<< "A jump in frame id was detected, clearing buffer.";
ClearFramesAndHistory();
last_continuous_picture_id = -1;
last_continuous_frame_id = -1;
} else {
RTC_LOG(LS_WARNING) << "Frame " << id.picture_id
<< " inserted after frame "
<< last_decoded_frame->picture_id
RTC_LOG(LS_WARNING) << "Frame " << frame->Id() << " inserted after frame "
<< *last_decoded_frame
<< " was handed off for decoding, dropping frame.";
return last_continuous_picture_id;
return last_continuous_frame_id;
}
}
// Test if inserting this frame would cause the order of the frames to become
// ambiguous (covering more than half the interval of 2^16). This can happen
// when the picture id make large jumps mid stream.
if (!frames_.empty() && id < frames_.begin()->first &&
frames_.rbegin()->first < id) {
RTC_LOG(LS_WARNING)
<< "A jump in picture id was detected, clearing buffer.";
// when the frame id make large jumps mid stream.
if (!frames_.empty() && frame->Id() < frames_.begin()->first &&
frames_.rbegin()->first < frame->Id()) {
RTC_LOG(LS_WARNING) << "A jump in frame id was detected, clearing buffer.";
ClearFramesAndHistory();
last_continuous_picture_id = -1;
last_continuous_frame_id = -1;
}
auto info = frames_.emplace(id, FrameInfo()).first;
auto info = frames_.emplace(frame->Id(), FrameInfo()).first;
if (info->second.frame) {
return last_continuous_picture_id;
return last_continuous_frame_id;
}
if (!UpdateFrameInfoWithIncomingFrame(*frame, info))
return last_continuous_picture_id;
return last_continuous_frame_id;
if (!frame->delayed_by_retransmission())
timing_->IncomingTimestamp(frame->Timestamp(), frame->ReceivedTime());
@ -480,7 +475,7 @@ int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame) {
if (info->second.num_missing_continuous == 0) {
info->second.continuous = true;
PropagateContinuity(info);
last_continuous_picture_id = last_continuous_frame_->picture_id;
last_continuous_frame_id = *last_continuous_frame_;
// Since we now have new continuous frames there might be a better frame
// to return from NextFrame.
@ -496,7 +491,7 @@ int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame) {
}
}
return last_continuous_picture_id;
return last_continuous_frame_id;
}
void FrameBuffer::PropagateContinuity(FrameMap::iterator start) {
@ -549,8 +544,6 @@ void FrameBuffer::PropagateDecodability(const FrameInfo& info) {
bool FrameBuffer::UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame,
FrameMap::iterator info) {
TRACE_EVENT0("webrtc", "FrameBuffer::UpdateFrameInfoWithIncomingFrame");
const VideoLayerFrameId& id = frame.id;
auto last_decoded_frame = decoded_frames_history_.GetLastDecodedFrameId();
RTC_DCHECK(!last_decoded_frame || *last_decoded_frame < info->first);
@ -563,23 +556,22 @@ bool FrameBuffer::UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame,
// so that |num_missing_continuous| and |num_missing_decodable| can be
// decremented as frames become continuous/are decoded.
struct Dependency {
VideoLayerFrameId id;
int64_t frame_id;
bool continuous;
};
std::vector<Dependency> not_yet_fulfilled_dependencies;
// Find all dependencies that have not yet been fulfilled.
for (size_t i = 0; i < frame.num_references; ++i) {
VideoLayerFrameId ref_key(frame.references[i]);
// Does |frame| depend on a frame earlier than the last decoded one?
if (last_decoded_frame && ref_key <= *last_decoded_frame) {
if (last_decoded_frame && frame.references[i] <= *last_decoded_frame) {
// Was that frame decoded? If not, this |frame| will never become
// decodable.
if (!decoded_frames_history_.WasDecoded(ref_key)) {
if (!decoded_frames_history_.WasDecoded(frame.references[i])) {
int64_t now_ms = clock_->TimeInMilliseconds();
if (last_log_non_decoded_ms_ + kLogNonDecodedIntervalMs < now_ms) {
RTC_LOG(LS_WARNING)
<< "Frame " << id.picture_id
<< "Frame " << frame.Id()
<< " depends on a non-decoded frame more previous than the last "
"decoded frame, dropping frame.";
last_log_non_decoded_ms_ = now_ms;
@ -587,10 +579,11 @@ bool FrameBuffer::UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame,
return false;
}
} else {
auto ref_info = frames_.find(ref_key);
auto ref_info = frames_.find(frame.references[i]);
bool ref_continuous =
ref_info != frames_.end() && ref_info->second.continuous;
not_yet_fulfilled_dependencies.push_back({ref_key, ref_continuous});
not_yet_fulfilled_dependencies.push_back(
{frame.references[i], ref_continuous});
}
}
@ -601,7 +594,7 @@ bool FrameBuffer::UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame,
if (dep.continuous)
--info->second.num_missing_continuous;
frames_[dep.id].dependent_frames.push_back(id);
frames_[dep.frame_id].dependent_frames.push_back(frame.Id());
}
return true;
@ -637,11 +630,11 @@ void FrameBuffer::UpdateTimingFrameInfo() {
void FrameBuffer::ClearFramesAndHistory() {
TRACE_EVENT0("webrtc", "FrameBuffer::ClearFramesAndHistory");
if (stats_callback_) {
unsigned int dropped_frames = std::count_if(
frames_.begin(), frames_.end(),
[](const std::pair<const VideoLayerFrameId, FrameInfo>& frame) {
return frame.second.frame != nullptr;
});
unsigned int dropped_frames =
std::count_if(frames_.begin(), frames_.end(),
[](const std::pair<const int64_t, FrameInfo>& frame) {
return frame.second.frame != nullptr;
});
if (dropped_frames > 0) {
stats_callback_->OnDroppedFrames(dropped_frames);
}
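
The dropped-frame counting lambdas in GetNextFrame and ClearFramesAndHistory above now take std::pair<const int64_t, FrameInfo>&, which is simply FrameMap::value_type after the key change (a std::map<K, V> stores std::pair<const K, V>). A self-contained sketch of the same counting idiom, with FrameInfo reduced to the one member the predicate inspects:

#include <algorithm>
#include <cstdint>
#include <map>
#include <memory>

struct EncodedFrame {};  // Stand-in for the real frame type.
struct FrameInfo {
  std::unique_ptr<EncodedFrame> frame;  // Null once only bookkeeping remains.
};
using FrameMap = std::map<int64_t, FrameInfo>;

// Counts entries in [begin, end) that still hold an undecoded frame,
// mirroring the std::count_if calls above.
unsigned int CountDroppedFrames(FrameMap::const_iterator begin,
                                FrameMap::const_iterator end) {
  return static_cast<unsigned int>(
      std::count_if(begin, end,
                    [](const std::pair<const int64_t, FrameInfo>& entry) {
                      return entry.second.frame != nullptr;
                    }));
}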

@ -58,7 +58,6 @@ class FrameBuffer {
// Insert a frame into the frame buffer. Returns the picture id
// of the last continuous frame or -1 if there is no continuous frame.
// TODO(philipel): Return a VideoLayerFrameId and not only the picture id.
int64_t InsertFrame(std::unique_ptr<EncodedFrame> frame);
// Get the next frame for decoding. Will return at latest after
@ -95,7 +94,7 @@ class FrameBuffer {
// Which other frames that have direct unfulfilled dependencies
// on this frame.
absl::InlinedVector<VideoLayerFrameId, 8> dependent_frames;
absl::InlinedVector<int64_t, 8> dependent_frames;
// A frame is continiuous if it has all its referenced/indirectly
// referenced frames.
@ -115,7 +114,7 @@ class FrameBuffer {
std::unique_ptr<EncodedFrame> frame;
};
using FrameMap = std::map<VideoLayerFrameId, FrameInfo>;
using FrameMap = std::map<int64_t, FrameInfo>;
// Check that the references of |frame| are valid.
bool ValidReferences(const EncodedFrame& frame) const;
@ -178,8 +177,7 @@ class FrameBuffer {
VCMJitterEstimator jitter_estimator_ RTC_GUARDED_BY(mutex_);
VCMTiming* const timing_ RTC_GUARDED_BY(mutex_);
VCMInterFrameDelay inter_frame_delay_ RTC_GUARDED_BY(mutex_);
absl::optional<VideoLayerFrameId> last_continuous_frame_
RTC_GUARDED_BY(mutex_);
absl::optional<int64_t> last_continuous_frame_ RTC_GUARDED_BY(mutex_);
std::vector<FrameMap::iterator> frames_to_decode_ RTC_GUARDED_BY(mutex_);
bool stopped_ RTC_GUARDED_BY(mutex_);
VCMVideoProtection protection_mode_ RTC_GUARDED_BY(mutex_);

@ -164,7 +164,7 @@ class TestFrameBuffer2 : public ::testing::Test {
{rtc::checked_cast<uint16_t>(refs)...}};
auto frame = std::make_unique<FrameObjectFake>();
frame->id.picture_id = picture_id;
frame->SetId(picture_id);
frame->SetSpatialIndex(spatial_layer);
frame->SetTimestamp(ts_ms * 90);
frame->num_references = references.size();
@ -214,7 +214,7 @@ class TestFrameBuffer2 : public ::testing::Test {
void CheckFrame(size_t index, int picture_id, int spatial_layer) {
ASSERT_LT(index, frames_.size());
ASSERT_TRUE(frames_[index]);
ASSERT_EQ(picture_id, frames_[index]->id.picture_id);
ASSERT_EQ(picture_id, frames_[index]->Id());
ASSERT_EQ(spatial_layer, frames_[index]->SpatialIndex().value_or(0));
}
@ -278,7 +278,7 @@ TEST_F(TestFrameBuffer2, ZeroPlayoutDelay) {
new FrameBuffer(time_controller_.GetClock(), &timing, &stats_callback_));
const VideoPlayoutDelay kPlayoutDelayMs = {0, 0};
std::unique_ptr<FrameObjectFake> test_frame(new FrameObjectFake());
test_frame->id.picture_id = 0;
test_frame->SetId(0);
test_frame->SetPlayoutDelay(kPlayoutDelayMs);
buffer_->InsertFrame(std::move(test_frame));
ExtractFrame(0, false);
@ -544,7 +544,7 @@ TEST_F(TestFrameBuffer2, StatsCallback) {
{
std::unique_ptr<FrameObjectFake> frame(new FrameObjectFake());
frame->SetEncodedData(EncodedImageBuffer::Create(kFrameSize));
frame->id.picture_id = pid;
frame->SetId(pid);
frame->SetTimestamp(ts);
frame->num_references = 0;

@ -21,10 +21,10 @@ RtpFrameReferenceFinder::ReturnVector RtpFrameIdOnlyRefFinder::ManageFrame(
std::unique_ptr<RtpFrameObject> frame,
int frame_id) {
frame->SetSpatialIndex(0);
frame->id.picture_id = unwrapper_.Unwrap(frame_id & (kFrameIdLength - 1));
frame->SetId(unwrapper_.Unwrap(frame_id & (kFrameIdLength - 1)));
frame->num_references =
frame->frame_type() == VideoFrameType::kVideoFrameKey ? 0 : 1;
frame->references[0] = frame->id.picture_id - 1;
frame->references[0] = frame->Id() - 1;
RtpFrameReferenceFinder::ReturnVector res;
res.push_back(std::move(frame));
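
The masking and unwrapping above (frame_id & (kFrameIdLength - 1), then unwrapper_.Unwrap(...)) is what turns a wrapping on-the-wire id into the monotonically increasing int64_t that the frame buffer can key on directly. The sketch below is a simplified, hypothetical unwrapper written for illustration only; it is not the unwrapper_ class actually used above, and it assumes a 15-bit id space:

#include <cstdint>

// Illustrative unwrapper for a counter that wraps at kWrap. Jumps larger
// than half the wrap range are interpreted as wrap-around (forwards or
// backwards), so the returned values grow without bound.
template <int64_t kWrap>
class SimpleUnwrapper {
 public:
  int64_t Unwrap(int64_t wrapped) {
    if (last_wrapped_ >= 0) {
      int64_t diff = wrapped - last_wrapped_;
      if (diff > kWrap / 2) {
        diff -= kWrap;
      } else if (diff < -kWrap / 2) {
        diff += kWrap;
      }
      unwrapped_ += diff;
    } else {
      unwrapped_ = wrapped;  // First value seen.
    }
    last_wrapped_ = wrapped;
    return unwrapped_;
  }

 private:
  int64_t last_wrapped_ = -1;  // Sentinel: no value seen yet.
  int64_t unwrapped_ = 0;
};

// SimpleUnwrapper<1 << 15> u;
// u.Unwrap(32760) == 32760, u.Unwrap(5) == 32773 (one wrap-around later).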

@ -177,7 +177,7 @@ void RtpFrameReferenceFinder::ClearTo(uint16_t seq_num) {
void RtpFrameReferenceFinder::HandOffFrames(ReturnVector frames) {
for (auto& frame : frames) {
frame->id.picture_id += picture_id_offset_;
frame->SetId(frame->Id() + picture_id_offset_);
for (size_t i = 0; i < frame->num_references; ++i) {
frame->references[i] += picture_id_offset_;
}

@ -72,7 +72,7 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
uint16_t Rand() { return rand_.Rand<uint16_t>(); }
void OnCompleteFrame(std::unique_ptr<EncodedFrame> frame) override {
int64_t pid = frame->id.picture_id;
int64_t pid = frame->Id();
uint16_t sidx = *frame->SpatialIndex();
auto frame_it = frames_from_callback_.find(std::make_pair(pid, sidx));
if (frame_it != frames_from_callback_.end()) {

@ -22,7 +22,7 @@ RtpFrameReferenceFinder::ReturnVector RtpGenericFrameRefFinder::ManageFrame(
const RTPVideoHeader::GenericDescriptorInfo& descriptor) {
// Frame IDs are unwrapped in the RtpVideoStreamReceiver, no need to unwrap
// them here.
frame->id.picture_id = descriptor.frame_id;
frame->SetId(descriptor.frame_id);
frame->SetSpatialIndex(descriptor.spatial_index);
RtpFrameReferenceFinder::ReturnVector res;

@ -86,18 +86,18 @@ RtpSeqNumOnlyRefFinder::ManageFrameInternal(RtpFrameObject* frame) {
// Since keyframes can cause reordering we can't simply assign the
// picture id according to some incrementing counter.
frame->id.picture_id = frame->last_seq_num();
frame->SetId(frame->last_seq_num());
frame->num_references =
frame->frame_type() == VideoFrameType::kVideoFrameDelta;
frame->references[0] = rtp_seq_num_unwrapper_.Unwrap(last_picture_id_gop);
if (AheadOf<uint16_t>(frame->id.picture_id, last_picture_id_gop)) {
seq_num_it->second.first = frame->id.picture_id;
seq_num_it->second.second = frame->id.picture_id;
if (AheadOf<uint16_t>(frame->Id(), last_picture_id_gop)) {
seq_num_it->second.first = frame->Id();
seq_num_it->second.second = frame->Id();
}
UpdateLastPictureIdWithPadding(frame->id.picture_id);
UpdateLastPictureIdWithPadding(frame->Id());
frame->SetSpatialIndex(0);
frame->id.picture_id = rtp_seq_num_unwrapper_.Unwrap(frame->id.picture_id);
frame->SetId(rtp_seq_num_unwrapper_.Unwrap(frame->Id()));
return kHandOff;
}

@ -50,14 +50,14 @@ RtpVp8RefFinder::FrameDecision RtpVp8RefFinder::ManageFrameInternal(
return kDrop;
frame->SetSpatialIndex(0);
frame->id.picture_id = codec_header.pictureId & 0x7FFF;
frame->SetId(codec_header.pictureId & 0x7FFF);
if (last_picture_id_ == -1)
last_picture_id_ = frame->id.picture_id;
last_picture_id_ = frame->Id();
// Clean up info about not yet received frames that are too old.
uint16_t old_picture_id =
Subtract<kFrameIdLength>(frame->id.picture_id, kMaxNotYetReceivedFrames);
Subtract<kFrameIdLength>(frame->Id(), kMaxNotYetReceivedFrames);
auto clean_frames_to = not_yet_received_frames_.lower_bound(old_picture_id);
not_yet_received_frames_.erase(not_yet_received_frames_.begin(),
clean_frames_to);
@ -67,12 +67,11 @@ RtpVp8RefFinder::FrameDecision RtpVp8RefFinder::ManageFrameInternal(
}
// Find if there has been a gap in fully received frames and save the picture
// id of those frames in |not_yet_received_frames_|.
if (AheadOf<uint16_t, kFrameIdLength>(frame->id.picture_id,
last_picture_id_)) {
if (AheadOf<uint16_t, kFrameIdLength>(frame->Id(), last_picture_id_)) {
do {
last_picture_id_ = Add<kFrameIdLength>(last_picture_id_, 1);
not_yet_received_frames_.insert(last_picture_id_);
} while (last_picture_id_ != frame->id.picture_id);
} while (last_picture_id_ != frame->Id());
}
int64_t unwrapped_tl0 = tl0_unwrapper_.Unwrap(codec_header.tl0PicIdx & 0xFF);
@ -110,8 +109,7 @@ RtpVp8RefFinder::FrameDecision RtpVp8RefFinder::ManageFrameInternal(
// Is this an old frame that has already been used to update the state? If
// so, drop it.
if (AheadOrAt<uint16_t, kFrameIdLength>(last_pid_on_layer,
frame->id.picture_id)) {
if (AheadOrAt<uint16_t, kFrameIdLength>(last_pid_on_layer, frame->Id())) {
return kDrop;
}
@ -128,8 +126,7 @@ RtpVp8RefFinder::FrameDecision RtpVp8RefFinder::ManageFrameInternal(
// Is this an old frame that has already been used to update the state? If
// so, drop it.
if (last_pid_on_layer != -1 &&
AheadOrAt<uint16_t, kFrameIdLength>(last_pid_on_layer,
frame->id.picture_id)) {
AheadOrAt<uint16_t, kFrameIdLength>(last_pid_on_layer, frame->Id())) {
return kDrop;
}
@ -150,7 +147,7 @@ RtpVp8RefFinder::FrameDecision RtpVp8RefFinder::ManageFrameInternal(
// a layer sync frame has been received after this frame for the same
// base layer frame, drop this frame.
if (AheadOf<uint16_t, kFrameIdLength>(layer_info_it->second[layer],
frame->id.picture_id)) {
frame->Id())) {
return kDrop;
}
@ -159,14 +156,14 @@ RtpVp8RefFinder::FrameDecision RtpVp8RefFinder::ManageFrameInternal(
auto not_received_frame_it =
not_yet_received_frames_.upper_bound(layer_info_it->second[layer]);
if (not_received_frame_it != not_yet_received_frames_.end() &&
AheadOf<uint16_t, kFrameIdLength>(frame->id.picture_id,
AheadOf<uint16_t, kFrameIdLength>(frame->Id(),
*not_received_frame_it)) {
return kStash;
}
if (!(AheadOf<uint16_t, kFrameIdLength>(frame->id.picture_id,
if (!(AheadOf<uint16_t, kFrameIdLength>(frame->Id(),
layer_info_it->second[layer]))) {
RTC_LOG(LS_WARNING) << "Frame with picture id " << frame->id.picture_id
RTC_LOG(LS_WARNING) << "Frame with picture id " << frame->Id()
<< " and packet range [" << frame->first_seq_num()
<< ", " << frame->last_seq_num()
<< "] already received, "
@ -191,17 +188,17 @@ void RtpVp8RefFinder::UpdateLayerInfoVp8(RtpFrameObject* frame,
while (layer_info_it != layer_info_.end()) {
if (layer_info_it->second[temporal_idx] != -1 &&
AheadOf<uint16_t, kFrameIdLength>(layer_info_it->second[temporal_idx],
frame->id.picture_id)) {
frame->Id())) {
// The frame was not newer, then no subsequent layer info have to be
// update.
break;
}
layer_info_it->second[temporal_idx] = frame->id.picture_id;
layer_info_it->second[temporal_idx] = frame->Id();
++unwrapped_tl0;
layer_info_it = layer_info_.find(unwrapped_tl0);
}
not_yet_received_frames_.erase(frame->id.picture_id);
not_yet_received_frames_.erase(frame->Id());
UnwrapPictureIds(frame);
}
@ -233,7 +230,7 @@ void RtpVp8RefFinder::RetryStashedFrames(
void RtpVp8RefFinder::UnwrapPictureIds(RtpFrameObject* frame) {
for (size_t i = 0; i < frame->num_references; ++i)
frame->references[i] = unwrapper_.Unwrap(frame->references[i]);
frame->id.picture_id = unwrapper_.Unwrap(frame->id.picture_id);
frame->SetId(unwrapper_.Unwrap(frame->Id()));
}
void RtpVp8RefFinder::ClearTo(uint16_t seq_num) {

@ -52,10 +52,10 @@ RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameInternal(
return kDrop;
frame->SetSpatialIndex(codec_header.spatial_idx);
frame->id.picture_id = codec_header.picture_id & (kFrameIdLength - 1);
frame->SetId(codec_header.picture_id & (kFrameIdLength - 1));
if (last_picture_id_ == -1)
last_picture_id_ = frame->id.picture_id;
last_picture_id_ = frame->Id();
if (codec_header.flexible_mode) {
if (codec_header.num_ref_pics > EncodedFrame::kMaxFrameReferences) {
@ -63,8 +63,8 @@ RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameInternal(
}
frame->num_references = codec_header.num_ref_pics;
for (size_t i = 0; i < frame->num_references; ++i) {
frame->references[i] = Subtract<kFrameIdLength>(frame->id.picture_id,
codec_header.pid_diff[i]);
frame->references[i] =
Subtract<kFrameIdLength>(frame->Id(), codec_header.pid_diff[i]);
}
FlattenFrameIdAndRefs(frame, codec_header.inter_layer_predicted);
@ -104,10 +104,10 @@ RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameInternal(
current_ss_idx_ = Add<kMaxGofSaved>(current_ss_idx_, 1);
scalability_structures_[current_ss_idx_] = gof;
scalability_structures_[current_ss_idx_].pid_start = frame->id.picture_id;
gof_info_.emplace(unwrapped_tl0,
GofInfo(&scalability_structures_[current_ss_idx_],
frame->id.picture_id));
scalability_structures_[current_ss_idx_].pid_start = frame->Id();
gof_info_.emplace(
unwrapped_tl0,
GofInfo(&scalability_structures_[current_ss_idx_], frame->Id()));
}
const auto gof_info_it = gof_info_.find(unwrapped_tl0);
@ -118,7 +118,7 @@ RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameInternal(
if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
frame->num_references = 0;
FrameReceivedVp9(frame->id.picture_id, info);
FrameReceivedVp9(frame->Id(), info);
FlattenFrameIdAndRefs(frame, codec_header.inter_layer_predicted);
return kHandOff;
}
@ -134,7 +134,7 @@ RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameInternal(
info = &gof_info_it->second;
frame->num_references = 0;
FrameReceivedVp9(frame->id.picture_id, info);
FrameReceivedVp9(frame->Id(), info);
FlattenFrameIdAndRefs(frame, codec_header.inter_layer_predicted);
return kHandOff;
} else {
@ -147,8 +147,8 @@ RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameInternal(
if (codec_header.temporal_idx == 0) {
gof_info_it = gof_info_
.emplace(unwrapped_tl0, GofInfo(gof_info_it->second.gof,
frame->id.picture_id))
.emplace(unwrapped_tl0,
GofInfo(gof_info_it->second.gof, frame->Id()))
.first;
}
@ -160,23 +160,23 @@ RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameInternal(
auto clean_gof_info_to = gof_info_.lower_bound(old_tl0_pic_idx);
gof_info_.erase(gof_info_.begin(), clean_gof_info_to);
FrameReceivedVp9(frame->id.picture_id, info);
FrameReceivedVp9(frame->Id(), info);
// Make sure we don't miss any frame that could potentially have the
// up switch flag set.
if (MissingRequiredFrameVp9(frame->id.picture_id, *info))
if (MissingRequiredFrameVp9(frame->Id(), *info))
return kStash;
if (codec_header.temporal_up_switch)
up_switch_.emplace(frame->id.picture_id, codec_header.temporal_idx);
up_switch_.emplace(frame->Id(), codec_header.temporal_idx);
// Clean out old info about up switch frames.
uint16_t old_picture_id = Subtract<kFrameIdLength>(frame->id.picture_id, 50);
uint16_t old_picture_id = Subtract<kFrameIdLength>(frame->Id(), 50);
auto up_switch_erase_to = up_switch_.lower_bound(old_picture_id);
up_switch_.erase(up_switch_.begin(), up_switch_erase_to);
size_t diff = ForwardDiff<uint16_t, kFrameIdLength>(info->gof->pid_start,
frame->id.picture_id);
size_t diff =
ForwardDiff<uint16_t, kFrameIdLength>(info->gof->pid_start, frame->Id());
size_t gof_idx = diff % info->gof->num_frames_in_gof;
if (info->gof->num_ref_pics[gof_idx] > EncodedFrame::kMaxFrameReferences) {
@ -185,12 +185,12 @@ RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameInternal(
// Populate references according to the scalability structure.
frame->num_references = info->gof->num_ref_pics[gof_idx];
for (size_t i = 0; i < frame->num_references; ++i) {
frame->references[i] = Subtract<kFrameIdLength>(
frame->id.picture_id, info->gof->pid_diff[gof_idx][i]);
frame->references[i] =
Subtract<kFrameIdLength>(frame->Id(), info->gof->pid_diff[gof_idx][i]);
// If this is a reference to a frame earlier than the last up switch point,
// then ignore this reference.
if (UpSwitchInIntervalVp9(frame->id.picture_id, codec_header.temporal_idx,
if (UpSwitchInIntervalVp9(frame->Id(), codec_header.temporal_idx,
frame->references[i])) {
--frame->num_references;
}
@ -330,13 +330,12 @@ void RtpVp9RefFinder::FlattenFrameIdAndRefs(RtpFrameObject* frame,
unwrapper_.Unwrap(frame->references[i]) * kMaxSpatialLayers +
*frame->SpatialIndex();
}
frame->id.picture_id =
unwrapper_.Unwrap(frame->id.picture_id) * kMaxSpatialLayers +
*frame->SpatialIndex();
frame->SetId(unwrapper_.Unwrap(frame->Id()) * kMaxSpatialLayers +
*frame->SpatialIndex());
if (inter_layer_predicted &&
frame->num_references + 1 <= EncodedFrame::kMaxFrameReferences) {
frame->references[frame->num_references] = frame->id.picture_id - 1;
frame->references[frame->num_references] = frame->Id() - 1;
++frame->num_references;
}
}
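
FlattenFrameIdAndRefs above folds the unwrapped picture id and the spatial index into a single int64_t, so each spatial layer gets its own frame id. A worked example, assuming kMaxSpatialLayers == 5 (illustrative value, not taken from this diff):

#include <cstdint>

constexpr int kMaxSpatialLayers = 5;  // Assumed value, for illustration.

int64_t FlattenedId(int64_t unwrapped_picture_id, int spatial_index) {
  return unwrapped_picture_id * kMaxSpatialLayers + spatial_index;
}

// FlattenedId(100, 2) == 502  -> spatial layer 2 of picture 100.
// FlattenedId(100, 1) == 501  -> the layer below it in the same picture;
//                                this is exactly frame->Id() - 1, which is why
//                                the inter-layer reference above can be
//                                written as frame->Id() - 1.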

@ -163,7 +163,7 @@ class HasFrameMatcher : public MatcherInterface<const FrameVector&> {
MatchResultListener* result_listener) const override {
auto it = std::find_if(frames.begin(), frames.end(),
[this](const std::unique_ptr<EncodedFrame>& f) {
return f->id.picture_id == frame_id_;
return f->Id() == frame_id_;
});
if (it == frames.end()) {
if (result_listener->IsInterested()) {
@ -635,7 +635,7 @@ TEST_F(RtpVp9RefFinderTest, WrappingFlexReference) {
ASSERT_EQ(1UL, frames_.size());
const EncodedFrame& frame = *frames_[0];
ASSERT_EQ(frame.id.picture_id - frame.references[0], 5);
ASSERT_EQ(frame.Id() - frame.references[0], 5);
}
TEST_F(RtpVp9RefFinderTest, GofPidJump) {

@ -23,18 +23,17 @@ DecodedFramesHistory::DecodedFramesHistory(size_t window_size)
DecodedFramesHistory::~DecodedFramesHistory() = default;
void DecodedFramesHistory::InsertDecoded(const VideoLayerFrameId& frameid,
uint32_t timestamp) {
last_decoded_frame_ = frameid;
void DecodedFramesHistory::InsertDecoded(int64_t frame_id, uint32_t timestamp) {
last_decoded_frame_ = frame_id;
last_decoded_frame_timestamp_ = timestamp;
int new_index = PictureIdToIndex(frameid.picture_id);
int new_index = FrameIdToIndex(frame_id);
RTC_DCHECK(last_picture_id_ < frameid.picture_id);
RTC_DCHECK(last_frame_id_ < frame_id);
// Clears expired values from the cyclic buffer_.
if (last_picture_id_) {
int64_t id_jump = frameid.picture_id - *last_picture_id_;
int last_index = PictureIdToIndex(*last_picture_id_);
if (last_frame_id_) {
int64_t id_jump = frame_id - *last_frame_id_;
int last_index = FrameIdToIndex(*last_frame_id_);
if (id_jump >= static_cast<int64_t>(buffer_.size())) {
std::fill(buffer_.begin(), buffer_.end(), false);
@ -48,36 +47,34 @@ void DecodedFramesHistory::InsertDecoded(const VideoLayerFrameId& frameid,
}
buffer_[new_index] = true;
last_picture_id_ = frameid.picture_id;
last_frame_id_ = frame_id;
}
bool DecodedFramesHistory::WasDecoded(const VideoLayerFrameId& frameid) {
if (!last_picture_id_)
bool DecodedFramesHistory::WasDecoded(int64_t frame_id) {
if (!last_frame_id_)
return false;
// Reference to the picture_id out of the stored should happen.
if (frameid.picture_id <=
*last_picture_id_ - static_cast<int64_t>(buffer_.size())) {
if (frame_id <= *last_frame_id_ - static_cast<int64_t>(buffer_.size())) {
RTC_LOG(LS_WARNING) << "Referencing a frame out of the window. "
"Assuming it was undecoded to avoid artifacts.";
return false;
}
if (frameid.picture_id > last_picture_id_)
if (frame_id > last_frame_id_)
return false;
return buffer_[PictureIdToIndex(frameid.picture_id)];
return buffer_[FrameIdToIndex(frame_id)];
}
void DecodedFramesHistory::Clear() {
last_decoded_frame_timestamp_.reset();
last_decoded_frame_.reset();
std::fill(buffer_.begin(), buffer_.end(), false);
last_picture_id_.reset();
last_frame_id_.reset();
}
absl::optional<VideoLayerFrameId>
DecodedFramesHistory::GetLastDecodedFrameId() {
absl::optional<int64_t> DecodedFramesHistory::GetLastDecodedFrameId() {
return last_decoded_frame_;
}
@ -85,7 +82,7 @@ absl::optional<uint32_t> DecodedFramesHistory::GetLastDecodedFrameTimestamp() {
return last_decoded_frame_timestamp_;
}
int DecodedFramesHistory::PictureIdToIndex(int64_t frame_id) const {
int DecodedFramesHistory::FrameIdToIndex(int64_t frame_id) const {
int m = frame_id % buffer_.size();
return m >= 0 ? m : m + buffer_.size();
}
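
FrameIdToIndex above maps a frame id, which can be negative (see the NegativePictureIds test further down), onto a circular-buffer index in [0, window_size). A self-contained sketch of the same mapping with an explicitly signed size, to make the modulo handling obvious:

#include <cstdint>

// In C++ the result of % has the sign of the dividend, so a negative
// frame id needs one extra addition to land in [0, window_size).
int FrameIdToIndex(int64_t frame_id, int64_t window_size) {
  int64_t m = frame_id % window_size;
  return static_cast<int>(m >= 0 ? m : m + window_size);
}

// With window_size == 8192 (kHistorySize in the unit test below):
// FrameIdToIndex(1234, 8192)  == 1234
// FrameIdToIndex(-1234, 8192) == 6958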

@ -27,23 +27,23 @@ class DecodedFramesHistory {
// window_size - how much frames back to the past are actually remembered.
explicit DecodedFramesHistory(size_t window_size);
~DecodedFramesHistory();
// Called for each decoded frame. Assumes picture id's are non-decreasing.
void InsertDecoded(const VideoLayerFrameId& frameid, uint32_t timestamp);
// Query if the following (picture_id, spatial_id) pair was inserted before.
// Should be at most less by window_size-1 than the last inserted picture id.
bool WasDecoded(const VideoLayerFrameId& frameid);
// Called for each decoded frame. Assumes frame id's are non-decreasing.
void InsertDecoded(int64_t frame_id, uint32_t timestamp);
// Query if the following (frame_id, spatial_id) pair was inserted before.
// Should be at most less by window_size-1 than the last inserted frame id.
bool WasDecoded(int64_t frame_id);
void Clear();
absl::optional<VideoLayerFrameId> GetLastDecodedFrameId();
absl::optional<int64_t> GetLastDecodedFrameId();
absl::optional<uint32_t> GetLastDecodedFrameTimestamp();
private:
int PictureIdToIndex(int64_t frame_id) const;
int FrameIdToIndex(int64_t frame_id) const;
std::vector<bool> buffer_;
absl::optional<int64_t> last_picture_id_;
absl::optional<VideoLayerFrameId> last_decoded_frame_;
absl::optional<int64_t> last_frame_id_;
absl::optional<int64_t> last_decoded_frame_;
absl::optional<uint32_t> last_decoded_frame_timestamp_;
};

@ -20,93 +20,93 @@ constexpr int kHistorySize = 1 << 13;
TEST(DecodedFramesHistory, RequestOnEmptyHistory) {
DecodedFramesHistory history(kHistorySize);
EXPECT_EQ(history.WasDecoded(VideoLayerFrameId{1234}), false);
EXPECT_EQ(history.WasDecoded(1234), false);
}
TEST(DecodedFramesHistory, FindsLastDecodedFrame) {
DecodedFramesHistory history(kHistorySize);
history.InsertDecoded(VideoLayerFrameId{1234}, 0);
EXPECT_EQ(history.WasDecoded(VideoLayerFrameId{1234}), true);
history.InsertDecoded(1234, 0);
EXPECT_EQ(history.WasDecoded(1234), true);
}
TEST(DecodedFramesHistory, FindsPreviousFrame) {
DecodedFramesHistory history(kHistorySize);
history.InsertDecoded(VideoLayerFrameId{1234}, 0);
history.InsertDecoded(VideoLayerFrameId{1235}, 0);
EXPECT_EQ(history.WasDecoded(VideoLayerFrameId{1234}), true);
history.InsertDecoded(1234, 0);
history.InsertDecoded(1235, 0);
EXPECT_EQ(history.WasDecoded(1234), true);
}
TEST(DecodedFramesHistory, ReportsMissingFrame) {
DecodedFramesHistory history(kHistorySize);
history.InsertDecoded(VideoLayerFrameId{1234}, 0);
history.InsertDecoded(VideoLayerFrameId{1236}, 0);
EXPECT_EQ(history.WasDecoded(VideoLayerFrameId{1235}), false);
history.InsertDecoded(1234, 0);
history.InsertDecoded(1236, 0);
EXPECT_EQ(history.WasDecoded(1235), false);
}
TEST(DecodedFramesHistory, ClearsHistory) {
DecodedFramesHistory history(kHistorySize);
history.InsertDecoded(VideoLayerFrameId{1234}, 0);
history.InsertDecoded(1234, 0);
history.Clear();
EXPECT_EQ(history.WasDecoded(VideoLayerFrameId{1234}), false);
EXPECT_EQ(history.WasDecoded(1234), false);
EXPECT_EQ(history.GetLastDecodedFrameId(), absl::nullopt);
EXPECT_EQ(history.GetLastDecodedFrameTimestamp(), absl::nullopt);
}
TEST(DecodedFramesHistory, HandlesBigJumpInPictureId) {
DecodedFramesHistory history(kHistorySize);
history.InsertDecoded(VideoLayerFrameId{1234}, 0);
history.InsertDecoded(VideoLayerFrameId{1235}, 0);
history.InsertDecoded(VideoLayerFrameId{1236}, 0);
history.InsertDecoded(VideoLayerFrameId{1236 + kHistorySize / 2}, 0);
EXPECT_EQ(history.WasDecoded(VideoLayerFrameId{1234}), true);
EXPECT_EQ(history.WasDecoded(VideoLayerFrameId{1237}), false);
history.InsertDecoded(1234, 0);
history.InsertDecoded(1235, 0);
history.InsertDecoded(1236, 0);
history.InsertDecoded(1236 + kHistorySize / 2, 0);
EXPECT_EQ(history.WasDecoded(1234), true);
EXPECT_EQ(history.WasDecoded(1237), false);
}
TEST(DecodedFramesHistory, ForgetsTooOldHistory) {
DecodedFramesHistory history(kHistorySize);
history.InsertDecoded(VideoLayerFrameId{1234}, 0);
history.InsertDecoded(VideoLayerFrameId{1235}, 0);
history.InsertDecoded(VideoLayerFrameId{1236}, 0);
history.InsertDecoded(VideoLayerFrameId{1236 + kHistorySize * 2}, 0);
EXPECT_EQ(history.WasDecoded(VideoLayerFrameId{1234}), false);
EXPECT_EQ(history.WasDecoded(VideoLayerFrameId{1237}), false);
history.InsertDecoded(1234, 0);
history.InsertDecoded(1235, 0);
history.InsertDecoded(1236, 0);
history.InsertDecoded(1236 + kHistorySize * 2, 0);
EXPECT_EQ(history.WasDecoded(1234), false);
EXPECT_EQ(history.WasDecoded(1237), false);
}
TEST(DecodedFramesHistory, ReturnsLastDecodedFrameId) {
DecodedFramesHistory history(kHistorySize);
EXPECT_EQ(history.GetLastDecodedFrameId(), absl::nullopt);
history.InsertDecoded(VideoLayerFrameId{1234}, 0);
EXPECT_EQ(history.GetLastDecodedFrameId(), VideoLayerFrameId(1234));
history.InsertDecoded(VideoLayerFrameId{1235}, 0);
EXPECT_EQ(history.GetLastDecodedFrameId(), VideoLayerFrameId(1235));
history.InsertDecoded(1234, 0);
EXPECT_EQ(history.GetLastDecodedFrameId(), 1234);
history.InsertDecoded(1235, 0);
EXPECT_EQ(history.GetLastDecodedFrameId(), 1235);
}
TEST(DecodedFramesHistory, ReturnsLastDecodedFrameTimestamp) {
DecodedFramesHistory history(kHistorySize);
EXPECT_EQ(history.GetLastDecodedFrameTimestamp(), absl::nullopt);
history.InsertDecoded(VideoLayerFrameId{1234}, 12345);
history.InsertDecoded(1234, 12345);
EXPECT_EQ(history.GetLastDecodedFrameTimestamp(), 12345u);
history.InsertDecoded(VideoLayerFrameId{1235}, 12366);
history.InsertDecoded(1235, 12366);
EXPECT_EQ(history.GetLastDecodedFrameTimestamp(), 12366u);
}
TEST(DecodedFramesHistory, NegativePictureIds) {
DecodedFramesHistory history(kHistorySize);
history.InsertDecoded(VideoLayerFrameId{-1234}, 12345);
history.InsertDecoded(VideoLayerFrameId{-1233}, 12366);
EXPECT_EQ(history.GetLastDecodedFrameId()->picture_id, -1233);
history.InsertDecoded(-1234, 12345);
history.InsertDecoded(-1233, 12366);
EXPECT_EQ(*history.GetLastDecodedFrameId(), -1233);
history.InsertDecoded(VideoLayerFrameId{-1}, 12377);
history.InsertDecoded(VideoLayerFrameId{0}, 12388);
EXPECT_EQ(history.GetLastDecodedFrameId()->picture_id, 0);
history.InsertDecoded(-1, 12377);
history.InsertDecoded(0, 12388);
EXPECT_EQ(*history.GetLastDecodedFrameId(), 0);
history.InsertDecoded(VideoLayerFrameId{1}, 12399);
EXPECT_EQ(history.GetLastDecodedFrameId()->picture_id, 1);
history.InsertDecoded(1, 12399);
EXPECT_EQ(*history.GetLastDecodedFrameId(), 1);
EXPECT_EQ(history.WasDecoded(VideoLayerFrameId{-1234}), true);
EXPECT_EQ(history.WasDecoded(VideoLayerFrameId{-1}), true);
EXPECT_EQ(history.WasDecoded(VideoLayerFrameId{0}), true);
EXPECT_EQ(history.WasDecoded(VideoLayerFrameId{1}), true);
EXPECT_EQ(history.WasDecoded(-1234), true);
EXPECT_EQ(history.WasDecoded(-1), true);
EXPECT_EQ(history.WasDecoded(0), true);
EXPECT_EQ(history.WasDecoded(1), true);
}
} // namespace