Use unique_ptr in GetNextFrame instead of release/delete

Bug: webrtc:13343
Change-Id: Iea86335dae5c0407f0fe6c91ccfe2f1eb13175b6
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/236847
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Commit-Queue: Evan Shrubsole <eshr@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#35331}
This commit is contained in:
Evan Shrubsole
2021-11-10 11:41:14 +01:00
committed by WebRTC LUCI CQ
parent 1b320f8b7d
commit 0072c21934
3 changed files with 23 additions and 23 deletions

View File

@@ -264,7 +264,6 @@ rtc_library("video_coding") {
   absl_deps = [
     "//third_party/abseil-cpp/absl/base:core_headers",
     "//third_party/abseil-cpp/absl/container:inlined_vector",
-    "//third_party/abseil-cpp/absl/memory",
     "//third_party/abseil-cpp/absl/types:optional",
     "//third_party/abseil-cpp/absl/types:variant",
   ]

View File

@@ -13,11 +13,11 @@
 #include <algorithm>
 #include <cstdlib>
 #include <iterator>
+#include <memory>
 #include <queue>
 #include <utility>
 #include <vector>
 
-#include "absl/memory/memory.h"
 #include "api/video/encoded_image.h"
 #include "api/video/video_timing.h"
 #include "modules/video_coding/include/video_coding_defines.h"
@@ -116,7 +116,7 @@ void FrameBuffer::StartWaitForNextFrameOnQueue() {
         MutexLock lock(&mutex_);
         if (!frames_to_decode_.empty()) {
           // We have frames, deliver!
-          frame = absl::WrapUnique(GetNextFrame());
+          frame = GetNextFrame();
           timing_->SetLastDecodeScheduledTimestamp(
               clock_->TimeInMilliseconds());
         } else if (clock_->TimeInMilliseconds() < latest_return_time_ms_) {
@@ -240,11 +240,11 @@ int64_t FrameBuffer::FindNextFrame(int64_t now_ms) {
   return wait_ms;
 }
 
-EncodedFrame* FrameBuffer::GetNextFrame() {
+std::unique_ptr<EncodedFrame> FrameBuffer::GetNextFrame() {
   RTC_DCHECK_RUN_ON(&callback_checker_);
   int64_t now_ms = clock_->TimeInMilliseconds();
   // TODO(ilnik): remove `frames_out` use frames_to_decode_ directly.
-  std::vector<EncodedFrame*> frames_out;
+  std::vector<std::unique_ptr<EncodedFrame>> frames_out;
 
   RTC_DCHECK(!frames_to_decode_.empty());
   bool superframe_delayed_by_retransmission = false;
@@ -261,7 +261,7 @@ EncodedFrame* FrameBuffer::GetNextFrame() {
   for (FrameMap::iterator& frame_it : frames_to_decode_) {
     RTC_DCHECK(frame_it != frames_.end());
-    EncodedFrame* frame = frame_it->second.frame.release();
+    std::unique_ptr<EncodedFrame> frame = std::move(frame_it->second.frame);
 
     frame->SetRenderTime(render_time_ms);
@@ -286,7 +286,7 @@ EncodedFrame* FrameBuffer::GetNextFrame() {
     frames_.erase(frames_.begin(), ++frame_it);
-    frames_out.push_back(frame);
+    frames_out.emplace_back(std::move(frame));
   }
 
   if (!superframe_delayed_by_retransmission) {
@@ -315,9 +315,9 @@ EncodedFrame* FrameBuffer::GetNextFrame() {
   UpdateTimingFrameInfo();
 
   if (frames_out.size() == 1) {
-    return frames_out[0];
+    return std::move(frames_out[0]);
   } else {
-    return CombineAndDeleteFrames(frames_out);
+    return CombineAndDeleteFrames(std::move(frames_out));
   }
 }
@@ -660,15 +660,15 @@ void FrameBuffer::ClearFramesAndHistory() {
 
 // TODO(philipel): Avoid the concatenation of frames here, by replacing
 // NextFrame and GetNextFrame with methods returning multiple frames.
-EncodedFrame* FrameBuffer::CombineAndDeleteFrames(
-    const std::vector<EncodedFrame*>& frames) const {
+std::unique_ptr<EncodedFrame> FrameBuffer::CombineAndDeleteFrames(
+    std::vector<std::unique_ptr<EncodedFrame>> frames) const {
   RTC_DCHECK(!frames.empty());
-  EncodedFrame* first_frame = frames[0];
-  EncodedFrame* last_frame = frames.back();
   size_t total_length = 0;
-  for (size_t i = 0; i < frames.size(); ++i) {
-    total_length += frames[i]->size();
+  for (const auto& frame : frames) {
+    total_length += frame->size();
   }
+  const EncodedFrame& last_frame = *frames.back();
+  std::unique_ptr<EncodedFrame> first_frame = std::move(frames[0]);
   auto encoded_image_buffer = EncodedImageBuffer::Create(total_length);
   uint8_t* buffer = encoded_image_buffer->data();
   first_frame->SetSpatialLayerFrameSize(first_frame->SpatialIndex().value_or(0),
@@ -678,21 +678,21 @@ EncodedFrame* FrameBuffer::CombineAndDeleteFrames(
   // Spatial index of combined frame is set equal to spatial index of its top
   // spatial layer.
-  first_frame->SetSpatialIndex(last_frame->SpatialIndex().value_or(0));
+  first_frame->SetSpatialIndex(last_frame.SpatialIndex().value_or(0));
   first_frame->video_timing_mutable()->network2_timestamp_ms =
-      last_frame->video_timing().network2_timestamp_ms;
+      last_frame.video_timing().network2_timestamp_ms;
   first_frame->video_timing_mutable()->receive_finish_ms =
-      last_frame->video_timing().receive_finish_ms;
+      last_frame.video_timing().receive_finish_ms;
 
   // Append all remaining frames to the first one.
   for (size_t i = 1; i < frames.size(); ++i) {
-    EncodedFrame* next_frame = frames[i];
+    // Let |next_frame| fall out of scope so it is deleted after copying.
+    std::unique_ptr<EncodedFrame> next_frame = std::move(frames[i]);
     first_frame->SetSpatialLayerFrameSize(
         next_frame->SpatialIndex().value_or(0), next_frame->size());
     memcpy(buffer, next_frame->data(), next_frame->size());
     buffer += next_frame->size();
-    delete next_frame;
   }
 
   first_frame->SetEncodedData(encoded_image_buffer);
   return first_frame;

View File

@@ -121,7 +121,8 @@ class FrameBuffer {
   bool ValidReferences(const EncodedFrame& frame) const;
 
   int64_t FindNextFrame(int64_t now_ms) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
-  EncodedFrame* GetNextFrame() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+  std::unique_ptr<EncodedFrame> GetNextFrame()
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
 
   void StartWaitForNextFrameOnQueue() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
   void CancelCallback() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
@@ -155,8 +156,8 @@ class FrameBuffer {
   // vector of frames, but until the decoding pipeline can support decoding
   // multiple frames at the same time we combine all frames to one frame and
  // return it. See bugs.webrtc.org/10064
-  EncodedFrame* CombineAndDeleteFrames(
-      const std::vector<EncodedFrame*>& frames) const;
+  std::unique_ptr<EncodedFrame> CombineAndDeleteFrames(
+      std::vector<std::unique_ptr<EncodedFrame>> frames) const;
 
   RTC_NO_UNIQUE_ADDRESS SequenceChecker construction_checker_;
  RTC_NO_UNIQUE_ADDRESS SequenceChecker callback_checker_;