Add frame_helpers
A number of utility functions to be shared between frame buffer 2 and
the new frame scheduling implementation based on frame buffer 3.

Change-Id: Icc932c6c76fddeeedc8aa64ec27c7e0c955abfd0
Bug: webrtc:13343
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/241604
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Commit-Queue: Evan Shrubsole <eshr@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#35743}
Committed by: WebRTC LUCI CQ
Parent: 8e8b966d54
Commit: f83d4265b5
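For orientation before the diff: the CL extracts two pure helpers out of frame buffer 2, FrameHasBadRenderTiming() and CombineAndDeleteFrames(), so the frame-buffer-3 based scheduler can share them. A minimal caller sketch under the signatures introduced below; the wrapper name ShouldResetTiming is hypothetical and not part of the CL:

#include <cstdint>

#include "modules/video_coding/frame_helpers.h"

// Hypothetical wrapper for illustration. It mirrors the FrameBuffer2 call
// site in this CL, which resets the jitter estimator and timing state when
// a frame's render time is unusable.
bool ShouldResetTiming(int64_t render_time_ms,
                       int64_t now_ms,
                       int target_video_delay_ms) {
  // True for negative render times, render times more than 10 seconds away
  // from `now_ms`, or a target delay that exceeds the same 10-second bound.
  return webrtc::FrameHasBadRenderTiming(render_time_ms, now_ms,
                                         target_video_delay_ms);
}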
modules/video_coding/BUILD.gn

@@ -148,6 +148,18 @@ rtc_library("h264_packet_buffer") {
   absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
 }
 
+rtc_library("frame_helpers") {
+  sources = [
+    "frame_helpers.cc",
+    "frame_helpers.h",
+  ]
+  deps = [
+    "../../api/video:encoded_frame",
+    "../../rtc_base:logging",
+  ]
+  absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ]
+}
+
 rtc_library("frame_buffer") {
   sources = [
     "frame_buffer3.cc",
@@ -227,6 +239,7 @@ rtc_library("video_coding") {
     ":codec_globals_headers",
     ":encoded_frame",
     ":frame_buffer",
+    ":frame_helpers",
     ":packet_buffer",
     ":video_codec_interface",
     ":video_coding_utility",
modules/video_coding/frame_buffer2.cc

@@ -18,8 +18,10 @@
 #include <utility>
 #include <vector>
 
+#include "absl/container/inlined_vector.h"
 #include "api/video/encoded_image.h"
 #include "api/video/video_timing.h"
+#include "modules/video_coding/frame_helpers.h"
 #include "modules/video_coding/include/video_coding_defines.h"
 #include "modules/video_coding/jitter_estimator.h"
 #include "modules/video_coding/timing.h"
@@ -250,7 +252,8 @@ std::unique_ptr<EncodedFrame> FrameBuffer::GetNextFrame() {
   int64_t render_time_ms = first_frame.RenderTime();
   int64_t receive_time_ms = first_frame.ReceivedTime();
   // Gracefully handle bad RTP timestamps and render time issues.
-  if (HasBadRenderTiming(first_frame, now_ms)) {
+  if (FrameHasBadRenderTiming(first_frame.RenderTimeMs(), now_ms,
+                              timing_->TargetVideoDelay())) {
     jitter_estimator_.Reset();
     timing_->Reset();
     render_time_ms = timing_->RenderTimeMs(first_frame.Timestamp(), now_ms);
@@ -318,35 +321,6 @@ std::unique_ptr<EncodedFrame> FrameBuffer::GetNextFrame() {
   }
 }
 
-bool FrameBuffer::HasBadRenderTiming(const EncodedFrame& frame,
-                                     int64_t now_ms) {
-  // Assume that render timing errors are due to changes in the video stream.
-  int64_t render_time_ms = frame.RenderTimeMs();
-  // Zero render time means render immediately.
-  if (render_time_ms == 0) {
-    return false;
-  }
-  if (render_time_ms < 0) {
-    return true;
-  }
-  const int64_t kMaxVideoDelayMs = 10000;
-  if (std::abs(render_time_ms - now_ms) > kMaxVideoDelayMs) {
-    int frame_delay = static_cast<int>(std::abs(render_time_ms - now_ms));
-    RTC_LOG(LS_WARNING)
-        << "A frame about to be decoded is out of the configured "
-           "delay bounds ("
-        << frame_delay << " > " << kMaxVideoDelayMs
-        << "). Resetting the video jitter buffer.";
-    return true;
-  }
-  if (static_cast<int>(timing_->TargetVideoDelay()) > kMaxVideoDelayMs) {
-    RTC_LOG(LS_WARNING) << "The video target delay has grown larger than "
-                        << kMaxVideoDelayMs << " ms.";
-    return true;
-  }
-  return false;
-}
-
 void FrameBuffer::SetProtectionMode(VCMVideoProtection mode) {
   TRACE_EVENT0("webrtc", "FrameBuffer::SetProtectionMode");
   MutexLock lock(&mutex_);
@@ -660,39 +634,11 @@ void FrameBuffer::ClearFramesAndHistory() {
 std::unique_ptr<EncodedFrame> FrameBuffer::CombineAndDeleteFrames(
     std::vector<std::unique_ptr<EncodedFrame>> frames) const {
   RTC_DCHECK(!frames.empty());
-  size_t total_length = 0;
-  for (const auto& frame : frames) {
-    total_length += frame->size();
+  absl::InlinedVector<std::unique_ptr<EncodedFrame>, 4> inlined;
+  for (auto& frame : frames) {
+    inlined.push_back(std::move(frame));
   }
-  const EncodedFrame& last_frame = *frames.back();
-  std::unique_ptr<EncodedFrame> first_frame = std::move(frames[0]);
-  auto encoded_image_buffer = EncodedImageBuffer::Create(total_length);
-  uint8_t* buffer = encoded_image_buffer->data();
-  first_frame->SetSpatialLayerFrameSize(first_frame->SpatialIndex().value_or(0),
-                                        first_frame->size());
-  memcpy(buffer, first_frame->data(), first_frame->size());
-  buffer += first_frame->size();
-
-  // Spatial index of combined frame is set equal to spatial index of its top
-  // spatial layer.
-  first_frame->SetSpatialIndex(last_frame.SpatialIndex().value_or(0));
-
-  first_frame->video_timing_mutable()->network2_timestamp_ms =
-      last_frame.video_timing().network2_timestamp_ms;
-  first_frame->video_timing_mutable()->receive_finish_ms =
-      last_frame.video_timing().receive_finish_ms;
-
-  // Append all remaining frames to the first one.
-  for (size_t i = 1; i < frames.size(); ++i) {
-    // Let |next_frame| fall out of scope so it is deleted after copying.
-    std::unique_ptr<EncodedFrame> next_frame = std::move(frames[i]);
-    first_frame->SetSpatialLayerFrameSize(
-        next_frame->SpatialIndex().value_or(0), next_frame->size());
-    memcpy(buffer, next_frame->data(), next_frame->size());
-    buffer += next_frame->size();
-  }
-  first_frame->SetEncodedData(encoded_image_buffer);
-  return first_frame;
+  return webrtc::CombineAndDeleteFrames(std::move(inlined));
 }
 
 FrameBuffer::FrameInfo::FrameInfo() = default;
modules/video_coding/frame_buffer2.h

@@ -147,9 +147,6 @@ class FrameBuffer {
 
   void ClearFramesAndHistory() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
 
-  bool HasBadRenderTiming(const EncodedFrame& frame, int64_t now_ms)
-      RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
-
   // The cleaner solution would be to have the NextFrame function return a
   // vector of frames, but until the decoding pipeline can support decoding
   // multiple frames at the same time we combine all frames to one frame and
modules/video_coding/frame_helpers.cc (new file, 90 lines)
@@ -0,0 +1,90 @@
+/*
+ *  Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/frame_helpers.h"
+
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+bool FrameHasBadRenderTiming(int64_t render_time_ms,
+                             int64_t now_ms,
+                             int target_video_delay) {
+  // Zero render time means render immediately.
+  if (render_time_ms == 0) {
+    return false;
+  }
+  if (render_time_ms < 0) {
+    return true;
+  }
+  const int64_t kMaxVideoDelayMs = 10000;
+  if (std::abs(render_time_ms - now_ms) > kMaxVideoDelayMs) {
+    int frame_delay = static_cast<int>(std::abs(render_time_ms - now_ms));
+    RTC_LOG(LS_WARNING)
+        << "A frame about to be decoded is out of the configured "
+           "delay bounds ("
+        << frame_delay << " > " << kMaxVideoDelayMs
+        << "). Resetting the video jitter buffer.";
+    return true;
+  }
+  if (target_video_delay > kMaxVideoDelayMs) {
+    RTC_LOG(LS_WARNING) << "The video target delay has grown larger than "
+                        << kMaxVideoDelayMs << " ms.";
+    return true;
+  }
+  return false;
+}
+
+std::unique_ptr<EncodedFrame> CombineAndDeleteFrames(
+    absl::InlinedVector<std::unique_ptr<EncodedFrame>, 4> frames) {
+  RTC_DCHECK(!frames.empty());
+
+  if (frames.size() == 1) {
+    return std::move(frames[0]);
+  }
+
+  size_t total_length = 0;
+  for (const auto& frame : frames) {
+    total_length += frame->size();
+  }
+  const EncodedFrame& last_frame = *frames.back();
+  std::unique_ptr<EncodedFrame> first_frame = std::move(frames[0]);
+  auto encoded_image_buffer = EncodedImageBuffer::Create(total_length);
+  uint8_t* buffer = encoded_image_buffer->data();
+  first_frame->SetSpatialLayerFrameSize(first_frame->SpatialIndex().value_or(0),
+                                        first_frame->size());
+  memcpy(buffer, first_frame->data(), first_frame->size());
+  buffer += first_frame->size();
+
+  // Spatial index of combined frame is set equal to spatial index of its top
+  // spatial layer.
+  first_frame->SetSpatialIndex(last_frame.SpatialIndex().value_or(0));
+
+  first_frame->video_timing_mutable()->network2_timestamp_ms =
+      last_frame.video_timing().network2_timestamp_ms;
+  first_frame->video_timing_mutable()->receive_finish_ms =
+      last_frame.video_timing().receive_finish_ms;
+
+  // Append all remaining frames to the first one.
+  for (size_t i = 1; i < frames.size(); ++i) {
+    // Let |next_frame| fall out of scope so it is deleted after copying.
+    std::unique_ptr<EncodedFrame> next_frame = std::move(frames[i]);
+    first_frame->SetSpatialLayerFrameSize(
+        next_frame->SpatialIndex().value_or(0), next_frame->size());
+    memcpy(buffer, next_frame->data(), next_frame->size());
+    buffer += next_frame->size();
+  }
+  first_frame->SetEncodedData(encoded_image_buffer);
+  return first_frame;
+}
+
+}  // namespace webrtc
modules/video_coding/frame_helpers.h (new file, 31 lines)
@@ -0,0 +1,31 @@
+/*
+ *  Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_FRAME_HELPERS_H_
+#define MODULES_VIDEO_CODING_FRAME_HELPERS_H_
+
+#include <memory>
+
+#include "absl/container/inlined_vector.h"
+#include "api/video/encoded_frame.h"
+
+namespace webrtc {
+
+// TODO(https://bugs.webrtc.org/13589): Switch to using Timestamp and TimeDelta.
+bool FrameHasBadRenderTiming(int64_t render_time_ms,
+                             int64_t now_ms,
+                             int target_video_delay);
+
+std::unique_ptr<EncodedFrame> CombineAndDeleteFrames(
+    absl::InlinedVector<std::unique_ptr<EncodedFrame>, 4> frames);
+
+}  // namespace webrtc
+
+#endif  // MODULES_VIDEO_CODING_FRAME_HELPERS_H_
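A usage note on the second helper, not part of the CL text: CombineAndDeleteFrames() expects the frames of one temporal unit ordered from lowest to highest spatial layer, which is how the FrameBuffer2 adapter above fills its InlinedVector. A hedged caller sketch; AssembleSuperframe is a hypothetical name:

#include <memory>
#include <utility>
#include <vector>

#include "absl/container/inlined_vector.h"
#include "api/video/encoded_frame.h"
#include "modules/video_coding/frame_helpers.h"

// Hypothetical helper: combine the spatial layers of one temporal unit into
// a single EncodedFrame, the way FrameBuffer2::CombineAndDeleteFrames now
// does via the shared helper.
std::unique_ptr<webrtc::EncodedFrame> AssembleSuperframe(
    std::vector<std::unique_ptr<webrtc::EncodedFrame>> layers) {
  absl::InlinedVector<std::unique_ptr<webrtc::EncodedFrame>, 4> frames;
  for (auto& layer : layers) {
    frames.push_back(std::move(layer));
  }
  // The combined frame reuses the first layer's EncodedFrame, takes the last
  // layer's spatial index and receive timing, and concatenates the payloads.
  return webrtc::CombineAndDeleteFrames(std::move(frames));
}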