RtpEncodingParameters::request_resolution patch 4
This patch:

1) modifies VideoAdapter to use requested_resolution instead of OnOutputFormatRequest, iff there are no active encoders that are not using requested_resolution (i.e. all "old" encoders are inactive);
2) modifies VideoBroadcaster to not broadcast wants from encoders that are not active, iff there is an active encoder using requested_resolution;
3) fixes a bug in encoder_stream_factory where requested_resolution was not propagated to the returned streams (presumably lost in a merge).

Bug: webrtc:14451
Change-Id: I00e0907f0fe9329141ed169576fa46cdc5384886
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/278360
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Reviewed-by: Rasmus Brandt <brandtr@webrtc.org>
Commit-Queue: Jonas Oreland <jonaso@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#38323}
Committed by: WebRTC LUCI CQ
Parent: 9b73159888
Commit: 43f0f29d30
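For context (not part of this patch), the application-side entry point is RtpEncodingParameters rather than VideoAdapter::OnOutputFormatRequest. A minimal sketch, assuming an existing `sender` of type rtc::scoped_refptr<webrtc::RtpSenderInterface> and the field name used in this patch series (requested_resolution):

#include "api/rtp_parameters.h"
#include "api/rtp_sender_interface.h"
#include "api/scoped_refptr.h"
#include "api/video/resolution.h"

// Ask every encoding of `sender` to produce a fixed 640x360 output.
void RequestFixedResolution(
    rtc::scoped_refptr<webrtc::RtpSenderInterface> sender) {
  webrtc::RtpParameters parameters = sender->GetParameters();
  for (webrtc::RtpEncodingParameters& encoding : parameters.encodings) {
    encoding.active = true;
    encoding.requested_resolution =
        webrtc::Resolution{.width = 640, .height = 360};
  }
  sender->SetParameters(parameters);
}

With all active encodings carrying requested_resolution, the VideoAdapter changes below take over from OnOutputFormatRequest.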
@@ -85,7 +85,7 @@ struct RTC_EXPORT VideoSinkWants {
  absl::optional<FrameSize> requested_resolution;

  // `active` : is (any) of the layers/sink(s) active.
  bool is_active = false;
  bool is_active = true;

  // This sub-struct contains information computed by VideoBroadcaster
  // that aggregates several VideoSinkWants (and sends them to
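For reference (a sketch, not part of the diff), this is roughly how a sink would populate the fields above; `source` and `sink` are assumed to be an existing rtc::VideoSourceInterface<webrtc::VideoFrame>* and its VideoSinkInterface<webrtc::VideoFrame>*:

rtc::VideoSinkWants wants;
wants.is_active = true;  // Explicit here; the default changes in this hunk.
wants.requested_resolution = rtc::VideoSinkWants::FrameSize(1280, 720);
source->AddOrUpdateSink(sink, wants);

The aggregates sub-struct is filled in by VideoBroadcaster, so individual sinks leave it unset.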
@@ -605,6 +605,7 @@ if (rtc_include_tests) {
    "../api/units:time_delta",
    "../api/units:timestamp",
    "../api/video:builtin_video_bitrate_allocator_factory",
    "../api/video:resolution",
    "../api/video:video_bitrate_allocation",
    "../api/video:video_codec_constants",
    "../api/video:video_frame",
@@ -20,6 +20,7 @@
#include "media/base/video_common.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/time_utils.h"
#include "system_wrappers/include/field_trial.h"

@@ -121,6 +122,15 @@ Fraction FindScale(int input_width,

  return best_scale;
}

absl::optional<std::pair<int, int>> Swap(
    const absl::optional<std::pair<int, int>>& in) {
  if (!in) {
    return absl::nullopt;
  }
  return std::make_pair(in->second, in->first);
}

}  // namespace

namespace cricket {
@@ -146,8 +156,8 @@ VideoAdapter::~VideoAdapter() {}

bool VideoAdapter::DropFrame(int64_t in_timestamp_ns) {
  int max_fps = max_framerate_request_;
  if (max_fps_)
    max_fps = std::min(max_fps, *max_fps_);
  if (output_format_request_.max_fps)
    max_fps = std::min(max_fps, *output_format_request_.max_fps);

  framerate_controller_.SetMaxFramerate(max_fps);
  return framerate_controller_.ShouldDropFrame(in_timestamp_ns);

@@ -171,13 +181,15 @@ bool VideoAdapter::AdaptFrameResolution(int in_width,
  // orientation.
  absl::optional<std::pair<int, int>> target_aspect_ratio;
  if (in_width > in_height) {
    target_aspect_ratio = target_landscape_aspect_ratio_;
    if (max_landscape_pixel_count_)
      max_pixel_count = std::min(max_pixel_count, *max_landscape_pixel_count_);
    target_aspect_ratio = output_format_request_.target_landscape_aspect_ratio;
    if (output_format_request_.max_landscape_pixel_count)
      max_pixel_count = std::min(
          max_pixel_count, *output_format_request_.max_landscape_pixel_count);
  } else {
    target_aspect_ratio = target_portrait_aspect_ratio_;
    if (max_portrait_pixel_count_)
      max_pixel_count = std::min(max_pixel_count, *max_portrait_pixel_count_);
    target_aspect_ratio = output_format_request_.target_portrait_aspect_ratio;
    if (output_format_request_.max_portrait_pixel_count)
      max_pixel_count = std::min(
          max_pixel_count, *output_format_request_.max_portrait_pixel_count);
  }

  int target_pixel_count =

@@ -195,7 +207,7 @@ bool VideoAdapter::AdaptFrameResolution(int in_width,
                     << " Input: " << in_width << "x" << in_height
                     << " timestamp: " << in_timestamp_ns
                     << " Output fps: " << max_framerate_request_ << "/"
                     << max_fps_.value_or(-1)
                     << output_format_request_.max_fps.value_or(-1)
                     << " alignment: " << resolution_alignment_;
  }

@@ -249,7 +261,7 @@ bool VideoAdapter::AdaptFrameResolution(int in_width,
                     << " Scale: " << scale.numerator << "/"
                     << scale.denominator << " Output: " << *out_width << "x"
                     << *out_height << " fps: " << max_framerate_request_ << "/"
                     << max_fps_.value_or(-1)
                     << output_format_request_.max_fps.value_or(-1)
                     << " alignment: " << resolution_alignment_;
  }
@@ -300,11 +312,27 @@ void VideoAdapter::OnOutputFormatRequest(
    const absl::optional<int>& max_portrait_pixel_count,
    const absl::optional<int>& max_fps) {
  webrtc::MutexLock lock(&mutex_);
  target_landscape_aspect_ratio_ = target_landscape_aspect_ratio;
  max_landscape_pixel_count_ = max_landscape_pixel_count;
  target_portrait_aspect_ratio_ = target_portrait_aspect_ratio;
  max_portrait_pixel_count_ = max_portrait_pixel_count;
  max_fps_ = max_fps;

  OutputFormatRequest request = {
      .target_landscape_aspect_ratio = target_landscape_aspect_ratio,
      .max_landscape_pixel_count = max_landscape_pixel_count,
      .target_portrait_aspect_ratio = target_portrait_aspect_ratio,
      .max_portrait_pixel_count = max_portrait_pixel_count,
      .max_fps = max_fps};

  if (stashed_output_format_request_) {
    // Save the output format request for later use in case the encoder making
    // this call would become active, because currently all active encoders use
    // requested_resolution instead.
    stashed_output_format_request_ = request;
    RTC_LOG(LS_INFO) << "Stashing OnOutputFormatRequest: "
                     << stashed_output_format_request_->ToString();
  } else {
    output_format_request_ = request;
    RTC_LOG(LS_INFO) << "Setting output_format_request_: "
                     << output_format_request_.ToString();
  }

  framerate_controller_.Reset();
}
@@ -317,6 +345,60 @@ void VideoAdapter::OnSinkWants(const rtc::VideoSinkWants& sink_wants) {
  max_framerate_request_ = sink_wants.max_framerate_fps;
  resolution_alignment_ = cricket::LeastCommonMultiple(
      source_resolution_alignment_, sink_wants.resolution_alignment);

  if (!sink_wants.aggregates) {
    RTC_LOG(LS_WARNING)
        << "These should always be created by VideoBroadcaster!";
    return;
  }

  // If requested_resolution is used, and there are no active encoders
  // that are NOT using requested_resolution (aka newapi), then override
  // calls to OnOutputFormatRequest and use values from requested_resolution
  // instead (combined with qualityscaling based on pixel counts above).
  if (webrtc::field_trial::IsDisabled(
          "WebRTC-Video-RequestedResolutionOverrideOutputFormatRequest")) {
    // kill-switch...
    return;
  }

  if (!sink_wants.requested_resolution) {
    if (stashed_output_format_request_) {
      // because current active_output_format_request is based on
      // requested_resolution logic, while current encoder(s) doesn't want that,
      // we have to restore the stashed request.
      RTC_LOG(LS_INFO) << "Unstashing OnOutputFormatRequest: "
                       << stashed_output_format_request_->ToString();
      output_format_request_ = *stashed_output_format_request_;
      stashed_output_format_request_.reset();
    }
    return;
  }

  if (sink_wants.aggregates->any_active_without_requested_resolution) {
    return;
  }

  if (!stashed_output_format_request_) {
    // The active output format request is about to be rewritten by
    // request_resolution. We need to save it for later use in case the encoder
    // which doesn't use request_resolution logic become active in the future.
    stashed_output_format_request_ = output_format_request_;
    RTC_LOG(LS_INFO) << "Stashing OnOutputFormatRequest: "
                     << stashed_output_format_request_->ToString();
  }

  auto res = *sink_wants.requested_resolution;
  auto pixel_count = res.width * res.height;
  output_format_request_.target_landscape_aspect_ratio =
      std::make_pair(res.width, res.height);
  output_format_request_.max_landscape_pixel_count = pixel_count;
  output_format_request_.target_portrait_aspect_ratio =
      std::make_pair(res.height, res.width);
  output_format_request_.max_portrait_pixel_count = pixel_count;
  output_format_request_.max_fps = max_framerate_request_;
  RTC_LOG(LS_INFO) << "Setting output_format_request_ based on sink_wants: "
                   << output_format_request_.ToString();
}

int VideoAdapter::GetTargetPixels() const {
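The field-trial check above acts as a kill switch. A minimal sketch (assuming the embedder initializes WebRTC field trials itself) of opting out so that OnOutputFormatRequest keeps winning unconditionally:

#include "system_wrappers/include/field_trial.h"

// The trial string must remain valid for as long as the field-trial API is
// used; a static literal satisfies that.
void DisableRequestedResolutionOverride() {
  static const char kFieldTrials[] =
      "WebRTC-Video-RequestedResolutionOverrideOutputFormatRequest/Disabled/";
  webrtc::field_trial::InitFieldTrialsFromString(kFieldTrials);
}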
@@ -326,10 +408,11 @@ int VideoAdapter::GetTargetPixels() const {

float VideoAdapter::GetMaxFramerate() const {
  webrtc::MutexLock lock(&mutex_);
  // Minimum of `max_fps_` and `max_framerate_request_` is used to throttle
  // frame-rate.
  int framerate = std::min(max_framerate_request_,
                           max_fps_.value_or(max_framerate_request_));
  // Minimum of `output_format_request_.max_fps` and `max_framerate_request_` is
  // used to throttle frame-rate.
  int framerate =
      std::min(max_framerate_request_,
               output_format_request_.max_fps.value_or(max_framerate_request_));
  if (framerate == std::numeric_limits<int>::max()) {
    return std::numeric_limits<float>::infinity();
  } else {

@@ -337,4 +420,49 @@ float VideoAdapter::GetMaxFramerate() const {
  }
}

std::string VideoAdapter::OutputFormatRequest::ToString() const {
  rtc::StringBuilder oss;
  oss << "[ ";
  if (target_landscape_aspect_ratio == Swap(target_portrait_aspect_ratio) &&
      max_landscape_pixel_count == max_portrait_pixel_count) {
    if (target_landscape_aspect_ratio) {
      oss << target_landscape_aspect_ratio->first << "x"
          << target_landscape_aspect_ratio->second;
    } else {
      oss << "unset-resolution";
    }
    if (max_landscape_pixel_count) {
      oss << " max_pixel_count: " << *max_landscape_pixel_count;
    }
  } else {
    oss << "[ landscape: ";
    if (target_landscape_aspect_ratio) {
      oss << target_landscape_aspect_ratio->first << "x"
          << target_landscape_aspect_ratio->second;
    } else {
      oss << "unset";
    }
    if (max_landscape_pixel_count) {
      oss << " max_pixel_count: " << *max_landscape_pixel_count;
    }
    oss << " ] [ portrait: ";
    if (target_portrait_aspect_ratio) {
      oss << target_portrait_aspect_ratio->first << "x"
          << target_portrait_aspect_ratio->second;
    }
    if (max_portrait_pixel_count) {
      oss << " max_pixel_count: " << *max_portrait_pixel_count;
    }
    oss << " ]";
  }
  oss << " max_fps: ";
  if (max_fps) {
    oss << *max_fps;
  } else {
    oss << "unset";
  }
  oss << " ]";
  return oss.Release();
}

}  // namespace cricket
@@ -13,6 +13,7 @@

#include <stdint.h>

#include <string>
#include <utility>

#include "absl/types/optional.h"

@@ -133,17 +134,33 @@ class RTC_EXPORT VideoAdapter {
  // Max number of pixels/fps requested via calls to OnOutputFormatRequest,
  // OnResolutionFramerateRequest respectively.
  // The adapted output format is the minimum of these.
  absl::optional<std::pair<int, int>> target_landscape_aspect_ratio_
      RTC_GUARDED_BY(mutex_);
  absl::optional<int> max_landscape_pixel_count_ RTC_GUARDED_BY(mutex_);
  absl::optional<std::pair<int, int>> target_portrait_aspect_ratio_
      RTC_GUARDED_BY(mutex_);
  absl::optional<int> max_portrait_pixel_count_ RTC_GUARDED_BY(mutex_);
  absl::optional<int> max_fps_ RTC_GUARDED_BY(mutex_);
  struct OutputFormatRequest {
    absl::optional<std::pair<int, int>> target_landscape_aspect_ratio;
    absl::optional<int> max_landscape_pixel_count;
    absl::optional<std::pair<int, int>> target_portrait_aspect_ratio;
    absl::optional<int> max_portrait_pixel_count;
    absl::optional<int> max_fps;

    // For logging.
    std::string ToString() const;
  };

  OutputFormatRequest output_format_request_ RTC_GUARDED_BY(mutex_);
  int resolution_request_target_pixel_count_ RTC_GUARDED_BY(mutex_);
  int resolution_request_max_pixel_count_ RTC_GUARDED_BY(mutex_);
  int max_framerate_request_ RTC_GUARDED_BY(mutex_);

  // Stashed OutputFormatRequest that is used to save value of
  // OnOutputFormatRequest in case all active encoders are using
  // requested_resolution. I.e when all active encoders are using
  // requested_resolution, the call to OnOutputFormatRequest is ignored
  // and the value from requested_resolution is used instead (to scale/crop
  // frame). This allows for an application to only use
  // RtpEncodingParameters::request_resolution and get the same behavior as if
  // it had used VideoAdapter::OnOutputFormatRequest.
  absl::optional<OutputFormatRequest> stashed_output_format_request_
      RTC_GUARDED_BY(mutex_);

  webrtc::FramerateController framerate_controller_ RTC_GUARDED_BY(mutex_);

  // The critical section to protect the above variables.
@@ -15,12 +15,14 @@
#include <string>
#include <utility>

#include "api/video/resolution.h"
#include "api/video/video_frame.h"
#include "api/video/video_source_interface.h"
#include "media/base/fake_frame_source.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/time_utils.h"
#include "test/field_trial.h"
#include "test/gmock.h"
#include "test/gtest.h"

namespace cricket {

@@ -29,6 +31,11 @@ const int kWidth = 1280;
const int kHeight = 720;
const int kDefaultFps = 30;

using ::testing::_;
using ::testing::Eq;
using ::testing::Pair;
using webrtc::Resolution;

rtc::VideoSinkWants BuildSinkWants(absl::optional<int> target_pixel_count,
                                   int max_pixel_count,
                                   int max_framerate_fps,

@@ -38,6 +45,31 @@ rtc::VideoSinkWants BuildSinkWants(absl::optional<int> target_pixel_count,
  wants.max_pixel_count = max_pixel_count;
  wants.max_framerate_fps = max_framerate_fps;
  wants.resolution_alignment = sink_alignment;
  wants.is_active = true;
  wants.aggregates.emplace(rtc::VideoSinkWants::Aggregates());
  wants.aggregates->any_active_without_requested_resolution = false;
  return wants;
}

rtc::VideoSinkWants BuildSinkWants(
    absl::optional<webrtc::Resolution> requested_resolution,
    bool any_active_without_requested_resolution) {
  rtc::VideoSinkWants wants;
  wants.max_framerate_fps = kDefaultFps;
  wants.resolution_alignment = 1;
  wants.is_active = true;
  if (requested_resolution) {
    wants.target_pixel_count = requested_resolution->PixelCount();
    wants.max_pixel_count = requested_resolution->PixelCount();
    wants.requested_resolution.emplace(rtc::VideoSinkWants::FrameSize(
        requested_resolution->width, requested_resolution->height));
  } else {
    wants.target_pixel_count = kWidth * kHeight;
    wants.max_pixel_count = kWidth * kHeight;
  }
  wants.aggregates.emplace(rtc::VideoSinkWants::Aggregates());
  wants.aggregates->any_active_without_requested_resolution =
      any_active_without_requested_resolution;
  return wants;
}
@@ -136,9 +168,22 @@ class VideoAdapterTest : public ::testing::Test,
                                              cricket::FOURCC_I420));
  }

  // Return pair of <out resolution, cropping>
  std::pair<webrtc::Resolution, webrtc::Resolution> AdaptFrameResolution(
      webrtc::Resolution res) {
    webrtc::Resolution out;
    webrtc::Resolution cropped;
    timestamp_ns_ += 1000000000;
    EXPECT_TRUE(adapter_.AdaptFrameResolution(
        res.width, res.height, timestamp_ns_, &cropped.width, &cropped.height,
        &out.width, &out.height));
    return std::make_pair(out, cropped);
  }

  webrtc::test::ScopedFieldTrials override_field_trials_;
  const std::unique_ptr<FakeFrameSource> frame_source_;
  VideoAdapter adapter_;
  int64_t timestamp_ns_ = 0;
  int cropped_width_;
  int cropped_height_;
  int out_width_;
@@ -1152,6 +1197,81 @@ TEST_P(VideoAdapterTest, AdaptResolutionWithSinkAlignment) {
  }
}

// Verify the cases the OnOutputFormatRequest is ignored and
// requested_resolution is used instead.
TEST_P(VideoAdapterTest, UseRequestedResolutionInsteadOfOnOutputFormatRequest) {
  {
    // Both new and old API active => Use OnOutputFormatRequest
    OnOutputFormatRequest(640, 360, kDefaultFps);
    adapter_.OnSinkWants(
        BuildSinkWants(Resolution{.width = 960, .height = 540},
                       /* any_active_without_requested_resolution= */ true));

    EXPECT_THAT(
        AdaptFrameResolution(/* input frame */ {.width = 1280, .height = 720})
            .first,
        Eq(Resolution{.width = 640, .height = 360}));
  }
  {
    // New API active, old API inactive, ignore OnOutputFormatRequest and use
    // requested_resolution.
    OnOutputFormatRequest(640, 360, kDefaultFps);
    adapter_.OnSinkWants(
        BuildSinkWants(Resolution{.width = 960, .height = 540},
                       /* any_active_without_requested_resolution= */ false));

    EXPECT_THAT(
        AdaptFrameResolution(/* input frame */ {.width = 1280, .height = 720})
            .first,
        Eq(Resolution{.width = 960, .height = 540}));
  }

  {
    // New API inactive, old API inactive, use OnOutputFormatRequest.
    OnOutputFormatRequest(640, 360, kDefaultFps);
    adapter_.OnSinkWants(
        BuildSinkWants(absl::nullopt,
                       /* any_active_without_requested_resolution= */ false));

    EXPECT_THAT(
        AdaptFrameResolution(/* input frame */ {.width = 1280, .height = 720})
            .first,
        Eq(Resolution{.width = 640, .height = 360}));
  }

  {
    // New API active, old API inactive, remember OnOutputFormatRequest.
    OnOutputFormatRequest(640, 360, kDefaultFps);
    adapter_.OnSinkWants(
        BuildSinkWants(Resolution{.width = 960, .height = 540},
                       /* any_active_without_requested_resolution= */ false));

    EXPECT_THAT(
        AdaptFrameResolution(/* input frame */ {.width = 1280, .height = 720})
            .first,
        Eq(Resolution{.width = 960, .height = 540}));

    // This is ignored since there is not any active NOT using
    // requested_resolution.
    OnOutputFormatRequest(320, 180, kDefaultFps);

    EXPECT_THAT(
        AdaptFrameResolution(/* input frame */ {.width = 1280, .height = 720})
            .first,
        Eq(Resolution{.width = 960, .height = 540}));

    // Disable new API => fallback to last OnOutputFormatRequest.
    adapter_.OnSinkWants(
        BuildSinkWants(absl::nullopt,
                       /* any_active_without_requested_resolution= */ false));

    EXPECT_THAT(
        AdaptFrameResolution(/* input frame */ {.width = 1280, .height = 720})
            .first,
        Eq(Resolution{.width = 320, .height = 180}));
  }
}

class VideoAdapterWithSourceAlignmentTest : public VideoAdapterTest {
 protected:
  static constexpr int kSourceResolutionAlignment = 7;
@@ -125,7 +125,28 @@ void VideoBroadcaster::UpdateWants() {
  wants.rotation_applied = false;
  wants.resolution_alignment = 1;
  wants.aggregates.emplace(VideoSinkWants::Aggregates());
  wants.is_active = false;

  // TODO(webrtc:14451) : I think it makes sense to always
  // "ignore" encoders that are not active. But that would
  // probably require a controlled roll out with a field trials?
  // To play it safe, only ignore inactive encoders is there is an
  // active encoder using the new api (requested_resolution),
  // this means that there is only a behavioural change when using new
  // api.
  bool ignore_inactive_encoders_old_api = false;
  for (auto& sink : sink_pairs()) {
    if (sink.wants.is_active && sink.wants.requested_resolution.has_value()) {
      ignore_inactive_encoders_old_api = true;
      break;
    }
  }

  for (auto& sink : sink_pairs()) {
    if (!sink.wants.is_active &&
        (sink.wants.requested_resolution || ignore_inactive_encoders_old_api)) {
      continue;
    }
    // wants.rotation_applied == ANY(sink.wants.rotation_applied)
    if (sink.wants.rotation_applied) {
      wants.rotation_applied = true;
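Restated outside the loop (a sketch, not code from the patch), the rule UpdateWants() now applies when deciding whether a sink's wants contribute to the aggregate:

// A sink is skipped iff it is inactive AND either it uses
// requested_resolution itself or some active sink does.
bool SkipWants(const rtc::VideoSinkWants& wants,
               bool any_active_uses_requested_resolution) {
  return !wants.is_active && (wants.requested_resolution.has_value() ||
                              any_active_uses_requested_resolution);
}

The test below exercises exactly this via resolution_alignment.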
@@ -398,3 +398,39 @@ TEST(VideoBroadcasterTest, AnyActiveWithoutRequestedResolution) {
      false,
      broadcaster.wants().aggregates->any_active_without_requested_resolution);
}

// This verifies that the VideoSinkWants from a Sink that is_active = false
// is ignored IF there is an active sink using new api (Requested_Resolution).
// The uses resolution_alignment for verification.
TEST(VideoBroadcasterTest, IgnoreInactiveSinkIfNewApiUsed) {
  VideoBroadcaster broadcaster;

  FakeVideoRenderer sink1;
  VideoSinkWants wants1;
  wants1.is_active = true;
  wants1.requested_resolution = FrameSize(640, 360);
  wants1.resolution_alignment = 2;
  broadcaster.AddOrUpdateSink(&sink1, wants1);
  EXPECT_EQ(broadcaster.wants().resolution_alignment, 2);

  FakeVideoRenderer sink2;
  VideoSinkWants wants2;
  wants2.is_active = true;
  wants2.resolution_alignment = 8;
  broadcaster.AddOrUpdateSink(&sink2, wants2);
  EXPECT_EQ(broadcaster.wants().resolution_alignment, 8);

  // Now wants2 will be ignored.
  wants2.is_active = false;
  broadcaster.AddOrUpdateSink(&sink2, wants2);
  EXPECT_EQ(broadcaster.wants().resolution_alignment, 2);

  // But when wants1 is inactive, wants2 matters again.
  wants1.is_active = false;
  broadcaster.AddOrUpdateSink(&sink1, wants1);
  EXPECT_EQ(broadcaster.wants().resolution_alignment, 8);

  // inactive wants1 (new api) is always ignored.
  broadcaster.RemoveSink(&sink2);
  EXPECT_EQ(broadcaster.wants().resolution_alignment, 1);
}
@@ -184,6 +184,8 @@ EncoderStreamFactory::CreateDefaultVideoStreams(
  layer.width = width;
  layer.height = height;
  layer.max_framerate = max_framerate;
  layer.requested_resolution =
      encoder_config.simulcast_layers[0].requested_resolution;
  // Note: VP9 seems to have be sending if any layer is active,
  // (see `UpdateSendState`) and still use parameters only from
  // encoder_config.simulcast_layers[0].

@@ -321,6 +323,8 @@ EncoderStreamFactory::CreateSimulcastOrConferenceModeScreenshareStreams(
    layers[i].active = encoder_config.simulcast_layers[i].active;
    layers[i].scalability_mode =
        encoder_config.simulcast_layers[i].scalability_mode;
    layers[i].requested_resolution =
        encoder_config.simulcast_layers[i].requested_resolution;
    // Update with configured num temporal layers if supported by codec.
    if (encoder_config.simulcast_layers[i].num_temporal_layers &&
        IsTemporalLayersSupported(codec_name_)) {

@@ -444,6 +448,7 @@ EncoderStreamFactory::GetLayerResolutionFromRequestedResolution(
  wants.max_pixel_count =
      rtc::dchecked_cast<int>(restrictions_->max_pixels_per_frame().value_or(
          std::numeric_limits<int>::max()));
  wants.aggregates.emplace(rtc::VideoSinkWants::Aggregates());
  wants.resolution_alignment = encoder_info_requested_resolution_alignment_;
  adapter.OnSinkWants(wants);
}
@@ -51,6 +51,8 @@ TEST(EncoderStreamFactory, SinglecastRequestedResolution) {
  encoder_config.simulcast_layers.push_back(
      LayerWithRequestedResolution({.width = 640, .height = 360}));
  auto streams = factory->CreateEncoderStreams(1280, 720, encoder_config);
  EXPECT_EQ(streams[0].requested_resolution,
            (Resolution{.width = 640, .height = 360}));
  EXPECT_EQ(GetStreamResolutions(streams), (std::vector<Resolution>{
                                               {.width = 640, .height = 360},
                                           }));

@@ -71,6 +73,8 @@ TEST(EncoderStreamFactory, SinglecastRequestedResolutionWithAdaptation) {
  encoder_config.simulcast_layers.push_back(
      LayerWithRequestedResolution({.width = 640, .height = 360}));
  auto streams = factory->CreateEncoderStreams(1280, 720, encoder_config);
  EXPECT_EQ(streams[0].requested_resolution,
            (Resolution{.width = 640, .height = 360}));
  EXPECT_EQ(GetStreamResolutions(streams), (std::vector<Resolution>{
                                               {.width = 320, .height = 180},
                                           }));