Revert of Add ability to scale to arbitrary factors (patchset #7 id:120001 of https://codereview.webrtc.org/2555483005/ )

Reason for revert:
Issue discovered with scaling back up.

Original issue's description:
> Add ability to scale to arbitrary factors
>
> This CL adds a fallback for the case when no optimized scale factor produces a low enough resolution for what was requested. It also ensures that all resolutions provided by the video adapter are divisible by four. This is required by some hardware implementations.
>
> BUG=webrtc:6837
>
> Committed: https://crrev.com/710c335d785b104bda4a912bd7909e4d27f9b04f
> Cr-Commit-Position: refs/heads/master@{#15469}
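
For reference, the reverted fallback reaches arbitrary factors by stepping down
from the smallest optimized fraction (3/16) with alternating 2/3 and 3/4
multiplications until the scale fits the requested pixel budget. A minimal
standalone sketch of that search, adapted from the FindScaleLessThanOrEqual
code removed in the videoadapter.cc hunks below (illustration only, not the
exact WebRTC source):

#include <cmath>
#include <cstdio>

struct Fraction {
  int numerator;
  int denominator;
};

// Step down from 3/16 (the smallest optimized fraction) by alternating
// *2/3 and *3/4 steps until the scale is small enough for the pixel budget.
Fraction FindScaleLessThanOrEqual(int input_pixels, int target_pixels) {
  Fraction best = {3, 16};
  const float target_scale =
      std::sqrt(target_pixels / static_cast<float>(input_pixels));
  do {
    if (best.numerator % 3 == 0 && best.denominator % 2 == 0) {
      best.numerator /= 3;  // Multiply the scale by 2/3.
      best.denominator /= 2;
    } else {
      best.numerator *= 3;  // Multiply the scale by 3/4.
      best.denominator *= 4;
    }
  } while (best.numerator > target_scale * best.denominator);
  return best;
}

int main() {
  // A 1280x720 input with a 10000-pixel budget lands on 3/32 (8100 pixels).
  const Fraction f = FindScaleLessThanOrEqual(1280 * 720, 10000);
  std::printf("scale %d/%d -> %d pixels\n", f.numerator, f.denominator,
              1280 * 720 * f.numerator * f.numerator /
                  (f.denominator * f.denominator));
  return 0;
}

On top of this, the reverted change rounded the cropped dimensions up to a
multiple of scale.denominator * required_resolution_alignment_ so that the
scaled output met the hardware alignment requirement.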

TBR=magjed@webrtc.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=webrtc:6837

Review-Url: https://codereview.webrtc.org/2557323002
Cr-Commit-Position: refs/heads/master@{#15470}
Commit: 7722a4cc8d (parent 710c335d78)
Author: kthelgason
Date: 2016-12-08 02:18:25 -08:00
Committed by: Commit bot
5 changed files with 30 additions and 90 deletions

@@ -16,11 +16,6 @@ AdaptedVideoTrackSource::AdaptedVideoTrackSource() {
   thread_checker_.DetachFromThread();
 }
 
-AdaptedVideoTrackSource::AdaptedVideoTrackSource(int required_alignment)
-    : video_adapter_(required_alignment) {
-  thread_checker_.DetachFromThread();
-}
-
 bool AdaptedVideoTrackSource::GetStats(Stats* stats) {
   rtc::CritScope lock(&stats_crit_);

@@ -28,9 +28,6 @@ class AdaptedVideoTrackSource
   AdaptedVideoTrackSource();
 
  protected:
-  // Allows derived classes to initialize |video_adapter_| with a custom
-  // alignment.
-  AdaptedVideoTrackSource(int required_alignment);
   // Checks the apply_rotation() flag. If the frame needs rotation, and it is a
   // plain memory frame, it is rotated. Subclasses producing native frames must
   // handle apply_rotation() themselves.

@@ -11,18 +11,16 @@
 #include "webrtc/media/base/videoadapter.h"
 
 #include <algorithm>
-#include <cmath>
 #include <cstdlib>
 #include <limits>
 
-#include "webrtc/base/arraysize.h"
 #include "webrtc/base/checks.h"
 #include "webrtc/base/logging.h"
-#include "webrtc/base/optional.h"
 #include "webrtc/media/base/mediaconstants.h"
 #include "webrtc/media/base/videocommon.h"
 
 namespace {
 
 struct Fraction {
   int numerator;
   int denominator;
@@ -39,42 +37,17 @@ const Fraction kScaleFractions[] = {
     {3, 16},
 };
 
-// Round |value_to_round| to a multiple of |multiple|. Prefer rounding upwards,
-// but never more than |max_value|.
-int roundUp(int value_to_round, int multiple, int max_value) {
-  const int rounded_value =
-      (value_to_round + multiple - 1) / multiple * multiple;
-  return rounded_value <= max_value ? rounded_value
-                                    : (max_value / multiple * multiple);
+// Round |valueToRound| to a multiple of |multiple|. Prefer rounding upwards,
+// but never more than |maxValue|.
+int roundUp(int valueToRound, int multiple, int maxValue) {
+  const int roundedValue = (valueToRound + multiple - 1) / multiple * multiple;
+  return roundedValue <= maxValue ? roundedValue
+                                  : (maxValue / multiple * multiple);
 }
 
-// Generates a scale factor that makes |input_num_pixels| smaller than
-// |target_num_pixels|. This should only be used after making sure none
-// of the optimized factors are small enough.
 Fraction FindScaleLessThanOrEqual(int input_num_pixels, int target_num_pixels) {
-  // Start searching from the last of the optimal fractions;
-  Fraction best_scale = kScaleFractions[arraysize(kScaleFractions) - 1];
-  const float target_scale =
-      sqrt(target_num_pixels / static_cast<float>(input_num_pixels));
-  do {
-    if (best_scale.numerator % 3 == 0 && best_scale.denominator % 2 == 0) {
-      // Multiply by 2/3
-      best_scale.numerator /= 3;
-      best_scale.denominator /= 2;
-    } else {
-      // Multiply by 3/4
-      best_scale.numerator *= 3;
-      best_scale.denominator *= 4;
-    }
-  } while (best_scale.numerator > (target_scale * best_scale.denominator));
-  return best_scale;
-}
-
-rtc::Optional<Fraction> FindOptimizedScaleLessThanOrEqual(
-    int input_num_pixels,
-    int target_num_pixels) {
   float best_distance = std::numeric_limits<float>::max();
-  rtc::Optional<Fraction> best_scale;
+  Fraction best_scale = {0, 1};  // Default to 0 if nothing matches.
   for (const auto& fraction : kScaleFractions) {
     const float scale =
         fraction.numerator / static_cast<float>(fraction.denominator);
@@ -85,7 +58,7 @@ rtc::Optional<Fraction> FindOptimizedScaleLessThanOrEqual(
     }
     if (diff < best_distance) {
       best_distance = diff;
-      best_scale = rtc::Optional<Fraction>(fraction);
+      best_scale = fraction;
       if (best_distance == 0) {  // Found exact match.
         break;
       }
@@ -94,9 +67,9 @@ rtc::Optional<Fraction> FindOptimizedScaleLessThanOrEqual(
   return best_scale;
 }
 
-Fraction FindOptimizedScaleLargerThan(int input_num_pixels,
-                                      int target_num_pixels,
-                                      int* resulting_number_of_pixels) {
+Fraction FindScaleLargerThan(int input_num_pixels,
+                             int target_num_pixels,
+                             int* resulting_number_of_pixels) {
   float best_distance = std::numeric_limits<float>::max();
   Fraction best_scale = {1, 1};  // Default to unscaled if nothing matches.
   // Default to input number of pixels.
@@ -120,47 +93,35 @@ Fraction FindOptimizedScaleLargerThan(int input_num_pixels,
   return best_scale;
 }
 
-rtc::Optional<Fraction> FindOptimizedScale(int input_num_pixels,
-                                           int max_pixel_count_step_up,
-                                           int max_pixel_count) {
-  // Try scale just above |max_pixel_count_step_up_|.
-  if (max_pixel_count_step_up > 0) {
-    int resulting_pixel_count;
-    const Fraction scale = FindOptimizedScaleLargerThan(
-        input_num_pixels, max_pixel_count_step_up, &resulting_pixel_count);
-    if (resulting_pixel_count <= max_pixel_count)
-      return rtc::Optional<Fraction>(scale);
-  }
-  // Return largest scale below |max_pixel_count|.
-  return FindOptimizedScaleLessThanOrEqual(input_num_pixels, max_pixel_count);
-}
-
 Fraction FindScale(int input_num_pixels,
                    int max_pixel_count_step_up,
                    int max_pixel_count) {
-  const rtc::Optional<Fraction> optimized_scale = FindOptimizedScale(
-      input_num_pixels, max_pixel_count_step_up, max_pixel_count);
-  if (optimized_scale)
-    return *optimized_scale;
+  // Try scale just above |max_pixel_count_step_up_|.
+  if (max_pixel_count_step_up > 0) {
+    int resulting_pixel_count;
+    const Fraction scale = FindScaleLargerThan(
+        input_num_pixels, max_pixel_count_step_up, &resulting_pixel_count);
+    if (resulting_pixel_count <= max_pixel_count)
+      return scale;
+  }
+  // Return largest scale below |max_pixel_count|.
   return FindScaleLessThanOrEqual(input_num_pixels, max_pixel_count);
 }
 
 }  // namespace
 
 namespace cricket {
 
-VideoAdapter::VideoAdapter(int required_resolution_alignment)
+VideoAdapter::VideoAdapter()
     : frames_in_(0),
       frames_out_(0),
       frames_scaled_(0),
       adaption_changes_(0),
       previous_width_(0),
       previous_height_(0),
-      required_resolution_alignment_(required_resolution_alignment),
       resolution_request_max_pixel_count_(std::numeric_limits<int>::max()),
       resolution_request_max_pixel_count_step_up_(0) {}
 
-VideoAdapter::VideoAdapter() : VideoAdapter(1) {}
-
 VideoAdapter::~VideoAdapter() {}
 
 bool VideoAdapter::KeepFrame(int64_t in_timestamp_ns) {
@@ -250,26 +211,22 @@ bool VideoAdapter::AdaptFrameResolution(int in_width,
     *cropped_height =
         std::min(in_height, static_cast<int>(in_width / requested_aspect));
   }
 
+  // Find best scale factor.
   const Fraction scale =
       FindScale(*cropped_width * *cropped_height,
                 resolution_request_max_pixel_count_step_up_, max_pixel_count);
+
   // Adjust cropping slightly to get even integer output size and a perfect
-  // scale factor. Make sure the resulting dimensions are aligned correctly
-  // to be nice to hardware encoders.
-  *cropped_width =
-      roundUp(*cropped_width,
-              scale.denominator * required_resolution_alignment_, in_width);
-  *cropped_height =
-      roundUp(*cropped_height,
-              scale.denominator * required_resolution_alignment_, in_height);
+  // scale factor.
+  *cropped_width = roundUp(*cropped_width, scale.denominator, in_width);
+  *cropped_height = roundUp(*cropped_height, scale.denominator, in_height);
   RTC_DCHECK_EQ(0, *cropped_width % scale.denominator);
   RTC_DCHECK_EQ(0, *cropped_height % scale.denominator);
+
   // Calculate final output size.
   *out_width = *cropped_width / scale.denominator * scale.numerator;
   *out_height = *cropped_height / scale.denominator * scale.numerator;
-  RTC_DCHECK_EQ(0, *out_height % required_resolution_alignment_);
-  RTC_DCHECK_EQ(0, *out_height % required_resolution_alignment_);
 
   ++frames_out_;
   if (scale.numerator != scale.denominator)
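
For a sense of the restored arithmetic, a rough worked example with made-up
numbers (a 642x480 crop and a 3/4 scale; illustration only, not a real call
site):

#include <cstdio>

// Same rounding rule as roundUp() above: round |value| up to a multiple of
// |multiple|, but never past |max_value|.
int RoundUpTo(int value, int multiple, int max_value) {
  const int rounded = (value + multiple - 1) / multiple * multiple;
  return rounded <= max_value ? rounded : max_value / multiple * multiple;
}

int main() {
  const int num = 3, den = 4;  // Suppose FindScale() picked a 3/4 downscale.
  const int cropped_width = RoundUpTo(642, den, 1280);   // 642 -> 644
  const int cropped_height = RoundUpTo(480, den, 720);   // already a multiple
  std::printf("out %dx%d\n", cropped_width / den * num,  // 644 -> 483
              cropped_height / den * num);               // 480 -> 360
  return 0;
}

The restored output is only guaranteed to be divisible by scale.numerator (483
above is odd); the stricter divisible-by-alignment guarantee is exactly what
the reverted change had layered on top via required_resolution_alignment_.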

@@ -25,7 +25,6 @@ namespace cricket {
 class VideoAdapter {
  public:
   VideoAdapter();
-  VideoAdapter(int required_resolution_alignment);
   virtual ~VideoAdapter();
 
   // Return the adapted resolution and cropping parameters given the
@@ -64,8 +63,6 @@ class VideoAdapter {
   int adaption_changes_;  // Number of changes in scale factor.
   int previous_width_;    // Previous adapter output width.
   int previous_height_;   // Previous adapter output height.
-  // Resolution must be divisible by this factor.
-  const int required_resolution_alignment_;
   // The target timestamp for the next frame based on requested format.
   rtc::Optional<int64_t> next_frame_timestamp_ns_ GUARDED_BY(critical_section_);

@@ -12,19 +12,13 @@
 #include <utility>
 
-namespace {
-// MediaCodec wants resolution to be divisible by 2.
-const int kRequiredResolutionAlignment = 2;
-}
-
 namespace webrtc {
 
 AndroidVideoTrackSource::AndroidVideoTrackSource(rtc::Thread* signaling_thread,
                                                  JNIEnv* jni,
                                                  jobject j_egl_context,
                                                  bool is_screencast)
-    : AdaptedVideoTrackSource(kRequiredResolutionAlignment),
-      signaling_thread_(signaling_thread),
+    : signaling_thread_(signaling_thread),
       surface_texture_helper_(webrtc_jni::SurfaceTextureHelper::create(
           jni,
           "Camera SurfaceTextureHelper",