Refactor scaling.

Introduce a new method I420Buffer::CropAndScale, and a static convenience
helper I420Buffer::CenterCropAndScale. Use them for almost all scaling
needs. Delete the Scaler class and the cricket::VideoFrame::Stretch*
methods.

BUG=webrtc:5682
R=pbos@webrtc.org, perkj@webrtc.org, stefan@webrtc.org

Review URL: https://codereview.webrtc.org/2020593002 .

Cr-Commit-Position: refs/heads/master@{#13110}
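The pattern the hunks below converge on: allocate an I420Buffer at the target
resolution, then scale the source buffer into it, instead of routing through
the deleted Scaler class. A minimal sketch of that call sequence (the 640x360
target and the names |src| and |scaled| are illustrative; the CropAndScale and
CenterCropAndScale parameter lists are not shown in this diff, so only the
ScaleFrom call is taken from it):

    // |src| is an rtc::scoped_refptr<webrtc::VideoFrameBuffer> holding the
    // full-resolution frame; |scaled| is the destination at the target size.
    rtc::scoped_refptr<webrtc::I420Buffer> scaled(
        new rtc::RefCountedObject<webrtc::I420Buffer>(640, 360));
    scaled->ScaleFrom(src);  // Stretches |src| to fill the 640x360 buffer.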
@@ -145,15 +145,16 @@ struct FrameEncodeParams {
 // We receive I420Frames as input, but we need to feed CVPixelBuffers into the
 // encoder. This performs the copy and format conversion.
 // TODO(tkchin): See if encoder will accept i420 frames and compare performance.
-bool CopyVideoFrameToPixelBuffer(const webrtc::VideoFrame& frame,
-                                 CVPixelBufferRef pixel_buffer) {
+bool CopyVideoFrameToPixelBuffer(
+    const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& frame,
+    CVPixelBufferRef pixel_buffer) {
   RTC_DCHECK(pixel_buffer);
   RTC_DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
              kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
   RTC_DCHECK(CVPixelBufferGetHeightOfPlane(pixel_buffer, 0) ==
-             static_cast<size_t>(frame.height()));
+             static_cast<size_t>(frame->height()));
   RTC_DCHECK(CVPixelBufferGetWidthOfPlane(pixel_buffer, 0) ==
-             static_cast<size_t>(frame.width()));
+             static_cast<size_t>(frame->width()));
 
   CVReturn cvRet = CVPixelBufferLockBaseAddress(pixel_buffer, 0);
   if (cvRet != kCVReturnSuccess) {

@@ -168,14 +169,11 @@ bool CopyVideoFrameToPixelBuffer(const webrtc::VideoFrame& frame,
   int dst_stride_uv = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 1);
   // Convert I420 to NV12.
   int ret = libyuv::I420ToNV12(
-      frame.video_frame_buffer()->DataY(),
-      frame.video_frame_buffer()->StrideY(),
-      frame.video_frame_buffer()->DataU(),
-      frame.video_frame_buffer()->StrideU(),
-      frame.video_frame_buffer()->DataV(),
-      frame.video_frame_buffer()->StrideV(),
+      frame->DataY(), frame->StrideY(),
+      frame->DataU(), frame->StrideU(),
+      frame->DataV(), frame->StrideV(),
       dst_y, dst_stride_y, dst_uv, dst_stride_uv,
-      frame.width(), frame.height());
+      frame->width(), frame->height());
   CVPixelBufferUnlockBaseAddress(pixel_buffer, 0);
   if (ret) {
     LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;

@@ -247,11 +245,12 @@ int H264VideoToolboxEncoder::InitEncode(const VideoCodec* codec_settings,
   return ResetCompressionSession();
 }
 
-const VideoFrame& H264VideoToolboxEncoder::GetScaledFrameOnEncode(
-    const VideoFrame& frame) {
+rtc::scoped_refptr<VideoFrameBuffer>
+H264VideoToolboxEncoder::GetScaledBufferOnEncode(
+    const rtc::scoped_refptr<VideoFrameBuffer>& frame) {
   rtc::CritScope lock(&quality_scaler_crit_);
-  quality_scaler_.OnEncodeFrame(frame);
-  return quality_scaler_.GetScaledFrame(frame);
+  quality_scaler_.OnEncodeFrame(frame->width(), frame->height());
+  return quality_scaler_.GetScaledBuffer(frame);
 }
 
 int H264VideoToolboxEncoder::Encode(

@@ -270,11 +269,12 @@ int H264VideoToolboxEncoder::Encode(
   }
 #endif
   bool is_keyframe_required = false;
-  const VideoFrame& input_image = GetScaledFrameOnEncode(frame);
+  rtc::scoped_refptr<VideoFrameBuffer> input_image(
+      GetScaledBufferOnEncode(frame.video_frame_buffer()));
 
-  if (input_image.width() != width_ || input_image.height() != height_) {
-    width_ = input_image.width();
-    height_ = input_image.height();
+  if (input_image->width() != width_ || input_image->height() != height_) {
+    width_ = input_image->width();
+    height_ = input_image->height();
     int ret = ResetCompressionSession();
     if (ret < 0)
       return ret;

@@ -327,7 +327,7 @@ int H264VideoToolboxEncoder::Encode(
   }
 
   CMTime presentation_time_stamp =
-      CMTimeMake(input_image.render_time_ms(), 1000);
+      CMTimeMake(frame.render_time_ms(), 1000);
   CFDictionaryRef frame_properties = nullptr;
   if (is_keyframe_required) {
     CFTypeRef keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};

@@ -336,8 +336,8 @@ int H264VideoToolboxEncoder::Encode(
   }
   std::unique_ptr<internal::FrameEncodeParams> encode_params;
   encode_params.reset(new internal::FrameEncodeParams(
-      this, codec_specific_info, width_, height_, input_image.render_time_ms(),
-      input_image.timestamp(), input_image.rotation()));
+      this, codec_specific_info, width_, height_, frame.render_time_ms(),
+      frame.timestamp(), frame.rotation()));
 
   // Update the bitrate if needed.
   SetBitrateBps(bitrate_adjuster_.GetAdjustedBitrateBps());

@@ -70,7 +70,8 @@ class H264VideoToolboxEncoder : public H264Encoder {
   int ResetCompressionSession();
   void ConfigureCompressionSession();
   void DestroyCompressionSession();
-  const VideoFrame& GetScaledFrameOnEncode(const VideoFrame& frame);
+  rtc::scoped_refptr<VideoFrameBuffer> GetScaledBufferOnEncode(
+      const rtc::scoped_refptr<VideoFrameBuffer>& frame);
   void SetBitrateBps(uint32_t bitrate_bps);
   void SetEncoderBitrateBps(uint32_t bitrate_bps);
 

@@ -66,8 +66,7 @@ VideoProcessorImpl::VideoProcessorImpl(webrtc::VideoEncoder* encoder,
       num_dropped_frames_(0),
       num_spatial_resizes_(0),
       last_encoder_frame_width_(0),
-      last_encoder_frame_height_(0),
-      scaler_() {
+      last_encoder_frame_height_(0) {
   assert(encoder);
   assert(decoder);
   assert(frame_reader);

@@ -335,23 +334,16 @@ void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) {
   // upsample back to original size: needed for PSNR and SSIM computations.
   if (image.width() != config_.codec_settings->width ||
       image.height() != config_.codec_settings->height) {
-    VideoFrame up_image;
-    int ret_val = scaler_.Set(
-        image.width(), image.height(), config_.codec_settings->width,
-        config_.codec_settings->height, kI420, kI420, kScaleBilinear);
-    assert(ret_val >= 0);
-    if (ret_val < 0) {
-      fprintf(stderr, "Failed to set scalar for frame: %d, return code: %d\n",
-              frame_number, ret_val);
-    }
-    ret_val = scaler_.Scale(image, &up_image);
-    assert(ret_val >= 0);
-    if (ret_val < 0) {
-      fprintf(stderr, "Failed to scale frame: %d, return code: %d\n",
-              frame_number, ret_val);
-    }
+    rtc::scoped_refptr<I420Buffer> up_image(
+        new rtc::RefCountedObject<I420Buffer>(config_.codec_settings->width,
+                                              config_.codec_settings->height));
+
+    // Should be the same aspect ratio, no cropping needed.
+    up_image->ScaleFrom(image.video_frame_buffer());
+
     // TODO(mikhal): Extracting the buffer for now - need to update test.
-    size_t length = CalcBufferSize(kI420, up_image.width(), up_image.height());
+    size_t length =
+        CalcBufferSize(kI420, up_image->width(), up_image->height());
     std::unique_ptr<uint8_t[]> image_buffer(new uint8_t[length]);
     int extracted_length = ExtractBuffer(up_image, length, image_buffer.get());
     assert(extracted_length > 0);

@@ -14,7 +14,6 @@
 #include <string>
 
 #include "webrtc/base/checks.h"
-#include "webrtc/common_video/libyuv/include/scaler.h"
 #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
 #include "webrtc/modules/video_coding/include/video_codec_interface.h"
 #include "webrtc/modules/video_coding/codecs/test/packet_manipulator.h"

@@ -219,7 +218,6 @@ class VideoProcessorImpl : public VideoProcessor {
   int num_spatial_resizes_;
   int last_encoder_frame_width_;
   int last_encoder_frame_height_;
-  Scaler scaler_;
 
   // Statistics
   double bit_rate_factor_;  // multiply frame length with this to get bit rate

@@ -525,8 +525,8 @@ class VideoProcessorIntegrationTest : public testing::Test {
     EXPECT_GT(psnr_result.min, quality_metrics.minimum_min_psnr);
     EXPECT_GT(ssim_result.average, quality_metrics.minimum_avg_ssim);
     EXPECT_GT(ssim_result.min, quality_metrics.minimum_min_ssim);
-    if (!remove(config_.output_filename.c_str())) {
-      fprintf(stderr, "Failed to remove temporary file!");
+    if (remove(config_.output_filename.c_str()) < 0) {
+      fprintf(stderr, "Failed to remove temporary file!\n");
     }
   }
 };

@@ -729,40 +729,40 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
   if (encoded_complete_callback_ == NULL)
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
 
-  if (quality_scaler_enabled_)
-    quality_scaler_.OnEncodeFrame(frame);
-  const VideoFrame& input_image =
-      quality_scaler_enabled_ ? quality_scaler_.GetScaledFrame(frame) : frame;
+  rtc::scoped_refptr<VideoFrameBuffer> input_image = frame.video_frame_buffer();
 
-  if (quality_scaler_enabled_ && (input_image.width() != codec_.width ||
-                                  input_image.height() != codec_.height)) {
-    int ret = UpdateCodecFrameSize(input_image);
-    if (ret < 0)
-      return ret;
+  if (quality_scaler_enabled_) {
+    quality_scaler_.OnEncodeFrame(frame.width(), frame.height());
+    input_image = quality_scaler_.GetScaledBuffer(input_image);
+
+    if (input_image->width() != codec_.width ||
+        input_image->height() != codec_.height) {
+      int ret =
+          UpdateCodecFrameSize(input_image->width(), input_image->height());
+      if (ret < 0)
+        return ret;
+    }
   }
 
   // Since we are extracting raw pointers from |input_image| to
   // |raw_images_[0]|, the resolution of these frames must match. Note that
   // |input_image| might be scaled from |frame|. In that case, the resolution of
   // |raw_images_[0]| should have been updated in UpdateCodecFrameSize.
-  RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_images_[0].d_w));
-  RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_images_[0].d_h));
+  RTC_DCHECK_EQ(input_image->width(), static_cast<int>(raw_images_[0].d_w));
+  RTC_DCHECK_EQ(input_image->height(), static_cast<int>(raw_images_[0].d_h));
 
   // Image in vpx_image_t format.
   // Input image is const. VP8's raw image is not defined as const.
   raw_images_[0].planes[VPX_PLANE_Y] =
-      const_cast<uint8_t*>(input_image.video_frame_buffer()->DataY());
+      const_cast<uint8_t*>(input_image->DataY());
   raw_images_[0].planes[VPX_PLANE_U] =
-      const_cast<uint8_t*>(input_image.video_frame_buffer()->DataU());
+      const_cast<uint8_t*>(input_image->DataU());
   raw_images_[0].planes[VPX_PLANE_V] =
-      const_cast<uint8_t*>(input_image.video_frame_buffer()->DataV());
+      const_cast<uint8_t*>(input_image->DataV());
 
-  raw_images_[0].stride[VPX_PLANE_Y] =
-      input_image.video_frame_buffer()->StrideY();
-  raw_images_[0].stride[VPX_PLANE_U] =
-      input_image.video_frame_buffer()->StrideU();
-  raw_images_[0].stride[VPX_PLANE_V] =
-      input_image.video_frame_buffer()->StrideV();
+  raw_images_[0].stride[VPX_PLANE_Y] = input_image->StrideY();
+  raw_images_[0].stride[VPX_PLANE_U] = input_image->StrideU();
+  raw_images_[0].stride[VPX_PLANE_V] = input_image->StrideV();
 
   for (size_t i = 1; i < encoders_.size(); ++i) {
     // Scale the image down a number of times by downsampling factor

@@ -781,7 +781,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
   }
   vpx_enc_frame_flags_t flags[kMaxSimulcastStreams];
   for (size_t i = 0; i < encoders_.size(); ++i) {
-    int ret = temporal_layers_[i]->EncodeFlags(input_image.timestamp());
+    int ret = temporal_layers_[i]->EncodeFlags(frame.timestamp());
    if (ret < 0) {
      // Drop this frame.
      return WEBRTC_VIDEO_CODEC_OK;

@@ -833,11 +833,11 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
       rps_.ReceivedRPSI(codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
     }
     if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) {
-      sendRefresh = rps_.ReceivedSLI(input_image.timestamp());
+      sendRefresh = rps_.ReceivedSLI(frame.timestamp());
     }
     for (size_t i = 0; i < encoders_.size(); ++i) {
       flags[i] = rps_.EncodeFlags(picture_id_[i], sendRefresh,
-                                  input_image.timestamp());
+                                  frame.timestamp());
     }
   } else {
     if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) {

@@ -905,17 +905,18 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
   if (error)
     return WEBRTC_VIDEO_CODEC_ERROR;
   timestamp_ += duration;
-  return GetEncodedPartitions(input_image, only_predict_from_key_frame);
+  // Examines frame timestamps only.
+  return GetEncodedPartitions(frame, only_predict_from_key_frame);
 }
 
 // TODO(pbos): Make sure this works for properly for >1 encoders.
-int VP8EncoderImpl::UpdateCodecFrameSize(const VideoFrame& input_image) {
-  codec_.width = input_image.width();
-  codec_.height = input_image.height();
+int VP8EncoderImpl::UpdateCodecFrameSize(int width, int height) {
+  codec_.width = width;
+  codec_.height = height;
   if (codec_.numberOfSimulcastStreams <= 1) {
     // For now scaling is only used for single-layer streams.
-    codec_.simulcastStream[0].width = input_image.width();
-    codec_.simulcastStream[0].height = input_image.height();
+    codec_.simulcastStream[0].width = width;
+    codec_.simulcastStream[0].height = height;
   }
   // Update the cpu_speed setting for resolution change.
   vpx_codec_control(&(encoders_[0]), VP8E_SET_CPUUSED,

@@ -75,7 +75,7 @@ class VP8EncoderImpl : public VP8Encoder {
   int InitAndSetControlSettings();
 
   // Update frame size for codec.
-  int UpdateCodecFrameSize(const VideoFrame& input_image);
+  int UpdateCodecFrameSize(int width, int height);
 
   void PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
                              const vpx_codec_cx_pkt& pkt,

@@ -11,6 +11,8 @@
 #ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_MOVING_AVERAGE_H_
 #define WEBRTC_MODULES_VIDEO_CODING_UTILITY_MOVING_AVERAGE_H_
 
+#include <stddef.h>
+
 #include <list>
 
 #include "webrtc/typedefs.h"

@@ -7,6 +7,7 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/video_coding/utility/quality_scaler.h"
+
 namespace webrtc {

@@ -94,11 +95,11 @@ void QualityScaler::ReportDroppedFrame() {
   framedrop_percent_.AddSample(100);
 }
 
-void QualityScaler::OnEncodeFrame(const VideoFrame& frame) {
+void QualityScaler::OnEncodeFrame(int width, int height) {
   // Should be set through InitEncode -> Should be set by now.
-  assert(low_qp_threshold_ >= 0);
-  assert(num_samples_upscale_ > 0);
-  assert(num_samples_downscale_ > 0);
+  RTC_DCHECK_GE(low_qp_threshold_, 0);
+  RTC_DCHECK_GT(num_samples_upscale_, 0u);
+  RTC_DCHECK_GT(num_samples_downscale_, 0u);
 
   // Update scale factor.
   int avg_drop = 0;

@@ -113,38 +114,31 @@ void QualityScaler::OnEncodeFrame(const VideoFrame& frame) {
       avg_qp <= low_qp_threshold_) {
     AdjustScale(true);
   }
-  UpdateTargetResolution(frame.width(), frame.height());
+  UpdateTargetResolution(width, height);
 }
 
 QualityScaler::Resolution QualityScaler::GetScaledResolution() const {
   return res_;
 }
 
-const VideoFrame& QualityScaler::GetScaledFrame(const VideoFrame& frame) {
+rtc::scoped_refptr<VideoFrameBuffer> QualityScaler::GetScaledBuffer(
+    const rtc::scoped_refptr<VideoFrameBuffer>& frame) {
   Resolution res = GetScaledResolution();
-  if (res.width == frame.width())
+  int src_width = frame->width();
+  int src_height = frame->height();
+
+  if (res.width == src_width && res.height == src_height)
     return frame;
+  rtc::scoped_refptr<I420Buffer> scaled_buffer =
+      pool_.CreateBuffer(res.width, res.height);
 
-  scaler_.Set(frame.width(), frame.height(), res.width, res.height, kI420,
-              kI420, kScaleBox);
-  if (scaler_.Scale(frame, &scaled_frame_) != 0)
-    return frame;
+  scaled_buffer->ScaleFrom(frame);
 
-  // TODO(perkj): Refactor the scaler to not own |scaled_frame|. VideoFrame are
-  // just thin wrappers so instead the scaler should return a
-  // rtc::scoped_refptr<VideoFrameBuffer> and a new VideoFrame be created with
-  // the meta data from |frame|. That way we would not have to set all these
-  // meta data.
-  scaled_frame_.set_ntp_time_ms(frame.ntp_time_ms());
-  scaled_frame_.set_timestamp(frame.timestamp());
-  scaled_frame_.set_render_time_ms(frame.render_time_ms());
-  scaled_frame_.set_rotation(frame.rotation());
-
-  return scaled_frame_;
+  return scaled_buffer;
 }
 
 void QualityScaler::UpdateTargetResolution(int frame_width, int frame_height) {
-  assert(downscale_shift_ >= 0);
+  RTC_DCHECK_GE(downscale_shift_, 0);
   int shifts_performed = 0;
   for (int shift = downscale_shift_;
        shift > 0 && (frame_width / 2 >= kMinDownscaleDimension) &&

@@ -11,7 +11,7 @@
 #ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_QUALITY_SCALER_H_
 #define WEBRTC_MODULES_VIDEO_CODING_UTILITY_QUALITY_SCALER_H_
 
-#include "webrtc/common_video/libyuv/include/scaler.h"
+#include "webrtc/common_video/include/i420_buffer_pool.h"
 #include "webrtc/modules/video_coding/utility/moving_average.h"
 
 namespace webrtc {

@@ -32,9 +32,10 @@ class QualityScaler {
   void ReportFramerate(int framerate);
   void ReportQP(int qp);
   void ReportDroppedFrame();
-  void OnEncodeFrame(const VideoFrame& frame);
+  void OnEncodeFrame(int width, int height);
   Resolution GetScaledResolution() const;
-  const VideoFrame& GetScaledFrame(const VideoFrame& frame);
+  rtc::scoped_refptr<VideoFrameBuffer> GetScaledBuffer(
+      const rtc::scoped_refptr<VideoFrameBuffer>& frame);
   int downscale_shift() const { return downscale_shift_; }
 
   // QP is obtained from VP8-bitstream for HW, so the QP corresponds to the

@@ -52,8 +53,7 @@ class QualityScaler {
   void ClearSamples();
   void UpdateSampleCounts();
 
-  Scaler scaler_;
-  VideoFrame scaled_frame_;
+  I420BufferPool pool_;
 
   size_t num_samples_downscale_;
   size_t num_samples_upscale_;

@@ -16,7 +16,6 @@ namespace webrtc {
 namespace {
 static const int kNumSeconds = 10;
 static const int kWidth = 1920;
-static const int kHalfWidth = kWidth / 2;
 static const int kHeight = 1080;
 static const int kFramerate = 30;
 static const int kLowQp = 15;

@@ -43,14 +42,14 @@ class QualityScalerTest : public ::testing::Test {
   };
 
   QualityScalerTest() {
-    input_frame_.CreateEmptyFrame(kWidth, kHeight, kWidth, kHalfWidth,
-                                  kHalfWidth);
+    input_frame_ = rtc::scoped_refptr<VideoFrameBuffer>(
+        new rtc::RefCountedObject<I420Buffer>(kWidth, kHeight));
     qs_.Init(kLowQpThreshold, kHighQp, 0, 0, 0, kFramerate);
-    qs_.OnEncodeFrame(input_frame_);
+    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
   }
 
   bool TriggerScale(ScaleDirection scale_direction) {
-    qs_.OnEncodeFrame(input_frame_);
+    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
     int initial_width = qs_.GetScaledResolution().width;
     for (int i = 0; i < kFramerate * kNumSeconds; ++i) {
       switch (scale_direction) {

@@ -67,7 +66,7 @@ class QualityScalerTest : public ::testing::Test {
           qs_.ReportQP(kHighQp + 1);
           break;
       }
-      qs_.OnEncodeFrame(input_frame_);
+      qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
       if (qs_.GetScaledResolution().width != initial_width)
         return true;
     }

@@ -76,16 +75,17 @@ class QualityScalerTest : public ::testing::Test {
   }
 
   void ExpectOriginalFrame() {
-    EXPECT_EQ(&input_frame_, &qs_.GetScaledFrame(input_frame_))
+    EXPECT_EQ(input_frame_, qs_.GetScaledBuffer(input_frame_))
         << "Using scaled frame instead of original input.";
   }
 
   void ExpectScaleUsingReportedResolution() {
-    qs_.OnEncodeFrame(input_frame_);
+    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
     QualityScaler::Resolution res = qs_.GetScaledResolution();
-    const VideoFrame& scaled_frame = qs_.GetScaledFrame(input_frame_);
-    EXPECT_EQ(res.width, scaled_frame.width());
-    EXPECT_EQ(res.height, scaled_frame.height());
+    rtc::scoped_refptr<VideoFrameBuffer> scaled_frame =
+        qs_.GetScaledBuffer(input_frame_);
+    EXPECT_EQ(res.width, scaled_frame->width());
+    EXPECT_EQ(res.height, scaled_frame->height());
   }
 
   void ContinuouslyDownscalesByHalfDimensionsAndBackUp();

@@ -98,7 +98,7 @@ class QualityScalerTest : public ::testing::Test {
                         int end_height);
 
   QualityScaler qs_;
-  VideoFrame input_frame_;
+  rtc::scoped_refptr<VideoFrameBuffer> input_frame_;
 };
 
 TEST_F(QualityScalerTest, UsesOriginalFrameInitially) {

@@ -106,34 +106,34 @@ TEST_F(QualityScalerTest, UsesOriginalFrameInitially) {
 }
 
 TEST_F(QualityScalerTest, ReportsOriginalResolutionInitially) {
-  qs_.OnEncodeFrame(input_frame_);
+  qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
   QualityScaler::Resolution res = qs_.GetScaledResolution();
-  EXPECT_EQ(input_frame_.width(), res.width);
-  EXPECT_EQ(input_frame_.height(), res.height);
+  EXPECT_EQ(input_frame_->width(), res.width);
+  EXPECT_EQ(input_frame_->height(), res.height);
 }
 
 TEST_F(QualityScalerTest, DownscalesAfterContinuousFramedrop) {
   EXPECT_TRUE(TriggerScale(kScaleDown)) << "No downscale within " << kNumSeconds
                                         << " seconds.";
   QualityScaler::Resolution res = qs_.GetScaledResolution();
-  EXPECT_LT(res.width, input_frame_.width());
-  EXPECT_LT(res.height, input_frame_.height());
+  EXPECT_LT(res.width, input_frame_->width());
+  EXPECT_LT(res.height, input_frame_->height());
 }
 
 TEST_F(QualityScalerTest, KeepsScaleAtHighQp) {
   EXPECT_FALSE(TriggerScale(kKeepScaleAtHighQp))
       << "Downscale at high threshold which should keep scale.";
   QualityScaler::Resolution res = qs_.GetScaledResolution();
-  EXPECT_EQ(res.width, input_frame_.width());
-  EXPECT_EQ(res.height, input_frame_.height());
+  EXPECT_EQ(res.width, input_frame_->width());
+  EXPECT_EQ(res.height, input_frame_->height());
 }
 
 TEST_F(QualityScalerTest, DownscalesAboveHighQp) {
   EXPECT_TRUE(TriggerScale(kScaleDownAboveHighQp))
      << "No downscale within " << kNumSeconds << " seconds.";
   QualityScaler::Resolution res = qs_.GetScaledResolution();
-  EXPECT_LT(res.width, input_frame_.width());
-  EXPECT_LT(res.height, input_frame_.height());
+  EXPECT_LT(res.width, input_frame_->width());
+  EXPECT_LT(res.height, input_frame_->height());
 }
 
 TEST_F(QualityScalerTest, DownscalesAfterTwoThirdsFramedrop) {

@@ -141,8 +141,8 @@ TEST_F(QualityScalerTest, DownscalesAfterTwoThirdsFramedrop) {
     qs_.ReportQP(kNormalQp);
     qs_.ReportDroppedFrame();
    qs_.ReportDroppedFrame();
-    qs_.OnEncodeFrame(input_frame_);
-    if (qs_.GetScaledResolution().width < input_frame_.width())
+    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
+    if (qs_.GetScaledResolution().width < input_frame_->width())
      return;
   }
 

@@ -152,8 +152,8 @@ TEST_F(QualityScalerTest, DownscalesAfterTwoThirdsFramedrop) {
 TEST_F(QualityScalerTest, DoesNotDownscaleOnNormalQp) {
   for (int i = 0; i < kFramerate * kNumSeconds; ++i) {
     qs_.ReportQP(kNormalQp);
-    qs_.OnEncodeFrame(input_frame_);
-    ASSERT_EQ(input_frame_.width(), qs_.GetScaledResolution().width)
+    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
+    ASSERT_EQ(input_frame_->width(), qs_.GetScaledResolution().width)
         << "Unexpected scale on half framedrop.";
   }
 }

@@ -161,33 +161,33 @@ TEST_F(QualityScalerTest, DoesNotDownscaleOnNormalQp) {
 TEST_F(QualityScalerTest, DoesNotDownscaleAfterHalfFramedrop) {
   for (int i = 0; i < kFramerate * kNumSeconds / 2; ++i) {
     qs_.ReportQP(kNormalQp);
-    qs_.OnEncodeFrame(input_frame_);
-    ASSERT_EQ(input_frame_.width(), qs_.GetScaledResolution().width)
+    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
+    ASSERT_EQ(input_frame_->width(), qs_.GetScaledResolution().width)
        << "Unexpected scale on half framedrop.";
 
     qs_.ReportDroppedFrame();
-    qs_.OnEncodeFrame(input_frame_);
-    ASSERT_EQ(input_frame_.width(), qs_.GetScaledResolution().width)
+    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
+    ASSERT_EQ(input_frame_->width(), qs_.GetScaledResolution().width)
        << "Unexpected scale on half framedrop.";
   }
 }
 
 void QualityScalerTest::ContinuouslyDownscalesByHalfDimensionsAndBackUp() {
-  const int initial_min_dimension = input_frame_.width() < input_frame_.height()
-                                        ? input_frame_.width()
-                                        : input_frame_.height();
+  const int initial_min_dimension =
+      input_frame_->width() < input_frame_->height() ? input_frame_->width()
+                                                     : input_frame_->height();
   int min_dimension = initial_min_dimension;
   int current_shift = 0;
   // Drop all frames to force-trigger downscaling.
   while (min_dimension >= 2 * kMinDownscaleDimension) {
     EXPECT_TRUE(TriggerScale(kScaleDown)) << "No downscale within "
                                           << kNumSeconds << " seconds.";
-    qs_.OnEncodeFrame(input_frame_);
+    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
     QualityScaler::Resolution res = qs_.GetScaledResolution();
     min_dimension = res.width < res.height ? res.width : res.height;
     ++current_shift;
-    ASSERT_EQ(input_frame_.width() >> current_shift, res.width);
-    ASSERT_EQ(input_frame_.height() >> current_shift, res.height);
+    ASSERT_EQ(input_frame_->width() >> current_shift, res.width);
+    ASSERT_EQ(input_frame_->height() >> current_shift, res.height);
     ExpectScaleUsingReportedResolution();
   }
 

@@ -195,12 +195,12 @@ void QualityScalerTest::ContinuouslyDownscalesByHalfDimensionsAndBackUp() {
   while (min_dimension < initial_min_dimension) {
     EXPECT_TRUE(TriggerScale(kScaleUp)) << "No upscale within " << kNumSeconds
                                         << " seconds.";
-    qs_.OnEncodeFrame(input_frame_);
+    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
     QualityScaler::Resolution res = qs_.GetScaledResolution();
     min_dimension = res.width < res.height ? res.width : res.height;
     --current_shift;
-    ASSERT_EQ(input_frame_.width() >> current_shift, res.width);
-    ASSERT_EQ(input_frame_.height() >> current_shift, res.height);
+    ASSERT_EQ(input_frame_->width() >> current_shift, res.width);
+    ASSERT_EQ(input_frame_->height() >> current_shift, res.height);
     ExpectScaleUsingReportedResolution();
   }
 

@@ -218,21 +218,20 @@ TEST_F(QualityScalerTest, ContinuouslyDownscalesByHalfDimensionsAndBackUp) {
 TEST_F(QualityScalerTest,
        ContinuouslyDownscalesOddResolutionsByHalfDimensionsAndBackUp) {
   const int kOddWidth = 517;
-  const int kHalfOddWidth = (kOddWidth + 1) / 2;
   const int kOddHeight = 1239;
-  input_frame_.CreateEmptyFrame(kOddWidth, kOddHeight, kOddWidth, kHalfOddWidth,
-                                kHalfOddWidth);
+  input_frame_ = rtc::scoped_refptr<VideoFrameBuffer>(
+      new rtc::RefCountedObject<I420Buffer>(kOddWidth, kOddHeight));
   ContinuouslyDownscalesByHalfDimensionsAndBackUp();
 }
 
 void QualityScalerTest::DoesNotDownscaleFrameDimensions(int width, int height) {
-  input_frame_.CreateEmptyFrame(width, height, width, (width + 1) / 2,
-                                (width + 1) / 2);
+  input_frame_ = rtc::scoped_refptr<VideoFrameBuffer>(
+      new rtc::RefCountedObject<I420Buffer>(width, height));
 
   for (int i = 0; i < kFramerate * kNumSeconds; ++i) {
     qs_.ReportDroppedFrame();
-    qs_.OnEncodeFrame(input_frame_);
-    ASSERT_EQ(input_frame_.width(), qs_.GetScaledResolution().width)
+    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
+    ASSERT_EQ(input_frame_->width(), qs_.GetScaledResolution().width)
        << "Unexpected scale of minimal-size frame.";
   }
 }

@@ -263,11 +262,11 @@ TEST_F(QualityScalerTest, DownscaleToVgaOnLowInitialBitrate) {
   static const int kWidth720p = 1280;
   static const int kHeight720p = 720;
   static const int kInitialBitrateKbps = 300;
-  input_frame_.CreateEmptyFrame(kWidth720p, kHeight720p, kWidth720p,
-                                kWidth720p / 2, kWidth720p / 2);
+  input_frame_ = rtc::scoped_refptr<VideoFrameBuffer>(
+      new rtc::RefCountedObject<I420Buffer>(kWidth720p, kHeight720p));
   qs_.Init(kLowQpThreshold, kDisabledBadQpThreshold, kInitialBitrateKbps,
            kWidth720p, kHeight720p, kFramerate);
-  qs_.OnEncodeFrame(input_frame_);
+  qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
   int init_width = qs_.GetScaledResolution().width;
   int init_height = qs_.GetScaledResolution().height;
   EXPECT_EQ(640, init_width);

@@ -278,11 +277,11 @@ TEST_F(QualityScalerTest, DownscaleToQvgaOnLowerInitialBitrate) {
   static const int kWidth720p = 1280;
   static const int kHeight720p = 720;
   static const int kInitialBitrateKbps = 200;
-  input_frame_.CreateEmptyFrame(kWidth720p, kHeight720p, kWidth720p,
-                                kWidth720p / 2, kWidth720p / 2);
+  input_frame_ = rtc::scoped_refptr<VideoFrameBuffer>(
+      new rtc::RefCountedObject<I420Buffer>(kWidth720p, kHeight720p));
   qs_.Init(kLowQpThreshold, kDisabledBadQpThreshold, kInitialBitrateKbps,
            kWidth720p, kHeight720p, kFramerate);
-  qs_.OnEncodeFrame(input_frame_);
+  qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
   int init_width = qs_.GetScaledResolution().width;
   int init_height = qs_.GetScaledResolution().height;
   EXPECT_EQ(320, init_width);

@@ -291,13 +290,13 @@ TEST_F(QualityScalerTest, DownscaleToQvgaOnLowerInitialBitrate) {
 
 TEST_F(QualityScalerTest, DownscaleAfterMeasuredSecondsThenSlowerBackUp) {
   qs_.Init(kLowQpThreshold, kHighQp, 0, kWidth, kHeight, kFramerate);
-  qs_.OnEncodeFrame(input_frame_);
+  qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
   QualityScaler::Resolution initial_res = qs_.GetScaledResolution();
 
   // Should not downscale if less than kMeasureSecondsDownscale seconds passed.
   for (int i = 0; i < kFramerate * kMeasureSecondsDownscale - 1; ++i) {
     qs_.ReportQP(kHighQp + 1);
-    qs_.OnEncodeFrame(input_frame_);
+    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
   }
   EXPECT_EQ(initial_res.width, qs_.GetScaledResolution().width);
   EXPECT_EQ(initial_res.height, qs_.GetScaledResolution().height);

@@ -305,7 +304,7 @@ TEST_F(QualityScalerTest, DownscaleAfterMeasuredSecondsThenSlowerBackUp) {
   // Should downscale if more than kMeasureSecondsDownscale seconds passed (add
   // last frame).
   qs_.ReportQP(kHighQp + 1);
-  qs_.OnEncodeFrame(input_frame_);
+  qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
   EXPECT_GT(initial_res.width, qs_.GetScaledResolution().width);
   EXPECT_GT(initial_res.height, qs_.GetScaledResolution().height);
 

@@ -313,7 +312,7 @@ TEST_F(QualityScalerTest, DownscaleAfterMeasuredSecondsThenSlowerBackUp) {
   // we saw issues initially (have already gone down).
   for (int i = 0; i < kFramerate * kMeasureSecondsUpscale - 1; ++i) {
     qs_.ReportQP(kLowQp);
-    qs_.OnEncodeFrame(input_frame_);
+    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
   }
   EXPECT_GT(initial_res.width, qs_.GetScaledResolution().width);
   EXPECT_GT(initial_res.height, qs_.GetScaledResolution().height);

@@ -321,7 +320,7 @@ TEST_F(QualityScalerTest, DownscaleAfterMeasuredSecondsThenSlowerBackUp) {
   // Should upscale (back to initial) if kMeasureSecondsUpscale seconds passed
   // (add last frame).
   qs_.ReportQP(kLowQp);
-  qs_.OnEncodeFrame(input_frame_);
+  qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
   EXPECT_EQ(initial_res.width, qs_.GetScaledResolution().width);
   EXPECT_EQ(initial_res.height, qs_.GetScaledResolution().height);
 }

@@ -329,13 +328,13 @@ TEST_F(QualityScalerTest, DownscaleAfterMeasuredSecondsThenSlowerBackUp) {
 TEST_F(QualityScalerTest, UpscaleQuicklyInitiallyAfterMeasuredSeconds) {
   qs_.Init(kLowQpThreshold, kHighQp, kLowInitialBitrateKbps, kWidth, kHeight,
            kFramerate);
-  qs_.OnEncodeFrame(input_frame_);
+  qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
   QualityScaler::Resolution initial_res = qs_.GetScaledResolution();
 
   // Should not upscale if less than kMeasureSecondsFastUpscale seconds passed.
   for (int i = 0; i < kFramerate * kMeasureSecondsFastUpscale - 1; ++i) {
     qs_.ReportQP(kLowQp);
-    qs_.OnEncodeFrame(input_frame_);
+    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
   }
   EXPECT_EQ(initial_res.width, qs_.GetScaledResolution().width);
   EXPECT_EQ(initial_res.height, qs_.GetScaledResolution().height);

@@ -343,7 +342,7 @@ TEST_F(QualityScalerTest, UpscaleQuicklyInitiallyAfterMeasuredSeconds) {
   // Should upscale if kMeasureSecondsFastUpscale seconds passed (add last
   // frame).
   qs_.ReportQP(kLowQp);
-  qs_.OnEncodeFrame(input_frame_);
+  qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
   EXPECT_LT(initial_res.width, qs_.GetScaledResolution().width);
   EXPECT_LT(initial_res.height, qs_.GetScaledResolution().height);
 }

@@ -354,8 +353,8 @@ void QualityScalerTest::DownscaleEndsAt(int input_width,
                                         int end_height) {
   // Create a frame with 2x expected end width/height to verify that we can
   // scale down to expected end width/height.
-  input_frame_.CreateEmptyFrame(input_width, input_height, input_width,
-                                (input_width + 1) / 2, (input_width + 1) / 2);
+  input_frame_ = rtc::scoped_refptr<VideoFrameBuffer>(
+      new rtc::RefCountedObject<I420Buffer>(input_width, input_height));
 
   int last_width = input_width;
   int last_height = input_height;