Move usage of QualityScaler to ViEEncoder.

This brings QualityScaler much more in line with OveruseFrameDetector.
The two classes are conceptually similar, and should be used in the
same way. The biggest changes in this CL are:
- Quality scaling is now only done in ViEEncoder and not in each
  encoder implementation separately.
- QualityScaler now checks the average QP asynchronously, instead of
  having to be polled on each frame.
- QualityScaler is no longer responsible for actually scaling the frames,
  but has a callback to ViEEncoder that it uses to express its desire
  for lower resolution.

BUG=webrtc:6495

Review-Url: https://codereview.webrtc.org/2398963003
Cr-Commit-Position: refs/heads/master@{#15286}
This commit is contained in:
kthelgason
2016-11-29 01:44:11 -08:00
committed by Commit bot
parent 320e45ad87
commit 876222f77d
32 changed files with 736 additions and 907 deletions

View File

@ -235,9 +235,6 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
return WEBRTC_VIDEO_CODEC_ERROR;
}
// TODO(pbos): Base init params on these values before submitting.
quality_scaler_.Init(codec_settings->codecType, codec_settings->startBitrate,
codec_settings->width, codec_settings->height,
codec_settings->maxFramerate);
int video_format = EVideoFormatType::videoFormatI420;
openh264_encoder_->SetOption(ENCODER_OPTION_DATAFORMAT,
&video_format);
@ -279,7 +276,6 @@ int32_t H264EncoderImpl::SetRateAllocation(
target_bps_ = bitrate_allocation.get_sum_bps();
max_frame_rate_ = static_cast<float>(framerate);
quality_scaler_.ReportFramerate(framerate);
SBitrateInfo target_bitrate;
memset(&target_bitrate, 0, sizeof(SBitrateInfo));
@ -309,20 +305,6 @@ int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
quality_scaler_.OnEncodeFrame(input_frame.width(), input_frame.height());
rtc::scoped_refptr<const VideoFrameBuffer> frame_buffer =
quality_scaler_.GetScaledBuffer(input_frame.video_frame_buffer());
if (frame_buffer->width() != width_ || frame_buffer->height() != height_) {
LOG(LS_INFO) << "Encoder reinitialized from " << width_ << "x" << height_
<< " to " << frame_buffer->width() << "x"
<< frame_buffer->height();
width_ = frame_buffer->width();
height_ = frame_buffer->height();
SEncParamExt encoder_params = CreateEncoderParams();
openh264_encoder_->SetOption(ENCODER_OPTION_SVC_ENCODE_PARAM_EXT,
&encoder_params);
}
bool force_key_frame = false;
if (frame_types != nullptr) {
// We only support a single stream.
@ -340,7 +322,8 @@ int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
// (If every frame is a key frame we get lag/delays.)
openh264_encoder_->ForceIntraFrame(true);
}
rtc::scoped_refptr<const VideoFrameBuffer> frame_buffer =
input_frame.video_frame_buffer();
// EncodeFrame input.
SSourcePicture picture;
memset(&picture, 0, sizeof(SSourcePicture));
@ -384,22 +367,16 @@ int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
// Encoder can skip frames to save bandwidth in which case
// |encoded_image_._length| == 0.
if (encoded_image_._length > 0) {
// Parse and report QP.
h264_bitstream_parser_.ParseBitstream(encoded_image_._buffer,
encoded_image_._length);
int qp = -1;
if (h264_bitstream_parser_.GetLastSliceQp(&qp)) {
quality_scaler_.ReportQP(qp);
encoded_image_.qp_ = qp;
}
// Deliver encoded image.
CodecSpecificInfo codec_specific;
codec_specific.codecType = kVideoCodecH264;
encoded_image_callback_->OnEncodedImage(encoded_image_, &codec_specific,
&frag_header);
} else {
quality_scaler_.ReportDroppedFrame();
// Parse and report QP.
h264_bitstream_parser_.ParseBitstream(encoded_image_._buffer,
encoded_image_._length);
h264_bitstream_parser_.GetLastSliceQp(&encoded_image_.qp_);
}
return WEBRTC_VIDEO_CODEC_OK;
}
@ -500,8 +477,8 @@ int32_t H264EncoderImpl::SetPeriodicKeyFrames(bool enable) {
return WEBRTC_VIDEO_CODEC_OK;
}
void H264EncoderImpl::OnDroppedFrame() {
quality_scaler_.ReportDroppedFrame();
VideoEncoder::ScalingSettings H264EncoderImpl::GetScalingSettings() const {
return VideoEncoder::ScalingSettings(true);
}
} // namespace webrtc

View File

@ -55,17 +55,17 @@ class H264EncoderImpl : public H264Encoder {
const char* ImplementationName() const override;
VideoEncoder::ScalingSettings GetScalingSettings() const override;
// Unsupported / Do nothing.
int32_t SetChannelParameters(uint32_t packet_loss, int64_t rtt) override;
int32_t SetPeriodicKeyFrames(bool enable) override;
void OnDroppedFrame() override;
private:
bool IsInitialized() const;
SEncParamExt CreateEncoderParams() const;
webrtc::H264BitstreamParser h264_bitstream_parser_;
QualityScaler quality_scaler_;
// Reports statistics with histograms.
void ReportInit();
void ReportError();

View File

@ -70,8 +70,6 @@ class I420Encoder : public VideoEncoder {
return WEBRTC_VIDEO_CODEC_OK;
}
void OnDroppedFrame() override {}
private:
static uint8_t* InsertHeader(uint8_t* buffer,
uint16_t width,

View File

@ -977,38 +977,6 @@ TEST_F(VideoProcessorIntegrationTest,
rc_metrics);
}
// Run with no packet loss, at low bitrate. During this time we should've
// resized once. Expect 2 key frames generated (first and one for resize).
// Too slow to finish before timeout on iOS. See webrtc:4755.
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
#define MAYBE_ProcessNoLossSpatialResizeFrameDropVP8 \
DISABLED_ProcessNoLossSpatialResizeFrameDropVP8
#else
#define MAYBE_ProcessNoLossSpatialResizeFrameDropVP8 \
ProcessNoLossSpatialResizeFrameDropVP8
#endif
TEST_F(VideoProcessorIntegrationTest,
MAYBE_ProcessNoLossSpatialResizeFrameDropVP8) {
config_.networking_config.packet_loss_probability = 0;
// Bitrate and frame rate profile.
RateProfile rate_profile;
SetRateProfilePars(&rate_profile, 0, 50, 30, 0);
rate_profile.frame_index_rate_update[1] = kNbrFramesLong + 1;
rate_profile.num_frames = kNbrFramesLong;
// Codec/network settings.
CodecConfigPars process_settings;
SetCodecParameters(&process_settings, kVideoCodecVP8, 0.0f, -1, 1, false,
true, true, true);
// Metrics for expected quality.
QualityMetrics quality_metrics;
SetQualityMetrics(&quality_metrics, 25.0, 15.0, 0.70, 0.40);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 160, 80, 120, 20, 70, 1, 2);
ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
// VP8: Run with no packet loss, with 3 temporal layers, with a rate update in
// the middle of the sequence. The max values for the frame size mismatch and
// encoding rate mismatch are applied to each layer.

View File

@ -451,10 +451,6 @@ bool SimulcastEncoderAdapter::Initialized() const {
return !streaminfos_.empty();
}
void SimulcastEncoderAdapter::OnDroppedFrame() {
streaminfos_[0].encoder->OnDroppedFrame();
}
bool SimulcastEncoderAdapter::SupportsNativeHandle() const {
// We should not be calling this method before streaminfos_ are configured.
RTC_DCHECK(!streaminfos_.empty());
@ -465,6 +461,14 @@ bool SimulcastEncoderAdapter::SupportsNativeHandle() const {
return true;
}
VideoEncoder::ScalingSettings SimulcastEncoderAdapter::GetScalingSettings()
const {
// Turn off quality scaling for simulcast.
if (NumberOfStreams(codec_) != 1)
return VideoEncoder::ScalingSettings(false);
return streaminfos_[0].encoder->GetScalingSettings();
}
const char* SimulcastEncoderAdapter::ImplementationName() const {
return implementation_name_.c_str();
}

View File

@ -60,7 +60,7 @@ class SimulcastEncoderAdapter : public VP8Encoder {
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation);
void OnDroppedFrame() override;
VideoEncoder::ScalingSettings GetScalingSettings() const override;
bool SupportsNativeHandle() const override;
const char* ImplementationName() const override;

View File

@ -123,8 +123,7 @@ VP8EncoderImpl::VP8EncoderImpl()
token_partitions_(VP8_ONE_TOKENPARTITION),
down_scale_requested_(false),
down_scale_bitrate_(0),
key_frame_request_(kMaxSimulcastStreams, false),
quality_scaler_enabled_(false) {
key_frame_request_(kMaxSimulcastStreams, false) {
uint32_t seed = rtc::Time32();
srand(seed);
@ -253,15 +252,9 @@ int VP8EncoderImpl::SetRateAllocation(const BitrateAllocation& bitrate,
return WEBRTC_VIDEO_CODEC_ERROR;
}
}
quality_scaler_.ReportFramerate(new_framerate);
return WEBRTC_VIDEO_CODEC_OK;
}
void VP8EncoderImpl::OnDroppedFrame() {
if (quality_scaler_enabled_)
quality_scaler_.ReportDroppedFrame();
}
const char* VP8EncoderImpl::ImplementationName() const {
return "libvpx";
}
@ -530,15 +523,6 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
}
rps_.Init();
quality_scaler_.Init(codec_.codecType, codec_.startBitrate, codec_.width,
codec_.height, codec_.maxFramerate);
// Only apply scaling to improve for single-layer streams. The scaling metrics
// use frame drops as a signal and are only applicable when we drop frames.
quality_scaler_enabled_ = encoders_.size() == 1 &&
configurations_[0].rc_dropframe_thresh > 0 &&
codec_.VP8()->automaticResizeOn;
return InitAndSetControlSettings();
}
@ -671,6 +655,9 @@ uint32_t VP8EncoderImpl::MaxIntraTarget(uint32_t optimalBuffersize) {
int VP8EncoderImpl::Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
RTC_DCHECK_EQ(frame.width(), codec_.width);
RTC_DCHECK_EQ(frame.height(), codec_.height);
if (!inited_)
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
if (frame.IsZeroSize())
@ -679,20 +666,6 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
rtc::scoped_refptr<VideoFrameBuffer> input_image = frame.video_frame_buffer();
if (quality_scaler_enabled_) {
quality_scaler_.OnEncodeFrame(frame.width(), frame.height());
input_image = quality_scaler_.GetScaledBuffer(input_image);
if (input_image->width() != codec_.width ||
input_image->height() != codec_.height) {
int ret =
UpdateCodecFrameSize(input_image->width(), input_image->height());
if (ret < 0)
return ret;
}
}
// Since we are extracting raw pointers from |input_image| to
// |raw_images_[0]|, the resolution of these frames must match. Note that
// |input_image| might be scaled from |frame|. In that case, the resolution of
@ -989,9 +962,6 @@ int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
codec_.simulcastStream[stream_idx].height;
encoded_images_[encoder_idx]._encodedWidth =
codec_.simulcastStream[stream_idx].width;
encoded_images_[encoder_idx]
.adapt_reason_.quality_resolution_downscales =
quality_scaler_enabled_ ? quality_scaler_.downscale_shift() : -1;
// Report once per frame (lowest stream always sent).
encoded_images_[encoder_idx].adapt_reason_.bw_resolutions_disabled =
(stream_idx == 0) ? bw_resolutions_disabled : -1;
@ -1006,18 +976,16 @@ int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
}
}
}
if (encoders_.size() == 1 && send_stream_[0]) {
if (encoded_images_[0]._length > 0) {
int qp_128;
vpx_codec_control(&encoders_[0], VP8E_GET_LAST_QUANTIZER, &qp_128);
quality_scaler_.ReportQP(qp_128);
} else {
quality_scaler_.ReportDroppedFrame();
}
}
return result;
}
VideoEncoder::ScalingSettings VP8EncoderImpl::GetScalingSettings() const {
const bool enable_scaling = encoders_.size() == 1 &&
configurations_[0].rc_dropframe_thresh > 0 &&
codec_.VP8().automaticResizeOn;
return VideoEncoder::ScalingSettings(enable_scaling);
}
int VP8EncoderImpl::SetChannelParameters(uint32_t packetLoss, int64_t rtt) {
rps_.SetRtt(rtt);
return WEBRTC_VIDEO_CODEC_OK;

View File

@ -57,7 +57,7 @@ class VP8EncoderImpl : public VP8Encoder {
int SetRateAllocation(const BitrateAllocation& bitrate,
uint32_t new_framerate) override;
void OnDroppedFrame() override;
ScalingSettings GetScalingSettings() const override;
const char* ImplementationName() const override;
@ -116,8 +116,6 @@ class VP8EncoderImpl : public VP8Encoder {
std::vector<vpx_codec_ctx_t> encoders_;
std::vector<vpx_codec_enc_cfg_t> configurations_;
std::vector<vpx_rational_t> downsampling_factors_;
QualityScaler quality_scaler_;
bool quality_scaler_enabled_;
}; // end of VP8EncoderImpl class
class VP8DecoderImpl : public VP8Decoder {

View File

@ -49,8 +49,6 @@ class VP9EncoderImpl : public VP9Encoder {
int SetRateAllocation(const BitrateAllocation& bitrate_allocation,
uint32_t frame_rate) override;
void OnDroppedFrame() override {}
const char* ImplementationName() const override;
struct LayerFrameRefSettings {

View File

@ -131,11 +131,6 @@ bool VCMGenericEncoder::InternalSource() const {
return internal_source_;
}
void VCMGenericEncoder::OnDroppedFrame() {
RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
encoder_->OnDroppedFrame();
}
bool VCMGenericEncoder::SupportsNativeHandle() const {
RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
return encoder_->SupportsNativeHandle();

View File

@ -13,8 +13,11 @@
#include <math.h>
#include <algorithm>
#include <memory>
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/task_queue.h"
// TODO(kthelgason): Some versions of Android have issues with log2.
// See https://code.google.com/p/android/issues/detail?id=212634 for details
@ -26,21 +29,9 @@ namespace webrtc {
namespace {
// Threshold constant used until first downscale (to permit fast rampup).
static const int kMeasureSecondsFastUpscale = 2;
static const int kMeasureSecondsUpscale = 5;
static const int kMeasureSecondsDownscale = 5;
static const int kMeasureMs = 2000;
static const float kSamplePeriodScaleFactor = 2.5;
static const int kFramedropPercentThreshold = 60;
// Min width/height to downscale to, set to not go below QVGA, but with some
// margin to permit "almost-QVGA" resolutions, such as QCIF.
static const int kMinDownscaleDimension = 140;
// Initial resolutions corresponding to a bitrate. A bit above their actual
// values to permit near-VGA and near-QVGA resolutions to use the same
// mechanism.
static const int kVgaBitrateThresholdKbps = 500;
static const int kVgaNumPixels = 700 * 500; // 640x480
static const int kQvgaBitrateThresholdKbps = 250;
static const int kQvgaNumPixels = 400 * 300; // 320x240
// QP scaling threshold defaults:
static const int kLowH264QpThreshold = 24;
static const int kHighH264QpThreshold = 37;
@ -48,20 +39,13 @@ static const int kHighH264QpThreshold = 37;
// bitstream range of [0, 127] and not the user-level range of [0,63].
static const int kLowVp8QpThreshold = 29;
static const int kHighVp8QpThreshold = 95;
} // namespace
const ScalingObserverInterface::ScaleReason scale_reason_ =
ScalingObserverInterface::ScaleReason::kQuality;
// Default values. Should immediately get set to something more sensible.
QualityScaler::QualityScaler()
: average_qp_(kMeasureSecondsUpscale * 30),
framedrop_percent_(kMeasureSecondsUpscale * 30),
low_qp_threshold_(-1) {}
void QualityScaler::Init(VideoCodecType codec_type,
int initial_bitrate_kbps,
int width,
int height,
int fps) {
int low = -1, high = -1;
static VideoEncoder::QpThresholds CodecTypeToDefaultThresholds(
VideoCodecType codec_type) {
int low = -1;
int high = -1;
switch (codec_type) {
case kVideoCodecH264:
low = kLowH264QpThreshold;
@ -74,138 +58,132 @@ void QualityScaler::Init(VideoCodecType codec_type,
default:
RTC_NOTREACHED() << "Invalid codec type for QualityScaler.";
}
Init(low, high, initial_bitrate_kbps, width, height, fps);
return VideoEncoder::QpThresholds(low, high);
}
} // namespace
void QualityScaler::Init(int low_qp_threshold,
int high_qp_threshold,
int initial_bitrate_kbps,
int width,
int height,
int fps) {
ClearSamples();
low_qp_threshold_ = low_qp_threshold;
high_qp_threshold_ = high_qp_threshold;
downscale_shift_ = 0;
fast_rampup_ = true;
const int init_width = width;
const int init_height = height;
if (initial_bitrate_kbps > 0) {
int init_num_pixels = width * height;
if (initial_bitrate_kbps < kVgaBitrateThresholdKbps)
init_num_pixels = kVgaNumPixels;
if (initial_bitrate_kbps < kQvgaBitrateThresholdKbps)
init_num_pixels = kQvgaNumPixels;
while (width * height > init_num_pixels) {
++downscale_shift_;
width /= 2;
height /= 2;
}
class QualityScaler::CheckQPTask : public rtc::QueuedTask {
public:
explicit CheckQPTask(QualityScaler* scaler) : scaler_(scaler) {
LOG(LS_INFO) << "Created CheckQPTask. Scheduling on queue...";
rtc::TaskQueue::Current()->PostDelayedTask(
std::unique_ptr<rtc::QueuedTask>(this), scaler_->GetSamplingPeriodMs());
}
UpdateTargetResolution(init_width, init_height);
ReportFramerate(fps);
void Stop() {
RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
LOG(LS_INFO) << "Stopping QP Check task.";
stop_ = true;
}
private:
bool Run() override {
RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
if (stop_)
return true; // TaskQueue will free this task.
scaler_->CheckQP();
rtc::TaskQueue::Current()->PostDelayedTask(
std::unique_ptr<rtc::QueuedTask>(this), scaler_->GetSamplingPeriodMs());
return false; // Retain the task in order to reuse it.
}
QualityScaler* const scaler_;
bool stop_ = false;
rtc::SequencedTaskChecker task_checker_;
};
QualityScaler::QualityScaler(ScalingObserverInterface* observer,
VideoCodecType codec_type)
: QualityScaler(observer, CodecTypeToDefaultThresholds(codec_type)) {}
QualityScaler::QualityScaler(ScalingObserverInterface* observer,
VideoEncoder::QpThresholds thresholds)
: QualityScaler(observer, thresholds, kMeasureMs) {}
// Protected ctor, should not be called directly.
QualityScaler::QualityScaler(ScalingObserverInterface* observer,
VideoEncoder::QpThresholds thresholds,
int64_t sampling_period)
: check_qp_task_(nullptr),
observer_(observer),
sampling_period_ms_(sampling_period),
fast_rampup_(true),
// Arbitrarily choose size based on 30 fps for 5 seconds.
average_qp_(5 * 30),
framedrop_percent_(5 * 30),
thresholds_(thresholds) {
RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
RTC_DCHECK(observer_ != nullptr);
check_qp_task_ = new CheckQPTask(this);
}
// Report framerate(fps) to estimate # of samples.
void QualityScaler::ReportFramerate(int framerate) {
// Use a faster window for upscaling initially.
// This enables faster initial rampups without risking strong up-down
// behavior later.
num_samples_upscale_ = framerate * (fast_rampup_ ? kMeasureSecondsFastUpscale
: kMeasureSecondsUpscale);
num_samples_downscale_ = framerate * kMeasureSecondsDownscale;
QualityScaler::~QualityScaler() {
RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
check_qp_task_->Stop();
}
int64_t QualityScaler::GetSamplingPeriodMs() const {
RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
return fast_rampup_ ? sampling_period_ms_
: (sampling_period_ms_ * kSamplePeriodScaleFactor);
}
void QualityScaler::ReportDroppedFrame() {
RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
framedrop_percent_.AddSample(100);
}
void QualityScaler::ReportQP(int qp) {
RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
framedrop_percent_.AddSample(0);
average_qp_.AddSample(qp);
}
void QualityScaler::ReportDroppedFrame() {
framedrop_percent_.AddSample(100);
}
void QualityScaler::OnEncodeFrame(int width, int height) {
void QualityScaler::CheckQP() {
RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
// Should be set through InitEncode -> Should be set by now.
RTC_DCHECK_GE(low_qp_threshold_, 0);
if (target_res_.width != width || target_res_.height != height) {
UpdateTargetResolution(width, height);
}
RTC_DCHECK_GE(thresholds_.low, 0);
LOG(LS_INFO) << "Checking if average QP exceeds threshold";
// Check if we should scale down due to high frame drop.
const auto drop_rate = framedrop_percent_.GetAverage(num_samples_downscale_);
const rtc::Optional<int> drop_rate = framedrop_percent_.GetAverage();
if (drop_rate && *drop_rate >= kFramedropPercentThreshold) {
ScaleDown();
ReportQPHigh();
return;
}
// Check if we should scale up or down based on QP.
const auto avg_qp_down = average_qp_.GetAverage(num_samples_downscale_);
if (avg_qp_down && *avg_qp_down > high_qp_threshold_) {
ScaleDown();
const rtc::Optional<int> avg_qp = average_qp_.GetAverage();
if (avg_qp && *avg_qp > thresholds_.high) {
ReportQPHigh();
return;
}
const auto avg_qp_up = average_qp_.GetAverage(num_samples_upscale_);
if (avg_qp_up && *avg_qp_up <= low_qp_threshold_) {
if (avg_qp && *avg_qp <= thresholds_.low) {
// QP has been low. We want to try a higher resolution.
ScaleUp();
ReportQPLow();
return;
}
}
void QualityScaler::ScaleUp() {
downscale_shift_ = std::max(0, downscale_shift_ - 1);
void QualityScaler::ReportQPLow() {
RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
LOG(LS_INFO) << "QP has been low, asking for higher resolution.";
ClearSamples();
observer_->ScaleUp(scale_reason_);
}
void QualityScaler::ScaleDown() {
downscale_shift_ = std::min(maximum_shift_, downscale_shift_ + 1);
void QualityScaler::ReportQPHigh() {
RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
LOG(LS_INFO) << "QP has been high , asking for lower resolution.";
ClearSamples();
observer_->ScaleDown(scale_reason_);
// If we've scaled down, wait longer before scaling up again.
if (fast_rampup_) {
fast_rampup_ = false;
num_samples_upscale_ = (num_samples_upscale_ / kMeasureSecondsFastUpscale) *
kMeasureSecondsUpscale;
}
}
QualityScaler::Resolution QualityScaler::GetScaledResolution() const {
const int frame_width = target_res_.width >> downscale_shift_;
const int frame_height = target_res_.height >> downscale_shift_;
return Resolution{frame_width, frame_height};
}
rtc::scoped_refptr<VideoFrameBuffer> QualityScaler::GetScaledBuffer(
const rtc::scoped_refptr<VideoFrameBuffer>& frame) {
Resolution res = GetScaledResolution();
const int src_width = frame->width();
const int src_height = frame->height();
if (res.width == src_width && res.height == src_height)
return frame;
rtc::scoped_refptr<I420Buffer> scaled_buffer =
pool_.CreateBuffer(res.width, res.height);
scaled_buffer->ScaleFrom(*frame);
return scaled_buffer;
}
void QualityScaler::UpdateTargetResolution(int width, int height) {
if (width < kMinDownscaleDimension || height < kMinDownscaleDimension) {
maximum_shift_ = 0;
} else {
maximum_shift_ = static_cast<int>(
log2(std::min(width, height) / kMinDownscaleDimension));
}
target_res_ = Resolution{width, height};
}
void QualityScaler::ClearSamples() {
RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
framedrop_percent_.Reset();
average_qp_.Reset();
}
} // namespace webrtc

View File

@ -11,61 +11,73 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_QUALITY_SCALER_H_
#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_QUALITY_SCALER_H_
#include <utility>
#include "webrtc/common_types.h"
#include "webrtc/common_video/include/i420_buffer_pool.h"
#include "webrtc/video_encoder.h"
#include "webrtc/base/optional.h"
#include "webrtc/base/sequenced_task_checker.h"
#include "webrtc/modules/video_coding/utility/moving_average.h"
namespace webrtc {
class QualityScaler {
// An interface for a class that receives scale up/down requests.
class ScalingObserverInterface {
public:
struct Resolution {
int width;
int height;
};
enum ScaleReason : size_t { kQuality = 0, kCpu = 1 };
static const size_t kScaleReasonSize = 2;
// Called to signal that we can handle larger frames.
virtual void ScaleUp(ScaleReason reason) = 0;
// Called to signal that the encoder should scale down.
virtual void ScaleDown(ScaleReason reason) = 0;
QualityScaler();
void Init(VideoCodecType codec_type,
int initial_bitrate_kbps,
int width,
int height,
int fps);
void Init(int low_qp_threshold,
int high_qp_threshold,
int initial_bitrate_kbps,
int width,
int height,
int fps);
void ReportFramerate(int framerate);
void ReportQP(int qp);
void ReportDroppedFrame();
void OnEncodeFrame(int width, int height);
Resolution GetScaledResolution() const;
rtc::scoped_refptr<VideoFrameBuffer> GetScaledBuffer(
const rtc::scoped_refptr<VideoFrameBuffer>& frame);
int downscale_shift() const { return downscale_shift_; }
private:
void ClearSamples();
void ScaleUp();
void ScaleDown();
void UpdateTargetResolution(int width, int height);
I420BufferPool pool_;
size_t num_samples_downscale_;
size_t num_samples_upscale_;
bool fast_rampup_;
MovingAverage average_qp_;
MovingAverage framedrop_percent_;
int low_qp_threshold_;
int high_qp_threshold_;
Resolution target_res_;
int downscale_shift_;
int maximum_shift_;
protected:
virtual ~ScalingObserverInterface() {}
};
// QualityScaler runs asynchronously and monitors QP values of encoded frames.
// It holds a reference to a ScalingObserverInterface implementation to signal
// an intent to scale up or down.
class QualityScaler {
public:
// Construct a QualityScaler with a given |observer|.
// This starts the quality scaler periodically checking what the average QP
// has been recently.
QualityScaler(ScalingObserverInterface* observer, VideoCodecType codec_type);
// If specific thresholds are desired these can be supplied as |thresholds|.
QualityScaler(ScalingObserverInterface* observer,
VideoEncoder::QpThresholds thresholds);
virtual ~QualityScaler();
// Should be called each time the encoder drops a frame.
void ReportDroppedFrame();
// Inform the QualityScaler of the last seen QP.
void ReportQP(int qp);
// The following members declared protected for testing purposes
protected:
QualityScaler(ScalingObserverInterface* observer,
VideoEncoder::QpThresholds thresholds,
int64_t sampling_period);
private:
class CheckQPTask;
void CheckQP();
void ClearSamples();
void ReportQPLow();
void ReportQPHigh();
int64_t GetSamplingPeriodMs() const;
CheckQPTask* check_qp_task_ GUARDED_BY(&task_checker_);
ScalingObserverInterface* const observer_ GUARDED_BY(&task_checker_);
rtc::SequencedTaskChecker task_checker_;
const int64_t sampling_period_ms_;
bool fast_rampup_ GUARDED_BY(&task_checker_);
MovingAverage average_qp_ GUARDED_BY(&task_checker_);
MovingAverage framedrop_percent_ GUARDED_BY(&task_checker_);
VideoEncoder::QpThresholds thresholds_ GUARDED_BY(&task_checker_);
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_QUALITY_SCALER_H_

View File

@ -10,28 +10,49 @@
#include "webrtc/modules/video_coding/utility/quality_scaler.h"
#include <memory>
#include "webrtc/base/event.h"
#include "webrtc/base/task_queue.h"
#include "webrtc/test/gmock.h"
#include "webrtc/test/gtest.h"
namespace webrtc {
namespace {
static const int kNumSeconds = 10;
static const int kWidth = 1920;
static const int kHeight = 1080;
static const int kFramerate = 30;
static const int kLowQp = 15;
static const int kNormalQp = 30;
static const int kLowQpThreshold = 18;
static const int kHighQp = 40;
static const int kDisabledBadQpThreshold = 64;
static const int kLowInitialBitrateKbps = 300;
// These values need to be in sync with corresponding constants
// in quality_scaler.cc
static const int kMeasureSecondsFastUpscale = 2;
static const int kMeasureSecondsUpscale = 5;
static const int kMeasureSecondsDownscale = 5;
static const int kMinDownscaleDimension = 140;
static const size_t kDefaultTimeoutMs = 1000;
} // namespace
class MockScaleObserver : public ScalingObserverInterface {
public:
MockScaleObserver() : event(false, false) {}
virtual ~MockScaleObserver() {}
void ScaleUp(ScaleReason r) override {
scaled_up++;
event.Set();
}
void ScaleDown(ScaleReason r) override {
scaled_down++;
event.Set();
}
rtc::Event event;
int scaled_up = 0;
int scaled_down = 0;
};
// Pass a lower sampling period to speed up the tests.
class QualityScalerUnderTest : public QualityScaler {
public:
explicit QualityScalerUnderTest(ScalingObserverInterface* observer,
VideoEncoder::QpThresholds thresholds)
: QualityScaler(observer, thresholds, 5) {}
};
class QualityScalerTest : public ::testing::Test {
protected:
enum ScaleDirection {
@ -41,346 +62,116 @@ class QualityScalerTest : public ::testing::Test {
kScaleUp
};
QualityScalerTest() {
input_frame_ = I420Buffer::Create(kWidth, kHeight);
qs_.Init(kLowQpThreshold, kHighQp, 0, kWidth, kHeight, kFramerate);
qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
QualityScalerTest()
: q_(new rtc::TaskQueue("QualityScalerTestQueue")),
observer_(new MockScaleObserver()) {
rtc::Event event(false, false);
q_->PostTask([this, &event] {
qs_ = std::unique_ptr<QualityScaler>(new QualityScalerUnderTest(
observer_.get(),
VideoEncoder::QpThresholds(kLowQpThreshold, kHighQp)));
event.Set();
});
EXPECT_TRUE(event.Wait(kDefaultTimeoutMs));
}
bool TriggerScale(ScaleDirection scale_direction) {
qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
int initial_width = qs_.GetScaledResolution().width;
for (int i = 0; i < kFramerate * kNumSeconds; ++i) {
~QualityScalerTest() {
rtc::Event event(false, false);
q_->PostTask([this, &event] {
qs_.reset(nullptr);
event.Set();
});
EXPECT_TRUE(event.Wait(kDefaultTimeoutMs));
}
void TriggerScale(ScaleDirection scale_direction) {
for (int i = 0; i < kFramerate * 5; ++i) {
switch (scale_direction) {
case kScaleUp:
qs_.ReportQP(kLowQp);
qs_->ReportQP(kLowQp);
break;
case kScaleDown:
qs_.ReportDroppedFrame();
qs_->ReportDroppedFrame();
break;
case kKeepScaleAtHighQp:
qs_.ReportQP(kHighQp);
qs_->ReportQP(kHighQp);
break;
case kScaleDownAboveHighQp:
qs_.ReportQP(kHighQp + 1);
qs_->ReportQP(kHighQp + 1);
break;
}
qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
if (qs_.GetScaledResolution().width != initial_width)
return true;
}
return false;
}
void ExpectOriginalFrame() {
EXPECT_EQ(input_frame_, qs_.GetScaledBuffer(input_frame_))
<< "Using scaled frame instead of original input.";
}
void ExpectScaleUsingReportedResolution() {
qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
QualityScaler::Resolution res = qs_.GetScaledResolution();
rtc::scoped_refptr<VideoFrameBuffer> scaled_frame =
qs_.GetScaledBuffer(input_frame_);
EXPECT_EQ(res.width, scaled_frame->width());
EXPECT_EQ(res.height, scaled_frame->height());
}
void ContinuouslyDownscalesByHalfDimensionsAndBackUp();
void DoesNotDownscaleFrameDimensions(int width, int height);
void DownscaleEndsAt(int input_width,
int input_height,
int end_width,
int end_height);
QualityScaler qs_;
rtc::scoped_refptr<VideoFrameBuffer> input_frame_;
std::unique_ptr<rtc::TaskQueue> q_;
std::unique_ptr<QualityScaler> qs_;
std::unique_ptr<MockScaleObserver> observer_;
};
TEST_F(QualityScalerTest, UsesOriginalFrameInitially) {
ExpectOriginalFrame();
}
TEST_F(QualityScalerTest, ReportsOriginalResolutionInitially) {
qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
QualityScaler::Resolution res = qs_.GetScaledResolution();
EXPECT_EQ(input_frame_->width(), res.width);
EXPECT_EQ(input_frame_->height(), res.height);
}
TEST_F(QualityScalerTest, DownscalesAfterContinuousFramedrop) {
EXPECT_TRUE(TriggerScale(kScaleDown)) << "No downscale within " << kNumSeconds
<< " seconds.";
QualityScaler::Resolution res = qs_.GetScaledResolution();
EXPECT_LT(res.width, input_frame_->width());
EXPECT_LT(res.height, input_frame_->height());
q_->PostTask([this] { TriggerScale(kScaleDown); });
EXPECT_TRUE(observer_->event.Wait(50));
EXPECT_EQ(1, observer_->scaled_down);
}
TEST_F(QualityScalerTest, KeepsScaleAtHighQp) {
EXPECT_FALSE(TriggerScale(kKeepScaleAtHighQp))
<< "Downscale at high threshold which should keep scale.";
QualityScaler::Resolution res = qs_.GetScaledResolution();
EXPECT_EQ(res.width, input_frame_->width());
EXPECT_EQ(res.height, input_frame_->height());
q_->PostTask([this] { TriggerScale(kKeepScaleAtHighQp); });
EXPECT_FALSE(observer_->event.Wait(50));
EXPECT_EQ(0, observer_->scaled_down);
EXPECT_EQ(0, observer_->scaled_up);
}
TEST_F(QualityScalerTest, DownscalesAboveHighQp) {
EXPECT_TRUE(TriggerScale(kScaleDownAboveHighQp))
<< "No downscale within " << kNumSeconds << " seconds.";
QualityScaler::Resolution res = qs_.GetScaledResolution();
EXPECT_LT(res.width, input_frame_->width());
EXPECT_LT(res.height, input_frame_->height());
q_->PostTask([this] { TriggerScale(kScaleDownAboveHighQp); });
EXPECT_TRUE(observer_->event.Wait(50));
EXPECT_EQ(1, observer_->scaled_down);
EXPECT_EQ(0, observer_->scaled_up);
}
TEST_F(QualityScalerTest, DownscalesAfterTwoThirdsFramedrop) {
for (int i = 0; i < kFramerate * kNumSeconds / 3; ++i) {
qs_.ReportQP(kNormalQp);
qs_.ReportDroppedFrame();
qs_.ReportDroppedFrame();
qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
if (qs_.GetScaledResolution().width < input_frame_->width())
return;
}
FAIL() << "No downscale within " << kNumSeconds << " seconds.";
q_->PostTask([this] {
qs_->ReportDroppedFrame();
qs_->ReportDroppedFrame();
qs_->ReportQP(kHighQp);
});
EXPECT_TRUE(observer_->event.Wait(50));
EXPECT_EQ(1, observer_->scaled_down);
EXPECT_EQ(0, observer_->scaled_up);
}
TEST_F(QualityScalerTest, DoesNotDownscaleOnNormalQp) {
for (int i = 0; i < kFramerate * kNumSeconds; ++i) {
qs_.ReportQP(kNormalQp);
qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
ASSERT_EQ(input_frame_->width(), qs_.GetScaledResolution().width)
<< "Unexpected scale on half framedrop.";
}
q_->PostTask([this] { TriggerScale(kScaleDownAboveHighQp); });
EXPECT_TRUE(observer_->event.Wait(50));
EXPECT_EQ(1, observer_->scaled_down);
EXPECT_EQ(0, observer_->scaled_up);
}
TEST_F(QualityScalerTest, DoesNotDownscaleAfterHalfFramedrop) {
for (int i = 0; i < kFramerate * kNumSeconds / 2; ++i) {
qs_.ReportQP(kNormalQp);
qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
ASSERT_EQ(input_frame_->width(), qs_.GetScaledResolution().width)
<< "Unexpected scale on half framedrop.";
qs_.ReportDroppedFrame();
qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
ASSERT_EQ(input_frame_->width(), qs_.GetScaledResolution().width)
<< "Unexpected scale on half framedrop.";
}
q_->PostTask([this] {
qs_->ReportDroppedFrame();
qs_->ReportQP(kHighQp);
});
EXPECT_FALSE(observer_->event.Wait(50));
EXPECT_EQ(0, observer_->scaled_down);
EXPECT_EQ(0, observer_->scaled_up);
}
// Halves both dimensions step by step until the smaller dimension reaches the
// minimum downscale limit, then scales back up to the original size with
// good-quality frames, and finally verifies that further low-QP reports never
// upscale past the original resolution.
// NOTE(review): the original was missing its closing brace (the next TEST_F
// started while this function body was still open); brace restored.
void QualityScalerTest::ContinuouslyDownscalesByHalfDimensionsAndBackUp() {
  const int initial_min_dimension =
      input_frame_->width() < input_frame_->height() ? input_frame_->width()
                                                     : input_frame_->height();
  int min_dimension = initial_min_dimension;
  int current_shift = 0;
  // Drop all frames to force-trigger downscaling.
  while (min_dimension >= 2 * kMinDownscaleDimension) {
    EXPECT_TRUE(TriggerScale(kScaleDown)) << "No downscale within "
                                          << kNumSeconds << " seconds.";
    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
    QualityScaler::Resolution res = qs_.GetScaledResolution();
    min_dimension = res.width < res.height ? res.width : res.height;
    ++current_shift;
    // Each step must halve both dimensions exactly.
    ASSERT_EQ(input_frame_->width() >> current_shift, res.width);
    ASSERT_EQ(input_frame_->height() >> current_shift, res.height);
    ExpectScaleUsingReportedResolution();
  }
  // Make sure we can scale back with good-quality frames.
  while (min_dimension < initial_min_dimension) {
    EXPECT_TRUE(TriggerScale(kScaleUp)) << "No upscale within " << kNumSeconds
                                        << " seconds.";
    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
    QualityScaler::Resolution res = qs_.GetScaledResolution();
    min_dimension = res.width < res.height ? res.width : res.height;
    --current_shift;
    ASSERT_EQ(input_frame_->width() >> current_shift, res.width);
    ASSERT_EQ(input_frame_->height() >> current_shift, res.height);
    ExpectScaleUsingReportedResolution();
  }
  // Verify we don't start upscaling after further low use.
  for (int i = 0; i < kFramerate * kNumSeconds; ++i) {
    qs_.ReportQP(kLowQp);
    ExpectOriginalFrame();
  }
}
// Sustained low-QP reports must produce exactly one scale-up request and no
// scale-down request.
TEST_F(QualityScalerTest, UpscalesAfterLowQp) {
  const int kWaitTimeoutMs = 50;
  q_->PostTask([this] { TriggerScale(kScaleUp); });
  EXPECT_TRUE(observer_->event.Wait(kWaitTimeoutMs));
  EXPECT_EQ(1, observer_->scaled_up);
  EXPECT_EQ(0, observer_->scaled_down);
}
// Runs the shared halving/upscaling scenario on the default frame size.
TEST_F(QualityScalerTest, ContinuouslyDownscalesByHalfDimensionsAndBackUp) {
  ContinuouslyDownscalesByHalfDimensionsAndBackUp();
}
// Same halving/upscaling scenario, but starting from dimensions that are not
// divisible by two, to exercise rounding in the scaler.
TEST_F(QualityScalerTest,
       ContinuouslyDownscalesOddResolutionsByHalfDimensionsAndBackUp) {
  const int kNonEvenWidth = 517;
  const int kNonEvenHeight = 1239;
  input_frame_ = I420Buffer::Create(kNonEvenWidth, kNonEvenHeight);
  ContinuouslyDownscalesByHalfDimensionsAndBackUp();
}
// Feeds |width| x |height| frames with every frame dropped for kNumSeconds
// and asserts that the scaler never requests a smaller resolution.
void QualityScalerTest::DoesNotDownscaleFrameDimensions(int width, int height) {
  input_frame_ = I420Buffer::Create(width, height);
  const int kTotalFrames = kFramerate * kNumSeconds;
  for (int frame = 0; frame < kTotalFrames; ++frame) {
    qs_.ReportDroppedFrame();
    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
    ASSERT_EQ(input_frame_->width(), qs_.GetScaledResolution().width)
        << "Unexpected scale of minimal-size frame.";
  }
}
// A 1-pixel-wide frame cannot be halved; it must never be downscaled.
TEST_F(QualityScalerTest, DoesNotDownscaleFrom1PxWidth) {
  DoesNotDownscaleFrameDimensions(1, kHeight);
}
// A 1-pixel-tall frame cannot be halved; it must never be downscaled.
TEST_F(QualityScalerTest, DoesNotDownscaleFrom1PxHeight) {
  DoesNotDownscaleFrameDimensions(kWidth, 1);
}
// A 1x1 frame is the degenerate case; it must never be downscaled.
TEST_F(QualityScalerTest, DoesNotDownscaleFrom1Px) {
  DoesNotDownscaleFrameDimensions(1, 1);
}
// Width just under twice the minimum: halving would undershoot the floor,
// so no downscale may happen.
TEST_F(QualityScalerTest, DoesNotDownscaleBelow2xDefaultMinDimensionsWidth) {
  DoesNotDownscaleFrameDimensions(2 * kMinDownscaleDimension - 1, 1000);
}
// Height just under twice the minimum: halving would undershoot the floor,
// so no downscale may happen.
TEST_F(QualityScalerTest, DoesNotDownscaleBelow2xDefaultMinDimensionsHeight) {
  DoesNotDownscaleFrameDimensions(1000, 2 * kMinDownscaleDimension - 1);
}
// A 720p stream started at only 300 kbps must be scaled down to VGA-class
// 640x360 immediately, before any QP/framedrop feedback is reported.
TEST_F(QualityScalerTest, DownscaleToVgaOnLowInitialBitrate) {
  static const int kWidth720p = 1280;
  static const int kHeight720p = 720;
  static const int kInitialBitrateKbps = 300;
  input_frame_ = I420Buffer::Create(kWidth720p, kHeight720p);
  qs_.Init(kLowQpThreshold, kDisabledBadQpThreshold, kInitialBitrateKbps,
           kWidth720p, kHeight720p, kFramerate);
  qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
  const QualityScaler::Resolution scaled = qs_.GetScaledResolution();
  EXPECT_EQ(640, scaled.width);
  EXPECT_EQ(360, scaled.height);
}
// At an even lower start bitrate (200 kbps), the same 720p stream must be
// scaled down two steps, to QVGA-class 320x180.
TEST_F(QualityScalerTest, DownscaleToQvgaOnLowerInitialBitrate) {
  static const int kWidth720p = 1280;
  static const int kHeight720p = 720;
  static const int kInitialBitrateKbps = 200;
  input_frame_ = I420Buffer::Create(kWidth720p, kHeight720p);
  qs_.Init(kLowQpThreshold, kDisabledBadQpThreshold, kInitialBitrateKbps,
           kWidth720p, kHeight720p, kFramerate);
  qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
  const QualityScaler::Resolution scaled = qs_.GetScaledResolution();
  EXPECT_EQ(320, scaled.width);
  EXPECT_EQ(180, scaled.height);
}
// Verifies the measurement windows: high QP must be sustained for
// kMeasureSecondsDownscale seconds before a downscale, and after a quality
// problem was seen, low QP must be sustained for the (longer)
// kMeasureSecondsUpscale window before scaling back up.
// The statement order below is load-bearing: each ReportQP/OnEncodeFrame
// pair advances the scaler's internal frame-based clock by one frame.
TEST_F(QualityScalerTest, DownscaleAfterMeasuredSecondsThenSlowerBackUp) {
  qs_.Init(kLowQpThreshold, kHighQp, 0, kWidth, kHeight, kFramerate);
  qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
  QualityScaler::Resolution initial_res = qs_.GetScaledResolution();

  // Should not downscale if less than kMeasureSecondsDownscale seconds passed.
  for (int i = 0; i < kFramerate * kMeasureSecondsDownscale - 1; ++i) {
    qs_.ReportQP(kHighQp + 1);
    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
  }
  EXPECT_EQ(initial_res.width, qs_.GetScaledResolution().width);
  EXPECT_EQ(initial_res.height, qs_.GetScaledResolution().height);

  // Should downscale if more than kMeasureSecondsDownscale seconds passed (add
  // last frame).
  qs_.ReportQP(kHighQp + 1);
  qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
  EXPECT_GT(initial_res.width, qs_.GetScaledResolution().width);
  EXPECT_GT(initial_res.height, qs_.GetScaledResolution().height);

  // Should not upscale if less than kMeasureSecondsUpscale seconds passed since
  // we saw issues initially (have already gone down).
  for (int i = 0; i < kFramerate * kMeasureSecondsUpscale - 1; ++i) {
    qs_.ReportQP(kLowQp);
    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
  }
  EXPECT_GT(initial_res.width, qs_.GetScaledResolution().width);
  EXPECT_GT(initial_res.height, qs_.GetScaledResolution().height);

  // Should upscale (back to initial) if kMeasureSecondsUpscale seconds passed
  // (add last frame).
  qs_.ReportQP(kLowQp);
  qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
  EXPECT_EQ(initial_res.width, qs_.GetScaledResolution().width);
  EXPECT_EQ(initial_res.height, qs_.GetScaledResolution().height);
}
// When the stream starts downscaled due to a low initial bitrate (and no
// quality problem has been observed yet), upscaling uses the shorter
// kMeasureSecondsFastUpscale window instead of the regular one.
TEST_F(QualityScalerTest, UpscaleQuicklyInitiallyAfterMeasuredSeconds) {
  qs_.Init(kLowQpThreshold, kHighQp, kLowInitialBitrateKbps, kWidth, kHeight,
           kFramerate);
  qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
  QualityScaler::Resolution initial_res = qs_.GetScaledResolution();

  // Should not upscale if less than kMeasureSecondsFastUpscale seconds passed.
  for (int i = 0; i < kFramerate * kMeasureSecondsFastUpscale - 1; ++i) {
    qs_.ReportQP(kLowQp);
    qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
  }
  EXPECT_EQ(initial_res.width, qs_.GetScaledResolution().width);
  EXPECT_EQ(initial_res.height, qs_.GetScaledResolution().height);

  // Should upscale if kMeasureSecondsFastUpscale seconds passed (add last
  // frame).
  qs_.ReportQP(kLowQp);
  qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
  EXPECT_LT(initial_res.width, qs_.GetScaledResolution().width);
  EXPECT_LT(initial_res.height, qs_.GetScaledResolution().height);
}
void QualityScalerTest::DownscaleEndsAt(int input_width,
int input_height,
int end_width,
int end_height) {
// Create a frame with 2x expected end width/height to verify that we can
// scale down to expected end width/height.
input_frame_ = I420Buffer::Create(input_width, input_height);
int last_width = input_width;
int last_height = input_height;
// Drop all frames to force-trigger downscaling.
while (true) {
TriggerScale(kScaleDown);
QualityScaler::Resolution res = qs_.GetScaledResolution();
if (last_width == res.width) {
EXPECT_EQ(last_height, res.height);
EXPECT_EQ(end_width, res.width);
EXPECT_EQ(end_height, res.height);
break;
}
last_width = res.width;
last_height = res.height;
}
}
// Landscape VGA must bottom out at 320x180.
TEST_F(QualityScalerTest, DownscalesTo320x180) {
  DownscaleEndsAt(640, 360, 320, 180);
}
// Portrait orientation must bottom out at the transposed limit, 180x320.
TEST_F(QualityScalerTest, DownscalesTo180x320) {
  DownscaleEndsAt(360, 640, 180, 320);
}
// 720p must reach the same 320x180 floor after multiple halving steps.
TEST_F(QualityScalerTest, DownscalesFrom1280x720To320x180) {
  DownscaleEndsAt(1280, 720, 320, 180);
}
// 320x180 is already at the floor, so the downscale loop must end where it
// started.
// NOTE(review): restored the missing closing brace — the next TEST_F began
// while this body was still open.
TEST_F(QualityScalerTest, DoesntDownscaleInitialQvga) {
  DownscaleEndsAt(320, 180, 320, 180);
}
// A scale-down trigger followed by a scale-up trigger must each be reported
// to the observer exactly once, in that order.
TEST_F(QualityScalerTest, ScalesDownAndBackUp) {
  const int kWaitTimeoutMs = 50;
  q_->PostTask([this] { TriggerScale(kScaleDown); });
  EXPECT_TRUE(observer_->event.Wait(kWaitTimeoutMs));
  EXPECT_EQ(1, observer_->scaled_down);
  EXPECT_EQ(0, observer_->scaled_up);
  // Now report good quality and expect a single matching upscale request.
  q_->PostTask([this] { TriggerScale(kScaleUp); });
  EXPECT_TRUE(observer_->event.Wait(kWaitTimeoutMs));
  EXPECT_EQ(1, observer_->scaled_up);
  EXPECT_EQ(1, observer_->scaled_down);
}
} // namespace webrtc

View File

@ -115,6 +115,7 @@ class VideoSender : public Module {
VCMGenericEncoder* _encoder;
media_optimization::MediaOptimization _mediaOpt;
VCMEncodedFrameCallback _encodedFrameCallback GUARDED_BY(encoder_crit_);
EncodedImageCallback* const post_encode_callback_;
VCMSendStatisticsCallback* const send_stats_callback_;
VCMCodecDataBase _codecDataBase GUARDED_BY(encoder_crit_);
bool frame_dropper_enabled_ GUARDED_BY(encoder_crit_);

View File

@ -34,6 +34,7 @@ VideoSender::VideoSender(Clock* clock,
_encoder(nullptr),
_mediaOpt(clock_),
_encodedFrameCallback(post_encode_callback, &_mediaOpt),
post_encode_callback_(post_encode_callback),
send_stats_callback_(send_stats_callback),
_codecDataBase(&_encodedFrameCallback),
frame_dropper_enabled_(true),
@ -310,7 +311,7 @@ int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
<< " loss rate " << encoder_params.loss_rate << " rtt "
<< encoder_params.rtt << " input frame rate "
<< encoder_params.input_frame_rate;
_encoder->OnDroppedFrame();
post_encode_callback_->OnDroppedFrame();
return VCM_OK;
}
// TODO(pbos): Make sure setting send codec is synchronized with video