Remove remaining quality-analysis (QM).

This was never turned on, contains a lot of complexity and somehow
manages to trigger a bug in a downstream project.

BUG=webrtc:5066
R=marpan@webrtc.org
TBR=mflodman@webrtc.org

Review URL: https://codereview.webrtc.org/1917323002 .

Cr-Commit-Position: refs/heads/master@{#12692}
This commit is contained in:
Peter Boström
2016-05-12 03:01:31 +02:00
parent 919288f6ba
commit ad6fc5a05c
34 changed files with 30 additions and 3991 deletions

View File

@ -14,8 +14,6 @@ source_set("video_coding") {
"codec_database.h",
"codec_timer.cc",
"codec_timer.h",
"content_metrics_processing.cc",
"content_metrics_processing.h",
"decoding_state.cc",
"decoding_state.h",
"encoded_frame.cc",
@ -54,9 +52,6 @@ source_set("video_coding") {
"packet_buffer.h",
"percentile_filter.cc",
"percentile_filter.h",
"qm_select.cc",
"qm_select.h",
"qm_select_data.h",
"receiver.cc",
"receiver.h",
"rtt_filter.cc",

View File

@ -1,126 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/content_metrics_processing.h"
#include <math.h>
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/video_coding/include/video_coding_defines.h"
namespace webrtc {
//////////////////////////////////
/// VCMContentMetricsProcessing //
//////////////////////////////////
VCMContentMetricsProcessing::VCMContentMetricsProcessing()
: recursive_avg_factor_(1 / 150.0f), // matched to 30fps.
frame_cnt_uniform_avg_(0),
avg_motion_level_(0.0f),
avg_spatial_level_(0.0f) {
recursive_avg_ = new VideoContentMetrics();
uniform_avg_ = new VideoContentMetrics();
}
VCMContentMetricsProcessing::~VCMContentMetricsProcessing() {
delete recursive_avg_;
delete uniform_avg_;
}
int VCMContentMetricsProcessing::Reset() {
recursive_avg_->Reset();
uniform_avg_->Reset();
frame_cnt_uniform_avg_ = 0;
avg_motion_level_ = 0.0f;
avg_spatial_level_ = 0.0f;
return VCM_OK;
}
void VCMContentMetricsProcessing::UpdateFrameRate(uint32_t frameRate) {
if (frameRate == 0)
frameRate = 1;
// Update factor for recursive averaging.
recursive_avg_factor_ = static_cast<float>(1000.0f) /
static_cast<float>(frameRate * kQmMinIntervalMs);
}
VideoContentMetrics* VCMContentMetricsProcessing::LongTermAvgData() {
return recursive_avg_;
}
VideoContentMetrics* VCMContentMetricsProcessing::ShortTermAvgData() {
if (frame_cnt_uniform_avg_ == 0) {
return NULL;
}
// Two metrics are used: motion and spatial level.
uniform_avg_->motion_magnitude =
avg_motion_level_ / static_cast<float>(frame_cnt_uniform_avg_);
uniform_avg_->spatial_pred_err =
avg_spatial_level_ / static_cast<float>(frame_cnt_uniform_avg_);
return uniform_avg_;
}
void VCMContentMetricsProcessing::ResetShortTermAvgData() {
// Reset.
avg_motion_level_ = 0.0f;
avg_spatial_level_ = 0.0f;
frame_cnt_uniform_avg_ = 0;
}
int VCMContentMetricsProcessing::UpdateContentData(
const VideoContentMetrics* contentMetrics) {
if (contentMetrics == NULL) {
return VCM_OK;
}
return ProcessContent(contentMetrics);
}
int VCMContentMetricsProcessing::ProcessContent(
const VideoContentMetrics* contentMetrics) {
// Update the recursive averaged metrics: average is over longer window
// of time: over QmMinIntervalMs ms.
UpdateRecursiveAvg(contentMetrics);
// Update the uniform averaged metrics: average is over shorter window
// of time: based on ~RTCP reports.
UpdateUniformAvg(contentMetrics);
return VCM_OK;
}
void VCMContentMetricsProcessing::UpdateUniformAvg(
const VideoContentMetrics* contentMetrics) {
// Update frame counter.
frame_cnt_uniform_avg_ += 1;
// Update averaged metrics: motion and spatial level are used.
avg_motion_level_ += contentMetrics->motion_magnitude;
avg_spatial_level_ += contentMetrics->spatial_pred_err;
return;
}
void VCMContentMetricsProcessing::UpdateRecursiveAvg(
const VideoContentMetrics* contentMetrics) {
// Spatial metrics: 2x2, 1x2(H), 2x1(V).
recursive_avg_->spatial_pred_err =
(1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err +
recursive_avg_factor_ * contentMetrics->spatial_pred_err;
recursive_avg_->spatial_pred_err_h =
(1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err_h +
recursive_avg_factor_ * contentMetrics->spatial_pred_err_h;
recursive_avg_->spatial_pred_err_v =
(1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err_v +
recursive_avg_factor_ * contentMetrics->spatial_pred_err_v;
// Motion metric: Derived from NFD (normalized frame difference).
recursive_avg_->motion_magnitude =
(1 - recursive_avg_factor_) * recursive_avg_->motion_magnitude +
recursive_avg_factor_ * contentMetrics->motion_magnitude;
}
} // namespace webrtc

View File

@ -1,72 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_
#define WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_
#include "webrtc/typedefs.h"
namespace webrtc {
struct VideoContentMetrics;
// QM interval time (in ms)
enum { kQmMinIntervalMs = 10000 };
// Flag for NFD metric vs motion metric
enum { kNfdMetric = 1 };
/**********************************/
/* Content Metrics Processing */
/**********************************/
class VCMContentMetricsProcessing {
public:
VCMContentMetricsProcessing();
~VCMContentMetricsProcessing();
// Update class with latest metrics.
int UpdateContentData(const VideoContentMetrics* contentMetrics);
// Reset the short-term averaged content data.
void ResetShortTermAvgData();
// Initialize.
int Reset();
// Inform class of current frame rate.
void UpdateFrameRate(uint32_t frameRate);
// Returns the long-term averaged content data: recursive average over longer
// time scale.
VideoContentMetrics* LongTermAvgData();
// Returns the short-term averaged content data: uniform average over
// shorter time scalE.
VideoContentMetrics* ShortTermAvgData();
private:
// Compute working average.
int ProcessContent(const VideoContentMetrics* contentMetrics);
// Update the recursive averaged metrics: longer time average (~5/10 secs).
void UpdateRecursiveAvg(const VideoContentMetrics* contentMetrics);
// Update the uniform averaged metrics: shorter time average (~RTCP report).
void UpdateUniformAvg(const VideoContentMetrics* contentMetrics);
VideoContentMetrics* recursive_avg_;
VideoContentMetrics* uniform_avg_;
float recursive_avg_factor_;
uint32_t frame_cnt_uniform_avg_;
float avg_motion_level_;
float avg_spatial_level_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_

View File

@ -31,6 +31,10 @@ namespace webrtc {
class Clock;
class EncodedImageCallback;
// TODO(pbos): Remove VCMQMSettingsCallback completely. This might be done by
// removing the VCM and use VideoSender/VideoReceiver as a public interface
// directly.
class VCMQMSettingsCallback;
class VideoEncoder;
class VideoDecoder;
struct CodecSpecificInfo;
@ -223,7 +227,6 @@ class VideoCodingModule : public Module {
// < 0, on error.
virtual int32_t AddVideoFrame(
const VideoFrame& videoFrame,
const VideoContentMetrics* contentMetrics = NULL,
const CodecSpecificInfo* codecSpecificInfo = NULL) = 0;
// Next frame encoded should be an intra frame (keyframe).

View File

@ -176,18 +176,6 @@ class KeyFrameRequestSender {
virtual ~KeyFrameRequestSender() {}
};
// Callback used to inform the user of the the desired resolution
// as subscribed by Media Optimization (Quality Modes)
class VCMQMSettingsCallback {
public:
virtual int32_t SetVideoQMSettings(const uint32_t frameRate,
const uint32_t width,
const uint32_t height) = 0;
protected:
virtual ~VCMQMSettingsCallback() {}
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_

View File

@ -18,7 +18,6 @@
#include "webrtc/base/exp_filter.h"
#include "webrtc/modules/video_coding/internal_defines.h"
#include "webrtc/modules/video_coding/qm_select.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/typedefs.h"

View File

@ -11,8 +11,6 @@
#include "webrtc/modules/video_coding/media_optimization.h"
#include "webrtc/base/logging.h"
#include "webrtc/modules/video_coding/content_metrics_processing.h"
#include "webrtc/modules/video_coding/qm_select.h"
#include "webrtc/modules/video_coding/utility/frame_dropper.h"
#include "webrtc/system_wrappers/include/clock.h"
@ -81,16 +79,11 @@ MediaOptimization::MediaOptimization(Clock* clock)
max_payload_size_(1460),
video_target_bitrate_(0),
incoming_frame_rate_(0),
enable_qm_(false),
encoded_frame_samples_(),
avg_sent_bit_rate_bps_(0),
avg_sent_framerate_(0),
key_frame_cnt_(0),
delta_frame_cnt_(0),
content_(new VCMContentMetricsProcessing()),
qm_resolution_(new VCMQmResolution()),
last_qm_update_time_(0),
last_change_time_(0),
num_layers_(0),
suspension_enabled_(false),
video_suspended_(false),
@ -113,8 +106,6 @@ void MediaOptimization::Reset() {
frame_dropper_->Reset();
loss_prot_logic_->Reset(clock_->TimeInMilliseconds());
frame_dropper_->SetRates(0, 0);
content_->Reset();
qm_resolution_->Reset();
loss_prot_logic_->UpdateFrameRate(incoming_frame_rate_);
loss_prot_logic_->Reset(clock_->TimeInMilliseconds());
send_statistics_zero_encode_ = 0;
@ -124,8 +115,6 @@ void MediaOptimization::Reset() {
user_frame_rate_ = 0;
key_frame_cnt_ = 0;
delta_frame_cnt_ = 0;
last_qm_update_time_ = 0;
last_change_time_ = 0;
encoded_frame_samples_.clear();
avg_sent_bit_rate_bps_ = 0;
num_layers_ = 1;
@ -153,12 +142,7 @@ void MediaOptimization::SetEncodingDataInternal(VideoCodecType send_codec_type,
int num_layers,
int32_t mtu) {
// Everything codec specific should be reset here since this means the codec
// has changed. If native dimension values have changed, then either user
// initiated change, or QM initiated change. Will be able to determine only
// after the processing of the first frame.
last_change_time_ = clock_->TimeInMilliseconds();
content_->Reset();
content_->UpdateFrameRate(frame_rate);
// has changed.
max_bit_rate_ = max_bit_rate;
send_codec_type_ = send_codec_type;
@ -175,16 +159,13 @@ void MediaOptimization::SetEncodingDataInternal(VideoCodecType send_codec_type,
codec_height_ = height;
num_layers_ = (num_layers <= 1) ? 1 : num_layers; // Can also be zero.
max_payload_size_ = mtu;
qm_resolution_->Initialize(target_bitrate_kbps, user_frame_rate_,
codec_width_, codec_height_, num_layers_);
}
uint32_t MediaOptimization::SetTargetRates(
uint32_t target_bitrate,
uint8_t fraction_lost,
int64_t round_trip_time_ms,
VCMProtectionCallback* protection_callback,
VCMQMSettingsCallback* qmsettings_callback) {
VCMProtectionCallback* protection_callback) {
CriticalSectionScoped lock(crit_sect_.get());
VCMProtectionMethod* selected_method = loss_prot_logic_->SelectedMethod();
float target_bitrate_kbps = static_cast<float>(target_bitrate) / 1000.0f;
@ -220,7 +201,6 @@ uint32_t MediaOptimization::SetTargetRates(
float protection_overhead_rate = 0.0f;
// Update protection settings, when applicable.
float sent_video_rate_kbps = 0.0f;
if (loss_prot_logic_->SelectedType() != kNone) {
// Update method will compute the robustness settings for the given
// protection method and the overhead cost
@ -255,7 +235,6 @@ uint32_t MediaOptimization::SetTargetRates(
// Get the effective packet loss for encoder ER when applicable. Should be
// passed to encoder via fraction_lost.
packet_loss_enc = selected_method->RequiredPacketLossER();
sent_video_rate_kbps = static_cast<float>(sent_video_rate_bps) / 1000.0f;
}
// Source coding rate: total rate - protection overhead.
@ -271,19 +250,6 @@ uint32_t MediaOptimization::SetTargetRates(
static_cast<float>(video_target_bitrate_) / 1000.0f;
frame_dropper_->SetRates(target_video_bitrate_kbps, incoming_frame_rate_);
if (enable_qm_ && qmsettings_callback) {
// Update QM with rates.
qm_resolution_->UpdateRates(target_video_bitrate_kbps, sent_video_rate_kbps,
incoming_frame_rate_, fraction_lost_);
// Check for QM selection.
bool select_qm = CheckStatusForQMchange();
if (select_qm) {
SelectQuality(qmsettings_callback);
}
// Reset the short-term averaged content data.
content_->ResetShortTermAvgData();
}
CheckSuspendConditions();
return video_target_bitrate_;
@ -357,11 +323,6 @@ int32_t MediaOptimization::UpdateWithEncodedData(
loss_prot_logic_->UpdatePacketsPerFrameKey(
min_packets_per_frame, clock_->TimeInMilliseconds());
}
if (enable_qm_) {
// Update quality select with encoded length.
qm_resolution_->UpdateEncodedSize(encoded_length);
}
}
if (!delta_frame && encoded_length > 0) {
loss_prot_logic_->UpdateKeyFrameSize(static_cast<float>(encoded_length));
@ -378,11 +339,6 @@ int32_t MediaOptimization::UpdateWithEncodedData(
return VCM_OK;
}
void MediaOptimization::EnableQM(bool enable) {
CriticalSectionScoped lock(crit_sect_.get());
enable_qm_ = enable;
}
void MediaOptimization::EnableFrameDropper(bool enable) {
CriticalSectionScoped lock(crit_sect_.get());
frame_dropper_->Enable(enable);
@ -414,19 +370,6 @@ bool MediaOptimization::DropFrame() {
return frame_dropper_->DropFrame();
}
void MediaOptimization::UpdateContentData(
const VideoContentMetrics* content_metrics) {
CriticalSectionScoped lock(crit_sect_.get());
// Updating content metrics.
if (content_metrics == NULL) {
// Disable QM if metrics are NULL.
enable_qm_ = false;
qm_resolution_->Reset();
} else {
content_->UpdateContentData(content_metrics);
}
}
void MediaOptimization::UpdateIncomingFrameRate() {
int64_t now = clock_->TimeInMilliseconds();
if (incoming_frame_times_[0] == 0) {
@ -441,36 +384,6 @@ void MediaOptimization::UpdateIncomingFrameRate() {
ProcessIncomingFrameRate(now);
}
int32_t MediaOptimization::SelectQuality(
VCMQMSettingsCallback* video_qmsettings_callback) {
// Reset quantities for QM select.
qm_resolution_->ResetQM();
// Update QM will long-term averaged content metrics.
qm_resolution_->UpdateContent(content_->LongTermAvgData());
// Select quality mode.
VCMResolutionScale* qm = NULL;
int32_t ret = qm_resolution_->SelectResolution(&qm);
if (ret < 0) {
return ret;
}
// Check for updates to spatial/temporal modes.
QMUpdate(qm, video_qmsettings_callback);
// Reset all the rate and related frame counters quantities.
qm_resolution_->ResetRates();
// Reset counters.
last_qm_update_time_ = clock_->TimeInMilliseconds();
// Reset content metrics.
content_->Reset();
return VCM_OK;
}
void MediaOptimization::PurgeOldFrameSamples(int64_t now_ms) {
while (!encoded_frame_samples_.empty()) {
if (now_ms - encoded_frame_samples_.front().time_complete_ms >
@ -517,65 +430,6 @@ void MediaOptimization::UpdateSentFramerate() {
}
}
bool MediaOptimization::QMUpdate(
VCMResolutionScale* qm,
VCMQMSettingsCallback* video_qmsettings_callback) {
// Check for no change.
if (!qm->change_resolution_spatial && !qm->change_resolution_temporal) {
return false;
}
// Check for change in frame rate.
if (qm->change_resolution_temporal) {
incoming_frame_rate_ = qm->frame_rate;
// Reset frame rate estimate.
memset(incoming_frame_times_, -1, sizeof(incoming_frame_times_));
}
// Check for change in frame size.
if (qm->change_resolution_spatial) {
codec_width_ = qm->codec_width;
codec_height_ = qm->codec_height;
}
LOG(LS_INFO) << "Media optimizer requests the video resolution to be changed "
"to "
<< qm->codec_width << "x" << qm->codec_height << "@"
<< qm->frame_rate;
// Update VPM with new target frame rate and frame size.
// Note: use |qm->frame_rate| instead of |_incoming_frame_rate| for updating
// target frame rate in VPM frame dropper. The quantity |_incoming_frame_rate|
// will vary/fluctuate, and since we don't want to change the state of the
// VPM frame dropper, unless a temporal action was selected, we use the
// quantity |qm->frame_rate| for updating.
video_qmsettings_callback->SetVideoQMSettings(qm->frame_rate, codec_width_,
codec_height_);
content_->UpdateFrameRate(qm->frame_rate);
qm_resolution_->UpdateCodecParameters(qm->frame_rate, codec_width_,
codec_height_);
return true;
}
// Check timing constraints and look for significant change in:
// (1) scene content,
// (2) target bit rate.
bool MediaOptimization::CheckStatusForQMchange() {
bool status = true;
// Check that we do not call QMSelect too often, and that we waited some time
// (to sample the metrics) from the event last_change_time
// last_change_time is the time where user changed the size/rate/frame rate
// (via SetEncodingData).
int64_t now = clock_->TimeInMilliseconds();
if ((now - last_qm_update_time_) < kQmMinIntervalMs ||
(now - last_change_time_) < kQmMinIntervalMs) {
status = false;
}
return status;
}
// Allowing VCM to keep track of incoming frame rate.
void MediaOptimization::ProcessIncomingFrameRate(int64_t now) {
int32_t num = 0;

View File

@ -17,7 +17,6 @@
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/video_coding/include/video_coding.h"
#include "webrtc/modules/video_coding/media_opt_util.h"
#include "webrtc/modules/video_coding/qm_select.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
@ -59,11 +58,9 @@ class MediaOptimization {
uint32_t SetTargetRates(uint32_t target_bitrate,
uint8_t fraction_lost,
int64_t round_trip_time_ms,
VCMProtectionCallback* protection_callback,
VCMQMSettingsCallback* qmsettings_callback);
VCMProtectionCallback* protection_callback);
void SetProtectionMethod(VCMProtectionMethodEnum method);
void EnableQM(bool enable);
void EnableFrameDropper(bool enable);
// Lets the sender suspend video when the rate drops below
@ -74,8 +71,6 @@ class MediaOptimization {
bool DropFrame();
void UpdateContentData(const VideoContentMetrics* content_metrics);
// Informs Media Optimization of encoded output.
int32_t UpdateWithEncodedData(const EncodedImage& encoded_image);
@ -98,19 +93,6 @@ class MediaOptimization {
void UpdateSentBitrate(int64_t now_ms) EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
void UpdateSentFramerate() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Computes new Quality Mode.
int32_t SelectQuality(VCMQMSettingsCallback* qmsettings_callback)
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Verifies if QM settings differ from default, i.e. if an update is required.
// Computes actual values, as will be sent to the encoder.
bool QMUpdate(VCMResolutionScale* qm,
VCMQMSettingsCallback* qmsettings_callback)
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Checks if we should make a QM change. Return true if yes, false otherwise.
bool CheckStatusForQMchange() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
void ProcessIncomingFrameRate(int64_t now)
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
@ -152,16 +134,11 @@ class MediaOptimization {
int video_target_bitrate_ GUARDED_BY(crit_sect_);
float incoming_frame_rate_ GUARDED_BY(crit_sect_);
int64_t incoming_frame_times_[kFrameCountHistorySize] GUARDED_BY(crit_sect_);
bool enable_qm_ GUARDED_BY(crit_sect_);
std::list<EncodedFrameSample> encoded_frame_samples_ GUARDED_BY(crit_sect_);
uint32_t avg_sent_bit_rate_bps_ GUARDED_BY(crit_sect_);
uint32_t avg_sent_framerate_ GUARDED_BY(crit_sect_);
uint32_t key_frame_cnt_ GUARDED_BY(crit_sect_);
uint32_t delta_frame_cnt_ GUARDED_BY(crit_sect_);
std::unique_ptr<VCMContentMetricsProcessing> content_ GUARDED_BY(crit_sect_);
std::unique_ptr<VCMQmResolution> qm_resolution_ GUARDED_BY(crit_sect_);
int64_t last_qm_update_time_ GUARDED_BY(crit_sect_);
int64_t last_change_time_ GUARDED_BY(crit_sect_); // Content/user triggered.
int num_layers_ GUARDED_BY(crit_sect_);
bool suspension_enabled_ GUARDED_BY(crit_sect_);
bool video_suspended_ GUARDED_BY(crit_sect_);

View File

@ -66,7 +66,7 @@ TEST_F(TestMediaOptimization, VerifyMuting) {
media_opt_.SetTargetRates(target_bitrate_kbps * 1000,
0, // Lossrate.
100, // RTT in ms.
nullptr, nullptr);
nullptr);
media_opt_.EnableFrameDropper(true);
for (int time = 0; time < 2000; time += frame_time_ms_) {
ASSERT_NO_FATAL_FAILURE(AddFrameAndAdvanceTime(target_bitrate_kbps, false));
@ -76,7 +76,7 @@ TEST_F(TestMediaOptimization, VerifyMuting) {
media_opt_.SetTargetRates(kThresholdBps - 1000,
0, // Lossrate.
100, // RTT in ms.
nullptr, nullptr);
nullptr);
// Expect the muter to engage immediately and stay muted.
// Test during 2 seconds.
for (int time = 0; time < 2000; time += frame_time_ms_) {
@ -89,7 +89,7 @@ TEST_F(TestMediaOptimization, VerifyMuting) {
media_opt_.SetTargetRates(kThresholdBps + 1000,
0, // Lossrate.
100, // RTT in ms.
nullptr, nullptr);
nullptr);
// Expect the muter to stay muted.
// Test during 2 seconds.
for (int time = 0; time < 2000; time += frame_time_ms_) {
@ -101,7 +101,7 @@ TEST_F(TestMediaOptimization, VerifyMuting) {
media_opt_.SetTargetRates(kThresholdBps + kWindowBps + 1000,
0, // Lossrate.
100, // RTT in ms.
nullptr, nullptr);
nullptr);
// Expect the muter to disengage immediately.
// Test during 2 seconds.
for (int time = 0; time < 2000; time += frame_time_ms_) {
@ -138,7 +138,7 @@ TEST_F(TestMediaOptimization, ProtectsUsingFecBitrateAboveCodecMax) {
// Using 10% of codec bitrate for FEC, should still be able to use all of it.
protection_callback.fec_rate_bps_ = kCodecBitrateBps / 10;
uint32_t target_bitrate = media_opt_.SetTargetRates(
kMaxBitrateBps, 0, 0, &protection_callback, nullptr);
kMaxBitrateBps, 0, 0, &protection_callback);
EXPECT_EQ(kCodecBitrateBps, static_cast<int>(target_bitrate));
@ -146,7 +146,7 @@ TEST_F(TestMediaOptimization, ProtectsUsingFecBitrateAboveCodecMax) {
// both equally, but only be half of max (since that ceiling should be hit).
protection_callback.fec_rate_bps_ = kCodecBitrateBps;
target_bitrate = media_opt_.SetTargetRates(kMaxBitrateBps, 128, 100,
&protection_callback, nullptr);
&protection_callback);
EXPECT_EQ(kMaxBitrateBps / 2, static_cast<int>(target_bitrate));
}

View File

@ -1,901 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/qm_select.h"
#include <math.h>
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/video_coding/include/video_coding_defines.h"
#include "webrtc/modules/video_coding/internal_defines.h"
#include "webrtc/modules/video_coding/qm_select_data.h"
#include "webrtc/system_wrappers/include/trace.h"
namespace webrtc {
// QM-METHOD class
VCMQmMethod::VCMQmMethod()
: content_metrics_(NULL),
width_(0),
height_(0),
user_frame_rate_(0.0f),
native_width_(0),
native_height_(0),
native_frame_rate_(0.0f),
image_type_(kVGA),
framerate_level_(kFrameRateHigh),
init_(false) {
ResetQM();
}
VCMQmMethod::~VCMQmMethod() {}
void VCMQmMethod::ResetQM() {
aspect_ratio_ = 1.0f;
motion_.Reset();
spatial_.Reset();
content_class_ = 0;
}
uint8_t VCMQmMethod::ComputeContentClass() {
ComputeMotionNFD();
ComputeSpatial();
return content_class_ = 3 * motion_.level + spatial_.level;
}
void VCMQmMethod::UpdateContent(const VideoContentMetrics* contentMetrics) {
content_metrics_ = contentMetrics;
}
void VCMQmMethod::ComputeMotionNFD() {
if (content_metrics_) {
motion_.value = content_metrics_->motion_magnitude;
}
// Determine motion level.
if (motion_.value < kLowMotionNfd) {
motion_.level = kLow;
} else if (motion_.value > kHighMotionNfd) {
motion_.level = kHigh;
} else {
motion_.level = kDefault;
}
}
void VCMQmMethod::ComputeSpatial() {
float spatial_err = 0.0;
float spatial_err_h = 0.0;
float spatial_err_v = 0.0;
if (content_metrics_) {
spatial_err = content_metrics_->spatial_pred_err;
spatial_err_h = content_metrics_->spatial_pred_err_h;
spatial_err_v = content_metrics_->spatial_pred_err_v;
}
// Spatial measure: take average of 3 prediction errors.
spatial_.value = (spatial_err + spatial_err_h + spatial_err_v) / 3.0f;
// Reduce thresholds for large scenes/higher pixel correlation.
float scale2 = image_type_ > kVGA ? kScaleTexture : 1.0;
if (spatial_.value > scale2 * kHighTexture) {
spatial_.level = kHigh;
} else if (spatial_.value < scale2 * kLowTexture) {
spatial_.level = kLow;
} else {
spatial_.level = kDefault;
}
}
ImageType VCMQmMethod::GetImageType(uint16_t width, uint16_t height) {
// Get the image type for the encoder frame size.
uint32_t image_size = width * height;
if (image_size == kSizeOfImageType[kQCIF]) {
return kQCIF;
} else if (image_size == kSizeOfImageType[kHCIF]) {
return kHCIF;
} else if (image_size == kSizeOfImageType[kQVGA]) {
return kQVGA;
} else if (image_size == kSizeOfImageType[kCIF]) {
return kCIF;
} else if (image_size == kSizeOfImageType[kHVGA]) {
return kHVGA;
} else if (image_size == kSizeOfImageType[kVGA]) {
return kVGA;
} else if (image_size == kSizeOfImageType[kQFULLHD]) {
return kQFULLHD;
} else if (image_size == kSizeOfImageType[kWHD]) {
return kWHD;
} else if (image_size == kSizeOfImageType[kFULLHD]) {
return kFULLHD;
} else {
// No exact match, find closet one.
return FindClosestImageType(width, height);
}
}
ImageType VCMQmMethod::FindClosestImageType(uint16_t width, uint16_t height) {
float size = static_cast<float>(width * height);
float min = size;
int isel = 0;
for (int i = 0; i < kNumImageTypes; ++i) {
float dist = fabs(size - kSizeOfImageType[i]);
if (dist < min) {
min = dist;
isel = i;
}
}
return static_cast<ImageType>(isel);
}
FrameRateLevelClass VCMQmMethod::FrameRateLevel(float avg_framerate) {
if (avg_framerate <= kLowFrameRate) {
return kFrameRateLow;
} else if (avg_framerate <= kMiddleFrameRate) {
return kFrameRateMiddle1;
} else if (avg_framerate <= kHighFrameRate) {
return kFrameRateMiddle2;
} else {
return kFrameRateHigh;
}
}
// RESOLUTION CLASS
VCMQmResolution::VCMQmResolution() : qm_(new VCMResolutionScale()) {
Reset();
}
VCMQmResolution::~VCMQmResolution() {
delete qm_;
}
void VCMQmResolution::ResetRates() {
sum_target_rate_ = 0.0f;
sum_incoming_framerate_ = 0.0f;
sum_rate_MM_ = 0.0f;
sum_rate_MM_sgn_ = 0.0f;
sum_packet_loss_ = 0.0f;
buffer_level_ = kInitBufferLevel * target_bitrate_;
frame_cnt_ = 0;
frame_cnt_delta_ = 0;
low_buffer_cnt_ = 0;
update_rate_cnt_ = 0;
}
void VCMQmResolution::ResetDownSamplingState() {
state_dec_factor_spatial_ = 1.0;
state_dec_factor_temporal_ = 1.0;
for (int i = 0; i < kDownActionHistorySize; i++) {
down_action_history_[i].spatial = kNoChangeSpatial;
down_action_history_[i].temporal = kNoChangeTemporal;
}
}
void VCMQmResolution::Reset() {
target_bitrate_ = 0.0f;
incoming_framerate_ = 0.0f;
buffer_level_ = 0.0f;
per_frame_bandwidth_ = 0.0f;
avg_target_rate_ = 0.0f;
avg_incoming_framerate_ = 0.0f;
avg_ratio_buffer_low_ = 0.0f;
avg_rate_mismatch_ = 0.0f;
avg_rate_mismatch_sgn_ = 0.0f;
avg_packet_loss_ = 0.0f;
encoder_state_ = kStableEncoding;
num_layers_ = 1;
ResetRates();
ResetDownSamplingState();
ResetQM();
}
EncoderState VCMQmResolution::GetEncoderState() {
return encoder_state_;
}
// Initialize state after re-initializing the encoder,
// i.e., after SetEncodingData() in mediaOpt.
int VCMQmResolution::Initialize(float bitrate,
float user_framerate,
uint16_t width,
uint16_t height,
int num_layers) {
if (user_framerate == 0.0f || width == 0 || height == 0) {
return VCM_PARAMETER_ERROR;
}
Reset();
target_bitrate_ = bitrate;
incoming_framerate_ = user_framerate;
UpdateCodecParameters(user_framerate, width, height);
native_width_ = width;
native_height_ = height;
native_frame_rate_ = user_framerate;
num_layers_ = num_layers;
// Initial buffer level.
buffer_level_ = kInitBufferLevel * target_bitrate_;
// Per-frame bandwidth.
per_frame_bandwidth_ = target_bitrate_ / user_framerate;
init_ = true;
return VCM_OK;
}
void VCMQmResolution::UpdateCodecParameters(float frame_rate,
uint16_t width,
uint16_t height) {
width_ = width;
height_ = height;
// |user_frame_rate| is the target frame rate for VPM frame dropper.
user_frame_rate_ = frame_rate;
image_type_ = GetImageType(width, height);
}
// Update rate data after every encoded frame.
void VCMQmResolution::UpdateEncodedSize(size_t encoded_size) {
frame_cnt_++;
// Convert to Kbps.
float encoded_size_kbits = 8.0f * static_cast<float>(encoded_size) / 1000.0f;
// Update the buffer level:
// Note this is not the actual encoder buffer level.
// |buffer_level_| is reset to an initial value after SelectResolution is
// called, and does not account for frame dropping by encoder or VCM.
buffer_level_ += per_frame_bandwidth_ - encoded_size_kbits;
// Counter for occurrences of low buffer level:
// low/negative values means encoder is likely dropping frames.
if (buffer_level_ <= kPercBufferThr * kInitBufferLevel * target_bitrate_) {
low_buffer_cnt_++;
}
}
// Update various quantities after SetTargetRates in MediaOpt.
void VCMQmResolution::UpdateRates(float target_bitrate,
float encoder_sent_rate,
float incoming_framerate,
uint8_t packet_loss) {
// Sum the target bitrate: this is the encoder rate from previous update
// (~1sec), i.e, before the update for next ~1sec.
sum_target_rate_ += target_bitrate_;
update_rate_cnt_++;
// Sum the received (from RTCP reports) packet loss rates.
sum_packet_loss_ += static_cast<float>(packet_loss / 255.0);
// Sum the sequence rate mismatch:
// Mismatch here is based on the difference between the target rate
// used (in previous ~1sec) and the average actual encoding rate measured
// at previous ~1sec.
float diff = target_bitrate_ - encoder_sent_rate;
if (target_bitrate_ > 0.0)
sum_rate_MM_ += fabs(diff) / target_bitrate_;
int sgnDiff = diff > 0 ? 1 : (diff < 0 ? -1 : 0);
// To check for consistent under(+)/over_shooting(-) of target rate.
sum_rate_MM_sgn_ += sgnDiff;
// Update with the current new target and frame rate:
// these values are ones the encoder will use for the current/next ~1sec.
target_bitrate_ = target_bitrate;
incoming_framerate_ = incoming_framerate;
sum_incoming_framerate_ += incoming_framerate_;
// Update the per_frame_bandwidth:
// this is the per_frame_bw for the current/next ~1sec.
per_frame_bandwidth_ = 0.0f;
if (incoming_framerate_ > 0.0f) {
per_frame_bandwidth_ = target_bitrate_ / incoming_framerate_;
}
}
// Select the resolution factors: frame size and frame rate change (qm scales).
// Selection is for going down in resolution, or for going back up
// (if a previous down-sampling action was taken).
// In the current version the following constraints are imposed:
// 1) We only allow for one action, either down or up, at a given time.
// 2) The possible down-sampling actions are: spatial by 1/2x1/2, 3/4x3/4;
//    temporal/frame rate reduction by 1/2 and 2/3.
// 3) The action for going back up is the reverse of last (spatial or temporal)
//    down-sampling action. The list of down-sampling actions from the
//    Initialize() state are kept in |down_action_history_|.
// 4) The total amount of down-sampling (spatial and/or temporal) from the
//    Initialize() state (native resolution) is limited by various factors.
int VCMQmResolution::SelectResolution(VCMResolutionScale** qm) {
  if (!init_) {
    return VCM_UNINITIALIZED;
  }
  // Without content metrics no informed decision can be made: reset all
  // state and report the default (no-change) scale.
  if (content_metrics_ == NULL) {
    Reset();
    *qm = qm_;
    return VCM_OK;
  }
  // Check conditions on down-sampling state: the accumulated decimation
  // factors must stay within the configured per-dimension and total limits.
  assert(state_dec_factor_spatial_ >= 1.0f);
  assert(state_dec_factor_temporal_ >= 1.0f);
  assert(state_dec_factor_spatial_ <= kMaxSpatialDown);
  assert(state_dec_factor_temporal_ <= kMaxTempDown);
  assert(state_dec_factor_temporal_ * state_dec_factor_spatial_ <=
         kMaxTotalDown);
  // Compute content class for selection.
  content_class_ = ComputeContentClass();
  // Compute various rate quantities for selection.
  ComputeRatesForSelection();
  // Get the encoder state.
  ComputeEncoderState();
  // Default settings: no action.
  SetDefaultAction();
  *qm = qm_;
  // Check for going back up in resolution, if we have had some down-sampling
  // relative to native state in Initialize().
  if (down_action_history_[0].spatial != kNoChangeSpatial ||
      down_action_history_[0].temporal != kNoChangeTemporal) {
    if (GoingUpResolution()) {
      *qm = qm_;
      return VCM_OK;
    }
  }
  // Check for going down in resolution.
  if (GoingDownResolution()) {
    *qm = qm_;
    return VCM_OK;
  }
  // No action selected; |qm_| still carries the defaults set above.
  return VCM_OK;
}
// Reset |qm_| to report the current encoder resolution unchanged (unit scale
// factors, no spatial/temporal switch), and clear any pending action.
void VCMQmResolution::SetDefaultAction() {
  action_.spatial = kNoChangeSpatial;
  action_.temporal = kNoChangeTemporal;
  qm_->codec_width = width_;
  qm_->codec_height = height_;
  qm_->frame_rate = user_frame_rate_;
  qm_->change_resolution_spatial = false;
  qm_->change_resolution_temporal = false;
  qm_->spatial_width_fact = 1.0f;
  qm_->spatial_height_fact = 1.0f;
  qm_->temporal_fact = 1.0f;
}
// Compute the averaged rate quantities (accumulated over the ~1 sec update
// windows since the last selection) that drive the resolution decision, and
// derive the frame rate level used to index the transition-rate tables.
void VCMQmResolution::ComputeRatesForSelection() {
  avg_target_rate_ = 0.0f;
  avg_incoming_framerate_ = 0.0f;
  avg_ratio_buffer_low_ = 0.0f;
  avg_rate_mismatch_ = 0.0f;
  avg_rate_mismatch_sgn_ = 0.0f;
  avg_packet_loss_ = 0.0f;
  // Fraction of encoded frames that saw a low (near-underflow) buffer level.
  if (frame_cnt_ > 0) {
    avg_ratio_buffer_low_ =
        static_cast<float>(low_buffer_cnt_) / static_cast<float>(frame_cnt_);
  }
  // Per-update-window averages of the accumulated sums.
  if (update_rate_cnt_ > 0) {
    avg_rate_mismatch_ =
        static_cast<float>(sum_rate_MM_) / static_cast<float>(update_rate_cnt_);
    avg_rate_mismatch_sgn_ = static_cast<float>(sum_rate_MM_sgn_) /
                             static_cast<float>(update_rate_cnt_);
    avg_target_rate_ = static_cast<float>(sum_target_rate_) /
                       static_cast<float>(update_rate_cnt_);
    avg_incoming_framerate_ = static_cast<float>(sum_incoming_framerate_) /
                              static_cast<float>(update_rate_cnt_);
    avg_packet_loss_ = static_cast<float>(sum_packet_loss_) /
                       static_cast<float>(update_rate_cnt_);
  }
  // For selection we may want to weight some quantities more heavily
  // with the current (i.e., next ~1sec) rate values.
  // Use 1.0f (not 1.0) so the blend stays in single precision, consistent
  // with the float operands and the rest of this file.
  avg_target_rate_ =
      kWeightRate * avg_target_rate_ + (1.0f - kWeightRate) * target_bitrate_;
  avg_incoming_framerate_ = kWeightRate * avg_incoming_framerate_ +
                            (1.0f - kWeightRate) * incoming_framerate_;
  // Use base layer frame rate for temporal layers: this will favor spatial.
  assert(num_layers_ > 0);
  framerate_level_ = FrameRateLevel(avg_incoming_framerate_ /
                                    static_cast<float>(1 << (num_layers_ - 1)));
}
// Classify the encoder state from the averaged rate statistics:
// - kEasyEncoding:     high rate mis-match with consistent under-shooting.
// - kStressedEncoding: frequent low buffer levels, or high rate mis-match
//                      with consistent over-shooting.
// - kStableEncoding:   everything else.
// Note: when both the easy and stressed conditions hold, easy takes
// precedence (matching the original assignment order).
void VCMQmResolution::ComputeEncoderState() {
  const bool buffer_low_often = avg_ratio_buffer_low_ > kMaxBufferLow;
  const bool high_mismatch = avg_rate_mismatch_ > kMaxRateMisMatch;
  if (high_mismatch && avg_rate_mismatch_sgn_ > kRateUnderShoot) {
    encoder_state_ = kEasyEncoding;
  } else if (buffer_low_often ||
             (high_mismatch && avg_rate_mismatch_sgn_ < -kRateOverShoot)) {
    encoder_state_ = kStressedEncoding;
  } else {
    encoder_state_ = kStableEncoding;
  }
}
// Decide whether to undo the most recent down-sampling action(s). Returns
// true (after recording the up-action via UpdateDownsamplingState) if the
// estimated rates support going back up; false means no up-action taken.
bool VCMQmResolution::GoingUpResolution() {
  // For going up, we check for undoing the previous down-sampling action.
  float fac_width = kFactorWidthSpatial[down_action_history_[0].spatial];
  float fac_height = kFactorHeightSpatial[down_action_history_[0].spatial];
  float fac_temp = kFactorTemporal[down_action_history_[0].temporal];
  // For going up spatially, we allow for going up by 3/4x3/4 at each stage.
  // So if the last spatial action was 1/2x1/2 it would be undone in 2 stages.
  // Modify the fac_width/height for this case.
  if (down_action_history_[0].spatial == kOneQuarterSpatialUniform) {
    fac_width = kFactorWidthSpatial[kOneQuarterSpatialUniform] /
                kFactorWidthSpatial[kOneHalfSpatialUniform];
    fac_height = kFactorHeightSpatial[kOneQuarterSpatialUniform] /
                 kFactorHeightSpatial[kOneHalfSpatialUniform];
  }
  // Check if we should go up both spatially and temporally.
  if (down_action_history_[0].spatial != kNoChangeSpatial &&
      down_action_history_[0].temporal != kNoChangeTemporal) {
    if (ConditionForGoingUp(fac_width, fac_height, fac_temp,
                            kTransRateScaleUpSpatialTemp)) {
      action_.spatial = down_action_history_[0].spatial;
      action_.temporal = down_action_history_[0].temporal;
      UpdateDownsamplingState(kUpResolution);
      return true;
    }
  }
  // Check if we should go up either spatially or temporally.
  bool selected_up_spatial = false;
  bool selected_up_temporal = false;
  if (down_action_history_[0].spatial != kNoChangeSpatial) {
    selected_up_spatial = ConditionForGoingUp(fac_width, fac_height, 1.0f,
                                              kTransRateScaleUpSpatial);
  }
  if (down_action_history_[0].temporal != kNoChangeTemporal) {
    selected_up_temporal =
        ConditionForGoingUp(1.0f, 1.0f, fac_temp, kTransRateScaleUpTemp);
  }
  if (selected_up_spatial && !selected_up_temporal) {
    action_.spatial = down_action_history_[0].spatial;
    action_.temporal = kNoChangeTemporal;
    UpdateDownsamplingState(kUpResolution);
    return true;
  } else if (!selected_up_spatial && selected_up_temporal) {
    action_.spatial = kNoChangeSpatial;
    action_.temporal = down_action_history_[0].temporal;
    UpdateDownsamplingState(kUpResolution);
    return true;
  } else if (selected_up_spatial && selected_up_temporal) {
    // Both qualify on their own: pick the dimension that has been
    // down-sampled the most so far (only one action allowed at a time).
    PickSpatialOrTemporal();
    UpdateDownsamplingState(kUpResolution);
    return true;
  }
  return false;
}
// Check whether the rate situation supports undoing a down-sampling action
// described by the given scale factors. |scale_fac| scales the transition
// threshold (extra headroom required before going back up).
bool VCMQmResolution::ConditionForGoingUp(float fac_width,
                                          float fac_height,
                                          float fac_temp,
                                          float scale_fac) {
  // Rate needed to sustain the higher resolution.
  const float transition_rate_up =
      GetTransitionRate(fac_width, fac_height, fac_temp, scale_fac);
  // Go back up when the encoder is significantly under-shooting its target
  // (easy), or when it is stable and the average target rate clears the
  // transition threshold.
  return encoder_state_ == kEasyEncoding ||
         (encoder_state_ == kStableEncoding &&
          avg_target_rate_ > transition_rate_up);
}
// Decide whether to reduce resolution (spatially or temporally). On a down
// decision the action is chosen from the content-class tables, adjusted, and
// recorded via UpdateDownsamplingState; returns true iff an action was taken.
bool VCMQmResolution::GoingDownResolution() {
  float estimated_transition_rate_down =
      GetTransitionRate(1.0f, 1.0f, 1.0f, 1.0f);
  float max_rate = kFrameRateFac[framerate_level_] * kMaxRateQm[image_type_];
  // Resolution reduction if:
  // (1) target rate is below transition rate, or
  // (2) encoder is in stressed state and target rate below a max threshold.
  if ((avg_target_rate_ < estimated_transition_rate_down) ||
      (encoder_state_ == kStressedEncoding && avg_target_rate_ < max_rate)) {
    // Get the down-sampling action: based on content class, and how low
    // average target rate is relative to transition rate.
    // Table codes -- spatial: 1 = none, 2 = 3/4x3/4, 4 = 1/2x1/2;
    // temporal: 1 = none, 2 = 1/2 rate, 3 = 2/3 rate.
    uint8_t spatial_fact =
        kSpatialAction[content_class_ +
                       9 * RateClass(estimated_transition_rate_down)];
    uint8_t temp_fact =
        kTemporalAction[content_class_ +
                        9 * RateClass(estimated_transition_rate_down)];
    switch (spatial_fact) {
      case 4: {
        action_.spatial = kOneQuarterSpatialUniform;
        break;
      }
      case 2: {
        action_.spatial = kOneHalfSpatialUniform;
        break;
      }
      case 1: {
        action_.spatial = kNoChangeSpatial;
        break;
      }
      default: { assert(false); }
    }
    switch (temp_fact) {
      case 3: {
        action_.temporal = kTwoThirdsTemporal;
        break;
      }
      case 2: {
        action_.temporal = kOneHalfTemporal;
        break;
      }
      case 1: {
        action_.temporal = kNoChangeTemporal;
        break;
      }
      default: { assert(false); }
    }
    // Only allow for one action (spatial or temporal) at a given time.
    assert(action_.temporal == kNoChangeTemporal ||
           action_.spatial == kNoChangeSpatial);
    // Adjust cases not captured in tables, mainly based on frame rate, and
    // also check for odd frame sizes.
    AdjustAction();
    // Update down-sampling state.
    if (action_.spatial != kNoChangeSpatial ||
        action_.temporal != kNoChangeTemporal) {
      UpdateDownsamplingState(kDownResolution);
      return true;
    }
  }
  return false;
}
// Compute the bitrate threshold for a resolution action, evaluated at the
// resolution implied by the given scale factors (all 1.0 = the current
// resolution, used for the down-sampling check). |scale_fac| scales the
// threshold (used to require headroom when checking a move back up).
float VCMQmResolution::GetTransitionRate(float fac_width,
                                         float fac_height,
                                         float fac_temp,
                                         float scale_fac) {
  ImageType image_type =
      GetImageType(static_cast<uint16_t>(fac_width * width_),
                   static_cast<uint16_t>(fac_height * height_));
  FrameRateLevelClass framerate_level =
      FrameRateLevel(fac_temp * avg_incoming_framerate_);
  // If we are checking for going up temporally, and this is the last
  // temporal action, then use native frame rate.
  if (down_action_history_[1].temporal == kNoChangeTemporal &&
      fac_temp > 1.0f) {
    framerate_level = FrameRateLevel(native_frame_rate_);
  }
  // The maximum allowed rate below which down-sampling is allowed:
  // Nominal values based on image format (frame size and frame rate).
  float max_rate = kFrameRateFac[framerate_level] * kMaxRateQm[image_type];
  // The scale table has 9 entries per image-size class (one per content
  // class); sizes above VGA use the second half.
  uint8_t image_class = image_type > kVGA ? 1 : 0;
  uint8_t table_index = image_class * 9 + content_class_;
  // Scale factor for down-sampling transition threshold:
  // factor based on the content class and the image size.
  float scaleTransRate = kScaleTransRateQm[table_index];
  // Threshold bitrate for resolution action.
  return static_cast<float>(scale_fac * scaleTransRate * max_rate);
}
// Apply the selected up/down action: write the scale factors into |qm_|,
// maintain |down_action_history_|, update the codec resolution, and fold the
// factors into the accumulated decimation state relative to native.
void VCMQmResolution::UpdateDownsamplingState(UpDownAction up_down) {
  if (up_down == kUpResolution) {
    // Up-sampling factors are the inverse of the recorded down factors.
    qm_->spatial_width_fact = 1.0f / kFactorWidthSpatial[action_.spatial];
    qm_->spatial_height_fact = 1.0f / kFactorHeightSpatial[action_.spatial];
    // If last spatial action was 1/2x1/2, we undo it in two steps, so the
    // spatial scale factor in this first step is modified as (4.0/3.0 / 2.0).
    if (action_.spatial == kOneQuarterSpatialUniform) {
      qm_->spatial_width_fact = 1.0f *
                                kFactorWidthSpatial[kOneHalfSpatialUniform] /
                                kFactorWidthSpatial[kOneQuarterSpatialUniform];
      qm_->spatial_height_fact =
          1.0f * kFactorHeightSpatial[kOneHalfSpatialUniform] /
          kFactorHeightSpatial[kOneQuarterSpatialUniform];
    }
    qm_->temporal_fact = 1.0f / kFactorTemporal[action_.temporal];
    RemoveLastDownAction();
  } else if (up_down == kDownResolution) {
    // Enforce the down-sampling limits before committing the action, and
    // merge two 3/4x3/4 stages into a single 1/2x1/2 where possible.
    ConstrainAmountOfDownSampling();
    ConvertSpatialFractionalToWhole();
    qm_->spatial_width_fact = kFactorWidthSpatial[action_.spatial];
    qm_->spatial_height_fact = kFactorHeightSpatial[action_.spatial];
    qm_->temporal_fact = kFactorTemporal[action_.temporal];
    InsertLatestDownAction();
  } else {
    // This function should only be called if either the Up or Down action
    // has been selected.
    assert(false);
  }
  UpdateCodecResolution();
  // Accumulate the overall decimation relative to the native resolution.
  state_dec_factor_spatial_ = state_dec_factor_spatial_ *
                              qm_->spatial_width_fact *
                              qm_->spatial_height_fact;
  state_dec_factor_temporal_ = state_dec_factor_temporal_ * qm_->temporal_fact;
}
// Write the selected action into |qm_|: the new codec frame size and/or
// frame rate the encoder should switch to.
void VCMQmResolution::UpdateCodecResolution() {
  if (action_.spatial != kNoChangeSpatial) {
    qm_->change_resolution_spatial = true;
    // Round the scaled dimensions to the nearest integer.
    qm_->codec_width =
        static_cast<uint16_t>(width_ / qm_->spatial_width_fact + 0.5f);
    qm_->codec_height =
        static_cast<uint16_t>(height_ / qm_->spatial_height_fact + 0.5f);
    // Size should not exceed native sizes.
    assert(qm_->codec_width <= native_width_);
    assert(qm_->codec_height <= native_height_);
    // New sizes should be multiple of 2, otherwise spatial should not have
    // been selected.
    assert(qm_->codec_width % 2 == 0);
    assert(qm_->codec_height % 2 == 0);
  }
  if (action_.temporal != kNoChangeTemporal) {
    qm_->change_resolution_temporal = true;
    // Update the frame rate based on the average incoming frame rate.
    // NOTE(review): |frame_rate| is a float, so the "+ 0.5f" biases the value
    // up by half a frame/sec rather than rounding it -- presumably a consumer
    // truncates to an integer; confirm before changing.
    qm_->frame_rate = avg_incoming_framerate_ / qm_->temporal_fact + 0.5f;
    if (down_action_history_[0].temporal == 0) {
      // When we undo the last temporal-down action, make sure we go back up
      // to the native frame rate. Since the incoming frame rate may
      // fluctuate over time, |avg_incoming_framerate_| scaled back up may
      // be smaller than |native_frame rate_|.
      qm_->frame_rate = native_frame_rate_;
    }
  }
}
// Classify the average target rate against |transition_rate|:
// 0 = well below (under kFacLowRate of it), 2 = at/above it, 1 = in between.
uint8_t VCMQmResolution::RateClass(float transition_rate) {
  if (avg_target_rate_ < kFacLowRate * transition_rate) {
    return 0;
  }
  if (avg_target_rate_ >= transition_rate) {
    return 2;
  }
  return 1;
}
// TODO(marpan): Would be better to capture these frame rate adjustments by
// extending the table data (qm_select_data.h).
// Post-process the table-selected action for cases the tables do not cover:
// frame-rate-dependent overrides, temporal-layer limits, and odd frame sizes.
// Preserves the invariant that at most one action (spatial or temporal) is
// active.
void VCMQmResolution::AdjustAction() {
  // If the spatial level is default state (neither low or high), motion level
  // is not high, and spatial action was selected, switch to 2/3 frame rate
  // reduction if the average incoming frame rate is high.
  if (spatial_.level == kDefault && motion_.level != kHigh &&
      action_.spatial != kNoChangeSpatial &&
      framerate_level_ == kFrameRateHigh) {
    action_.spatial = kNoChangeSpatial;
    action_.temporal = kTwoThirdsTemporal;
  }
  // If both motion and spatial level are low, and temporal down action was
  // selected, switch to spatial 3/4x3/4 if the frame rate is not above the
  // lower middle level (|kFrameRateMiddle1|).
  if (motion_.level == kLow && spatial_.level == kLow &&
      framerate_level_ <= kFrameRateMiddle1 &&
      action_.temporal != kNoChangeTemporal) {
    action_.spatial = kOneHalfSpatialUniform;
    action_.temporal = kNoChangeTemporal;
  }
  // If spatial action is selected, and there has been too much spatial
  // reduction already (i.e., 1/4), then switch to temporal action if the
  // average frame rate is not low.
  if (action_.spatial != kNoChangeSpatial &&
      down_action_history_[0].spatial == kOneQuarterSpatialUniform &&
      framerate_level_ != kFrameRateLow) {
    action_.spatial = kNoChangeSpatial;
    action_.temporal = kTwoThirdsTemporal;
  }
  // Never use temporal action if number of temporal layers is above 2.
  if (num_layers_ > 2) {
    if (action_.temporal != kNoChangeTemporal) {
      action_.spatial = kOneHalfSpatialUniform;
    }
    action_.temporal = kNoChangeTemporal;
  }
  // If spatial action was selected, we need to make sure the frame sizes
  // are multiples of two. Otherwise switch to 2/3 temporal.
  if (action_.spatial != kNoChangeSpatial && !EvenFrameSize()) {
    action_.spatial = kNoChangeSpatial;
    // Only one action is allowed at a given time, so the temporal action is
    // necessarily kNoChangeTemporal here; replace the dropped spatial action
    // with a 2/3 frame rate reduction.
    action_.temporal = kTwoThirdsTemporal;
  }
}
// If the pending 3/4x3/4 action combines with an earlier 3/4x3/4 in the
// history, upgrade it to a single 1/2x1/2 action (more efficient than the
// 9/16 two-stage result), provided the down-sampling limits still allow it.
void VCMQmResolution::ConvertSpatialFractionalToWhole() {
  // If 3/4 spatial is selected, check if there has been another 3/4,
  // and if so, combine them into 1/2. 1/2 scaling is more efficient than 9/16.
  // Note we define 3/4x3/4 spatial as kOneHalfSpatialUniform.
  if (action_.spatial == kOneHalfSpatialUniform) {
    bool found = false;
    int isel = kDownActionHistorySize;
    for (int i = 0; i < kDownActionHistorySize; ++i) {
      if (down_action_history_[i].spatial == kOneHalfSpatialUniform) {
        isel = i;
        found = true;
        break;
      }
    }
    if (found) {
      action_.spatial = kOneQuarterSpatialUniform;
      // Back out the earlier 3/4x3/4 from the accumulated state before
      // re-checking the limits for the combined 1/2x1/2 action.
      state_dec_factor_spatial_ =
          state_dec_factor_spatial_ /
          (kFactorWidthSpatial[kOneHalfSpatialUniform] *
           kFactorHeightSpatial[kOneHalfSpatialUniform]);
      // Check if switching to 1/2x1/2 (=1/4) spatial is allowed.
      ConstrainAmountOfDownSampling();
      if (action_.spatial == kNoChangeSpatial) {
        // Not allowed. Go back to 3/4x3/4 spatial.
        action_.spatial = kOneHalfSpatialUniform;
        state_dec_factor_spatial_ =
            state_dec_factor_spatial_ *
            kFactorWidthSpatial[kOneHalfSpatialUniform] *
            kFactorHeightSpatial[kOneHalfSpatialUniform];
      } else {
        // Switching is allowed. Remove 3/4x3/4 from the history, and update
        // the frame size.
        for (int i = isel; i < kDownActionHistorySize - 1; ++i) {
          down_action_history_[i].spatial = down_action_history_[i + 1].spatial;
        }
        width_ = width_ * kFactorWidthSpatial[kOneHalfSpatialUniform];
        height_ = height_ * kFactorHeightSpatial[kOneHalfSpatialUniform];
      }
    }
  }
}
// Returns true iff the frame size produced by the pending spatial action is
// a multiple of two in both dimensions (always true when no spatial action
// is selected).
bool VCMQmResolution::EvenFrameSize() {
  uint16_t new_width = width_;
  uint16_t new_height = height_;
  switch (action_.spatial) {
    case kOneHalfSpatialUniform:  // 3/4 x 3/4 down-sampling.
      new_width = width_ * 3 / 4;
      new_height = height_ * 3 / 4;
      break;
    case kOneQuarterSpatialUniform:  // 1/2 x 1/2 down-sampling.
      new_width = width_ / 2;
      new_height = height_ / 2;
      break;
    default:
      return true;  // No spatial change: nothing to check.
  }
  return (new_width % 2 == 0) && (new_height % 2 == 0);
}
// Push the just-taken down action onto the front of the history; the oldest
// entry falls off the end. Spatial and temporal parts are tracked separately.
void VCMQmResolution::InsertLatestDownAction() {
  if (action_.spatial != kNoChangeSpatial) {
    // Shift right from the back so no entry is overwritten before it is read.
    for (int i = kDownActionHistorySize - 1; i > 0; --i) {
      down_action_history_[i].spatial = down_action_history_[i - 1].spatial;
    }
    down_action_history_[0].spatial = action_.spatial;
  }
  if (action_.temporal != kNoChangeTemporal) {
    for (int i = kDownActionHistorySize - 1; i > 0; --i) {
      down_action_history_[i].temporal = down_action_history_[i - 1].temporal;
    }
    down_action_history_[0].temporal = action_.temporal;
  }
}
// Pop the most recent down action (front of the history) after it has been
// undone by an up action; remaining entries shift toward the front.
void VCMQmResolution::RemoveLastDownAction() {
  if (action_.spatial != kNoChangeSpatial) {
    // If the last spatial action was 1/2x1/2 we replace it with 3/4x3/4,
    // since going up is done in two 3/4x3/4 stages (see GoingUpResolution).
    if (action_.spatial == kOneQuarterSpatialUniform) {
      down_action_history_[0].spatial = kOneHalfSpatialUniform;
    } else {
      for (int i = 0; i < kDownActionHistorySize - 1; ++i) {
        down_action_history_[i].spatial = down_action_history_[i + 1].spatial;
      }
      down_action_history_[kDownActionHistorySize - 1].spatial =
          kNoChangeSpatial;
    }
  }
  if (action_.temporal != kNoChangeTemporal) {
    for (int i = 0; i < kDownActionHistorySize - 1; ++i) {
      down_action_history_[i].temporal = down_action_history_[i + 1].temporal;
    }
    down_action_history_[kDownActionHistorySize - 1].temporal =
        kNoChangeTemporal;
  }
}
// Veto the pending down action if it would violate any of the limits:
// minimum frame size / frame rate, per-dimension maximum down-sampling, or
// the combined spatial*temporal maximum. A vetoed action is reset to
// no-change; nothing is committed here.
void VCMQmResolution::ConstrainAmountOfDownSampling() {
  // Sanity checks on down-sampling selection:
  // override the settings for too small image size and/or frame rate.
  // Also check the limit on current down-sampling states.
  float spatial_width_fact = kFactorWidthSpatial[action_.spatial];
  float spatial_height_fact = kFactorHeightSpatial[action_.spatial];
  float temporal_fact = kFactorTemporal[action_.temporal];
  // Decimation factors as they would be after applying the pending action.
  float new_dec_factor_spatial =
      state_dec_factor_spatial_ * spatial_width_fact * spatial_height_fact;
  float new_dec_factor_temp = state_dec_factor_temporal_ * temporal_fact;
  // No spatial sampling if current frame size is too small, or if the
  // amount of spatial down-sampling is above maximum spatial down-action.
  if ((width_ * height_) <= kMinImageSize ||
      new_dec_factor_spatial > kMaxSpatialDown) {
    action_.spatial = kNoChangeSpatial;
    new_dec_factor_spatial = state_dec_factor_spatial_;
  }
  // No frame rate reduction if average frame rate is below some point, or if
  // the amount of temporal down-sampling is above maximum temporal down-action.
  if (avg_incoming_framerate_ <= kMinFrameRate ||
      new_dec_factor_temp > kMaxTempDown) {
    action_.temporal = kNoChangeTemporal;
    new_dec_factor_temp = state_dec_factor_temporal_;
  }
  // Check if the total (spatial-temporal) down-action is above maximum allowed,
  // if so, disallow the current selected down-action.
  if (new_dec_factor_spatial * new_dec_factor_temp > kMaxTotalDown) {
    if (action_.spatial != kNoChangeSpatial) {
      action_.spatial = kNoChangeSpatial;
    } else if (action_.temporal != kNoChangeTemporal) {
      action_.temporal = kNoChangeTemporal;
    } else {
      // We only allow for one action (spatial or temporal) at a given time, so
      // either spatial or temporal action is selected when this function is
      // called. If the selected action is disallowed from one of the above
      // 2 prior conditions (on spatial & temporal max down-action), then this
      // condition "total down-action > |kMaxTotalDown|" would not be entered.
      assert(false);
    }
  }
}
// Tie-break when both the spatial and temporal up-conditions hold: undo the
// dimension that has accumulated the larger down-sampling factor so far.
void VCMQmResolution::PickSpatialOrTemporal() {
  const bool spatial_down_dominates =
      state_dec_factor_spatial_ > state_dec_factor_temporal_;
  action_.spatial = spatial_down_dominates ? down_action_history_[0].spatial
                                           : kNoChangeSpatial;
  action_.temporal = spatial_down_dominates
                         ? kNoChangeTemporal
                         : down_action_history_[0].temporal;
}
// TODO(marpan): Update when we allow for directional spatial down-sampling.
// Choose the directional spatial scale factors (uniform 4/3x4/3 vs. 2x2 vs.
// 1x2 / 2x1) based on target rate and the directional prediction errors.
// NOTE(review): each subsequent check may overwrite the previous selection,
// so the ordering of the blocks below is significant.
void VCMQmResolution::SelectSpatialDirectionMode(float transition_rate) {
  // Default is 4/3x4/3
  // For bit rates well below transitional rate, we select 2x2.
  if (avg_target_rate_ < transition_rate * kRateRedSpatial2X2) {
    qm_->spatial_width_fact = 2.0f;
    qm_->spatial_height_fact = 2.0f;
  }
  // Otherwise check prediction errors and aspect ratio.
  float spatial_err = 0.0f;
  float spatial_err_h = 0.0f;
  float spatial_err_v = 0.0f;
  if (content_metrics_) {
    spatial_err = content_metrics_->spatial_pred_err;
    spatial_err_h = content_metrics_->spatial_pred_err_h;
    spatial_err_v = content_metrics_->spatial_pred_err_v;
  }
  // Favor 1x2 if aspect_ratio is 16:9.
  if (aspect_ratio_ >= 16.0f / 9.0f) {
    // Check if 1x2 has lowest prediction error.
    if (spatial_err_h < spatial_err && spatial_err_h < spatial_err_v) {
      qm_->spatial_width_fact = 2.0f;
      qm_->spatial_height_fact = 1.0f;
    }
  }
  // Check for 4/3x4/3 selection: favor 2x2 over 1x2 and 2x1.
  if (spatial_err < spatial_err_h * (1.0f + kSpatialErr2x2VsHoriz) &&
      spatial_err < spatial_err_v * (1.0f + kSpatialErr2X2VsVert)) {
    qm_->spatial_width_fact = 4.0f / 3.0f;
    qm_->spatial_height_fact = 4.0f / 3.0f;
  }
  // Check for 2x1 selection.
  if (spatial_err_v < spatial_err_h * (1.0f - kSpatialErrVertVsHoriz) &&
      spatial_err_v < spatial_err * (1.0f - kSpatialErr2X2VsVert)) {
    qm_->spatial_width_fact = 1.0f;
    qm_->spatial_height_fact = 2.0f;
  }
}
} // namespace webrtc

View File

@ -1,326 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_
#define WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_
#include "webrtc/common_types.h"
#include "webrtc/typedefs.h"
/******************************************************/
/* Quality Modes: Resolution and Robustness settings */
/******************************************************/
namespace webrtc {
struct VideoContentMetrics;
// Output of the QM selection: the suggested codec frame size / frame rate,
// plus the scale factors (relative to the current encoder settings) that
// produced them, and flags telling which dimension changed.
struct VCMResolutionScale {
  VCMResolutionScale()
      : codec_width(640),
        codec_height(480),
        frame_rate(30.0f),
        spatial_width_fact(1.0f),
        spatial_height_fact(1.0f),
        temporal_fact(1.0f),
        change_resolution_spatial(false),
        change_resolution_temporal(false) {}
  uint16_t codec_width;
  uint16_t codec_height;
  float frame_rate;
  // Down-sampling factors (> 1.0 means reduction) relative to current state.
  float spatial_width_fact;
  float spatial_height_fact;
  float temporal_fact;
  bool change_resolution_spatial;
  bool change_resolution_temporal;
};
// Nominal image formats, ordered by increasing pixel count.
enum ImageType {
  kQCIF = 0,  // 176x144
  kHCIF,      // 264x216 = half(~3/4x3/4) CIF.
  kQVGA,      // 320x240 = quarter VGA.
  kCIF,       // 352x288
  kHVGA,      // 480x360 = half(~3/4x3/4) VGA.
  kVGA,       // 640x480
  kQFULLHD,   // 960x540 = quarter FULLHD, and half(~3/4x3/4) WHD.
  kWHD,       // 1280x720
  kFULLHD,    // 1920x1080
  kNumImageTypes
};
// Pixel count for each ImageType entry above.
const uint32_t kSizeOfImageType[kNumImageTypes] = {
    25344, 57024, 76800, 101376, 172800, 307200, 518400, 921600, 2073600};
// Frame rate bands used to index the rate tables.
enum FrameRateLevelClass {
  kFrameRateLow,
  kFrameRateMiddle1,
  kFrameRateMiddle2,
  kFrameRateHigh
};
// Classification of a content metric; kDefault when neither low nor high.
enum ContentLevelClass { kLow, kHigh, kDefault };
// A single content feature: its magnitude and low/high classification.
struct VCMContFeature {
  VCMContFeature() : value(0.0f), level(kDefault) {}
  void Reset() {
    value = 0.0f;
    level = kDefault;
  }
  float value;
  ContentLevelClass level;
};
enum UpDownAction { kUpResolution, kDownResolution };
enum SpatialAction {
  kNoChangeSpatial,
  kOneHalfSpatialUniform,     // 3/4 x 3/4: 9/16 ~1/2 pixel reduction.
  kOneQuarterSpatialUniform,  // 1/2 x 1/2: 1/4 pixel reduction.
  kNumModesSpatial
};
enum TemporalAction {
  kNoChangeTemporal,
  kTwoThirdsTemporal,  // 2/3 frame rate reduction
  kOneHalfTemporal,    // 1/2 frame rate reduction
  kNumModesTemporal
};
// A combined spatial+temporal action; at most one of the two is active.
struct ResolutionAction {
  ResolutionAction() : spatial(kNoChangeSpatial), temporal(kNoChangeTemporal) {}
  SpatialAction spatial;
  TemporalAction temporal;
};
// Down-sampling factors for spatial (width and height), and temporal.
const float kFactorWidthSpatial[kNumModesSpatial] = {1.0f, 4.0f / 3.0f, 2.0f};
const float kFactorHeightSpatial[kNumModesSpatial] = {1.0f, 4.0f / 3.0f, 2.0f};
const float kFactorTemporal[kNumModesTemporal] = {1.0f, 1.5f, 2.0f};
enum EncoderState {
  kStableEncoding,    // Low rate mis-match, stable buffer levels.
  kStressedEncoding,  // Significant over-shooting of target rate,
                      // Buffer under-flow, etc.
  kEasyEncoding       // Significant under-shooting of target rate.
};
// QmMethod class: main class for resolution and robustness settings.
// Base class: classifies the content (motion/spatial texture) and the
// current image/frame-rate format; derived classes make the decisions.
class VCMQmMethod {
 public:
  VCMQmMethod();
  virtual ~VCMQmMethod();
  // Reset values
  void ResetQM();
  virtual void Reset() = 0;
  // Compute content class.
  uint8_t ComputeContentClass();
  // Update with the content metrics.
  void UpdateContent(const VideoContentMetrics* content_metrics);
  // Compute spatial texture magnitude and level.
  // Spatial texture is a spatial prediction error measure.
  void ComputeSpatial();
  // Compute motion magnitude and level for NFD metric.
  // NFD is normalized frame difference (normalized by spatial variance).
  void ComputeMotionNFD();
  // Get the imageType (CIF, VGA, HD, etc) for the system width/height.
  ImageType GetImageType(uint16_t width, uint16_t height);
  // Return the closest image type.
  ImageType FindClosestImageType(uint16_t width, uint16_t height);
  // Get the frame rate level.
  FrameRateLevelClass FrameRateLevel(float frame_rate);

 protected:
  // Content Data. Not owned; may be NULL when no metrics are available.
  const VideoContentMetrics* content_metrics_;
  // Encoder frame sizes and native frame sizes.
  uint16_t width_;
  uint16_t height_;
  float user_frame_rate_;
  uint16_t native_width_;
  uint16_t native_height_;
  float native_frame_rate_;
  float aspect_ratio_;
  // Image type and frame rate level, for the current encoder resolution.
  ImageType image_type_;
  FrameRateLevelClass framerate_level_;
  // Content class data.
  VCMContFeature motion_;
  VCMContFeature spatial_;
  uint8_t content_class_;
  bool init_;
};
// Resolution settings class: decides spatial/temporal down-sampling (and the
// corresponding up-sampling back toward native) from rate statistics and
// content metrics.
class VCMQmResolution : public VCMQmMethod {
 public:
  VCMQmResolution();
  virtual ~VCMQmResolution();
  // Reset all quantities.
  virtual void Reset();
  // Reset rate quantities and counters after every SelectResolution() call.
  void ResetRates();
  // Reset down-sampling state.
  void ResetDownSamplingState();
  // Get the encoder state.
  EncoderState GetEncoderState();
  // Initialize after SetEncodingData in media_opt.
  int Initialize(float bitrate,
                 float user_framerate,
                 uint16_t width,
                 uint16_t height,
                 int num_layers);
  // Update the encoder frame size.
  void UpdateCodecParameters(float frame_rate, uint16_t width, uint16_t height);
  // Update with actual bit rate (size of the latest encoded frame)
  // and frame type, after every encoded frame.
  void UpdateEncodedSize(size_t encoded_size);
  // Update with new target bitrate, actual encoder sent rate, frame_rate,
  // loss rate: every ~1 sec from SetTargetRates in media_opt.
  void UpdateRates(float target_bitrate,
                   float encoder_sent_rate,
                   float incoming_framerate,
                   uint8_t packet_loss);
  // Extract ST (spatio-temporal) resolution action.
  // Inputs: qm: Reference to the quality modes pointer.
  // Output: the spatial and/or temporal scale change.
  int SelectResolution(VCMResolutionScale** qm);

 private:
  // Set the default resolution action.
  void SetDefaultAction();
  // Compute rates for the selection of down-sampling action.
  void ComputeRatesForSelection();
  // Compute the encoder state.
  void ComputeEncoderState();
  // Return true if the action is to go back up in resolution.
  bool GoingUpResolution();
  // Return true if the action is to go down in resolution.
  bool GoingDownResolution();
  // Check the condition for going up in resolution by the scale factors:
  // |facWidth|, |facHeight|, |facTemp|.
  // |scaleFac| is a scale factor for the transition rate.
  bool ConditionForGoingUp(float fac_width,
                           float fac_height,
                           float fac_temp,
                           float scale_fac);
  // Get the bitrate threshold for the resolution action.
  // The case |facWidth|=|facHeight|=|facTemp|==1 is for down-sampling action.
  // |scaleFac| is a scale factor for the transition rate.
  float GetTransitionRate(float fac_width,
                          float fac_height,
                          float fac_temp,
                          float scale_fac);
  // Update the down-sampling state.
  void UpdateDownsamplingState(UpDownAction up_down);
  // Update the codec frame size and frame rate.
  void UpdateCodecResolution();
  // Return a state based on average target rate relative transition rate.
  uint8_t RateClass(float transition_rate);
  // Adjust the action selected from the table.
  void AdjustAction();
  // Convert 2 stages of 3/4 (=9/16) spatial decimation to 1/2.
  void ConvertSpatialFractionalToWhole();
  // Returns true if the new frame sizes, under the selected spatial action,
  // are of even size.
  bool EvenFrameSize();
  // Insert latest down-sampling action into the history list.
  void InsertLatestDownAction();
  // Remove the last (first element) down-sampling action from the list.
  void RemoveLastDownAction();
  // Check constraints on the amount of down-sampling allowed.
  void ConstrainAmountOfDownSampling();
  // For going up in resolution: pick spatial or temporal action,
  // if both actions were separately selected.
  void PickSpatialOrTemporal();
  // Select the directional (1x2 or 2x1) spatial down-sampling action.
  void SelectSpatialDirectionMode(float transition_rate);
  enum { kDownActionHistorySize = 10 };

  VCMResolutionScale* qm_;
  // Encoder rate control parameters.
  float target_bitrate_;
  float incoming_framerate_;
  float per_frame_bandwidth_;
  float buffer_level_;
  // Data accumulated every ~1sec from MediaOpt.
  float sum_target_rate_;
  float sum_incoming_framerate_;
  float sum_rate_MM_;       // Rate mis-match magnitude.
  float sum_rate_MM_sgn_;   // Sign of the mis-match (under/over-shoot).
  float sum_packet_loss_;
  // Counters.
  uint32_t frame_cnt_;
  uint32_t frame_cnt_delta_;
  uint32_t update_rate_cnt_;
  uint32_t low_buffer_cnt_;
  // Resolution state parameters: accumulated decimation relative to native.
  float state_dec_factor_spatial_;
  float state_dec_factor_temporal_;
  // Quantities used for selection.
  float avg_target_rate_;
  float avg_incoming_framerate_;
  float avg_ratio_buffer_low_;
  float avg_rate_mismatch_;
  float avg_rate_mismatch_sgn_;
  float avg_packet_loss_;
  EncoderState encoder_state_;
  ResolutionAction action_;
  // Short history of the down-sampling actions from the Initialize() state.
  // This is needed for going up in resolution. Since the total amount of
  // down-sampling actions are constrained, the length of the list need not be
  // large: i.e., (4/3) ^{kDownActionHistorySize} <= kMaxDownSample.
  ResolutionAction down_action_history_[kDownActionHistorySize];
  int num_layers_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_

View File

@ -1,227 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_DATA_H_
#define WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_DATA_H_
/***************************************************************
*QMSelectData.h
* This file includes parameters for content-aware media optimization
****************************************************************/
#include "webrtc/typedefs.h"
namespace webrtc {
//
// PARAMETERS FOR RESOLUTION ADAPTATION
//
// Initial level of buffer in secs.
const float kInitBufferLevel = 0.5f;
// Threshold of (max) buffer size below which we consider too low (underflow).
const float kPercBufferThr = 0.10f;
// Threshold on the occurrences of low buffer levels.
const float kMaxBufferLow = 0.30f;
// Threshold on rate mismatch.
const float kMaxRateMisMatch = 0.5f;
// Threshold on amount of under/over encoder shooting.
const float kRateOverShoot = 0.75f;
const float kRateUnderShoot = 0.75f;
// Factor to favor weighting the average rates with the current/last data.
const float kWeightRate = 0.70f;
// Factor for transitional rate for going back up in resolution.
const float kTransRateScaleUpSpatial = 1.25f;
const float kTransRateScaleUpTemp = 1.25f;
const float kTransRateScaleUpSpatialTemp = 1.25f;
// Threshold on packet loss rate, above which favor resolution reduction.
const float kPacketLossThr = 0.1f;
// Factor for reducing transitional bitrate under packet loss.
const float kPacketLossRateFac = 1.0f;
// Maximum possible transitional rate for down-sampling:
// (units in kbps), for 30fps. Indexed by ImageType.
const uint16_t kMaxRateQm[9] = {
    0,     // QCIF
    50,    // kHCIF
    125,   // kQVGA
    200,   // CIF
    280,   // HVGA
    400,   // VGA
    700,   // QFULLHD
    1000,  // WHD
    1500   // FULLHD
};
// Frame rate scale for maximum transition rate. Indexed by
// FrameRateLevelClass.
const float kFrameRateFac[4] = {
    0.5f,   // Low
    0.7f,   // Middle level 1
    0.85f,  // Middle level 2
    1.0f,   // High
};
// Scale for transitional rate: based on content class
// motion=L/H/D,spatial==L/H/D: for low, high, middle levels
const float kScaleTransRateQm[18] = {
    // VGA and lower
    0.40f,  // L, L
    0.50f,  // L, H
    0.40f,  // L, D
    0.60f,  // H ,L
    0.60f,  // H, H
    0.60f,  // H, D
    0.50f,  // D, L
    0.50f,  // D, D
    0.50f,  // D, H
    // over VGA
    0.40f,  // L, L
    0.50f,  // L, H
    0.40f,  // L, D
    0.60f,  // H ,L
    0.60f,  // H, H
    0.60f,  // H, D
    0.50f,  // D, L
    0.50f,  // D, D
    0.50f,  // D, H
};
// Threshold on the target rate relative to transitional rate.
const float kFacLowRate = 0.5f;
// Action for down-sampling:
// motion=L/H/D,spatial==L/H/D, for low, high, middle levels;
// rate = 0/1/2, for target rate state relative to transition rate.
// Codes: 1 = no spatial change, 2 = 3/4x3/4, 4 = 1/2x1/2 (decoded in
// VCMQmResolution::GoingDownResolution).
const uint8_t kSpatialAction[27] = {
    // rateClass = 0:
    1,  // L, L
    1,  // L, H
    1,  // L, D
    4,  // H ,L
    1,  // H, H
    4,  // H, D
    4,  // D, L
    1,  // D, H
    2,  // D, D
    // rateClass = 1:
    1,  // L, L
    1,  // L, H
    1,  // L, D
    2,  // H ,L
    1,  // H, H
    2,  // H, D
    2,  // D, L
    1,  // D, H
    2,  // D, D
    // rateClass = 2:
    1,  // L, L
    1,  // L, H
    1,  // L, D
    2,  // H ,L
    1,  // H, H
    2,  // H, D
    2,  // D, L
    1,  // D, H
    2,  // D, D
};
// Codes: 1 = no temporal change, 2 = 1/2 frame rate, 3 = 2/3 frame rate
// (decoded in VCMQmResolution::GoingDownResolution).
const uint8_t kTemporalAction[27] = {
    // rateClass = 0:
    3,  // L, L
    2,  // L, H
    2,  // L, D
    1,  // H ,L
    3,  // H, H
    1,  // H, D
    1,  // D, L
    2,  // D, H
    1,  // D, D
    // rateClass = 1:
    3,  // L, L
    3,  // L, H
    3,  // L, D
    1,  // H ,L
    3,  // H, H
    1,  // H, D
    1,  // D, L
    3,  // D, H
    1,  // D, D
    // rateClass = 2:
    1,  // L, L
    3,  // L, H
    3,  // L, D
    1,  // H ,L
    3,  // H, H
    1,  // H, D
    1,  // D, L
    3,  // D, H
    1,  // D, D
};
// Control the total amount of down-sampling allowed.
const float kMaxSpatialDown = 8.0f;
const float kMaxTempDown = 3.0f;
const float kMaxTotalDown = 9.0f;
// Minimum image size for a spatial down-sampling.
const int kMinImageSize = 176 * 144;
// Minimum frame rate for temporal down-sampling:
// no frame rate reduction if incomingFrameRate <= MIN_FRAME_RATE.
const int kMinFrameRate = 8;
//
// PARAMETERS FOR FEC ADJUSTMENT: TODO (marpan)
//
//
// PARAMETERS FOR SETTING LOW/HIGH STATES OF CONTENT METRICS:
//
// Thresholds for frame rate:
const int kLowFrameRate = 10;
const int kMiddleFrameRate = 15;
const int kHighFrameRate = 25;
// Thresholds for motion: motion level is from NFD.
const float kHighMotionNfd = 0.075f;
const float kLowMotionNfd = 0.03f;
// Thresholds for spatial prediction error:
// this is applied on the average of (2x2,1x2,2x1).
const float kHighTexture = 0.035f;
const float kLowTexture = 0.020f;
// Used to reduce thresholds for larger/HD scenes: correction factor since
// higher correlation in HD scenes means lower spatial prediction error.
const float kScaleTexture = 0.9f;
// Percentage reduction in transitional bitrate for 2x2 selected over 1x2/2x1.
const float kRateRedSpatial2X2 = 0.6f;
const float kSpatialErr2x2VsHoriz = 0.1f; // percentage to favor 2x2 over H
const float kSpatialErr2X2VsVert = 0.1f; // percentage to favor 2x2 over V
const float kSpatialErrVertVsHoriz = 0.1f; // percentage to favor H over V
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_DATA_H_

File diff suppressed because it is too large Load Diff

View File

@ -28,7 +28,6 @@
# headers
'codec_database.h',
'codec_timer.h',
'content_metrics_processing.h',
'decoding_state.h',
'encoded_frame.h',
'fec_tables_xor.h',
@ -49,8 +48,6 @@
'packet.h',
'packet_buffer.h',
'percentile_filter.h',
'qm_select_data.h',
'qm_select.h',
'receiver.h',
'rtt_filter.h',
'session_info.h',
@ -61,7 +58,6 @@
# sources
'codec_database.cc',
'codec_timer.cc',
'content_metrics_processing.cc',
'decoding_state.cc',
'encoded_frame.cc',
'frame_buffer.cc',
@ -78,7 +74,6 @@
'packet.cc',
'packet_buffer.cc',
'percentile_filter.cc',
'qm_select.cc',
'receiver.cc',
'rtt_filter.cc',
'session_info.cc',

View File

@ -74,16 +74,11 @@ class VideoCodingModuleImpl : public VideoCodingModule {
VideoCodingModuleImpl(Clock* clock,
EventFactory* event_factory,
VideoEncoderRateObserver* encoder_rate_observer,
VCMQMSettingsCallback* qm_settings_callback,
NackSender* nack_sender,
KeyFrameRequestSender* keyframe_request_sender,
EncodedImageCallback* pre_decode_image_callback)
: VideoCodingModule(),
sender_(clock,
&post_encode_callback_,
encoder_rate_observer,
qm_settings_callback,
nullptr),
sender_(clock, &post_encode_callback_, encoder_rate_observer, nullptr),
receiver_(clock,
event_factory,
pre_decode_image_callback,
@ -147,9 +142,8 @@ class VideoCodingModuleImpl : public VideoCodingModule {
}
int32_t AddVideoFrame(const VideoFrame& videoFrame,
const VideoContentMetrics* contentMetrics,
const CodecSpecificInfo* codecSpecificInfo) override {
return sender_.AddVideoFrame(videoFrame, contentMetrics, codecSpecificInfo);
return sender_.AddVideoFrame(videoFrame, codecSpecificInfo);
}
int32_t IntraFrameRequest(size_t stream_index) override {
@ -298,9 +292,9 @@ VideoCodingModule* VideoCodingModule::Create(
NackSender* nack_sender,
KeyFrameRequestSender* keyframe_request_sender,
EncodedImageCallback* pre_decode_image_callback) {
return new VideoCodingModuleImpl(
clock, nullptr, encoder_rate_observer, qm_settings_callback, nack_sender,
keyframe_request_sender, pre_decode_image_callback);
return new VideoCodingModuleImpl(clock, nullptr, encoder_rate_observer,
nack_sender, keyframe_request_sender,
pre_decode_image_callback);
}
// Create method for current interface, will be removed when the
@ -320,9 +314,8 @@ VideoCodingModule* VideoCodingModule::Create(
KeyFrameRequestSender* keyframe_request_sender) {
assert(clock);
assert(event_factory);
return new VideoCodingModuleImpl(clock, event_factory, nullptr, nullptr,
nack_sender, keyframe_request_sender,
nullptr);
return new VideoCodingModuleImpl(clock, event_factory, nullptr, nack_sender,
keyframe_request_sender, nullptr);
}
} // namespace webrtc

View File

@ -59,7 +59,6 @@ class VideoSender : public Module {
VideoSender(Clock* clock,
EncodedImageCallback* post_encode_callback,
VideoEncoderRateObserver* encoder_rate_observer,
VCMQMSettingsCallback* qm_settings_callback,
VCMSendStatisticsCallback* send_stats_callback);
~VideoSender();
@ -85,7 +84,6 @@ class VideoSender : public Module {
void SetVideoProtection(VCMVideoProtection videoProtection);
int32_t AddVideoFrame(const VideoFrame& videoFrame,
const VideoContentMetrics* _contentMetrics,
const CodecSpecificInfo* codecSpecificInfo);
int32_t IntraFrameRequest(size_t stream_index);
@ -116,7 +114,6 @@ class VideoSender : public Module {
VideoCodec current_codec_;
rtc::ThreadChecker main_thread_;
VCMQMSettingsCallback* const qm_settings_callback_;
VCMProtectionCallback* protection_callback_;
rtc::CriticalSection params_crit_;

View File

@ -27,7 +27,6 @@ namespace vcm {
VideoSender::VideoSender(Clock* clock,
EncodedImageCallback* post_encode_callback,
VideoEncoderRateObserver* encoder_rate_observer,
VCMQMSettingsCallback* qm_settings_callback,
VCMSendStatisticsCallback* send_stats_callback)
: clock_(clock),
_encoder(nullptr),
@ -38,16 +37,14 @@ VideoSender::VideoSender(Clock* clock,
frame_dropper_enabled_(true),
_sendStatsTimer(1000, clock_),
current_codec_(),
qm_settings_callback_(qm_settings_callback),
protection_callback_(nullptr),
encoder_params_({0, 0, 0, 0}),
encoder_has_internal_source_(false),
next_frame_types_(1, kVideoFrameDelta) {
_mediaOpt.Reset();
// Allow VideoSender to be created on one thread but used on another, post
// construction. This is currently how this class is being used by at least
// one external project (diffractor).
_mediaOpt.EnableQM(qm_settings_callback_ != nullptr);
_mediaOpt.Reset();
main_thread_.DetachFromThread();
}
@ -203,9 +200,8 @@ int VideoSender::FrameRate(unsigned int* framerate) const {
int32_t VideoSender::SetChannelParameters(uint32_t target_bitrate,
uint8_t lossRate,
int64_t rtt) {
uint32_t target_rate =
_mediaOpt.SetTargetRates(target_bitrate, lossRate, rtt,
protection_callback_, qm_settings_callback_);
uint32_t target_rate = _mediaOpt.SetTargetRates(target_bitrate, lossRate, rtt,
protection_callback_);
uint32_t input_frame_rate = _mediaOpt.InputFrameRate();
@ -274,7 +270,6 @@ void VideoSender::SetVideoProtection(VCMVideoProtection videoProtection) {
}
// Add one raw video frame to the encoder, blocking.
int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
const VideoContentMetrics* contentMetrics,
const CodecSpecificInfo* codecSpecificInfo) {
EncoderParameters encoder_params;
std::vector<FrameType> next_frame_types;
@ -296,7 +291,6 @@ int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
_encoder->OnDroppedFrame();
return VCM_OK;
}
_mediaOpt.UpdateContentData(contentMetrics);
// TODO(pbos): Make sure setting send codec is synchronized with video
// processing so frame size always matches.
if (!_codecDataBase.MatchesCurrentResolution(videoFrame.width(),

View File

@ -180,13 +180,13 @@ class TestVideoSender : public ::testing::Test {
TestVideoSender() : clock_(1000), encoded_frame_callback_(&clock_) {}
void SetUp() override {
sender_.reset(new VideoSender(&clock_, &encoded_frame_callback_, nullptr,
nullptr, nullptr));
sender_.reset(
new VideoSender(&clock_, &encoded_frame_callback_, nullptr, nullptr));
}
void AddFrame() {
assert(generator_.get());
sender_->AddVideoFrame(*generator_->NextFrame(), NULL, NULL);
sender_->AddVideoFrame(*generator_->NextFrame(), NULL);
}
SimulatedClock clock_;