Delete unused methods of the VideoProcessing class, and fix a typo.

Rename EnableDenosing --> EnableDenoising.
Delete VideoProcessing FrameStats methods.
Delete VideoProcessingImpl::BrightnessDetection and related files.
Delete VideoProcessingImpl::Deflickering and related files.
Delete VideoProcessing::Brighten.

BUG=

Review URL: https://codereview.webrtc.org/1901393003

Cr-Commit-Position: refs/heads/master@{#12521}
Author: nisse
Date: 2016-04-27 00:59:22 -07:00
Committed by: Commit bot
Parent: 8833f850cf
Commit: 90c335a100
15 changed files with 8 additions and 1124 deletions
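
After this CL the VideoProcessing interface is reduced to the pre-processor unit (temporal decimation, spatial resampling, denoising, content analysis). A minimal sketch of driving what remains, using the corrected EnableDenoising spelling; the capture loop and the encoder hand-off are placeholder assumptions, not part of this commit:

#include <memory>
#include <vector>

#include "webrtc/modules/video_processing/include/video_processing.h"

// Hypothetical wiring; |frames| stands in for a real capture source.
void RunPreprocessor(const std::vector<webrtc::VideoFrame>& frames) {
  std::unique_ptr<webrtc::VideoProcessing> vp(
      webrtc::VideoProcessing::Create());
  vp->EnableDenoising(true);  // Renamed from EnableDenosing in this CL.
  for (const webrtc::VideoFrame& frame : frames) {
    const webrtc::VideoFrame* out = vp->PreprocessFrame(frame);
    if (out == nullptr)
      continue;  // Frame was dropped, e.g. by temporal decimation.
    // Hand *out to the encoder here.
  }
}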

webrtc/modules/modules.gyp

@@ -385,9 +385,7 @@
         'video_coding/utility/frame_dropper_unittest.cc',
         'video_coding/utility/ivf_file_writer_unittest.cc',
         'video_coding/utility/quality_scaler_unittest.cc',
-        'video_processing/test/brightness_detection_test.cc',
         'video_processing/test/content_metrics_test.cc',
-        'video_processing/test/deflickering_test.cc',
         'video_processing/test/denoiser_test.cc',
         'video_processing/test/video_processing_unittest.cc',
         'video_processing/test/video_processing_unittest.h',

webrtc/modules/video_processing/BUILD.gn

@@ -13,12 +13,8 @@ build_video_processing_sse2 = current_cpu == "x86" || current_cpu == "x64"
 source_set("video_processing") {
   sources = [
-    "brightness_detection.cc",
-    "brightness_detection.h",
     "content_analysis.cc",
     "content_analysis.h",
-    "deflickering.cc",
-    "deflickering.h",
     "frame_preprocessor.cc",
     "frame_preprocessor.h",
     "include/video_processing.h",

webrtc/modules/video_processing/brightness_detection.cc (deleted)

@@ -1,136 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_processing/brightness_detection.h"
#include <math.h>
#include "webrtc/modules/video_processing/include/video_processing.h"
namespace webrtc {
VPMBrightnessDetection::VPMBrightnessDetection() {
Reset();
}
VPMBrightnessDetection::~VPMBrightnessDetection() {}
void VPMBrightnessDetection::Reset() {
frame_cnt_bright_ = 0;
frame_cnt_dark_ = 0;
}
int32_t VPMBrightnessDetection::ProcessFrame(
const VideoFrame& frame,
const VideoProcessing::FrameStats& stats) {
if (frame.IsZeroSize()) {
return VPM_PARAMETER_ERROR;
}
int width = frame.width();
int height = frame.height();
if (!VideoProcessing::ValidFrameStats(stats)) {
return VPM_PARAMETER_ERROR;
}
const uint8_t frame_cnt_alarm = 2;
// Get proportion in lowest bins.
uint8_t low_th = 20;
float prop_low = 0;
for (uint32_t i = 0; i < low_th; i++) {
prop_low += stats.hist[i];
}
prop_low /= stats.num_pixels;
// Get proportion in highest bins.
unsigned char high_th = 230;
float prop_high = 0;
for (uint32_t i = high_th; i < 256; i++) {
prop_high += stats.hist[i];
}
prop_high /= stats.num_pixels;
if (prop_high < 0.4) {
if (stats.mean < 90 || stats.mean > 170) {
// Standard deviation of Y
const uint8_t* buffer = frame.buffer(kYPlane);
float std_y = 0;
for (int h = 0; h < height; h += (1 << stats.sub_sampling_factor)) {
int row = h * width;
for (int w = 0; w < width; w += (1 << stats.sub_sampling_factor)) {
std_y +=
(buffer[w + row] - stats.mean) * (buffer[w + row] - stats.mean);
}
}
std_y = sqrt(std_y / stats.num_pixels);
// Get percentiles.
uint32_t sum = 0;
uint32_t median_y = 140;
uint32_t perc05 = 0;
uint32_t perc95 = 255;
float pos_perc05 = stats.num_pixels * 0.05f;
float pos_median = stats.num_pixels * 0.5f;
float posPerc95 = stats.num_pixels * 0.95f;
for (uint32_t i = 0; i < 256; i++) {
sum += stats.hist[i];
if (sum < pos_perc05)
perc05 = i; // 5th perc.
if (sum < pos_median)
median_y = i; // 50th perc.
if (sum < posPerc95)
perc95 = i; // 95th perc.
else
break;
}
// Check if image is too dark
if ((std_y < 55) && (perc05 < 50)) {
if (median_y < 60 || stats.mean < 80 || perc95 < 130 ||
prop_low > 0.20) {
frame_cnt_dark_++;
} else {
frame_cnt_dark_ = 0;
}
} else {
frame_cnt_dark_ = 0;
}
// Check if image is too bright
if ((std_y < 52) && (perc95 > 200) && (median_y > 160)) {
if (median_y > 185 || stats.mean > 185 || perc05 > 140 ||
prop_high > 0.25) {
frame_cnt_bright_++;
} else {
frame_cnt_bright_ = 0;
}
} else {
frame_cnt_bright_ = 0;
}
} else {
frame_cnt_dark_ = 0;
frame_cnt_bright_ = 0;
}
} else {
frame_cnt_bright_++;
frame_cnt_dark_ = 0;
}
if (frame_cnt_dark_ > frame_cnt_alarm) {
return VideoProcessing::kDarkWarning;
} else if (frame_cnt_bright_ > frame_cnt_alarm) {
return VideoProcessing::kBrightWarning;
} else {
return VideoProcessing::kNoWarning;
}
}
} // namespace webrtc
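
For context on the deleted logic: the detector classified a frame by combining the luminance mean, standard deviation, and histogram percentiles. The percentile pass is the only subtle part: it walks the 256-bin histogram once, recording the last bin whose cumulative count is still below each target position, mirroring the loop in the removed ProcessFrame(). A standalone sketch of that computation (the function and struct names are illustrative, not from the tree):

#include <cstdint>

struct Percentiles {
  int p05, p50, p95;
};

// Single pass over a 256-bin luminance histogram; |num_pixels| is the
// total sample count, as in VideoProcessing::FrameStats.
Percentiles HistogramPercentiles(const uint32_t hist[256],
                                 uint32_t num_pixels) {
  Percentiles p = {0, 0, 255};
  const float pos05 = num_pixels * 0.05f;
  const float pos50 = num_pixels * 0.5f;
  const float pos95 = num_pixels * 0.95f;
  uint32_t sum = 0;
  for (int i = 0; i < 256; ++i) {
    sum += hist[i];
    if (sum < pos05) p.p05 = i;  // Still below the 5th percentile.
    if (sum < pos50) p.p50 = i;  // Still below the median.
    if (sum < pos95)
      p.p95 = i;                 // Still below the 95th percentile.
    else
      break;                     // Nothing left to update.
  }
  return p;
}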

webrtc/modules/video_processing/brightness_detection.h (deleted)

@@ -1,35 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_BRIGHTNESS_DETECTION_H_
#define WEBRTC_MODULES_VIDEO_PROCESSING_BRIGHTNESS_DETECTION_H_
#include "webrtc/modules/video_processing/include/video_processing.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class VPMBrightnessDetection {
public:
VPMBrightnessDetection();
~VPMBrightnessDetection();
void Reset();
int32_t ProcessFrame(const VideoFrame& frame,
const VideoProcessing::FrameStats& stats);
private:
uint32_t frame_cnt_bright_;
uint32_t frame_cnt_dark_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_PROCESSING_BRIGHTNESS_DETECTION_H_

webrtc/modules/video_processing/deflickering.cc (deleted)

@@ -1,402 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_processing/deflickering.h"
#include <math.h>
#include <stdlib.h>
#include "webrtc/base/logging.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/system_wrappers/include/sort.h"
namespace webrtc {
// Detection constants
// (Q4) Maximum allowed deviation for detection.
enum { kFrequencyDeviation = 39 };
// (Q4) Minimum frequency that can be detected.
enum { kMinFrequencyToDetect = 32 };
// Number of flickers before we accept detection
enum { kNumFlickerBeforeDetect = 2 };
enum { kmean_valueScaling = 4 }; // (Q4) In power of 2
// Dead-zone region in terms of pixel values
enum { kZeroCrossingDeadzone = 10 };
// Deflickering constants.
// Compute the quantiles over 1 / DownsamplingFactor of the image.
enum { kDownsamplingFactor = 8 };
enum { kLog2OfDownsamplingFactor = 3 };
// To generate in Matlab:
// >> probUW16 = round(2^11 *
// [0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.97]);
// >> fprintf('%d, ', probUW16)
// Resolution reduced to avoid overflow when multiplying with the
// (potentially) large number of pixels.
const uint16_t VPMDeflickering::prob_uw16_[kNumProbs] = {
102, 205, 410, 614, 819, 1024,
1229, 1434, 1638, 1843, 1946, 1987}; // <Q11>
// To generate in Matlab:
// >> numQuants = 14; maxOnlyLength = 5;
// >> weightUW16 = round(2^15 *
// [linspace(0.5, 1.0, numQuants - maxOnlyLength)]);
// >> fprintf('%d, %d,\n ', weightUW16);
const uint16_t VPMDeflickering::weight_uw16_[kNumQuants - kMaxOnlyLength] = {
16384, 18432, 20480, 22528, 24576, 26624, 28672, 30720, 32768}; // <Q15>
VPMDeflickering::VPMDeflickering() {
Reset();
}
VPMDeflickering::~VPMDeflickering() {}
void VPMDeflickering::Reset() {
mean_buffer_length_ = 0;
detection_state_ = 0;
frame_rate_ = 0;
memset(mean_buffer_, 0, sizeof(int32_t) * kMeanBufferLength);
memset(timestamp_buffer_, 0, sizeof(int32_t) * kMeanBufferLength);
// Initialize the history with a uniformly distributed histogram.
quant_hist_uw8_[0][0] = 0;
quant_hist_uw8_[0][kNumQuants - 1] = 255;
for (int32_t i = 0; i < kNumProbs; i++) {
// Unsigned round. <Q0>
quant_hist_uw8_[0][i + 1] =
static_cast<uint8_t>((prob_uw16_[i] * 255 + (1 << 10)) >> 11);
}
for (int32_t i = 1; i < kFrameHistory_size; i++) {
memcpy(quant_hist_uw8_[i], quant_hist_uw8_[0],
sizeof(uint8_t) * kNumQuants);
}
}
int32_t VPMDeflickering::ProcessFrame(VideoFrame* frame,
VideoProcessing::FrameStats* stats) {
assert(frame);
uint32_t frame_memory;
uint8_t quant_uw8[kNumQuants];
uint8_t maxquant_uw8[kNumQuants];
uint8_t minquant_uw8[kNumQuants];
uint16_t target_quant_uw16[kNumQuants];
uint16_t increment_uw16;
uint8_t map_uw8[256];
uint16_t tmp_uw16;
uint32_t tmp_uw32;
int width = frame->width();
int height = frame->height();
if (frame->IsZeroSize()) {
return VPM_GENERAL_ERROR;
}
// Stricter height check due to subsampling size calculation below.
if (height < 2) {
LOG(LS_ERROR) << "Invalid frame size.";
return VPM_GENERAL_ERROR;
}
if (!VideoProcessing::ValidFrameStats(*stats)) {
return VPM_GENERAL_ERROR;
}
if (PreDetection(frame->timestamp(), *stats) == -1)
return VPM_GENERAL_ERROR;
// Flicker detection
int32_t det_flicker = DetectFlicker();
if (det_flicker < 0) {
return VPM_GENERAL_ERROR;
} else if (det_flicker != 1) {
return 0;
}
// Size of luminance component.
const uint32_t y_size = height * width;
const uint32_t y_sub_size =
width * (((height - 1) >> kLog2OfDownsamplingFactor) + 1);
uint8_t* y_sorted = new uint8_t[y_sub_size];
uint32_t sort_row_idx = 0;
for (int i = 0; i < height; i += kDownsamplingFactor) {
memcpy(y_sorted + sort_row_idx * width, frame->buffer(kYPlane) + i * width,
width);
sort_row_idx++;
}
webrtc::Sort(y_sorted, y_sub_size, webrtc::TYPE_UWord8);
uint32_t prob_idx_uw32 = 0;
quant_uw8[0] = 0;
quant_uw8[kNumQuants - 1] = 255;
// Ensure we won't get an overflow below.
// In practice, the number of subsampled pixels will not become this large.
if (y_sub_size > (1 << 21) - 1) {
LOG(LS_ERROR) << "Subsampled number of pixels too large.";
return -1;
}
for (int32_t i = 0; i < kNumProbs; i++) {
// <Q0>.
prob_idx_uw32 = WEBRTC_SPL_UMUL_32_16(y_sub_size, prob_uw16_[i]) >> 11;
quant_uw8[i + 1] = y_sorted[prob_idx_uw32];
}
delete[] y_sorted;
y_sorted = NULL;
// Shift history for new frame.
memmove(quant_hist_uw8_[1], quant_hist_uw8_[0],
(kFrameHistory_size - 1) * kNumQuants * sizeof(uint8_t));
// Store current frame in history.
memcpy(quant_hist_uw8_[0], quant_uw8, kNumQuants * sizeof(uint8_t));
// We use a frame memory equal to the ceiling of half the frame rate to
// ensure we capture an entire period of flicker.
frame_memory = (frame_rate_ + (1 << 5)) >> 5; // Unsigned ceiling. <Q0>
// frame_rate_ in Q4.
if (frame_memory > kFrameHistory_size) {
frame_memory = kFrameHistory_size;
}
// Get maximum and minimum.
for (int32_t i = 0; i < kNumQuants; i++) {
maxquant_uw8[i] = 0;
minquant_uw8[i] = 255;
for (uint32_t j = 0; j < frame_memory; j++) {
if (quant_hist_uw8_[j][i] > maxquant_uw8[i]) {
maxquant_uw8[i] = quant_hist_uw8_[j][i];
}
if (quant_hist_uw8_[j][i] < minquant_uw8[i]) {
minquant_uw8[i] = quant_hist_uw8_[j][i];
}
}
}
// Get target quantiles.
for (int32_t i = 0; i < kNumQuants - kMaxOnlyLength; i++) {
// target = w * maxquant_uw8 + (1 - w) * minquant_uw8
// Weights w = |weight_uw16_| are in Q15, hence the final output has to be
// right shifted by 8 to end up in Q7.
target_quant_uw16[i] = static_cast<uint16_t>(
(weight_uw16_[i] * maxquant_uw8[i] +
((1 << 15) - weight_uw16_[i]) * minquant_uw8[i]) >>
8); // <Q7>
}
for (int32_t i = kNumQuants - kMaxOnlyLength; i < kNumQuants; i++) {
target_quant_uw16[i] = ((uint16_t)maxquant_uw8[i]) << 7;
}
// Compute the map from input to output pixels.
uint16_t mapUW16; // <Q7>
for (int32_t i = 1; i < kNumQuants; i++) {
// As quant and targetQuant are limited to UWord8, it's safe to use Q7 here.
tmp_uw32 =
static_cast<uint32_t>(target_quant_uw16[i] - target_quant_uw16[i - 1]);
tmp_uw16 = static_cast<uint16_t>(quant_uw8[i] - quant_uw8[i - 1]); // <Q0>
if (tmp_uw16 > 0) {
increment_uw16 =
static_cast<uint16_t>(WebRtcSpl_DivU32U16(tmp_uw32,
tmp_uw16)); // <Q7>
} else {
// The value is irrelevant; the loop below will only iterate once.
increment_uw16 = 0;
}
mapUW16 = target_quant_uw16[i - 1];
for (uint32_t j = quant_uw8[i - 1]; j < (uint32_t)(quant_uw8[i] + 1); j++) {
// Unsigned round. <Q0>
map_uw8[j] = (uint8_t)((mapUW16 + (1 << 6)) >> 7);
mapUW16 += increment_uw16;
}
}
// Map to the output frame.
uint8_t* buffer = frame->buffer(kYPlane);
for (uint32_t i = 0; i < y_size; i++) {
buffer[i] = map_uw8[buffer[i]];
}
// Frame was altered, so reset stats.
VideoProcessing::ClearFrameStats(stats);
return VPM_OK;
}
/**
Performs some pre-detection operations. Must be called before
DetectFlicker().
\param[in] timestamp Timestamp of the current frame.
\param[in] stats Statistics of the current frame.
\return 0: Success\n
2: Detection not possible due to flickering frequency too close to
zero.\n
-1: Error
*/
int32_t VPMDeflickering::PreDetection(
const uint32_t timestamp,
const VideoProcessing::FrameStats& stats) {
int32_t mean_val; // Mean value of frame (Q4)
uint32_t frame_rate = 0;
int32_t meanBufferLength; // Temp variable.
mean_val = ((stats.sum << kmean_valueScaling) / stats.num_pixels);
// Update mean value buffer.
// This should be done even though we might end up in an unreliable detection.
memmove(mean_buffer_ + 1, mean_buffer_,
(kMeanBufferLength - 1) * sizeof(int32_t));
mean_buffer_[0] = mean_val;
// Update timestamp buffer.
// This should be done even though we might end up in an unreliable detection.
memmove(timestamp_buffer_ + 1, timestamp_buffer_,
(kMeanBufferLength - 1) * sizeof(uint32_t));
timestamp_buffer_[0] = timestamp;
/* Compute current frame rate (Q4) */
if (timestamp_buffer_[kMeanBufferLength - 1] != 0) {
frame_rate = ((90000 << 4) * (kMeanBufferLength - 1));
frame_rate /=
(timestamp_buffer_[0] - timestamp_buffer_[kMeanBufferLength - 1]);
} else if (timestamp_buffer_[1] != 0) {
frame_rate = (90000 << 4) / (timestamp_buffer_[0] - timestamp_buffer_[1]);
}
/* Determine required size of mean value buffer (mean_buffer_length_) */
if (frame_rate == 0) {
meanBufferLength = 1;
} else {
meanBufferLength =
(kNumFlickerBeforeDetect * frame_rate) / kMinFrequencyToDetect;
}
/* Sanity check of buffer length */
if (meanBufferLength >= kMeanBufferLength) {
/* Too long buffer. The flickering frequency is too close to zero, which
* makes the estimation unreliable.
*/
mean_buffer_length_ = 0;
return 2;
}
mean_buffer_length_ = meanBufferLength;
if ((timestamp_buffer_[mean_buffer_length_ - 1] != 0) &&
(mean_buffer_length_ != 1)) {
frame_rate = ((90000 << 4) * (mean_buffer_length_ - 1));
frame_rate /=
(timestamp_buffer_[0] - timestamp_buffer_[mean_buffer_length_ - 1]);
} else if (timestamp_buffer_[1] != 0) {
frame_rate = (90000 << 4) / (timestamp_buffer_[0] - timestamp_buffer_[1]);
}
frame_rate_ = frame_rate;
return VPM_OK;
}
/**
This function detects flicker in the video stream. As a side effect the
mean value buffer is updated with the new mean value.
\return 0: No flickering detected\n
1: Flickering detected\n
2: Detection not possible due to unreliable frequency interval
-1: Error
*/
int32_t VPMDeflickering::DetectFlicker() {
uint32_t i;
int32_t freqEst; // (Q4) Frequency estimate to base detection upon
int32_t ret_val = -1;
/* Sanity check for mean_buffer_length_ */
if (mean_buffer_length_ < 2) {
/* Not possible to estimate frequency */
return 2;
}
// Count zero crossings with a dead zone to be robust against noise. If the
// noise std is 2 pixel this corresponds to about 95% confidence interval.
int32_t deadzone = (kZeroCrossingDeadzone << kmean_valueScaling); // Q4
int32_t meanOfBuffer = 0; // Mean value of mean value buffer.
int32_t numZeros = 0; // Number of zeros that cross the dead-zone.
int32_t cntState = 0; // State variable for zero crossing regions.
int32_t cntStateOld = 0; // Previous state for zero crossing regions.
for (i = 0; i < mean_buffer_length_; i++) {
meanOfBuffer += mean_buffer_[i];
}
meanOfBuffer += (mean_buffer_length_ >> 1); // Rounding, not truncation.
meanOfBuffer /= mean_buffer_length_;
// Count zero crossings.
cntStateOld = (mean_buffer_[0] >= (meanOfBuffer + deadzone));
cntStateOld -= (mean_buffer_[0] <= (meanOfBuffer - deadzone));
for (i = 1; i < mean_buffer_length_; i++) {
cntState = (mean_buffer_[i] >= (meanOfBuffer + deadzone));
cntState -= (mean_buffer_[i] <= (meanOfBuffer - deadzone));
if (cntStateOld == 0) {
cntStateOld = -cntState;
}
if (((cntState + cntStateOld) == 0) && (cntState != 0)) {
numZeros++;
cntStateOld = cntState;
}
}
// END count zero crossings.
/* Frequency estimation according to:
* freqEst = numZeros * frame_rate / 2 / mean_buffer_length_;
*
* Resolution is set to Q4
*/
freqEst = ((numZeros * 90000) << 3);
freqEst /=
(timestamp_buffer_[0] - timestamp_buffer_[mean_buffer_length_ - 1]);
/* Translate frequency estimate to regions close to 100 and 120 Hz */
uint8_t freqState = 0; // Current translation state;
// (0) Not in interval,
// (1) Within valid interval,
// (2) Out of range
int32_t freqAlias = freqEst;
if (freqEst > kMinFrequencyToDetect) {
uint8_t aliasState = 1;
while (freqState == 0) {
/* Increase frequency */
freqAlias += (aliasState * frame_rate_);
freqAlias += ((freqEst << 1) * (1 - (aliasState << 1)));
/* Compute state */
freqState = (abs(freqAlias - (100 << 4)) <= kFrequencyDeviation);
freqState += (abs(freqAlias - (120 << 4)) <= kFrequencyDeviation);
freqState += 2 * (freqAlias > ((120 << 4) + kFrequencyDeviation));
/* Switch alias state */
aliasState++;
aliasState &= 0x01;
}
}
/* Is frequency estimate within detection region? */
if (freqState == 1) {
ret_val = 1;
} else if (freqState == 0) {
ret_val = 2;
} else {
ret_val = 0;
}
return ret_val;
}
} // namespace webrtc
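
The fixed-point frequency estimate in DetectFlicker() is easier to verify in plain math. RTP timestamps tick at 90 kHz, so the mean-value buffer of length $N$ spans $\Delta t = (t_0 - t_{N-1})/90000$ seconds, and $n_z$ dead-zoned zero crossings correspond to roughly $n_z/2$ flicker periods:

$$f \approx \frac{n_z/2}{\Delta t} = \frac{90000\, n_z}{2\,(t_0 - t_{N-1})}\ \text{Hz}.$$

Scaling to Q4 multiplies by $2^4$, and $16/2 = 2^3$, which is exactly the code's ((numZeros * 90000) << 3) / (timestamp_buffer_[0] - timestamp_buffer_[mean_buffer_length_ - 1]). A 100 Hz flicker (fluorescent light on 50 Hz mains) then lands near 100 * 16 = 1600 in Q4, matching the (100 << 4) comparison in the alias loop.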

webrtc/modules/video_processing/deflickering.h (deleted)

@@ -1,55 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_DEFLICKERING_H_
#define WEBRTC_MODULES_VIDEO_PROCESSING_DEFLICKERING_H_
#include <string.h> // NULL
#include "webrtc/modules/video_processing/include/video_processing.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class VPMDeflickering {
public:
VPMDeflickering();
~VPMDeflickering();
void Reset();
int32_t ProcessFrame(VideoFrame* frame, VideoProcessing::FrameStats* stats);
private:
int32_t PreDetection(uint32_t timestamp,
const VideoProcessing::FrameStats& stats);
int32_t DetectFlicker();
enum { kMeanBufferLength = 32 };
enum { kFrameHistory_size = 15 };
enum { kNumProbs = 12 };
enum { kNumQuants = kNumProbs + 2 };
enum { kMaxOnlyLength = 5 };
uint32_t mean_buffer_length_;
uint8_t detection_state_; // 0: No flickering
// 1: Flickering detected
// 2: In flickering
int32_t mean_buffer_[kMeanBufferLength];
uint32_t timestamp_buffer_[kMeanBufferLength];
uint32_t frame_rate_;
static const uint16_t prob_uw16_[kNumProbs];
static const uint16_t weight_uw16_[kNumQuants - kMaxOnlyLength];
uint8_t quant_hist_uw8_[kFrameHistory_size][kNumQuants];
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_PROCESSING_DEFLICKERING_H_
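
In summary, the removed filter worked by quantile mapping: each frame's 14 luminance quantiles $q_i$ were measured at the probabilities in prob_uw16_, their per-quantile minimum and maximum over the recent history quant_hist_uw8_ were tracked, and each target quantile blended the two,

$$T_i = w_i \max_j q_i^{(j)} + (1 - w_i)\, \min_j q_i^{(j)}, \qquad w_i \in [0.5, 1.0],$$

with the top kMaxOnlyLength quantiles pinned to the maximum. Pixel values were then remapped piecewise-linearly between consecutive $(q_i, T_i)$ pairs, damping frame-to-frame luminance oscillation while keeping overall contrast.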

webrtc/modules/video_processing/frame_preprocessor.cc

@@ -22,7 +22,7 @@ VPMFramePreprocessor::VPMFramePreprocessor()
   spatial_resampler_ = new VPMSimpleSpatialResampler();
   ca_ = new VPMContentAnalysis(true);
   vd_ = new VPMVideoDecimator();
-  EnableDenosing(false);
+  EnableDenoising(false);
   denoised_frame_toggle_ = 0;
 }

@@ -87,7 +87,7 @@ uint32_t VPMFramePreprocessor::GetDecimatedHeight() const {
   return spatial_resampler_->TargetHeight();
 }

-void VPMFramePreprocessor::EnableDenosing(bool enable) {
+void VPMFramePreprocessor::EnableDenoising(bool enable) {
   if (enable) {
     denoiser_.reset(new VideoDenoiser(true));
   } else {

webrtc/modules/video_processing/frame_preprocessor.h

@@ -57,7 +57,7 @@ class VPMFramePreprocessor {
   uint32_t GetDecimatedHeight() const;

   // Preprocess output:
-  void EnableDenosing(bool enable);
+  void EnableDenoising(bool enable);
   const VideoFrame* PreprocessFrame(const VideoFrame& frame);
   VideoContentMetrics* GetContentMetrics() const;

webrtc/modules/video_processing/include/video_processing.h

@@ -28,46 +28,9 @@ namespace webrtc {
 class VideoProcessing {
  public:
-  struct FrameStats {
-    uint32_t hist[256];  // Frame histogram.
-    uint32_t mean;
-    uint32_t sum;
-    uint32_t num_pixels;
-    uint32_t sub_sampling_factor;  // Sub-sampling factor, in powers of 2.
-  };
-
-  enum BrightnessWarning { kNoWarning, kDarkWarning, kBrightWarning };
-
   static VideoProcessing* Create();
   virtual ~VideoProcessing() {}

-  // Retrieves statistics for the input frame. This function must be used to
-  // prepare a FrameStats struct for use in certain VPM functions.
-  static void GetFrameStats(const VideoFrame& frame, FrameStats* stats);
-
-  // Checks the validity of a FrameStats struct. Currently, valid implies only
-  // that it has changed from its initialized state.
-  static bool ValidFrameStats(const FrameStats& stats);
-
-  static void ClearFrameStats(FrameStats* stats);
-
-  // Increases/decreases the luminance value by 'delta'.
-  static void Brighten(int delta, VideoFrame* frame);
-
-  // Detects and removes camera flicker from a video stream. Every frame from
-  // the stream must be passed in. A frame will only be altered if flicker has
-  // been detected. Has a fixed-point implementation.
-  // Frame statistics provided by GetFrameStats(). On return the stats will
-  // be reset to zero if the frame was altered. Call GetFrameStats() again
-  // if the statistics for the altered frame are required.
-  virtual int32_t Deflickering(VideoFrame* frame, FrameStats* stats) = 0;
-
-  // Detects if a video frame is excessively bright or dark. Returns a
-  // warning if this is the case. Multiple frames should be passed in before
-  // expecting a warning. Has a floating-point implementation.
-  virtual int32_t BrightnessDetection(const VideoFrame& frame,
-                                      const FrameStats& stats) = 0;
-
   // The following functions refer to the pre-processor unit within VPM. The
   // pre-processor performs spatial/temporal decimation and content analysis on
   // the frames prior to encoding.

@@ -88,7 +51,7 @@ class VideoProcessing {
   virtual void SetInputFrameResampleMode(
       VideoFrameResampling resampling_mode) = 0;

-  virtual void EnableDenosing(bool enable) = 0;
+  virtual void EnableDenoising(bool enable) = 0;
   virtual const VideoFrame* PreprocessFrame(const VideoFrame& frame) = 0;
   virtual VideoContentMetrics* GetContentMetrics() const = 0;

webrtc/modules/video_processing/test/brightness_detection_test.cc (deleted)

@@ -1,122 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <memory>
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_processing/include/video_processing.h"
#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
namespace webrtc {
#if defined(WEBRTC_IOS)
#define MAYBE_BrightnessDetection DISABLED_BrightnessDetection
#else
#define MAYBE_BrightnessDetection BrightnessDetection
#endif
TEST_F(VideoProcessingTest, MAYBE_BrightnessDetection) {
uint32_t frameNum = 0;
int32_t brightnessWarning = 0;
uint32_t warningCount = 0;
std::unique_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
frame_length_) {
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
0, kVideoRotation_0, &video_frame_));
frameNum++;
VideoProcessing::FrameStats stats;
vp_->GetFrameStats(video_frame_, &stats);
EXPECT_GT(stats.num_pixels, 0u);
ASSERT_GE(brightnessWarning = vp_->BrightnessDetection(video_frame_, stats),
0);
if (brightnessWarning != VideoProcessing::kNoWarning) {
warningCount++;
}
}
ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
// Expect few warnings
float warningProportion = static_cast<float>(warningCount) / frameNum * 100;
printf("\nWarning proportions:\n");
printf("Stock foreman: %.1f %%\n", warningProportion);
EXPECT_LT(warningProportion, 10);
rewind(source_file_);
frameNum = 0;
warningCount = 0;
while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
frame_length_ &&
frameNum < 300) {
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
0, kVideoRotation_0, &video_frame_));
frameNum++;
uint8_t* frame = video_frame_.buffer(kYPlane);
uint32_t yTmp = 0;
for (int yIdx = 0; yIdx < width_ * height_; yIdx++) {
yTmp = frame[yIdx] << 1;
if (yTmp > 255) {
yTmp = 255;
}
frame[yIdx] = static_cast<uint8_t>(yTmp);
}
VideoProcessing::FrameStats stats;
vp_->GetFrameStats(video_frame_, &stats);
EXPECT_GT(stats.num_pixels, 0u);
ASSERT_GE(brightnessWarning = vp_->BrightnessDetection(video_frame_, stats),
0);
EXPECT_NE(VideoProcessing::kDarkWarning, brightnessWarning);
if (brightnessWarning == VideoProcessing::kBrightWarning) {
warningCount++;
}
}
ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
// Expect many brightness warnings
warningProportion = static_cast<float>(warningCount) / frameNum * 100;
printf("Bright foreman: %.1f %%\n", warningProportion);
EXPECT_GT(warningProportion, 95);
rewind(source_file_);
frameNum = 0;
warningCount = 0;
while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
frame_length_ &&
frameNum < 300) {
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
0, kVideoRotation_0, &video_frame_));
frameNum++;
uint8_t* y_plane = video_frame_.buffer(kYPlane);
int32_t yTmp = 0;
for (int yIdx = 0; yIdx < width_ * height_; yIdx++) {
yTmp = y_plane[yIdx] >> 1;
y_plane[yIdx] = static_cast<uint8_t>(yTmp);
}
VideoProcessing::FrameStats stats;
vp_->GetFrameStats(video_frame_, &stats);
EXPECT_GT(stats.num_pixels, 0u);
ASSERT_GE(brightnessWarning = vp_->BrightnessDetection(video_frame_, stats),
0);
EXPECT_NE(VideoProcessing::kBrightWarning, brightnessWarning);
if (brightnessWarning == VideoProcessing::kDarkWarning) {
warningCount++;
}
}
ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
// Expect many darkness warnings
warningProportion = static_cast<float>(warningCount) / frameNum * 100;
printf("Dark foreman: %.1f %%\n\n", warningProportion);
EXPECT_GT(warningProportion, 90);
}
} // namespace webrtc

webrtc/modules/video_processing/test/deflickering_test.cc (deleted)

@@ -1,100 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdio.h>
#include <stdlib.h>
#include <memory>
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_processing/include/video_processing.h"
#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
#if defined(WEBRTC_IOS)
TEST_F(VideoProcessingTest, DISABLED_Deflickering) {
#else
TEST_F(VideoProcessingTest, Deflickering) {
#endif
enum { NumRuns = 30 };
uint32_t frameNum = 0;
const uint32_t frame_rate = 15;
int64_t min_runtime = 0;
int64_t avg_runtime = 0;
// Close automatically opened Foreman.
fclose(source_file_);
const std::string input_file =
webrtc::test::ResourcePath("deflicker_before_cif_short", "yuv");
source_file_ = fopen(input_file.c_str(), "rb");
ASSERT_TRUE(source_file_ != NULL) << "Cannot read input file: " << input_file
<< "\n";
const std::string output_file =
webrtc::test::OutputPath() + "deflicker_output_cif_short.yuv";
FILE* deflickerFile = fopen(output_file.c_str(), "wb");
ASSERT_TRUE(deflickerFile != NULL)
<< "Could not open output file: " << output_file << "\n";
printf("\nRun time [us / frame]:\n");
std::unique_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
for (uint32_t run_idx = 0; run_idx < NumRuns; run_idx++) {
TickTime t0;
TickTime t1;
TickInterval acc_ticks;
uint32_t timeStamp = 1;
frameNum = 0;
while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
frame_length_) {
frameNum++;
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_,
height_, 0, kVideoRotation_0, &video_frame_));
video_frame_.set_timestamp(timeStamp);
t0 = TickTime::Now();
VideoProcessing::FrameStats stats;
vp_->GetFrameStats(video_frame_, &stats);
EXPECT_GT(stats.num_pixels, 0u);
ASSERT_EQ(0, vp_->Deflickering(&video_frame_, &stats));
t1 = TickTime::Now();
acc_ticks += (t1 - t0);
if (run_idx == 0) {
if (PrintVideoFrame(video_frame_, deflickerFile) < 0) {
return;
}
}
timeStamp += (90000 / frame_rate);
}
ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
printf("%u\n", static_cast<int>(acc_ticks.Microseconds() / frameNum));
if (acc_ticks.Microseconds() < min_runtime || run_idx == 0) {
min_runtime = acc_ticks.Microseconds();
}
avg_runtime += acc_ticks.Microseconds();
rewind(source_file_);
}
ASSERT_EQ(0, fclose(deflickerFile));
// TODO(kjellander): Add verification of deflicker output file.
printf("\nAverage run time = %d us / frame\n",
static_cast<int>(avg_runtime / frameNum / NumRuns));
printf("Min run time = %d us / frame\n\n",
static_cast<int>(min_runtime / frameNum));
}
} // namespace webrtc

webrtc/modules/video_processing/test/video_processing_unittest.cc

@@ -51,8 +51,6 @@ static void TestSize(const VideoFrame& source_frame,
                      int target_height,
                      double expected_psnr,
                      VideoProcessing* vpm);
-static bool CompareFrames(const webrtc::VideoFrame& frame1,
-                          const webrtc::VideoFrame& frame2);
 static void WriteProcessedFrameForVisualInspection(const VideoFrame& source,
                                                    const VideoFrame& processed);

@@ -92,108 +90,6 @@ void VideoProcessingTest::TearDown() {
   vp_ = NULL;
 }

-#if defined(WEBRTC_IOS)
-TEST_F(VideoProcessingTest, DISABLED_HandleNullBuffer) {
-#else
-TEST_F(VideoProcessingTest, HandleNullBuffer) {
-#endif
-  // TODO(mikhal/stefan): Do we need this one?
-  VideoProcessing::FrameStats stats;
-  // Video frame with unallocated buffer.
-  VideoFrame videoFrame;
-
-  vp_->GetFrameStats(videoFrame, &stats);
-  EXPECT_EQ(stats.num_pixels, 0u);
-
-  EXPECT_EQ(-1, vp_->Deflickering(&videoFrame, &stats));
-
-  EXPECT_EQ(-3, vp_->BrightnessDetection(videoFrame, stats));
-}
-
-#if defined(WEBRTC_IOS)
-TEST_F(VideoProcessingTest, DISABLED_HandleBadStats) {
-#else
-TEST_F(VideoProcessingTest, HandleBadStats) {
-#endif
-  VideoProcessing::FrameStats stats;
-  vp_->ClearFrameStats(&stats);
-  std::unique_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
-  ASSERT_EQ(frame_length_,
-            fread(video_buffer.get(), 1, frame_length_, source_file_));
-  EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
-                             0, kVideoRotation_0, &video_frame_));
-
-  EXPECT_EQ(-1, vp_->Deflickering(&video_frame_, &stats));
-
-  EXPECT_EQ(-3, vp_->BrightnessDetection(video_frame_, stats));
-}
-
-#if defined(WEBRTC_IOS)
-TEST_F(VideoProcessingTest, DISABLED_IdenticalResultsAfterReset) {
-#else
-TEST_F(VideoProcessingTest, IdenticalResultsAfterReset) {
-#endif
-  VideoFrame video_frame2;
-  VideoProcessing::FrameStats stats;
-  // Only testing non-static functions here.
-  std::unique_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
-  ASSERT_EQ(frame_length_,
-            fread(video_buffer.get(), 1, frame_length_, source_file_));
-  EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
-                             0, kVideoRotation_0, &video_frame_));
-  vp_->GetFrameStats(video_frame_, &stats);
-  EXPECT_GT(stats.num_pixels, 0u);
-  video_frame2.CopyFrame(video_frame_);
-  ASSERT_EQ(0, vp_->Deflickering(&video_frame_, &stats));
-
-  // Retrieve frame stats again in case Deflickering() has zeroed them.
-  vp_->GetFrameStats(video_frame2, &stats);
-  EXPECT_GT(stats.num_pixels, 0u);
-  ASSERT_EQ(0, vp_->Deflickering(&video_frame2, &stats));
-  EXPECT_TRUE(CompareFrames(video_frame_, video_frame2));
-
-  ASSERT_EQ(frame_length_,
-            fread(video_buffer.get(), 1, frame_length_, source_file_));
-  EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
-                             0, kVideoRotation_0, &video_frame_));
-  vp_->GetFrameStats(video_frame_, &stats);
-  EXPECT_GT(stats.num_pixels, 0u);
-  video_frame2.CopyFrame(video_frame_);
-  ASSERT_EQ(0, vp_->BrightnessDetection(video_frame_, stats));
-
-  ASSERT_EQ(0, vp_->BrightnessDetection(video_frame2, stats));
-  EXPECT_TRUE(CompareFrames(video_frame_, video_frame2));
-}
-
-#if defined(WEBRTC_IOS)
-TEST_F(VideoProcessingTest, DISABLED_FrameStats) {
-#else
-TEST_F(VideoProcessingTest, FrameStats) {
-#endif
-  VideoProcessing::FrameStats stats;
-  vp_->ClearFrameStats(&stats);
-  std::unique_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
-  ASSERT_EQ(frame_length_,
-            fread(video_buffer.get(), 1, frame_length_, source_file_));
-  EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
-                             0, kVideoRotation_0, &video_frame_));
-
-  EXPECT_FALSE(vp_->ValidFrameStats(stats));
-  vp_->GetFrameStats(video_frame_, &stats);
-  EXPECT_GT(stats.num_pixels, 0u);
-  EXPECT_TRUE(vp_->ValidFrameStats(stats));
-
-  printf("\nFrameStats\n");
-  printf("mean: %u\nnum_pixels: %u\nsubSamplFactor: %u\nsum: %u\n\n",
-         static_cast<unsigned int>(stats.mean),
-         static_cast<unsigned int>(stats.num_pixels),
-         static_cast<unsigned int>(stats.sub_sampling_factor),
-         static_cast<unsigned int>(stats.sum));
-
-  vp_->ClearFrameStats(&stats);
-  EXPECT_FALSE(vp_->ValidFrameStats(stats));
-}
-
 #if defined(WEBRTC_IOS)
 TEST_F(VideoProcessingTest, DISABLED_PreprocessorLogic) {
 #else

@@ -378,22 +274,6 @@ void TestSize(const VideoFrame& source_frame,
                 target_height);
 }

-bool CompareFrames(const webrtc::VideoFrame& frame1,
-                   const webrtc::VideoFrame& frame2) {
-  for (int plane = 0; plane < webrtc::kNumOfPlanes; plane++) {
-    webrtc::PlaneType plane_type = static_cast<webrtc::PlaneType>(plane);
-    int allocated_size1 = frame1.allocated_size(plane_type);
-    int allocated_size2 = frame2.allocated_size(plane_type);
-    if (allocated_size1 != allocated_size2)
-      return false;
-    const uint8_t* plane_buffer1 = frame1.buffer(plane_type);
-    const uint8_t* plane_buffer2 = frame2.buffer(plane_type);
-    if (memcmp(plane_buffer1, plane_buffer2, allocated_size1))
-      return false;
-  }
-  return true;
-}
-
 void WriteProcessedFrameForVisualInspection(const VideoFrame& source,
                                             const VideoFrame& processed) {
   // Skip if writing to files is not enabled.

webrtc/modules/video_processing/video_processing.gypi

@@ -20,12 +20,8 @@
     'sources': [
       'include/video_processing.h',
       'include/video_processing_defines.h',
-      'brightness_detection.cc',
-      'brightness_detection.h',
       'content_analysis.cc',
       'content_analysis.h',
-      'deflickering.cc',
-      'deflickering.h',
       'frame_preprocessor.cc',
       'frame_preprocessor.h',
       'spatial_resampler.cc',

webrtc/modules/video_processing/video_processing_impl.cc

@@ -18,21 +18,6 @@
 namespace webrtc {

-namespace {
-
-int GetSubSamplingFactor(int width, int height) {
-  if (width * height >= 640 * 480) {
-    return 3;
-  } else if (width * height >= 352 * 288) {
-    return 2;
-  } else if (width * height >= 176 * 144) {
-    return 1;
-  } else {
-    return 0;
-  }
-}
-}  // namespace
-
 VideoProcessing* VideoProcessing::Create() {
   return new VideoProcessingImpl();
 }

@@ -40,83 +25,6 @@ VideoProcessing* VideoProcessing::Create() {
 VideoProcessingImpl::VideoProcessingImpl() {}
 VideoProcessingImpl::~VideoProcessingImpl() {}

-void VideoProcessing::GetFrameStats(const VideoFrame& frame,
-                                    FrameStats* stats) {
-  ClearFrameStats(stats);  // The histogram needs to be zeroed out.
-  if (frame.IsZeroSize()) {
-    return;
-  }
-
-  int width = frame.width();
-  int height = frame.height();
-  stats->sub_sampling_factor = GetSubSamplingFactor(width, height);
-
-  const uint8_t* buffer = frame.buffer(kYPlane);
-  // Compute histogram and sum of frame
-  for (int i = 0; i < height; i += (1 << stats->sub_sampling_factor)) {
-    int k = i * width;
-    for (int j = 0; j < width; j += (1 << stats->sub_sampling_factor)) {
-      stats->hist[buffer[k + j]]++;
-      stats->sum += buffer[k + j];
-    }
-  }
-
-  stats->num_pixels = (width * height) / ((1 << stats->sub_sampling_factor) *
-                                          (1 << stats->sub_sampling_factor));
-  assert(stats->num_pixels > 0);
-
-  // Compute mean value of frame
-  stats->mean = stats->sum / stats->num_pixels;
-}
-
-bool VideoProcessing::ValidFrameStats(const FrameStats& stats) {
-  if (stats.num_pixels == 0) {
-    LOG(LS_WARNING) << "Invalid frame stats.";
-    return false;
-  }
-  return true;
-}
-
-void VideoProcessing::ClearFrameStats(FrameStats* stats) {
-  stats->mean = 0;
-  stats->sum = 0;
-  stats->num_pixels = 0;
-  stats->sub_sampling_factor = 0;
-  memset(stats->hist, 0, sizeof(stats->hist));
-}
-
-void VideoProcessing::Brighten(int delta, VideoFrame* frame) {
-  RTC_DCHECK(!frame->IsZeroSize());
-  RTC_DCHECK(frame->width() > 0);
-  RTC_DCHECK(frame->height() > 0);
-
-  int num_pixels = frame->width() * frame->height();
-
-  int look_up[256];
-  for (int i = 0; i < 256; i++) {
-    int val = i + delta;
-    look_up[i] = ((((val < 0) ? 0 : val) > 255) ? 255 : val);
-  }
-
-  uint8_t* temp_ptr = frame->buffer(kYPlane);
-  for (int i = 0; i < num_pixels; i++) {
-    *temp_ptr = static_cast<uint8_t>(look_up[*temp_ptr]);
-    temp_ptr++;
-  }
-}
-
-int32_t VideoProcessingImpl::Deflickering(VideoFrame* frame,
-                                          FrameStats* stats) {
-  rtc::CritScope mutex(&mutex_);
-  return deflickering_.ProcessFrame(frame, stats);
-}
-
-int32_t VideoProcessingImpl::BrightnessDetection(const VideoFrame& frame,
-                                                 const FrameStats& stats) {
-  rtc::CritScope mutex(&mutex_);
-  return brightness_detection_.ProcessFrame(frame, stats);
-}
-
 void VideoProcessingImpl::EnableTemporalDecimation(bool enable) {
   rtc::CritScope mutex(&mutex_);
   frame_pre_processor_.EnableTemporalDecimation(enable);

@@ -150,9 +58,9 @@ uint32_t VideoProcessingImpl::GetDecimatedHeight() const {
   return frame_pre_processor_.GetDecimatedHeight();
 }

-void VideoProcessingImpl::EnableDenosing(bool enable) {
+void VideoProcessingImpl::EnableDenoising(bool enable) {
   rtc::CritScope cs(&mutex_);
-  frame_pre_processor_.EnableDenosing(enable);
+  frame_pre_processor_.EnableDenoising(enable);
 }

 const VideoFrame* VideoProcessingImpl::PreprocessFrame(
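
A worked number for the removed GetFrameStats() subsampling: at 640x480 the factor is 3, so the loops step 2^3 = 8 pixels in each dimension and num_pixels = (640 * 480) / (8 * 8) = 4800, i.e. the histogram and mean were computed from roughly 1.6 % of the luma plane.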

webrtc/modules/video_processing/video_processing_impl.h

@@ -13,8 +13,6 @@

 #include "webrtc/base/criticalsection.h"
 #include "webrtc/modules/video_processing/include/video_processing.h"
-#include "webrtc/modules/video_processing/brightness_detection.h"
-#include "webrtc/modules/video_processing/deflickering.h"
 #include "webrtc/modules/video_processing/frame_preprocessor.h"

 namespace webrtc {

@@ -26,9 +24,6 @@ class VideoProcessingImpl : public VideoProcessing {
   ~VideoProcessingImpl() override;

   // Implements VideoProcessing.
-  int32_t Deflickering(VideoFrame* frame, FrameStats* stats) override;
-  int32_t BrightnessDetection(const VideoFrame& frame,
-                              const FrameStats& stats) override;
   void EnableTemporalDecimation(bool enable) override;
   void SetInputFrameResampleMode(VideoFrameResampling resampling_mode) override;
   void EnableContentAnalysis(bool enable) override;

@@ -38,15 +33,13 @@ class VideoProcessingImpl : public VideoProcessing {
   uint32_t GetDecimatedFrameRate() override;
   uint32_t GetDecimatedWidth() const override;
   uint32_t GetDecimatedHeight() const override;
-  void EnableDenosing(bool enable) override;
+  void EnableDenoising(bool enable) override;
   const VideoFrame* PreprocessFrame(const VideoFrame& frame) override;
   VideoContentMetrics* GetContentMetrics() const override;

  private:
   rtc::CriticalSection mutex_;
-  VPMDeflickering deflickering_ GUARDED_BY(mutex_);
-  VPMBrightnessDetection brightness_detection_;
-  VPMFramePreprocessor frame_pre_processor_;
+  VPMFramePreprocessor frame_pre_processor_ GUARDED_BY(mutex_);
 };

 }  // namespace webrtc