Clang format of video_processing folder.
BUG=webrtc:5259
Review URL: https://codereview.webrtc.org/1508793002
Cr-Commit-Position: refs/heads/master@{#10925}
@@ -64,10 +64,10 @@ int32_t VPMBrightnessDetection::ProcessFrame(
   const uint8_t* buffer = frame.buffer(kYPlane);
   float std_y = 0;
   for (int h = 0; h < height; h += (1 << stats.sub_sampling_factor)) {
-    int row = h*width;
+    int row = h * width;
     for (int w = 0; w < width; w += (1 << stats.sub_sampling_factor)) {
-      std_y += (buffer[w + row] - stats.mean) * (buffer[w + row] -
-               stats.mean);
+      std_y +=
+          (buffer[w + row] - stats.mean) * (buffer[w + row] - stats.mean);
     }
   }
   std_y = sqrt(std_y / stats.num_pixels);
@@ -82,37 +82,39 @@ int32_t VPMBrightnessDetection::ProcessFrame(
     float posPerc95 = stats.num_pixels * 0.95f;
     for (uint32_t i = 0; i < 256; i++) {
       sum += stats.hist[i];
-      if (sum < pos_perc05) perc05 = i;  // 5th perc.
-      if (sum < pos_median) median_y = i;  // 50th perc.
+      if (sum < pos_perc05)
+        perc05 = i;  // 5th perc.
+      if (sum < pos_median)
+        median_y = i;  // 50th perc.
       if (sum < posPerc95)
         perc95 = i;  // 95th perc.
       else
         break;
     }
 
     // Check if image is too dark
     if ((std_y < 55) && (perc05 < 50)) {
       if (median_y < 60 || stats.mean < 80 || perc95 < 130 ||
           prop_low > 0.20) {
         frame_cnt_dark_++;
       } else {
         frame_cnt_dark_ = 0;
       }
     } else {
       frame_cnt_dark_ = 0;
     }
 
     // Check if image is too bright
     if ((std_y < 52) && (perc95 > 200) && (median_y > 160)) {
       if (median_y > 185 || stats.mean > 185 || perc05 > 140 ||
           prop_high > 0.25) {
         frame_cnt_bright_++;
       } else {
         frame_cnt_bright_ = 0;
       }
     } else {
       frame_cnt_bright_ = 0;
     }
   } else {
     frame_cnt_dark_ = 0;
     frame_cnt_bright_ = 0;
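For readers following the logic rather than the formatting: the percentile scan reflowed above walks a 256-bin luma histogram and records the last bin whose cumulative count is still below each target position. A minimal standalone sketch of the same idea (hypothetical helper, not part of this CL; `hist` and the position argument mirror the FrameStats fields used above):

#include <cstdint>

// Last bin whose cumulative count stays below `pos`, mirroring the
// "if (sum < pos) perc = i; else break;" pattern above. With
// pos = num_pixels * 0.05f this yields the 5th percentile, and so on.
static int PercentileBin(const uint32_t hist[256], float pos) {
  uint32_t sum = 0;
  int bin = 0;
  for (int i = 0; i < 256; ++i) {
    sum += hist[i];
    if (sum < pos)
      bin = i;
    else
      break;
  }
  return bin;
}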
@@ -72,7 +72,7 @@ VideoContentMetrics* VPMContentAnalysis::ComputeContentMetrics(
     // Saving current frame as previous one: Y only.
     memcpy(prev_frame_, orig_frame_, width_ * height_);
 
     first_frame_ = false;
     ca_Init_ = true;
 
     return ContentMetrics();
@@ -85,7 +85,7 @@ int32_t VPMContentAnalysis::Release() {
   }
 
   if (prev_frame_ != NULL) {
-    delete [] prev_frame_;
+    delete[] prev_frame_;
     prev_frame_ = NULL;
   }
 
@@ -106,11 +106,11 @@ int32_t VPMContentAnalysis::Initialize(int width, int height) {
   skip_num_ = 1;
 
   // use skipNum = 2 for 4CIF, WHD
-  if ( (height_ >= 576) && (width_ >= 704) ) {
+  if ((height_ >= 576) && (width_ >= 704)) {
     skip_num_ = 2;
   }
   // use skipNum = 4 for FULLL_HD images
-  if ( (height_ >= 1080) && (width_ >= 1920) ) {
+  if ((height_ >= 1080) && (width_ >= 1920)) {
     skip_num_ = 4;
   }
 
@@ -119,7 +119,7 @@ int32_t VPMContentAnalysis::Initialize(int width, int height) {
   }
 
   if (prev_frame_ != NULL) {
-    delete [] prev_frame_;
+    delete[] prev_frame_;
   }
 
   // Spatial Metrics don't work on a border of 8. Minimum processing
@@ -135,12 +135,12 @@ int32_t VPMContentAnalysis::Initialize(int width, int height) {
   }
 
   prev_frame_ = new uint8_t[width_ * height_];  // Y only.
-  if (prev_frame_ == NULL) return VPM_MEMORY;
+  if (prev_frame_ == NULL)
+    return VPM_MEMORY;
 
   return VPM_OK;
 }
 
-
 // Compute motion metrics: magnitude over non-zero motion vectors,
 // and size of zero cluster
 int32_t VPMContentAnalysis::ComputeMotionMetrics() {
@@ -163,36 +163,41 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_C() {
   uint64_t pixelSqSum = 0;
 
   uint32_t num_pixels = 0;  // Counter for # of pixels.
-  const int width_end = ((width_ - 2*border_) & -16) + border_;
+  const int width_end = ((width_ - 2 * border_) & -16) + border_;
 
   for (int i = border_; i < sizei - border_; i += skip_num_) {
     for (int j = border_; j < width_end; j++) {
       num_pixels += 1;
       int ssn = i * sizej + j;
 
       uint8_t currPixel = orig_frame_[ssn];
       uint8_t prevPixel = prev_frame_[ssn];
 
-      tempDiffSum += (uint32_t)abs((int16_t)(currPixel - prevPixel));
-      pixelSum += (uint32_t) currPixel;
-      pixelSqSum += (uint64_t) (currPixel * currPixel);
+      tempDiffSum +=
+          static_cast<uint32_t>(abs((int16_t)(currPixel - prevPixel)));
+      pixelSum += static_cast<uint32_t>(currPixel);
+      pixelSqSum += static_cast<uint64_t>(currPixel * currPixel);
     }
   }
 
   // Default.
   motion_magnitude_ = 0.0f;
 
-  if (tempDiffSum == 0) return VPM_OK;
+  if (tempDiffSum == 0)
+    return VPM_OK;
 
   // Normalize over all pixels.
-  float const tempDiffAvg = (float)tempDiffSum / (float)(num_pixels);
-  float const pixelSumAvg = (float)pixelSum / (float)(num_pixels);
-  float const pixelSqSumAvg = (float)pixelSqSum / (float)(num_pixels);
+  float const tempDiffAvg =
+      static_cast<float>(tempDiffSum) / static_cast<float>(num_pixels);
+  float const pixelSumAvg =
+      static_cast<float>(pixelSum) / static_cast<float>(num_pixels);
+  float const pixelSqSumAvg =
+      static_cast<float>(pixelSqSum) / static_cast<float>(num_pixels);
   float contrast = pixelSqSumAvg - (pixelSumAvg * pixelSumAvg);
 
   if (contrast > 0.0) {
     contrast = sqrt(contrast);
-    motion_magnitude_ = tempDiffAvg/contrast;
+    motion_magnitude_ = tempDiffAvg / contrast;
   }
   return VPM_OK;
 }
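Stepping back from the formatting, what TemporalDiffMetric_C computes is the mean absolute luma difference between consecutive frames, normalized by the frame's contrast, i.e. the standard deviation sqrt(E[x^2] - E[x]^2). A scalar sketch over a single buffer (hypothetical helper; borders, sub-sampling and the 16-pixel width mask of the real function are omitted):

#include <cmath>
#include <cstdint>
#include <cstdlib>

// Simplified model of TemporalDiffMetric_C over n pixels:
// mean |cur - prev| divided by the contrast of the current frame.
float MotionMagnitude(const uint8_t* cur, const uint8_t* prev, int n) {
  uint64_t diff_sum = 0, sum = 0, sq_sum = 0;
  for (int i = 0; i < n; ++i) {
    diff_sum += std::abs(cur[i] - prev[i]);
    sum += cur[i];
    sq_sum += static_cast<uint64_t>(cur[i]) * cur[i];
  }
  if (n == 0 || diff_sum == 0) return 0.0f;
  const float mean = static_cast<float>(sum) / n;
  const float contrast_sq = static_cast<float>(sq_sum) / n - mean * mean;
  if (contrast_sq <= 0.0f) return 0.0f;
  return (static_cast<float>(diff_sum) / n) / std::sqrt(contrast_sq);
}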
@@ -216,39 +221,40 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_C() {
   uint32_t spatialErrHSum = 0;
 
   // make sure work section is a multiple of 16
-  const int width_end = ((sizej - 2*border_) & -16) + border_;
+  const int width_end = ((sizej - 2 * border_) & -16) + border_;
 
   for (int i = border_; i < sizei - border_; i += skip_num_) {
     for (int j = border_; j < width_end; j++) {
-      int ssn1= i * sizej + j;
+      int ssn1 = i * sizej + j;
       int ssn2 = (i + 1) * sizej + j;  // bottom
       int ssn3 = (i - 1) * sizej + j;  // top
       int ssn4 = i * sizej + j + 1;  // right
       int ssn5 = i * sizej + j - 1;  // left
 
       uint16_t refPixel1 = orig_frame_[ssn1] << 1;
       uint16_t refPixel2 = orig_frame_[ssn1] << 2;
 
       uint8_t bottPixel = orig_frame_[ssn2];
       uint8_t topPixel = orig_frame_[ssn3];
       uint8_t rightPixel = orig_frame_[ssn4];
       uint8_t leftPixel = orig_frame_[ssn5];
 
-      spatialErrSum += (uint32_t) abs((int16_t)(refPixel2
-          - (uint16_t)(bottPixel + topPixel + leftPixel + rightPixel)));
-      spatialErrVSum += (uint32_t) abs((int16_t)(refPixel1
-          - (uint16_t)(bottPixel + topPixel)));
-      spatialErrHSum += (uint32_t) abs((int16_t)(refPixel1
-          - (uint16_t)(leftPixel + rightPixel)));
+      spatialErrSum += static_cast<uint32_t>(abs(static_cast<int16_t>(
+          refPixel2 - static_cast<uint16_t>(bottPixel + topPixel + leftPixel +
+                                            rightPixel))));
+      spatialErrVSum += static_cast<uint32_t>(abs(static_cast<int16_t>(
+          refPixel1 - static_cast<uint16_t>(bottPixel + topPixel))));
+      spatialErrHSum += static_cast<uint32_t>(abs(static_cast<int16_t>(
+          refPixel1 - static_cast<uint16_t>(leftPixel + rightPixel))));
       pixelMSA += orig_frame_[ssn1];
     }
   }
 
   // Normalize over all pixels.
-  const float spatialErr = (float)(spatialErrSum >> 2);
-  const float spatialErrH = (float)(spatialErrHSum >> 1);
-  const float spatialErrV = (float)(spatialErrVSum >> 1);
-  const float norm = (float)pixelMSA;
+  const float spatialErr = static_cast<float>(spatialErrSum >> 2);
+  const float spatialErrH = static_cast<float>(spatialErrHSum >> 1);
+  const float spatialErrV = static_cast<float>(spatialErrVSum >> 1);
+  const float norm = static_cast<float>(pixelMSA);
 
   // 2X2:
   spatial_pred_err_ = spatialErr / norm;
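The three accumulators above are per-pixel prediction residuals: refPixel2 (four times the center sample) against the four-neighbour sum is a discrete Laplacian, and refPixel1 (twice the center) against the vertical or horizontal pair isolates each direction. Per pixel this amounts to the following (hypothetical sketch; c is the center sample, t/b/l/r its neighbours):

#include <cstdlib>
#include <cstdint>

// Residuals accumulated by ComputeSpatialMetrics_C for one pixel.
struct SpatialErr {
  int err_2x2;  // |4c - (t + b + l + r)|
  int err_v;    // |2c - (t + b)|
  int err_h;    // |2c - (l + r)|
};

SpatialErr PixelSpatialErr(uint8_t c, uint8_t t, uint8_t b, uint8_t l,
                           uint8_t r) {
  return {std::abs(4 * c - (t + b + l + r)), std::abs(2 * c - (t + b)),
          std::abs(2 * c - (l + r))};
}

The >> 2 and >> 1 in the normalization undo the 4x and 2x scaling of the reference pixel before dividing by the total intensity pixelMSA.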
@@ -260,7 +266,8 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_C() {
 }
 
 VideoContentMetrics* VPMContentAnalysis::ContentMetrics() {
-  if (ca_Init_ == false) return NULL;
+  if (ca_Init_ == false)
+    return NULL;
 
   content_metrics_->spatial_pred_err = spatial_pred_err_;
   content_metrics_->spatial_pred_err_h = spatial_pred_err_h_;
@@ -72,14 +72,14 @@ class VPMContentAnalysis {
   int border_;
 
   // Content Metrics: Stores the local average of the metrics.
   float motion_magnitude_;    // motion class
   float spatial_pred_err_;    // spatial class
   float spatial_pred_err_h_;  // spatial class
   float spatial_pred_err_v_;  // spatial class
   bool first_frame_;
   bool ca_Init_;
 
   VideoContentMetrics* content_metrics_;
 };
 
 }  // namespace webrtc
@@ -16,22 +16,22 @@
 namespace webrtc {
 
 int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
   uint32_t num_pixels = 0;  // counter for # of pixels
-  const uint8_t* imgBufO = orig_frame_ + border_*width_ + border_;
-  const uint8_t* imgBufP = prev_frame_ + border_*width_ + border_;
+  const uint8_t* imgBufO = orig_frame_ + border_ * width_ + border_;
+  const uint8_t* imgBufP = prev_frame_ + border_ * width_ + border_;
 
-  const int32_t width_end = ((width_ - 2*border_) & -16) + border_;
+  const int32_t width_end = ((width_ - 2 * border_) & -16) + border_;
 
   __m128i sad_64 = _mm_setzero_si128();
   __m128i sum_64 = _mm_setzero_si128();
   __m128i sqsum_64 = _mm_setzero_si128();
   const __m128i z = _mm_setzero_si128();
 
-  for (uint16_t i = 0; i < (height_ - 2*border_); i += skip_num_) {
+  for (uint16_t i = 0; i < (height_ - 2 * border_); i += skip_num_) {
     __m128i sqsum_32 = _mm_setzero_si128();
 
-    const uint8_t *lineO = imgBufO;
-    const uint8_t *lineP = imgBufP;
+    const uint8_t* lineO = imgBufO;
+    const uint8_t* lineP = imgBufP;
 
     // Work on 16 pixels at a time. For HD content with a width of 1920
     // this loop will run ~67 times (depending on border). Maximum for
@@ -49,14 +49,14 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
       lineP += 16;
 
       // Abs pixel difference between frames.
-      sad_64 = _mm_add_epi64 (sad_64, _mm_sad_epu8(o, p));
+      sad_64 = _mm_add_epi64(sad_64, _mm_sad_epu8(o, p));
 
       // sum of all pixels in frame
-      sum_64 = _mm_add_epi64 (sum_64, _mm_sad_epu8(o, z));
+      sum_64 = _mm_add_epi64(sum_64, _mm_sad_epu8(o, z));
 
       // Squared sum of all pixels in frame.
-      const __m128i olo = _mm_unpacklo_epi8(o,z);
-      const __m128i ohi = _mm_unpackhi_epi8(o,z);
+      const __m128i olo = _mm_unpacklo_epi8(o, z);
+      const __m128i ohi = _mm_unpackhi_epi8(o, z);
 
       const __m128i sqsum_32_lo = _mm_madd_epi16(olo, olo);
       const __m128i sqsum_32_hi = _mm_madd_epi16(ohi, ohi);
@@ -66,9 +66,9 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
     }
 
     // Add to 64 bit running sum as to not roll over.
-    sqsum_64 = _mm_add_epi64(sqsum_64,
-                             _mm_add_epi64(_mm_unpackhi_epi32(sqsum_32,z),
-                                           _mm_unpacklo_epi32(sqsum_32,z)));
+    sqsum_64 =
+        _mm_add_epi64(sqsum_64, _mm_add_epi64(_mm_unpackhi_epi32(sqsum_32, z),
+                                              _mm_unpacklo_epi32(sqsum_32, z)));
 
     imgBufO += width_ * skip_num_;
     imgBufP += width_ * skip_num_;
@@ -81,13 +81,13 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
 
   // Bring sums out of vector registers and into integer register
   // domain, summing them along the way.
-  _mm_store_si128 (&sad_final_128, sad_64);
-  _mm_store_si128 (&sum_final_128, sum_64);
-  _mm_store_si128 (&sqsum_final_128, sqsum_64);
+  _mm_store_si128(&sad_final_128, sad_64);
+  _mm_store_si128(&sum_final_128, sum_64);
+  _mm_store_si128(&sqsum_final_128, sqsum_64);
 
-  uint64_t *sad_final_64 = reinterpret_cast<uint64_t*>(&sad_final_128);
-  uint64_t *sum_final_64 = reinterpret_cast<uint64_t*>(&sum_final_128);
-  uint64_t *sqsum_final_64 = reinterpret_cast<uint64_t*>(&sqsum_final_128);
+  uint64_t* sad_final_64 = reinterpret_cast<uint64_t*>(&sad_final_128);
+  uint64_t* sum_final_64 = reinterpret_cast<uint64_t*>(&sum_final_128);
+  uint64_t* sqsum_final_64 = reinterpret_cast<uint64_t*>(&sqsum_final_128);
 
   const uint32_t pixelSum = sum_final_64[0] + sum_final_64[1];
   const uint64_t pixelSqSum = sqsum_final_64[0] + sqsum_final_64[1];
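The store-then-reinterpret sequence above is the usual SSE2 horizontal reduction: each accumulator register still holds two 64-bit lanes that have to be added in scalar code. The same idiom, isolated as a self-contained sketch (hypothetical helper, not part of this CL):

#include <emmintrin.h>  // SSE2
#include <cstdint>

// Sum the two 64-bit lanes of an SSE2 accumulator, as done above for
// sad_64, sum_64 and sqsum_64. _mm_store_si128 needs 16-byte alignment.
static uint64_t HorizontalSum64(__m128i v) {
  alignas(16) uint64_t lanes[2];
  _mm_store_si128(reinterpret_cast<__m128i*>(lanes), v);
  return lanes[0] + lanes[1];
}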
@@ -96,27 +96,31 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
   // Default.
   motion_magnitude_ = 0.0f;
 
-  if (tempDiffSum == 0) return VPM_OK;
+  if (tempDiffSum == 0)
+    return VPM_OK;
 
   // Normalize over all pixels.
-  const float tempDiffAvg = (float)tempDiffSum / (float)(num_pixels);
-  const float pixelSumAvg = (float)pixelSum / (float)(num_pixels);
-  const float pixelSqSumAvg = (float)pixelSqSum / (float)(num_pixels);
+  const float tempDiffAvg =
+      static_cast<float>(tempDiffSum) / static_cast<float>(num_pixels);
+  const float pixelSumAvg =
+      static_cast<float>(pixelSum) / static_cast<float>(num_pixels);
+  const float pixelSqSumAvg =
+      static_cast<float>(pixelSqSum) / static_cast<float>(num_pixels);
   float contrast = pixelSqSumAvg - (pixelSumAvg * pixelSumAvg);
 
   if (contrast > 0.0) {
     contrast = sqrt(contrast);
-    motion_magnitude_ = tempDiffAvg/contrast;
+    motion_magnitude_ = tempDiffAvg / contrast;
   }
 
   return VPM_OK;
 }
 
 int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
-  const uint8_t* imgBuf = orig_frame_ + border_*width_;
+  const uint8_t* imgBuf = orig_frame_ + border_ * width_;
   const int32_t width_end = ((width_ - 2 * border_) & -16) + border_;
 
   __m128i se_32 = _mm_setzero_si128();
   __m128i sev_32 = _mm_setzero_si128();
   __m128i seh_32 = _mm_setzero_si128();
   __m128i msa_32 = _mm_setzero_si128();
@@ -127,8 +131,8 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
   // value is maxed out at 65529 for every row, 65529*1080 = 70777800, which
   // will not roll over a 32 bit accumulator.
   // skip_num_ is also used to reduce the number of rows
-  for (int32_t i = 0; i < (height_ - 2*border_); i += skip_num_) {
+  for (int32_t i = 0; i < (height_ - 2 * border_); i += skip_num_) {
     __m128i se_16 = _mm_setzero_si128();
     __m128i sev_16 = _mm_setzero_si128();
     __m128i seh_16 = _mm_setzero_si128();
     __m128i msa_16 = _mm_setzero_si128();
@@ -143,9 +147,9 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
     // border_ could also be adjusted to concentrate on just the center of
     // the images for an HD capture in order to reduce the possiblity of
     // rollover.
-    const uint8_t *lineTop = imgBuf - width_ + border_;
-    const uint8_t *lineCen = imgBuf + border_;
-    const uint8_t *lineBot = imgBuf + width_ + border_;
+    const uint8_t* lineTop = imgBuf - width_ + border_;
+    const uint8_t* lineCen = imgBuf + border_;
+    const uint8_t* lineBot = imgBuf + width_ + border_;
 
     for (int32_t j = 0; j < width_end - border_; j += 16) {
       const __m128i t = _mm_loadu_si128((__m128i*)(lineTop));
@@ -159,20 +163,20 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
       lineBot += 16;
 
       // center pixel unpacked
-      __m128i clo = _mm_unpacklo_epi8(c,z);
-      __m128i chi = _mm_unpackhi_epi8(c,z);
+      __m128i clo = _mm_unpacklo_epi8(c, z);
+      __m128i chi = _mm_unpackhi_epi8(c, z);
 
       // left right pixels unpacked and added together
-      const __m128i lrlo = _mm_add_epi16(_mm_unpacklo_epi8(l,z),
-                                         _mm_unpacklo_epi8(r,z));
-      const __m128i lrhi = _mm_add_epi16(_mm_unpackhi_epi8(l,z),
-                                         _mm_unpackhi_epi8(r,z));
+      const __m128i lrlo =
+          _mm_add_epi16(_mm_unpacklo_epi8(l, z), _mm_unpacklo_epi8(r, z));
+      const __m128i lrhi =
+          _mm_add_epi16(_mm_unpackhi_epi8(l, z), _mm_unpackhi_epi8(r, z));
 
       // top & bottom pixels unpacked and added together
-      const __m128i tblo = _mm_add_epi16(_mm_unpacklo_epi8(t,z),
-                                         _mm_unpacklo_epi8(b,z));
-      const __m128i tbhi = _mm_add_epi16(_mm_unpackhi_epi8(t,z),
-                                         _mm_unpackhi_epi8(b,z));
+      const __m128i tblo =
+          _mm_add_epi16(_mm_unpacklo_epi8(t, z), _mm_unpacklo_epi8(b, z));
+      const __m128i tbhi =
+          _mm_add_epi16(_mm_unpackhi_epi8(t, z), _mm_unpackhi_epi8(b, z));
 
       // running sum of all pixels
       msa_16 = _mm_add_epi16(msa_16, _mm_add_epi16(chi, clo));
@@ -190,29 +194,32 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
       const __m128i sethi = _mm_subs_epi16(chi, _mm_add_epi16(lrhi, tbhi));
 
       // Add to 16 bit running sum
-      se_16 = _mm_add_epi16(se_16, _mm_max_epi16(setlo,
-                                                 _mm_subs_epi16(z, setlo)));
-      se_16 = _mm_add_epi16(se_16, _mm_max_epi16(sethi,
-                                                 _mm_subs_epi16(z, sethi)));
-      sev_16 = _mm_add_epi16(sev_16, _mm_max_epi16(sevtlo,
-                                                   _mm_subs_epi16(z, sevtlo)));
-      sev_16 = _mm_add_epi16(sev_16, _mm_max_epi16(sevthi,
-                                                   _mm_subs_epi16(z, sevthi)));
-      seh_16 = _mm_add_epi16(seh_16, _mm_max_epi16(sehtlo,
-                                                   _mm_subs_epi16(z, sehtlo)));
-      seh_16 = _mm_add_epi16(seh_16, _mm_max_epi16(sehthi,
-                                                   _mm_subs_epi16(z, sehthi)));
+      se_16 =
+          _mm_add_epi16(se_16, _mm_max_epi16(setlo, _mm_subs_epi16(z, setlo)));
+      se_16 =
+          _mm_add_epi16(se_16, _mm_max_epi16(sethi, _mm_subs_epi16(z, sethi)));
+      sev_16 = _mm_add_epi16(sev_16,
+                             _mm_max_epi16(sevtlo, _mm_subs_epi16(z, sevtlo)));
+      sev_16 = _mm_add_epi16(sev_16,
+                             _mm_max_epi16(sevthi, _mm_subs_epi16(z, sevthi)));
+      seh_16 = _mm_add_epi16(seh_16,
+                             _mm_max_epi16(sehtlo, _mm_subs_epi16(z, sehtlo)));
+      seh_16 = _mm_add_epi16(seh_16,
+                             _mm_max_epi16(sehthi, _mm_subs_epi16(z, sehthi)));
     }
 
     // Add to 32 bit running sum as to not roll over.
-    se_32 = _mm_add_epi32(se_32, _mm_add_epi32(_mm_unpackhi_epi16(se_16,z),
-                                               _mm_unpacklo_epi16(se_16,z)));
-    sev_32 = _mm_add_epi32(sev_32, _mm_add_epi32(_mm_unpackhi_epi16(sev_16,z),
-                                                 _mm_unpacklo_epi16(sev_16,z)));
-    seh_32 = _mm_add_epi32(seh_32, _mm_add_epi32(_mm_unpackhi_epi16(seh_16,z),
-                                                 _mm_unpacklo_epi16(seh_16,z)));
-    msa_32 = _mm_add_epi32(msa_32, _mm_add_epi32(_mm_unpackhi_epi16(msa_16,z),
-                                                 _mm_unpacklo_epi16(msa_16,z)));
+    se_32 = _mm_add_epi32(se_32, _mm_add_epi32(_mm_unpackhi_epi16(se_16, z),
+                                               _mm_unpacklo_epi16(se_16, z)));
+    sev_32 =
+        _mm_add_epi32(sev_32, _mm_add_epi32(_mm_unpackhi_epi16(sev_16, z),
+                                            _mm_unpacklo_epi16(sev_16, z)));
+    seh_32 =
+        _mm_add_epi32(seh_32, _mm_add_epi32(_mm_unpackhi_epi16(seh_16, z),
+                                            _mm_unpacklo_epi16(seh_16, z)));
+    msa_32 =
+        _mm_add_epi32(msa_32, _mm_add_epi32(_mm_unpackhi_epi16(msa_16, z),
+                                            _mm_unpacklo_epi16(msa_16, z)));
 
     imgBuf += width_ * skip_num_;
   }
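The repeated max(x, 0 - x) construction being reflowed here is the standard SSE2 absolute value for signed 16-bit lanes; the dedicated _mm_abs_epi16 only arrived with SSSE3. The saturating subtract keeps 0 - (-32768) from wrapping around. Isolated as a sketch (hypothetical helper):

#include <emmintrin.h>  // SSE2

// Lane-wise |x| for signed 16-bit integers without SSSE3.
// _mm_subs_epi16 saturates, so -32768 becomes 32767 rather than wrapping.
static __m128i Abs16(__m128i x) {
  const __m128i zero = _mm_setzero_si128();
  return _mm_max_epi16(x, _mm_subs_epi16(zero, x));
}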
@@ -224,30 +231,30 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
 
   // Bring sums out of vector registers and into integer register
   // domain, summing them along the way.
-  _mm_store_si128 (&se_128, _mm_add_epi64(_mm_unpackhi_epi32(se_32,z),
-                                          _mm_unpacklo_epi32(se_32,z)));
-  _mm_store_si128 (&sev_128, _mm_add_epi64(_mm_unpackhi_epi32(sev_32,z),
-                                           _mm_unpacklo_epi32(sev_32,z)));
-  _mm_store_si128 (&seh_128, _mm_add_epi64(_mm_unpackhi_epi32(seh_32,z),
-                                           _mm_unpacklo_epi32(seh_32,z)));
-  _mm_store_si128 (&msa_128, _mm_add_epi64(_mm_unpackhi_epi32(msa_32,z),
-                                           _mm_unpacklo_epi32(msa_32,z)));
+  _mm_store_si128(&se_128, _mm_add_epi64(_mm_unpackhi_epi32(se_32, z),
+                                         _mm_unpacklo_epi32(se_32, z)));
+  _mm_store_si128(&sev_128, _mm_add_epi64(_mm_unpackhi_epi32(sev_32, z),
+                                          _mm_unpacklo_epi32(sev_32, z)));
+  _mm_store_si128(&seh_128, _mm_add_epi64(_mm_unpackhi_epi32(seh_32, z),
+                                          _mm_unpacklo_epi32(seh_32, z)));
+  _mm_store_si128(&msa_128, _mm_add_epi64(_mm_unpackhi_epi32(msa_32, z),
+                                          _mm_unpacklo_epi32(msa_32, z)));
 
-  uint64_t *se_64 = reinterpret_cast<uint64_t*>(&se_128);
-  uint64_t *sev_64 = reinterpret_cast<uint64_t*>(&sev_128);
-  uint64_t *seh_64 = reinterpret_cast<uint64_t*>(&seh_128);
-  uint64_t *msa_64 = reinterpret_cast<uint64_t*>(&msa_128);
+  uint64_t* se_64 = reinterpret_cast<uint64_t*>(&se_128);
+  uint64_t* sev_64 = reinterpret_cast<uint64_t*>(&sev_128);
+  uint64_t* seh_64 = reinterpret_cast<uint64_t*>(&seh_128);
+  uint64_t* msa_64 = reinterpret_cast<uint64_t*>(&msa_128);
 
   const uint32_t spatialErrSum = se_64[0] + se_64[1];
   const uint32_t spatialErrVSum = sev_64[0] + sev_64[1];
   const uint32_t spatialErrHSum = seh_64[0] + seh_64[1];
   const uint32_t pixelMSA = msa_64[0] + msa_64[1];
 
   // Normalize over all pixels.
-  const float spatialErr = (float)(spatialErrSum >> 2);
-  const float spatialErrH = (float)(spatialErrHSum >> 1);
-  const float spatialErrV = (float)(spatialErrVSum >> 1);
-  const float norm = (float)pixelMSA;
+  const float spatialErr = static_cast<float>(spatialErrSum >> 2);
+  const float spatialErrH = static_cast<float>(spatialErrHSum >> 1);
+  const float spatialErrV = static_cast<float>(spatialErrVSum >> 1);
+  const float norm = static_cast<float>(pixelMSA);
 
   // 2X2:
   spatial_pred_err_ = spatialErr / norm;
@@ -258,7 +265,7 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
   // 2X1:
   spatial_pred_err_v_ = spatialErrV / norm;
 
   return VPM_OK;
 }
 
 }  // namespace webrtc
@@ -40,16 +40,17 @@ enum { kLog2OfDownsamplingFactor = 3 };
 // >> fprintf('%d, ', probUW16)
 // Resolution reduced to avoid overflow when multiplying with the
 // (potentially) large number of pixels.
-const uint16_t VPMDeflickering::prob_uw16_[kNumProbs] = {102, 205, 410, 614,
-    819, 1024, 1229, 1434, 1638, 1843, 1946, 1987};  // <Q11>
+const uint16_t VPMDeflickering::prob_uw16_[kNumProbs] = {
+    102, 205, 410, 614, 819, 1024,
+    1229, 1434, 1638, 1843, 1946, 1987};  // <Q11>
 
 // To generate in Matlab:
 // >> numQuants = 14; maxOnlyLength = 5;
 // >> weightUW16 = round(2^15 *
 //    [linspace(0.5, 1.0, numQuants - maxOnlyLength)]);
 // >> fprintf('%d, %d,\n ', weightUW16);
-const uint16_t VPMDeflickering::weight_uw16_[kNumQuants - kMaxOnlyLength] =
-    {16384, 18432, 20480, 22528, 24576, 26624, 28672, 30720, 32768};  // <Q15>
+const uint16_t VPMDeflickering::weight_uw16_[kNumQuants - kMaxOnlyLength] = {
+    16384, 18432, 20480, 22528, 24576, 26624, 28672, 30720, 32768};  // <Q15>
 
 VPMDeflickering::VPMDeflickering() {
   Reset();
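The <Q11> marker on the reflowed table means the probabilities are stored as value/2048, so a quantile position in the sorted, sub-sampled luma plane can be found with one integer multiply and shift, which is how ProcessFrame consumes prob_uw16_ further down. As a sketch (hypothetical helper, assuming the Q11 convention noted in the comment):

#include <cstdint>

// Index of the p-th quantile in a sorted array of n samples, with the
// probability given in Q11 (p_q11 / 2048). Assumes p_q11 * n fits in
// 32 bits, which holds for the sub-sampled planes used here.
static uint32_t QuantileIndex(uint16_t p_q11, uint32_t n) {
  return (static_cast<uint32_t>(p_q11) * n) >> 11;
}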
@@ -70,8 +71,8 @@ void VPMDeflickering::Reset() {
   quant_hist_uw8_[0][kNumQuants - 1] = 255;
   for (int32_t i = 0; i < kNumProbs; i++) {
     // Unsigned round. <Q0>
-    quant_hist_uw8_[0][i + 1] = static_cast<uint8_t>(
-        (prob_uw16_[i] * 255 + (1 << 10)) >> 11);
+    quant_hist_uw8_[0][i + 1] =
+        static_cast<uint8_t>((prob_uw16_[i] * 255 + (1 << 10)) >> 11);
   }
 
   for (int32_t i = 1; i < kFrameHistory_size; i++) {
@@ -80,9 +81,8 @@ void VPMDeflickering::Reset() {
   }
 }
 
-int32_t VPMDeflickering::ProcessFrame(
-    VideoFrame* frame,
-    VideoProcessing::FrameStats* stats) {
+int32_t VPMDeflickering::ProcessFrame(VideoFrame* frame,
+                                      VideoProcessing::FrameStats* stats) {
   assert(frame);
   uint32_t frame_memory;
   uint8_t quant_uw8[kNumQuants];
@@ -111,7 +111,8 @@ int32_t VPMDeflickering::ProcessFrame(
     return VPM_GENERAL_ERROR;
   }
 
-  if (PreDetection(frame->timestamp(), *stats) == -1) return VPM_GENERAL_ERROR;
+  if (PreDetection(frame->timestamp(), *stats) == -1)
+    return VPM_GENERAL_ERROR;
 
   // Flicker detection
   int32_t det_flicker = DetectFlicker();
@@ -124,13 +125,13 @@ int32_t VPMDeflickering::ProcessFrame(
   // Size of luminance component.
   const uint32_t y_size = height * width;
 
-  const uint32_t y_sub_size = width * (((height - 1) >>
-      kLog2OfDownsamplingFactor) + 1);
+  const uint32_t y_sub_size =
+      width * (((height - 1) >> kLog2OfDownsamplingFactor) + 1);
   uint8_t* y_sorted = new uint8_t[y_sub_size];
   uint32_t sort_row_idx = 0;
   for (int i = 0; i < height; i += kDownsamplingFactor) {
-    memcpy(y_sorted + sort_row_idx * width,
-           frame->buffer(kYPlane) + i * width, width);
+    memcpy(y_sorted + sort_row_idx * width, frame->buffer(kYPlane) + i * width,
+           width);
     sort_row_idx++;
   }
 
@@ -153,12 +154,12 @@ int32_t VPMDeflickering::ProcessFrame(
     quant_uw8[i + 1] = y_sorted[prob_idx_uw32];
   }
 
-  delete [] y_sorted;
+  delete[] y_sorted;
   y_sorted = NULL;
 
   // Shift history for new frame.
   memmove(quant_hist_uw8_[1], quant_hist_uw8_[0],
           (kFrameHistory_size - 1) * kNumQuants * sizeof(uint8_t));
   // Store current frame in history.
   memcpy(quant_hist_uw8_[0], quant_uw8, kNumQuants * sizeof(uint8_t));
 
@@ -190,9 +191,10 @@ int32_t VPMDeflickering::ProcessFrame(
     // target = w * maxquant_uw8 + (1 - w) * minquant_uw8
     // Weights w = |weight_uw16_| are in Q15, hence the final output has to be
     // right shifted by 8 to end up in Q7.
-    target_quant_uw16[i] = static_cast<uint16_t>((
-        weight_uw16_[i] * maxquant_uw8[i] +
-        ((1 << 15) - weight_uw16_[i]) * minquant_uw8[i]) >> 8);  // <Q7>
+    target_quant_uw16[i] = static_cast<uint16_t>(
+        (weight_uw16_[i] * maxquant_uw8[i] +
+         ((1 << 15) - weight_uw16_[i]) * minquant_uw8[i]) >>
+        8);  // <Q7>
   }
 
   for (int32_t i = kNumQuants - kMaxOnlyLength; i < kNumQuants; i++) {
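The expression clang-format is wrapping here is a fixed-point convex blend: the Q15 weight times the max quantile plus (2^15 - w) times the min quantile yields a Q15 value, and the right shift by 8 lands it in Q7, as the comment states. The same arithmetic as a scalar sketch (hypothetical helper):

#include <cstdint>

// target = w * max + (1 - w) * min, with w in Q15, inputs in Q0 and the
// result in Q7, mirroring the expression above. The Q15 * Q0 product of
// at most 32768 * 255 fits comfortably in 32 bits.
static uint16_t BlendQ7(uint16_t w_q15, uint8_t max_q0, uint8_t min_q0) {
  const uint32_t acc = static_cast<uint32_t>(w_q15) * max_q0 +
                       (static_cast<uint32_t>(1 << 15) - w_q15) * min_q0;
  return static_cast<uint16_t>(acc >> 8);  // Q15 -> Q7
}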
@@ -203,13 +205,14 @@ int32_t VPMDeflickering::ProcessFrame(
   uint16_t mapUW16;  // <Q7>
   for (int32_t i = 1; i < kNumQuants; i++) {
     // As quant and targetQuant are limited to UWord8, it's safe to use Q7 here.
-    tmp_uw32 = static_cast<uint32_t>(target_quant_uw16[i] -
-        target_quant_uw16[i - 1]);
+    tmp_uw32 =
+        static_cast<uint32_t>(target_quant_uw16[i] - target_quant_uw16[i - 1]);
     tmp_uw16 = static_cast<uint16_t>(quant_uw8[i] - quant_uw8[i - 1]);  // <Q0>
 
     if (tmp_uw16 > 0) {
-      increment_uw16 = static_cast<uint16_t>(WebRtcSpl_DivU32U16(tmp_uw32,
-          tmp_uw16));  // <Q7>
+      increment_uw16 =
+          static_cast<uint16_t>(WebRtcSpl_DivU32U16(tmp_uw32,
+                                                    tmp_uw16));  // <Q7>
     } else {
       // The value is irrelevant; the loop below will only iterate once.
       increment_uw16 = 0;
@@ -247,8 +250,9 @@ int32_t VPMDeflickering::ProcessFrame(
       zero.\n
   -1: Error
 */
-int32_t VPMDeflickering::PreDetection(const uint32_t timestamp,
-                                      const VideoProcessing::FrameStats& stats) {
+int32_t VPMDeflickering::PreDetection(
+    const uint32_t timestamp,
+    const VideoProcessing::FrameStats& stats) {
   int32_t mean_val;  // Mean value of frame (Q4)
   uint32_t frame_rate = 0;
   int32_t meanBufferLength;  // Temp variable.
@@ -257,16 +261,16 @@ int32_t VPMDeflickering::PreDetection(const uint32_t timestamp,
   // Update mean value buffer.
   // This should be done even though we might end up in an unreliable detection.
   memmove(mean_buffer_ + 1, mean_buffer_,
           (kMeanBufferLength - 1) * sizeof(int32_t));
   mean_buffer_[0] = mean_val;
 
   // Update timestamp buffer.
   // This should be done even though we might end up in an unreliable detection.
-  memmove(timestamp_buffer_ + 1, timestamp_buffer_, (kMeanBufferLength - 1) *
-      sizeof(uint32_t));
+  memmove(timestamp_buffer_ + 1, timestamp_buffer_,
+          (kMeanBufferLength - 1) * sizeof(uint32_t));
   timestamp_buffer_[0] = timestamp;
 
   /* Compute current frame rate (Q4) */
   if (timestamp_buffer_[kMeanBufferLength - 1] != 0) {
     frame_rate = ((90000 << 4) * (kMeanBufferLength - 1));
     frame_rate /=
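The computation that continues past this hunk divides the 90 kHz RTP clock by the average timestamp step across the buffer, keeping four fractional bits. Condensed into a sketch without PreDetection's zero and rollover guards (hypothetical helper):

#include <cstdint>

// Frames per second in Q4 from two RTP timestamps that are `frames`
// frames apart (RTP video clock = 90000 Hz). Unsigned subtraction
// tolerates timestamp wraparound.
static uint32_t FrameRateQ4(uint32_t newest, uint32_t oldest,
                            uint32_t frames) {
  return ((90000u << 4) * frames) / (newest - oldest);
}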
@@ -315,22 +319,22 @@ int32_t VPMDeflickering::PreDetection(const uint32_t timestamp,
   -1: Error
 */
 int32_t VPMDeflickering::DetectFlicker() {
   uint32_t i;
   int32_t freqEst;  // (Q4) Frequency estimate to base detection upon
   int32_t ret_val = -1;
 
   /* Sanity check for mean_buffer_length_ */
   if (mean_buffer_length_ < 2) {
     /* Not possible to estimate frequency */
-    return(2);
+    return 2;
   }
   // Count zero crossings with a dead zone to be robust against noise. If the
   // noise std is 2 pixel this corresponds to about 95% confidence interval.
   int32_t deadzone = (kZeroCrossingDeadzone << kmean_valueScaling);  // Q4
   int32_t meanOfBuffer = 0;  // Mean value of mean value buffer.
   int32_t numZeros = 0;      // Number of zeros that cross the dead-zone.
   int32_t cntState = 0;      // State variable for zero crossing regions.
   int32_t cntStateOld = 0;   // Previous state for zero crossing regions.
 
   for (i = 0; i < mean_buffer_length_; i++) {
     meanOfBuffer += mean_buffer_[i];
|
@ -38,13 +38,13 @@ class VPMDeflickering {
|
|||||||
enum { kNumQuants = kNumProbs + 2 };
|
enum { kNumQuants = kNumProbs + 2 };
|
||||||
enum { kMaxOnlyLength = 5 };
|
enum { kMaxOnlyLength = 5 };
|
||||||
|
|
||||||
uint32_t mean_buffer_length_;
|
uint32_t mean_buffer_length_;
|
||||||
uint8_t detection_state_; // 0: No flickering
|
uint8_t detection_state_; // 0: No flickering
|
||||||
// 1: Flickering detected
|
// 1: Flickering detected
|
||||||
// 2: In flickering
|
// 2: In flickering
|
||||||
int32_t mean_buffer_[kMeanBufferLength];
|
int32_t mean_buffer_[kMeanBufferLength];
|
||||||
uint32_t timestamp_buffer_[kMeanBufferLength];
|
uint32_t timestamp_buffer_[kMeanBufferLength];
|
||||||
uint32_t frame_rate_;
|
uint32_t frame_rate_;
|
||||||
static const uint16_t prob_uw16_[kNumProbs];
|
static const uint16_t prob_uw16_[kNumProbs];
|
||||||
static const uint16_t weight_uw16_[kNumQuants - kMaxOnlyLength];
|
static const uint16_t weight_uw16_[kNumQuants - kMaxOnlyLength];
|
||||||
uint8_t quant_hist_uw8_[kFrameHistory_size][kNumQuants];
|
uint8_t quant_hist_uw8_[kFrameHistory_size][kNumQuants];
|
||||||
|
@ -31,7 +31,7 @@ VPMFramePreprocessor::~VPMFramePreprocessor() {
|
|||||||
delete spatial_resampler_;
|
delete spatial_resampler_;
|
||||||
}
|
}
|
||||||
|
|
||||||
void VPMFramePreprocessor::Reset() {
|
void VPMFramePreprocessor::Reset() {
|
||||||
ca_->Release();
|
ca_->Release();
|
||||||
vd_->Reset();
|
vd_->Reset();
|
||||||
content_metrics_ = nullptr;
|
content_metrics_ = nullptr;
|
||||||
@@ -40,7 +40,7 @@ void VPMFramePreprocessor::Reset() {
   frame_cnt_ = 0;
 }
 
 void VPMFramePreprocessor::EnableTemporalDecimation(bool enable) {
   vd_->EnableTemporalDecimation(enable);
 }
 
@@ -48,20 +48,22 @@ void VPMFramePreprocessor::EnableContentAnalysis(bool enable) {
   enable_ca_ = enable;
 }
 
 void VPMFramePreprocessor::SetInputFrameResampleMode(
     VideoFrameResampling resampling_mode) {
   spatial_resampler_->SetInputFrameResampleMode(resampling_mode);
 }
 
-int32_t VPMFramePreprocessor::SetTargetResolution(
-    uint32_t width, uint32_t height, uint32_t frame_rate) {
+int32_t VPMFramePreprocessor::SetTargetResolution(uint32_t width,
+                                                  uint32_t height,
+                                                  uint32_t frame_rate) {
   if ((width == 0) || (height == 0) || (frame_rate == 0)) {
     return VPM_PARAMETER_ERROR;
   }
   int32_t ret_val = 0;
   ret_val = spatial_resampler_->SetTargetFrameSize(width, height);
 
-  if (ret_val < 0) return ret_val;
+  if (ret_val < 0)
+    return ret_val;
 
   vd_->SetTargetFramerate(frame_rate);
   return VPM_OK;
@@ -84,12 +86,10 @@ uint32_t VPMFramePreprocessor::GetDecimatedFrameRate() {
   return vd_->GetDecimatedFrameRate();
 }
 
-
 uint32_t VPMFramePreprocessor::GetDecimatedWidth() const {
   return spatial_resampler_->TargetWidth();
 }
 
-
 uint32_t VPMFramePreprocessor::GetDecimatedHeight() const {
   return spatial_resampler_->TargetHeight();
 }
@@ -116,7 +116,7 @@ const VideoFrame* VPMFramePreprocessor::PreprocessFrame(
   }
 
   if (spatial_resampler_->ApplyResample(current_frame->width(),
                                         current_frame->height())) {
     if (spatial_resampler_->ResampleFrame(*current_frame, &resampled_frame_) !=
         VPM_OK) {
       return nullptr;
|
@ -41,7 +41,8 @@ class VPMFramePreprocessor {
|
|||||||
void EnableContentAnalysis(bool enable);
|
void EnableContentAnalysis(bool enable);
|
||||||
|
|
||||||
// Set target resolution: frame rate and dimension.
|
// Set target resolution: frame rate and dimension.
|
||||||
int32_t SetTargetResolution(uint32_t width, uint32_t height,
|
int32_t SetTargetResolution(uint32_t width,
|
||||||
|
uint32_t height,
|
||||||
uint32_t frame_rate);
|
uint32_t frame_rate);
|
||||||
|
|
||||||
// Set target frame rate.
|
// Set target frame rate.
|
||||||
|
@@ -8,13 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-/*
- * video_processing.h
- * This header file contains the API required for the video
- * processing module class.
- */
-
-
 #ifndef WEBRTC_MODULES_VIDEO_PROCESSING_INCLUDE_VIDEO_PROCESSING_H_
 #define WEBRTC_MODULES_VIDEO_PROCESSING_INCLUDE_VIDEO_PROCESSING_H_
 
@@ -43,11 +36,7 @@ class VideoProcessing {
     uint32_t sub_sampling_factor;  // Sub-sampling factor, in powers of 2.
   };
 
-  enum BrightnessWarning {
-    kNoWarning,
-    kDarkWarning,
-    kBrightWarning
-  };
+  enum BrightnessWarning { kNoWarning, kDarkWarning, kBrightWarning };
 
   static VideoProcessing* Create();
   virtual ~VideoProcessing() {}
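The collapsed enum is consumed as the return value of BrightnessDetection, as the test at the end of this CL shows. A condensed, hypothetical call site (the types come from this header; the handling comments are illustrative only):

#include "webrtc/modules/video_processing/include/video_processing.h"

// Sketch of how a caller might react to the warning levels; mirrors the
// pattern used in the brightness_detection test below.
int32_t CheckBrightness(webrtc::VideoProcessing* vp,
                        const webrtc::VideoFrame& frame,
                        const webrtc::VideoProcessing::FrameStats& stats) {
  const int32_t warning = vp->BrightnessDetection(frame, stats);
  if (warning == webrtc::VideoProcessing::kDarkWarning) {
    // Scene is too dark; a capturer could raise exposure here.
  } else if (warning == webrtc::VideoProcessing::kBrightWarning) {
    // Scene is overexposed; a capturer could lower exposure here.
  }
  return warning;
}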
@@ -21,19 +21,19 @@
 namespace webrtc {
 
 // Error codes
 #define VPM_OK 0
 #define VPM_GENERAL_ERROR -1
 #define VPM_MEMORY -2
 #define VPM_PARAMETER_ERROR -3
 #define VPM_SCALE_ERROR -4
 #define VPM_UNINITIALIZED -5
 #define VPM_UNIMPLEMENTED -6
 
 enum VideoFrameResampling {
   kNoRescaling,    // Disables rescaling.
   kFastRescaling,  // Point filter.
   kBiLinear,       // Bi-linear interpolation.
   kBox,            // Box inteprolation.
 };
 
 }  // namespace webrtc
@@ -10,7 +10,6 @@
 
 #include "webrtc/modules/video_processing/spatial_resampler.h"
 
-
 namespace webrtc {
 
 VPMSimpleSpatialResampler::VPMSimpleSpatialResampler()
@@ -21,12 +20,13 @@ VPMSimpleSpatialResampler::VPMSimpleSpatialResampler()
 
 VPMSimpleSpatialResampler::~VPMSimpleSpatialResampler() {}
 
-
 int32_t VPMSimpleSpatialResampler::SetTargetFrameSize(int32_t width,
                                                       int32_t height) {
-  if (resampling_mode_ == kNoRescaling) return VPM_OK;
+  if (resampling_mode_ == kNoRescaling)
+    return VPM_OK;
 
-  if (width < 1 || height < 1) return VPM_PARAMETER_ERROR;
+  if (width < 1 || height < 1)
+    return VPM_PARAMETER_ERROR;
 
   target_width_ = width;
   target_height_ = height;
@@ -48,11 +48,11 @@ void VPMSimpleSpatialResampler::Reset() {
 int32_t VPMSimpleSpatialResampler::ResampleFrame(const VideoFrame& inFrame,
                                                  VideoFrame* outFrame) {
   // Don't copy if frame remains as is.
-  if (resampling_mode_ == kNoRescaling)
+  if (resampling_mode_ == kNoRescaling) {
     return VPM_OK;
   // Check if re-sampling is needed
-  else if ((inFrame.width() == target_width_) &&
+  } else if ((inFrame.width() == target_width_) &&
            (inFrame.height() == target_height_)) {
     return VPM_OK;
   }
 
@@ -60,8 +60,8 @@ int32_t VPMSimpleSpatialResampler::ResampleFrame(const VideoFrame& inFrame,
   // TODO(mikhal/marpan): Should we allow for setting the filter mode in
   // _scale.Set() with |resampling_mode_|?
   int ret_val = 0;
-  ret_val = scaler_.Set(inFrame.width(), inFrame.height(),
-                        target_width_, target_height_, kI420, kI420, kScaleBox);
+  ret_val = scaler_.Set(inFrame.width(), inFrame.height(), target_width_,
+                        target_height_, kI420, kI420, kScaleBox);
   if (ret_val < 0)
     return ret_val;
 
@@ -86,10 +86,9 @@ int32_t VPMSimpleSpatialResampler::TargetWidth() {
   return target_width_;
 }
 
-bool VPMSimpleSpatialResampler::ApplyResample(int32_t width,
-                                              int32_t height) {
+bool VPMSimpleSpatialResampler::ApplyResample(int32_t width, int32_t height) {
   if ((width == target_width_ && height == target_height_) ||
       resampling_mode_ == kNoRescaling)
     return false;
   else
     return true;
@@ -23,10 +23,10 @@ namespace webrtc {
 
 class VPMSpatialResampler {
  public:
-  virtual ~VPMSpatialResampler() {};
+  virtual ~VPMSpatialResampler() {}
   virtual int32_t SetTargetFrameSize(int32_t width, int32_t height) = 0;
-  virtual void SetInputFrameResampleMode(VideoFrameResampling
-                                         resampling_mode) = 0;
+  virtual void SetInputFrameResampleMode(
+      VideoFrameResampling resampling_mode) = 0;
   virtual void Reset() = 0;
   virtual int32_t ResampleFrame(const VideoFrame& inFrame,
                                 VideoFrame* outFrame) = 0;
@@ -49,11 +49,10 @@ class VPMSimpleSpatialResampler : public VPMSpatialResampler {
   virtual bool ApplyResample(int32_t width, int32_t height);
 
  private:
-
   VideoFrameResampling resampling_mode_;
   int32_t target_width_;
   int32_t target_height_;
   Scaler scaler_;
 };
 
 }  // namespace webrtc
|
@ -16,100 +16,101 @@
namespace webrtc {

TEST_F(VideoProcessingTest, DISABLED_ON_IOS(BrightnessDetection)) {
  uint32_t frameNum = 0;
  int32_t brightnessWarning = 0;
  uint32_t warningCount = 0;
  rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
  while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
         frame_length_) {
    EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
                               0, kVideoRotation_0, &video_frame_));
    frameNum++;
    VideoProcessing::FrameStats stats;
    vp_->GetFrameStats(video_frame_, &stats);
    EXPECT_GT(stats.num_pixels, 0u);
    ASSERT_GE(brightnessWarning = vp_->BrightnessDetection(video_frame_, stats),
              0);
    if (brightnessWarning != VideoProcessing::kNoWarning) {
      warningCount++;
    }
  }
  ASSERT_NE(0, feof(source_file_)) << "Error reading source file";

  // Expect few warnings
  float warningProportion = static_cast<float>(warningCount) / frameNum * 100;
  printf("\nWarning proportions:\n");
  printf("Stock foreman: %.1f %%\n", warningProportion);
  EXPECT_LT(warningProportion, 10);

  rewind(source_file_);
  frameNum = 0;
  warningCount = 0;
  while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
             frame_length_ &&
         frameNum < 300) {
    EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
                               0, kVideoRotation_0, &video_frame_));
    frameNum++;

    uint8_t* frame = video_frame_.buffer(kYPlane);
    uint32_t yTmp = 0;
    for (int yIdx = 0; yIdx < width_ * height_; yIdx++) {
      yTmp = frame[yIdx] << 1;
      if (yTmp > 255) {
        yTmp = 255;
      }
      frame[yIdx] = static_cast<uint8_t>(yTmp);
    }

    VideoProcessing::FrameStats stats;
    vp_->GetFrameStats(video_frame_, &stats);
    EXPECT_GT(stats.num_pixels, 0u);
    ASSERT_GE(brightnessWarning = vp_->BrightnessDetection(video_frame_, stats),
              0);
    EXPECT_NE(VideoProcessing::kDarkWarning, brightnessWarning);
    if (brightnessWarning == VideoProcessing::kBrightWarning) {
      warningCount++;
    }
  }
  ASSERT_NE(0, feof(source_file_)) << "Error reading source file";

  // Expect many brightness warnings
  warningProportion = static_cast<float>(warningCount) / frameNum * 100;
  printf("Bright foreman: %.1f %%\n", warningProportion);
  EXPECT_GT(warningProportion, 95);

  rewind(source_file_);
  frameNum = 0;
  warningCount = 0;
  while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
             frame_length_ &&
         frameNum < 300) {
    EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
                               0, kVideoRotation_0, &video_frame_));
    frameNum++;

    uint8_t* y_plane = video_frame_.buffer(kYPlane);
    int32_t yTmp = 0;
    for (int yIdx = 0; yIdx < width_ * height_; yIdx++) {
      yTmp = y_plane[yIdx] >> 1;
      y_plane[yIdx] = static_cast<uint8_t>(yTmp);
    }

    VideoProcessing::FrameStats stats;
    vp_->GetFrameStats(video_frame_, &stats);
    EXPECT_GT(stats.num_pixels, 0u);
    ASSERT_GE(brightnessWarning = vp_->BrightnessDetection(video_frame_, stats),
              0);
    EXPECT_NE(VideoProcessing::kBrightWarning, brightnessWarning);
    if (brightnessWarning == VideoProcessing::kDarkWarning) {
      warningCount++;
    }
  }
  ASSERT_NE(0, feof(source_file_)) << "Error reading source file";

  // Expect many darkness warnings
  warningProportion = static_cast<float>(warningCount) / frameNum * 100;
  printf("Dark foreman: %.1f %%\n\n", warningProportion);
  EXPECT_GT(warningProportion, 90);
}

}  // namespace webrtc
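The hunk above reflows the whole BrightnessDetection test; for context, its three passes cover every detector outcome: stock foreman_cif must stay below 10 % warnings, frames with doubled luma (clamped at 255) must report kBrightWarning on more than 95 % of frames and never kDarkWarning, and frames with halved luma must report kDarkWarning on more than 90 % of frames and never kBrightWarning.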
@ -17,8 +17,8 @@
namespace webrtc {

TEST_F(VideoProcessingTest, DISABLED_ON_IOS(ContentAnalysis)) {
  VPMContentAnalysis ca__c(false);
  VPMContentAnalysis ca__sse(true);
  VideoContentMetrics* _cM_c;
  VideoContentMetrics* _cM_SSE;

@ -26,12 +26,12 @@ TEST_F(VideoProcessingTest, DISABLED_ON_IOS(ContentAnalysis)) {
  ca__sse.Initialize(width_, height_);

  rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
  while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
         frame_length_) {
    // Using ConvertToI420 to add stride to the image.
    EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
                               0, kVideoRotation_0, &video_frame_));
    _cM_c = ca__c.ComputeContentMetrics(video_frame_);
    _cM_SSE = ca__sse.ComputeContentMetrics(video_frame_);

    ASSERT_EQ(_cM_c->spatial_pred_err, _cM_SSE->spatial_pred_err);
@ -21,76 +21,75 @@
namespace webrtc {

TEST_F(VideoProcessingTest, DISABLED_ON_IOS(Deflickering)) {
  enum { NumRuns = 30 };
  uint32_t frameNum = 0;
  const uint32_t frame_rate = 15;

  int64_t min_runtime = 0;
  int64_t avg_runtime = 0;

  // Close automatically opened Foreman.
  fclose(source_file_);
  const std::string input_file =
      webrtc::test::ResourcePath("deflicker_before_cif_short", "yuv");
  source_file_ = fopen(input_file.c_str(), "rb");
  ASSERT_TRUE(source_file_ != NULL) << "Cannot read input file: " << input_file
                                    << "\n";

  const std::string output_file =
      webrtc::test::OutputPath() + "deflicker_output_cif_short.yuv";
  FILE* deflickerFile = fopen(output_file.c_str(), "wb");
  ASSERT_TRUE(deflickerFile != NULL)
      << "Could not open output file: " << output_file << "\n";

  printf("\nRun time [us / frame]:\n");
  rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
  for (uint32_t run_idx = 0; run_idx < NumRuns; run_idx++) {
    TickTime t0;
    TickTime t1;
    TickInterval acc_ticks;
    uint32_t timeStamp = 1;

    frameNum = 0;
    while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
           frame_length_) {
      frameNum++;
      EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_,
                                 height_, 0, kVideoRotation_0, &video_frame_));
      video_frame_.set_timestamp(timeStamp);

      t0 = TickTime::Now();
      VideoProcessing::FrameStats stats;
      vp_->GetFrameStats(video_frame_, &stats);
      EXPECT_GT(stats.num_pixels, 0u);
      ASSERT_EQ(0, vp_->Deflickering(&video_frame_, &stats));
      t1 = TickTime::Now();
      acc_ticks += (t1 - t0);

      if (run_idx == 0) {
        if (PrintVideoFrame(video_frame_, deflickerFile) < 0) {
          return;
        }
      }
      timeStamp += (90000 / frame_rate);
    }
    ASSERT_NE(0, feof(source_file_)) << "Error reading source file";

    printf("%u\n", static_cast<int>(acc_ticks.Microseconds() / frameNum));
    if (acc_ticks.Microseconds() < min_runtime || run_idx == 0) {
      min_runtime = acc_ticks.Microseconds();
    }
    avg_runtime += acc_ticks.Microseconds();

    rewind(source_file_);
  }
  ASSERT_EQ(0, fclose(deflickerFile));
  // TODO(kjellander): Add verification of deflicker output file.

  printf("\nAverage run time = %d us / frame\n",
         static_cast<int>(avg_runtime / frameNum / NumRuns));
  printf("Min run time = %d us / frame\n\n",
         static_cast<int>(min_runtime / frameNum));
}

}  // namespace webrtc
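For context: the test stamps frames on the 90 kHz RTP video clock, so at frame_rate = 15 each iteration advances the timestamp by 90000 / 15 = 6000 ticks.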
@ -71,20 +71,20 @@ void VideoProcessingTest::SetUp() {
  ASSERT_TRUE(vp_ != NULL);

  ASSERT_EQ(0, video_frame_.CreateEmptyFrame(width_, height_, width_,
                                             half_width_, half_width_));
  // Clear video frame so DrMemory/Valgrind will allow reads of the buffer.
  memset(video_frame_.buffer(kYPlane), 0, video_frame_.allocated_size(kYPlane));
  memset(video_frame_.buffer(kUPlane), 0, video_frame_.allocated_size(kUPlane));
  memset(video_frame_.buffer(kVPlane), 0, video_frame_.allocated_size(kVPlane));
  const std::string video_file =
      webrtc::test::ResourcePath("foreman_cif", "yuv");
  source_file_ = fopen(video_file.c_str(), "rb");
  ASSERT_TRUE(source_file_ != NULL)
      << "Cannot read source file: " + video_file + "\n";
}

void VideoProcessingTest::TearDown() {
  if (source_file_ != NULL) {
    ASSERT_EQ(0, fclose(source_file_));
  }
  source_file_ = NULL;
@ -110,8 +110,8 @@ TEST_F(VideoProcessingTest, DISABLED_ON_IOS(HandleBadStats)) {
  VideoProcessing::FrameStats stats;
  vp_->ClearFrameStats(&stats);
  rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
  ASSERT_EQ(frame_length_,
            fread(video_buffer.get(), 1, frame_length_, source_file_));
  EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
                             0, kVideoRotation_0, &video_frame_));
@ -125,8 +125,8 @@ TEST_F(VideoProcessingTest, DISABLED_ON_IOS(IdenticalResultsAfterReset)) {
  VideoProcessing::FrameStats stats;
  // Only testing non-static functions here.
  rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
  ASSERT_EQ(frame_length_,
            fread(video_buffer.get(), 1, frame_length_, source_file_));
  EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
                             0, kVideoRotation_0, &video_frame_));
  vp_->GetFrameStats(video_frame_, &stats);
@ -140,8 +140,8 @@ TEST_F(VideoProcessingTest, DISABLED_ON_IOS(IdenticalResultsAfterReset)) {
  ASSERT_EQ(0, vp_->Deflickering(&video_frame2, &stats));
  EXPECT_TRUE(CompareFrames(video_frame_, video_frame2));

  ASSERT_EQ(frame_length_,
            fread(video_buffer.get(), 1, frame_length_, source_file_));
  EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
                             0, kVideoRotation_0, &video_frame_));
  vp_->GetFrameStats(video_frame_, &stats);
@ -157,8 +157,8 @@ TEST_F(VideoProcessingTest, DISABLED_ON_IOS(FrameStats)) {
  VideoProcessing::FrameStats stats;
  vp_->ClearFrameStats(&stats);
  rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
  ASSERT_EQ(frame_length_,
            fread(video_buffer.get(), 1, frame_length_, source_file_));
  EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
                             0, kVideoRotation_0, &video_frame_));
@ -204,8 +204,7 @@ TEST_F(VideoProcessingTest, DISABLED_ON_IOS(Resampler)) {
  int64_t total_runtime = 0;

  rewind(source_file_);
  ASSERT_TRUE(source_file_ != NULL) << "Cannot read input file \n";

  // CA not needed here
  vp_->EnableContentAnalysis(false);
@ -214,8 +213,8 @@ TEST_F(VideoProcessingTest, DISABLED_ON_IOS(Resampler)) {

  // Reading test frame
  rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
  ASSERT_EQ(frame_length_,
            fread(video_buffer.get(), 1, frame_length_, source_file_));
  // Using ConvertToI420 to add stride to the image.
  EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
                             0, kVideoRotation_0, &video_frame_));
@ -281,8 +280,7 @@ TEST_F(VideoProcessingTest, DISABLED_ON_IOS(Resampler)) {

  printf("\nAverage run time = %d us / frame\n",
         static_cast<int>(total_runtime));
  printf("Min run time = %d us / frame\n\n", static_cast<int>(min_runtime));
}

void PreprocessFrameAndVerify(const VideoFrame& source,
@ -349,15 +347,16 @@ void TestSize(const VideoFrame& source_frame,
  // Compute PSNR against the cropped source frame and check expectation.
  double psnr = I420PSNR(&cropped_source_frame, out_frame);
  EXPECT_GT(psnr, expected_psnr);
  printf(
      "PSNR: %f. PSNR is between source of size %d %d, and a modified "
      "source which is scaled down/up to: %d %d, and back to source size \n",
      psnr, source_frame.width(), source_frame.height(), target_width,
      target_height);
}

bool CompareFrames(const webrtc::VideoFrame& frame1,
                   const webrtc::VideoFrame& frame2) {
  for (int plane = 0; plane < webrtc::kNumOfPlanes; plane++) {
    webrtc::PlaneType plane_type = static_cast<webrtc::PlaneType>(plane);
    int allocated_size1 = frame1.allocated_size(plane_type);
    int allocated_size2 = frame2.allocated_size(plane_type);
@ -30,9 +30,7 @@ class VideoProcessingTest : public ::testing::Test {
    std::string trace_file = webrtc::test::OutputPath() + "VPMTrace.txt";
    ASSERT_EQ(0, Trace::SetTraceFile(trace_file.c_str()));
  }
  static void TearDownTestCase() { Trace::ReturnTrace(); }
  VideoProcessing* vp_;
  FILE* source_file_;
  VideoFrame video_frame_;
@ -23,12 +23,11 @@ const int kSumDiffThresholdHigh = 600;
DenoiserFilter* DenoiserFilter::Create() {
  DenoiserFilter* filter = NULL;

  // If we know the minimum architecture at compile time, avoid CPU detection.
#if defined(WEBRTC_ARCH_X86_FAMILY)
  // x86 CPU detection required.
  if (WebRtc_GetCPUInfo(kSSE2)) {
    filter = new DenoiserFilterSSE2();
  } else {
    filter = new DenoiserFilterC();
  }
@ -41,8 +41,6 @@ class DenoiserFilterC : public DenoiserFilter {
                    int increase_denoising) override;
};

}  // namespace webrtc

#endif  // WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_C_H_
@ -58,8 +58,8 @@ static void VarianceNeonW8(const uint8_t* a,
  }

  *sum = HorizontalAddS16x8(v_sum);
  *sse =
      static_cast<uint32_t>(HorizontalAddS32x4(vaddq_s32(v_sse_lo, v_sse_hi)));
}

void DenoiserFilterNEON::CopyMem16x16(const uint8_t* src,
@ -111,9 +111,8 @@ DenoiserDecision DenoiserFilterNEON::MbDenoise(uint8_t* mc_running_avg_y,
  // increasing the adjustment for each level, level1 adjustment is
  // increased, the deltas stay the same.
  int shift_inc =
      (increase_denoising && motion_magnitude <= kMotionMagnitudeThreshold) ? 1
                                                                            : 0;
  const uint8x16_t v_level1_adjustment = vmovq_n_u8(
      (motion_magnitude <= kMotionMagnitudeThreshold) ? 4 + shift_inc : 3);
  const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
@ -141,9 +141,8 @@ DenoiserDecision DenoiserFilterSSE2::MbDenoise(uint8_t* mc_running_avg_y,
                                          uint8_t motion_magnitude,
                                          int increase_denoising) {
  int shift_inc =
      (increase_denoising && motion_magnitude <= kMotionMagnitudeThreshold) ? 1
                                                                            : 0;
  __m128i acc_diff = _mm_setzero_si128();
  const __m128i k_0 = _mm_setzero_si128();
  const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
@ -239,15 +238,14 @@ DenoiserDecision DenoiserFilterSSE2::MbDenoise(uint8_t* mc_running_avg_y,
    // Calculate differences.
    const __m128i v_sig =
        _mm_loadu_si128(reinterpret_cast<const __m128i*>(&sig[0]));
    const __m128i v_mc_running_avg_y =
        _mm_loadu_si128(reinterpret_cast<__m128i*>(&mc_running_avg_y[0]));
    const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
    const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
    // Obtain the sign. FF if diff is negative.
    const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
    // Clamp absolute difference to delta to get the adjustment.
    const __m128i adj = _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta);
    // Restore the sign and get positive and negative adjustments.
    __m128i padj, nadj;
    padj = _mm_andnot_si128(diff_sign, adj);
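The SSE2 block above computes a branch-free clamp-and-sign adjustment. A scalar sketch of the same idea, for illustration only (the helper names are hypothetical, not part of this CL):

#include <algorithm>
#include <cstdint>

// Saturating subtraction: zero when the true difference is negative.
uint8_t SaturatingSub(uint8_t a, uint8_t b) {
  return a > b ? a - b : 0;
}

// One of pdiff/ndiff is always zero, so OR-ing them gives |avg - sig|;
// pdiff == 0 recovers the sign, exactly as diff_sign does above.
uint8_t AdjustTowardAverage(uint8_t sig, uint8_t avg, uint8_t delta) {
  const uint8_t pdiff = SaturatingSub(avg, sig);
  const uint8_t ndiff = SaturatingSub(sig, avg);
  const uint8_t adj = std::min<uint8_t>(pdiff | ndiff, delta);
  return pdiff ? sig + adj : sig - adj;  // No overflow: adj <= |avg - sig|.
}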
webrtc/modules/video_processing/util/skin_detection.cc (Executable file → Normal file; 18 lines changed)
@ -48,19 +48,13 @@ bool MbHasSkinColor(const uint8_t* y_src,
                    const int stride_v,
                    const int mb_row,
                    const int mb_col) {
  const uint8_t* y = y_src + ((mb_row << 4) + 8) * stride_y + (mb_col << 4) + 8;
  const uint8_t* u = u_src + ((mb_row << 3) + 4) * stride_u + (mb_col << 3) + 4;
  const uint8_t* v = v_src + ((mb_row << 3) + 4) * stride_v + (mb_col << 3) + 4;
  // Use 2x2 average of center pixel to compute skin area.
  uint8_t y_avg = (*y + *(y + 1) + *(y + stride_y) + *(y + stride_y + 1)) >> 2;
  uint8_t u_avg = (*u + *(u + 1) + *(u + stride_u) + *(u + stride_u + 1)) >> 2;
  uint8_t v_avg = (*v + *(v + 1) + *(v + stride_v) + *(v + stride_v + 1)) >> 2;
  // Ignore MB with too high or low brightness.
  if (y_avg < y_low || y_avg > y_high)
    return false;
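The pointer arithmetic above addresses the 2x2 block at the macroblock center. A small worked example with assumed values (CIF luma stride of 352, hypothetical macroblock coordinates):

#include <cstdio>

int main() {
  const int mb_row = 2, mb_col = 3, stride_y = 352;  // Assumed for illustration.
  // Same expression MbHasSkinColor uses for the luma center of a 16x16 MB.
  const int offset = ((mb_row << 4) + 8) * stride_y + (mb_col << 4) + 8;
  printf("luma center offset: %d\n", offset);  // (40 * 352) + 56 = 14136
  return 0;
}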
@ -23,7 +23,7 @@ VPMVideoDecimator::VPMVideoDecimator() {

VPMVideoDecimator::~VPMVideoDecimator() {}

void VPMVideoDecimator::Reset() {
  overshoot_modifier_ = 0;
  drop_count_ = 0;
  keep_count_ = 0;
@ -43,14 +43,17 @@ void VPMVideoDecimator::SetTargetFramerate(int frame_rate) {
}

bool VPMVideoDecimator::DropFrame() {
  if (!enable_temporal_decimation_)
    return false;

  if (incoming_frame_rate_ <= 0)
    return false;

  const uint32_t incomingframe_rate =
      static_cast<uint32_t>(incoming_frame_rate_ + 0.5f);

  if (target_frame_rate_ == 0)
    return true;

  bool drop = false;
  if (incomingframe_rate > target_frame_rate_) {
@ -61,44 +64,43 @@ bool VPMVideoDecimator::DropFrame() {
    overshoot_modifier_ = 0;
  }

  if (overshoot && 2 * overshoot < (int32_t)incomingframe_rate) {
    if (drop_count_) {  // Just got here so drop to be sure.
      drop_count_ = 0;
      return true;
    }
    const uint32_t dropVar = incomingframe_rate / overshoot;

    if (keep_count_ >= dropVar) {
      drop = true;
      overshoot_modifier_ = -((int32_t)incomingframe_rate % overshoot) / 3;
      keep_count_ = 1;
    } else {
      keep_count_++;
    }
  } else {
    keep_count_ = 0;
    const uint32_t dropVar = overshoot / target_frame_rate_;
    if (drop_count_ < dropVar) {
      drop = true;
      drop_count_++;
    } else {
      overshoot_modifier_ = overshoot % target_frame_rate_;
      drop = false;
      drop_count_ = 0;
    }
  }
  return drop;
}

uint32_t VPMVideoDecimator::GetDecimatedFrameRate() {
  ProcessIncomingframe_rate(TickTime::MillisecondTimestamp());
  if (!enable_temporal_decimation_) {
    return static_cast<uint32_t>(incoming_frame_rate_ + 0.5f);
  }
  return VD_MIN(target_frame_rate_,
                static_cast<uint32_t>(incoming_frame_rate_ + 0.5f));
}

uint32_t VPMVideoDecimator::Inputframe_rate() {
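DropFrame() spreads drops so the kept frames approximate target_frame_rate_. A minimal usage sketch, assuming the decimator API shown in the surrounding hunks (illustration only; frames must arrive in real time for the rate estimate to settle):

VPMVideoDecimator decimator;
decimator.EnableTemporalDecimation(true);
decimator.SetTargetFramerate(15);
// Per captured frame of a 30 fps source:
decimator.UpdateIncomingframe_rate();  // Registers the arrival time.
if (!decimator.DropFrame()) {
  // Forward the frame; at steady state roughly every other frame survives.
}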
@ -113,7 +115,7 @@ void VPMVideoDecimator::UpdateIncomingframe_rate() {
  } else {
    // Shift.
    for (int i = kFrameCountHistory_size - 2; i >= 0; i--) {
      incoming_frame_times_[i + 1] = incoming_frame_times_[i];
    }
  }
  incoming_frame_times_[0] = now;
@ -133,7 +135,7 @@ void VPMVideoDecimator::ProcessIncomingframe_rate(int64_t now) {
    }
  }
  if (num > 1) {
    int64_t diff = now - incoming_frame_times_[num - 1];
    incoming_frame_rate_ = 1.0;
    if (diff > 0) {
      incoming_frame_rate_ = nrOfFrames * 1000.0f / static_cast<float>(diff);
@ -40,8 +40,8 @@ class VPMVideoDecimator {
 private:
  void ProcessIncomingframe_rate(int64_t now);

  enum { kFrameCountHistory_size = 90 };
  enum { kFrameHistoryWindowMs = 2000 };

  // Temporal decimation.
  int32_t overshoot_modifier_;
@ -14,9 +14,7 @@
namespace webrtc {

VideoDenoiser::VideoDenoiser()
    : width_(0), height_(0), filter_(DenoiserFilter::Create()) {}

void VideoDenoiser::TrailingReduction(int mb_rows,
                                      int mb_cols,
@ -32,25 +30,26 @@ void VideoDenoiser::TrailingReduction(int mb_rows,
      // do NOT denoise for the block. Set different threshold for skin MB.
      // The change of denoising status will not propagate.
      if (metrics_[mb_index].is_skin) {
        // The threshold is high (more strict) for non-skin MB where the
        // trailing usually happen.
        if (metrics_[mb_index].denoise &&
            metrics_[mb_index + 1].denoise + metrics_[mb_index - 1].denoise +
                    metrics_[mb_index + mb_cols].denoise +
                    metrics_[mb_index - mb_cols].denoise <=
                2) {
          metrics_[mb_index].denoise = 0;
          filter_->CopyMem16x16(mb_src, stride_y, mb_dst, stride_y);
        }
      } else if (metrics_[mb_index].denoise &&
                 metrics_[mb_index + 1].denoise +
                         metrics_[mb_index - 1].denoise +
                         metrics_[mb_index + mb_cols + 1].denoise +
                         metrics_[mb_index + mb_cols - 1].denoise +
                         metrics_[mb_index - mb_cols + 1].denoise +
                         metrics_[mb_index - mb_cols - 1].denoise +
                         metrics_[mb_index + mb_cols].denoise +
                         metrics_[mb_index - mb_cols].denoise <=
                     7) {
        filter_->CopyMem16x16(mb_src, stride_y, mb_dst, stride_y);
      }
@ -91,23 +90,20 @@ void VideoDenoiser::DenoiseFrame(const VideoFrame& frame,
  uint8_t y_tmp[16 * 16] = {0};
  for (int mb_row = 0; mb_row < mb_rows; ++mb_row) {
    for (int mb_col = 0; mb_col < mb_cols; ++mb_col) {
      const uint8_t* mb_src = y_src + (mb_row << 4) * stride_y + (mb_col << 4);
      uint8_t* mb_dst = y_dst + (mb_row << 4) * stride_y + (mb_col << 4);
      int mb_index = mb_row * mb_cols + mb_col;
      // Denoise each MB at the very start and save the result to a temporary
      // buffer.
      if (filter_->MbDenoise(mb_dst, stride_y, y_tmp, 16, mb_src, stride_y, 0,
                             1) == FILTER_BLOCK) {
        uint32_t thr_var = 0;
        // Save var and sad to the buffer.
        metrics_[mb_index].var = filter_->Variance16x8(
            mb_dst, stride_y, y_tmp, 16, &metrics_[mb_index].sad);
        // Get skin map.
        metrics_[mb_index].is_skin = MbHasSkinColor(
            y_src, u_src, v_src, stride_y, stride_u, stride_v, mb_row, mb_col);
        // Variance threshold for skin/non-skin MB is different.
        // Skin MB use a small threshold to reduce blockiness.
        thr_var = metrics_[mb_index].is_skin ? 128 : 12 * 128;
@ -22,8 +22,11 @@ class VideoDenoiser {
  void DenoiseFrame(const VideoFrame& frame, VideoFrame* denoised_frame);

 private:
  void TrailingReduction(int mb_rows,
                         int mb_cols,
                         const uint8_t* y_src,
                         int stride_y,
                         uint8_t* y_dst);
  int width_;
  int height_;
  rtc::scoped_ptr<DenoiseMetrics[]> metrics_;
@ -16,7 +16,6 @@
#include "webrtc/base/logging.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"

namespace webrtc {

namespace {
@ -62,8 +61,8 @@ void VideoProcessing::GetFrameStats(const VideoFrame& frame,
    }
  }

  stats->num_pixels = (width * height) / ((1 << stats->sub_sampling_factor) *
                                          (1 << stats->sub_sampling_factor));
  assert(stats->num_pixels > 0);

  // Compute mean value of frame
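As a worked example of the expression above: for a CIF frame (352 x 288) with sub_sampling_factor = 1, every second row and column is sampled, so num_pixels = (352 * 288) / (2 * 2) = 25344.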
@ -112,22 +111,19 @@ int32_t VideoProcessingImpl::Deflickering(VideoFrame* frame,
  return deflickering_.ProcessFrame(frame, stats);
}

int32_t VideoProcessingImpl::BrightnessDetection(const VideoFrame& frame,
                                                 const FrameStats& stats) {
  rtc::CritScope mutex(&mutex_);
  return brightness_detection_.ProcessFrame(frame, stats);
}

void VideoProcessingImpl::EnableTemporalDecimation(bool enable) {
  rtc::CritScope mutex(&mutex_);
  frame_pre_processor_.EnableTemporalDecimation(enable);
}

void VideoProcessingImpl::SetInputFrameResampleMode(
    VideoFrameResampling resampling_mode) {
  rtc::CritScope cs(&mutex_);
  frame_pre_processor_.SetInputFrameResampleMode(resampling_mode);
}
@ -146,7 +142,7 @@ void VideoProcessingImpl::SetTargetFramerate(int frame_rate) {

uint32_t VideoProcessingImpl::GetDecimatedFrameRate() {
  rtc::CritScope cs(&mutex_);
  return frame_pre_processor_.GetDecimatedFrameRate();
}

uint32_t VideoProcessingImpl::GetDecimatedWidth() const {