Revert "Update video_quality_analysis to align videos instead of using barcodes"

This reverts commit d65e143801a7aaa9affdb939ea836aec1955cdcc.

Reason for revert: Breaks perf bots. frame_analyzer is a prebuilt binary, so it won't automatically pick up changes in the .cc file.

Original change's description:
> Update video_quality_analysis to align videos instead of using barcodes
> 
> This CL is a follow-up to the previous CL
> https://webrtc-review.googlesource.com/c/src/+/94773 that added generic
> logic for aligning videos. This will allow us to easily extend
> video_quality_analysis with new sophisticated video quality metrics.
> Also, we can use any kind of video that does not necessarily need to
> contain bar codes. Removing the need to decode barcodes also leads to a
> big speedup for the tests.
> 
> Bug: webrtc:9642
> Change-Id: I74b0d630b3e1ed44781ad024115ded3143e28f50
> Reviewed-on: https://webrtc-review.googlesource.com/94845
> Reviewed-by: Paulina Hensman <phensman@webrtc.org>
> Reviewed-by: Patrik Höglund <phoglund@webrtc.org>
> Commit-Queue: Magnus Jedvert <magjed@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#24423}

TBR=phoglund@webrtc.org,magjed@webrtc.org,phensman@webrtc.org

Change-Id: Ia590b465687b861fe37ed1b14756d4607ca90da1
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: webrtc:9642
Reviewed-on: https://webrtc-review.googlesource.com/95946
Reviewed-by: Magnus Jedvert <magjed@webrtc.org>
Commit-Queue: Magnus Jedvert <magjed@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#24428}
Magnus Jedvert
2018-08-24 12:44:59 +00:00
committed by Commit Bot
parent a1cceca02c
commit 3e169ac18c
14 changed files with 1636 additions and 138 deletions


@@ -176,6 +176,8 @@ if (is_android) {
"../build/android/adb_reverse_forwarder.py",
"../examples/androidtests/video_quality_loopback_test.py",
"../resources/reference_video_640x360_30fps.y4m",
"../rtc_tools/barcode_tools/barcode_decoder.py",
"../rtc_tools/barcode_tools/helper_functions.py",
"../rtc_tools/compare_videos.py",
"../rtc_tools/testing/prebuilt_apprtc.zip",
"../rtc_tools/testing/golang/linux/go.tar.gz",


@@ -0,0 +1,13 @@
# This is a trimmed-down version of the main tools DEPS file which is to be used
# in Chromium's PyAuto WebRTC video quality measurement test. We will only
# need the Zxing dependencies as we only use the barcode tools in this test.
deps = {
# Used by barcode_tools
"barcode_tools/third_party/zxing/core":
"http://zxing.googlecode.com/svn/trunk/core@2349",
# Used by barcode_tools
"barcode_tools/third_party/zxing/javase":
"http://zxing.googlecode.com/svn/trunk/javase@2349",
}


@@ -0,0 +1,34 @@
This file explains how to get the dependencies needed for the barcode tools.
barcode_encoder.py
==================
This script depends on:
* Zxing (Java version)
* Ant (must be installed manually)
* Java
To automatically download Zxing for the encoder script, checkout this directory
as a separate gclient solution, like this:
gclient config http://webrtc.googlecode.com/svn/trunk/webrtc/rtc_tools/barcode_tools
gclient sync
Then the Zxing Java source code will be put in third_party/zxing.
In order to run barcode_encoder.py you then need to build:
* zxing/core/core.jar
* zxing/javase/javase.jar
These are compiled using Ant by running build_zxing.py:
python build_zxing.py
For more info about Zxing, see https://code.google.com/p/zxing/
barcode_decoder.py
==================
This script depends on:
* Zxing (C++ version). You need to checkout from Subversion and build the libs
and zxing SCons targets. SVN URL: http://zxing.googlecode.com/svn/trunk/cpp
* FFmpeg 0.11.1
These dependencies must be precompiled separately before running the script.
Make sure to add FFMPEG to the PATH environment variable and provide the path
to the zxing executable using the mandatory command line flag to the script.
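For illustration only (the input file name and the zxing path below are placeholders), a typical decoder invocation combining these flags could look like:
python barcode_decoder.py --yuv_file=captured.yuv --yuv_frame_width=640 --yuv_frame_height=480 --stats_file=stats.txt --zxing_path=/path/to/zxing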


@@ -0,0 +1,291 @@
#!/usr/bin/env python
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import optparse
import os
import sys
if __name__ == '__main__':
# Make sure we always can import helper_functions.
sys.path.append(os.path.dirname(__file__))
import helper_functions
# Chrome browsertests will throw away stderr; avoid losing that output.
sys.stderr = sys.stdout
def ConvertYuvToPngFiles(yuv_file_name, yuv_frame_width, yuv_frame_height,
output_directory, ffmpeg_path):
"""Converts a YUV video file into PNG frames.
The function uses ffmpeg to convert the YUV file. The output of ffmpeg is in
the form frame_xxxx.png, where xxxx is the frame number, starting from 0001.
Args:
yuv_file_name(string): The name of the YUV file.
yuv_frame_width(int): The width of one YUV frame.
yuv_frame_height(int): The height of one YUV frame.
output_directory(string): The output directory where the PNG frames will be
stored.
ffmpeg_path(string): The path to the ffmpeg executable. If None, the PATH
will be searched for it.
Return:
(bool): True if the conversion was OK.
"""
size_string = str(yuv_frame_width) + 'x' + str(yuv_frame_height)
output_files_pattern = os.path.join(output_directory, 'frame_%04d.png')
if not ffmpeg_path:
ffmpeg_path = 'ffmpeg.exe' if sys.platform == 'win32' else 'ffmpeg'
command = [ffmpeg_path, '-s', '%s' % size_string, '-i', '%s'
% yuv_file_name, '-f', 'image2', '-vcodec', 'png',
'%s' % output_files_pattern]
try:
print 'Converting YUV file to PNG images (may take a while)...'
print ' '.join(command)
helper_functions.RunShellCommand(
command, fail_msg='Error during YUV to PNG conversion')
except helper_functions.HelperError, err:
print 'Error executing command: %s. Error: %s' % (command, err)
return False
except OSError:
print 'Did not find %s. Have you installed it?' % ffmpeg_path
return False
return True
def DecodeFrames(input_directory, zxing_path):
"""Decodes the barcodes overlaid in each frame.
The function uses the Zxing command-line tool from the Zxing C++ distribution
to decode the barcode in every PNG frame from the input directory. The frames
should be named frame_xxxx.png, where xxxx is the frame number. The frame
numbers should be consecutive and should start from 0001.
The decoding results in a frame_xxxx.txt file for every successfully decoded
barcode. This file contains the decoded barcode as 12-digit string (UPC-A
format: 11 digits content + one check digit).
Args:
input_directory(string): The input directory from where the PNG frames are
read.
zxing_path(string): The path to the zxing binary. If specified as None,
the PATH will be searched for it.
Return:
(bool): True if the decoding succeeded.
"""
if not zxing_path:
zxing_path = 'zxing.exe' if sys.platform == 'win32' else 'zxing'
print 'Decoding barcodes from PNG files with %s...' % zxing_path
return helper_functions.PerformActionOnAllFiles(
directory=input_directory, file_pattern='frame_',
file_extension='png', start_number=1, action=_DecodeBarcodeInFile,
command_line_decoder=zxing_path)
def _DecodeBarcodeInFile(file_name, command_line_decoder):
"""Decodes the barcode in the upper left corner of a PNG file.
Args:
file_name(string): File name of the PNG file.
command_line_decoder(string): The ZXing command-line decoding tool.
Return:
(bool): True upon success, False otherwise.
"""
command = [command_line_decoder, '--try-harder', '--dump-raw', file_name]
try:
out = helper_functions.RunShellCommand(
command, fail_msg='Error during decoding of %s' % file_name)
text_file = open('%s.txt' % file_name[:-4], 'w')
text_file.write(out)
text_file.close()
except helper_functions.HelperError, err:
print 'Barcode in %s cannot be decoded.' % file_name
print err
return False
except OSError:
print 'Did not find %s. Have you installed it?' % command_line_decoder
return False
return True
def _GenerateStatsFile(stats_file_name, input_directory='.'):
"""Generate statistics file.
The function generates a statistics file. The contents of the file are in the
format <frame_name> <barcode>, where frame name is the name of every frame
(effectively the frame number) and barcode is the decoded barcode. The frames
and the helper .txt files are removed after they have been used.
"""
file_prefix = os.path.join(input_directory, 'frame_')
stats_file = open(stats_file_name, 'w')
print 'Generating stats file: %s' % stats_file_name
for i in range(1, _CountFramesIn(input_directory=input_directory) + 1):
frame_number = helper_functions.ZeroPad(i)
barcode_file_name = file_prefix + frame_number + '.txt'
png_frame = file_prefix + frame_number + '.png'
entry_frame_number = helper_functions.ZeroPad(i-1)
entry = 'frame_' + entry_frame_number + ' '
if os.path.isfile(barcode_file_name):
barcode = _ReadBarcodeFromTextFile(barcode_file_name)
os.remove(barcode_file_name)
if _CheckBarcode(barcode):
entry += (helper_functions.ZeroPad(int(barcode[0:11])) + '\n')
else:
entry += 'Barcode error\n' # Barcode is wrongly detected.
else: # Barcode file doesn't exist.
entry += 'Barcode error\n'
stats_file.write(entry)
os.remove(png_frame)
stats_file.close()
def _ReadBarcodeFromTextFile(barcode_file_name):
"""Reads the decoded barcode for a .txt file.
Args:
barcode_file_name(string): The name of the .txt file.
Return:
(string): The decoded barcode.
"""
barcode_file = open(barcode_file_name, 'r')
barcode = barcode_file.read()
barcode_file.close()
return barcode
def _CheckBarcode(barcode):
"""Check weather the UPC-A barcode was decoded correctly.
This function calculates the check digit of the provided barcode and compares
it to the check digit that was decoded.
Args:
barcode(string): The barcode (12-digit).
Return:
(bool): True if the barcode was decoded correctly.
"""
if len(barcode) != 12:
return False
r1 = range(0, 11, 2) # Odd digits
r2 = range(1, 10, 2) # Even digits except last
dsum = 0
# Sum all the odd digits
for i in r1:
dsum += int(barcode[i])
# Multiply the sum by 3
dsum *= 3
# Add all the even digits except the check digit (12th digit)
for i in r2:
dsum += int(barcode[i])
# Get the modulo 10
dsum = dsum % 10
# If not 0, subtract from 10
if dsum != 0:
dsum = 10 - dsum
# Compare result and check digit
return dsum == int(barcode[11])
def _CountFramesIn(input_directory='.'):
"""Calculates the number of frames in the input directory.
The function calculates the number of frames in the input directory. The
frames should be named frame_xxxx.png, where xxxx is the number of the frame.
The numbers should start from 1 and should be consecutive.
Args:
input_directory(string): The input directory.
Return:
(int): The number of frames.
"""
file_prefix = os.path.join(input_directory, 'frame_')
file_exists = True
num = 1
while file_exists:
file_name = (file_prefix + helper_functions.ZeroPad(num) + '.png')
if os.path.isfile(file_name):
num += 1
else:
file_exists = False
return num - 1
def _ParseArgs():
"""Registers the command-line options."""
usage = "usage: %prog [options]"
parser = optparse.OptionParser(usage=usage)
parser.add_option('--zxing_path', type='string',
help=('The path to where the zxing executable is located. '
'If omitted, it will be assumed to be present in the '
'PATH with the name zxing[.exe].'))
parser.add_option('--ffmpeg_path', type='string',
help=('The path to where the ffmpeg executable is located. '
'If omitted, it will be assumed to be present in the '
'PATH with the name ffmpeg[.exe].'))
parser.add_option('--yuv_frame_width', type='int', default=640,
help='Width of the YUV file\'s frames. Default: %default')
parser.add_option('--yuv_frame_height', type='int', default=480,
help='Height of the YUV file\'s frames. Default: %default')
parser.add_option('--yuv_file', type='string', default='output.yuv',
help='The YUV file to be decoded. Default: %default')
parser.add_option('--stats_file', type='string', default='stats.txt',
help='The output stats file. Default: %default')
parser.add_option('--png_working_dir', type='string', default='.',
help=('The directory for temporary PNG images to be stored '
'in when decoding from YUV before they\'re barcode '
'decoded. If using Windows and a Cygwin-compiled '
'zxing.exe, you should keep the default value to '
'avoid problems. Default: %default'))
options, _ = parser.parse_args()
return options
def main():
"""The main function.
A simple invocation is:
./webrtc/rtc_tools/barcode_tools/barcode_decoder.py
--yuv_file=<path_and_name_of_overlaid_yuv_video>
--yuv_frame_width=640 --yuv_frame_height=480
--stats_file=<path_and_name_to_stats_file>
"""
options = _ParseArgs()
# Convert the overlaid YUV video into a set of PNG frames.
if not ConvertYuvToPngFiles(options.yuv_file, options.yuv_frame_width,
options.yuv_frame_height,
output_directory=options.png_working_dir,
ffmpeg_path=options.ffmpeg_path):
print 'An error occurred converting from YUV to PNG frames.'
return -1
# Decode the barcodes from the PNG frames.
if not DecodeFrames(input_directory=options.png_working_dir,
zxing_path=options.zxing_path):
print 'An error occurred decoding barcodes from PNG frames.'
return -2
# Generate statistics file.
_GenerateStatsFile(options.stats_file,
input_directory=options.png_working_dir)
print 'Completed barcode decoding.'
return 0
if __name__ == '__main__':
sys.exit(main())
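To make the check digit arithmetic in _CheckBarcode concrete, here is a small worked example; the barcode value is chosen purely for illustration and the numbers can be verified by hand:

# Worked example of the UPC-A check digit rule used by _CheckBarcode.
# The barcode value below is illustrative only.
barcode = '036000291452'                                  # 11 content digits + 1 check digit
odd_sum = sum(int(barcode[i]) for i in range(0, 11, 2))   # positions 1, 3, ..., 11 -> 14
even_sum = sum(int(barcode[i]) for i in range(1, 10, 2))  # positions 2, 4, ..., 10 -> 16
total = 3 * odd_sum + even_sum                            # 3 * 14 + 16 = 58
check = (10 - total % 10) % 10                            # 58 % 10 = 8, so check digit is 2
assert check == int(barcode[11])                          # matches the 12th digit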


@@ -0,0 +1,372 @@
#!/usr/bin/env python
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import optparse
import os
import sys
import helper_functions
_DEFAULT_BARCODE_WIDTH = 352
_DEFAULT_BARCODES_FILE = 'barcodes.yuv'
def GenerateUpcaBarcodes(number_of_barcodes, barcode_width, barcode_height,
output_directory='.',
path_to_zxing='zxing-read-only'):
"""Generates UPC-A barcodes.
This function generates a number_of_barcodes UPC-A barcodes. The function
calls an example Java encoder from the Zxing library. The barcodes are
generated as PNG images. The width of the barcodes shouldn't be less than 102
pixels because otherwise Zxing can't properly generate the barcodes.
Args:
number_of_barcodes(int): The number of barcodes to generate.
barcode_width(int): Width of barcode in pixels.
barcode_height(int): Height of barcode in pixels.
output_directory(string): Output directory where to store generated
barcodes.
path_to_zxing(string): The path to Zxing.
Return:
(bool): True if the conversion is successful.
"""
base_file_name = os.path.join(output_directory, "barcode_")
jars = _FormJarsString(path_to_zxing)
command_line_encoder = 'com.google.zxing.client.j2se.CommandLineEncoder'
barcode_width = str(barcode_width)
barcode_height = str(barcode_height)
errors = False
for i in range(number_of_barcodes):
suffix = helper_functions.ZeroPad(i)
# Barcodes starting from 0
content = helper_functions.ZeroPad(i, 11)
output_file_name = base_file_name + suffix + ".png"
command = ["java", "-cp", jars, command_line_encoder,
"--barcode_format=UPC_A", "--height=%s" % barcode_height,
"--width=%s" % barcode_width,
"--output=%s" % (output_file_name), "%s" % (content)]
try:
helper_functions.RunShellCommand(
command, fail_msg=('Error during barcode %s generation' % content))
except helper_functions.HelperError as err:
print >> sys.stderr, err
errors = True
return not errors
def ConvertPngToYuvBarcodes(input_directory='.', output_directory='.'):
"""Converts PNG barcodes to YUV barcode images.
This function reads all the PNG files from the input directory which are in
the format frame_xxxx.png, where xxxx is the number of the frame, starting
from 0000. The frames should be consecutive numbers. The output YUV file is
named frame_xxxx.yuv. The function uses ffmpeg to do the conversion.
Args:
input_directory(string): The input directory to read the PNG barcodes from.
output_directory(string): The output directory to write the YUV files to.
Return:
(bool): True if the conversion was without errors.
"""
return helper_functions.PerformActionOnAllFiles(
input_directory, 'barcode_', 'png', 0, _ConvertToYuvAndDelete,
output_directory=output_directory, pattern='barcode_')
def _ConvertToYuvAndDelete(output_directory, file_name, pattern):
"""Converts a PNG file to a YUV file and deletes the PNG file.
Args:
output_directory(string): The output directory for the YUV file.
file_name(string): The PNG file name.
pattern(string): The file pattern of the PNG/YUV file. The PNG/YUV files are
named patternxx..x.png/yuv, where xx..x are digits starting from 00..0.
Return:
(bool): True upon successful conversion, false otherwise.
"""
# Pattern should be in file name
if not pattern in file_name:
return False
pattern_position = file_name.rfind(pattern)
# Strip the path to the PNG file and replace the png extension with yuv
yuv_file_name = file_name[pattern_position:-3] + 'yuv'
yuv_file_name = os.path.join(output_directory, yuv_file_name)
command = ['ffmpeg', '-i', '%s' % (file_name), '-pix_fmt', 'yuv420p',
'%s' % (yuv_file_name)]
try:
helper_functions.RunShellCommand(
command, fail_msg=('Error during PNG to YUV conversion of %s' %
file_name))
os.remove(file_name)
except helper_functions.HelperError as err:
print >> sys.stderr, err
return False
return True
def CombineYuvFramesIntoOneFile(output_file_name, input_directory='.'):
"""Combines several YUV frames into one YUV video file.
The function combines the YUV frames from input_directory into one YUV video
file. The frames should be named in the format frame_xxxx.yuv where xxxx
stands for the frame number. The numbers have to be consecutive and start from
0000. The YUV frames are removed after they have been added to the video.
Args:
output_file_name(string): The name of the file to produce.
input_directory(string): The directory from which the YUV frames are read.
Return:
(bool): True if the frame stitching went OK.
"""
output_file = open(output_file_name, "wb")
success = helper_functions.PerformActionOnAllFiles(
input_directory, 'barcode_', 'yuv', 0, _AddToFileAndDelete,
output_file=output_file)
output_file.close()
return success
def _AddToFileAndDelete(output_file, file_name):
"""Adds the contents of a file to a previously opened file.
Args:
output_file(file): The output file, previously opened.
file_name(string): The file name of the file to add to the output file.
Return:
(bool): True if successful, False otherwise.
"""
input_file = open(file_name, "rb")
input_file_contents = input_file.read()
output_file.write(input_file_contents)
input_file.close()
try:
os.remove(file_name)
except OSError as e:
print >> sys.stderr, 'Error deleting file %s.\nError: %s' % (file_name, e)
return False
return True
def _OverlayBarcodeAndBaseFrames(barcodes_file, base_file, output_file,
barcodes_component_sizes,
base_component_sizes):
"""Overlays the next YUV frame from a file with a barcode.
Args:
barcodes_file(FileObject): The YUV file containing the barcodes (opened).
base_file(FileObject): The base YUV file (opened).
output_file(FileObject): The output overlaid file (opened).
barcodes_component_sizes(list of tuples): The width and height of each Y, U
and V plane of the barcodes YUV file.
base_component_sizes(list of tuples): The width and height of each Y, U and
V plane of the base YUV file.
Return:
(bool): True if there are more planes (i.e. frames) in the base file, false
otherwise.
"""
# We will loop three times - once for the Y, U and V planes
for ((barcode_comp_width, barcode_comp_height),
(base_comp_width, base_comp_height)) in zip(barcodes_component_sizes,
base_component_sizes):
for base_row in range(base_comp_height):
barcode_plane_traversed = False
if (base_row < barcode_comp_height) and not barcode_plane_traversed:
barcode_plane = barcodes_file.read(barcode_comp_width)
if barcode_plane == "":
barcode_plane_traversed = True
else:
barcode_plane_traversed = True
base_plane = base_file.read(base_comp_width)
if base_plane == "":
return False
if not barcode_plane_traversed:
# Substitute part of the base component with the top component
output_file.write(barcode_plane)
base_plane = base_plane[barcode_comp_width:]
output_file.write(base_plane)
return True
def OverlayYuvFiles(barcode_width, barcode_height, base_width, base_height,
barcodes_file_name, base_file_name, output_file_name):
"""Overlays two YUV files starting from the upper left corner of both.
Args:
barcode_width(int): The width of the barcode (to be overlaid).
barcode_height(int): The height of the barcode (to be overlaid).
base_width(int): The width of a frame of the base file.
base_height(int): The height of a frame of the base file.
barcodes_file_name(string): The name of the YUV file containing the YUV
barcodes.
base_file_name(string): The name of the base YUV file.
output_file_name(string): The name of the output file where the overlaid
video will be written.
"""
# Component sizes = [Y_sizes, U_sizes, V_sizes]
barcodes_component_sizes = [(barcode_width, barcode_height),
(barcode_width/2, barcode_height/2),
(barcode_width/2, barcode_height/2)]
base_component_sizes = [(base_width, base_height),
(base_width/2, base_height/2),
(base_width/2, base_height/2)]
barcodes_file = open(barcodes_file_name, 'rb')
base_file = open(base_file_name, 'rb')
output_file = open(output_file_name, 'wb')
data_left = True
while data_left:
data_left = _OverlayBarcodeAndBaseFrames(barcodes_file, base_file,
output_file,
barcodes_component_sizes,
base_component_sizes)
barcodes_file.close()
base_file.close()
output_file.close()
def CalculateFramesNumberFromYuv(yuv_width, yuv_height, file_name):
"""Calculates the number of frames of a YUV video.
Args:
yuv_width(int): Width of a frame of the yuv file.
yuv_height(int): Height of a frame of the YUV file.
file_name(string): The name of the YUV file.
Return:
(int): The number of frames in the YUV file.
"""
file_size = os.path.getsize(file_name)
y_plane_size = yuv_width * yuv_height
u_plane_size = (yuv_width/2) * (yuv_height/2) # Equals to V plane size too
frame_size = y_plane_size + (2 * u_plane_size)
return int(file_size/frame_size) # Should be int anyway
def _FormJarsString(path_to_zxing):
"""Forms the the Zxing core and javase jars argument.
Args:
path_to_zxing(string): The path to the Zxing checkout folder.
Return:
(string): The newly formed jars argument.
"""
javase_jar = os.path.join(path_to_zxing, "javase", "javase.jar")
core_jar = os.path.join(path_to_zxing, "core", "core.jar")
delimiter = ':'
if os.name != 'posix':
delimiter = ';'
return javase_jar + delimiter + core_jar
def _ParseArgs():
"""Registers the command-line options."""
usage = "usage: %prog [options]"
parser = optparse.OptionParser(usage=usage)
parser.add_option('--barcode_width', type='int',
default=_DEFAULT_BARCODE_WIDTH,
help=('Width of the barcodes to be overlaid on top of the'
' base file. Default: %default'))
parser.add_option('--barcode_height', type='int', default=32,
help=('Height of the barcodes to be overlaid on top of the'
' base file. Default: %default'))
parser.add_option('--base_frame_width', type='int', default=352,
help=('Width of the base YUV file\'s frames. '
'Default: %default'))
parser.add_option('--base_frame_height', type='int', default=288,
help=('Height of the base YUV file\'s frames. '
'Default: %default'))
parser.add_option('--barcodes_yuv', type='string',
default=_DEFAULT_BARCODES_FILE,
help=('The YUV file with the barcodes in YUV. '
'Default: %default'))
parser.add_option('--base_yuv', type='string', default='base.yuv',
help=('The base YUV file to be overlaid. '
'Default: %default'))
parser.add_option('--output_yuv', type='string', default='output.yuv',
help=('The output YUV file containing the base overlaid'
' with the barcodes. Default: %default'))
parser.add_option('--png_barcodes_output_dir', type='string', default='.',
help=('Output directory where the PNG barcodes will be '
'generated. Default: %default'))
parser.add_option('--png_barcodes_input_dir', type='string', default='.',
help=('Input directory from where the PNG barcodes will be '
'read. Default: %default'))
parser.add_option('--yuv_barcodes_output_dir', type='string', default='.',
help=('Output directory where the YUV barcodes will be '
'generated. Default: %default'))
parser.add_option('--yuv_frames_input_dir', type='string', default='.',
help=('Input directory from where the YUV will be '
'read before combination. Default: %default'))
parser.add_option('--zxing_dir', type='string', default='zxing',
help=('Path to the Zxing barcodes library. '
'Default: %default'))
options = parser.parse_args()[0]
return options
def main():
"""The main function.
A simple invocation will be:
./webrtc/rtc_tools/barcode_tools/barcode_encoder.py --barcode_height=32
--base_frame_width=352 --base_frame_height=288
--base_yuv=<path_and_name_of_base_file>
--output_yuv=<path and name_of_output_file>
"""
options = _ParseArgs()
# The barcode width will be different from the base frame width only if
# explicitly specified at the command line.
if options.barcode_width == _DEFAULT_BARCODE_WIDTH:
options.barcode_width = options.base_frame_width
# If the user provides a value for the barcodes YUV video file, we will keep
# it. Otherwise we create a temp file which is removed after it has been used.
keep_barcodes_yuv_file = False
if options.barcodes_yuv != _DEFAULT_BARCODES_FILE:
keep_barcodes_yuv_file = True
# Calculate the number of barcodes - it is equal to the number of frames in
# the base file.
number_of_barcodes = CalculateFramesNumberFromYuv(
options.base_frame_width, options.base_frame_height, options.base_yuv)
script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
zxing_dir = os.path.join(script_dir, 'third_party', 'zxing')
# Generate barcodes - will generate them in PNG.
GenerateUpcaBarcodes(number_of_barcodes, options.barcode_width,
options.barcode_height,
output_directory=options.png_barcodes_output_dir,
path_to_zxing=zxing_dir)
# Convert the PNG barcodes to YUV format.
ConvertPngToYuvBarcodes(options.png_barcodes_input_dir,
options.yuv_barcodes_output_dir)
# Combine the YUV barcodes into one YUV file.
CombineYuvFramesIntoOneFile(options.barcodes_yuv,
input_directory=options.yuv_frames_input_dir)
# Overlay the barcodes over the base file.
OverlayYuvFiles(options.barcode_width, options.barcode_height,
options.base_frame_width, options.base_frame_height,
options.barcodes_yuv, options.base_yuv, options.output_yuv)
if not keep_barcodes_yuv_file:
# Remove the temporary barcodes YUV file
os.remove(options.barcodes_yuv)
if __name__ == '__main__':
sys.exit(main())
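The frame count computed by CalculateFramesNumberFromYuv above follows directly from the I420 layout: one full-resolution Y plane plus two quarter-resolution chroma planes. A quick sketch of that arithmetic (the 352x288 dimensions are an example, not a requirement):

# I420 frame size arithmetic behind CalculateFramesNumberFromYuv.
# The dimensions are illustrative only.
width, height = 352, 288
y_plane = width * height                    # 101376 bytes
u_plane = (width // 2) * (height // 2)      # 25344 bytes; the V plane is the same size
frame_size = y_plane + 2 * u_plane          # 152064 bytes, i.e. 1.5 * width * height
# A YUV file of size N bytes therefore contains N // frame_size frames.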


@@ -0,0 +1,44 @@
#!/usr/bin/env python
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import os
import subprocess
import sys
def RunAntBuildCommand(path_to_ant_build_file):
"""Tries to build the passed build file with ant."""
ant_executable = 'ant'
if sys.platform == 'win32':
if os.getenv('ANT_HOME'):
ant_executable = os.path.join(os.getenv('ANT_HOME'), 'bin', 'ant.bat')
else:
ant_executable = 'ant.bat'
cmd = [ant_executable, '-buildfile', path_to_ant_build_file]
try:
process = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr)
process.wait()
if process.returncode != 0:
print >> sys.stderr, 'Failed to execute: %s' % ' '.join(cmd)
return process.returncode
except subprocess.CalledProcessError as e:
print >> sys.stderr, 'Failed to execute: %s.\nCause: %s' % (' '.join(cmd),
e)
return -1
def main():
core_build = os.path.join('third_party', 'zxing', 'core', 'build.xml')
RunAntBuildCommand(core_build)
javase_build = os.path.join('third_party', 'zxing', 'javase', 'build.xml')
return RunAntBuildCommand(javase_build)
if __name__ == '__main__':
sys.exit(main())


@@ -0,0 +1,105 @@
#!/usr/bin/env python
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import multiprocessing
import os
import subprocess
import sys
_DEFAULT_PADDING = 4
class HelperError(Exception):
"""Exception raised for errors in the helper."""
pass
def ZeroPad(number, padding=_DEFAULT_PADDING):
"""Converts an int into a zero padded string.
Args:
number(int): The number to convert.
padding(int): The number of chars in the output. Note that if you pass for
example number=23456 and padding=4, the output will still be '23456',
i.e. it will not be cropped. If you pass number=2 and padding=4, the
return value will be '0002'.
Return:
(string): The zero padded number converted to string.
"""
return str(number).zfill(padding)
def RunShellCommand(cmd_list, fail_msg=None):
"""Executes a command.
Args:
cmd_list(list): Command list to execute.
fail_msg(string): Message describing the error in case the command fails.
Return:
(string): The standard output from running the command.
Raise:
HelperError: If command fails.
"""
process = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = process.communicate()
if process.returncode != 0:
if fail_msg:
print >> sys.stderr, fail_msg
raise HelperError('Failed to run %s: command returned %d and printed '
'%s and %s' % (' '.join(cmd_list), process.returncode,
output, error))
return output.strip()
def PerformActionOnAllFiles(directory, file_pattern, file_extension,
start_number, action, **kwargs):
"""Function that performs a given action on all files matching a pattern.
It is assumed that the files are named file_patternxxxx.file_extension, where
xxxx are digits starting from start_number.
Args:
directory(string): The directory where the files live.
file_pattern(string): The name pattern of the files.
file_extension(string): The files' extension.
start_number(int): From where to start to count frames.
action(function): The action to be performed over the files. Must return
False if the action failed, True otherwise. It should take a file name
as the first argument and **kwargs as arguments. The function must be
possible to pickle, so it cannot be a bound function (for instance).
Return:
(bool): Whether performing the action over all files was successful or not.
"""
file_prefix = os.path.join(directory, file_pattern)
file_number = start_number
process_pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
results = []
while True:
zero_padded_file_number = ZeroPad(file_number)
file_name = file_prefix + zero_padded_file_number + '.' + file_extension
if not os.path.isfile(file_name):
break
future = process_pool.apply_async(action, args=(file_name,), kwds=kwargs)
results.append(future)
file_number += 1
successful = True
for result in results:
if not result.get():
print "At least one action %s failed for files %sxxxx.%s." % (
action, file_pattern, file_extension)
successful = False
process_pool.close()
return successful
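As a usage sketch of PerformActionOnAllFiles (the directory and the _RemoveFile action are made up for illustration; DecodeFrames in barcode_decoder.py is the real in-tree caller), the action must be a module-level, picklable function that takes the file name as its first argument and returns a bool:

# Hypothetical example: delete frame_0001.png, frame_0002.png, ... in parallel.
import os
import helper_functions

def _RemoveFile(file_name):
  # Must be a module-level (picklable) function; returns True on success.
  os.remove(file_name)
  return True

ok = helper_functions.PerformActionOnAllFiles(
    directory='/tmp/frames', file_pattern='frame_', file_extension='png',
    start_number=1, action=_RemoveFile)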


@@ -0,0 +1,125 @@
#!/usr/bin/env python
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import optparse
import os
import sys
def _CropOneFrame(yuv_file, output_file, component_sizes):
"""Crops one frame.
This function crops one frame going through all the YUV planes and cropping
respective amount of rows.
Args:
yuv_file(file): The opened (for binary reading) YUV file.
output_file(file): The opened (for binary writing) file.
component_sizes(list of 3 3-tuples): The list contains the sizes for all the
planes (Y, U, V) of the YUV file plus the crop_height scaled for every
plane. The sizes equal width, height and crop_height for the Y plane,
and are equal to width/2, height/2 and crop_height/2 for the U and V
planes.
Return:
(bool): True if there are more frames to crop, False otherwise.
"""
for comp_width, comp_height, comp_crop_height in component_sizes:
for row in range(comp_height):
# Read the plane data for this row.
yuv_plane = yuv_file.read(comp_width)
# If the plane is empty, we have reached the end of the file.
if yuv_plane == "":
return False
# Only write the plane data for the rows bigger than crop_height.
if row >= comp_crop_height:
output_file.write(yuv_plane)
return True
def CropFrames(yuv_file_name, output_file_name, width, height, crop_height):
"""Crops rows of pixels from the top of the YUV frames.
This function goes through all the frames in a video and crops the crop_height
top pixel rows of every frame.
Args:
yuv_file_name(string): The name of the YUV file to be cropped.
output_file_name(string): The name of the output file where the result will
be written.
width(int): The width of the original YUV file.
height(int): The height of the original YUV file.
crop_height(int): The height (the number of pixel rows) to be cropped from
the frames.
"""
# Component sizes = [Y_sizes, U_sizes, V_sizes].
component_sizes = [(width, height, crop_height),
(width/2, height/2, crop_height/2),
(width/2, height/2, crop_height/2)]
yuv_file = open(yuv_file_name, 'rb')
output_file = open(output_file_name, 'wb')
data_left = True
while data_left:
data_left = _CropOneFrame(yuv_file, output_file, component_sizes)
yuv_file.close()
output_file.close()
def _ParseArgs():
"""Registers the command-line options."""
usage = "usage: %prog [options]"
parser = optparse.OptionParser(usage=usage)
parser.add_option('--width', type='int',
default=352,
help=('Width of the YUV file\'s frames. '
'Default: %default'))
parser.add_option('--height', type='int', default=288,
help=('Height of the YUV file\'s frames. '
'Default: %default'))
parser.add_option('--crop_height', type='int', default=32,
help=('How much of the top of the YUV file to crop. '
'Has to be a multiple of 2. Default: %default'))
parser.add_option('--yuv_file', type='string',
help=('The YUV file to be cropped.'))
parser.add_option('--output_file', type='string', default='output.yuv',
help=('The output YUV file containing the cropped YUV. '
'Default: %default'))
options = parser.parse_args()[0]
if not options.yuv_file:
parser.error('yuv_file argument missing. Please specify input YUV file!')
return options
def main():
"""A tool to crop rows of pixels from the top part of a YUV file.
A simple invocation will be:
./yuv_cropper.py --width=640 --height=480 --crop_height=32
--yuv_file=<path_and_name_of_yuv_file>
--output_yuv=<path and name_of_output_file>
"""
options = _ParseArgs()
if os.path.getsize(options.yuv_file) == 0:
sys.stderr.write('Error: The YUV file you have passed has size 0. The '
'produced output will also have size 0.\n')
return -1
CropFrames(options.yuv_file, options.output_file, options.width,
options.height, options.crop_height)
return 0
if __name__ == '__main__':
sys.exit(main())
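To illustrate the bookkeeping in CropFrames above: cropping crop_height rows from the top of each I420 frame drops crop_height full-width rows from the Y plane and crop_height/2 half-width rows from each chroma plane. Example arithmetic with illustrative dimensions:

# Bytes removed per frame when cropping an I420 video from the top.
# The 640x480 / crop_height=32 values are examples only.
width, height, crop_height = 640, 480, 32
removed_y = width * crop_height                     # 20480 bytes from the Y plane
removed_uv = 2 * (width // 2) * (crop_height // 2)  # 10240 bytes from U and V together
frame_in = width * height * 3 // 2                  # 460800 bytes per input frame
frame_out = frame_in - removed_y - removed_uv       # 430080 bytes per cropped frame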


@@ -9,8 +9,10 @@
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -35,25 +37,40 @@ def _ParseArgs():
parser.add_option('--frame_analyzer', type='string',
help='Path to the frame analyzer executable.')
parser.add_option('--barcode_decoder', type='string',
help=('DEPRECATED'))
help=('Path to the barcode decoder script. By default, we '
'will assume we can find it in barcode_tools/'
'relative to this directory.'))
parser.add_option('--ffmpeg_path', type='string',
help=('DEPRECATED'))
help=('The path to where the ffmpeg executable is located. '
'If omitted, it will be assumed to be present in the '
'PATH with the name ffmpeg[.exe].'))
parser.add_option('--zxing_path', type='string',
help=('DEPRECATED'))
help=('The path to where the zxing executable is located. '
'If omitted, it will be assumed to be present in the '
'PATH with the name zxing[.exe].'))
parser.add_option('--stats_file_ref', type='string', default='stats_ref.txt',
help=('DEPRECATED'))
help=('Path to the temporary stats file to be created and '
'used for the reference video file. '
'Default: %default'))
parser.add_option('--stats_file_test', type='string',
help=('DEPRECATED'))
default='stats_test.txt',
help=('Path to the temporary stats file to be created and '
'used for the test video file. Default: %default'))
parser.add_option('--stats_file', type='string',
help=('DEPRECATED'))
parser.add_option('--yuv_frame_width', type='int', default=640,
help=('DEPRECATED'))
help='Width of the YUV file\'s frames. Default: %default')
parser.add_option('--yuv_frame_height', type='int', default=480,
help=('DEPRECATED'))
help='Height of the YUV file\'s frames. Default: %default')
parser.add_option('--chartjson_result_file', type='str', default=None,
help='Where to store perf results in chartjson format.')
options, _ = parser.parse_args()
if options.stats_file:
options.stats_file_test = options.stats_file
print ('WARNING: Using deprecated switch --stats_file. '
'The new flag is --stats_file_test.')
if not options.ref_video:
parser.error('You must provide a path to the reference video!')
if not os.path.exists(options.ref_video):
@@ -78,23 +95,73 @@ def _DevNull():
"""
return open(os.devnull, 'r')
def DecodeBarcodesInVideo(options, path_to_decoder, video, stat_file):
# Run barcode decoder on the test video to identify frame numbers.
png_working_directory = tempfile.mkdtemp()
cmd = [
sys.executable,
path_to_decoder,
'--yuv_file=%s' % video,
'--yuv_frame_width=%d' % options.yuv_frame_width,
'--yuv_frame_height=%d' % options.yuv_frame_height,
'--stats_file=%s' % stat_file,
'--png_working_dir=%s' % png_working_directory,
]
if options.zxing_path:
cmd.append('--zxing_path=%s' % options.zxing_path)
if options.ffmpeg_path:
cmd.append('--ffmpeg_path=%s' % options.ffmpeg_path)
barcode_decoder = subprocess.Popen(cmd, stdin=_DevNull(),
stdout=sys.stdout, stderr=sys.stderr)
barcode_decoder.wait()
shutil.rmtree(png_working_directory)
if barcode_decoder.returncode != 0:
print 'Failed to run barcode decoder script.'
return 1
return 0
def main():
"""The main function.
A simple invocation is:
./webrtc/rtc_tools/compare_videos.py
./webrtc/rtc_tools/barcode_tools/compare_videos.py
--ref_video=<path_and_name_of_reference_video>
--test_video=<path_and_name_of_test_video>
--frame_analyzer=<path_and_name_of_the_frame_analyzer_executable>
Notice that the prerequisites for barcode_decoder.py also apply to this
script. This means the following executables have to be available in the PATH:
* zxing
* ffmpeg
"""
options = _ParseArgs()
if options.barcode_decoder:
path_to_decoder = options.barcode_decoder
else:
path_to_decoder = os.path.join(SCRIPT_DIR, 'barcode_tools',
'barcode_decoder.py')
if DecodeBarcodesInVideo(options, path_to_decoder,
options.ref_video, options.stats_file_ref) != 0:
return 1
if DecodeBarcodesInVideo(options, path_to_decoder,
options.test_video, options.stats_file_test) != 0:
return 1
# Run frame analyzer to compare the videos and print output.
cmd = [
options.frame_analyzer,
'--label=%s' % options.label,
'--reference_file=%s' % options.ref_video,
'--test_file=%s' % options.test_video,
'--stats_file_ref=%s' % options.stats_file_ref,
'--stats_file_test=%s' % options.stats_file_test,
'--width=%d' % options.yuv_frame_width,
'--height=%d' % options.yuv_frame_height,
]
if options.chartjson_result_file:
cmd.append('--chartjson_result_file=%s' % options.chartjson_result_file)


@@ -16,7 +16,6 @@
#include <vector>
#include "rtc_tools/frame_analyzer/video_quality_analysis.h"
#include "rtc_tools/frame_analyzer/video_temporal_aligner.h"
#include "rtc_tools/simple_command_line_parser.h"
#include "rtc_tools/y4m_file_reader.h"
#include "test/testsupport/perf_test.h"
@@ -25,7 +24,12 @@
* A command line tool running PSNR and SSIM on a reference video and a test
* video. The test video is a record of the reference video which can start at
* an arbitrary point. It is possible that there will be repeated frames or
* skipped frames as well. The video files should be I420 Y4M videos.
* skipped frames as well. In order to have a way to compare corresponding
* frames from the two videos, two stats files should be provided. One for the
* reference video and one for the test video. The stats file
* is a text file assumed to be in the format:
* frame_xxxx yyyy, where xxxx is the frame number and yyyy is the
* corresponding barcode. The video files should be I420 YUV videos.
* The tool prints the result to standard output in the Chromium perf format:
* RESULT <metric>:<label>= <values>
*
@@ -33,7 +37,9 @@
*
* Usage:
* frame_analyzer --label=<test_label> --reference_file=<name_of_file>
* --test_file_ref=<name_of_file>
* --test_file_ref=<name_of_file> --stats_file_test=<name_of_file>
* --stats_file=<name_of_file> --width=<frame_width>
* --height=<frame_height>
*/
int main(int argc, char* argv[]) {
std::string program_name = argv[0];
@@ -41,13 +47,24 @@ int main(int argc, char* argv[]) {
"Compares the output video with the initially sent video."
"\nExample usage:\n" +
program_name +
" --reference_file=ref.y4m --test_file=test.y4m\n"
" --reference_file=ref.yuv --test_file=test.yuv --width=320 "
"--height=240\n"
"Command line flags:\n"
" - width(int): The width of the reference and test files. Default: -1\n"
" - height(int): The height of the reference and test files. "
" Default: -1\n"
" - label(string): The label to use for the perf output."
" Default: MY_TEST\n"
" Default: ref.y4m\n"
" - stats_file_ref(string): The path to the stats file that will be"
" produced for the reference video file."
" Default: stats_ref.txt\n"
" - stats_file_test(string): The path to the stats file that will be"
" produced for the test video file."
" Default: stats_test.txt\n"
" - reference_file(string): The reference YUV file to compare against."
" Default: ref.yuv\n"
" - test_file(string): The test YUV file to run the analysis for."
" Default: test_file.y4m\n"
" Default: test_file.yuv\n"
" - chartjson_result_file: Where to store perf result in chartjson"
" format. If not present, no perf result will be stored."
" Default: None\n";
@@ -58,9 +75,13 @@ int main(int argc, char* argv[]) {
parser.Init(argc, argv);
parser.SetUsageMessage(usage);
parser.SetFlag("width", "-1");
parser.SetFlag("height", "-1");
parser.SetFlag("label", "MY_TEST");
parser.SetFlag("reference_file", "ref.y4m");
parser.SetFlag("test_file", "test.y4m");
parser.SetFlag("stats_file_ref", "stats_ref.txt");
parser.SetFlag("stats_file_test", "stats_test.txt");
parser.SetFlag("reference_file", "ref.yuv");
parser.SetFlag("test_file", "test.yuv");
parser.SetFlag("chartjson_result_file", "");
parser.SetFlag("help", "false");
@@ -71,6 +92,14 @@ int main(int argc, char* argv[]) {
}
parser.PrintEnteredFlags();
int width = strtol((parser.GetFlag("width")).c_str(), NULL, 10);
int height = strtol((parser.GetFlag("height")).c_str(), NULL, 10);
if (width <= 0 || height <= 0) {
fprintf(stderr, "Error: width or height cannot be <= 0!\n");
return -1;
}
webrtc::test::ResultsContainer results;
rtc::scoped_refptr<webrtc::test::Y4mFile> reference_video =
@@ -83,20 +112,12 @@
return 0;
}
const std::vector<size_t> matching_indices =
webrtc::test::FindMatchingFrameIndices(reference_video, test_video);
results.frames =
webrtc::test::RunAnalysis(reference_video, test_video, matching_indices);
const std::vector<webrtc::test::Cluster> clusters =
webrtc::test::CalculateFrameClusters(matching_indices);
results.max_repeated_frames = webrtc::test::GetMaxRepeatedFrames(clusters);
results.max_skipped_frames = webrtc::test::GetMaxSkippedFrames(clusters);
results.total_skipped_frames =
webrtc::test::GetTotalNumberOfSkippedFrames(clusters);
results.decode_errors_ref = 0;
results.decode_errors_test = 0;
webrtc::test::RunAnalysis(
reference_video, test_video, parser.GetFlag("stats_file_ref").c_str(),
parser.GetFlag("stats_file_test").c_str(), width, height, &results);
webrtc::test::GetMaxRepeatedAndSkippedFrames(
parser.GetFlag("stats_file_ref"), parser.GetFlag("stats_file_test"),
&results);
webrtc::test::PrintAnalysisResults(parser.GetFlag("label"), &results);


@@ -10,19 +10,87 @@
#include "rtc_tools/frame_analyzer/video_quality_analysis.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <numeric>
#include <map>
#include <string>
#include <utility>
#include "test/testsupport/perf_test.h"
#include "third_party/libyuv/include/libyuv/compare.h"
#include "third_party/libyuv/include/libyuv/convert.h"
#define STATS_LINE_LENGTH 32
namespace webrtc {
namespace test {
ResultsContainer::ResultsContainer() {}
ResultsContainer::~ResultsContainer() {}
int GetI420FrameSize(int width, int height) {
int half_width = (width + 1) >> 1;
int half_height = (height + 1) >> 1;
int y_plane = width * height; // I420 Y plane.
int u_plane = half_width * half_height; // I420 U plane.
int v_plane = half_width * half_height; // I420 V plane.
return y_plane + u_plane + v_plane;
}
int ExtractFrameSequenceNumber(std::string line) {
size_t space_position = line.find(' ');
if (space_position == std::string::npos) {
return -1;
}
std::string frame = line.substr(0, space_position);
size_t underscore_position = frame.find('_');
if (underscore_position == std::string::npos) {
return -1;
}
std::string frame_number = frame.substr(underscore_position + 1);
return strtol(frame_number.c_str(), NULL, 10);
}
int ExtractDecodedFrameNumber(std::string line) {
size_t space_position = line.find(' ');
if (space_position == std::string::npos) {
return -1;
}
std::string decoded_number = line.substr(space_position + 1);
return strtol(decoded_number.c_str(), NULL, 10);
}
bool IsThereBarcodeError(std::string line) {
size_t barcode_error_position = line.find("Barcode error");
if (barcode_error_position != std::string::npos) {
return true;
}
return false;
}
bool GetNextStatsLine(FILE* stats_file, char* line) {
int chars = 0;
char buf = 0;
while (buf != '\n') {
size_t chars_read = fread(&buf, 1, 1, stats_file);
if (chars_read != 1 || feof(stats_file)) {
return false;
}
line[chars] = buf;
++chars;
}
line[chars - 1] = '\0'; // Strip the trailing \n and put end of string.
return true;
}
template <typename FrameMetricFunction>
static double CalculateMetric(
const FrameMetricFunction& frame_metric_function,
@@ -51,69 +119,225 @@ double Ssim(const rtc::scoped_refptr<I420BufferInterface>& ref_buffer,
return CalculateMetric(&libyuv::I420Ssim, ref_buffer, test_buffer);
}
std::vector<AnalysisResult> RunAnalysis(
const rtc::scoped_refptr<webrtc::test::Video>& reference_video,
const rtc::scoped_refptr<webrtc::test::Video>& test_video,
const std::vector<size_t>& test_frame_indices) {
std::vector<AnalysisResult> results;
for (size_t i = 0; i < test_frame_indices.size(); ++i) {
// Ignore duplicated frames in the test video.
if (i > 0 && test_frame_indices[i] == test_frame_indices[i - 1])
continue;
void RunAnalysis(const rtc::scoped_refptr<webrtc::test::Video>& reference_video,
const rtc::scoped_refptr<webrtc::test::Video>& test_video,
const char* stats_file_reference_name,
const char* stats_file_test_name,
int width,
int height,
ResultsContainer* results) {
FILE* stats_file_ref = fopen(stats_file_reference_name, "r");
FILE* stats_file_test = fopen(stats_file_test_name, "r");
const rtc::scoped_refptr<I420BufferInterface>& test_frame =
test_video->GetFrame(i);
const rtc::scoped_refptr<I420BufferInterface>& reference_frame =
reference_video->GetFrame(test_frame_indices[i] %
reference_video->number_of_frames());
// String buffer for the lines in the stats file.
char line[STATS_LINE_LENGTH];
int previous_frame_number = -1;
// Maps barcode id to the frame id for the reference video.
// In case two frames have same id, then we only save the first one.
std::map<int, int> ref_barcode_to_frame;
// While there are entries in the stats file.
while (GetNextStatsLine(stats_file_ref, line)) {
int extracted_ref_frame = ExtractFrameSequenceNumber(line);
int decoded_frame_number = ExtractDecodedFrameNumber(line);
// Insert will only add if it is not in map already.
ref_barcode_to_frame.insert(
std::make_pair(decoded_frame_number, extracted_ref_frame));
}
while (GetNextStatsLine(stats_file_test, line)) {
int extracted_test_frame = ExtractFrameSequenceNumber(line);
int decoded_frame_number = ExtractDecodedFrameNumber(line);
auto it = ref_barcode_to_frame.find(decoded_frame_number);
if (it == ref_barcode_to_frame.end()) {
// Not found in the reference video.
// TODO(mandermo) print
continue;
}
int extracted_ref_frame = it->second;
// If there was a problem decoding the barcode in this frame or the frame has
// been duplicated, continue.
if (IsThereBarcodeError(line) ||
decoded_frame_number == previous_frame_number) {
continue;
}
assert(extracted_test_frame != -1);
assert(decoded_frame_number != -1);
const rtc::scoped_refptr<webrtc::I420BufferInterface> test_frame =
test_video->GetFrame(extracted_test_frame);
const rtc::scoped_refptr<webrtc::I420BufferInterface> reference_frame =
reference_video->GetFrame(extracted_ref_frame);
// Calculate the PSNR and SSIM.
double result_psnr = Psnr(reference_frame, test_frame);
double result_ssim = Ssim(reference_frame, test_frame);
previous_frame_number = decoded_frame_number;
// Fill in the result struct.
AnalysisResult result;
result.frame_number = test_frame_indices[i];
result.psnr_value = Psnr(reference_frame, test_frame);
result.ssim_value = Ssim(reference_frame, test_frame);
results.push_back(result);
result.frame_number = decoded_frame_number;
result.psnr_value = result_psnr;
result.ssim_value = result_ssim;
results->frames.push_back(result);
}
return results;
// Cleanup.
fclose(stats_file_ref);
fclose(stats_file_test);
}
std::vector<Cluster> CalculateFrameClusters(
const std::vector<size_t>& indices) {
std::vector<Cluster> clusters;
for (size_t i = 0; i < indices.size(); ++i) {
const size_t start_index = i;
while (i < indices.size() && indices[i] == indices[start_index])
++i;
const int number_of_repeated_frames = static_cast<int>(i - start_index);
clusters.push_back({indices[start_index], number_of_repeated_frames});
std::vector<std::pair<int, int> > CalculateFrameClusters(
FILE* file,
int* num_decode_errors) {
if (num_decode_errors) {
*num_decode_errors = 0;
}
return clusters;
}
int GetMaxRepeatedFrames(const std::vector<Cluster>& clusters) {
int max_number_of_repeated_frames = 0;
for (const Cluster& cluster : clusters) {
max_number_of_repeated_frames = std::max(max_number_of_repeated_frames,
cluster.number_of_repeated_frames);
std::vector<std::pair<int, int> > frame_cnt;
char line[STATS_LINE_LENGTH];
while (GetNextStatsLine(file, line)) {
int decoded_frame_number;
if (IsThereBarcodeError(line)) {
decoded_frame_number = DECODE_ERROR;
if (num_decode_errors) {
++*num_decode_errors;
}
} else {
decoded_frame_number = ExtractDecodedFrameNumber(line);
}
if (frame_cnt.size() >= 2 && decoded_frame_number != DECODE_ERROR &&
frame_cnt.back().first == DECODE_ERROR &&
frame_cnt[frame_cnt.size() - 2].first == decoded_frame_number) {
// Handle when there is a decoding error inside a cluster of frames.
frame_cnt[frame_cnt.size() - 2].second += frame_cnt.back().second + 1;
frame_cnt.pop_back();
} else if (frame_cnt.empty() ||
frame_cnt.back().first != decoded_frame_number) {
frame_cnt.push_back(std::make_pair(decoded_frame_number, 1));
} else {
++frame_cnt.back().second;
}
}
return max_number_of_repeated_frames;
return frame_cnt;
}
int GetMaxSkippedFrames(const std::vector<Cluster>& clusters) {
size_t max_skipped_frames = 0;
for (size_t i = 1; i < clusters.size(); ++i) {
const size_t skipped_frames = clusters[i].index - clusters[i - 1].index - 1;
max_skipped_frames = std::max(max_skipped_frames, skipped_frames);
void GetMaxRepeatedAndSkippedFrames(const std::string& stats_file_ref_name,
const std::string& stats_file_test_name,
ResultsContainer* results) {
FILE* stats_file_ref = fopen(stats_file_ref_name.c_str(), "r");
FILE* stats_file_test = fopen(stats_file_test_name.c_str(), "r");
if (stats_file_ref == NULL) {
fprintf(stderr, "Couldn't open reference stats file for reading: %s\n",
stats_file_ref_name.c_str());
return;
}
if (stats_file_test == NULL) {
fprintf(stderr, "Couldn't open test stats file for reading: %s\n",
stats_file_test_name.c_str());
fclose(stats_file_ref);
return;
}
return static_cast<int>(max_skipped_frames);
}
int GetTotalNumberOfSkippedFrames(const std::vector<Cluster>& clusters) {
// The number of reference frames the test video spans.
const size_t number_ref_frames =
clusters.empty() ? 0 : 1 + clusters.back().index - clusters.front().index;
return static_cast<int>(number_ref_frames - clusters.size());
int max_repeated_frames = 1;
int max_skipped_frames = 0;
int decode_errors_ref = 0;
int decode_errors_test = 0;
std::vector<std::pair<int, int> > frame_cnt_ref =
CalculateFrameClusters(stats_file_ref, &decode_errors_ref);
std::vector<std::pair<int, int> > frame_cnt_test =
CalculateFrameClusters(stats_file_test, &decode_errors_test);
fclose(stats_file_ref);
fclose(stats_file_test);
auto it_ref = frame_cnt_ref.begin();
auto it_test = frame_cnt_test.begin();
auto end_ref = frame_cnt_ref.end();
auto end_test = frame_cnt_test.end();
if (it_test == end_test || it_ref == end_ref) {
fprintf(stderr, "Either test or ref file is empty, nothing to print\n");
return;
}
while (it_test != end_test && it_test->first == DECODE_ERROR) {
++it_test;
}
if (it_test == end_test) {
fprintf(stderr, "Test video only has barcode decode errors\n");
return;
}
// Find the first frame in the reference video that matches the first frame in
// the test video.
while (it_ref != end_ref &&
(it_ref->first == DECODE_ERROR || it_ref->first != it_test->first)) {
++it_ref;
}
if (it_ref == end_ref) {
fprintf(stderr,
"The barcode in the test video's first frame is not in the "
"reference video.\n");
return;
}
int total_skipped_frames = 0;
for (;;) {
max_repeated_frames =
std::max(max_repeated_frames, it_test->second - it_ref->second + 1);
bool passed_error = false;
++it_test;
while (it_test != end_test && it_test->first == DECODE_ERROR) {
++it_test;
passed_error = true;
}
if (it_test == end_test) {
break;
}
int skipped_frames = 0;
++it_ref;
for (; it_ref != end_ref; ++it_ref) {
if (it_ref->first != DECODE_ERROR && it_ref->first >= it_test->first) {
break;
}
++skipped_frames;
}
if (passed_error) {
// If we pass an error in the test video, then we are conservative
// and will not calculate skipped frames for that part.
skipped_frames = 0;
}
if (it_ref != end_ref && it_ref->first == it_test->first) {
total_skipped_frames += skipped_frames;
if (skipped_frames > max_skipped_frames) {
max_skipped_frames = skipped_frames;
}
continue;
}
fprintf(stdout,
"Found barcode %d in test video, which is not in reference video\n",
it_test->first);
break;
}
results->max_repeated_frames = max_repeated_frames;
results->max_skipped_frames = max_skipped_frames;
results->total_skipped_frames = total_skipped_frames;
results->decode_errors_ref = decode_errors_ref;
results->decode_errors_test = decode_errors_test;
}
void PrintAnalysisResults(const std::string& label, ResultsContainer* results) {


@@ -47,12 +47,25 @@ struct ResultsContainer {
// A function to run the PSNR and SSIM analysis on the test file. The test file
// comprises the frames that were captured during the quality measurement test.
// There may be missing or duplicate frames. Also the frames start at a random
// position in the original video. We also need to provide a map from test frame
// indices to reference frame indices.
std::vector<AnalysisResult> RunAnalysis(
const rtc::scoped_refptr<webrtc::test::Video>& reference_video,
const rtc::scoped_refptr<webrtc::test::Video>& test_video,
const std::vector<size_t>& test_frame_indices);
// position in the original video. We should provide a statistics file along
// with the test video. The stats file contains the connection between the
// actual frames in the test file and their bar code number. There is one file
// for the reference video and one for the test video. The stats file should
// be in the form 'frame_xxxx yyyy', where xxxx is the consecutive
// number of the frame in the test video, and yyyy is the barcode number.
// The stats file could be produced by
// tools/barcode_tools/barcode_decoder.py. This script decodes the barcodes
// integrated in every video and generates the stats file. If there was some
// problem with the decoding, there will be 'Barcode error' instead of yyyy.
// The stats files are used to compare the right frames with each other and
// to calculate statistics.
void RunAnalysis(const rtc::scoped_refptr<webrtc::test::Video>& reference_video,
const rtc::scoped_refptr<webrtc::test::Video>& test_video,
const char* stats_file_reference_name,
const char* stats_file_test_name,
int width,
int height,
ResultsContainer* results);
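For illustration only (not part of this change): given the 'frame_xxxx yyyy'
format described above, a stats line can be split into its sequence number and
barcode roughly as sketched below. ParseStatsLine is a hypothetical helper,
not the parser that frame_analyzer actually uses.
#include <cstdio>
#include <cstring>
// Hypothetical sketch: returns false for lines such as
// "frame_0002 Barcode error", otherwise fills in both numbers.
bool ParseStatsLine(const char* line, int* frame_number, int* barcode) {
  if (std::strstr(line, "Barcode error") != nullptr)
    return false;
  return std::sscanf(line, "frame_%d %d", frame_number, barcode) == 2;
}
With the example above, 'frame_0023 0284' yields 23 and 284.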
// Compute PSNR for an I420 buffer (all planes). The max return value (in the
// case where the test and reference frames are exactly the same) will be 48.
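As a rough aid to the comment above (my own sketch, not the shipped
implementation; PsnrSketch is a hypothetical name): PSNR is
10 * log10(255^2 * N / SSE) over the N samples of all three I420 planes, which
diverges as the sum of squared errors SSE goes to zero, so identical frames
are reported with the finite 48 dB cap mentioned above.
#include <algorithm>
#include <cmath>
#include <cstdint>
// Illustrative only: PSNR over num_samples samples with the given sum of
// squared errors, clamped to the 48 dB value used for identical frames.
double PsnrSketch(uint64_t sum_squared_error, uint64_t num_samples) {
  if (sum_squared_error == 0)
    return 48.0;
  return std::min(48.0,
                  10.0 * std::log10(255.0 * 255.0 *
                                    static_cast<double>(num_samples) /
                                    static_cast<double>(sum_squared_error)));
}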
@ -74,28 +87,45 @@ void PrintAnalysisResults(FILE* output,
const std::string& label,
ResultsContainer* results);
struct Cluster {
// Corresponding reference frame index for this cluster.
size_t index;
// The number of sequential frames that mapped to the same reference frame
// index.
int number_of_repeated_frames;
};
// The barcode number that means that the barcode could not be decoded.
const int DECODE_ERROR = -1;
// Clusters sequentially repeated frames. For example, the sequence {100, 102,
// 102, 103} will be mapped to {{100, 1}, {102, 2}, {103, 1}}.
std::vector<Cluster> CalculateFrameClusters(const std::vector<size_t>& indices);
// Clusters the frames in the file. The first element of each pair is the
// decoded frame number (barcode) and the second is the number of frames in
// that cluster. So if the first frame in the video has number 100 and it is
// repeated 3 times in a row, the first entry in the returned vector has first
// set to 100 and second set to 3.
// If there are decode errors between two frames with the same barcode, the
// frames with decode errors are interpreted as having the same id as the
// frames around them. E.g. [400, DECODE_ERROR, DECODE_ERROR, 400] becomes one
// entry in the returned vector with first==400 and second==4. In other cases
// with decode errors, e.g. [400, DECODE_ERROR, 401], the result is three
// entries, each with second==1 and the middle one with first==DECODE_ERROR.
std::vector<std::pair<int, int> > CalculateFrameClusters(
FILE* file,
int* num_decode_errors);
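Purely as an illustration of the clustering rule described above, and not part
of this change: the sketch below clusters an in-memory sequence of barcode
ids, whereas the real function reads a stats FILE*. ClusterSketch and
kDecodeErrorSketch are hypothetical names.
#include <utility>
#include <vector>
namespace {
const int kDecodeErrorSketch = -1;  // Stand-in for DECODE_ERROR in this sketch.
}  // namespace
std::vector<std::pair<int, int>> ClusterSketch(const std::vector<int>& ids) {
  std::vector<std::pair<int, int>> clusters;
  int pending_errors = 0;  // Decode errors not yet assigned to a cluster.
  for (int id : ids) {
    if (id == kDecodeErrorSketch) {
      ++pending_errors;
      continue;
    }
    if (!clusters.empty() && clusters.back().first == id) {
      // Same barcode as the current cluster: absorb any decode errors in
      // between, e.g. [400, ERR, ERR, 400] -> {400, 4}.
      clusters.back().second += pending_errors + 1;
    } else {
      // New barcode: pending decode errors form their own cluster,
      // e.g. [400, ERR, 401] -> {400, 1}, {ERR, 1}, {401, 1}.
      if (pending_errors > 0)
        clusters.emplace_back(kDecodeErrorSketch, pending_errors);
      clusters.emplace_back(id, 1);
    }
    pending_errors = 0;
  }
  if (pending_errors > 0)
    clusters.emplace_back(kDecodeErrorSketch, pending_errors);
  return clusters;
}
Under this rule {101, 101, ERR, ERR, 101, 101} collapses into a single
{101, 6} cluster, which is the behaviour the unit tests further down expect.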
// Get the maximum number of sequentially repeated frames in the test video.
// This number will be one if the test video only contains unique frames.
int GetMaxRepeatedFrames(const std::vector<Cluster>& clusters);
// Calculates the max repeated and skipped frames, as well as the total number
// of skipped frames, and stores them in the supplied ResultsContainer.
void GetMaxRepeatedAndSkippedFrames(const std::string& stats_file_ref_name,
const std::string& stats_file_test_name,
ResultsContainer* results);
// Get the longest sequence of skipped reference frames. This corresponds to the
// longest freeze in the test video.
int GetMaxSkippedFrames(const std::vector<Cluster>& clusters);
// Gets the next line from an open stats file.
bool GetNextStatsLine(FILE* stats_file, char* line);
// Get total number of skipped frames in the test video.
int GetTotalNumberOfSkippedFrames(const std::vector<Cluster>& clusters);
// Calculates the size of an I420 frame given the width and height.
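// (For even dimensions this is width * height bytes for the Y plane plus two
// chroma planes of (width / 2) * (height / 2) bytes each, i.e.
// width * height * 3 / 2 bytes in total.)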
int GetI420FrameSize(int width, int height);
// Extract the sequence number of the frame in the video, i.e. if the line is
// 'frame_0023 0284', we will get 23.
int ExtractFrameSequenceNumber(std::string line);
// Checks whether the given line contains 'Barcode error'.
bool IsThereBarcodeError(std::string line);
// Extract the decoded frame number, i.e. the frame number in the reference
// video. If the line is 'frame_0023 0284', we will get 284.
int ExtractDecodedFrameNumber(std::string line);
} // namespace test
} // namespace webrtc

View File

@ -31,9 +31,14 @@ class VideoQualityAnalysisTest : public ::testing::Test {
"VideoQualityAnalysisTest.log");
logfile_ = fopen(log_filename.c_str(), "w");
ASSERT_TRUE(logfile_ != NULL);
stats_filename_ref_ = TempFilename(OutputPath(), "stats-1.txt");
stats_filename_ = TempFilename(OutputPath(), "stats-2.txt");
}
void TearDown() { ASSERT_EQ(0, fclose(logfile_)); }
FILE* logfile_;
std::string stats_filename_ref_;
std::string stats_filename_;
};
TEST_F(VideoQualityAnalysisTest, PrintAnalysisResultsEmpty) {
@ -55,6 +60,46 @@ TEST_F(VideoQualityAnalysisTest, PrintAnalysisResultsThreeFrames) {
PrintAnalysisResults(logfile_, "ThreeFrames", &result);
}
TEST_F(VideoQualityAnalysisTest, GetMaxRepeatedAndSkippedFramesInvalidFile) {
ResultsContainer result;
remove(stats_filename_.c_str());
GetMaxRepeatedAndSkippedFrames(stats_filename_ref_, stats_filename_, &result);
}
TEST_F(VideoQualityAnalysisTest, GetMaxRepeatedAndSkippedFramesEmptyStatsFile) {
ResultsContainer result;
std::ofstream stats_file;
stats_file.open(stats_filename_ref_.c_str());
stats_file.close();
stats_file.open(stats_filename_.c_str());
stats_file.close();
GetMaxRepeatedAndSkippedFrames(stats_filename_ref_, stats_filename_, &result);
}
TEST_F(VideoQualityAnalysisTest, GetMaxRepeatedAndSkippedFramesNormalFile) {
ResultsContainer result;
std::ofstream stats_file;
stats_file.open(stats_filename_ref_.c_str());
stats_file << "frame_0001 0100\n";
stats_file << "frame_0002 0101\n";
stats_file << "frame_0003 0102\n";
stats_file << "frame_0004 0103\n";
stats_file << "frame_0005 0106\n";
stats_file << "frame_0006 0107\n";
stats_file << "frame_0007 0108\n";
stats_file.close();
stats_file.open(stats_filename_.c_str());
stats_file << "frame_0001 0100\n";
stats_file << "frame_0002 0101\n";
stats_file << "frame_0003 0101\n";
stats_file << "frame_0004 0106\n";
stats_file.close();
GetMaxRepeatedAndSkippedFrames(stats_filename_ref_, stats_filename_, &result);
}
namespace {
void VerifyLogOutput(const std::string& log_filename,
const std::vector<std::string>& expected_out) {
@ -72,18 +117,35 @@ void VerifyLogOutput(const std::string& log_filename,
TEST_F(VideoQualityAnalysisTest,
PrintMaxRepeatedAndSkippedFramesSkippedFrames) {
ResultsContainer result;
std::ofstream stats_file;
std::string log_filename =
TempFilename(webrtc::test::OutputPath(), "log.log");
FILE* logfile = fopen(log_filename.c_str(), "w");
ASSERT_TRUE(logfile != NULL);
stats_file.open(stats_filename_ref_.c_str());
stats_file << "frame_0001 0100\n";
stats_file << "frame_0002 0101\n";
stats_file << "frame_0002 0101\n";
stats_file << "frame_0003 0103\n";
stats_file << "frame_0004 0103\n";
stats_file << "frame_0005 0106\n";
stats_file << "frame_0006 0106\n";
stats_file << "frame_0007 0108\n";
stats_file << "frame_0008 0110\n";
stats_file << "frame_0009 0112\n";
stats_file.close();
result.max_repeated_frames = 2;
result.max_skipped_frames = 2;
result.total_skipped_frames = 3;
result.decode_errors_ref = 0;
result.decode_errors_test = 0;
stats_file.open(stats_filename_.c_str());
stats_file << "frame_0001 0101\n";
stats_file << "frame_0002 0101\n";
stats_file << "frame_0003 0101\n";
stats_file << "frame_0004 0108\n";
stats_file << "frame_0005 0108\n";
stats_file << "frame_0006 0112\n";
stats_file.close();
GetMaxRepeatedAndSkippedFrames(stats_filename_ref_, stats_filename_, &result);
PrintAnalysisResults(logfile, "NormalStatsFile", &result);
ASSERT_EQ(0, fclose(logfile));
@ -99,17 +161,35 @@ TEST_F(VideoQualityAnalysisTest,
TEST_F(VideoQualityAnalysisTest,
PrintMaxRepeatedAndSkippedFramesDecodeErrorInTest) {
ResultsContainer result;
std::ofstream stats_file;
std::string log_filename =
TempFilename(webrtc::test::OutputPath(), "log.log");
FILE* logfile = fopen(log_filename.c_str(), "w");
ASSERT_TRUE(logfile != NULL);
stats_file.open(stats_filename_ref_.c_str());
stats_file << "frame_0001 0100\n";
stats_file << "frame_0002 0100\n";
stats_file << "frame_0002 0101\n";
stats_file << "frame_0003 0103\n";
stats_file << "frame_0004 0103\n";
stats_file << "frame_0005 0106\n";
stats_file << "frame_0006 0107\n";
stats_file << "frame_0007 0107\n";
stats_file << "frame_0008 0110\n";
stats_file << "frame_0009 0112\n";
stats_file.close();
result.max_repeated_frames = 1;
result.max_skipped_frames = 0;
result.total_skipped_frames = 0;
result.decode_errors_ref = 0;
result.decode_errors_test = 3;
stats_file.open(stats_filename_.c_str());
stats_file << "frame_0001 0101\n";
stats_file << "frame_0002 Barcode error\n";
stats_file << "frame_0003 Barcode error\n";
stats_file << "frame_0004 Barcode error\n";
stats_file << "frame_0005 0107\n";
stats_file << "frame_0006 0110\n";
stats_file.close();
GetMaxRepeatedAndSkippedFrames(stats_filename_ref_, stats_filename_, &result);
PrintAnalysisResults(logfile, "NormalStatsFile", &result);
ASSERT_EQ(0, fclose(logfile));
@ -122,42 +202,114 @@ TEST_F(VideoQualityAnalysisTest,
VerifyLogOutput(log_filename, expected_out);
}
TEST_F(VideoQualityAnalysisTest, GetMaxRepeatedFramesOneValue) {
EXPECT_EQ(1, GetMaxRepeatedFrames(CalculateFrameClusters({1})));
TEST_F(VideoQualityAnalysisTest, CalculateFrameClustersOneValue) {
std::ofstream stats_file;
stats_file.open(stats_filename_.c_str());
stats_file << "frame_0001 0101\n";
stats_file.close();
FILE* stats_filef = fopen(stats_filename_.c_str(), "r");
ASSERT_TRUE(stats_filef != NULL);
auto clusters = CalculateFrameClusters(stats_filef, nullptr);
ASSERT_EQ(0, fclose(stats_filef));
decltype(clusters) expected = {std::make_pair(101, 1)};
ASSERT_EQ(expected, clusters);
}
TEST_F(VideoQualityAnalysisTest, GetMaxSkippedFramesOneValue) {
EXPECT_EQ(0, GetMaxSkippedFrames(CalculateFrameClusters({1})));
TEST_F(VideoQualityAnalysisTest, CalculateFrameClustersOneOneTwo) {
std::ofstream stats_file;
stats_file.open(stats_filename_.c_str());
stats_file << "frame_0001 0101\n";
stats_file << "frame_0002 0101\n";
stats_file << "frame_0003 0102\n";
stats_file.close();
FILE* stats_filef = fopen(stats_filename_.c_str(), "r");
ASSERT_TRUE(stats_filef != NULL);
auto clusters = CalculateFrameClusters(stats_filef, nullptr);
ASSERT_EQ(0, fclose(stats_filef));
decltype(clusters) expected = {std::make_pair(101, 2),
std::make_pair(102, 1)};
ASSERT_EQ(expected, clusters);
}
TEST_F(VideoQualityAnalysisTest, GetTotalNumberOfSkippedFramesOneValue) {
EXPECT_EQ(0, GetTotalNumberOfSkippedFrames(CalculateFrameClusters({1})));
TEST_F(VideoQualityAnalysisTest, CalculateFrameClustersOneOneErrErrThree) {
std::ofstream stats_file;
stats_file.open(stats_filename_.c_str());
stats_file << "frame_0001 0101\n";
stats_file << "frame_0002 0101\n";
stats_file << "frame_0003 Barcode error\n";
stats_file << "frame_0004 Barcode error\n";
stats_file << "frame_0005 0103\n";
stats_file.close();
FILE* stats_filef = fopen(stats_filename_.c_str(), "r");
ASSERT_TRUE(stats_filef != NULL);
auto clusters = CalculateFrameClusters(stats_filef, nullptr);
ASSERT_EQ(0, fclose(stats_filef));
decltype(clusters) expected = {std::make_pair(101, 2),
std::make_pair(DECODE_ERROR, 2),
std::make_pair(103, 1)};
ASSERT_EQ(expected, clusters);
}
TEST_F(VideoQualityAnalysisTest, GetMaxRepeatedFramesOneOneTwo) {
EXPECT_EQ(2, GetMaxRepeatedFrames(CalculateFrameClusters({1, 1, 2})));
TEST_F(VideoQualityAnalysisTest, CalculateFrameClustersErrErr) {
std::ofstream stats_file;
stats_file.open(stats_filename_.c_str());
stats_file << "frame_0001 Barcode error\n";
stats_file << "frame_0002 Barcode error\n";
stats_file.close();
FILE* stats_filef = fopen(stats_filename_.c_str(), "r");
ASSERT_TRUE(stats_filef != NULL);
auto clusters = CalculateFrameClusters(stats_filef, nullptr);
ASSERT_EQ(0, fclose(stats_filef));
decltype(clusters) expected = {std::make_pair(DECODE_ERROR, 2)};
ASSERT_EQ(expected, clusters);
}
TEST_F(VideoQualityAnalysisTest, GetMaxSkippedFramesOneOneTwo) {
EXPECT_EQ(0, GetMaxSkippedFrames(CalculateFrameClusters({1, 1, 2})));
TEST_F(VideoQualityAnalysisTest, CalculateFrameClustersOneOneErrErrOneOne) {
std::ofstream stats_file;
stats_file.open(stats_filename_.c_str());
stats_file << "frame_0001 0101\n";
stats_file << "frame_0002 0101\n";
stats_file << "frame_0003 Barcode error\n";
stats_file << "frame_0004 Barcode error\n";
stats_file << "frame_0005 0101\n";
stats_file << "frame_0006 0101\n";
stats_file.close();
FILE* stats_filef = fopen(stats_filename_.c_str(), "r");
ASSERT_TRUE(stats_filef != NULL);
auto clusters = CalculateFrameClusters(stats_filef, nullptr);
ASSERT_EQ(0, fclose(stats_filef));
decltype(clusters) expected = {std::make_pair(101, 6)};
ASSERT_EQ(expected, clusters);
}
TEST_F(VideoQualityAnalysisTest, GetTotalNumberOfSkippedFramesOneOneTwo) {
EXPECT_EQ(0,
GetTotalNumberOfSkippedFrames(CalculateFrameClusters({1, 1, 2})));
}
TEST_F(VideoQualityAnalysisTest, CalculateFrameClustersEmpty) {
std::ofstream stats_file;
TEST_F(VideoQualityAnalysisTest, GetMaxRepeatedFramesEmpty) {
EXPECT_EQ(0, GetMaxRepeatedFrames({}));
}
stats_file.open(stats_filename_.c_str());
stats_file.close();
TEST_F(VideoQualityAnalysisTest, GetMaxSkippedFramesEmpty) {
EXPECT_EQ(0, GetMaxSkippedFrames({}));
}
FILE* stats_filef = fopen(stats_filename_.c_str(), "r");
ASSERT_TRUE(stats_filef != NULL);
TEST_F(VideoQualityAnalysisTest, GetTotalNumberOfSkippedFramesEmpty) {
EXPECT_EQ(0, GetTotalNumberOfSkippedFrames({}));
auto clusters = CalculateFrameClusters(stats_filef, nullptr);
ASSERT_EQ(0, fclose(stats_filef));
decltype(clusters) expected;
ASSERT_EQ(expected, clusters);
}
} // namespace test
} // namespace webrtc

View File

@ -79,7 +79,7 @@ def _ParseArgs():
help='Path to the frame analyzer executable. '
'Default: %default')
parser.add_option('--zxing_path', type='string',
help='DEPRECATED.')
help='Path to the zebra xing barcode analyzer.')
parser.add_option('--ref_rec_dir', type='string', default='ref',
help='Path to where reference recordings will be created. '
'Ideally keep the ref and test directories on separate'
@ -118,6 +118,8 @@ def _ParseArgs():
'generated!')
if not os.path.isfile(options.frame_analyzer):
parser.warning('Cannot find frame_analyzer, no metrics will be generated!')
if not os.path.isfile(options.zxing_path):
parser.warning('Cannot find Zebra Xing, no metrics will be generated!')
return options
@ -428,11 +430,26 @@ def CompareVideos(options, cropped_ref_file, cropped_test_file):
result_file_name = os.path.join(rec_path, '%s_%s_result.txt') % (
options.app_name, CURRENT_TIME)
# Find the crop dimensions (e.g. 950 and 420) in the ref crop parameter
# string: 'hflip, crop=950:420:130:56'
for param in options.ref_crop_parameters.split('crop'):
if param.startswith('='):
crop_width = int(param.split(':')[0].split('=')[1])
crop_height = int(param.split(':')[1])
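# For the example string above this yields crop_width == 950 and
# crop_height == 420, which are passed to compare_videos.py below as the YUV
# frame width and height.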
compare_cmd = [
compare_videos_script,
'--ref_video=%s' % cropped_ref_file,
'--test_video=%s' % cropped_test_file,
'--frame_analyzer=%s' % os.path.abspath(options.frame_analyzer),
'--zxing_path=%s' % options.zxing_path,
'--ffmpeg_path=%s' % options.ffmpeg,
'--stats_file_ref=%s_stats.txt' %
os.path.join(os.path.dirname(cropped_ref_file), cropped_ref_file),
'--stats_file_test=%s_stats.txt' %
os.path.join(os.path.dirname(cropped_test_file), cropped_test_file),
'--yuv_frame_height=%d' % crop_height,
'--yuv_frame_width=%d' % crop_width
]
with open(result_file_name, 'w') as f:
@ -455,6 +472,7 @@ def main():
--app_name AppRTCMobile \
--ffmpeg ./ffmpeg --ref_video_device=/dev/video0 \
--test_video_device=/dev/video1 \
--zxing_path ./zxing \
--test_crop_parameters 'crop=950:420:130:56' \
--ref_crop_parameters 'hflip, crop=950:420:130:56' \
--ref_rec_dir /tmp/ref \