Print per-frame VMAF score instead of average.

compare_videos.py will now print the VMAF score for each frame.
The CL also removes some stale comments.

Bug: webrtc:9642
Change-Id: I5623588580dea06dd487d7763dc3a2511bd2cd3c
Reviewed-on: https://webrtc-review.googlesource.com/c/105103
Commit-Queue: Paulina Hensman <phensman@webrtc.org>
Reviewed-by: Patrik Höglund <phoglund@webrtc.org>
Reviewed-by: Sami Kalliomäki <sakal@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#25092}
This commit is contained in:
Paulina Hensman
2018-10-10 15:48:30 +02:00
committed by Commit Bot
parent b3b017950a
commit ede87964ba

View File

@@ -7,6 +7,7 @@
# in the file PATENTS. All contributing project authors may # in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree. # be found in the AUTHORS file in the root of the source tree.
import json
import optparse import optparse
import os import os
import shutil import shutil
@@ -162,12 +163,9 @@ def _RunFrameAnalyzer(options, yuv_directory=None):
return frame_analyzer.returncode return frame_analyzer.returncode
def _RunVmaf(options, yuv_directory): def _RunVmaf(options, yuv_directory, logfile):
""" Run VMAF to compare videos and print output. """ Run VMAF to compare videos and print output.
The provided vmaf directory is assumed to contain a c++ wrapper executable
and a model.
The yuv_directory is assumed to have been populated with a reference and test The yuv_directory is assumed to have been populated with a reference and test
video in .yuv format, with names according to the label. video in .yuv format, with names according to the label.
""" """
@@ -179,26 +177,29 @@ def _RunVmaf(options, yuv_directory):
os.path.join(yuv_directory, "ref.yuv"), os.path.join(yuv_directory, "ref.yuv"),
os.path.join(yuv_directory, "test.yuv"), os.path.join(yuv_directory, "test.yuv"),
options.vmaf_model, options.vmaf_model,
'--log',
logfile,
'--log-fmt',
'json',
] ]
if options.vmaf_phone_model: if options.vmaf_phone_model:
cmd.append('--phone-model') cmd.append('--phone-model')
vmaf = subprocess.Popen(cmd, stdin=_DevNull(), vmaf = subprocess.Popen(cmd, stdin=_DevNull(),
stdout=subprocess.PIPE, stderr=sys.stderr) stdout=sys.stdout, stderr=sys.stderr)
vmaf.wait() vmaf.wait()
if vmaf.returncode != 0: if vmaf.returncode != 0:
print 'Failed to run VMAF.' print 'Failed to run VMAF.'
return 1 return 1
output = vmaf.stdout.read()
# Extract score from VMAF output.
try:
score = float(output.split('\n')[2].split()[3])
except (ValueError, IndexError):
print 'Error in VMAF output (expected "VMAF score = [float]" on line 3):'
print output
return 1
print 'RESULT Vmaf: %s= %f' % (options.label, score) # Read per-frame scores from VMAF output and print.
with open(logfile) as f:
vmaf_data = json.load(f)
vmaf_scores = []
for frame in vmaf_data['frames']:
vmaf_scores.append(frame['metrics']['vmaf'])
print 'RESULT VMAF: %s=' % options.label, vmaf_scores
return 0 return 0
@@ -238,15 +239,17 @@ def main():
try: try:
# Directory to save temporary YUV files for VMAF in frame_analyzer. # Directory to save temporary YUV files for VMAF in frame_analyzer.
yuv_directory = tempfile.mkdtemp() yuv_directory = tempfile.mkdtemp()
_, vmaf_logfile = tempfile.mkstemp()
# Run frame analyzer to compare the videos and print output. # Run frame analyzer to compare the videos and print output.
if _RunFrameAnalyzer(options, yuv_directory=yuv_directory) != 0: if _RunFrameAnalyzer(options, yuv_directory=yuv_directory) != 0:
return 1 return 1
# Run VMAF for further video comparison and print output. # Run VMAF for further video comparison and print output.
return _RunVmaf(options, yuv_directory) return _RunVmaf(options, yuv_directory, vmaf_logfile)
finally: finally:
shutil.rmtree(yuv_directory) shutil.rmtree(yuv_directory)
os.remove(vmaf_logfile)
else: else:
return _RunFrameAnalyzer(options) return _RunFrameAnalyzer(options)