Reformat python files checked by pylint (part 1/2).

After recently changing .pylintrc (see [1]) we discovered that
the presubmit check always checks all the Python files, even when
just one Python file gets updated.

This CL moves all these files one step closer to what the linter
wants.

Autogenerated with:

# Added all the files under pylint control to ~/Desktop/to-reformat
cat ~/Desktop/to-reformat | xargs sed -i '1i\\'
git cl format --python --full
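
The sed step inserts a line at the top of every file listed in
~/Desktop/to-reformat, which marks them all as modified so that
git cl format --python --full (which runs yapf from depot_tools)
reformats each file in full. A minimal sketch of the kind of change
this produces, lifted from the CheckNoIOStreamInHeaders hunk in the
first file below:

# Before: continuation arguments packed onto as few lines as possible.
results.extend(CheckNoIOStreamInHeaders(
    input_api, output_api, source_file_filter=non_third_party_sources))

# After: one argument per line, aligned with the opening parenthesis.
results.extend(
    CheckNoIOStreamInHeaders(input_api,
                             output_api,
                             source_file_filter=non_third_party_sources))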

This is part 1 out of 2. The second part will fix function names and
will not be automated.
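
The part 2 renames are presumably needed because pylint's naming check
(invalid-name, C0103) by default expects snake_case function names,
while this code uses Chromium-style CamelCase. A hypothetical sketch of
such a rename (this CL, part 1, only reflows code and keeps names):

# This CL (part 1) only reflows the body; the name stays Chromium-style:
def CheckNoPragmaOnce(input_api, output_api, source_file_filter):
    pass

# Part 2 would rename it to something pylint's default regex accepts:
def check_no_pragma_once(input_api, output_api, source_file_filter):
    pass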

[1] - https://webrtc-review.googlesource.com/c/src/+/186664

No-Presubmit: True
Bug: webrtc:12114
Change-Id: Idfec4d759f209a2090440d0af2413a1ddc01b841
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/190980
Commit-Queue: Mirko Bonadei <mbonadei@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#32530}
Commit 8cc6695652 (parent d3a3e9ef36), authored by Mirko Bonadei
on 2020-10-30 10:13:45 +01:00 and committed by Commit Bot.
93 changed files with 9936 additions and 9285 deletions.

(first changed file)

@ -100,8 +100,7 @@ API_DIRS = NATIVE_API_DIRS[:] + LEGACY_API_DIRS[:]
TARGET_RE = re.compile(
r'(?P<indent>\s*)(?P<target_type>\w+)\("(?P<target_name>\w+)"\) {'
r'(?P<target_contents>.*?)'
r'(?P=indent)}',
re.MULTILINE | re.DOTALL)
r'(?P=indent)}', re.MULTILINE | re.DOTALL)
# SOURCES_RE matches a block of sources inside a GN target.
SOURCES_RE = re.compile(r'sources \+?= \[(?P<sources>.*?)\]',
@ -139,16 +138,18 @@ def VerifyNativeApiHeadersListIsValid(input_api, output_api):
non_existing_paths = []
native_api_full_paths = [
input_api.os_path.join(input_api.PresubmitLocalPath(),
*path.split('/')) for path in API_DIRS]
*path.split('/')) for path in API_DIRS
]
for path in native_api_full_paths:
if not os.path.isdir(path):
non_existing_paths.append(path)
if non_existing_paths:
return [output_api.PresubmitError(
return [
output_api.PresubmitError(
'Directories to native API headers have changed which has made the '
'list in PRESUBMIT.py outdated.\nPlease update it to the current '
'location of our native APIs.',
non_existing_paths)]
'location of our native APIs.', non_existing_paths)
]
return []
@ -194,14 +195,13 @@ def CheckNativeApiHeaderChanges(input_api, output_api):
return []
def CheckNoIOStreamInHeaders(input_api, output_api,
source_file_filter):
def CheckNoIOStreamInHeaders(input_api, output_api, source_file_filter):
"""Checks to make sure no .h files include <iostream>."""
files = []
pattern = input_api.re.compile(r'^#include\s*<iostream>',
input_api.re.MULTILINE)
file_filter = lambda x: (input_api.FilterSourceFile(x)
and source_file_filter(x))
file_filter = lambda x: (input_api.FilterSourceFile(x) and
source_file_filter(x))
for f in input_api.AffectedSourceFiles(file_filter):
if not f.LocalPath().endswith('.h'):
continue
@ -210,22 +210,22 @@ def CheckNoIOStreamInHeaders(input_api, output_api,
files.append(f)
if len(files):
return [output_api.PresubmitError(
'Do not #include <iostream> in header files, since it inserts static ' +
'initialization into every file including the header. Instead, ' +
'#include <ostream>. See http://crbug.com/94794',
files)]
return [
output_api.PresubmitError(
'Do not #include <iostream> in header files, since it inserts static '
+
'initialization into every file including the header. Instead, '
+ '#include <ostream>. See http://crbug.com/94794', files)
]
return []
def CheckNoPragmaOnce(input_api, output_api,
source_file_filter):
def CheckNoPragmaOnce(input_api, output_api, source_file_filter):
"""Make sure that banned functions are not used."""
files = []
pattern = input_api.re.compile(r'^#pragma\s+once',
input_api.re.MULTILINE)
file_filter = lambda x: (input_api.FilterSourceFile(x)
and source_file_filter(x))
pattern = input_api.re.compile(r'^#pragma\s+once', input_api.re.MULTILINE)
file_filter = lambda x: (input_api.FilterSourceFile(x) and
source_file_filter(x))
for f in input_api.AffectedSourceFiles(file_filter):
if not f.LocalPath().endswith('.h'):
continue
@ -234,22 +234,26 @@ def CheckNoPragmaOnce(input_api, output_api,
files.append(f)
if files:
return [output_api.PresubmitError(
return [
output_api.PresubmitError(
'Do not use #pragma once in header files.\n'
'See http://www.chromium.org/developers/coding-style#TOC-File-headers',
files)]
files)
]
return []
def CheckNoFRIEND_TEST(input_api, output_api, # pylint: disable=invalid-name
def CheckNoFRIEND_TEST(
input_api,
output_api, # pylint: disable=invalid-name
source_file_filter):
"""Make sure that gtest's FRIEND_TEST() macro is not used, the
FRIEND_TEST_ALL_PREFIXES() macro from testsupport/gtest_prod_util.h should be
used instead since that allows for FLAKY_, FAILS_ and DISABLED_ prefixes."""
problems = []
file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h'))
and source_file_filter(f))
file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and
source_file_filter(f))
for f in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in f.ChangedContents():
if 'FRIEND_TEST(' in line:
@ -257,9 +261,12 @@ def CheckNoFRIEND_TEST(input_api, output_api, # pylint: disable=invalid-name
if not problems:
return []
return [output_api.PresubmitPromptWarning('WebRTC\'s code should not use '
return [
output_api.PresubmitPromptWarning(
'WebRTC\'s code should not use '
'gtest\'s FRIEND_TEST() macro. Include testsupport/gtest_prod_util.h and '
'use FRIEND_TEST_ALL_PREFIXES() instead.\n' + '\n'.join(problems))]
'use FRIEND_TEST_ALL_PREFIXES() instead.\n' + '\n'.join(problems))
]
def IsLintDisabled(disabled_paths, file_path):
@ -289,8 +296,9 @@ def CheckApprovedFilesLintClean(input_api, output_api,
cpplint._SetFilters(','.join(lint_filters))
# Create a platform independent exempt list for cpplint.
disabled_paths = [input_api.os_path.join(*path.split('/'))
for path in CPPLINT_EXCEPTIONS]
disabled_paths = [
input_api.os_path.join(*path.split('/')) for path in CPPLINT_EXCEPTIONS
]
# Use the strictest verbosity level for cpplint.py (level 1) which is the
# default when running cpplint.py from command line. To make it possible to
@ -329,20 +337,23 @@ def CheckNoSourcesAbove(input_api, gn_files, output_api):
for source_block_match in source_pattern.finditer(contents):
# Find all source list entries starting with ../ in the source block
# (exclude overrides entries).
for file_list_match in file_pattern.finditer(source_block_match.group(1)):
for file_list_match in file_pattern.finditer(
source_block_match.group(1)):
source_file = file_list_match.group(1)
if 'overrides/' not in source_file:
violating_source_entries.append(source_file)
violating_gn_files.add(gn_file)
if violating_gn_files:
return [output_api.PresubmitError(
return [
output_api.PresubmitError(
'Referencing source files above the directory of the GN file is not '
'allowed. Please introduce new GN targets in the proper location '
'instead.\n'
'Invalid source entries:\n'
'%s\n'
'Violating GN files:' % '\n'.join(violating_source_entries),
items=violating_gn_files)]
items=violating_gn_files)
]
return []
@ -371,8 +382,9 @@ def CheckAbseilDependencies(input_api, gn_files, output_api):
for dep in deps:
if re.search(absl_re, dep):
errors.append(
output_api.PresubmitError(error_msg % (target_name,
gn_file.LocalPath())))
output_api.PresubmitError(
error_msg %
(target_name, gn_file.LocalPath())))
break # no need to warn more than once per target
return errors
@ -420,7 +432,8 @@ def CheckNoMixingSources(input_api, gn_files, output_api):
c_files = []
cc_files = []
objc_files = []
for file_match in FILE_PATH_RE.finditer(sources_match.group(1)):
for file_match in FILE_PATH_RE.finditer(
sources_match.group(1)):
file_path = file_match.group('file_path')
extension = file_match.group('extension')
if extension == '.c':
@ -431,31 +444,41 @@ def CheckNoMixingSources(input_api, gn_files, output_api):
objc_files.append(file_path + extension)
list_of_sources.append((c_files, cc_files, objc_files))
for c_files_list, cc_files_list, objc_files_list in list_of_sources:
if _MoreThanOneSourceUsed(c_files_list, cc_files_list, objc_files_list):
all_sources = sorted(c_files_list + cc_files_list + objc_files_list)
errors[gn_file.LocalPath()].append((target_name, all_sources))
if _MoreThanOneSourceUsed(c_files_list, cc_files_list,
objc_files_list):
all_sources = sorted(c_files_list + cc_files_list +
objc_files_list)
errors[gn_file.LocalPath()].append(
(target_name, all_sources))
if errors:
return [output_api.PresubmitError(
return [
output_api.PresubmitError(
'GN targets cannot mix .c, .cc and .m (or .mm) source files.\n'
'Please create a separate target for each collection of sources.\n'
'Mixed sources: \n'
'%s\n'
'Violating GN files:\n%s\n' % (json.dumps(errors, indent=2),
'\n'.join(errors.keys())))]
'Violating GN files:\n%s\n' %
(json.dumps(errors, indent=2), '\n'.join(errors.keys())))
]
return []
def CheckNoPackageBoundaryViolations(input_api, gn_files, output_api):
cwd = input_api.PresubmitLocalPath()
with _AddToPath(input_api.os_path.join(
cwd, 'tools_webrtc', 'presubmit_checks_lib')):
with _AddToPath(
input_api.os_path.join(cwd, 'tools_webrtc',
'presubmit_checks_lib')):
from check_package_boundaries import CheckPackageBoundaries
build_files = [os.path.join(cwd, gn_file.LocalPath()) for gn_file in gn_files]
build_files = [
os.path.join(cwd, gn_file.LocalPath()) for gn_file in gn_files
]
errors = CheckPackageBoundaries(cwd, build_files)[:5]
if errors:
return [output_api.PresubmitError(
return [
output_api.PresubmitError(
'There are package boundary violations in the following GN files:',
long_text='\n\n'.join(str(err) for err in errors))]
long_text='\n\n'.join(str(err) for err in errors))
]
return []
@ -464,7 +487,9 @@ def _ReportFileAndLine(filename, line_num):
return '%s (line %s)' % (filename, line_num)
def CheckNoWarningSuppressionFlagsAreAdded(gn_files, input_api, output_api,
def CheckNoWarningSuppressionFlagsAreAdded(gn_files,
input_api,
output_api,
error_formatter=_ReportFileAndLine):
"""Make sure that warning suppression flags are not added wihtout a reason."""
msg = ('Usage of //build/config/clang:extra_warnings is discouraged '
@ -476,7 +501,8 @@ def CheckNoWarningSuppressionFlagsAreAdded(gn_files, input_api, output_api,
'\n'
'Affected files:\n')
errors = [] # 2-element tuples with (file, line number)
clang_warn_re = input_api.re.compile(r'//build/config/clang:extra_warnings')
clang_warn_re = input_api.re.compile(
r'//build/config/clang:extra_warnings')
no_presubmit_re = input_api.re.compile(
r'# no-presubmit-check TODO\(bugs\.webrtc\.org/\d+\)')
for f in gn_files:
@ -488,7 +514,9 @@ def CheckNoWarningSuppressionFlagsAreAdded(gn_files, input_api, output_api,
return []
def CheckNoTestCaseUsageIsAdded(input_api, output_api, source_file_filter,
def CheckNoTestCaseUsageIsAdded(input_api,
output_api,
source_file_filter,
error_formatter=_ReportFileAndLine):
error_msg = ('Usage of legacy GoogleTest API detected!\nPlease use the '
'new API: https://github.com/google/googletest/blob/master/'
@ -496,8 +524,8 @@ def CheckNoTestCaseUsageIsAdded(input_api, output_api, source_file_filter,
'Affected files:\n')
errors = [] # 2-element tuples with (file, line number)
test_case_re = input_api.re.compile(r'TEST_CASE')
file_filter = lambda f: (source_file_filter(f)
and f.LocalPath().endswith('.cc'))
file_filter = lambda f: (source_file_filter(f) and f.LocalPath().endswith(
'.cc'))
for f in input_api.AffectedSourceFiles(file_filter):
for line_num, line in f.ChangedContents():
if test_case_re.search(line):
@ -507,11 +535,13 @@ def CheckNoTestCaseUsageIsAdded(input_api, output_api, source_file_filter,
return []
def CheckNoStreamUsageIsAdded(input_api, output_api,
def CheckNoStreamUsageIsAdded(input_api,
output_api,
source_file_filter,
error_formatter=_ReportFileAndLine):
"""Make sure that no more dependencies on stringstream are added."""
error_msg = ('Usage of <sstream>, <istream> and <ostream> in WebRTC is '
error_msg = (
'Usage of <sstream>, <istream> and <ostream> in WebRTC is '
'deprecated.\n'
'This includes the following types:\n'
'std::istringstream, std::ostringstream, std::wistringstream, '
@ -531,20 +561,19 @@ def CheckNoStreamUsageIsAdded(input_api, output_api,
'Affected files:\n')
errors = [] # 2-element tuples with (file, line number)
include_re = input_api.re.compile(r'#include <(i|o|s)stream>')
usage_re = input_api.re.compile(r'std::(w|i|o|io|wi|wo|wio)(string)*stream')
usage_re = input_api.re.compile(
r'std::(w|i|o|io|wi|wo|wio)(string)*stream')
no_presubmit_re = input_api.re.compile(
r'// no-presubmit-check TODO\(webrtc:8982\)')
file_filter = lambda x: (input_api.FilterSourceFile(x)
and source_file_filter(x))
file_filter = lambda x: (input_api.FilterSourceFile(x) and
source_file_filter(x))
def _IsException(file_path):
is_test = any(file_path.endswith(x) for x in ['_test.cc', '_tests.cc',
'_unittest.cc',
'_unittests.cc'])
return (file_path.startswith('examples') or
file_path.startswith('test') or
is_test)
is_test = any(
file_path.endswith(x) for x in
['_test.cc', '_tests.cc', '_unittest.cc', '_unittests.cc'])
return (file_path.startswith('examples')
or file_path.startswith('test') or is_test)
for f in input_api.AffectedSourceFiles(file_filter):
# Usage of stringstream is allowed under examples/ and in tests.
@ -577,14 +606,16 @@ def CheckPublicDepsIsNotUsed(gn_files, input_api, output_api):
surpressed = no_presubmit_check_re.search(affected_line)
if not surpressed:
result.append(
output_api.PresubmitError(error_msg % (affected_file.LocalPath(),
line_number)))
output_api.PresubmitError(
error_msg %
(affected_file.LocalPath(), line_number)))
return result
def CheckCheckIncludesIsNotUsed(gn_files, input_api, output_api):
result = []
error_msg = ('check_includes overrides are not allowed since it can cause '
error_msg = (
'check_includes overrides are not allowed since it can cause '
'incorrect dependencies to form. It effectively means that your '
'module can include any .h file without depending on its '
'corresponding target. There are some exceptional cases when '
@ -598,14 +629,15 @@ def CheckCheckIncludesIsNotUsed(gn_files, input_api, output_api):
if ('check_includes' in affected_line
and not no_presubmit_re.search(affected_line)):
result.append(
output_api.PresubmitError(error_msg % (affected_file.LocalPath(),
line_number)))
output_api.PresubmitError(
error_msg % (affected_file.LocalPath(), line_number)))
return result
def CheckGnChanges(input_api, output_api):
file_filter = lambda x: (input_api.FilterSourceFile(
x, files_to_check=(r'.+\.(gn|gni)$',),
x,
files_to_check=(r'.+\.(gn|gni)$', ),
files_to_skip=(r'.*/presubmit_checks_lib/testdata/.*', )))
gn_files = []
@ -617,11 +649,14 @@ def CheckGnChanges(input_api, output_api):
result.extend(CheckNoSourcesAbove(input_api, gn_files, output_api))
result.extend(CheckNoMixingSources(input_api, gn_files, output_api))
result.extend(CheckAbseilDependencies(input_api, gn_files, output_api))
result.extend(CheckNoPackageBoundaryViolations(input_api, gn_files,
result.extend(
CheckNoPackageBoundaryViolations(input_api, gn_files, output_api))
result.extend(CheckPublicDepsIsNotUsed(gn_files, input_api,
output_api))
result.extend(CheckPublicDepsIsNotUsed(gn_files, input_api, output_api))
result.extend(CheckCheckIncludesIsNotUsed(gn_files, input_api, output_api))
result.extend(CheckNoWarningSuppressionFlagsAreAdded(gn_files, input_api,
result.extend(
CheckCheckIncludesIsNotUsed(gn_files, input_api, output_api))
result.extend(
CheckNoWarningSuppressionFlagsAreAdded(gn_files, input_api,
output_api))
return result
@ -631,15 +666,18 @@ def CheckGnGen(input_api, output_api):
#includes and dependencies in the BUILD.gn files, as well as general build
errors.
"""
with _AddToPath(input_api.os_path.join(
input_api.PresubmitLocalPath(), 'tools_webrtc', 'presubmit_checks_lib')):
with _AddToPath(
input_api.os_path.join(input_api.PresubmitLocalPath(),
'tools_webrtc', 'presubmit_checks_lib')):
from build_helpers import RunGnCheck
errors = RunGnCheck(FindSrcDirPath(input_api.PresubmitLocalPath()))[:5]
if errors:
return [output_api.PresubmitPromptWarning(
return [
output_api.PresubmitPromptWarning(
'Some #includes do not match the build dependency graph. Please run:\n'
' gn gen --check <out_dir>',
long_text='\n\n'.join(errors))]
long_text='\n\n'.join(errors))
]
return []
@ -654,11 +692,14 @@ def CheckUnwantedDependencies(input_api, output_api, source_file_filter):
# roundabout construct to import checkdeps because this file is
# eval-ed and thus doesn't have __file__.
src_path = FindSrcDirPath(input_api.PresubmitLocalPath())
checkdeps_path = input_api.os_path.join(src_path, 'buildtools', 'checkdeps')
checkdeps_path = input_api.os_path.join(src_path, 'buildtools',
'checkdeps')
if not os.path.exists(checkdeps_path):
return [output_api.PresubmitError(
return [
output_api.PresubmitError(
'Cannot find checkdeps at %s\nHave you run "gclient sync" to '
'download all the DEPS entries?' % checkdeps_path)]
'download all the DEPS entries?' % checkdeps_path)
]
with _AddToPath(checkdeps_path):
import checkdeps
from cpp_checker import CppChecker
@ -686,20 +727,20 @@ def CheckUnwantedDependencies(input_api, output_api, source_file_filter):
results = []
if error_descriptions:
results.append(output_api.PresubmitError(
results.append(
output_api.PresubmitError(
'You added one or more #includes that violate checkdeps rules.\n'
'Check that the DEPS files in these locations contain valid rules.\n'
'See https://cs.chromium.org/chromium/src/buildtools/checkdeps/ for '
'more details about checkdeps.',
error_descriptions))
'more details about checkdeps.', error_descriptions))
if warning_descriptions:
results.append(output_api.PresubmitPromptOrNotify(
results.append(
output_api.PresubmitPromptOrNotify(
'You added one or more #includes of files that are temporarily\n'
'allowed but being removed. Can you avoid introducing the\n'
'#include? See relevant DEPS file(s) for details and contacts.\n'
'See https://cs.chromium.org/chromium/src/buildtools/checkdeps/ for '
'more details about checkdeps.',
warning_descriptions))
'more details about checkdeps.', warning_descriptions))
return results
@ -707,7 +748,8 @@ def CheckCommitMessageBugEntry(input_api, output_api):
"""Check that bug entries are well-formed in commit message."""
bogus_bug_msg = (
'Bogus Bug entry: %s. Please specify the issue tracker prefix and the '
'issue number, separated by a colon, e.g. webrtc:123 or chromium:12345.')
'issue number, separated by a colon, e.g. webrtc:123 or chromium:12345.'
)
results = []
for bug in input_api.change.BugsFromDescription():
bug = bug.strip()
@ -720,7 +762,8 @@ def CheckCommitMessageBugEntry(input_api, output_api):
prefix_guess = 'chromium'
else:
prefix_guess = 'webrtc'
results.append('Bug entry requires issue tracker prefix, e.g. %s:%s' %
results.append(
'Bug entry requires issue tracker prefix, e.g. %s:%s' %
(prefix_guess, bug))
except ValueError:
results.append(bogus_bug_msg % bug)
@ -742,18 +785,22 @@ def CheckChangeHasBugField(input_api, output_api):
if input_api.change.BugsFromDescription():
return []
else:
return [output_api.PresubmitError(
return [
output_api.PresubmitError(
'The "Bug: [bug number]" footer is mandatory. Please create a bug and '
'reference it using either of:\n'
' * https://bugs.webrtc.org - reference it using Bug: webrtc:XXXX\n'
' * https://crbug.com - reference it using Bug: chromium:XXXXXX')]
' * https://crbug.com - reference it using Bug: chromium:XXXXXX'
)
]
def CheckJSONParseErrors(input_api, output_api, source_file_filter):
"""Check that JSON files do not contain syntax errors."""
def FilterFile(affected_file):
return (input_api.os_path.splitext(affected_file.LocalPath())[1] == '.json'
return (input_api.os_path.splitext(
affected_file.LocalPath())[1] == '.json'
and source_file_filter(affected_file))
def GetJSONParseError(input_api, filename):
@ -765,14 +812,15 @@ def CheckJSONParseErrors(input_api, output_api, source_file_filter):
return None
results = []
for affected_file in input_api.AffectedFiles(
file_filter=FilterFile, include_deletes=False):
for affected_file in input_api.AffectedFiles(file_filter=FilterFile,
include_deletes=False):
parse_error = GetJSONParseError(input_api,
affected_file.AbsoluteLocalPath())
if parse_error:
results.append(output_api.PresubmitError('%s could not be parsed: %s' %
(affected_file.LocalPath(),
parse_error)))
results.append(
output_api.PresubmitError(
'%s could not be parsed: %s' %
(affected_file.LocalPath(), parse_error)))
return results
@ -785,8 +833,8 @@ def RunPythonTests(input_api, output_api):
Join('rtc_tools', 'py_event_log_analyzer'),
Join('audio', 'test', 'unittests'),
] + [
root for root, _, files in os.walk(Join('tools_webrtc'))
if any(f.endswith('_test.py') for f in files)
root for root, _, files in os.walk(Join('tools_webrtc')) if any(
f.endswith('_test.py') for f in files)
]
tests = []
@ -806,8 +854,8 @@ def CheckUsageOfGoogleProtobufNamespace(input_api, output_api,
files = []
pattern = input_api.re.compile(r'google::protobuf')
proto_utils_path = os.path.join('rtc_base', 'protobuf_utils.h')
file_filter = lambda x: (input_api.FilterSourceFile(x)
and source_file_filter(x))
file_filter = lambda x: (input_api.FilterSourceFile(x) and
source_file_filter(x))
for f in input_api.AffectedSourceFiles(file_filter):
if f.LocalPath() in [proto_utils_path, 'PRESUBMIT.py']:
continue
@ -816,10 +864,12 @@ def CheckUsageOfGoogleProtobufNamespace(input_api, output_api,
files.append(f)
if files:
return [output_api.PresubmitError(
return [
output_api.PresubmitError(
'Please avoid to use namespace `google::protobuf` directly.\n'
'Add a using directive in `%s` and include that header instead.'
% proto_utils_path, files)]
% proto_utils_path, files)
]
return []
@ -854,14 +904,19 @@ def CommonChecks(input_api, output_api):
r".*\bobjc[\\\/].*",
r".*objc\.[hcm]+$",
)
source_file_filter = lambda x: input_api.FilterSourceFile(x, None,
exception_list)
results.extend(CheckApprovedFilesLintClean(
input_api, output_api, source_file_filter))
results.extend(input_api.canned_checks.CheckLicense(
input_api, output_api, _LicenseHeader(input_api)))
results.extend(input_api.canned_checks.RunPylint(input_api, output_api,
files_to_skip=(r'^base[\\\/].*\.py$',
source_file_filter = lambda x: input_api.FilterSourceFile(
x, None, exception_list)
results.extend(
CheckApprovedFilesLintClean(input_api, output_api, source_file_filter))
results.extend(
input_api.canned_checks.CheckLicense(input_api, output_api,
_LicenseHeader(input_api)))
results.extend(
input_api.canned_checks.RunPylint(
input_api,
output_api,
files_to_skip=(
r'^base[\\\/].*\.py$',
r'^build[\\\/].*\.py$',
r'^buildtools[\\\/].*\.py$',
r'^infra[\\\/].*\.py$',
@ -872,7 +927,8 @@ def CommonChecks(input_api, output_api):
r'^tools[\\\/].*\.py$',
# TODO(phoglund): should arguably be checked.
r'^tools_webrtc[\\\/]mb[\\\/].*\.py$',
r'^xcodebuild.*[\\\/].*\.py$',),
r'^xcodebuild.*[\\\/].*\.py$',
),
pylintrc='pylintrc'))
# TODO(nisse): talk/ is no more, so make below checks simpler?
@ -887,62 +943,93 @@ def CommonChecks(input_api, output_api):
build_file_filter_list = (r'.+\.gn$', r'.+\.gni$', 'DEPS')
# Also we will skip most checks for third_party directory.
third_party_filter_list = (r'^third_party[\\\/].+', )
eighty_char_sources = lambda x: input_api.FilterSourceFile(x,
eighty_char_sources = lambda x: input_api.FilterSourceFile(
x,
files_to_skip=build_file_filter_list + objc_filter_list +
third_party_filter_list)
hundred_char_sources = lambda x: input_api.FilterSourceFile(x,
files_to_check=objc_filter_list)
non_third_party_sources = lambda x: input_api.FilterSourceFile(x,
files_to_skip=third_party_filter_list)
hundred_char_sources = lambda x: input_api.FilterSourceFile(
x, files_to_check=objc_filter_list)
non_third_party_sources = lambda x: input_api.FilterSourceFile(
x, files_to_skip=third_party_filter_list)
results.extend(input_api.canned_checks.CheckLongLines(
input_api, output_api, maxlen=80, source_file_filter=eighty_char_sources))
results.extend(input_api.canned_checks.CheckLongLines(
input_api, output_api, maxlen=100,
results.extend(
input_api.canned_checks.CheckLongLines(
input_api,
output_api,
maxlen=80,
source_file_filter=eighty_char_sources))
results.extend(
input_api.canned_checks.CheckLongLines(
input_api,
output_api,
maxlen=100,
source_file_filter=hundred_char_sources))
results.extend(input_api.canned_checks.CheckChangeHasNoTabs(
results.extend(
input_api.canned_checks.CheckChangeHasNoTabs(
input_api, output_api, source_file_filter=non_third_party_sources))
results.extend(input_api.canned_checks.CheckChangeHasNoStrayWhitespace(
results.extend(
input_api.canned_checks.CheckChangeHasNoStrayWhitespace(
input_api, output_api, source_file_filter=non_third_party_sources))
results.extend(input_api.canned_checks.CheckAuthorizedAuthor(
input_api, output_api, bot_allowlist=[
results.extend(
input_api.canned_checks.CheckAuthorizedAuthor(
input_api,
output_api,
bot_allowlist=[
'chromium-webrtc-autoroll@webrtc-ci.iam.gserviceaccount.com'
]))
results.extend(input_api.canned_checks.CheckChangeTodoHasOwner(
results.extend(
input_api.canned_checks.CheckChangeTodoHasOwner(
input_api, output_api, source_file_filter=non_third_party_sources))
results.extend(input_api.canned_checks.CheckPatchFormatted(
input_api, output_api))
results.extend(
input_api.canned_checks.CheckPatchFormatted(input_api, output_api))
results.extend(CheckNativeApiHeaderChanges(input_api, output_api))
results.extend(CheckNoIOStreamInHeaders(
input_api, output_api, source_file_filter=non_third_party_sources))
results.extend(CheckNoPragmaOnce(
input_api, output_api, source_file_filter=non_third_party_sources))
results.extend(CheckNoFRIEND_TEST(
input_api, output_api, source_file_filter=non_third_party_sources))
results.extend(
CheckNoIOStreamInHeaders(input_api,
output_api,
source_file_filter=non_third_party_sources))
results.extend(
CheckNoPragmaOnce(input_api,
output_api,
source_file_filter=non_third_party_sources))
results.extend(
CheckNoFRIEND_TEST(input_api,
output_api,
source_file_filter=non_third_party_sources))
results.extend(CheckGnChanges(input_api, output_api))
results.extend(CheckUnwantedDependencies(
input_api, output_api, source_file_filter=non_third_party_sources))
results.extend(CheckJSONParseErrors(
input_api, output_api, source_file_filter=non_third_party_sources))
results.extend(
CheckUnwantedDependencies(input_api,
output_api,
source_file_filter=non_third_party_sources))
results.extend(
CheckJSONParseErrors(input_api,
output_api,
source_file_filter=non_third_party_sources))
results.extend(RunPythonTests(input_api, output_api))
results.extend(CheckUsageOfGoogleProtobufNamespace(
results.extend(
CheckUsageOfGoogleProtobufNamespace(
input_api, output_api, source_file_filter=non_third_party_sources))
results.extend(CheckOrphanHeaders(
results.extend(
CheckOrphanHeaders(input_api,
output_api,
source_file_filter=non_third_party_sources))
results.extend(
CheckNewlineAtTheEndOfProtoFiles(
input_api, output_api, source_file_filter=non_third_party_sources))
results.extend(CheckNewlineAtTheEndOfProtoFiles(
input_api, output_api, source_file_filter=non_third_party_sources))
results.extend(CheckNoStreamUsageIsAdded(
input_api, output_api, non_third_party_sources))
results.extend(CheckNoTestCaseUsageIsAdded(
input_api, output_api, non_third_party_sources))
results.extend(
CheckNoStreamUsageIsAdded(input_api, output_api,
non_third_party_sources))
results.extend(
CheckNoTestCaseUsageIsAdded(input_api, output_api,
non_third_party_sources))
results.extend(CheckAddedDepsHaveTargetApprovals(input_api, output_api))
results.extend(CheckApiDepsFileIsUpToDate(input_api, output_api))
results.extend(CheckAbslMemoryInclude(
input_api, output_api, non_third_party_sources))
results.extend(CheckBannedAbslMakeUnique(
input_api, output_api, non_third_party_sources))
results.extend(CheckObjcApiSymbols(
input_api, output_api, non_third_party_sources))
results.extend(
CheckAbslMemoryInclude(input_api, output_api, non_third_party_sources))
results.extend(
CheckBannedAbslMakeUnique(input_api, output_api,
non_third_party_sources))
results.extend(
CheckObjcApiSymbols(input_api, output_api, non_third_party_sources))
return results
@ -969,8 +1056,8 @@ def CheckApiDepsFileIsUpToDate(input_api, output_api):
for f in input_api.AffectedFiles():
path_tokens = [t for t in f.LocalPath().split(os.sep) if t]
if len(path_tokens) > 1:
if (path_tokens[0] not in dirs_to_skip and
os.path.isdir(os.path.join(input_api.PresubmitLocalPath(),
if (path_tokens[0] not in dirs_to_skip and os.path.isdir(
os.path.join(input_api.PresubmitLocalPath(),
path_tokens[0]))):
dirs_to_check.add(path_tokens[0])
@ -992,7 +1079,8 @@ def CheckApiDepsFileIsUpToDate(input_api, output_api):
error_msg.append(' ...\n')
error_msg.append(']\n')
results.append(output_api.PresubmitError(
results.append(
output_api.PresubmitError(
'New root level directory detected! WebRTC api/ headers should '
'not #include headers from \n'
'the new directory, so please update "include_rules" in file\n'
@ -1000,34 +1088,37 @@ def CheckApiDepsFileIsUpToDate(input_api, output_api):
return results
def CheckBannedAbslMakeUnique(input_api, output_api, source_file_filter):
file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h'))
and source_file_filter(f))
file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and
source_file_filter(f))
files = []
for f in input_api.AffectedFiles(
include_deletes=False, file_filter=file_filter):
for f in input_api.AffectedFiles(include_deletes=False,
file_filter=file_filter):
for _, line in f.ChangedContents():
if 'absl::make_unique' in line:
files.append(f)
break
if len(files):
return [output_api.PresubmitError(
return [
output_api.PresubmitError(
'Please use std::make_unique instead of absl::make_unique.\n'
'Affected files:',
files)]
'Affected files:', files)
]
return []
def CheckObjcApiSymbols(input_api, output_api, source_file_filter):
rtc_objc_export = re.compile(r'RTC_OBJC_EXPORT(.|\n){26}',
re.MULTILINE | re.DOTALL)
file_filter = lambda f: (f.LocalPath().endswith(('.h'))
and source_file_filter(f))
file_filter = lambda f: (f.LocalPath().endswith(('.h')) and
source_file_filter(f))
files = []
file_filter = lambda x: (input_api.FilterSourceFile(x)
and source_file_filter(x))
file_filter = lambda x: (input_api.FilterSourceFile(x) and
source_file_filter(x))
for f in input_api.AffectedSourceFiles(file_filter):
if not f.LocalPath().endswith('.h') or not 'sdk/objc' in f.LocalPath():
continue
@ -1038,25 +1129,26 @@ def CheckObjcApiSymbols(input_api, output_api, source_file_filter):
files.append(f.LocalPath())
if len(files):
return [output_api.PresubmitError(
'RTC_OBJC_EXPORT types must be wrapped into an RTC_OBJC_TYPE() ' +
'macro.\n\n' +
'For example:\n' +
return [
output_api.PresubmitError(
'RTC_OBJC_EXPORT types must be wrapped into an RTC_OBJC_TYPE() '
+ 'macro.\n\n' + 'For example:\n' +
'RTC_OBJC_EXPORT @protocol RTC_OBJC_TYPE(RtcFoo)\n\n' +
'RTC_OBJC_EXPORT @interface RTC_OBJC_TYPE(RtcFoo)\n\n' +
'Please fix the following files:',
files)]
'Please fix the following files:', files)
]
return []
def CheckAbslMemoryInclude(input_api, output_api, source_file_filter):
pattern = input_api.re.compile(
r'^#include\s*"absl/memory/memory.h"', input_api.re.MULTILINE)
file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h'))
and source_file_filter(f))
pattern = input_api.re.compile(r'^#include\s*"absl/memory/memory.h"',
input_api.re.MULTILINE)
file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and
source_file_filter(f))
files = []
for f in input_api.AffectedFiles(
include_deletes=False, file_filter=file_filter):
for f in input_api.AffectedFiles(include_deletes=False,
file_filter=file_filter):
contents = input_api.ReadFile(f)
if pattern.search(contents):
continue
@ -1066,13 +1158,15 @@ def CheckAbslMemoryInclude(input_api, output_api, source_file_filter):
break
if len(files):
return [output_api.PresubmitError(
return [
output_api.PresubmitError(
'Please include "absl/memory/memory.h" header for absl::WrapUnique.\n'
'This header may or may not be included transitively depending on the '
'C++ standard version.',
files)]
'C++ standard version.', files)
]
return []
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(CommonChecks(input_api, output_api))
@ -1087,14 +1181,17 @@ def CheckChangeOnCommit(input_api, output_api):
results.extend(CommonChecks(input_api, output_api))
results.extend(VerifyNativeApiHeadersListIsValid(input_api, output_api))
results.extend(input_api.canned_checks.CheckOwners(input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeWasUploaded(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
results.extend(
input_api.canned_checks.CheckChangeWasUploaded(input_api, output_api))
results.extend(
input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
results.extend(CheckChangeHasBugField(input_api, output_api))
results.extend(CheckCommitMessageBugEntry(input_api, output_api))
results.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
results.extend(
input_api.canned_checks.CheckTreeIsOpen(
input_api,
output_api,
json_url='http://webrtc-status.appspot.com/current?format=json'))
return results
@ -1108,8 +1205,9 @@ def CheckOrphanHeaders(input_api, output_api, source_file_filter):
exempt_paths = [
os.path.join('tools_webrtc', 'ios', 'SDK'),
]
with _AddToPath(input_api.os_path.join(
input_api.PresubmitLocalPath(), 'tools_webrtc', 'presubmit_checks_lib')):
with _AddToPath(
input_api.os_path.join(input_api.PresubmitLocalPath(),
'tools_webrtc', 'presubmit_checks_lib')):
from check_orphan_headers import GetBuildGnPathFromFilePath
from check_orphan_headers import IsHeaderInBuildGn
@ -1119,16 +1217,19 @@ def CheckOrphanHeaders(input_api, output_api, source_file_filter):
if f.LocalPath().endswith('.h'):
file_path = os.path.abspath(f.LocalPath())
root_dir = os.getcwd()
gn_file_path = GetBuildGnPathFromFilePath(file_path, os.path.exists,
root_dir)
gn_file_path = GetBuildGnPathFromFilePath(file_path,
os.path.exists, root_dir)
in_build_gn = IsHeaderInBuildGn(file_path, gn_file_path)
if not in_build_gn:
results.append(output_api.PresubmitError(error_msg.format(
f.LocalPath(), os.path.relpath(gn_file_path))))
results.append(
output_api.PresubmitError(
error_msg.format(f.LocalPath(),
os.path.relpath(gn_file_path))))
return results
def CheckNewlineAtTheEndOfProtoFiles(input_api, output_api, source_file_filter):
def CheckNewlineAtTheEndOfProtoFiles(input_api, output_api,
source_file_filter):
"""Checks that all .proto files are terminated with a newline."""
error_msg = 'File {} must end with exactly one newline.'
results = []
@ -1139,7 +1240,8 @@ def CheckNewlineAtTheEndOfProtoFiles(input_api, output_api, source_file_filter):
with open(file_path) as f:
lines = f.readlines()
if len(lines) > 0 and not lines[-1].endswith('\n'):
results.append(output_api.PresubmitError(error_msg.format(file_path)))
results.append(
output_api.PresubmitError(error_msg.format(file_path)))
return results
@ -1153,8 +1255,7 @@ def _ExtractAddRulesFromParsedDeps(parsed_deps):
rule[1:] for rule in parsed_deps.get('include_rules', [])
if rule.startswith('+') or rule.startswith('!')
])
for _, rules in parsed_deps.get('specific_include_rules',
{}).iteritems():
for _, rules in parsed_deps.get('specific_include_rules', {}).iteritems():
add_rules.update([
rule[1:] for rule in rules
if rule.startswith('+') or rule.startswith('!')
@ -1164,9 +1265,9 @@ def _ExtractAddRulesFromParsedDeps(parsed_deps):
def _ParseDeps(contents):
"""Simple helper for parsing DEPS files."""
# Stubs for handling special syntax in the root DEPS file.
class VarImpl(object):
def __init__(self, local_scope):
self._local_scope = local_scope
@ -1227,8 +1328,8 @@ def CheckAddedDepsHaveTargetApprovals(input_api, output_api):
file_filter=file_filter):
filename = input_api.os_path.basename(f.LocalPath())
if filename == 'DEPS':
virtual_depended_on_files.update(_CalculateAddedDeps(
input_api.os_path,
virtual_depended_on_files.update(
_CalculateAddedDeps(input_api.os_path,
'\n'.join(f.OldContents()),
'\n'.join(f.NewContents())))
@ -1237,15 +1338,23 @@ def CheckAddedDepsHaveTargetApprovals(input_api, output_api):
if input_api.is_committing:
if input_api.tbr:
return [output_api.PresubmitNotifyResult(
'--tbr was specified, skipping OWNERS check for DEPS additions')]
return [
output_api.PresubmitNotifyResult(
'--tbr was specified, skipping OWNERS check for DEPS additions'
)
]
if input_api.dry_run:
return [output_api.PresubmitNotifyResult(
'This is a dry run, skipping OWNERS check for DEPS additions')]
return [
output_api.PresubmitNotifyResult(
'This is a dry run, skipping OWNERS check for DEPS additions'
)
]
if not input_api.change.issue:
return [output_api.PresubmitError(
return [
output_api.PresubmitError(
"DEPS approval by OWNERS check failed: this change has "
"no change number, so we can't check it for approvals.")]
"no change number, so we can't check it for approvals.")
]
output = output_api.PresubmitError
else:
output = output_api.PresubmitNotifyResult
@ -1274,17 +1383,21 @@ def CheckAddedDepsHaveTargetApprovals(input_api, output_api):
return path[:start_deps]
else:
return path
unapproved_dependencies = ["'+%s'," % StripDeps(path)
for path in missing_files]
unapproved_dependencies = [
"'+%s'," % StripDeps(path) for path in missing_files
]
if unapproved_dependencies:
output_list = [
output('You need LGTM from owners of depends-on paths in DEPS that were '
output(
'You need LGTM from owners of depends-on paths in DEPS that were '
'modified in this CL:\n %s' %
'\n '.join(sorted(unapproved_dependencies)))]
'\n '.join(sorted(unapproved_dependencies)))
]
suggested_owners = owners_db.reviewers_for(missing_files, owner_email)
output_list.append(output(
'Suggested missing target path OWNERS:\n %s' %
output_list.append(
output('Suggested missing target path OWNERS:\n %s' %
'\n '.join(suggested_owners or [])))
return output_list

(next changed file)

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
This script is the wrapper that runs the low-bandwidth audio test.
@ -23,7 +22,6 @@ import shutil
import subprocess
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir))
@ -46,24 +44,36 @@ def _LogCommand(command):
def _ParseArgs():
parser = argparse.ArgumentParser(description='Run low-bandwidth audio tests.')
parser = argparse.ArgumentParser(
description='Run low-bandwidth audio tests.')
parser.add_argument('build_dir',
help='Path to the build directory (e.g. out/Release).')
parser.add_argument('--remove', action='store_true',
parser.add_argument('--remove',
action='store_true',
help='Remove output audio files after testing.')
parser.add_argument('--android', action='store_true',
parser.add_argument(
'--android',
action='store_true',
help='Perform the test on a connected Android device instead.')
parser.add_argument('--adb-path', help='Path to adb binary.', default='adb')
parser.add_argument('--num-retries', default='0',
parser.add_argument('--adb-path',
help='Path to adb binary.',
default='adb')
parser.add_argument('--num-retries',
default='0',
help='Number of times to retry the test on Android.')
parser.add_argument('--isolated-script-test-perf-output', default=None,
parser.add_argument(
'--isolated-script-test-perf-output',
default=None,
help='Path to store perf results in histogram proto format.')
parser.add_argument('--extra-test-args', default=[], action='append',
parser.add_argument('--extra-test-args',
default=[],
action='append',
help='Extra args to path to the test binary.')
# Ignore Chromium-specific flags
parser.add_argument('--test-launcher-summary-output',
type=str, default=None)
type=str,
default=None)
args = parser.parse_args()
return args
@ -98,8 +108,7 @@ def _GetPathToTools():
polqa_path = None
if (platform != 'mac' and not polqa_path) or not pesq_path:
logging.error(NO_TOOLS_ERROR_MESSAGE,
toolchain_dir,
logging.error(NO_TOOLS_ERROR_MESSAGE, toolchain_dir,
os.path.join(tools_dir, 'download_tools.py'),
toolchain_dir)
@ -126,8 +135,11 @@ def ExtractTestRuns(lines, echo=False):
yield match.groups()
def _GetFile(file_path, out_dir, move=False,
android=False, adb_prefix=('adb',)):
def _GetFile(file_path,
out_dir,
move=False,
android=False,
adb_prefix=('adb', )):
out_file_name = os.path.basename(file_path)
out_file_path = os.path.join(out_dir, out_file_name)
@ -148,20 +160,26 @@ def _GetFile(file_path, out_dir, move=False,
return out_file_path
def _RunPesq(executable_path, reference_file, degraded_file,
def _RunPesq(executable_path,
reference_file,
degraded_file,
sample_rate_hz=16000):
directory = os.path.dirname(reference_file)
assert os.path.dirname(degraded_file) == directory
# Analyze audio.
command = [executable_path, '+%d' % sample_rate_hz,
command = [
executable_path,
'+%d' % sample_rate_hz,
os.path.basename(reference_file),
os.path.basename(degraded_file)]
os.path.basename(degraded_file)
]
# Need to provide paths in the current directory due to a bug in PESQ:
# On Mac, for some 'path/to/file.wav', if 'file.wav' is longer than
# 'path/to', PESQ crashes.
out = subprocess.check_output(_LogCommand(command),
cwd=directory, stderr=subprocess.STDOUT)
cwd=directory,
stderr=subprocess.STDOUT)
# Find the scores in stdout of PESQ.
match = re.search(
@ -177,10 +195,13 @@ def _RunPesq(executable_path, reference_file, degraded_file,
def _RunPolqa(executable_path, reference_file, degraded_file):
# Analyze audio.
command = [executable_path, '-q', '-LC', 'NB',
'-Ref', reference_file, '-Test', degraded_file]
command = [
executable_path, '-q', '-LC', 'NB', '-Ref', reference_file, '-Test',
degraded_file
]
process = subprocess.Popen(_LogCommand(command),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = process.communicate()
# Find the scores in stdout of POLQA.
@ -212,8 +233,8 @@ def _MergeInPerfResultsFromCcTests(histograms, run_perf_results_file):
histograms.Merge(cc_histograms)
Analyzer = collections.namedtuple('Analyzer', ['name', 'func', 'executable',
'sample_rate_hz'])
Analyzer = collections.namedtuple(
'Analyzer', ['name', 'func', 'executable', 'sample_rate_hz'])
def _ConfigurePythonPath(args):
@ -223,14 +244,15 @@ def _ConfigurePythonPath(args):
# TODO(https://crbug.com/1029452): Use a copy rule and add these from the out
# dir like for the third_party/protobuf code.
sys.path.insert(0, os.path.join(checkout_root, 'third_party', 'catapult',
'tracing'))
sys.path.insert(
0, os.path.join(checkout_root, 'third_party', 'catapult', 'tracing'))
# The low_bandwidth_audio_perf_test gn rule will build the protobuf stub for
# python, so put it in the path for this script before we attempt to import
# it.
histogram_proto_path = os.path.join(
os.path.abspath(args.build_dir), 'pyproto', 'tracing', 'tracing', 'proto')
histogram_proto_path = os.path.join(os.path.abspath(args.build_dir),
'pyproto', 'tracing', 'tracing',
'proto')
sys.path.insert(0, histogram_proto_path)
proto_stub_path = os.path.join(os.path.abspath(args.build_dir), 'pyproto')
sys.path.insert(0, proto_stub_path)
@ -240,7 +262,8 @@ def _ConfigurePythonPath(args):
import histogram_pb2
except ImportError as e:
logging.exception(e)
raise ImportError('Could not import histogram_pb2. You need to build the '
raise ImportError(
'Could not import histogram_pb2. You need to build the '
'low_bandwidth_audio_perf_test target before invoking '
'this script. Expected to find '
'histogram_pb2.py in %s.' % histogram_proto_path)
@ -266,28 +289,32 @@ def main():
out_dir = os.path.join(args.build_dir, '..')
if args.android:
test_command = [os.path.join(args.build_dir, 'bin',
'run_low_bandwidth_audio_test'),
'-v', '--num-retries', args.num_retries]
test_command = [
os.path.join(args.build_dir, 'bin',
'run_low_bandwidth_audio_test'), '-v',
'--num-retries', args.num_retries
]
else:
test_command = [os.path.join(args.build_dir, 'low_bandwidth_audio_test')]
test_command = [
os.path.join(args.build_dir, 'low_bandwidth_audio_test')
]
analyzers = [Analyzer('pesq', _RunPesq, pesq_path, 16000)]
# Check if POLQA can run at all, or skip the 48 kHz tests entirely.
example_path = os.path.join(SRC_DIR, 'resources',
'voice_engine', 'audio_tiny48.wav')
example_path = os.path.join(SRC_DIR, 'resources', 'voice_engine',
'audio_tiny48.wav')
if polqa_path and _RunPolqa(polqa_path, example_path, example_path):
analyzers.append(Analyzer('polqa', _RunPolqa, polqa_path, 48000))
histograms = histogram_set.HistogramSet()
for analyzer in analyzers:
# Start the test executable that produces audio files.
test_process = subprocess.Popen(
_LogCommand(test_command + [
test_process = subprocess.Popen(_LogCommand(test_command + [
'--sample_rate_hz=%d' % analyzer.sample_rate_hz,
'--test_case_prefix=%s' % analyzer.name,
] + args.extra_test_args),
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
perf_results_file = None
try:
lines = iter(test_process.stdout.readline, '')
@ -299,10 +326,15 @@ def main():
if android_device:
adb_prefix += ('-s', android_device)
reference_file = _GetFile(reference_file, out_dir,
android=args.android, adb_prefix=adb_prefix)
degraded_file = _GetFile(degraded_file, out_dir, move=True,
android=args.android, adb_prefix=adb_prefix)
reference_file = _GetFile(reference_file,
out_dir,
android=args.android,
adb_prefix=adb_prefix)
degraded_file = _GetFile(degraded_file,
out_dir,
move=True,
android=args.android,
adb_prefix=adb_prefix)
analyzer_results = analyzer.func(analyzer.executable,
reference_file, degraded_file)
@ -312,7 +344,8 @@ def main():
hist.diagnostics[reserved_infos.STORIES.name] = user_story
# Output human readable results.
print 'RESULT %s: %s= %s %s' % (metric, test_name, value, units)
print 'RESULT %s: %s= %s %s' % (metric, test_name, value,
units)
if args.remove:
os.remove(reference_file)
@ -320,8 +353,11 @@ def main():
finally:
test_process.terminate()
if perf_results_file:
perf_results_file = _GetFile(perf_results_file, out_dir, move=True,
android=args.android, adb_prefix=adb_prefix)
perf_results_file = _GetFile(perf_results_file,
out_dir,
move=True,
android=args.android,
adb_prefix=adb_prefix)
_MergeInPerfResultsFromCcTests(histograms, perf_results_file)
if args.remove:
os.remove(perf_results_file)

(next changed file)

@ -11,7 +11,6 @@ import os
import unittest
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT_DIR = os.path.join(SCRIPT_DIR, os.pardir)
sys.path.append(PARENT_DIR)
@ -21,11 +20,13 @@ import low_bandwidth_audio_test
class TestExtractTestRuns(unittest.TestCase):
def _TestLog(self, log, *expected):
self.assertEqual(
tuple(low_bandwidth_audio_test.ExtractTestRuns(log.splitlines(True))),
expected)
tuple(
low_bandwidth_audio_test.ExtractTestRuns(
log.splitlines(True))), expected)
def testLinux(self):
self._TestLog(LINUX_LOG,
self._TestLog(
LINUX_LOG,
(None, 'GoodNetworkHighBitrate',
'/webrtc/src/resources/voice_engine/audio_tiny16.wav',
'/webrtc/src/out/LowBandwidth_GoodNetworkHighBitrate.wav', None),
@ -42,19 +43,22 @@ class TestExtractTestRuns(unittest.TestCase):
'/webrtc/src/out/PCLowBandwidth_perf_48.json'))
def testAndroid(self):
self._TestLog(ANDROID_LOG,
('ddfa6149', 'Mobile2GNetwork',
self._TestLog(ANDROID_LOG, (
'ddfa6149', 'Mobile2GNetwork',
'/sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav',
'/sdcard/chromium_tests_root/LowBandwidth_Mobile2GNetwork.wav', None),
('TA99205CNO', 'GoodNetworkHighBitrate',
'/sdcard/chromium_tests_root/LowBandwidth_Mobile2GNetwork.wav',
None
), (
'TA99205CNO', 'GoodNetworkHighBitrate',
'/sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav',
'/sdcard/chromium_tests_root/LowBandwidth_GoodNetworkHighBitrate.wav',
None),
('ddfa6149', 'PCMobile2GNetwork',
None
), (
'ddfa6149', 'PCMobile2GNetwork',
'/sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav',
'/sdcard/chromium_tests_root/PCLowBandwidth_PCMobile2GNetwork.wav',
'/sdcard/chromium_tests_root/PCLowBandwidth_perf_48.json'),
('TA99205CNO', 'PCGoodNetworkHighBitrate',
'/sdcard/chromium_tests_root/PCLowBandwidth_perf_48.json'
), ('TA99205CNO', 'PCGoodNetworkHighBitrate',
'/sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav',
('/sdcard/chromium_tests_root/'
'PCLowBandwidth_PCGoodNetworkHighBitrate.wav'),
@ -233,6 +237,5 @@ I 16.608s tear_down_device(ddfa6149) Wrote device cache: /webrtc/src/out/debu
I 16.608s tear_down_device(TA99205CNO) Wrote device cache: /webrtc/src/out/debug-android/device_cache_TA99305CMO.json
'''
if __name__ == "__main__":
unittest.main()

(next changed file)

@ -15,6 +15,7 @@ import time
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice
def main():
parser = OptionParser()
@ -25,19 +26,16 @@ def main():
dest='videooutsave',
help='The path where to save the video out file on local computer')
parser.add_option(
'--videoout',
parser.add_option('--videoout',
dest='videoout',
help='The path where to put the video out file')
parser.add_option(
'--videoout_width',
parser.add_option('--videoout_width',
dest='videoout_width',
type='int',
help='The width for the video out file')
parser.add_option(
'--videoout_height',
parser.add_option('--videoout_height',
dest='videoout_height',
type='int',
help='The height for the video out file')
@ -47,8 +45,7 @@ def main():
dest='videoin',
help='The path where to read input file instead of camera')
parser.add_option(
'--call_length',
parser.add_option('--call_length',
dest='call_length',
type='int',
help='The length of the call')
@ -69,13 +66,13 @@ def main():
call_length = options.call_length or 10
room = ''.join(random.choice(string.ascii_letters + string.digits)
for _ in range(8))
room = ''.join(
random.choice(string.ascii_letters + string.digits) for _ in range(8))
# Delete output video file.
if videoout:
subprocess.check_call(['adb', '-s', devname, 'shell', 'rm', '-f',
videoout])
subprocess.check_call(
['adb', '-s', devname, 'shell', 'rm', '-f', videoout])
device = MonkeyRunner.waitForConnection(2, devname)
@ -86,22 +83,28 @@ def main():
'org.appspot.apprtc.VIDEOCODEC': 'VP8',
'org.appspot.apprtc.CAPTURETOTEXTURE': False,
'org.appspot.apprtc.CAMERA2': False,
'org.appspot.apprtc.ROOMID': room}
'org.appspot.apprtc.ROOMID': room
}
if videoin:
extras.update({'org.appspot.apprtc.VIDEO_FILE_AS_CAMERA': videoin})
if videoout:
extras.update({
'org.appspot.apprtc.SAVE_REMOTE_VIDEO_TO_FILE': videoout,
'org.appspot.apprtc.SAVE_REMOTE_VIDEO_TO_FILE_WIDTH': videoout_width,
'org.appspot.apprtc.SAVE_REMOTE_VIDEO_TO_FILE_HEIGHT': videoout_height})
'org.appspot.apprtc.SAVE_REMOTE_VIDEO_TO_FILE':
videoout,
'org.appspot.apprtc.SAVE_REMOTE_VIDEO_TO_FILE_WIDTH':
videoout_width,
'org.appspot.apprtc.SAVE_REMOTE_VIDEO_TO_FILE_HEIGHT':
videoout_height
})
print extras
device.startActivity(data='https://appr.tc',
action='android.intent.action.VIEW',
component='org.appspot.apprtc/.ConnectActivity', extras=extras)
component='org.appspot.apprtc/.ConnectActivity',
extras=extras)
print 'Running a call for %d seconds' % call_length
for _ in xrange(call_length):
@ -116,9 +119,9 @@ def main():
if videooutsave:
time.sleep(2)
subprocess.check_call(['adb', '-s', devname, 'pull',
videoout, videooutsave])
subprocess.check_call(
['adb', '-s', devname, 'pull', videoout, videooutsave])
if __name__ == '__main__':
main()

(next changed file)

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
This scripts tests creating an Android Studio project using the
generate_gradle.py script and making a debug build using it.
@ -23,11 +22,10 @@ import subprocess
import sys
import tempfile
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir))
GENERATE_GRADLE_SCRIPT = os.path.join(SRC_DIR,
'build/android/gradle/generate_gradle.py')
GENERATE_GRADLE_SCRIPT = os.path.join(
SRC_DIR, 'build/android/gradle/generate_gradle.py')
GRADLEW_BIN = os.path.join(SCRIPT_DIR, 'third_party/gradle/gradlew')
@ -62,12 +60,14 @@ def main():
try:
env = os.environ.copy()
env['PATH'] = os.pathsep.join([
os.path.join(SRC_DIR, 'third_party', 'depot_tools'), env.get('PATH', '')
os.path.join(SRC_DIR, 'third_party', 'depot_tools'),
env.get('PATH', '')
])
_RunCommand([GENERATE_GRADLE_SCRIPT, '--output-directory', output_dir,
'--target', '//examples:AppRTCMobile',
'--project-dir', project_dir,
'--use-gradle-process-resources', '--split-projects'],
_RunCommand([
GENERATE_GRADLE_SCRIPT, '--output-directory', output_dir,
'--target', '//examples:AppRTCMobile', '--project-dir',
project_dir, '--use-gradle-process-resources', '--split-projects'
],
env=env)
_RunCommand([GRADLEW_BIN, 'assembleDebug'], project_dir)
finally:

(next changed file)

@ -73,10 +73,11 @@ def ParseAnaDump(dump_file_to_parse):
if event.type == debug_dump_pb2.Event.ENCODER_RUNTIME_CONFIG:
for decision in event.encoder_runtime_config.DESCRIPTOR.fields:
if event.encoder_runtime_config.HasField(decision.name):
decisions[decision.name]['time'].append(event.timestamp -
first_time_stamp)
decisions[decision.name]['time'].append(
event.timestamp - first_time_stamp)
decisions[decision.name]['value'].append(
getattr(event.encoder_runtime_config, decision.name))
getattr(event.encoder_runtime_config,
decision.name))
if event.type == debug_dump_pb2.Event.NETWORK_METRICS:
for metric in event.network_metrics.DESCRIPTOR.fields:
if event.network_metrics.HasField(metric.name):
@ -89,10 +90,11 @@ def ParseAnaDump(dump_file_to_parse):
def main():
parser = OptionParser()
parser.add_option(
"-f", "--dump_file", dest="dump_file_to_parse", help="dump file to parse")
parser.add_option(
'-m',
parser.add_option("-f",
"--dump_file",
dest="dump_file_to_parse",
help="dump file to parse")
parser.add_option('-m',
'--metric_plot',
default=[],
type=str,
@ -100,8 +102,7 @@ def main():
dest='metric_keys',
action='append')
parser.add_option(
'-d',
parser.add_option('-d',
'--decision_plot',
default=[],
type=str,
@ -143,5 +144,6 @@ def main():
f.subplots_adjust(hspace=0.3)
plt.show()
if __name__ == "__main__":
main()

(next changed file)

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Perform APM module quality assessment on one or more input files using one or
more APM simulator configuration files and one or more test data generators.
@ -54,28 +53,44 @@ def _InstanceArgumentsParser():
'one or more APM simulator configuration files and one or more '
'test data generators.'))
parser.add_argument('-c', '--config_files', nargs='+', required=False,
parser.add_argument('-c',
'--config_files',
nargs='+',
required=False,
help=('path to the configuration files defining the '
'arguments with which the APM simulator tool is '
'called'),
default=[_DEFAULT_CONFIG_FILE])
parser.add_argument('-i', '--capture_input_files', nargs='+', required=True,
parser.add_argument(
'-i',
'--capture_input_files',
nargs='+',
required=True,
help='path to the capture input wav files (one or more)')
parser.add_argument('-r', '--render_input_files', nargs='+', required=False,
parser.add_argument('-r',
'--render_input_files',
nargs='+',
required=False,
help=('path to the render input wav files; either '
'omitted or one file for each file in '
'--capture_input_files (files will be paired by '
'index)'), default=None)
'index)'),
default=None)
parser.add_argument('-p', '--echo_path_simulator', required=False,
parser.add_argument('-p',
'--echo_path_simulator',
required=False,
help=('custom echo path simulator name; required if '
'--render_input_files is specified'),
choices=_ECHO_PATH_SIMULATOR_NAMES,
default=echo_path_simulation.NoEchoPathSimulator.NAME)
parser.add_argument('-t', '--test_data_generators', nargs='+', required=False,
parser.add_argument('-t',
'--test_data_generators',
nargs='+',
required=False,
help='custom list of test data generators to use',
choices=_TEST_DATA_GENERATORS_NAMES,
default=_TEST_DATA_GENERATORS_NAMES)
@ -86,21 +101,28 @@ def _InstanceArgumentsParser():
AdditiveNoiseTestDataGenerator. \
DEFAULT_NOISE_TRACKS_PATH)
parser.add_argument('-e', '--eval_scores', nargs='+', required=False,
parser.add_argument('-e',
'--eval_scores',
nargs='+',
required=False,
help='custom list of evaluation scores to use',
choices=_EVAL_SCORE_WORKER_NAMES,
default=_EVAL_SCORE_WORKER_NAMES)
parser.add_argument('-o', '--output_dir', required=False,
parser.add_argument('-o',
'--output_dir',
required=False,
help=('base path to the output directory in which the '
'output wav files and the evaluation outcomes '
'are saved'),
default='output')
parser.add_argument('--polqa_path', required=True,
parser.add_argument('--polqa_path',
required=True,
help='path to the POLQA tool')
parser.add_argument('--air_db_path', required=True,
parser.add_argument('--air_db_path',
required=True,
help='path to the Aechen IR database')
parser.add_argument('--apm_sim_path', required=False,
@ -109,36 +131,47 @@ def _InstanceArgumentsParser():
AudioProcWrapper. \
DEFAULT_APM_SIMULATOR_BIN_PATH)
parser.add_argument('--echo_metric_tool_bin_path', required=False,
parser.add_argument('--echo_metric_tool_bin_path',
required=False,
help=('path to the echo metric binary '
'(required for the echo eval score)'),
default=None)
parser.add_argument('--copy_with_identity_generator', required=False,
parser.add_argument(
'--copy_with_identity_generator',
required=False,
help=('If true, the identity test data generator makes a '
'copy of the clean speech input file.'),
default=False)
parser.add_argument('--external_vad_paths', nargs='+', required=False,
parser.add_argument('--external_vad_paths',
nargs='+',
required=False,
help=('Paths to external VAD programs. Each must take'
'\'-i <wav file> -o <output>\' inputs'), default=[])
'\'-i <wav file> -o <output>\' inputs'),
default=[])
parser.add_argument('--external_vad_names', nargs='+', required=False,
parser.add_argument('--external_vad_names',
nargs='+',
required=False,
help=('Keys to the vad paths. Must be different and '
'as many as the paths.'), default=[])
'as many as the paths.'),
default=[])
return parser
def _ValidateArguments(args, parser):
if args.capture_input_files and args.render_input_files and (
len(args.capture_input_files) != len(args.render_input_files)):
parser.error('--render_input_files and --capture_input_files must be lists '
if args.capture_input_files and args.render_input_files and (len(
args.capture_input_files) != len(args.render_input_files)):
parser.error(
'--render_input_files and --capture_input_files must be lists '
'having the same length')
sys.exit(1)
if args.render_input_files and not args.echo_path_simulator:
parser.error('when --render_input_files is set, --echo_path_simulator is '
parser.error(
'when --render_input_files is set, --echo_path_simulator is '
'also required')
sys.exit(1)
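
A side note on the validation hunk above: argparse's parser.error() already terminates the process, so a standalone sketch of the same check can rely on it alone (file names invented):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--capture_input_files', nargs='+', required=True)
    parser.add_argument('-r', '--render_input_files', nargs='+', default=None)
    args = parser.parse_args(['-i', 'c1.wav', 'c2.wav', '-r', 'r1.wav'])

    # Files are paired by index, so both lists must be equally long;
    # error() prints the message and exits with status 2.
    if args.render_input_files and (
            len(args.capture_input_files) != len(args.render_input_files)):
        parser.error('--render_input_files and --capture_input_files must be '
                     'lists having the same length')
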
@ -162,16 +195,15 @@ def main():
aechen_ir_database_path=args.air_db_path,
noise_tracks_path=args.additive_noise_tracks_path,
copy_with_identity=args.copy_with_identity_generator)),
evaluation_score_factory=eval_scores_factory.EvaluationScoreWorkerFactory(
evaluation_score_factory=eval_scores_factory.
EvaluationScoreWorkerFactory(
polqa_tool_bin_path=os.path.join(args.polqa_path, _POLQA_BIN_NAME),
echo_metric_tool_bin_path=args.echo_metric_tool_bin_path
),
echo_metric_tool_bin_path=args.echo_metric_tool_bin_path),
ap_wrapper=audioproc_wrapper.AudioProcWrapper(args.apm_sim_path),
evaluator=evaluation.ApmModuleEvaluator(),
external_vads=external_vad.ExternalVad.ConstructVadDict(
args.external_vad_paths, args.external_vad_names))
simulator.Run(
config_filepaths=args.config_files,
simulator.Run(config_filepaths=args.config_files,
capture_input_filepaths=args.capture_input_files,
render_input_filepaths=args.render_input_files,
echo_path_simulator_name=args.echo_path_simulator,

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Shows boxplots of given score for different values of selected
parameters. Can be used to compare scores by audioproc_f flag.
@ -37,15 +36,23 @@ def InstanceArgumentsParser():
'Shows boxplot of given score for different values of selected'
'parameters. Can be used to compare scores by audioproc_f flag')
parser.add_argument('-v', '--eval_score', required=True,
parser.add_argument('-v',
'--eval_score',
required=True,
help=('Score name for constructing boxplots'))
parser.add_argument('-n', '--config_dir', required=False,
parser.add_argument(
'-n',
'--config_dir',
required=False,
help=('path to the folder with the configuration files'),
default='apm_configs')
parser.add_argument('-z', '--params_to_plot', required=True,
nargs='+', help=('audioproc_f parameter values'
parser.add_argument('-z',
'--params_to_plot',
required=True,
nargs='+',
help=('audioproc_f parameter values'
'by which to group scores (no leading dash)'))
return parser
@ -82,7 +89,9 @@ def FilterScoresByParams(data_frame, filter_params, score_name, config_dir):
score_name]
# Exactly one of |params_to_plot| must match:
(matching_param, ) = [x for x in filter_params if '-' + x in config_json]
(matching_param, ) = [
x for x in filter_params if '-' + x in config_json
]
# Add scores for every track to the result.
for capture_name in data_cell_scores.capture:
@ -128,8 +137,7 @@ def main():
# Filter the data by `args.params_to_plot`
scores_filtered = FilterScoresByParams(scores_data_frame,
args.params_to_plot,
args.eval_score,
args.config_dir)
args.eval_score, args.config_dir)
data_list = sorted(scores_filtered.items())
data_values = [_FlattenToScoresList(x) for (_, x) in data_list]

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Export the scores computed by the apm_quality_assessment.py script into an
HTML file.
"""
@ -32,11 +31,14 @@ def _BuildOutputFilename(filename_suffix):
return 'results.html'
return 'results-{}.html'.format(filename_suffix)
def main():
# Init.
logging.basicConfig(level=logging.DEBUG) # TODO(alessio): INFO once debugged.
logging.basicConfig(
level=logging.DEBUG) # TODO(alessio): INFO once debugged.
parser = collect_data.InstanceArgumentsParser()
parser.add_argument('-f', '--filename_suffix',
parser.add_argument('-f',
'--filename_suffix',
help=('suffix of the exported file'))
parser.description = ('Exports pre-computed APM module quality assessment '
'results into HTML tables')
@ -48,8 +50,8 @@ def main():
scores_data_frame = collect_data.FindScores(src_path, args)
# Export.
output_filepath = os.path.join(args.output_dir, _BuildOutputFilename(
args.filename_suffix))
output_filepath = os.path.join(args.output_dir,
_BuildOutputFilename(args.filename_suffix))
exporter = export.HtmlExport(output_filepath)
exporter.Export(scores_data_frame)

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Generate .json files with which the APM module can be tested using the
apm_quality_assessment.py script and audioproc_f as APM simulator.
"""
@ -49,8 +48,8 @@ def _GenerateDefaultOverridden(config_override):
config = config_override[config_filename]
config['-all_default'] = None
config_filepath = os.path.join(OUTPUT_PATH, 'default-{}.json'.format(
config_filename))
config_filepath = os.path.join(
OUTPUT_PATH, 'default-{}.json'.format(config_filename))
logging.debug('config file <%s> | %s', config_filepath, config)
data_access.AudioProcConfigFile.Save(config_filepath, config)
@ -61,13 +60,27 @@ def _GenerateAllDefaultButOne():
"""Disables the flags enabled by default one-by-one.
"""
config_sets = {
'no_AEC': {'-aec': 0,},
'no_AGC': {'-agc': 0,},
'no_HP_filter': {'-hpf': 0,},
'no_level_estimator': {'-le': 0,},
'no_noise_suppressor': {'-ns': 0,},
'no_transient_suppressor': {'-ts': 0,},
'no_vad': {'-vad': 0,},
'no_AEC': {
'-aec': 0,
},
'no_AGC': {
'-agc': 0,
},
'no_HP_filter': {
'-hpf': 0,
},
'no_level_estimator': {
'-le': 0,
},
'no_noise_suppressor': {
'-ns': 0,
},
'no_transient_suppressor': {
'-ts': 0,
},
'no_vad': {
'-vad': 0,
},
}
_GenerateDefaultOverridden(config_sets)
@ -76,14 +89,31 @@ def _GenerateAllDefaultPlusOne():
"""Enables the flags disabled by default one-by-one.
"""
config_sets = {
'with_AECM': {'-aec': 0, '-aecm': 1,}, # AEC and AECM are exclusive.
'with_AGC_limiter': {'-agc_limiter': 1,},
'with_AEC_delay_agnostic': {'-delay_agnostic': 1,},
'with_drift_compensation': {'-drift_compensation': 1,},
'with_residual_echo_detector': {'-ed': 1,},
'with_AEC_extended_filter': {'-extended_filter': 1,},
'with_LC': {'-lc': 1,},
'with_refined_adaptive_filter': {'-refined_adaptive_filter': 1,},
'with_AECM': {
'-aec': 0,
'-aecm': 1,
}, # AEC and AECM are exclusive.
'with_AGC_limiter': {
'-agc_limiter': 1,
},
'with_AEC_delay_agnostic': {
'-delay_agnostic': 1,
},
'with_drift_compensation': {
'-drift_compensation': 1,
},
'with_residual_echo_detector': {
'-ed': 1,
},
'with_AEC_extended_filter': {
'-extended_filter': 1,
},
'with_LC': {
'-lc': 1,
},
'with_refined_adaptive_filter': {
'-refined_adaptive_filter': 1,
},
}
_GenerateDefaultOverridden(config_sets)
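
The dictionary reshaping above reflects a common formatter rule: a trailing comma inside a literal forces every entry onto its own line. A toy sketch, assuming yapf-like behavior:

    # No trailing comma: the literal may stay on one line.
    compact = {'-aec': 0}

    # Trailing comma: the formatter splits one entry per line.
    expanded = {
        '-aec': 0,
    }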

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Finds the APM configuration that maximizes a provided metric by
parsing the output generated apm_quality_assessment.py.
"""
@ -20,6 +19,7 @@ import os
import quality_assessment.data_access as data_access
import quality_assessment.collect_data as collect_data
def _InstanceArgumentsParser():
"""Arguments parser factory. Extends the arguments from 'collect_data'
with a few extra for selecting what parameters to optimize for.
@ -29,23 +29,33 @@ def _InstanceArgumentsParser():
'Rudimentary optimization of a function over different parameter'
'combinations.')
parser.add_argument('-n', '--config_dir', required=False,
parser.add_argument(
'-n',
'--config_dir',
required=False,
help=('path to the folder with the configuration files'),
default='apm_configs')
parser.add_argument('-p', '--params', required=True, nargs='+',
parser.add_argument('-p',
'--params',
required=True,
nargs='+',
help=('parameters to parse from the config files in'
'config_dir'))
parser.add_argument('-z', '--params_not_to_optimize', required=False,
nargs='+', default=[],
parser.add_argument(
'-z',
'--params_not_to_optimize',
required=False,
nargs='+',
default=[],
help=('parameters from `params` not to be optimized for'))
return parser
def _ConfigurationAndScores(data_frame, params,
params_not_to_optimize, config_dir):
def _ConfigurationAndScores(data_frame, params, params_not_to_optimize,
config_dir):
"""Returns a list of all configurations and scores.
Args:
@ -74,7 +84,8 @@ def _ConfigurationAndScores(data_frame, params,
"""
results = collections.defaultdict(list)
config_names = data_frame['apm_config'].drop_duplicates().values.tolist()
score_names = data_frame['eval_score_name'].drop_duplicates().values.tolist()
score_names = data_frame['eval_score_name'].drop_duplicates(
).values.tolist()
# Normalize the scores
normalization_constants = {}
@ -105,8 +116,7 @@ def _ConfigurationAndScores(data_frame, params,
else:
result['params'][param] = config_json['-' + param]
current_param_combination = param_combination(
**config_optimize_params)
current_param_combination = param_combination(**config_optimize_params)
results[current_param_combination].append(result)
return results
@ -165,8 +175,7 @@ def main():
src_path = collect_data.ConstructSrcPath(args)
logging.debug('Src path <%s>', src_path)
scores_data_frame = collect_data.FindScores(src_path, args)
all_scores = _ConfigurationAndScores(scores_data_frame,
args.params,
all_scores = _ConfigurationAndScores(scores_data_frame, args.params,
args.params_not_to_optimize,
args.config_dir)
@ -175,5 +184,6 @@ def main():
logging.info('Optimal parameter combination: <%s>', opt_param)
logging.info('It\'s score values: <%s>', all_scores[opt_param])
if __name__ == "__main__":
main()
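
For context, the grouping idiom in _ConfigurationAndScores above boils down to a defaultdict keyed by a hashable namedtuple; the field names here are invented:

    import collections

    ParamCombination = collections.namedtuple('ParamCombination', ['aec', 'agc'])

    results = collections.defaultdict(list)
    for config, score in [({'aec': 1, 'agc': 0}, 0.5),
                          ({'aec': 1, 'agc': 0}, 0.7),
                          ({'aec': 0, 'agc': 1}, 0.9)]:
        results[ParamCombination(**config)].append(score)

    # Each key now maps to all scores sharing that parameter combination.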

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Unit tests for the apm_quality_assessment module.
"""
@ -16,6 +15,7 @@ import mock
import apm_quality_assessment
class TestSimulationScript(unittest.TestCase):
"""Unit tests for the apm_quality_assessment module.
"""

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Extraction of annotations from audio files.
"""
@ -40,8 +39,8 @@ class AudioAnnotationsExtractor(object):
def __init__(self, value):
if (not isinstance(value, int)) or not 0 <= value <= 7:
raise exceptions.InitializationException(
'Invalid vad type: ' + value)
raise exceptions.InitializationException('Invalid vad type: ' +
value)
self._value = value
def Contains(self, vad_type):
@ -69,12 +68,11 @@ class AudioAnnotationsExtractor(object):
# VAD params.
_VAD_THRESHOLD = 1
_VAD_WEBRTC_PATH = os.path.join(os.path.dirname(
os.path.abspath(__file__)), os.pardir, os.pardir)
_VAD_WEBRTC_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.pardir, os.pardir)
_VAD_WEBRTC_COMMON_AUDIO_PATH = os.path.join(_VAD_WEBRTC_PATH, 'vad')
_VAD_WEBRTC_APM_PATH = os.path.join(
_VAD_WEBRTC_PATH, 'apm_vad')
_VAD_WEBRTC_APM_PATH = os.path.join(_VAD_WEBRTC_PATH, 'apm_vad')
def __init__(self, vad_type, external_vads=None):
self._signal = None
@ -100,10 +98,9 @@ class AudioAnnotationsExtractor(object):
'The external VAD names must be unique.')
for vad in external_vads.values():
if not isinstance(vad, external_vad.ExternalVad):
raise exceptions.InitializationException(
'Invalid vad type: ' + str(type(vad)))
logging.info('External VAD used for annotation: ' +
str(vad.name))
raise exceptions.InitializationException('Invalid vad type: ' +
str(type(vad)))
logging.info('External VAD used for annotation: ' + str(vad.name))
assert os.path.exists(self._VAD_WEBRTC_COMMON_AUDIO_PATH), \
self._VAD_WEBRTC_COMMON_AUDIO_PATH
@ -132,8 +129,8 @@ class AudioAnnotationsExtractor(object):
elif vad_type == self.VadType.WEBRTC_APM:
return (self._apm_vad_probs, self._apm_vad_rms)
else:
raise exceptions.InitializationException(
'Invalid vad type: ' + vad_type)
raise exceptions.InitializationException('Invalid vad type: ' +
vad_type)
def GetVadFrameSize(self):
return self._vad_frame_size
@ -143,19 +140,21 @@ class AudioAnnotationsExtractor(object):
def Extract(self, filepath):
# Load signal.
self._signal = signal_processing.SignalProcessingUtils.LoadWav(filepath)
self._signal = signal_processing.SignalProcessingUtils.LoadWav(
filepath)
if self._signal.channels != 1:
raise NotImplementedError('Multiple-channel annotations not implemented')
raise NotImplementedError(
'Multiple-channel annotations not implemented')
# Level estimation params.
self._level_frame_size = int(self._signal.frame_rate / 1000 * (
self._LEVEL_FRAME_SIZE_MS))
self._level_frame_size = int(self._signal.frame_rate / 1000 *
(self._LEVEL_FRAME_SIZE_MS))
self._c_attack = 0.0 if self._LEVEL_ATTACK_MS == 0 else (
self._ONE_DB_REDUCTION ** (
self._LEVEL_FRAME_SIZE_MS / self._LEVEL_ATTACK_MS))
self._ONE_DB_REDUCTION**(self._LEVEL_FRAME_SIZE_MS /
self._LEVEL_ATTACK_MS))
self._c_decay = 0.0 if self._LEVEL_DECAY_MS == 0 else (
self._ONE_DB_REDUCTION ** (
self._LEVEL_FRAME_SIZE_MS / self._LEVEL_DECAY_MS))
self._ONE_DB_REDUCTION**(self._LEVEL_FRAME_SIZE_MS /
self._LEVEL_DECAY_MS))
# Compute level.
self._LevelEstimation()
@ -177,11 +176,12 @@ class AudioAnnotationsExtractor(object):
self._external_vads[extvad_name].Run(filepath)
def Save(self, output_path, annotation_name=""):
ext_kwargs = {'extvad_conf-' + ext_vad:
ext_kwargs = {
'extvad_conf-' + ext_vad:
self._external_vads[ext_vad].GetVadOutput()
for ext_vad in self._external_vads}
np.savez_compressed(
file=os.path.join(
for ext_vad in self._external_vads
}
np.savez_compressed(file=os.path.join(
output_path,
self.GetOutputFileNameTemplate().format(annotation_name)),
level=self._level,
@ -193,8 +193,7 @@ class AudioAnnotationsExtractor(object):
vad_frame_size_ms=self._vad_frame_size_ms,
vad_probs=self._apm_vad_probs,
vad_rms=self._apm_vad_rms,
**ext_kwargs
)
**ext_kwargs)
def _LevelEstimation(self):
# Read samples.
@ -204,8 +203,9 @@ class AudioAnnotationsExtractor(object):
num_samples = num_frames * self._level_frame_size
# Envelope.
self._level = np.max(np.reshape(np.abs(samples[:num_samples]), (
num_frames, self._level_frame_size)), axis=1)
self._level = np.max(np.reshape(np.abs(samples[:num_samples]),
(num_frames, self._level_frame_size)),
axis=1)
assert len(self._level) == num_frames
# Envelope smoothing.
@ -213,8 +213,8 @@ class AudioAnnotationsExtractor(object):
self._level[0] = smooth(self._level[0], 0.0, self._c_attack)
for i in range(1, num_frames):
self._level[i] = smooth(
self._level[i], self._level[i - 1], self._c_attack if (
self._level[i] > self._level[i - 1]) else self._c_decay)
self._level[i], self._level[i - 1], self._c_attack if
(self._level[i] > self._level[i - 1]) else self._c_decay)
def _RunWebRtcCommonAudioVad(self, wav_file_path, sample_rate):
self._common_audio_vad = None
@ -223,15 +223,16 @@ class AudioAnnotationsExtractor(object):
# Create temporary output path.
tmp_path = tempfile.mkdtemp()
output_file_path = os.path.join(
tmp_path, os.path.split(wav_file_path)[1] + '_vad.tmp')
tmp_path,
os.path.split(wav_file_path)[1] + '_vad.tmp')
# Call WebRTC VAD.
try:
subprocess.call([
self._VAD_WEBRTC_COMMON_AUDIO_PATH,
'-i', wav_file_path,
'-o', output_file_path
], cwd=self._VAD_WEBRTC_PATH)
self._VAD_WEBRTC_COMMON_AUDIO_PATH, '-i', wav_file_path, '-o',
output_file_path
],
cwd=self._VAD_WEBRTC_PATH)
# Read bytes.
with open(output_file_path, 'rb') as f:
@ -246,7 +247,8 @@ class AudioAnnotationsExtractor(object):
# Init VAD vector.
num_bytes = len(raw_data)
num_frames = 8 * (num_bytes - 2) - extra_bits # 8 frames for each byte.
num_frames = 8 * (num_bytes -
2) - extra_bits # 8 frames for each byte.
self._common_audio_vad = np.zeros(num_frames, np.uint8)
# Read VAD decisions.
@ -256,7 +258,8 @@ class AudioAnnotationsExtractor(object):
self._common_audio_vad[i * 8 + j] = int(byte & 1)
byte = byte >> 1
except Exception as e:
logging.error('Error while running the WebRTC VAD (' + e.message + ')')
logging.error('Error while running the WebRTC VAD (' + e.message +
')')
finally:
if os.path.exists(tmp_path):
shutil.rmtree(tmp_path)
@ -265,21 +268,23 @@ class AudioAnnotationsExtractor(object):
# Create temporary output path.
tmp_path = tempfile.mkdtemp()
output_file_path_probs = os.path.join(
tmp_path, os.path.split(wav_file_path)[1] + '_vad_probs.tmp')
tmp_path,
os.path.split(wav_file_path)[1] + '_vad_probs.tmp')
output_file_path_rms = os.path.join(
tmp_path, os.path.split(wav_file_path)[1] + '_vad_rms.tmp')
tmp_path,
os.path.split(wav_file_path)[1] + '_vad_rms.tmp')
# Call WebRTC VAD.
try:
subprocess.call([
self._VAD_WEBRTC_APM_PATH,
'-i', wav_file_path,
'-o_probs', output_file_path_probs,
'-o_rms', output_file_path_rms
], cwd=self._VAD_WEBRTC_PATH)
self._VAD_WEBRTC_APM_PATH, '-i', wav_file_path, '-o_probs',
output_file_path_probs, '-o_rms', output_file_path_rms
],
cwd=self._VAD_WEBRTC_PATH)
# Parse annotations.
self._apm_vad_probs = np.fromfile(output_file_path_probs, np.double)
self._apm_vad_probs = np.fromfile(output_file_path_probs,
np.double)
self._apm_vad_rms = np.fromfile(output_file_path_rms, np.double)
assert len(self._apm_vad_rms) == len(self._apm_vad_probs)
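
The attack/decay coefficients in the Extract hunk above implement a one-pole envelope follower. A minimal sketch of the idea, with made-up time constants and a smoothing helper that only approximates the module's own:

    import numpy as np

    one_db_reduction = 10.0 ** (-1.0 / 20)  # multiplier lowering a level by 1 dB
    frame_size_ms, attack_ms, decay_ms = 1.0, 5.0, 20.0
    c_attack = one_db_reduction ** (frame_size_ms / attack_ms)
    c_decay = one_db_reduction ** (frame_size_ms / decay_ms)

    def smooth(value, prev_value, coeff):
        # One-pole smoothing: the closer coeff is to 1, the slower the response.
        return coeff * prev_value + (1.0 - coeff) * value

    level = np.abs(np.random.randn(100))
    for i in range(1, len(level)):
        coeff = c_attack if level[i] > level[i - 1] else c_decay
        level[i] = smooth(level[i], level[i - 1], coeff)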

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Unit tests for the annotations module.
"""
@ -31,10 +30,9 @@ class TestAnnotationsExtraction(unittest.TestCase):
_CLEAN_TMP_OUTPUT = True
_DEBUG_PLOT_VAD = False
_VAD_TYPE_CLASS = annotations.AudioAnnotationsExtractor.VadType
_ALL_VAD_TYPES = (_VAD_TYPE_CLASS.ENERGY_THRESHOLD |
_VAD_TYPE_CLASS.WEBRTC_COMMON_AUDIO |
_VAD_TYPE_CLASS.WEBRTC_APM)
_ALL_VAD_TYPES = (_VAD_TYPE_CLASS.ENERGY_THRESHOLD
| _VAD_TYPE_CLASS.WEBRTC_COMMON_AUDIO
| _VAD_TYPE_CLASS.WEBRTC_APM)
def setUp(self):
"""Create temporary folder."""
@ -51,14 +49,15 @@ class TestAnnotationsExtraction(unittest.TestCase):
if self._CLEAN_TMP_OUTPUT:
shutil.rmtree(self._tmp_path)
else:
logging.warning(self.id() + ' did not clean the temporary path ' + (
self._tmp_path))
logging.warning(self.id() + ' did not clean the temporary path ' +
(self._tmp_path))
def testFrameSizes(self):
e = annotations.AudioAnnotationsExtractor(self._ALL_VAD_TYPES)
e.Extract(self._wav_file_path)
samples_to_ms = lambda n, sr: 1000 * n // sr
self.assertEqual(samples_to_ms(e.GetLevelFrameSize(), self._sample_rate),
self.assertEqual(
samples_to_ms(e.GetLevelFrameSize(), self._sample_rate),
e.GetLevelFrameSizeMs())
self.assertEqual(samples_to_ms(e.GetVadFrameSize(), self._sample_rate),
e.GetVadFrameSizeMs())
@ -70,36 +69,38 @@ class TestAnnotationsExtraction(unittest.TestCase):
e.Extract(self._wav_file_path)
if vad_type.Contains(self._VAD_TYPE_CLASS.ENERGY_THRESHOLD):
# pylint: disable=unpacking-non-sequence
vad_output = e.GetVadOutput(self._VAD_TYPE_CLASS.ENERGY_THRESHOLD)
vad_output = e.GetVadOutput(
self._VAD_TYPE_CLASS.ENERGY_THRESHOLD)
self.assertGreater(len(vad_output), 0)
self.assertGreaterEqual(float(np.sum(vad_output)) / len(vad_output),
0.95)
self.assertGreaterEqual(
float(np.sum(vad_output)) / len(vad_output), 0.95)
if vad_type.Contains(self._VAD_TYPE_CLASS.WEBRTC_COMMON_AUDIO):
# pylint: disable=unpacking-non-sequence
vad_output = e.GetVadOutput(self._VAD_TYPE_CLASS.WEBRTC_COMMON_AUDIO)
vad_output = e.GetVadOutput(
self._VAD_TYPE_CLASS.WEBRTC_COMMON_AUDIO)
self.assertGreater(len(vad_output), 0)
self.assertGreaterEqual(float(np.sum(vad_output)) / len(vad_output),
0.95)
self.assertGreaterEqual(
float(np.sum(vad_output)) / len(vad_output), 0.95)
if vad_type.Contains(self._VAD_TYPE_CLASS.WEBRTC_APM):
# pylint: disable=unpacking-non-sequence
(vad_probs, vad_rms) = e.GetVadOutput(self._VAD_TYPE_CLASS.WEBRTC_APM)
(vad_probs,
vad_rms) = e.GetVadOutput(self._VAD_TYPE_CLASS.WEBRTC_APM)
self.assertGreater(len(vad_probs), 0)
self.assertGreater(len(vad_rms), 0)
self.assertGreaterEqual(float(np.sum(vad_probs)) / len(vad_probs),
0.5)
self.assertGreaterEqual(float(np.sum(vad_rms)) / len(vad_rms), 20000)
self.assertGreaterEqual(
float(np.sum(vad_probs)) / len(vad_probs), 0.5)
self.assertGreaterEqual(
float(np.sum(vad_rms)) / len(vad_rms), 20000)
if self._DEBUG_PLOT_VAD:
frame_times_s = lambda num_frames, frame_size_ms: np.arange(
num_frames).astype(np.float32) * frame_size_ms / 1000.0
level = e.GetLevel()
t_level = frame_times_s(
num_frames=len(level),
t_level = frame_times_s(num_frames=len(level),
frame_size_ms=e.GetLevelFrameSizeMs())
t_vad = frame_times_s(
num_frames=len(vad_output),
t_vad = frame_times_s(num_frames=len(vad_output),
frame_size_ms=e.GetVadFrameSizeMs())
import matplotlib.pyplot as plt
plt.figure()
@ -113,7 +114,8 @@ class TestAnnotationsExtraction(unittest.TestCase):
e.Extract(self._wav_file_path)
e.Save(self._tmp_path, "fake-annotation")
data = np.load(os.path.join(
data = np.load(
os.path.join(
self._tmp_path,
e.GetOutputFileNameTemplate().format("fake-annotation")))
np.testing.assert_array_equal(e.GetLevel(), data['level'])
@ -125,9 +127,11 @@ class TestAnnotationsExtraction(unittest.TestCase):
e.GetVadOutput(self._VAD_TYPE_CLASS.WEBRTC_COMMON_AUDIO),
data['vad_output'])
np.testing.assert_array_equal(
e.GetVadOutput(self._VAD_TYPE_CLASS.WEBRTC_APM)[0], data['vad_probs'])
e.GetVadOutput(self._VAD_TYPE_CLASS.WEBRTC_APM)[0],
data['vad_probs'])
np.testing.assert_array_equal(
e.GetVadOutput(self._VAD_TYPE_CLASS.WEBRTC_APM)[1], data['vad_rms'])
e.GetVadOutput(self._VAD_TYPE_CLASS.WEBRTC_APM)[1],
data['vad_rms'])
self.assertEqual(np.uint8, data['vad_energy_output'].dtype)
self.assertEqual(np.float64, data['vad_probs'].dtype)
self.assertEqual(np.float64, data['vad_rms'].dtype)
@ -139,17 +143,16 @@ class TestAnnotationsExtraction(unittest.TestCase):
def testFakeExternalSaveLoad(self):
def FakeExternalFactory():
return external_vad.ExternalVad(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'fake_external_vad.py'),
'fake'
)
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'fake_external_vad.py'), 'fake')
for vad_type_value in range(0, self._ALL_VAD_TYPES + 1):
e = annotations.AudioAnnotationsExtractor(
vad_type_value,
{'fake': FakeExternalFactory()})
vad_type_value, {'fake': FakeExternalFactory()})
e.Extract(self._wav_file_path)
e.Save(self._tmp_path, annotation_name="fake-annotation")
data = np.load(os.path.join(
data = np.load(
os.path.join(
self._tmp_path,
e.GetOutputFileNameTemplate().format("fake-annotation")))
self.assertEqual(np.float32, data['extvad_conf-fake'].dtype)

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Class implementing a wrapper for APM simulators.
"""
@ -22,8 +21,8 @@ class AudioProcWrapper(object):
"""Wrapper for APM simulators.
"""
DEFAULT_APM_SIMULATOR_BIN_PATH = os.path.abspath(os.path.join(
os.pardir, 'audioproc_f'))
DEFAULT_APM_SIMULATOR_BIN_PATH = os.path.abspath(
os.path.join(os.pardir, 'audioproc_f'))
OUTPUT_FILENAME = 'output.wav'
def __init__(self, simulator_bin_path):
@ -43,7 +42,10 @@ class AudioProcWrapper(object):
def output_filepath(self):
return self._output_signal_filepath
def Run(self, config_filepath, capture_input_filepath, output_path,
def Run(self,
config_filepath,
capture_input_filepath,
output_path,
render_input_filepath=None):
"""Runs APM simulator.
@ -57,8 +59,8 @@ class AudioProcWrapper(object):
reverse or far-end).
"""
# Init.
self._output_signal_filepath = os.path.join(
output_path, self.OUTPUT_FILENAME)
self._output_signal_filepath = os.path.join(output_path,
self.OUTPUT_FILENAME)
profiling_stats_filepath = os.path.join(output_path, 'profiling.stats')
# Skip if the output has already been generated.
@ -71,12 +73,14 @@ class AudioProcWrapper(object):
# Set remaining parameters.
if not os.path.exists(capture_input_filepath):
raise exceptions.FileNotFoundError('cannot find capture input file')
raise exceptions.FileNotFoundError(
'cannot find capture input file')
self._config['-i'] = capture_input_filepath
self._config['-o'] = self._output_signal_filepath
if render_input_filepath is not None:
if not os.path.exists(render_input_filepath):
raise exceptions.FileNotFoundError('cannot find render input file')
raise exceptions.FileNotFoundError(
'cannot find render input file')
self._config['-ri'] = render_input_filepath
# Build arguments list.

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Imports a filtered subset of the scores and configurations computed
by apm_quality_assessment.py into a pandas data frame.
"""
@ -27,54 +26,71 @@ from . import data_access as data_access
from . import simulation as sim
# Compiled regular expressions used to extract score descriptors.
RE_CONFIG_NAME = re.compile(
sim.ApmModuleSimulator.GetPrefixApmConfig() + r'(.+)')
RE_CAPTURE_NAME = re.compile(
sim.ApmModuleSimulator.GetPrefixCapture() + r'(.+)')
RE_RENDER_NAME = re.compile(
sim.ApmModuleSimulator.GetPrefixRender() + r'(.+)')
RE_ECHO_SIM_NAME = re.compile(
sim.ApmModuleSimulator.GetPrefixEchoSimulator() + r'(.+)')
RE_CONFIG_NAME = re.compile(sim.ApmModuleSimulator.GetPrefixApmConfig() +
r'(.+)')
RE_CAPTURE_NAME = re.compile(sim.ApmModuleSimulator.GetPrefixCapture() +
r'(.+)')
RE_RENDER_NAME = re.compile(sim.ApmModuleSimulator.GetPrefixRender() + r'(.+)')
RE_ECHO_SIM_NAME = re.compile(sim.ApmModuleSimulator.GetPrefixEchoSimulator() +
r'(.+)')
RE_TEST_DATA_GEN_NAME = re.compile(
sim.ApmModuleSimulator.GetPrefixTestDataGenerator() + r'(.+)')
RE_TEST_DATA_GEN_PARAMS = re.compile(
sim.ApmModuleSimulator.GetPrefixTestDataGeneratorParameters() + r'(.+)')
RE_SCORE_NAME = re.compile(
sim.ApmModuleSimulator.GetPrefixScore() + r'(.+)(\..+)')
RE_SCORE_NAME = re.compile(sim.ApmModuleSimulator.GetPrefixScore() +
r'(.+)(\..+)')
def InstanceArgumentsParser():
"""Arguments parser factory.
"""
parser = argparse.ArgumentParser(description=(
'Override this description in a user script by changing'
parser = argparse.ArgumentParser(
description=('Override this description in a user script by changing'
' `parser.description` of the returned parser.'))
parser.add_argument('-o', '--output_dir', required=True,
parser.add_argument('-o',
'--output_dir',
required=True,
help=('the same base path used with the '
'apm_quality_assessment tool'))
parser.add_argument('-c', '--config_names', type=re.compile,
parser.add_argument(
'-c',
'--config_names',
type=re.compile,
help=('regular expression to filter the APM configuration'
' names'))
parser.add_argument('-i', '--capture_names', type=re.compile,
parser.add_argument(
'-i',
'--capture_names',
type=re.compile,
help=('regular expression to filter the capture signal '
'names'))
parser.add_argument('-r', '--render_names', type=re.compile,
parser.add_argument('-r',
'--render_names',
type=re.compile,
help=('regular expression to filter the render signal '
'names'))
parser.add_argument('-e', '--echo_simulator_names', type=re.compile,
parser.add_argument(
'-e',
'--echo_simulator_names',
type=re.compile,
help=('regular expression to filter the echo simulator '
'names'))
parser.add_argument('-t', '--test_data_generators', type=re.compile,
parser.add_argument('-t',
'--test_data_generators',
type=re.compile,
help=('regular expression to filter the test data '
'generator names'))
parser.add_argument('-s', '--eval_scores', type=re.compile,
parser.add_argument(
'-s',
'--eval_scores',
type=re.compile,
help=('regular expression to filter the evaluation score '
'names'))
@ -93,8 +109,8 @@ def _GetScoreDescriptors(score_filepath):
test data generator parameters as string, evaluation score name).
"""
fields = score_filepath.split(os.sep)[-7:]
extract_name = lambda index, reg_expr: (
reg_expr.match(fields[index]).groups(0)[0])
extract_name = lambda index, reg_expr: (reg_expr.match(fields[index]).
groups(0)[0])
return (
extract_name(0, RE_CONFIG_NAME),
extract_name(1, RE_CAPTURE_NAME),
@ -158,31 +174,17 @@ def FindScores(src_path, args):
scores = []
for score_filepath in glob.iglob(src_path):
# Extract score descriptor fields from the path.
(config_name,
capture_name,
render_name,
echo_simulator_name,
test_data_gen_name,
test_data_gen_params,
(config_name, capture_name, render_name, echo_simulator_name,
test_data_gen_name, test_data_gen_params,
score_name) = _GetScoreDescriptors(score_filepath)
# Ignore the score if required.
if _ExcludeScore(
config_name,
capture_name,
render_name,
echo_simulator_name,
test_data_gen_name,
score_name,
if _ExcludeScore(config_name, capture_name, render_name,
echo_simulator_name, test_data_gen_name, score_name,
args):
logging.info(
'ignored score: %s %s %s %s %s %s',
config_name,
capture_name,
render_name,
echo_simulator_name,
test_data_gen_name,
score_name)
logging.info('ignored score: %s %s %s %s %s %s', config_name,
capture_name, render_name, echo_simulator_name,
test_data_gen_name, score_name)
continue
# Read metadata and score.
@ -209,8 +211,7 @@ def FindScores(src_path, args):
score,
))
return pd.DataFrame(
data=scores,
return pd.DataFrame(data=scores,
columns=(
'clean_capture_input_filepath',
'echo_free_capture_filepath',

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Data access utility functions and classes.
"""
@ -68,8 +67,8 @@ class Metadata(object):
Returns:
Tuple with the paths to the input and output audio tracks.
"""
metadata_filepath = os.path.join(
metadata_path, cls._AUDIO_TEST_DATA_FILENAME)
metadata_filepath = os.path.join(metadata_path,
cls._AUDIO_TEST_DATA_FILENAME)
with open(metadata_filepath) as f:
return json.load(f)
@ -83,7 +82,8 @@ class Metadata(object):
Keyword Args:
filepaths: collection of audio track file paths to save.
"""
output_filepath = os.path.join(output_path, cls._AUDIO_TEST_DATA_FILENAME)
output_filepath = os.path.join(output_path,
cls._AUDIO_TEST_DATA_FILENAME)
with open(output_filepath, 'w') as f:
json.dump(filepaths, f)
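
A quick round-trip sketch of the JSON metadata helpers above (paths and keys invented):

    import json
    import os
    import tempfile

    filepaths = {'clean': 'input.wav', 'echo_free': 'output.wav'}
    metadata_filepath = os.path.join(tempfile.mkdtemp(), 'audio_test_data.json')

    with open(metadata_filepath, 'w') as f:
        json.dump(filepaths, f)

    with open(metadata_filepath) as f:
        assert json.load(f) == filepaths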

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Echo path simulation module.
"""
@ -94,8 +93,8 @@ class LinearEchoPathSimulator(EchoPathSimulator):
# Form the file name with a hash of the impulse response.
impulse_response_hash = hashlib.sha256(
str(self._impulse_response).encode('utf-8', 'ignore')).hexdigest()
echo_filepath = os.path.join(output_path, 'linear_echo_{}.wav'.format(
impulse_response_hash))
echo_filepath = os.path.join(
output_path, 'linear_echo_{}.wav'.format(impulse_response_hash))
# If the simulated echo audio track file does not exists, create it.
if not os.path.exists(echo_filepath):
@ -103,7 +102,8 @@ class LinearEchoPathSimulator(EchoPathSimulator):
self._render_input_filepath)
echo = signal_processing.SignalProcessingUtils.ApplyImpulseResponse(
render, self._impulse_response)
signal_processing.SignalProcessingUtils.SaveWav(echo_filepath, echo)
signal_processing.SignalProcessingUtils.SaveWav(
echo_filepath, echo)
return echo_filepath
@ -129,8 +129,8 @@ class RecordedEchoPathSimulator(EchoPathSimulator):
"""Uses recorded echo path."""
path, file_name_ext = os.path.split(self._render_input_filepath)
file_name, file_ext = os.path.splitext(file_name_ext)
echo_filepath = os.path.join(path, '{}{}{}'.format(
file_name, self._FILE_NAME_SUFFIX, file_ext))
echo_filepath = os.path.join(
path, '{}{}{}'.format(file_name, self._FILE_NAME_SUFFIX, file_ext))
assert os.path.exists(echo_filepath), (
'cannot find the echo audio track file {}'.format(echo_filepath))
return echo_filepath
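
The linear simulator earlier in this file derives the echo by convolving the render signal with an impulse response. A bare-bones sketch of that operation with toy arrays, using scipy instead of the module's own helper:

    import numpy as np
    from scipy import signal

    render = np.random.randn(48000)                # toy render signal
    impulse_response = np.array([0.0, 0.5, 0.25])  # toy echo path

    echo = signal.fftconvolve(render, impulse_response)[:len(render)]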

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Echo path simulation factory module.
"""
@ -36,7 +35,8 @@ class EchoPathSimulatorFactory(object):
An EchoPathSimulator instance.
"""
assert render_input_filepath is not None or (
echo_path_simulator_class == echo_path_simulation.NoEchoPathSimulator)
echo_path_simulator_class ==
echo_path_simulation.NoEchoPathSimulator)
if echo_path_simulator_class == echo_path_simulation.NoEchoPathSimulator:
return echo_path_simulation.NoEchoPathSimulator()

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Unit tests for the echo path simulation module.
"""
@ -35,14 +34,15 @@ class TestEchoPathSimulators(unittest.TestCase):
silence)
self._audio_track_num_samples = (
signal_processing.SignalProcessingUtils.CountSamples(white_noise))
self._audio_track_filepath = os.path.join(self._tmp_path, 'white_noise.wav')
self._audio_track_filepath = os.path.join(self._tmp_path,
'white_noise.wav')
signal_processing.SignalProcessingUtils.SaveWav(
self._audio_track_filepath, white_noise)
# Make a copy the white noise audio track file; it will be used by
# echo_path_simulation.RecordedEchoPathSimulator.
shutil.copy(self._audio_track_filepath, os.path.join(
self._tmp_path, 'white_noise_echo.wav'))
shutil.copy(self._audio_track_filepath,
os.path.join(self._tmp_path, 'white_noise_echo.wav'))
def tearDown(self):
"""Recursively deletes temporary folders."""
@ -75,7 +75,8 @@ class TestEchoPathSimulators(unittest.TestCase):
# Check that the echo audio track file exists and its length is greater or
# equal to that of the render audio track.
self.assertTrue(os.path.exists(echo_filepath))
echo = signal_processing.SignalProcessingUtils.LoadWav(echo_filepath)
echo = signal_processing.SignalProcessingUtils.LoadWav(
echo_filepath)
self.assertGreaterEqual(
signal_processing.SignalProcessingUtils.CountSamples(echo),
self._audio_track_num_samples)

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Evaluation score abstract class and implementations.
"""
@ -128,7 +127,6 @@ class EvaluationScore(object):
self._tested_signal = signal_processing.SignalProcessingUtils.LoadWav(
self._tested_signal_filepath)
def _LoadScore(self):
return data_access.ScoreFile.Load(self._output_filepath)
@ -182,12 +180,13 @@ class MeanAudioLevelScore(EvaluationScore):
self._LoadTestedSignal()
dbfs_diffs_sum = 0.0
seconds = min(len(self._tested_signal), len(self._reference_signal)) // 1000
seconds = min(len(self._tested_signal), len(
self._reference_signal)) // 1000
for t in range(seconds):
t0 = t * seconds
t1 = t0 + seconds
dbfs_diffs_sum += (
self._tested_signal[t0:t1].dBFS - self._reference_signal[t0:t1].dBFS)
dbfs_diffs_sum += (self._tested_signal[t0:t1].dBFS -
self._reference_signal[t0:t1].dBFS)
self._score = dbfs_diffs_sum / float(seconds)
self._SaveScore()
@ -218,20 +217,21 @@ class EchoMetric(EvaluationScore):
self._echo_detector_bin_filepath)
def _Run(self, output_path):
echo_detector_out_filepath = os.path.join(output_path, 'echo_detector.out')
echo_detector_out_filepath = os.path.join(output_path,
'echo_detector.out')
if os.path.exists(echo_detector_out_filepath):
os.unlink(echo_detector_out_filepath)
logging.debug("Render signal filepath: %s", self._render_signal_filepath)
logging.debug("Render signal filepath: %s",
self._render_signal_filepath)
if not os.path.exists(self._render_signal_filepath):
logging.error("Render input required for evaluating the echo metric.")
logging.error(
"Render input required for evaluating the echo metric.")
args = [
self._echo_detector_bin_filepath,
'--output_file', echo_detector_out_filepath,
'--',
'-i', self._tested_signal_filepath,
'-ri', self._render_signal_filepath
self._echo_detector_bin_filepath, '--output_file',
echo_detector_out_filepath, '--', '-i',
self._tested_signal_filepath, '-ri', self._render_signal_filepath
]
logging.debug(' '.join(args))
subprocess.call(args, cwd=self._echo_detector_bin_path)
@ -254,6 +254,7 @@ class EchoMetric(EvaluationScore):
with open(echo_metric_file_path) as f:
return float(f.read())
@EvaluationScore.RegisterClass
class PolqaScore(EvaluationScore):
"""POLQA score.
@ -285,11 +286,18 @@ class PolqaScore(EvaluationScore):
os.unlink(polqa_out_filepath)
args = [
self._polqa_bin_filepath, '-t', '-q', '-Overwrite',
'-Ref', self._reference_signal_filepath,
'-Test', self._tested_signal_filepath,
'-LC', 'NB',
'-Out', polqa_out_filepath,
self._polqa_bin_filepath,
'-t',
'-q',
'-Overwrite',
'-Ref',
self._reference_signal_filepath,
'-Test',
self._tested_signal_filepath,
'-LC',
'NB',
'-Out',
polqa_out_filepath,
]
logging.debug(' '.join(args))
subprocess.call(args, cwd=self._polqa_tool_path)
@ -328,7 +336,10 @@ class PolqaScore(EvaluationScore):
# Build and return a dictionary with field names (header) as keys and the
# corresponding field values as values.
return {data[0][index]: data[1][index] for index in range(number_of_fields)}
return {
data[0][index]: data[1][index]
for index in range(number_of_fields)
}
@EvaluationScore.RegisterClass
@ -371,15 +382,17 @@ class TotalHarmonicDistorsionScore(EvaluationScore):
b_terms = []
n = 1
while f0_freq * n < max_freq:
x_n = np.sum(samples * np.sin(2.0 * np.pi * n * f0_freq * t)) * scaling
y_n = np.sum(samples * np.cos(2.0 * np.pi * n * f0_freq * t)) * scaling
x_n = np.sum(
samples * np.sin(2.0 * np.pi * n * f0_freq * t)) * scaling
y_n = np.sum(
samples * np.cos(2.0 * np.pi * n * f0_freq * t)) * scaling
b_terms.append(np.sqrt(x_n**2 + y_n**2))
n += 1
output_without_fundamental = samples - b_terms[0] * np.sin(
2.0 * np.pi * f0_freq * t)
distortion_and_noise = np.sqrt(np.sum(
output_without_fundamental**2) * np.pi * scaling)
distortion_and_noise = np.sqrt(
np.sum(output_without_fundamental**2) * np.pi * scaling)
# TODO(alessiob): Fix or remove if not needed.
# thd = np.sqrt(np.sum(b_terms[1:]**2)) / b_terms[0]
@ -398,14 +411,17 @@ class TotalHarmonicDistorsionScore(EvaluationScore):
raise exceptions.EvaluationScoreException(
'The THD score requires a pure tone as input signal')
self._input_frequency = self._input_signal_metadata['frequency']
if self._input_signal_metadata['test_data_gen_name'] != 'identity' or (
self._input_signal_metadata['test_data_gen_config'] != 'default'):
if self._input_signal_metadata[
'test_data_gen_name'] != 'identity' or (
self._input_signal_metadata['test_data_gen_config'] !=
'default'):
raise exceptions.EvaluationScoreException(
'The THD score cannot be used with any test data generator other '
'than "identity"')
except TypeError:
raise exceptions.EvaluationScoreException(
'The THD score requires an input signal with associated metadata')
'The THD score requires an input signal with associated metadata'
)
except KeyError:
raise exceptions.EvaluationScoreException(
'Invalid input signal metadata to compute the THD score')
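
The THD hunks above estimate each harmonic's amplitude by projecting the signal onto sine and cosine at multiples of the fundamental. A standalone check with a synthetic tone (sample rate and frequency invented):

    import numpy as np

    sample_rate, f0 = 48000.0, 1000.0
    t = np.arange(48000) / sample_rate
    samples = np.sin(2.0 * np.pi * f0 * t)  # unit-amplitude pure tone

    scaling = 2.0 / len(samples)
    for n in (1, 2, 3):
        x_n = np.sum(samples * np.sin(2.0 * np.pi * n * f0 * t)) * scaling
        y_n = np.sum(samples * np.cos(2.0 * np.pi * n * f0 * t)) * scaling
        print(n, np.sqrt(x_n**2 + y_n**2))  # ~1.0 for n=1, ~0.0 otherwise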

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""EvaluationScore factory class.
"""
@ -41,15 +40,16 @@ class EvaluationScoreWorkerFactory(object):
"""
if self._score_filename_prefix is None:
raise exceptions.InitializationException(
'The score file name prefix for evaluation score workers is not set')
logging.debug(
'factory producing a %s evaluation score', evaluation_score_class)
'The score file name prefix for evaluation score workers is not set'
)
logging.debug('factory producing a %s evaluation score',
evaluation_score_class)
if evaluation_score_class == eval_scores.PolqaScore:
return eval_scores.PolqaScore(
self._score_filename_prefix, self._polqa_tool_bin_path)
return eval_scores.PolqaScore(self._score_filename_prefix,
self._polqa_tool_bin_path)
elif evaluation_score_class == eval_scores.EchoMetric:
return eval_scores.EchoMetric(
self._score_filename_prefix, self._echo_metric_tool_bin_path)
return eval_scores.EchoMetric(self._score_filename_prefix,
self._echo_metric_tool_bin_path)
else:
return evaluation_score_class(self._score_filename_prefix)
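
This factory pairs with the @EvaluationScore.RegisterClass decorator seen in eval_scores; together they form a small class registry. A sketch of the pattern with invented names:

    class ScoreBase(object):
        REGISTERED_CLASSES = {}

        @classmethod
        def RegisterClass(cls, class_to_register):
            cls.REGISTERED_CLASSES[class_to_register.NAME] = class_to_register
            return class_to_register

    @ScoreBase.RegisterClass
    class FakeScore(ScoreBase):
        NAME = 'fake'

    worker = ScoreBase.REGISTERED_CLASSES['fake']()  # look up by name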

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Unit tests for the eval_scores module.
"""
@ -32,10 +31,10 @@ class TestEvalScores(unittest.TestCase):
# Create fake reference and tested (i.e., APM output) audio track files.
silence = pydub.AudioSegment.silent(duration=1000, frame_rate=48000)
fake_reference_signal = (
signal_processing.SignalProcessingUtils.GenerateWhiteNoise(silence))
fake_tested_signal = (
signal_processing.SignalProcessingUtils.GenerateWhiteNoise(silence))
fake_reference_signal = (signal_processing.SignalProcessingUtils.
GenerateWhiteNoise(silence))
fake_tested_signal = (signal_processing.SignalProcessingUtils.
GenerateWhiteNoise(silence))
# Save fake audio tracks.
self._fake_reference_signal_filepath = os.path.join(
@ -68,8 +67,7 @@ class TestEvalScores(unittest.TestCase):
eval_scores_factory.EvaluationScoreWorkerFactory(
polqa_tool_bin_path=os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'fake_polqa'),
echo_metric_tool_bin_path=None
))
echo_metric_tool_bin_path=None))
eval_score_workers_factory.SetScoreFilenamePrefix('scores-')
# Try each registered evaluation score worker.
@ -89,7 +87,8 @@ class TestEvalScores(unittest.TestCase):
eval_score_worker.Run(self._output_path)
# Check output.
score = data_access.ScoreFile.Load(eval_score_worker.output_filepath)
score = data_access.ScoreFile.Load(
eval_score_worker.output_filepath)
self.assertTrue(isinstance(score, float))
def testTotalHarmonicDistorsionScore(self):
@ -97,10 +96,14 @@ class TestEvalScores(unittest.TestCase):
pure_tone_freq = 5000.0
eval_score_worker = eval_scores.TotalHarmonicDistorsionScore('scores-')
eval_score_worker.SetInputSignalMetadata({
'signal': 'pure_tone',
'frequency': pure_tone_freq,
'test_data_gen_name': 'identity',
'test_data_gen_config': 'default',
'signal':
'pure_tone',
'frequency':
pure_tone_freq,
'test_data_gen_name':
'identity',
'test_data_gen_config':
'default',
})
template = pydub.AudioSegment.silent(duration=1000, frame_rate=48000)
@ -115,7 +118,8 @@ class TestEvalScores(unittest.TestCase):
# Compute scores for increasingly distorted pure tone signals.
scores = [None, None, None]
for index, tested_signal in enumerate([pure_tone, noisy_tone, white_noise]):
for index, tested_signal in enumerate(
[pure_tone, noisy_tone, white_noise]):
# Save signal.
tmp_filepath = os.path.join(self._output_path, 'tmp_thd.wav')
signal_processing.SignalProcessingUtils.SaveWav(

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Evaluator of the APM module.
"""
@ -41,7 +40,8 @@ class ApmModuleEvaluator(object):
scores = {}
for evaluation_score_worker in evaluation_score_workers:
logging.info(' computing <%s> score', evaluation_score_worker.NAME)
logging.info(' computing <%s> score',
evaluation_score_worker.NAME)
evaluation_score_worker.SetInputSignalMetadata(apm_input_metadata)
evaluation_score_worker.SetReferenceSignalFilepath(
reference_input_filepath)
@ -51,6 +51,7 @@ class ApmModuleEvaluator(object):
render_input_filepath)
evaluation_score_worker.Run(output_path)
scores[evaluation_score_worker.NAME] = evaluation_score_worker.score
scores[
evaluation_score_worker.NAME] = evaluation_score_worker.score
return scores

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Exception classes.
"""

@ -16,7 +16,8 @@ import sys
try:
import csscompressor
except ImportError:
logging.critical('Cannot import the third-party Python package csscompressor')
logging.critical(
'Cannot import the third-party Python package csscompressor')
sys.exit(1)
try:
@ -49,7 +50,8 @@ class HtmlExport(object):
scores_data_frame: DataFrame instance.
"""
self._scores_data_frame = scores_data_frame
html = ['<html>',
html = [
'<html>',
self._BuildHeader(),
('<script type="text/javascript">'
'(function () {'
@ -57,11 +59,9 @@ class HtmlExport(object):
'var inspector = new AudioInspector();'
'});'
'})();'
'</script>'),
'<body>',
self._BuildBody(),
'</body>',
'</html>']
'</script>'), '<body>',
self._BuildBody(), '</body>', '</html>'
]
self._Save(self._output_filepath, self._NEW_LINE.join(html))
def _BuildHeader(self):
@ -78,9 +78,11 @@ class HtmlExport(object):
# Add Material Design hosted libs.
html.append('<link rel="stylesheet" href="http://fonts.googleapis.com/'
'css?family=Roboto:300,400,500,700" type="text/css">')
html.append('<link rel="stylesheet" href="https://fonts.googleapis.com/'
html.append(
'<link rel="stylesheet" href="https://fonts.googleapis.com/'
'icon?family=Material+Icons">')
html.append('<link rel="stylesheet" href="https://code.getmdl.io/1.3.0/'
html.append(
'<link rel="stylesheet" href="https://code.getmdl.io/1.3.0/'
'material.indigo-pink.min.css">')
html.append('<script defer src="https://code.getmdl.io/1.3.0/'
'material.min.js"></script>')
@ -88,12 +90,14 @@ class HtmlExport(object):
# Embed custom JavaScript and CSS files.
html.append('<script>')
with open(self._JS_FILEPATH) as f:
html.append(jsmin.jsmin(f.read()) if self._JS_MINIFIED else (
html.append(
jsmin.jsmin(f.read()) if self._JS_MINIFIED else (
f.read().rstrip()))
html.append('</script>')
html.append('<style>')
with open(self._CSS_FILEPATH) as f:
html.append(csscompressor.compress(f.read()) if self._CSS_MINIFIED else (
html.append(
csscompressor.compress(f.read()) if self._CSS_MINIFIED else (
f.read().rstrip()))
html.append('</style>')
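
The embedding above minifies the bundled assets only when the corresponding flag is set. A reduced sketch of that conditional (the asset path is a placeholder; jsmin is the same third-party call the hunk uses):

    import jsmin

    JS_MINIFIED = True
    with open('inspector.js') as f:
        source = f.read()
    payload = jsmin.jsmin(source) if JS_MINIFIED else source.rstrip()
    html = ['<script>', payload, '</script>']
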
@ -103,8 +107,8 @@ class HtmlExport(object):
def _BuildBody(self):
"""Builds the content of the <body> section."""
score_names = self._scores_data_frame['eval_score_name'].drop_duplicates(
).values.tolist()
score_names = self._scores_data_frame[
'eval_score_name'].drop_duplicates().values.tolist()
html = [
('<div class="mdl-layout mdl-js-layout mdl-layout--fixed-header '
@ -127,7 +131,8 @@ class HtmlExport(object):
html.append('</div>')
html.append('</header>')
html.append('<main class="mdl-layout__content" style="overflow-x: auto;">')
html.append(
'<main class="mdl-layout__content" style="overflow-x: auto;">')
# Tabs content.
for tab_index, score_name in enumerate(score_names):
@ -135,7 +140,8 @@ class HtmlExport(object):
'id="score-tab-{}">'.format(
' is-active' if is_active else '', tab_index))
html.append('<div class="page-content">')
html.append(self._BuildScoreTab(score_name, ('s{}'.format(tab_index),)))
html.append(
self._BuildScoreTab(score_name, ('s{}'.format(tab_index), )))
html.append('</div>')
html.append('</section>')
@ -158,8 +164,9 @@ class HtmlExport(object):
scores = self._scores_data_frame[
self._scores_data_frame.eval_score_name == score_name]
apm_configs = sorted(self._FindUniqueTuples(scores, ['apm_config']))
test_data_gen_configs = sorted(self._FindUniqueTuples(
scores, ['test_data_gen', 'test_data_gen_params']))
test_data_gen_configs = sorted(
self._FindUniqueTuples(scores,
['test_data_gen', 'test_data_gen_params']))
html = [
'<div class="mdl-grid">',
@ -173,7 +180,8 @@ class HtmlExport(object):
html.append('<thead><tr><th>APM config / Test data generator</th>')
for test_data_gen_info in test_data_gen_configs:
html.append('<th>{} {}</th>'.format(
self._FormatName(test_data_gen_info[0]), test_data_gen_info[1]))
self._FormatName(test_data_gen_info[0]),
test_data_gen_info[1]))
html.append('</tr></thead>')
# Body.
@ -185,17 +193,22 @@ class HtmlExport(object):
score_name, apm_config[0], test_data_gen_info[0],
test_data_gen_info[1])
html.append(
'<td onclick="openScoreStatsInspector(\'{}\')">{}</td>'.format(
dialog_id, self._BuildScoreTableCell(
score_name, test_data_gen_info[0], test_data_gen_info[1],
'<td onclick="openScoreStatsInspector(\'{}\')">{}</td>'.
format(
dialog_id,
self._BuildScoreTableCell(score_name,
test_data_gen_info[0],
test_data_gen_info[1],
apm_config[0])))
html.append('</tr>')
html.append('</tbody>')
html.append('</table></div><div class="mdl-layout-spacer"></div></div>')
html.append(
'</table></div><div class="mdl-layout-spacer"></div></div>')
html.append(self._BuildScoreStatsInspectorDialogs(
score_name, apm_configs, test_data_gen_configs,
html.append(
self._BuildScoreStatsInspectorDialogs(score_name, apm_configs,
test_data_gen_configs,
anchor_data))
return self._NEW_LINE.join(html)
@ -203,19 +216,21 @@ class HtmlExport(object):
def _BuildScoreTableCell(self, score_name, test_data_gen,
test_data_gen_params, apm_config):
"""Builds the content of a table cell for a score table."""
scores = self._SliceDataForScoreTableCell(
score_name, apm_config, test_data_gen, test_data_gen_params)
scores = self._SliceDataForScoreTableCell(score_name, apm_config,
test_data_gen,
test_data_gen_params)
stats = self._ComputeScoreStats(scores)
html = []
items_id_prefix = (
score_name + test_data_gen + test_data_gen_params + apm_config)
items_id_prefix = (score_name + test_data_gen + test_data_gen_params +
apm_config)
if stats['count'] == 1:
# Show the only available score.
item_id = hashlib.md5(items_id_prefix.encode('utf-8')).hexdigest()
html.append('<div id="single-value-{0}">{1:f}</div>'.format(
item_id, scores['score'].mean()))
html.append('<div class="mdl-tooltip" data-mdl-for="single-value-{}">{}'
html.append(
'<div class="mdl-tooltip" data-mdl-for="single-value-{}">{}'
'</div>'.format(item_id, 'single value'))
else:
# Show stats.
@ -224,32 +239,35 @@ class HtmlExport(object):
(items_id_prefix + stat_name).encode('utf-8')).hexdigest()
html.append('<div id="stats-{0}">{1:f}</div>'.format(
item_id, stats[stat_name]))
html.append('<div class="mdl-tooltip" data-mdl-for="stats-{}">{}'
html.append(
'<div class="mdl-tooltip" data-mdl-for="stats-{}">{}'
'</div>'.format(item_id, stat_name))
return self._NEW_LINE.join(html)
def _BuildScoreStatsInspectorDialogs(
self, score_name, apm_configs, test_data_gen_configs, anchor_data):
def _BuildScoreStatsInspectorDialogs(self, score_name, apm_configs,
test_data_gen_configs, anchor_data):
"""Builds a set of score stats inspector dialogs."""
html = []
for apm_config in apm_configs:
for test_data_gen_info in test_data_gen_configs:
dialog_id = self._ScoreStatsInspectorDialogId(
score_name, apm_config[0],
test_data_gen_info[0], test_data_gen_info[1])
score_name, apm_config[0], test_data_gen_info[0],
test_data_gen_info[1])
html.append('<dialog class="mdl-dialog" id="{}" '
'style="width: 40%;">'.format(dialog_id))
# Content.
html.append('<div class="mdl-dialog__content">')
html.append('<h6><strong>APM config preset</strong>: {}<br/>'
'<strong>Test data generator</strong>: {} ({})</h6>'.format(
self._FormatName(apm_config[0]),
html.append(
'<h6><strong>APM config preset</strong>: {}<br/>'
'<strong>Test data generator</strong>: {} ({})</h6>'.
format(self._FormatName(apm_config[0]),
self._FormatName(test_data_gen_info[0]),
test_data_gen_info[1]))
html.append(self._BuildScoreStatsInspectorDialog(
html.append(
self._BuildScoreStatsInspectorDialog(
score_name, apm_config[0], test_data_gen_info[0],
test_data_gen_info[1], anchor_data + (dialog_id, )))
html.append('</div>')
@ -265,18 +283,22 @@ class HtmlExport(object):
return self._NEW_LINE.join(html)
def _BuildScoreStatsInspectorDialog(
self, score_name, apm_config, test_data_gen, test_data_gen_params,
def _BuildScoreStatsInspectorDialog(self, score_name, apm_config,
test_data_gen, test_data_gen_params,
anchor_data):
"""Builds one score stats inspector dialog."""
scores = self._SliceDataForScoreTableCell(
score_name, apm_config, test_data_gen, test_data_gen_params)
scores = self._SliceDataForScoreTableCell(score_name, apm_config,
test_data_gen,
test_data_gen_params)
capture_render_pairs = sorted(self._FindUniqueTuples(
scores, ['capture', 'render']))
echo_simulators = sorted(self._FindUniqueTuples(scores, ['echo_simulator']))
capture_render_pairs = sorted(
self._FindUniqueTuples(scores, ['capture', 'render']))
echo_simulators = sorted(
self._FindUniqueTuples(scores, ['echo_simulator']))
html = ['<table class="mdl-data-table mdl-js-data-table mdl-shadow--2dp">']
html = [
'<table class="mdl-data-table mdl-js-data-table mdl-shadow--2dp">'
]
# Header.
html.append('<thead><tr><th>Capture-Render / Echo simulator</th>')
@ -294,7 +316,8 @@ class HtmlExport(object):
scores, capture, render, echo_simulator[0])
cell_class = 'r{}c{}'.format(row, col)
html.append('<td class="single-score-cell {}">{}</td>'.format(
cell_class, self._BuildScoreStatsInspectorTableCell(
cell_class,
self._BuildScoreStatsInspectorTableCell(
score_tuple, anchor_data + (cell_class, ))))
html.append('</tr>')
html.append('</tbody>')
@ -318,27 +341,28 @@ class HtmlExport(object):
# Add all the available file paths as hidden data.
for field_name in score_tuple.keys():
if field_name.endswith('_filepath'):
html.append('<input type="hidden" name="{}" value="{}">'.format(
html.append(
'<input type="hidden" name="{}" value="{}">'.format(
field_name, score_tuple[field_name]))
return self._NEW_LINE.join(html)
def _SliceDataForScoreTableCell(
self, score_name, apm_config, test_data_gen, test_data_gen_params):
def _SliceDataForScoreTableCell(self, score_name, apm_config,
test_data_gen, test_data_gen_params):
"""Slices |self._scores_data_frame| to extract the data for a tab."""
masks = []
masks.append(self._scores_data_frame.eval_score_name == score_name)
masks.append(self._scores_data_frame.apm_config == apm_config)
masks.append(self._scores_data_frame.test_data_gen == test_data_gen)
masks.append(
self._scores_data_frame.test_data_gen_params == test_data_gen_params)
masks.append(self._scores_data_frame.test_data_gen_params ==
test_data_gen_params)
mask = functools.reduce((lambda i1, i2: i1 & i2), masks)
del masks
return self._scores_data_frame[mask]
@classmethod
def _SliceDataForScoreStatsTableCell(
cls, scores, capture, render, echo_simulator):
def _SliceDataForScoreStatsTableCell(cls, scores, capture, render,
echo_simulator):
"""Slices |scores| to extract the data for a tab."""
masks = []
@ -370,8 +394,8 @@ class HtmlExport(object):
}
@classmethod
def _ScoreStatsInspectorDialogId(cls, score_name, apm_config, test_data_gen,
test_data_gen_params):
def _ScoreStatsInspectorDialogId(cls, score_name, apm_config,
test_data_gen, test_data_gen_params):
"""Assigns a unique name to a dialog."""
return 'score-stats-dialog-' + hashlib.md5(
'score-stats-inspector-{}-{}-{}-{}'.format(

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Unit tests for the export module.
"""
@ -46,11 +45,12 @@ class TestExport(unittest.TestCase):
evaluation_score_factory=(
eval_scores_factory.EvaluationScoreWorkerFactory(
polqa_tool_bin_path=os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'fake_polqa'),
echo_metric_tool_bin_path=None
)),
os.path.dirname(os.path.abspath(__file__)),
'fake_polqa'),
echo_metric_tool_bin_path=None)),
ap_wrapper=audioproc_wrapper.AudioProcWrapper(
audioproc_wrapper.AudioProcWrapper.DEFAULT_APM_SIMULATOR_BIN_PATH),
audioproc_wrapper.AudioProcWrapper.
DEFAULT_APM_SIMULATOR_BIN_PATH),
evaluator=evaluation.ApmModuleEvaluator())
simulator.Run(
config_filepaths=['apm_configs/default.json'],
@ -73,8 +73,8 @@ class TestExport(unittest.TestCase):
if self._CLEAN_TMP_OUTPUT:
shutil.rmtree(self._tmp_path)
else:
logging.warning(self.id() + ' did not clean the temporary path ' + (
self._tmp_path))
logging.warning(self.id() + ' did not clean the temporary path ' +
(self._tmp_path))
def testCreateHtmlReport(self):
fn_out = os.path.join(self._tmp_path, 'results.html')

@ -23,8 +23,8 @@ except ImportError:
from . import signal_processing
class ExternalVad(object):
class ExternalVad(object):
def __init__(self, path_to_binary, name):
"""Args:
path_to_binary: path to binary that accepts '-i <wav>', '-o
@ -35,12 +35,12 @@ class ExternalVad(object):
"""
self._path_to_binary = path_to_binary
self.name = name
assert os.path.exists(self._path_to_binary), (
self._path_to_binary)
assert os.path.exists(self._path_to_binary), (self._path_to_binary)
self._vad_output = None
def Run(self, wav_file_path):
_signal = signal_processing.SignalProcessingUtils.LoadWav(wav_file_path)
_signal = signal_processing.SignalProcessingUtils.LoadWav(
wav_file_path)
if _signal.channels != 1:
raise NotImplementedError('Multiple-channel'
' annotations not implemented')
@ -50,17 +50,15 @@ class ExternalVad(object):
tmp_path = tempfile.mkdtemp()
try:
output_file_path = os.path.join(
tmp_path, self.name + '_vad.tmp')
output_file_path = os.path.join(tmp_path, self.name + '_vad.tmp')
subprocess.call([
self._path_to_binary,
'-i', wav_file_path,
'-o', output_file_path
self._path_to_binary, '-i', wav_file_path, '-o',
output_file_path
])
self._vad_output = np.fromfile(output_file_path, np.float32)
except Exception as e:
logging.error('Error while running the ' + self.name +
' VAD (' + e.message + ')')
logging.error('Error while running the ' + self.name + ' VAD (' +
e.message + ')')
finally:
if os.path.exists(tmp_path):
shutil.rmtree(tmp_path)

@ -9,6 +9,7 @@
import argparse
import numpy as np
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', required=True)

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Input mixer module.
"""
@ -67,15 +66,18 @@ class ApmInputMixer(object):
echo_file_name, _ = os.path.splitext(os.path.split(echo_filepath)[1])
capture_input_file_name, _ = os.path.splitext(
os.path.split(capture_input_filepath)[1])
mix_filepath = os.path.join(output_path, 'mix_capture_{}_{}.wav'.format(
capture_input_file_name, echo_file_name))
mix_filepath = os.path.join(
output_path,
'mix_capture_{}_{}.wav'.format(capture_input_file_name,
echo_file_name))
# Create the mix if not done yet.
mix = None
if not os.path.exists(mix_filepath):
echo_free_capture = signal_processing.SignalProcessingUtils.LoadWav(
capture_input_filepath)
echo = signal_processing.SignalProcessingUtils.LoadWav(echo_filepath)
echo = signal_processing.SignalProcessingUtils.LoadWav(
echo_filepath)
if signal_processing.SignalProcessingUtils.CountSamples(echo) < (
signal_processing.SignalProcessingUtils.CountSamples(

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Unit tests for the input mixer module.
"""
@ -46,10 +45,11 @@ class TestApmInputMixer(unittest.TestCase):
# Create audio track files.
self._audio_tracks = {}
for filename, peak_power, duration in zip(
self._FILENAMES, self._MAX_PEAK_POWER_LEVELS, self._DURATIONS):
audio_track_filepath = os.path.join(self._tmp_path, '{}.wav'.format(
filename))
for filename, peak_power, duration in zip(self._FILENAMES,
self._MAX_PEAK_POWER_LEVELS,
self._DURATIONS):
audio_track_filepath = os.path.join(self._tmp_path,
'{}.wav'.format(filename))
# Create a pure tone with the target peak power level.
template = signal_processing.SignalProcessingUtils.GenerateSilence(
@ -62,9 +62,10 @@ class TestApmInputMixer(unittest.TestCase):
signal_processing.SignalProcessingUtils.SaveWav(
audio_track_filepath, signal)
self._audio_tracks[filename] = {
'filepath': audio_track_filepath,
'num_samples': signal_processing.SignalProcessingUtils.CountSamples(
signal)
'filepath':
audio_track_filepath,
'num_samples':
signal_processing.SignalProcessingUtils.CountSamples(signal)
}
def tearDown(self):
@ -74,21 +75,20 @@ class TestApmInputMixer(unittest.TestCase):
def testCheckMixSameDuration(self):
"""Checks the duration when mixing capture and echo with same duration."""
mix_filepath = input_mixer.ApmInputMixer.Mix(
self._tmp_path,
self._audio_tracks['capture']['filepath'],
self._tmp_path, self._audio_tracks['capture']['filepath'],
self._audio_tracks['echo_1']['filepath'])
self.assertTrue(os.path.exists(mix_filepath))
mix = signal_processing.SignalProcessingUtils.LoadWav(mix_filepath)
self.assertEqual(self._audio_tracks['capture']['num_samples'],
self.assertEqual(
self._audio_tracks['capture']['num_samples'],
signal_processing.SignalProcessingUtils.CountSamples(mix))
def testRejectShorterEcho(self):
"""Rejects echo signals that are shorter than the capture signal."""
try:
_ = input_mixer.ApmInputMixer.Mix(
self._tmp_path,
self._audio_tracks['capture']['filepath'],
self._tmp_path, self._audio_tracks['capture']['filepath'],
self._audio_tracks['shorter']['filepath'])
self.fail('no exception raised')
except exceptions.InputMixerException:
@ -97,26 +97,24 @@ class TestApmInputMixer(unittest.TestCase):
def testCheckMixDurationWithLongerEcho(self):
"""Checks the duration when mixing an echo longer than the capture."""
mix_filepath = input_mixer.ApmInputMixer.Mix(
self._tmp_path,
self._audio_tracks['capture']['filepath'],
self._tmp_path, self._audio_tracks['capture']['filepath'],
self._audio_tracks['longer']['filepath'])
self.assertTrue(os.path.exists(mix_filepath))
mix = signal_processing.SignalProcessingUtils.LoadWav(mix_filepath)
self.assertEqual(self._audio_tracks['capture']['num_samples'],
self.assertEqual(
self._audio_tracks['capture']['num_samples'],
signal_processing.SignalProcessingUtils.CountSamples(mix))
def testCheckOutputFileNamesConflict(self):
"""Checks that different echo files lead to different output file names."""
mix1_filepath = input_mixer.ApmInputMixer.Mix(
self._tmp_path,
self._audio_tracks['capture']['filepath'],
self._tmp_path, self._audio_tracks['capture']['filepath'],
self._audio_tracks['echo_1']['filepath'])
self.assertTrue(os.path.exists(mix1_filepath))
mix2_filepath = input_mixer.ApmInputMixer.Mix(
self._tmp_path,
self._audio_tracks['capture']['filepath'],
self._tmp_path, self._audio_tracks['capture']['filepath'],
self._audio_tracks['echo_2']['filepath'])
self.assertTrue(os.path.exists(mix2_filepath))
@ -126,8 +124,7 @@ class TestApmInputMixer(unittest.TestCase):
"""Checks that hard clipping warning is raised when occurring."""
logging.warning = mock.MagicMock(name='warning')
_ = input_mixer.ApmInputMixer.Mix(
self._tmp_path,
self._audio_tracks['capture']['filepath'],
self._tmp_path, self._audio_tracks['capture']['filepath'],
self._audio_tracks['echo_2']['filepath'])
logging.warning.assert_called_once_with(
input_mixer.ApmInputMixer.HardClippingLogMessage())
@ -136,8 +133,7 @@ class TestApmInputMixer(unittest.TestCase):
"""Checks that hard clipping warning is not raised when not occurring."""
logging.warning = mock.MagicMock(name='warning')
_ = input_mixer.ApmInputMixer.Mix(
self._tmp_path,
self._audio_tracks['capture']['filepath'],
self._tmp_path, self._audio_tracks['capture']['filepath'],
self._audio_tracks['echo_1']['filepath'])
self.assertNotIn(
mock.call(input_mixer.ApmInputMixer.HardClippingLogMessage()),

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Input signal creator module.
"""
@ -35,7 +34,8 @@ class InputSignalCreator(object):
if name == 'pure_tone':
params['frequency'] = float(raw_params[0])
params['duration'] = int(raw_params[1])
signal = cls._CreatePureTone(params['frequency'], params['duration'])
signal = cls._CreatePureTone(params['frequency'],
params['duration'])
else:
raise exceptions.InputSignalCreatorException(
'Invalid input signal creator name')
@ -62,6 +62,7 @@ class InputSignalCreator(object):
"""
assert 0 < frequency <= 24000
assert duration > 0
template = signal_processing.SignalProcessingUtils.GenerateSilence(duration)
template = signal_processing.SignalProcessingUtils.GenerateSilence(
duration)
return signal_processing.SignalProcessingUtils.GeneratePureTone(
template, frequency)

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Signal processing utility module.
"""
@ -65,8 +64,9 @@ class SignalProcessingUtils(object):
if not os.path.exists(filepath):
logging.error('cannot find the <%s> audio track file', filepath)
raise exceptions.FileNotFoundError()
return pydub.AudioSegment.from_file(
filepath, format='wav', channels=channels)
return pydub.AudioSegment.from_file(filepath,
format='wav',
channels=channels)
@classmethod
def SaveWav(cls, output_filepath, signal):
@ -127,14 +127,11 @@ class SignalProcessingUtils(object):
if frequency > template.frame_rate >> 1:
raise exceptions.SignalProcessingException('Invalid frequency')
generator = pydub.generators.Sine(
sample_rate=template.frame_rate,
generator = pydub.generators.Sine(sample_rate=template.frame_rate,
bit_depth=template.sample_width * 8,
freq=frequency)
return generator.to_audio_segment(
duration=len(template),
volume=0.0)
return generator.to_audio_segment(duration=len(template), volume=0.0)
@classmethod
def GenerateWhiteNoise(cls, template):
@ -152,15 +149,14 @@ class SignalProcessingUtils(object):
generator = pydub.generators.WhiteNoise(
sample_rate=template.frame_rate,
bit_depth=template.sample_width * 8)
return generator.to_audio_segment(
duration=len(template),
volume=0.0)
return generator.to_audio_segment(duration=len(template), volume=0.0)
@classmethod
def AudioSegmentToRawData(cls, signal):
samples = signal.get_array_of_samples()
if samples.typecode != 'h':
raise exceptions.SignalProcessingException('Unsupported samples type')
raise exceptions.SignalProcessingException(
'Unsupported samples type')
return np.array(signal.get_array_of_samples(), np.int16)
@classmethod
@ -190,7 +186,8 @@ class SignalProcessingUtils(object):
True if hard clipping is detected, False otherwise.
"""
if signal.channels != 1:
raise NotImplementedError('multiple-channel clipping not implemented')
raise NotImplementedError(
'multiple-channel clipping not implemented')
if signal.sample_width != 2: # Note that signal.sample_width is in bytes.
raise exceptions.SignalProcessingException(
'hard-clipping detection only supported for 16 bit samples')
@ -229,12 +226,13 @@ class SignalProcessingUtils(object):
samples = signal.get_array_of_samples()
# Convolve.
logging.info('applying %d order impulse response to a signal lasting %d ms',
logging.info(
'applying %d order impulse response to a signal lasting %d ms',
len(impulse_response), len(signal))
convolved_samples = scipy.signal.fftconvolve(
in1=samples,
convolved_samples = scipy.signal.fftconvolve(in1=samples,
in2=impulse_response,
mode='full').astype(np.int16)
mode='full').astype(
np.int16)
logging.info('convolution computed')
# Cast.
@ -242,16 +240,19 @@ class SignalProcessingUtils(object):
# Verify.
logging.debug('signal length: %d samples', len(samples))
logging.debug('convolved signal length: %d samples', len(convolved_samples))
logging.debug('convolved signal length: %d samples',
len(convolved_samples))
assert len(convolved_samples) > len(samples)
# Generate convolved signal AudioSegment instance.
convolved_signal = pydub.AudioSegment(
data=convolved_samples,
convolved_signal = pydub.AudioSegment(data=convolved_samples,
metadata={
'sample_width': signal.sample_width,
'frame_rate': signal.frame_rate,
'frame_width': signal.frame_width,
'sample_width':
signal.sample_width,
'frame_rate':
signal.frame_rate,
'frame_width':
signal.frame_width,
'channels': signal.channels,
})
assert len(convolved_signal) > len(signal)
@ -280,8 +281,7 @@ class SignalProcessingUtils(object):
Returns:
An AudioSegment instance.
"""
return pydub.AudioSegment(
data=signal.get_array_of_samples(),
return pydub.AudioSegment(data=signal.get_array_of_samples(),
metadata={
'sample_width': signal.sample_width,
'frame_rate': signal.frame_rate,
@ -290,7 +290,10 @@ class SignalProcessingUtils(object):
})
@classmethod
def MixSignals(cls, signal, noise, target_snr=0.0,
def MixSignals(cls,
signal,
noise,
target_snr=0.0,
pad_noise=MixPadding.NO_PADDING):
"""Mixes |signal| and |noise| with a target SNR.

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Unit tests for the signal_processing module.
"""
@ -89,13 +88,15 @@ class TestSignalProcessing(unittest.TestCase):
mix = signal_processing.SignalProcessingUtils.MixSignals(
signal=shorter,
noise=longer,
pad_noise=signal_processing.SignalProcessingUtils.MixPadding.NO_PADDING)
pad_noise=signal_processing.SignalProcessingUtils.MixPadding.
NO_PADDING)
self.assertEqual(len(shorter), len(mix))
# With noise padding, length of signal less than that of noise.
mix = signal_processing.SignalProcessingUtils.MixSignals(
signal=shorter,
noise=longer,
pad_noise=signal_processing.SignalProcessingUtils.MixPadding.ZERO_PADDING)
pad_noise=signal_processing.SignalProcessingUtils.MixPadding.
ZERO_PADDING)
self.assertEqual(len(shorter), len(mix))
# When the signal is longer than the noise, the mix length depends on
@ -104,13 +105,15 @@ class TestSignalProcessing(unittest.TestCase):
mix = signal_processing.SignalProcessingUtils.MixSignals(
signal=longer,
noise=shorter,
pad_noise=signal_processing.SignalProcessingUtils.MixPadding.NO_PADDING)
pad_noise=signal_processing.SignalProcessingUtils.MixPadding.
NO_PADDING)
self.assertEqual(len(shorter), len(mix))
# With noise padding, length of signal greater than that of noise.
mix = signal_processing.SignalProcessingUtils.MixSignals(
signal=longer,
noise=shorter,
pad_noise=signal_processing.SignalProcessingUtils.MixPadding.ZERO_PADDING)
pad_noise=signal_processing.SignalProcessingUtils.MixPadding.
ZERO_PADDING)
self.assertEqual(len(longer), len(mix))
def testMixSignalNoisePaddingTypes(self):
@ -125,7 +128,8 @@ class TestSignalProcessing(unittest.TestCase):
signal=longer,
noise=shorter,
target_snr=-6,
pad_noise=signal_processing.SignalProcessingUtils.MixPadding.ZERO_PADDING)
pad_noise=signal_processing.SignalProcessingUtils.MixPadding.
ZERO_PADDING)
# Loop: expect pure tone plus noise in 1-2s.
mix_loop = signal_processing.SignalProcessingUtils.MixSignals(
@ -154,33 +158,26 @@ class TestSignalProcessing(unittest.TestCase):
def ToneAmplitudes(mix):
"""Returns the amplitude of the coefficients #16 and #192, which
correspond to the tones at 250 and 3k Hz respectively."""
mix_fft = np.absolute(signal_processing.SignalProcessingUtils.Fft(mix))
mix_fft = np.absolute(
signal_processing.SignalProcessingUtils.Fft(mix))
return mix_fft[16], mix_fft[192]
mix = signal_processing.SignalProcessingUtils.MixSignals(
signal=tone_low,
noise=tone_high,
target_snr=-6)
signal=tone_low, noise=tone_high, target_snr=-6)
ampl_low, ampl_high = ToneAmplitudes(mix)
self.assertLess(ampl_low, ampl_high)
mix = signal_processing.SignalProcessingUtils.MixSignals(
signal=tone_high,
noise=tone_low,
target_snr=-6)
signal=tone_high, noise=tone_low, target_snr=-6)
ampl_low, ampl_high = ToneAmplitudes(mix)
self.assertLess(ampl_high, ampl_low)
mix = signal_processing.SignalProcessingUtils.MixSignals(
signal=tone_low,
noise=tone_high,
target_snr=6)
signal=tone_low, noise=tone_high, target_snr=6)
ampl_low, ampl_high = ToneAmplitudes(mix)
self.assertLess(ampl_high, ampl_low)
mix = signal_processing.SignalProcessingUtils.MixSignals(
signal=tone_high,
noise=tone_low,
target_snr=6)
signal=tone_high, noise=tone_low, target_snr=6)
ampl_low, ampl_high = ToneAmplitudes(mix)
self.assertLess(ampl_low, ampl_high)

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""APM module simulator.
"""
@ -40,8 +39,12 @@ class ApmModuleSimulator(object):
_PREFIX_TEST_DATA_GEN_PARAMS = 'datagen_params-'
_PREFIX_SCORE = 'score-'
def __init__(self, test_data_generator_factory, evaluation_score_factory,
ap_wrapper, evaluator, external_vads=None):
def __init__(self,
test_data_generator_factory,
evaluation_score_factory,
ap_wrapper,
evaluator,
external_vads=None):
if external_vads is None:
external_vads = {}
self._test_data_generator_factory = test_data_generator_factory
@ -49,11 +52,10 @@ class ApmModuleSimulator(object):
self._audioproc_wrapper = ap_wrapper
self._evaluator = evaluator
self._annotator = annotations.AudioAnnotationsExtractor(
annotations.AudioAnnotationsExtractor.VadType.ENERGY_THRESHOLD |
annotations.AudioAnnotationsExtractor.VadType.WEBRTC_COMMON_AUDIO |
annotations.AudioAnnotationsExtractor.VadType.WEBRTC_APM,
external_vads
)
annotations.AudioAnnotationsExtractor.VadType.ENERGY_THRESHOLD
| annotations.AudioAnnotationsExtractor.VadType.WEBRTC_COMMON_AUDIO
| annotations.AudioAnnotationsExtractor.VadType.WEBRTC_APM,
external_vads)
# Init.
self._test_data_generator_factory.SetOutputDirectoryPrefix(
@ -99,9 +101,14 @@ class ApmModuleSimulator(object):
def GetPrefixScore(cls):
return cls._PREFIX_SCORE
def Run(self, config_filepaths, capture_input_filepaths,
test_data_generator_names, eval_score_names, output_dir,
render_input_filepaths=None, echo_path_simulator_name=(
def Run(self,
config_filepaths,
capture_input_filepaths,
test_data_generator_names,
eval_score_names,
output_dir,
render_input_filepaths=None,
echo_path_simulator_name=(
echo_path_simulation.NoEchoPathSimulator.NAME)):
"""Runs the APM simulation.
@ -129,19 +136,23 @@ class ApmModuleSimulator(object):
self._base_output_path = os.path.abspath(output_dir)
# Output path used to cache the data shared across simulations.
self._output_cache_path = os.path.join(self._base_output_path, '_cache')
self._output_cache_path = os.path.join(self._base_output_path,
'_cache')
# Instance test data generators.
self._test_data_generators = [self._test_data_generator_factory.GetInstance(
self._test_data_generators = [
self._test_data_generator_factory.GetInstance(
test_data_generators_class=(
self._TEST_DATA_GENERATOR_CLASSES[name])) for name in (
test_data_generator_names)]
self._TEST_DATA_GENERATOR_CLASSES[name]))
for name in (test_data_generator_names)
]
# Instance evaluation score workers.
self._evaluation_score_workers = [
self._evaluation_score_factory.GetInstance(
evaluation_score_class=self._EVAL_SCORE_WORKER_CLASSES[name]) for (
name) in eval_score_names]
evaluation_score_class=self._EVAL_SCORE_WORKER_CLASSES[name])
for (name) in eval_score_names
]
# Set APM configuration file paths.
self._config_filepaths = self._CreatePathsCollection(config_filepaths)
@ -154,13 +165,13 @@ class ApmModuleSimulator(object):
self._render_input_filepaths = None
else:
# Set both capture and render input signals.
self._SetTestInputSignalFilePaths(
capture_input_filepaths, render_input_filepaths)
self._SetTestInputSignalFilePaths(capture_input_filepaths,
render_input_filepaths)
# Set the echo path simulator class.
self._echo_path_simulator_class = (
echo_path_simulation.EchoPathSimulator.REGISTERED_CLASSES[
echo_path_simulator_name])
echo_path_simulation.EchoPathSimulator.
REGISTERED_CLASSES[echo_path_simulator_name])
self._SimulateAll()
@ -201,13 +212,15 @@ class ApmModuleSimulator(object):
self._render_input_filepaths[capture_input_name])
render_input_name = '(none)' if without_render_input else (
self._ExtractFileName(render_input_filepath))
echo_path_simulator = (
echo_path_simulation_factory.EchoPathSimulatorFactory.GetInstance(
self._echo_path_simulator_class, render_input_filepath))
echo_path_simulator = (echo_path_simulation_factory.
EchoPathSimulatorFactory.GetInstance(
self._echo_path_simulator_class,
render_input_filepath))
# Try different test data generators.
for test_data_generators in self._test_data_generators:
logging.info('APM config preset: <%s>, capture: <%s>, render: <%s>,'
logging.info(
'APM config preset: <%s>, capture: <%s>, render: <%s>,'
'test data generator: <%s>, echo simulator: <%s>',
config_name, capture_input_name, render_input_name,
test_data_generators.NAME, echo_path_simulator.NAME)
@ -217,12 +230,13 @@ class ApmModuleSimulator(object):
capture_annotations_cache_path,
self._PREFIX_TEST_DATA_GEN + test_data_generators.NAME)
data_access.MakeDirectory(test_data_cache_path)
logging.debug('test data cache path: <%s>', test_data_cache_path)
logging.debug('test data cache path: <%s>',
test_data_cache_path)
# Output path for the echo simulator and APM input mixer output.
echo_test_data_cache_path = os.path.join(
test_data_cache_path, 'echosim-{}'.format(
echo_path_simulator.NAME))
test_data_cache_path,
'echosim-{}'.format(echo_path_simulator.NAME))
data_access.MakeDirectory(echo_test_data_cache_path)
logging.debug('echo test data cache path: <%s>',
echo_test_data_cache_path)
@ -238,7 +252,8 @@ class ApmModuleSimulator(object):
data_access.MakeDirectory(output_path)
logging.debug('output path: <%s>', output_path)
self._Simulate(test_data_generators, capture_input_filepath,
self._Simulate(test_data_generators,
capture_input_filepath,
render_input_filepath, test_data_cache_path,
echo_test_data_cache_path, output_path,
config_filepath, echo_path_simulator)
@ -257,7 +272,8 @@ class ApmModuleSimulator(object):
Raises:
InputSignalCreatorException
"""
filename = os.path.splitext(os.path.split(input_signal_filepath)[-1])[0]
filename = os.path.splitext(
os.path.split(input_signal_filepath)[-1])[0]
filename_parts = filename.split('-')
if len(filename_parts) < 2:
@ -271,7 +287,9 @@ class ApmModuleSimulator(object):
input_signal_filepath, signal)
data_access.Metadata.SaveFileMetadata(input_signal_filepath, metadata)
def _ExtractCaptureAnnotations(self, input_filepath, output_path,
def _ExtractCaptureAnnotations(self,
input_filepath,
output_path,
annotation_name=""):
self._annotator.Extract(input_filepath)
self._annotator.Save(output_path, annotation_name)
@ -357,7 +375,8 @@ class ApmModuleSimulator(object):
self._evaluator.Run(
evaluation_score_workers=self._evaluation_score_workers,
apm_input_metadata=apm_input_metadata,
apm_output_filepath=self._audioproc_wrapper.output_filepath,
apm_output_filepath=self._audioproc_wrapper.
output_filepath,
reference_input_filepath=reference_signal_filepath,
render_input_filepath=render_input_filepath,
output_path=evaluation_output_path,
@ -371,7 +390,8 @@ class ApmModuleSimulator(object):
echo_filepath=echo_path_filepath,
render_filepath=render_input_filepath,
capture_filepath=apm_input_filepath,
apm_output_filepath=self._audioproc_wrapper.output_filepath,
apm_output_filepath=self._audioproc_wrapper.
output_filepath,
apm_reference_filepath=reference_signal_filepath,
apm_config_filepath=config_filepath,
)

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Unit tests for the simulation module.
"""
@ -39,7 +38,8 @@ class TestApmModuleSimulator(unittest.TestCase):
silence = pydub.AudioSegment.silent(duration=1000, frame_rate=48000)
fake_signal = signal_processing.SignalProcessingUtils.GenerateWhiteNoise(
silence)
self._fake_audio_track_path = os.path.join(self._output_path, 'fake.wav')
self._fake_audio_track_path = os.path.join(self._output_path,
'fake.wav')
signal_processing.SignalProcessingUtils.SaveWav(
self._fake_audio_track_path, fake_signal)
@ -63,10 +63,9 @@ class TestApmModuleSimulator(unittest.TestCase):
noise_tracks_path='',
copy_with_identity=False))
evaluation_score_factory = eval_scores_factory.EvaluationScoreWorkerFactory(
polqa_tool_bin_path=os.path.join(
os.path.dirname(__file__), 'fake_polqa'),
echo_metric_tool_bin_path=None
)
polqa_tool_bin_path=os.path.join(os.path.dirname(__file__),
'fake_polqa'),
echo_metric_tool_bin_path=None)
# Instance simulator.
simulator = simulation.ApmModuleSimulator(
@ -74,9 +73,12 @@ class TestApmModuleSimulator(unittest.TestCase):
evaluation_score_factory=evaluation_score_factory,
ap_wrapper=ap_wrapper,
evaluator=evaluator,
external_vads={'fake': external_vad.ExternalVad(os.path.join(
os.path.dirname(__file__), 'fake_external_vad.py'), 'fake')}
)
external_vads={
'fake':
external_vad.ExternalVad(
os.path.join(os.path.dirname(__file__),
'fake_external_vad.py'), 'fake')
})
# What to simulate.
config_files = ['apm_configs/default.json']
@ -85,8 +87,7 @@ class TestApmModuleSimulator(unittest.TestCase):
eval_scores = ['audio_level_mean', 'polqa']
# Run all simulations.
simulator.Run(
config_filepaths=config_files,
simulator.Run(config_filepaths=config_files,
capture_input_filepaths=input_files,
test_data_generator_names=test_data_generators,
eval_score_names=eval_scores,
@ -114,12 +115,12 @@ class TestApmModuleSimulator(unittest.TestCase):
copy_with_identity=False)),
evaluation_score_factory=(
eval_scores_factory.EvaluationScoreWorkerFactory(
polqa_tool_bin_path=os.path.join(
os.path.dirname(__file__), 'fake_polqa'),
echo_metric_tool_bin_path=None
)),
polqa_tool_bin_path=os.path.join(os.path.dirname(__file__),
'fake_polqa'),
echo_metric_tool_bin_path=None)),
ap_wrapper=audioproc_wrapper.AudioProcWrapper(
audioproc_wrapper.AudioProcWrapper.DEFAULT_APM_SIMULATOR_BIN_PATH),
audioproc_wrapper.AudioProcWrapper.
DEFAULT_APM_SIMULATOR_BIN_PATH),
evaluator=evaluation.ApmModuleEvaluator())
# Inexistent input files to be silently created.
@ -127,18 +128,17 @@ class TestApmModuleSimulator(unittest.TestCase):
os.path.join(self._tmp_path, 'pure_tone-440_1000.wav'),
os.path.join(self._tmp_path, 'pure_tone-1000_500.wav'),
]
self.assertFalse(any([os.path.exists(input_file) for input_file in (
input_files)]))
self.assertFalse(
any([os.path.exists(input_file) for input_file in (input_files)]))
# The input files are created during the simulation.
simulator.Run(
config_filepaths=['apm_configs/default.json'],
simulator.Run(config_filepaths=['apm_configs/default.json'],
capture_input_filepaths=input_files,
test_data_generator_names=['identity'],
eval_score_names=['audio_level_peak'],
output_dir=self._output_path)
self.assertTrue(all([os.path.exists(input_file) for input_file in (
input_files)]))
self.assertTrue(
all([os.path.exists(input_file) for input_file in (input_files)]))
def testPureToneGenerationWithTotalHarmonicDistorsion(self):
logging.warning = mock.MagicMock(name='warning')
@ -152,12 +152,12 @@ class TestApmModuleSimulator(unittest.TestCase):
copy_with_identity=False)),
evaluation_score_factory=(
eval_scores_factory.EvaluationScoreWorkerFactory(
polqa_tool_bin_path=os.path.join(
os.path.dirname(__file__), 'fake_polqa'),
echo_metric_tool_bin_path=None
)),
polqa_tool_bin_path=os.path.join(os.path.dirname(__file__),
'fake_polqa'),
echo_metric_tool_bin_path=None)),
ap_wrapper=audioproc_wrapper.AudioProcWrapper(
audioproc_wrapper.AudioProcWrapper.DEFAULT_APM_SIMULATOR_BIN_PATH),
audioproc_wrapper.AudioProcWrapper.
DEFAULT_APM_SIMULATOR_BIN_PATH),
evaluator=evaluation.ApmModuleEvaluator())
# What to simulate.
@ -166,8 +166,7 @@ class TestApmModuleSimulator(unittest.TestCase):
eval_scores = ['thd']
# Should work.
simulator.Run(
config_filepaths=config_files,
simulator.Run(config_filepaths=config_files,
capture_input_filepaths=input_files,
test_data_generator_names=['identity'],
eval_score_names=eval_scores,

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Test data generators producing signals pairs intended to be used to
test the APM module. Each pair consists of a noisy input and a reference signal.
The former is used as APM input and it is generated by adding noise to a
@ -96,8 +95,8 @@ class TestDataGenerator(object):
def reference_signal_filepaths(self):
return self._reference_signal_filepaths
def Generate(
self, input_signal_filepath, test_data_cache_path, base_output_path):
def Generate(self, input_signal_filepath, test_data_cache_path,
base_output_path):
"""Generates a set of noisy input and reference audiotrack file pairs.
This method initializes an empty set of pairs and calls the _Generate()
@ -110,8 +109,8 @@ class TestDataGenerator(object):
base_output_path: base path where output is written.
"""
self.Clear()
self._Generate(
input_signal_filepath, test_data_cache_path, base_output_path)
self._Generate(input_signal_filepath, test_data_cache_path,
base_output_path)
def Clear(self):
"""Clears the generated output path dictionaries.
@ -120,8 +119,8 @@ class TestDataGenerator(object):
self._apm_output_paths = {}
self._reference_signal_filepaths = {}
def _Generate(
self, input_signal_filepath, test_data_cache_path, base_output_path):
def _Generate(self, input_signal_filepath, test_data_cache_path,
base_output_path):
"""Abstract method to be implemented in each concrete class.
"""
raise NotImplementedError()
@ -143,8 +142,8 @@ class TestDataGenerator(object):
output_path = self._MakeDir(base_output_path, config_name)
self._AddNoiseReferenceFilesPair(
config_name=config_name,
noisy_signal_filepath=noisy_mix_filepaths[
noise_track_name][snr_noisy],
noisy_signal_filepath=noisy_mix_filepaths[noise_track_name]
[snr_noisy],
reference_signal_filepath=noisy_mix_filepaths[
noise_track_name][snr_refence],
output_path=output_path)
@ -191,16 +190,17 @@ class IdentityTestDataGenerator(TestDataGenerator):
def copy_with_identity(self):
return self._copy_with_identity
def _Generate(
self, input_signal_filepath, test_data_cache_path, base_output_path):
def _Generate(self, input_signal_filepath, test_data_cache_path,
base_output_path):
config_name = 'default'
output_path = self._MakeDir(base_output_path, config_name)
if self._copy_with_identity:
input_signal_filepath_new = os.path.join(
test_data_cache_path, os.path.split(input_signal_filepath)[1])
logging.info('copying ' + input_signal_filepath + ' to ' + (
input_signal_filepath_new))
test_data_cache_path,
os.path.split(input_signal_filepath)[1])
logging.info('copying ' + input_signal_filepath + ' to ' +
(input_signal_filepath_new))
shutil.copy(input_signal_filepath, input_signal_filepath_new)
input_signal_filepath = input_signal_filepath_new
@ -233,8 +233,8 @@ class WhiteNoiseTestDataGenerator(TestDataGenerator):
def __init__(self, output_directory_prefix):
TestDataGenerator.__init__(self, output_directory_prefix)
def _Generate(
self, input_signal_filepath, test_data_cache_path, base_output_path):
def _Generate(self, input_signal_filepath, test_data_cache_path,
base_output_path):
# Load the input signal.
input_signal = signal_processing.SignalProcessingUtils.LoadWav(
input_signal_filepath)
@ -245,7 +245,8 @@ class WhiteNoiseTestDataGenerator(TestDataGenerator):
# Create the noisy mixes (once for each unique SNR value).
noisy_mix_filepaths = {}
snr_values = set([snr for pair in self._SNR_VALUE_PAIRS for snr in pair])
snr_values = set(
[snr for pair in self._SNR_VALUE_PAIRS for snr in pair])
for snr in snr_values:
noisy_signal_filepath = os.path.join(
test_data_cache_path,
@ -286,8 +287,8 @@ class NarrowBandNoiseTestDataGenerator(TestDataGenerator):
def __init__(self, output_directory_prefix):
TestDataGenerator.__init__(self, output_directory_prefix)
def _Generate(
self, input_signal_filepath, test_data_cache_path, base_output_path):
def _Generate(self, input_signal_filepath, test_data_cache_path,
base_output_path):
# TODO(alessiob): implement.
pass
@ -303,8 +304,8 @@ class AdditiveNoiseTestDataGenerator(TestDataGenerator):
NAME = 'additive_noise'
_NOISY_SIGNAL_FILENAME_TEMPLATE = '{0}_{1:d}_SNR.wav'
DEFAULT_NOISE_TRACKS_PATH = os.path.join(
os.path.dirname(__file__), os.pardir, 'noise_tracks')
DEFAULT_NOISE_TRACKS_PATH = os.path.join(os.path.dirname(__file__),
os.pardir, 'noise_tracks')
# TODO(alessiob): Make the list of SNR pairs customizable.
# Each pair indicates the clean vs. noisy and reference vs. noisy SNRs.
@ -320,15 +321,17 @@ class AdditiveNoiseTestDataGenerator(TestDataGenerator):
def __init__(self, output_directory_prefix, noise_tracks_path):
TestDataGenerator.__init__(self, output_directory_prefix)
self._noise_tracks_path = noise_tracks_path
self._noise_tracks_file_names = [n for n in os.listdir(
self._noise_tracks_path) if n.lower().endswith('.wav')]
self._noise_tracks_file_names = [
n for n in os.listdir(self._noise_tracks_path)
if n.lower().endswith('.wav')
]
if len(self._noise_tracks_file_names) == 0:
raise exceptions.InitializationException(
'No wav files found in the noise tracks path %s' % (
self._noise_tracks_path))
'No wav files found in the noise tracks path %s' %
(self._noise_tracks_path))
def _Generate(
self, input_signal_filepath, test_data_cache_path, base_output_path):
def _Generate(self, input_signal_filepath, test_data_cache_path,
base_output_path):
"""Generates test data pairs using environmental noise.
For each noise track and pair of SNR values, the following two audio tracks
@ -337,7 +340,8 @@ class AdditiveNoiseTestDataGenerator(TestDataGenerator):
track enforcing the target SNR.
"""
# Init.
snr_values = set([snr for pair in self._SNR_VALUE_PAIRS for snr in pair])
snr_values = set(
[snr for pair in self._SNR_VALUE_PAIRS for snr in pair])
# Load the input signal.
input_signal = signal_processing.SignalProcessingUtils.LoadWav(
@ -347,10 +351,11 @@ class AdditiveNoiseTestDataGenerator(TestDataGenerator):
for noise_track_filename in self._noise_tracks_file_names:
# Load the noise track.
noise_track_name, _ = os.path.splitext(noise_track_filename)
noise_track_filepath = os.path.join(
self._noise_tracks_path, noise_track_filename)
noise_track_filepath = os.path.join(self._noise_tracks_path,
noise_track_filename)
if not os.path.exists(noise_track_filepath):
logging.error('cannot find the <%s> noise track', noise_track_filename)
logging.error('cannot find the <%s> noise track',
noise_track_filename)
raise exceptions.FileNotFoundError()
noise_signal = signal_processing.SignalProcessingUtils.LoadWav(
@ -361,25 +366,30 @@ class AdditiveNoiseTestDataGenerator(TestDataGenerator):
for snr in snr_values:
noisy_signal_filepath = os.path.join(
test_data_cache_path,
self._NOISY_SIGNAL_FILENAME_TEMPLATE.format(noise_track_name, snr))
self._NOISY_SIGNAL_FILENAME_TEMPLATE.format(
noise_track_name, snr))
# Create and save if not done.
if not os.path.exists(noisy_signal_filepath):
# Create noisy signal.
noisy_signal = signal_processing.SignalProcessingUtils.MixSignals(
input_signal, noise_signal, snr,
pad_noise=signal_processing.SignalProcessingUtils.MixPadding.LOOP)
input_signal,
noise_signal,
snr,
pad_noise=signal_processing.SignalProcessingUtils.
MixPadding.LOOP)
# Save.
signal_processing.SignalProcessingUtils.SaveWav(
noisy_signal_filepath, noisy_signal)
# Add file to the collection of mixes.
noisy_mix_filepaths[noise_track_name][snr] = noisy_signal_filepath
noisy_mix_filepaths[noise_track_name][
snr] = noisy_signal_filepath
# Add all the noise-SNR pairs.
self._AddNoiseSnrPairs(
base_output_path, noisy_mix_filepaths, self._SNR_VALUE_PAIRS)
self._AddNoiseSnrPairs(base_output_path, noisy_mix_filepaths,
self._SNR_VALUE_PAIRS)
@TestDataGenerator.RegisterClass
@ -414,8 +424,8 @@ class ReverberationTestDataGenerator(TestDataGenerator):
TestDataGenerator.__init__(self, output_directory_prefix)
self._aechen_ir_database_path = aechen_ir_database_path
def _Generate(
self, input_signal_filepath, test_data_cache_path, base_output_path):
def _Generate(self, input_signal_filepath, test_data_cache_path,
base_output_path):
"""Generates test data pairs using reverberation noise.
For each impulse response, one noise track is created. For each impulse
@ -425,7 +435,8 @@ class ReverberationTestDataGenerator(TestDataGenerator):
track enforcing the target SNR.
"""
# Init.
snr_values = set([snr for pair in self._SNR_VALUE_PAIRS for snr in pair])
snr_values = set(
[snr for pair in self._SNR_VALUE_PAIRS for snr in pair])
# Load the input signal.
input_signal = signal_processing.SignalProcessingUtils.LoadWav(
@ -435,8 +446,8 @@ class ReverberationTestDataGenerator(TestDataGenerator):
for impulse_response_name in self._IMPULSE_RESPONSES:
noise_track_filename = self._NOISE_TRACK_FILENAME_TEMPLATE.format(
impulse_response_name)
noise_track_filepath = os.path.join(
test_data_cache_path, noise_track_filename)
noise_track_filepath = os.path.join(test_data_cache_path,
noise_track_filename)
noise_signal = None
try:
# Load noise track.
@ -448,7 +459,8 @@ class ReverberationTestDataGenerator(TestDataGenerator):
self._aechen_ir_database_path,
self._IMPULSE_RESPONSES[impulse_response_name])
noise_signal = self._GenerateNoiseTrack(
noise_track_filepath, input_signal, impulse_response_filepath)
noise_track_filepath, input_signal,
impulse_response_filepath)
assert noise_signal is not None
# Create the noisy mixes (once for each unique SNR value).
@ -470,7 +482,8 @@ class ReverberationTestDataGenerator(TestDataGenerator):
noisy_signal_filepath, noisy_signal)
# Add file to the collection of mixes.
noisy_mix_filepaths[impulse_response_name][snr] = noisy_signal_filepath
noisy_mix_filepaths[impulse_response_name][
snr] = noisy_signal_filepath
# Add all the noise-SNR pairs.
self._AddNoiseSnrPairs(base_output_path, noisy_mix_filepaths,
@ -496,8 +509,10 @@ class ReverberationTestDataGenerator(TestDataGenerator):
impulse_response = data['h_air'].flatten()
if self._MAX_IMPULSE_RESPONSE_LENGTH is not None:
logging.info('truncating impulse response from %d to %d samples',
len(impulse_response), self._MAX_IMPULSE_RESPONSE_LENGTH)
impulse_response = impulse_response[:self._MAX_IMPULSE_RESPONSE_LENGTH]
len(impulse_response),
self._MAX_IMPULSE_RESPONSE_LENGTH)
impulse_response = impulse_response[:self.
_MAX_IMPULSE_RESPONSE_LENGTH]
# Apply impulse response.
processed_signal = (

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""TestDataGenerator factory class.
"""
@ -52,7 +51,8 @@ class TestDataGeneratorFactory(object):
"""
if self._output_directory_prefix is None:
raise exceptions.InitializationException(
'The output directory prefix for test data generators is not set')
'The output directory prefix for test data generators is not set'
)
logging.debug('factory producing %s', test_data_generators_class)
if test_data_generators_class == (

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Unit tests for the test_data_generation module.
"""
@ -43,8 +42,9 @@ class TestTestDataGenerators(unittest.TestCase):
]
for impulse_response_mat_file_name in impulse_response_mat_file_names:
data = {'h_air': np.random.rand(1, 1000).astype('<f8')}
scipy.io.savemat(os.path.join(
self._fake_air_db_path, impulse_response_mat_file_name), data)
scipy.io.savemat(
os.path.join(self._fake_air_db_path,
impulse_response_mat_file_name), data)
def tearDown(self):
"""Recursively delete temporary folders."""
@ -73,8 +73,8 @@ class TestTestDataGenerators(unittest.TestCase):
generators_factory.SetOutputDirectoryPrefix('datagen-')
# Use a simple input file as clean input signal.
input_signal_filepath = os.path.join(
os.getcwd(), 'probing_signals', 'tone-880.wav')
input_signal_filepath = os.path.join(os.getcwd(), 'probing_signals',
'tone-880.wav')
self.assertTrue(os.path.exists(input_signal_filepath))
# Load input signal.
@ -88,8 +88,7 @@ class TestTestDataGenerators(unittest.TestCase):
registered_classes[generator_name])
# Generate the noisy input - reference pairs.
generator.Generate(
input_signal_filepath=input_signal_filepath,
generator.Generate(input_signal_filepath=input_signal_filepath,
test_data_cache_path=self._test_data_cache_path,
base_output_path=self._base_output_path)
@ -104,14 +103,15 @@ class TestTestDataGenerators(unittest.TestCase):
self.assertTrue(os.path.exists(self._test_data_cache_path))
# Use a simple input file as clean input signal.
input_signal_filepath = os.path.join(
os.getcwd(), 'probing_signals', 'tone-880.wav')
input_signal_filepath = os.path.join(os.getcwd(), 'probing_signals',
'tone-880.wav')
self.assertTrue(os.path.exists(input_signal_filepath))
def GetNoiseReferenceFilePaths(identity_generator):
noisy_signal_filepaths = identity_generator.noisy_signal_filepaths
reference_signal_filepaths = identity_generator.reference_signal_filepaths
assert noisy_signal_filepaths.keys() == reference_signal_filepaths.keys()
assert noisy_signal_filepaths.keys(
) == reference_signal_filepaths.keys()
assert len(noisy_signal_filepaths.keys()) == 1
key = noisy_signal_filepaths.keys()[0]
return noisy_signal_filepaths[key], reference_signal_filepaths[key]
@ -120,7 +120,8 @@ class TestTestDataGenerators(unittest.TestCase):
for copy_with_identity in [False, True]:
# Instance the generator through the factory.
factory = test_data_generation_factory.TestDataGeneratorFactory(
aechen_ir_database_path='', noise_tracks_path='',
aechen_ir_database_path='',
noise_tracks_path='',
copy_with_identity=copy_with_identity)
factory.SetOutputDirectoryPrefix('datagen-')
generator = factory.GetInstance(
@ -130,8 +131,7 @@ class TestTestDataGenerators(unittest.TestCase):
# Generate test data and extract the paths to the noise and the reference
# files.
generator.Generate(
input_signal_filepath=input_signal_filepath,
generator.Generate(input_signal_filepath=input_signal_filepath,
test_data_cache_path=self._test_data_cache_path,
base_output_path=self._base_output_path)
noisy_signal_filepath, reference_signal_filepath = (
@ -139,24 +139,25 @@ class TestTestDataGenerators(unittest.TestCase):
# Check that a copy is made if and only if |copy_with_identity| is True.
if copy_with_identity:
self.assertNotEqual(noisy_signal_filepath, input_signal_filepath)
self.assertNotEqual(reference_signal_filepath, input_signal_filepath)
self.assertNotEqual(noisy_signal_filepath,
input_signal_filepath)
self.assertNotEqual(reference_signal_filepath,
input_signal_filepath)
else:
self.assertEqual(noisy_signal_filepath, input_signal_filepath)
self.assertEqual(reference_signal_filepath, input_signal_filepath)
self.assertEqual(reference_signal_filepath,
input_signal_filepath)
def _CheckGeneratedPairsListSizes(self, generator):
config_names = generator.config_names
number_of_pairs = len(config_names)
self.assertEqual(number_of_pairs,
len(generator.noisy_signal_filepaths))
self.assertEqual(number_of_pairs,
len(generator.apm_output_paths))
self.assertEqual(number_of_pairs, len(generator.apm_output_paths))
self.assertEqual(number_of_pairs,
len(generator.reference_signal_filepaths))
def _CheckGeneratedPairsSignalDurations(
self, generator, input_signal):
def _CheckGeneratedPairsSignalDurations(self, generator, input_signal):
"""Checks duration of the generated signals.
Checks that the noisy input and the reference tracks are audio files
@ -178,8 +179,8 @@ class TestTestDataGenerators(unittest.TestCase):
noisy_signal_filepath)
# Check noisy input signal length.
noisy_signal_length = (
signal_processing.SignalProcessingUtils.CountSamples(noisy_signal))
noisy_signal_length = (signal_processing.SignalProcessingUtils.
CountSamples(noisy_signal))
self.assertGreaterEqual(noisy_signal_length, input_signal_length)
# Load the reference file.
@ -189,10 +190,10 @@ class TestTestDataGenerators(unittest.TestCase):
reference_signal_filepath)
# Check noisy input signal length.
reference_signal_length = (
signal_processing.SignalProcessingUtils.CountSamples(
reference_signal))
self.assertGreaterEqual(reference_signal_length, input_signal_length)
reference_signal_length = (signal_processing.SignalProcessingUtils.
CountSamples(reference_signal))
self.assertGreaterEqual(reference_signal_length,
input_signal_length)
def _CheckGeneratedPairsOutputPaths(self, generator):
"""Checks that the output path created by the generator exists.

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Plots statistics from WebRTC integration test logs.
Usage: $ python plot_webrtc_test_logs.py filename.txt
@ -280,7 +279,12 @@ def Plot(y_metric, x_metric, metrics):
label = y_metric + ' - ' + str(key)
plt.plot(x, y, label=label, linewidth=1.5, marker='o', markersize=5,
plt.plot(x,
y,
label=label,
linewidth=1.5,
marker='o',
markersize=5,
markeredgewidth=0.0)
@ -315,7 +319,11 @@ def PlotFigure(settings, y_metrics, x_metric, metrics, title):
pos += 1
plt.xlabel(x_metric, fontsize='large')
plt.subplots_adjust(left=0.06, right=0.98, bottom=0.05, top=0.94, hspace=0.08)
plt.subplots_adjust(left=0.06,
right=0.98,
bottom=0.05,
top=0.94,
hspace=0.08)
def GetTitle(filename, setting):
@ -335,14 +343,14 @@ def GetTitle(filename, setting):
for i in range(0, len(framerate)):
title += framerate[i].split('.')[0] + ', '
if (setting != CODEC_IMPLEMENTATION_NAME[1] and
setting != ENCODER_IMPLEMENTATION_NAME[1]):
if (setting != CODEC_IMPLEMENTATION_NAME[1]
and setting != ENCODER_IMPLEMENTATION_NAME[1]):
enc_names = ParseSetting(filename, ENCODER_IMPLEMENTATION_NAME[1])
for i in range(0, len(enc_names)):
title += enc_names[i] + ', '
if (setting != CODEC_IMPLEMENTATION_NAME[1] and
setting != DECODER_IMPLEMENTATION_NAME[1]):
if (setting != CODEC_IMPLEMENTATION_NAME[1]
and setting != DECODER_IMPLEMENTATION_NAME[1]):
dec_names = ParseSetting(filename, DECODER_IMPLEMENTATION_NAME[1])
for i in range(0, len(dec_names)):
title += dec_names[i] + ', '
@ -393,8 +401,8 @@ def main():
resolutions = ParseSetting(filename, WIDTH[1])
idx = GetIdx("Select metric for x-axis:\n%s" % ToString(X_SETTINGS))
if X_SETTINGS[idx] == BITRATE:
idx = GetIdx("Plot per:\n%s" % ToStringWithoutMetric(SUBPLOT_SETTINGS,
BITRATE))
idx = GetIdx("Plot per:\n%s" %
ToStringWithoutMetric(SUBPLOT_SETTINGS, BITRATE))
idx_setting = METRICS_TO_PARSE.index(SUBPLOT_SETTINGS[idx])
# Plot one metric. One subplot for each resolution.
# Per subplot: metric vs bitrate (per setting).

@ -72,7 +72,6 @@ class CheckBugEntryFieldTest(unittest.TestCase):
class CheckNewlineAtTheEndOfProtoFilesTest(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
self.proto_file_path = os.path.join(self.tmp_dir, 'foo.proto')
@ -85,25 +84,24 @@ class CheckNewlineAtTheEndOfProtoFilesTest(unittest.TestCase):
def testErrorIfProtoFileDoesNotEndWithNewline(self):
self._GenerateProtoWithoutNewlineAtTheEnd()
self.input_api.files = [MockFile(self.proto_file_path)]
errors = PRESUBMIT.CheckNewlineAtTheEndOfProtoFiles(self.input_api,
self.output_api,
lambda x: True)
errors = PRESUBMIT.CheckNewlineAtTheEndOfProtoFiles(
self.input_api, self.output_api, lambda x: True)
self.assertEqual(1, len(errors))
self.assertEqual(
'File %s must end with exactly one newline.' % self.proto_file_path,
str(errors[0]))
'File %s must end with exactly one newline.' %
self.proto_file_path, str(errors[0]))
def testNoErrorIfProtoFileEndsWithNewline(self):
self._GenerateProtoWithNewlineAtTheEnd()
self.input_api.files = [MockFile(self.proto_file_path)]
errors = PRESUBMIT.CheckNewlineAtTheEndOfProtoFiles(self.input_api,
self.output_api,
lambda x: True)
errors = PRESUBMIT.CheckNewlineAtTheEndOfProtoFiles(
self.input_api, self.output_api, lambda x: True)
self.assertEqual(0, len(errors))
def _GenerateProtoWithNewlineAtTheEnd(self):
with open(self.proto_file_path, 'w') as f:
f.write(textwrap.dedent("""
f.write(
textwrap.dedent("""
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package webrtc.audioproc;
@ -111,14 +109,14 @@ class CheckNewlineAtTheEndOfProtoFilesTest(unittest.TestCase):
def _GenerateProtoWithoutNewlineAtTheEnd(self):
with open(self.proto_file_path, 'w') as f:
f.write(textwrap.dedent("""
f.write(
textwrap.dedent("""
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package webrtc.audioproc;"""))
class CheckNoMixingSourcesTest(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
self.file_path = os.path.join(self.tmp_dir, 'BUILD.gn')
@ -159,7 +157,8 @@ class CheckNoMixingSourcesTest(unittest.TestCase):
self._AssertNumberOfErrorsWithSources(0, ['foo.m', 'bar.mm', 'bar.h'])
def testNoErrorIfSourcesAreInExclusiveIfBranches(self):
self._GenerateBuildFile(textwrap.dedent("""
self._GenerateBuildFile(
textwrap.dedent("""
rtc_library("bar_foo") {
if (is_win) {
sources = [
@ -192,7 +191,8 @@ class CheckNoMixingSourcesTest(unittest.TestCase):
self.assertEqual(0, len(errors))
def testErrorIfSourcesAreNotInExclusiveIfBranches(self):
self._GenerateBuildFile(textwrap.dedent("""
self._GenerateBuildFile(
textwrap.dedent("""
rtc_library("bar_foo") {
if (is_win) {
sources = [
@ -237,8 +237,10 @@ class CheckNoMixingSourcesTest(unittest.TestCase):
self.assertTrue('bar.c' in str(errors[0]))
def _AssertNumberOfErrorsWithSources(self, number_of_errors, sources):
assert len(sources) == 3, 'This function accepts a list of 3 source files'
self._GenerateBuildFile(textwrap.dedent("""
assert len(
sources) == 3, 'This function accepts a list of 3 source files'
self._GenerateBuildFile(
textwrap.dedent("""
rtc_static_library("bar_foo") {
sources = [
"%s",

@ -33,7 +33,9 @@ class MockInputApi(object):
return self.files
@classmethod
def FilterSourceFile(cls, affected_file, files_to_check=(),
def FilterSourceFile(cls,
affected_file,
files_to_check=(),
files_to_skip=()):
# pylint: disable=unused-argument
return True
@ -69,7 +71,8 @@ class MockOutputApi(object):
class PresubmitError(PresubmitResult):
def __init__(self, message, items=None, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
MockOutputApi.PresubmitResult.__init__(self, message, items,
long_text)
self.type = 'error'
@ -102,13 +105,17 @@ class MockFile(object):
MockInputApi for presubmit unittests.
"""
def __init__(self, local_path, new_contents=None, old_contents=None,
def __init__(self,
local_path,
new_contents=None,
old_contents=None,
action='A'):
if new_contents is None:
new_contents = ["Data"]
self._local_path = local_path
self._new_contents = new_contents
self._changed_contents = [(i + 1, l) for i, l in enumerate(new_contents)]
self._changed_contents = [(i + 1, l)
for i, l in enumerate(new_contents)]
self._action = action
self._old_contents = old_contents

@ -18,7 +18,6 @@ import subprocess
import sys
import tempfile
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Chrome browsertests will throw away stderr; make sure that output is not lost.
@ -30,36 +29,52 @@ def _ParseArgs():
usage = 'usage: %prog [options]'
parser = optparse.OptionParser(usage=usage)
parser.add_option('--label', type='string', default='MY_TEST',
parser.add_option('--label',
type='string',
default='MY_TEST',
help=('Label of the test, used to identify different '
'tests. Default: %default'))
parser.add_option('--ref_video', type='string',
parser.add_option('--ref_video',
type='string',
help='Reference video to compare with (YUV).')
parser.add_option('--test_video', type='string',
parser.add_option('--test_video',
type='string',
help=('Test video to be compared with the reference '
'video (YUV).'))
parser.add_option('--frame_analyzer', type='string',
parser.add_option('--frame_analyzer',
type='string',
help='Path to the frame analyzer executable.')
parser.add_option('--aligned_output_file', type='string',
parser.add_option('--aligned_output_file',
type='string',
help='Path for output aligned YUV or Y4M file.')
parser.add_option('--vmaf', type='string',
help='Path to VMAF executable.')
parser.add_option('--vmaf_model', type='string',
parser.add_option('--vmaf', type='string', help='Path to VMAF executable.')
parser.add_option('--vmaf_model',
type='string',
help='Path to VMAF model.')
parser.add_option('--vmaf_phone_model', action='store_true',
parser.add_option('--vmaf_phone_model',
action='store_true',
help='Whether to use phone model in VMAF.')
parser.add_option('--yuv_frame_width', type='int', default=640,
parser.add_option(
'--yuv_frame_width',
type='int',
default=640,
help='Width of the YUV file\'s frames. Default: %default')
parser.add_option('--yuv_frame_height', type='int', default=480,
parser.add_option(
'--yuv_frame_height',
type='int',
default=480,
help='Height of the YUV file\'s frames. Default: %default')
parser.add_option('--chartjson_result_file', type='str', default=None,
parser.add_option('--chartjson_result_file',
type='str',
default=None,
help='Where to store perf results in chartjson format.')
options, _ = parser.parse_args()
if not options.ref_video:
parser.error('You must provide a path to the reference video!')
if not os.path.exists(options.ref_video):
parser.error('Cannot find the reference video at %s' % options.ref_video)
parser.error('Cannot find the reference video at %s' %
options.ref_video)
if not options.test_video:
parser.error('You must provide a path to the test video!')
@ -67,7 +82,8 @@ def _ParseArgs():
parser.error('Cannot find the test video at %s' % options.test_video)
if not options.frame_analyzer:
parser.error('You must provide the path to the frame analyzer executable!')
parser.error(
'You must provide the path to the frame analyzer executable!')
if not os.path.exists(options.frame_analyzer):
parser.error('Cannot find frame analyzer executable at %s!' %
options.frame_analyzer)
@ -77,6 +93,7 @@ def _ParseArgs():
return options
def _DevNull():
"""On Windows, sometimes the inherited stdin handle from the parent process
fails. Work around this by passing null as stdin to the subprocess commands.
@ -96,13 +113,16 @@ def _RunFrameAnalyzer(options, yuv_directory=None):
'--height=%d' % options.yuv_frame_height,
]
if options.chartjson_result_file:
cmd.append('--chartjson_result_file=%s' % options.chartjson_result_file)
cmd.append('--chartjson_result_file=%s' %
options.chartjson_result_file)
if options.aligned_output_file:
cmd.append('--aligned_output_file=%s' % options.aligned_output_file)
if yuv_directory:
cmd.append('--yuv_directory=%s' % yuv_directory)
frame_analyzer = subprocess.Popen(cmd, stdin=_DevNull(),
stdout=sys.stdout, stderr=sys.stderr)
frame_analyzer = subprocess.Popen(cmd,
stdin=_DevNull(),
stdout=sys.stdout,
stderr=sys.stderr)
frame_analyzer.wait()
if frame_analyzer.returncode != 0:
print('Failed to run frame analyzer.')
@ -131,8 +151,10 @@ def _RunVmaf(options, yuv_directory, logfile):
if options.vmaf_phone_model:
cmd.append('--phone-model')
vmaf = subprocess.Popen(cmd, stdin=_DevNull(),
stdout=sys.stdout, stderr=sys.stderr)
vmaf = subprocess.Popen(cmd,
stdin=_DevNull(),
stdout=sys.stdout,
stderr=sys.stderr)
vmaf.wait()
if vmaf.returncode != 0:
print('Failed to run VMAF.')
@ -183,5 +205,6 @@ def main():
return 0
if __name__ == '__main__':
sys.exit(main())

@ -41,8 +41,13 @@ MICROSECONDS_IN_SECOND = 1e6
def main():
parser = argparse.ArgumentParser(
description='Plots metrics exported from WebRTC perf tests')
parser.add_argument('-m', '--metrics', type=str, nargs='*',
help='Metrics to plot. If nothing is specified, plots all available.')
parser.add_argument(
'-m',
'--metrics',
type=str,
nargs='*',
help=
'Metrics to plot. If nothing is specified, plots all available.')
args = parser.parse_args()
metrics_to_plot = set()
@ -60,7 +65,8 @@ def main():
print line
for metric in metrics:
if len(metrics_to_plot) > 0 and metric[GRAPH_NAME] not in metrics_to_plot:
if len(metrics_to_plot
) > 0 and metric[GRAPH_NAME] not in metrics_to_plot:
continue
figure = plt.figure()
@ -75,7 +81,8 @@ def main():
if start_x is None:
start_x = sample['time']
# Time is us, we want to show it in seconds.
x_values.append((sample['time'] - start_x) / MICROSECONDS_IN_SECOND)
x_values.append(
(sample['time'] - start_x) / MICROSECONDS_IN_SECOND)
y_values.append(sample['value'])
plt.ylabel('%s (%s)' % (metric[GRAPH_NAME], metric[UNITS]))
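The microsecond-to-second conversion above, isolated into a runnable sketch (the sample data is made up for illustration):

MICROSECONDS_IN_SECOND = 1e6
samples = [{'time': 0.0, 'value': 1.0}, {'time': 5e5, 'value': 2.0}]
start_x = samples[0]['time']
x_values = [(s['time'] - start_x) / MICROSECONDS_IN_SECOND for s in samples]
print(x_values)  # -> [0.0, 0.5]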

View File

@ -10,15 +10,14 @@
import network_tester_config_pb2
def AddConfig(all_configs,
packet_send_interval_ms,
packet_size,
def AddConfig(all_configs, packet_send_interval_ms, packet_size,
execution_time_ms):
config = all_configs.configs.add()
config.packet_send_interval_ms = packet_send_interval_ms
config.packet_size = packet_size
config.execution_time_ms = execution_time_ms
def main():
all_configs = network_tester_config_pb2.NetworkTesterAllConfigs()
AddConfig(all_configs, 10, 50, 200)
@ -26,5 +25,6 @@ def main():
with open("network_tester_config.dat", 'wb') as f:
f.write(all_configs.SerializeToString())
if __name__ == "__main__":
main()

View File

@ -20,6 +20,7 @@ import matplotlib.pyplot as plt
import network_tester_packet_pb2
def GetSize(file_to_parse):
data = file_to_parse.read(1)
if data == '':
@ -50,17 +51,16 @@ def GetTimeAxis(packets):
def CreateSendTimeDiffPlot(packets, plot):
first_send_time_diff = (
packets[0].arrival_timestamp - packets[0].send_timestamp)
y = [(packet.arrival_timestamp - packet.send_timestamp) - first_send_time_diff
for packet in packets]
first_send_time_diff = (packets[0].arrival_timestamp -
packets[0].send_timestamp)
y = [(packet.arrival_timestamp - packet.send_timestamp) -
first_send_time_diff for packet in packets]
plot.grid(True)
plot.set_title("SendTime difference [us]")
plot.plot(GetTimeAxis(packets), y)
class MovingAverageBitrate(object):
def __init__(self):
self.packet_window = []
self.window_time = 1000000
@ -104,11 +104,13 @@ def CreatePacketlossPlot(packets, plot):
first_arrival_time = 0
last_arrival_time = 0
last_arrival_time_diff = 0
for sequence_number in range(first_sequence_number, last_sequence_number + 1):
for sequence_number in range(first_sequence_number,
last_sequence_number + 1):
if sequence_number in packets_look_up:
y.append(0)
if first_arrival_time == 0:
first_arrival_time = packets_look_up[sequence_number].arrival_timestamp
first_arrival_time = packets_look_up[
sequence_number].arrival_timestamp
x_time = (packets_look_up[sequence_number].arrival_timestamp -
first_arrival_time)
if last_arrival_time != 0:
@ -117,7 +119,8 @@ def CreatePacketlossPlot(packets, plot):
x.append(x_time / 1000000.0)
else:
if last_arrival_time != 0 and last_arrival_time_diff != 0:
x.append((last_arrival_time + last_arrival_time_diff) / 1000000.0)
x.append(
(last_arrival_time + last_arrival_time_diff) / 1000000.0)
y.append(1)
plot.grid(True)
plot.set_title("Lost packets [0/1]")

View File

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Utility functions for calculating statistics.
"""
@ -20,9 +19,8 @@ def CountReordered(sequence_numbers):
A reordered index is an index `i` for which sequence_numbers[i] >=
sequence_numbers[i + 1]
"""
return sum(1 for (s1, s2) in zip(sequence_numbers,
sequence_numbers[1:]) if
s1 >= s2)
return sum(1 for (s1, s2) in zip(sequence_numbers, sequence_numbers[1:])
if s1 >= s2)
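A worked example of the definition in the docstring: in the list below only the pair (4, 3) satisfies s1 >= s2, so exactly one index counts as reordered.

def CountReordered(sequence_numbers):
    return sum(1 for (s1, s2) in zip(sequence_numbers, sequence_numbers[1:])
               if s1 >= s2)

print(CountReordered([1, 2, 4, 3, 5]))  # -> 1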
def SsrcNormalizedSizeTable(data_points):
@ -68,8 +66,8 @@ def Unwrap(data, mod):
"""
lst = data[:]
for i in range(1, len(data)):
lst[i] = lst[i - 1] + (lst[i] - lst[i - 1] +
mod // 2) % mod - (mod // 2)
lst[i] = lst[i - 1] + (lst[i] - lst[i - 1] + mod // 2) % mod - (mod //
2)
return lst
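A worked example of the unwrap arithmetic: each consecutive difference is mapped into a window of width mod centered near zero, so wrap-arounds turn into monotone steps.

def Unwrap(data, mod):
    lst = data[:]
    for i in range(1, len(data)):
        lst[i] = lst[i - 1] + (lst[i] - lst[i - 1] + mod // 2) % mod - mod // 2
    return lst

print(Unwrap([65534, 0, 2], 2**16))  # -> [65534, 65536, 65538]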

View File

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Run the tests with
python misc_test.py
@ -68,5 +67,6 @@ class TestMisc(unittest.TestCase):
self.assertEqual(random_data, random_data_copy)
if __name__ == "__main__":
unittest.main()

View File

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Parses protobuf RTC dumps."""
from __future__ import division
@ -16,8 +15,8 @@ import pyproto.logging.rtc_event_log.rtc_event_log_pb2 as rtc_pb
class DataPoint(object):
"""Simple container class for RTP events."""
def __init__(self, rtp_header_str, packet_size,
arrival_timestamp_us, incoming):
def __init__(self, rtp_header_str, packet_size, arrival_timestamp_us,
incoming):
"""Builds a data point by parsing an RTP header, size and arrival time.
RTP header structure is defined in RFC 3550 section 5.1.
@ -45,8 +44,8 @@ def ParseProtobuf(file_path):
with open(file_path, "rb") as f:
event_stream.ParseFromString(f.read())
return [DataPoint(event.rtp_packet.header,
event.rtp_packet.packet_length,
return [
DataPoint(event.rtp_packet.header, event.rtp_packet.packet_length,
event.timestamp_us, event.rtp_packet.incoming)
for event in event_stream.stream
if event.HasField("rtp_packet")]
for event in event_stream.stream if event.HasField("rtp_packet")
]

View File

@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Displays statistics and plots graphs from RTC protobuf dump."""
from __future__ import division
@ -54,8 +53,9 @@ class RTPStatistics(object):
for point in self.data_points:
print("{:>6}{:>14}{:>14}{:>6}{:>6}{:>3}{:>11}".format(
point.sequence_number, point.timestamp,
int(point.arrival_timestamp_ms), point.size, point.payload_type,
point.marker_bit, "0x{:x}".format(point.ssrc)))
int(point.arrival_timestamp_ms), point.size,
point.payload_type, point.marker_bit,
"0x{:x}".format(point.ssrc)))
def PrintSsrcInfo(self, ssrc_id, ssrc):
"""Prints packet and size statistics for a given SSRC.
@ -64,24 +64,27 @@ class RTPStatistics(object):
ssrc_id: textual identifier of SSRC printed beside statistics for it.
ssrc: SSRC by which to filter data and display statistics
"""
filtered_ssrc = [point for point in self.data_points if point.ssrc
== ssrc]
filtered_ssrc = [
point for point in self.data_points if point.ssrc == ssrc
]
payloads = misc.NormalizeCounter(
collections.Counter([point.payload_type for point in
filtered_ssrc]))
collections.Counter(
[point.payload_type for point in filtered_ssrc]))
payload_info = "payload type(s): {}".format(
", ".join(str(payload) for payload in payloads))
payload_info = "payload type(s): {}".format(", ".join(
str(payload) for payload in payloads))
print("{} 0x{:x} {}, {:.2f}% packets, {:.2f}% data".format(
ssrc_id, ssrc, payload_info, self.ssrc_frequencies[ssrc] * 100,
self.ssrc_size_table[ssrc] * 100))
print(" packet sizes:")
(bin_counts, bin_bounds) = numpy.histogram([point.size for point in
filtered_ssrc], bins=5,
(bin_counts,
bin_bounds) = numpy.histogram([point.size for point in filtered_ssrc],
bins=5,
density=False)
bin_proportions = bin_counts / sum(bin_counts)
print("\n".join([
" {:.1f} - {:.1f}: {:.2f}%".format(bin_bounds[i], bin_bounds[i + 1],
" {:.1f} - {:.1f}: {:.2f}%".format(bin_bounds[i],
bin_bounds[i + 1],
bin_proportions[i] * 100)
for i in range(len(bin_proportions))
]))
@ -95,8 +98,12 @@ class RTPStatistics(object):
return chosen_ssrc
ssrc_is_incoming = misc.SsrcDirections(self.data_points)
incoming = [ssrc for ssrc in ssrc_is_incoming if ssrc_is_incoming[ssrc]]
outgoing = [ssrc for ssrc in ssrc_is_incoming if not ssrc_is_incoming[ssrc]]
incoming = [
ssrc for ssrc in ssrc_is_incoming if ssrc_is_incoming[ssrc]
]
outgoing = [
ssrc for ssrc in ssrc_is_incoming if not ssrc_is_incoming[ssrc]
]
print("\nIncoming:\n")
for (i, ssrc) in enumerate(incoming):
@ -119,36 +126,34 @@ class RTPStatistics(object):
Removes data points with `ssrc != chosen_ssrc`. Unwraps sequence
numbers and timestamps for the chosen selection.
"""
self.data_points = [point for point in self.data_points if
point.ssrc == chosen_ssrc]
self.data_points = [
point for point in self.data_points if point.ssrc == chosen_ssrc
]
unwrapped_sequence_numbers = misc.Unwrap(
[point.sequence_number for point in self.data_points], 2**16 - 1)
for (data_point, sequence_number) in zip(self.data_points,
unwrapped_sequence_numbers):
data_point.sequence_number = sequence_number
unwrapped_timestamps = misc.Unwrap([point.timestamp for point in
self.data_points], 2**32 - 1)
unwrapped_timestamps = misc.Unwrap(
[point.timestamp for point in self.data_points], 2**32 - 1)
for (data_point, timestamp) in zip(self.data_points,
unwrapped_timestamps):
data_point.timestamp = timestamp
def PrintSequenceNumberStatistics(self):
seq_no_set = set(point.sequence_number for point in
self.data_points)
seq_no_set = set(point.sequence_number for point in self.data_points)
missing_sequence_numbers = max(seq_no_set) - min(seq_no_set) + (
1 - len(seq_no_set))
print("Missing sequence numbers: {} out of {} ({:.2f}%)".format(
missing_sequence_numbers,
len(seq_no_set),
100 * missing_sequence_numbers / len(seq_no_set)
))
print("Duplicated packets: {}".format(len(self.data_points) -
len(seq_no_set)))
missing_sequence_numbers, len(seq_no_set),
100 * missing_sequence_numbers / len(seq_no_set)))
print("Duplicated packets: {}".format(
len(self.data_points) - len(seq_no_set)))
print("Reordered packets: {}".format(
misc.CountReordered([point.sequence_number for point in
self.data_points])))
misc.CountReordered(
[point.sequence_number for point in self.data_points])))
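The missing-packet arithmetic above as a worked example: the span 5..9 covers five sequence numbers, four were seen, so one is missing.

seq_no_set = set([5, 6, 8, 9])
missing = max(seq_no_set) - min(seq_no_set) + (1 - len(seq_no_set))
print(missing)  # -> 1 (sequence number 7 was never seen)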
def EstimateFrequency(self, always_query_sample_rate):
"""Estimates frequency and updates data.
@ -160,7 +165,8 @@ class RTPStatistics(object):
"""
delta_timestamp = (self.data_points[-1].timestamp -
self.data_points[0].timestamp)
delta_arr_timestamp = float((self.data_points[-1].arrival_timestamp_ms -
delta_arr_timestamp = float(
(self.data_points[-1].arrival_timestamp_ms -
self.data_points[0].arrival_timestamp_ms))
freq_est = delta_timestamp / delta_arr_timestamp
@ -193,20 +199,18 @@ class RTPStatistics(object):
stream_duration_sender = self.data_points[-1].real_send_time_ms / 1000
print("Stream duration at sender: {:.1f} seconds".format(
stream_duration_sender
))
stream_duration_sender))
arrival_timestamps_ms = [point.arrival_timestamp_ms for point in
self.data_points]
arrival_timestamps_ms = [
point.arrival_timestamp_ms for point in self.data_points
]
stream_duration_receiver = (max(arrival_timestamps_ms) -
min(arrival_timestamps_ms)) / 1000
print("Stream duration at receiver: {:.1f} seconds".format(
stream_duration_receiver
))
stream_duration_receiver))
print("Clock drift: {:.2f}%".format(
100 * (stream_duration_receiver / stream_duration_sender - 1)
))
100 * (stream_duration_receiver / stream_duration_sender - 1)))
total_size = sum(point.size for point in self.data_points) * 8 / 1000
print("Send average bitrate: {:.2f} kbps".format(
@ -238,13 +242,15 @@ class RTPStatistics(object):
[point.real_send_time_ms for point in self.data_points],
bins=numpy.arange(start_ms, stop_ms,
RTPStatistics.PLOT_RESOLUTION_MS),
weights=[point.size * 8 / RTPStatistics.PLOT_RESOLUTION_MS
for point in self.data_points]
)
correlate_filter = (numpy.ones(
RTPStatistics.BANDWIDTH_SMOOTHING_WINDOW_SIZE) /
weights=[
point.size * 8 / RTPStatistics.PLOT_RESOLUTION_MS
for point in self.data_points
])
correlate_filter = (
numpy.ones(RTPStatistics.BANDWIDTH_SMOOTHING_WINDOW_SIZE) /
RTPStatistics.BANDWIDTH_SMOOTHING_WINDOW_SIZE)
self.smooth_bw_kbps = numpy.correlate(self.bandwidth_kbps, correlate_filter)
self.smooth_bw_kbps = numpy.correlate(self.bandwidth_kbps,
correlate_filter)
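The smoothing above is a plain moving average expressed through numpy.correlate. A minimal sketch with a window of 3:

import numpy

window_size = 3
correlate_filter = numpy.ones(window_size) / window_size
print(numpy.correlate([1.0, 2.0, 3.0, 4.0, 5.0], correlate_filter))
# -> [2. 3. 4.]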
def PlotStatistics(self):
"""Plots changes in delay and average bandwidth."""
@ -283,25 +289,30 @@ def CalculateDelay(start, stop, step, points):
grouped_delays = [[] for _ in numpy.arange(start, stop + step, step)]
rounded_value_index = lambda x: int((x - start) / step)
for point in points:
grouped_delays[rounded_value_index(point.real_send_time_ms)
].append(point.absdelay)
regularized_delays = [numpy.average(arr) if arr else -1 for arr in
grouped_delays]
grouped_delays[rounded_value_index(point.real_send_time_ms)].append(
point.absdelay)
regularized_delays = [
numpy.average(arr) if arr else -1 for arr in grouped_delays
]
return numpy.ma.masked_values(regularized_delays, -1)
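numpy.ma.masked_values hides the -1 placeholders used above for empty buckets, so they drop out of later plotting. A minimal sketch:

import numpy

regularized_delays = [2.0, -1, 3.5]
print(numpy.ma.masked_values(regularized_delays, -1))  # -> [2.0 -- 3.5]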
def main():
usage = "Usage: %prog [options] <filename of rtc event log>"
parser = optparse.OptionParser(usage=usage)
parser.add_option("--dump_header_to_stdout",
default=False, action="store_true",
parser.add_option(
"--dump_header_to_stdout",
default=False,
action="store_true",
help="print header info to stdout; similar to rtp_analyze")
parser.add_option("--query_sample_rate",
default=False, action="store_true",
default=False,
action="store_true",
help="always query user for real sample rate")
parser.add_option("--working_directory",
default=None, action="store",
default=None,
action="store",
help="directory in which to search for relative paths")
(options, args) = parser.parse_args()
@ -336,5 +347,6 @@ def main():
rtp_stats.ComputeBandwidth()
rtp_stats.PlotStatistics()
if __name__ == "__main__":
main()

View File

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Run the tests with
python rtp_analyzer_test.py

View File

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Builds the AppRTC collider using the golang toolchain.
The golang toolchain is downloaded by download_apprtc.py. We use that here
@ -24,7 +23,6 @@ import sys
import utils
USAGE_STR = "Usage: {} <apprtc_dir> <go_dir> <output_dir>"
@ -57,10 +55,11 @@ def main(argv):
golang_env = os.environ.copy()
golang_env['GOROOT'] = go_root_dir
golang_env['GOPATH'] = golang_workspace
collider_out = os.path.join(golang_workspace,
'collidermain' + utils.GetExecutableExtension())
subprocess.check_call([golang_path, 'build', '-o', collider_out,
'collidermain'], env=golang_env)
collider_out = os.path.join(
golang_workspace, 'collidermain' + utils.GetExecutableExtension())
subprocess.check_call(
[golang_path, 'build', '-o', collider_out, 'collidermain'],
env=golang_env)
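Passing an augmented environment to a child process, as done for the Go toolchain above, in a self-contained form (the GOPATH value is a hypothetical placeholder):

import os
import subprocess
import sys

golang_env = os.environ.copy()
golang_env['GOPATH'] = '/tmp/golang_workspace'  # hypothetical workspace
subprocess.check_call(
    [sys.executable, '-c', 'import os; print(os.environ["GOPATH"])'],
    env=golang_env)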
if __name__ == '__main__':

View File

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Downloads prebuilt AppRTC and Go from WebRTC storage and unpacks it.
Requires that depot_tools is installed and in the PATH.
@ -21,7 +20,6 @@ import sys
import utils
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))

View File

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""This script sets up AppRTC and its dependencies.
Requires that depot_tools is installed and in the PATH.
@ -19,7 +18,6 @@ import sys
import utils
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
@ -30,15 +28,15 @@ def main(argv):
output_dir = os.path.abspath(argv[1])
download_apprtc_path = os.path.join(SCRIPT_DIR, 'download_apprtc.py')
utils.RunSubprocessWithRetry([sys.executable, download_apprtc_path,
output_dir])
utils.RunSubprocessWithRetry(
[sys.executable, download_apprtc_path, output_dir])
build_apprtc_path = os.path.join(SCRIPT_DIR, 'build_apprtc.py')
apprtc_dir = os.path.join(output_dir, 'apprtc')
go_dir = os.path.join(output_dir, 'go')
collider_dir = os.path.join(output_dir, 'collider')
utils.RunSubprocessWithRetry([sys.executable, build_apprtc_path,
apprtc_dir, go_dir, collider_dir])
utils.RunSubprocessWithRetry(
[sys.executable, build_apprtc_path, apprtc_dir, go_dir, collider_dir])
if __name__ == '__main__':

View File

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Utilities for all our deps-management stuff."""
from __future__ import absolute_import
@ -41,9 +40,10 @@ def DownloadFilesFromGoogleStorage(path, auto_platform=True):
print('Downloading files in %s...' % path)
extension = 'bat' if 'win32' in sys.platform else 'py'
cmd = ['download_from_google_storage.%s' % extension,
'--bucket=chromium-webrtc-resources',
'--directory', path]
cmd = [
'download_from_google_storage.%s' % extension,
'--bucket=chromium-webrtc-resources', '--directory', path
]
if auto_platform:
cmd += ['--auto_platform', '--recursive']
subprocess.check_call(cmd)
@ -77,9 +77,10 @@ def RemoveDirectory(*path):
# Give up and use cmd.exe's rd command.
file_path = os.path.normcase(file_path)
for _ in range(3):
print('RemoveDirectory running %s' % (' '.join(
['cmd.exe', '/c', 'rd', '/q', '/s', file_path])))
if not subprocess.call(['cmd.exe', '/c', 'rd', '/q', '/s', file_path]):
print('RemoveDirectory running %s' %
(' '.join(['cmd.exe', '/c', 'rd', '/q', '/s', file_path])))
if not subprocess.call(
['cmd.exe', '/c', 'rd', '/q', '/s', file_path]):
break
print(' Failed')
time.sleep(3)

View File

@ -6,23 +6,26 @@
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(CheckPatchFormatted(input_api, output_api))
return results
def CheckPatchFormatted(input_api, output_api):
import git_cl
cmd = ['cl', 'format', '--dry-run', input_api.PresubmitLocalPath()]
code, _ = git_cl.RunGitWithCode(cmd, suppress_stderr=True)
if code == 2:
short_path = input_api.basename(input_api.PresubmitLocalPath())
full_path = input_api.os_path.relpath(input_api.PresubmitLocalPath(),
input_api.change.RepositoryRoot())
return [output_api.PresubmitPromptWarning(
full_path = input_api.os_path.relpath(
input_api.PresubmitLocalPath(), input_api.change.RepositoryRoot())
return [
output_api.PresubmitPromptWarning(
'The %s directory requires source formatting. '
'Please run git cl format %s' %
(short_path, full_path))]
'Please run git cl format %s' % (short_path, full_path))
]
# As this is just a warning, ignore all other errors if the user
# happens to have a broken clang-format, doesn't use git, etc etc.
return []
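The dry-run convention above (exit code 2 means the tree needs formatting) can be exercised outside a presubmit too. A sketch that swaps the presubmit git_cl helper for subprocess, assuming depot_tools' git cl is on PATH:

import subprocess

code = subprocess.call(['git', 'cl', 'format', '--dry-run', '.'])
if code == 2:
    print('The directory requires source formatting. '
          'Please run: git cl format .')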

View File

@ -28,18 +28,22 @@ def _LicenseHeader(input_api):
}
return license_header
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(input_api.canned_checks.CheckLicense(
input_api, output_api, _LicenseHeader(input_api)))
results.extend(
input_api.canned_checks.CheckLicense(input_api, output_api,
_LicenseHeader(input_api)))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))

View File

@ -7,7 +7,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Script to generate libwebrtc.aar for distribution.
The script has to be run from the root src folder.
@ -33,7 +32,6 @@ import sys
import tempfile
import zipfile
SCRIPT_DIR = os.path.dirname(os.path.realpath(sys.argv[0]))
SRC_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir))
DEFAULT_ARCHS = ['armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64']
@ -52,32 +50,49 @@ sys.path.append(os.path.join(SRC_DIR, 'build'))
import find_depot_tools
def _ParseArgs():
parser = argparse.ArgumentParser(description='libwebrtc.aar generator.')
parser.add_argument('--build-dir',
parser.add_argument(
'--build-dir',
help='Build dir. By default will create and use temporary dir.')
parser.add_argument('--output', default='libwebrtc.aar',
parser.add_argument('--output',
default='libwebrtc.aar',
help='Output file of the script.')
parser.add_argument('--arch', default=DEFAULT_ARCHS, nargs='*',
parser.add_argument(
'--arch',
default=DEFAULT_ARCHS,
nargs='*',
help='Architectures to build. Defaults to %(default)s.')
parser.add_argument('--use-goma', action='store_true', default=False,
parser.add_argument('--use-goma',
action='store_true',
default=False,
help='Use goma.')
parser.add_argument('--verbose', action='store_true', default=False,
parser.add_argument('--verbose',
action='store_true',
default=False,
help='Debug logging.')
parser.add_argument('--extra-gn-args', default=[], nargs='*',
parser.add_argument(
'--extra-gn-args',
default=[],
nargs='*',
help="""Additional GN arguments to be used during Ninja generation.
These are passed to gn inside `--args` switch and
applied after any other arguments and will
override any values defined by the script.
Example of building debug aar file:
build_aar.py --extra-gn-args='is_debug=true'""")
parser.add_argument('--extra-ninja-switches', default=[], nargs='*',
parser.add_argument(
'--extra-ninja-switches',
default=[],
nargs='*',
help="""Additional Ninja switches to be used during compilation.
These are applied after any other Ninja switches.
Example of enabling verbose Ninja output:
build_aar.py --extra-ninja-switches='-v'""")
parser.add_argument('--extra-gn-switches', default=[], nargs='*',
parser.add_argument(
'--extra-gn-switches',
default=[],
nargs='*',
help="""Additional GN switches to be used during compilation.
These are applied after any other GN switches.
Example of enabling verbose GN output:
@ -86,16 +101,20 @@ def _ParseArgs():
def _RunGN(args):
cmd = [sys.executable,
os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py')]
cmd = [
sys.executable,
os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py')
]
cmd.extend(args)
logging.debug('Running: %r', cmd)
subprocess.check_call(cmd)
def _RunNinja(output_directory, args):
cmd = [os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja'),
'-C', output_directory]
cmd = [
os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja'), '-C',
output_directory
]
cmd.extend(args)
logging.debug('Running: %r', cmd)
subprocess.check_call(cmd)
@ -158,8 +177,9 @@ def Build(build_dir, arch, use_goma, extra_gn_args, extra_gn_switches,
arm_version = _GetArmVersion(arch)
if arm_version:
gn_args['arm_version'] = arm_version
gn_args_str = '--args=' + ' '.join([
k + '=' + _EncodeForGN(v) for k, v in gn_args.items()] + extra_gn_args)
gn_args_str = '--args=' + ' '.join(
[k + '=' + _EncodeForGN(v)
for k, v in gn_args.items()] + extra_gn_args)
gn_args_list = ['gen', output_directory, gn_args_str]
gn_args_list.extend(extra_gn_switches)
@ -197,8 +217,12 @@ def GenerateLicenses(output_dir, build_dir, archs):
builder.GenerateLicenseText(output_dir)
def BuildAar(archs, output_file, use_goma=False, extra_gn_args=None,
ext_build_dir=None, extra_gn_switches=None,
def BuildAar(archs,
output_file,
use_goma=False,
extra_gn_args=None,
ext_build_dir=None,
extra_gn_switches=None,
extra_ninja_switches=None):
extra_gn_args = extra_gn_args or []
extra_gn_switches = extra_gn_switches or []

View File

@ -7,7 +7,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Script for publishing WebRTC AAR on Bintray.
Set BINTRAY_USER and BINTRAY_API_KEY environment variables before running
@ -25,7 +24,6 @@ import sys
import tempfile
import time
SCRIPT_DIR = os.path.dirname(os.path.realpath(sys.argv[0]))
CHECKOUT_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir))
@ -36,7 +34,6 @@ import jinja2
sys.path.append(os.path.join(CHECKOUT_ROOT, 'tools_webrtc'))
from android.build_aar import BuildAar
ARCHS = ['armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64']
MAVEN_REPOSITORY = 'https://google.bintray.com/webrtc'
API = 'https://api.bintray.com'
@ -63,23 +60,34 @@ AAR_PROJECT_VERSION_DEPENDENCY = "implementation 'org.webrtc:google-webrtc:%s'"
def _ParseArgs():
parser = argparse.ArgumentParser(description='Releases WebRTC on Bintray.')
parser.add_argument('--use-goma', action='store_true', default=False,
parser.add_argument('--use-goma',
action='store_true',
default=False,
help='Use goma.')
parser.add_argument('--skip-tests', action='store_true', default=False,
parser.add_argument('--skip-tests',
action='store_true',
default=False,
help='Skips running the tests.')
parser.add_argument('--publish', action='store_true', default=False,
parser.add_argument(
'--publish',
action='store_true',
default=False,
help='Automatically publishes the library if the tests pass.')
parser.add_argument('--build-dir', default=None,
parser.add_argument(
'--build-dir',
default=None,
help='Temporary directory to store the build files. If not specified, '
'a new directory will be created.')
parser.add_argument('--verbose', action='store_true', default=False,
parser.add_argument('--verbose',
action='store_true',
default=False,
help='Debug logging.')
return parser.parse_args()
def _GetCommitHash():
commit_hash = subprocess.check_output(
['git', 'rev-parse', 'HEAD'], cwd=CHECKOUT_ROOT).strip()
commit_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
cwd=CHECKOUT_ROOT).strip()
return commit_hash
@ -87,11 +95,11 @@ def _GetCommitPos():
commit_message = subprocess.check_output(
['git', 'rev-list', '--format=%B', '--max-count=1', 'HEAD'],
cwd=CHECKOUT_ROOT)
commit_pos_match = re.search(
COMMIT_POSITION_REGEX, commit_message, re.MULTILINE)
commit_pos_match = re.search(COMMIT_POSITION_REGEX, commit_message,
re.MULTILINE)
if not commit_pos_match:
raise Exception('Commit position not found in the commit message: %s'
% commit_message)
raise Exception('Commit position not found in the commit message: %s' %
commit_message)
return commit_pos_match.group(1)
@ -111,7 +119,9 @@ def _UploadFile(user, password, filename, version, target_file):
for attempt in xrange(UPLOAD_TRIES):
try:
response = requests.put(url, data=file_data, auth=(user, password),
response = requests.put(url,
data=file_data,
auth=(user, password),
timeout=API_TIMEOUT_SECONDS)
break
except requests.exceptions.Timeout as e:
@ -121,14 +131,13 @@ def _UploadFile(user, password, filename, version, target_file):
raise Exception('Failed to upload %s' % filename)
if not response.ok:
raise Exception('Failed to upload %s. Response: %s' % (filename, response))
raise Exception('Failed to upload %s. Response: %s' %
(filename, response))
logging.info('Uploaded %s: %s', filename, response)
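The upload loop above retries transient timeouts. The same pattern, reduced to a standalone sketch (the URL and payload are supplied by the caller):

import requests

def PutWithRetry(url, data, tries=3, timeout_seconds=10):
    # Retry only on timeouts; re-raise once the attempts are exhausted.
    for attempt in range(tries):
        try:
            return requests.put(url, data=data, timeout=timeout_seconds)
        except requests.exceptions.Timeout:
            if attempt == tries - 1:
                raise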
def _GeneratePom(target_file, version, commit):
env = jinja2.Environment(
loader=jinja2.PackageLoader('release_aar'),
)
env = jinja2.Environment(loader=jinja2.PackageLoader('release_aar'), )
template = env.get_template('pom.jinja')
pom = template.render(version=version, commit=commit)
with open(target_file, 'w') as fh:
@ -138,9 +147,7 @@ def _GeneratePom(target_file, version, commit):
def _TestAAR(tmp_dir, username, password, version):
"""Runs AppRTCMobile tests using the AAR. Returns true if the tests pass."""
logging.info('Testing library.')
env = jinja2.Environment(
loader=jinja2.PackageLoader('release_aar'),
)
env = jinja2.Environment(loader=jinja2.PackageLoader('release_aar'), )
gradle_backup = os.path.join(tmp_dir, 'build.gradle.backup')
app_gradle_backup = os.path.join(tmp_dir, 'app-build.gradle.backup')
@ -163,18 +170,20 @@ def _TestAAR(tmp_dir, username, password, version):
gradle_app = gradle_app_file.read()
if AAR_PROJECT_DEPENDENCY not in gradle_app:
raise Exception(
'%s not found in the build file.' % AAR_PROJECT_DEPENDENCY)
raise Exception('%s not found in the build file.' %
AAR_PROJECT_DEPENDENCY)
# Set version to the version to be tested.
target_dependency = AAR_PROJECT_VERSION_DEPENDENCY % version
gradle_app = gradle_app.replace(AAR_PROJECT_DEPENDENCY, target_dependency)
gradle_app = gradle_app.replace(AAR_PROJECT_DEPENDENCY,
target_dependency)
# Write back.
with open(AAR_PROJECT_APP_GRADLE, 'w') as gradle_app_file:
gradle_app_file.write(gradle_app)
# Uninstall any existing version of AppRTCMobile.
logging.info('Uninstalling previous AppRTCMobile versions. It is okay for '
logging.info(
'Uninstalling previous AppRTCMobile versions. It is okay for '
'these commands to fail if AppRTCMobile is not installed.')
subprocess.call([ADB_BIN, 'uninstall', 'org.appspot.apprtc'])
subprocess.call([ADB_BIN, 'uninstall', 'org.appspot.apprtc.test'])
@ -204,7 +213,9 @@ def _PublishAAR(user, password, version, additional_args):
args.update(additional_args)
url = CONTENT_API + '/' + version + '/publish'
response = requests.post(url, data=json.dumps(args), auth=(user, password),
response = requests.post(url,
data=json.dumps(args),
auth=(user, password),
timeout=API_TIMEOUT_SECONDS)
if not response.ok:
@ -213,7 +224,8 @@ def _PublishAAR(user, password, version, additional_args):
def _DeleteUnpublishedVersion(user, password, version):
url = PACKAGES_API + '/versions/' + version
response = requests.get(url, auth=(user, password),
response = requests.get(url,
auth=(user, password),
timeout=API_TIMEOUT_SECONDS)
if not response.ok:
raise Exception('Failed to get version info. Response: %s' % response)
@ -224,7 +236,8 @@ def _DeleteUnpublishedVersion(user, password, version):
return
logging.info('Deleting unpublished version.')
response = requests.delete(url, auth=(user, password),
response = requests.delete(url,
auth=(user, password),
timeout=API_TIMEOUT_SECONDS)
if not response.ok:
raise Exception('Failed to delete version. Response: %s' % response)
@ -238,7 +251,8 @@ def ReleaseAar(use_goma, skip_tests, publish, build_dir):
user = os.environ.get('BINTRAY_USER', None)
api_key = os.environ.get('BINTRAY_API_KEY', None)
if not user or not api_key:
raise Exception('Environment variables BINTRAY_USER and BINTRAY_API_KEY '
raise Exception(
'Environment variables BINTRAY_USER and BINTRAY_API_KEY '
'must be defined.')
# If build directory is not specified, create a temporary directory.
@ -253,7 +267,8 @@ def ReleaseAar(use_goma, skip_tests, publish, build_dir):
pom_file = os.path.join(build_dir, base_name + '.pom')
logging.info('Building at %s', build_dir)
BuildAar(ARCHS, aar_file,
BuildAar(ARCHS,
aar_file,
use_goma=use_goma,
ext_build_dir=os.path.join(build_dir, 'aar-build'))
_GeneratePom(pom_file, version, commit)
@ -274,7 +289,8 @@ def ReleaseAar(use_goma, skip_tests, publish, build_dir):
logging.info('Publishing library.')
_PublishAAR(user, api_key, version, {})
else:
logging.info('Note: The library has not been published automatically.'
logging.info(
'Note: The library has not been published automatically.'
' Please do so manually if desired.')
finally:
if use_tmp_dir:

View File

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Script to automatically roll dependencies in the WebRTC DEPS file."""
import argparse
@ -19,6 +18,7 @@ import subprocess
import sys
import urllib2
def FindSrcDirPath():
"""Returns the abs path to the src/ dir of the project."""
src_dir = os.path.dirname(os.path.abspath(__file__))
@ -26,6 +26,7 @@ def FindSrcDirPath():
src_dir = os.path.normpath(os.path.join(src_dir, os.pardir))
return src_dir
# Skip these dependencies (list without solution name prefix).
DONT_AUTOROLL_THESE = [
'src/examples/androidtests/third_party/gradle',
@ -48,7 +49,6 @@ WEBRTC_ONLY_DEPS = [
'src/tools',
]
WEBRTC_URL = 'https://webrtc.googlesource.com/src'
CHROMIUM_SRC_URL = 'https://chromium.googlesource.com/chromium/src'
CHROMIUM_COMMIT_TEMPLATE = CHROMIUM_SRC_URL + '/+/%s'
@ -71,7 +71,6 @@ ANDROID_DEPS_PATH = 'src/third_party/android_deps/'
NOTIFY_EMAIL = 'webrtc-trooper@grotations.appspotmail.com'
sys.path.append(os.path.join(CHECKOUT_SRC_DIR, 'build'))
import find_depot_tools
@ -82,8 +81,8 @@ CLANG_UPDATE_SCRIPT_LOCAL_PATH = os.path.join(CHECKOUT_SRC_DIR, 'tools',
'clang', 'scripts', 'update.py')
DepsEntry = collections.namedtuple('DepsEntry', 'path url revision')
ChangedDep = collections.namedtuple(
'ChangedDep', 'path url current_rev new_rev')
ChangedDep = collections.namedtuple('ChangedDep',
'path url current_rev new_rev')
CipdDepsEntry = collections.namedtuple('CipdDepsEntry', 'path packages')
ChangedCipdPackage = collections.namedtuple(
'ChangedCipdPackage', 'path package current_version new_version')
@ -132,8 +131,11 @@ def ParseCommitPosition(commit_message):
sys.exit(-1)
def _RunCommand(command, working_dir=None, ignore_exit_code=False,
extra_env=None, input_data=None):
def _RunCommand(command,
working_dir=None,
ignore_exit_code=False,
extra_env=None,
input_data=None):
"""Runs a command and returns the output from that command.
If the command fails (exit code != 0), the function will exit the process.
@ -151,15 +153,18 @@ def _RunCommand(command, working_dir=None, ignore_exit_code=False,
p = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env,
cwd=working_dir, universal_newlines=True)
stderr=subprocess.PIPE,
env=env,
cwd=working_dir,
universal_newlines=True)
std_output, err_output = p.communicate(input_data)
p.stdout.close()
p.stderr.close()
if not ignore_exit_code and p.returncode != 0:
logging.error('Command failed: %s\n'
'stdout:\n%s\n'
'stderr:\n%s\n', ' '.join(command), std_output, err_output)
'stderr:\n%s\n', ' '.join(command), std_output,
err_output)
sys.exit(p.returncode)
return std_output, err_output
@ -194,8 +199,8 @@ def _ReadGitilesContent(url):
def ReadRemoteCrFile(path_below_src, revision):
"""Reads a remote Chromium file of a specific revision. Returns a string."""
return _ReadGitilesContent(CHROMIUM_FILE_TEMPLATE % (revision,
path_below_src))
return _ReadGitilesContent(CHROMIUM_FILE_TEMPLATE %
(revision, path_below_src))
def ReadRemoteCrCommit(revision):
@ -264,8 +269,9 @@ def BuildDepsentryDict(deps_dict):
def _FindChangedCipdPackages(path, old_pkgs, new_pkgs):
pkgs_equal = ({p['package'] for p in old_pkgs} ==
{p['package'] for p in new_pkgs})
pkgs_equal = ({p['package']
for p in old_pkgs} == {p['package']
for p in new_pkgs})
assert pkgs_equal, ('Old: %s\n New: %s.\nYou need to do a manual roll '
'and remove/add entries in DEPS so the old and new '
'list match.' % (old_pkgs, new_pkgs))
@ -273,19 +279,21 @@ def _FindChangedCipdPackages(path, old_pkgs, new_pkgs):
for new_pkg in new_pkgs:
old_version = old_pkg['version']
new_version = new_pkg['version']
if (old_pkg['package'] == new_pkg['package'] and
old_version != new_version):
if (old_pkg['package'] == new_pkg['package']
and old_version != new_version):
logging.debug('Roll dependency %s to %s', path, new_version)
yield ChangedCipdPackage(path, old_pkg['package'],
old_version, new_version)
yield ChangedCipdPackage(path, old_pkg['package'], old_version,
new_version)
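The set comparison above only checks that the package names match between the old and new CIPD entries; versions are compared afterwards. A worked example using the xstream roll from the tests further down:

old_pkgs = [{'package': 'chromium/third_party/xstream',
             'version': 'version:1.4.8-cr0'}]
new_pkgs = [{'package': 'chromium/third_party/xstream',
             'version': 'version:1.10.0-cr0'}]
print({p['package'] for p in old_pkgs} ==
      {p['package'] for p in new_pkgs})  # -> True: only the version changed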
def _FindNewDeps(old, new):
""" Gather dependencies only in |new| and return corresponding paths. """
old_entries = set(BuildDepsentryDict(old))
new_entries = set(BuildDepsentryDict(new))
return [path for path in new_entries - old_entries
if path not in DONT_AUTOROLL_THESE]
return [
path for path in new_entries - old_entries
if path not in DONT_AUTOROLL_THESE
]
def FindAddedDeps(webrtc_deps, new_cr_deps):
@ -311,10 +319,12 @@ def FindAddedDeps(webrtc_deps, new_cr_deps):
A list of paths for other added dependencies.
"""
all_added_deps = _FindNewDeps(webrtc_deps, new_cr_deps)
generated_android_deps = [path for path in all_added_deps
if path.startswith(ANDROID_DEPS_PATH)]
other_deps = [path for path in all_added_deps
if path not in generated_android_deps]
generated_android_deps = [
path for path in all_added_deps if path.startswith(ANDROID_DEPS_PATH)
]
other_deps = [
path for path in all_added_deps if path not in generated_android_deps
]
return generated_android_deps, other_deps
@ -343,12 +353,14 @@ def FindRemovedDeps(webrtc_deps, new_cr_deps):
A list of paths of unexpected disappearing dependencies.
"""
all_removed_deps = _FindNewDeps(new_cr_deps, webrtc_deps)
generated_android_deps = [path for path in all_removed_deps
if path.startswith(ANDROID_DEPS_PATH)]
generated_android_deps = [
path for path in all_removed_deps if path.startswith(ANDROID_DEPS_PATH)
]
# Webrtc-only dependencies are handled in CalculateChangedDeps.
other_deps = [path for path in all_removed_deps
if path not in generated_android_deps and
path not in WEBRTC_ONLY_DEPS]
other_deps = [
path for path in all_removed_deps
if path not in generated_android_deps and path not in WEBRTC_ONLY_DEPS
]
return generated_android_deps, other_deps
@ -378,20 +390,21 @@ def CalculateChangedDeps(webrtc_deps, new_cr_deps):
assert type(cr_deps_entry) is type(webrtc_deps_entry)
if isinstance(cr_deps_entry, CipdDepsEntry):
result.extend(_FindChangedCipdPackages(path, webrtc_deps_entry.packages,
result.extend(
_FindChangedCipdPackages(path, webrtc_deps_entry.packages,
cr_deps_entry.packages))
continue
# Use the revision from Chromium's DEPS file.
new_rev = cr_deps_entry.revision
assert webrtc_deps_entry.url == cr_deps_entry.url, (
'WebRTC DEPS entry %s has a different URL (%s) than Chromium (%s).' %
(path, webrtc_deps_entry.url, cr_deps_entry.url))
'WebRTC DEPS entry %s has a different URL (%s) than Chromium (%s).'
% (path, webrtc_deps_entry.url, cr_deps_entry.url))
else:
if isinstance(webrtc_deps_entry, DepsEntry):
# Use the HEAD of the deps repo.
stdout, _ = _RunCommand(['git', 'ls-remote', webrtc_deps_entry.url,
'HEAD'])
stdout, _ = _RunCommand(
['git', 'ls-remote', webrtc_deps_entry.url, 'HEAD'])
new_rev = stdout.strip().split('\t')[0]
else:
# The dependency has been removed from chromium.
@ -401,7 +414,8 @@ def CalculateChangedDeps(webrtc_deps, new_cr_deps):
# Check if an update is necessary.
if webrtc_deps_entry.revision != new_rev:
logging.debug('Roll dependency %s to %s', path, new_rev)
result.append(ChangedDep(path, webrtc_deps_entry.url,
result.append(
ChangedDep(path, webrtc_deps_entry.url,
webrtc_deps_entry.revision, new_rev))
return sorted(result)
@ -421,10 +435,14 @@ def CalculateChangedClang(new_cr_rev):
new_clang_update_py = ReadRemoteCrFile(CLANG_UPDATE_SCRIPT_URL_PATH,
new_cr_rev).splitlines()
new_rev = GetClangRev(new_clang_update_py)
return ChangedDep(CLANG_UPDATE_SCRIPT_LOCAL_PATH, None, current_rev, new_rev)
return ChangedDep(CLANG_UPDATE_SCRIPT_LOCAL_PATH, None, current_rev,
new_rev)
def GenerateCommitMessage(rev_update, current_commit_pos, new_commit_pos,
def GenerateCommitMessage(
rev_update,
current_commit_pos,
new_commit_pos,
changed_deps_list,
added_deps_paths=None,
removed_deps_paths=None,
@ -435,11 +453,12 @@ def GenerateCommitMessage(rev_update, current_commit_pos, new_commit_pos,
rev_interval = '%s..%s' % (current_cr_rev, new_cr_rev)
git_number_interval = '%s:%s' % (current_commit_pos, new_commit_pos)
commit_msg = ['Roll chromium_revision %s (%s)\n' % (rev_interval,
git_number_interval),
commit_msg = [
'Roll chromium_revision %s (%s)\n' %
(rev_interval, git_number_interval),
'Change log: %s' % (CHROMIUM_LOG_TEMPLATE % rev_interval),
'Full diff: %s\n' % (CHROMIUM_COMMIT_TEMPLATE %
rev_interval)]
'Full diff: %s\n' % (CHROMIUM_COMMIT_TEMPLATE % rev_interval)
]
def Section(adjective, deps):
noun = 'dependency' if len(deps) == 1 else 'dependencies'
@ -451,12 +470,12 @@ def GenerateCommitMessage(rev_update, current_commit_pos, new_commit_pos,
for c in changed_deps_list:
if isinstance(c, ChangedCipdPackage):
commit_msg.append('* %s: %s..%s' % (c.path, c.current_version,
c.new_version))
commit_msg.append('* %s: %s..%s' %
(c.path, c.current_version, c.new_version))
else:
commit_msg.append('* %s: %s/+log/%s..%s' % (c.path, c.url,
c.current_rev[0:10],
c.new_rev[0:10]))
commit_msg.append(
'* %s: %s/+log/%s..%s' %
(c.path, c.url, c.current_rev[0:10], c.new_rev[0:10]))
if 'libvpx' in c.path:
tbr_authors += 'marpan@webrtc.org, jianj@chromium.org, '
@ -468,9 +487,7 @@ def GenerateCommitMessage(rev_update, current_commit_pos, new_commit_pos,
Section('Removed', removed_deps_paths)
commit_msg.extend('* %s' % p for p in removed_deps_paths)
if any([changed_deps_list,
added_deps_paths,
removed_deps_paths]):
if any([changed_deps_list, added_deps_paths, removed_deps_paths]):
change_url = CHROMIUM_FILE_TEMPLATE % (rev_interval, 'DEPS')
commit_msg.append('DEPS diff: %s\n' % change_url)
else:
@ -515,8 +532,8 @@ def UpdateDepsFile(deps_filename, rev_update, changed_deps, new_cr_content):
if not new_deps or not old_deps:
faulty = 'Chromium' if not new_deps else 'WebRTC'
raise RollError('Was expecting to find "%s" and "%s"\n'
'in %s DEPS'
% (ANDROID_DEPS_START, ANDROID_DEPS_END, faulty))
'in %s DEPS' %
(ANDROID_DEPS_START, ANDROID_DEPS_END, faulty))
deps_content = deps_re.sub(new_deps.group(0), deps_content)
with open(deps_filename, 'wb') as deps_file:
@ -552,10 +569,11 @@ def _IsTreeClean():
def _EnsureUpdatedMasterBranch(dry_run):
current_branch = _RunCommand(
['git', 'rev-parse', '--abbrev-ref', 'HEAD'])[0].splitlines()[0]
current_branch = _RunCommand(['git', 'rev-parse', '--abbrev-ref',
'HEAD'])[0].splitlines()[0]
if current_branch != 'master':
logging.error('Please checkout the master branch and re-run this script.')
logging.error(
'Please checkout the master branch and re-run this script.')
if not dry_run:
sys.exit(-1)
@ -634,26 +652,43 @@ def GetRollRevisionRanges(opts, webrtc_deps):
def main():
p = argparse.ArgumentParser()
p.add_argument('--clean', action='store_true', default=False,
p.add_argument('--clean',
action='store_true',
default=False,
help='Removes any previous local roll branch.')
p.add_argument('-r', '--revision',
p.add_argument('-r',
'--revision',
help=('Chromium Git revision to roll to. Defaults to the '
'Chromium HEAD revision if omitted.'))
p.add_argument('--dry-run', action='store_true', default=False,
p.add_argument(
'--dry-run',
action='store_true',
default=False,
help=('Calculate changes and modify DEPS, but don\'t create '
'any local branch, commit, upload CL or send any '
'tryjobs.'))
p.add_argument('-i', '--ignore-unclean-workdir', action='store_true',
p.add_argument(
'-i',
'--ignore-unclean-workdir',
action='store_true',
default=False,
help=('Ignore if the current branch is not master or if there '
'are uncommitted changes (default: %(default)s).'))
grp = p.add_mutually_exclusive_group()
grp.add_argument('--skip-cq', action='store_true', default=False,
grp.add_argument(
'--skip-cq',
action='store_true',
default=False,
help='Skip sending the CL to the CQ (default: %(default)s)')
grp.add_argument('--cq-over', type=int, default=1,
grp.add_argument('--cq-over',
type=int,
default=1,
help=('Commit queue dry run if the revision difference '
'is below this number (default: %(default)s)'))
p.add_argument('-v', '--verbose', action='store_true', default=False,
p.add_argument('-v',
'--verbose',
action='store_true',
default=False,
help='Be extra verbose in printing of log messages.')
opts = p.parse_args()
@ -687,15 +722,19 @@ def main():
changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps)
# Discard other deps, assumed to be chromium-only dependencies.
new_generated_android_deps, _ = FindAddedDeps(webrtc_deps, new_cr_deps)
removed_generated_android_deps, other_deps = FindRemovedDeps(webrtc_deps,
new_cr_deps)
removed_generated_android_deps, other_deps = FindRemovedDeps(
webrtc_deps, new_cr_deps)
if other_deps:
raise RollError('WebRTC DEPS entries are missing from Chromium: %s.\n'
'Remove them or add them to either '
'WEBRTC_ONLY_DEPS or DONT_AUTOROLL_THESE.' % other_deps)
'WEBRTC_ONLY_DEPS or DONT_AUTOROLL_THESE.' %
other_deps)
clang_change = CalculateChangedClang(rev_update.new_chromium_rev)
commit_msg = GenerateCommitMessage(
rev_update, current_commit_pos, new_commit_pos, changed_deps,
rev_update,
current_commit_pos,
new_commit_pos,
changed_deps,
added_deps_paths=new_generated_android_deps,
removed_deps_paths=removed_generated_android_deps,
clang_change=clang_change)

View File

@ -14,7 +14,6 @@ import sys
import tempfile
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT_DIR = os.path.join(SCRIPT_DIR, os.pardir)
sys.path.append(PARENT_DIR)
@ -66,7 +65,8 @@ class FakeCmd(object):
for item in ignores:
kwargs.pop(item, None)
if args != exp_args or kwargs != exp_kwargs:
message = 'Expected:\n args: %s\n kwargs: %s\n' % (exp_args, exp_kwargs)
message = 'Expected:\n args: %s\n kwargs: %s\n' % (exp_args,
exp_kwargs)
message += 'Got:\n args: %s\n kwargs: %s\n' % (args, kwargs)
raise TestError(message)
return exp_returns
@ -110,12 +110,12 @@ class TestRollChromiumRevision(unittest.TestCase):
new_cr_contents = deps_file.read()
UpdateDepsFile(self._webrtc_depsfile,
ChromiumRevisionUpdate(current_rev, new_rev),
[],
ChromiumRevisionUpdate(current_rev, new_rev), [],
new_cr_contents)
with open(self._webrtc_depsfile) as deps_file:
deps_contents = deps_file.read()
self.assertTrue(new_rev in deps_contents,
self.assertTrue(
new_rev in deps_contents,
'Failed to find %s in\n%s' % (new_rev, deps_contents))
def _UpdateDepsSetup(self):
@ -129,8 +129,7 @@ class TestRollChromiumRevision(unittest.TestCase):
changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps)
with mock.patch('roll_deps._RunCommand', NullCmd()):
UpdateDepsFile(self._webrtc_depsfile_android,
NO_CHROMIUM_REVISION_UPDATE,
changed_deps,
NO_CHROMIUM_REVISION_UPDATE, changed_deps,
new_cr_contents)
with open(self._webrtc_depsfile_android) as deps_file:
@ -167,7 +166,9 @@ class TestRollChromiumRevision(unittest.TestCase):
vars_dict = local_scope['vars']
def AssertVar(variable_name):
self.assertEquals(vars_dict[variable_name], TEST_DATA_VARS[variable_name])
self.assertEquals(vars_dict[variable_name],
TEST_DATA_VARS[variable_name])
AssertVar('chromium_git')
AssertVar('chromium_revision')
self.assertEquals(len(local_scope['deps']), 3)
@ -186,13 +187,13 @@ class TestRollChromiumRevision(unittest.TestCase):
entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/build')
self.assertEquals(len(entries), 1)
def testCalculateChangedDeps(self):
webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile)
new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile)
with mock.patch('roll_deps._RunCommand', self.fake):
_SetupGitLsRemoteCall(
self.fake, 'https://chromium.googlesource.com/chromium/src/build',
self.fake,
'https://chromium.googlesource.com/chromium/src/build',
BUILD_NEW_REV)
changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps)
@ -206,7 +207,8 @@ class TestRollChromiumRevision(unittest.TestCase):
self.assertEquals(changed_deps[1].new_rev, DEPOTTOOLS_NEW_REV)
self.assertEquals(changed_deps[2].path, 'src/third_party/xstream')
self.assertEquals(changed_deps[2].package, 'chromium/third_party/xstream')
self.assertEquals(changed_deps[2].package,
'chromium/third_party/xstream')
self.assertEquals(changed_deps[2].current_version, 'version:1.4.8-cr0')
self.assertEquals(changed_deps[2].new_version, 'version:1.10.0-cr0')
@ -228,19 +230,21 @@ class TestRollChromiumRevision(unittest.TestCase):
def testFindAddedDeps(self):
webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android)
new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android)
added_android_paths, other_paths = FindAddedDeps(webrtc_deps, new_cr_deps)
self.assertEquals(
added_android_paths,
['src/third_party/android_deps/libs/android_arch_lifecycle_common'])
added_android_paths, other_paths = FindAddedDeps(
webrtc_deps, new_cr_deps)
self.assertEquals(added_android_paths, [
'src/third_party/android_deps/libs/android_arch_lifecycle_common'
])
self.assertEquals(other_paths, [])
def testFindRemovedDeps(self):
webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android)
new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android)
removed_android_paths, other_paths = FindRemovedDeps(webrtc_deps,
new_cr_deps)
self.assertEquals(removed_android_paths,
['src/third_party/android_deps/libs/android_arch_lifecycle_runtime'])
removed_android_paths, other_paths = FindRemovedDeps(
webrtc_deps, new_cr_deps)
self.assertEquals(removed_android_paths, [
'src/third_party/android_deps/libs/android_arch_lifecycle_runtime'
])
self.assertEquals(other_paths, [])
def testMissingDepsIsDetected(self):
@ -251,15 +255,16 @@ class TestRollChromiumRevision(unittest.TestCase):
webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile)
new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android)
_, other_paths = FindRemovedDeps(webrtc_deps, new_cr_deps)
self.assertEquals(other_paths, ['src/third_party/xstream',
'src/third_party/depot_tools'])
self.assertEquals(
other_paths,
['src/third_party/xstream', 'src/third_party/depot_tools'])
def testExpectedDepsIsNotReportedMissing(self):
"""Some deps musn't be seen as missing, even if absent from Chromium."""
webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile)
new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android)
removed_android_paths, other_paths = FindRemovedDeps(webrtc_deps,
new_cr_deps)
removed_android_paths, other_paths = FindRemovedDeps(
webrtc_deps, new_cr_deps)
self.assertTrue('src/build' not in removed_android_paths)
self.assertTrue('src/build' not in other_paths)
@ -280,9 +285,10 @@ class TestRollChromiumRevision(unittest.TestCase):
_returns=('nobody@nowhere.no', None),
_ignores=['working_dir'])
commit_msg = GenerateCommitMessage(
NO_CHROMIUM_REVISION_UPDATE, current_commit_pos, new_commit_pos,
changed_deps, added_paths, removed_paths)
commit_msg = GenerateCommitMessage(NO_CHROMIUM_REVISION_UPDATE,
current_commit_pos,
new_commit_pos, changed_deps,
added_paths, removed_paths)
return [l.strip() for l in commit_msg.split('\n')]

View File

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Invoke clang-tidy tool.
Usage: clang_tidy.py file.cc [clang-tidy-args...]
@ -25,7 +24,6 @@ import tempfile
from presubmit_checks_lib.build_helpers import GetClangTidyPath, \
GetCompilationCommand
# We enable all checkers by default for investigation purpose.
# This includes clang-analyzer-* checks.
# Individual checkers can be disabled via command line options.
@ -46,20 +44,23 @@ def Process(filepath, args):
# Remove warning flags. They aren't needed and they cause trouble
# when clang-tidy doesn't match most recent clang.
# Same battle for -f (e.g. -fcomplete-member-pointers).
command = [arg for arg in command if not (arg.startswith('-W') or
arg.startswith('-f'))]
command = [
arg for arg in command
if not (arg.startswith('-W') or arg.startswith('-f'))
]
# Path from build dir.
rel_path = os.path.relpath(os.path.abspath(filepath), out_dir)
# Replace clang++ by clang-tidy
command[0:1] = [GetClangTidyPath(),
CHECKER_OPTION,
rel_path] + args + ['--'] # Separator for clang flags.
command[0:1] = [GetClangTidyPath(), CHECKER_OPTION, rel_path
] + args + ['--'] # Separator for clang flags.
print "Running: %s" % ' '.join(command)
# Run from build dir so that relative paths are correct.
p = subprocess.Popen(command, cwd=out_dir,
stdout=sys.stdout, stderr=sys.stderr)
p = subprocess.Popen(command,
cwd=out_dir,
stdout=sys.stdout,
stderr=sys.stderr)
p.communicate()
return p.returncode
finally:

View File

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Generates a command-line for coverage.py. Useful for manual coverage runs.
Before running the generated command line, do this:
@ -17,26 +16,16 @@ gn gen out/coverage --args='use_clang_coverage=true is_component_build=false'
import sys
TESTS = [
'video_capture_tests',
'webrtc_nonparallel_tests',
'video_engine_tests',
'tools_unittests',
'test_support_unittests',
'slow_tests',
'system_wrappers_unittests',
'rtc_unittests',
'rtc_stats_unittests',
'rtc_pc_unittests',
'rtc_media_unittests',
'peerconnection_unittests',
'modules_unittests',
'modules_tests',
'low_bandwidth_audio_test',
'common_video_unittests',
'common_audio_unittests',
'video_capture_tests', 'webrtc_nonparallel_tests', 'video_engine_tests',
'tools_unittests', 'test_support_unittests', 'slow_tests',
'system_wrappers_unittests', 'rtc_unittests', 'rtc_stats_unittests',
'rtc_pc_unittests', 'rtc_media_unittests', 'peerconnection_unittests',
'modules_unittests', 'modules_tests', 'low_bandwidth_audio_test',
'common_video_unittests', 'common_audio_unittests',
'audio_decoder_unittests'
]
def main():
cmd = ([sys.executable, 'tools/code_coverage/coverage.py'] + TESTS +
['-b out/coverage', '-o out/report'] +
@ -45,11 +34,14 @@ def main():
def WithXvfb(binary):
return '-c \'%s testing/xvfb.py %s\'' % (sys.executable, binary)
modules_unittests = 'out/coverage/modules_unittests'
cmd[cmd.index('-c \'%s\'' % modules_unittests)] = WithXvfb(modules_unittests)
cmd[cmd.index('-c \'%s\'' %
modules_unittests)] = WithXvfb(modules_unittests)
print ' '.join(cmd)
return 0
if __name__ == '__main__':
sys.exit(main())

View File

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Generates command-line instructions to produce one-time iOS coverage using
coverage.py.
@ -88,11 +87,8 @@ def FormatIossimTest(test_name, is_xctest=False):
def GetGNArgs(is_simulator):
target_cpu = 'x64' if is_simulator else 'arm64'
return ([] +
['target_os="ios"'] +
['target_cpu="%s"' % target_cpu] +
['use_clang_coverage=true'] +
['is_component_build=false'] +
return ([] + ['target_os="ios"'] + ['target_cpu="%s"' % target_cpu] +
['use_clang_coverage=true'] + ['is_component_build=false'] +
['dcheck_always_on=true'])
@ -100,14 +96,12 @@ def GenerateIOSSimulatorCommand():
gn_args_string = ' '.join(GetGNArgs(is_simulator=True))
gn_cmd = ['gn', 'gen', DIRECTORY, '--args=\'%s\'' % gn_args_string]
coverage_cmd = (
[sys.executable, 'tools/code_coverage/coverage.py'] +
coverage_cmd = ([sys.executable, 'tools/code_coverage/coverage.py'] +
["%s.app" % t for t in XC_TESTS + TESTS] +
['-b %s' % DIRECTORY, '-o out/report'] +
['-i=\'.*/out/.*|.*/third_party/.*|.*test.*\''] +
[FormatIossimTest(t, is_xctest=True) for t in XC_TESTS] +
[FormatIossimTest(t, is_xctest=False) for t in TESTS]
)
[FormatIossimTest(t, is_xctest=False) for t in TESTS])
print 'To get code coverage using the iOS simulator, just run the following commands:'
print ''
@ -122,12 +116,9 @@ def GenerateIOSDeviceCommand():
coverage_report_cmd = (
[sys.executable, 'tools/code_coverage/coverage.py'] +
['%s.app' % t for t in TESTS] +
['-b %s' % DIRECTORY] +
['-o out/report'] +
['-p %s/merged.profdata' % DIRECTORY] +
['-i=\'.*/out/.*|.*/third_party/.*|.*test.*\'']
)
['%s.app' % t for t in TESTS] + ['-b %s' % DIRECTORY] +
['-o out/report'] + ['-p %s/merged.profdata' % DIRECTORY] +
['-i=\'.*/out/.*|.*/third_party/.*|.*test.*\''])
print 'Computing code coverage for a real iOS device is a little bit tedious.'
print ''
@ -170,5 +161,6 @@ def Main():
return 0
if __name__ == '__main__':
sys.exit(Main())

View File

@ -8,7 +8,6 @@
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import psutil
import sys
@ -30,8 +29,8 @@ class CpuSnapshot(object):
def Text(self):
return ('%s: avg=%s, median=%s, min=%s, max=%s' %
(self.label, numpy.average(self.samples),
numpy.median(self.samples),
numpy.min(self.samples), numpy.max(self.samples)))
numpy.median(self.samples), numpy.min(
self.samples), numpy.max(self.samples)))
def Max(self):
return numpy.max(self.samples)
@ -79,5 +78,6 @@ def main():
pyplot.show()
return 0
if __name__ == '__main__':
sys.exit(main())

View File

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Downloads precompiled tools.
These are checked into the repository as SHA-1 hashes (see *.sha1 files in
@ -17,12 +16,10 @@ so please download and compile these tools manually if this script fails.
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
sys.path.append(os.path.join(SRC_DIR, 'build'))
import find_depot_tools
find_depot_tools.add_depot_tools_to_path()
import gclient_utils
@ -40,7 +37,8 @@ def main(directories):
'download_from_google_storage.py'),
'--directory',
'--num_threads=10',
'--bucket', 'chrome-webrtc-resources',
'--bucket',
'chrome-webrtc-resources',
'--auto_platform',
'--recursive',
path,
@ -49,8 +47,9 @@ def main(directories):
# Perform download similar to how gclient hooks execute.
try:
gclient_utils.CheckCallAndFilter(
cmd, cwd=SRC_DIR, always_show_header=True)
gclient_utils.CheckCallAndFilter(cmd,
cwd=SRC_DIR,
always_show_header=True)
except (gclient_utils.Error, subprocess2.CalledProcessError) as e:
print 'Error: %s' % str(e)
return 2


@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Checks if a virtual webcam is running and starts it if not.
Returns a non-zero return code if the webcam could not be started.
@ -32,7 +31,6 @@ import psutil # pylint: disable=F0401
import subprocess
import sys
WEBCAM_WIN = ('schtasks', '/run', '/tn', 'ManyCam')
WEBCAM_MAC = ('open', '/Applications/ManyCam/ManyCam.app')
@ -52,8 +50,8 @@ def IsWebCamRunning():
for p in psutil.process_iter():
try:
if process_name == p.name:
print 'Found a running virtual webcam (%s with PID %s)' % (p.name,
p.pid)
print 'Found a running virtual webcam (%s with PID %s)' % (
p.name, p.pid)
return True
except psutil.AccessDenied:
pass # This is normal if we query sys processes, etc.
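
The scan above swallows psutil.AccessDenied because system processes cannot always be inspected. A standalone sketch of the same loop; note that current psutil releases expose name as a method, whereas the Python 2-era code above reads it as an attribute:

    import psutil

    def IsProcessRunning(process_name):
        for p in psutil.process_iter():
            try:
                # Use p.name (no call) on the old psutil this script targets.
                if p.name() == process_name:
                    print('Found %s with PID %s' % (process_name, p.pid))
                    return True
            except psutil.AccessDenied:
                pass  # Normal when querying system processes.
        return False

    print(IsProcessRunning('ManyCam'))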


@ -55,7 +55,6 @@ import subprocess
import sys
import tempfile
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir))
sys.path.append(os.path.join(SRC_DIR, 'build'))
@ -96,6 +95,7 @@ if __name__ == '__main__':
EXECUTABLE_FINAL_NAME = ARGS.executable_name + '_host'
with HostBuildDir() as build_dir:
_RunCommand([sys.executable, DepotToolPath('gn.py'), 'gen', build_dir])
_RunCommand([DepotToolPath('ninja'), '-C', build_dir, EXECUTABLE_TO_BUILD])
_RunCommand(
[DepotToolPath('ninja'), '-C', build_dir, EXECUTABLE_TO_BUILD])
shutil.copy(os.path.join(build_dir, EXECUTABLE_TO_BUILD),
EXECUTABLE_FINAL_NAME)


@ -21,8 +21,10 @@ def main():
test_command = _ForcePythonInterpreter(unrecognized_args)
if args.isolated_script_test_perf_output:
test_command += ['--isolated_script_test_perf_output=' +
args.isolated_script_test_perf_output]
test_command += [
'--isolated_script_test_perf_output=' +
args.isolated_script_test_perf_output
]
logging.info('Running %r', test_command)
return subprocess.call(test_command)
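
The wrapper above keeps only the flags it understands and forwards the rest untouched. A sketch of that parse_known_args() split, reusing the flag name from the hunk:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated_script_test_perf_output')
    # parse_known_args() separates recognized flags from everything else.
    args, unrecognized = parser.parse_known_args(
        ['--isolated_script_test_perf_output', 'perf.json', 'binary', '--foo'])
    test_command = list(unrecognized)
    if args.isolated_script_test_perf_output:
        test_command += ['--isolated_script_test_perf_output=' +
                         args.isolated_script_test_perf_output]
    print(test_command)  # ['binary', '--foo', '--isolated_...=perf.json']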


@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
@ -20,7 +19,6 @@ CHECKOUT_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
sys.path.insert(0, os.path.join(CHECKOUT_ROOT, 'build'))
import landmine_utils
host_os = landmine_utils.host_os # pylint: disable=invalid-name


@ -7,7 +7,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
This tool tries to fix (some) errors reported by `gn gen --check` or
`gn check`.
@ -31,12 +30,14 @@ from collections import defaultdict
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
CHROMIUM_DIRS = ['base', 'build', 'buildtools',
'testing', 'third_party', 'tools']
CHROMIUM_DIRS = [
'base', 'build', 'buildtools', 'testing', 'third_party', 'tools'
]
TARGET_RE = re.compile(
r'(?P<indentation_level>\s*)\w*\("(?P<target_name>\w*)"\) {$')
class TemporaryDirectory(object):
def __init__(self):
self._closed = False
@ -57,6 +58,7 @@ def Run(cmd):
sub = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return sub.communicate()
def FixErrors(filename, missing_deps, deleted_sources):
with open(filename) as f:
lines = f.readlines()
@ -72,14 +74,15 @@ def FixErrors(filename, missing_deps, deleted_sources):
elif indentation_level is not None:
match = re.match(indentation_level + '}$', line)
if match:
line = ('deps = [\n' +
''.join(' "' + dep + '",\n' for dep in missing_deps[target]) +
']\n') + line
line = ('deps = [\n' + ''.join(' "' + dep + '",\n'
for dep in missing_deps[target])
+ ']\n') + line
indentation_level = None
elif line.strip().startswith('deps'):
is_empty_deps = line.strip() == 'deps = []'
line = 'deps = [\n' if is_empty_deps else line
line += ''.join(' "' + dep + '",\n' for dep in missing_deps[target])
line += ''.join(' "' + dep + '",\n'
for dep in missing_deps[target])
line += ']\n' if is_empty_deps else ''
indentation_level = None
@ -91,10 +94,12 @@ def FixErrors(filename, missing_deps, deleted_sources):
Run(['gn', 'format', filename])
def FirstNonEmpty(iterable):
"""Return first item which evaluates to True, or fallback to None."""
return next((x for x in iterable if x), None)
def Rebase(base_path, dependency_path, dependency):
"""Adapt paths so they work both in stand-alone WebRTC and Chromium tree.
@ -134,6 +139,7 @@ def Rebase(base_path, dependency_path, dependency):
rebased = os.path.sep.join((['..'] * len(base_path)) + dependency_path)
return rebased + ':' + dependency
def main():
deleted_sources = set()
errors_by_file = defaultdict(lambda: defaultdict(set))
@ -142,9 +148,11 @@ def main():
mb_script_path = os.path.join(SCRIPT_DIR, 'mb', 'mb.py')
mb_config_file_path = os.path.join(SCRIPT_DIR, 'mb', 'mb_config.pyl')
mb_gen_command = ([
mb_script_path, 'gen',
mb_script_path,
'gen',
tmp_dir,
'--config-file', mb_config_file_path,
'--config-file',
mb_config_file_path,
] + sys.argv[1:])
mb_output = Run(mb_gen_command)
@ -174,7 +182,8 @@ def main():
path = os.path.join(path[2:], 'BUILD.gn')
errors_by_file[path][target].add(dep)
elif error[index + 1] == 'has a source file:':
deleted_file = '"' + os.path.basename(error[index+2].strip()) + '",'
deleted_file = '"' + os.path.basename(
error[index + 2].strip()) + '",'
deleted_sources.add(deleted_file)
else:
print '\n'.join(error)
@ -185,5 +194,6 @@ def main():
return 0
if __name__ == '__main__':
sys.exit(main())
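
TARGET_RE above anchors on the target's leading indentation so the matching closing brace can be found later. A short demonstration against a made-up GN snippet:

    import re

    TARGET_RE = re.compile(
        r'(?P<indentation_level>\s*)\w*\("(?P<target_name>\w*)"\) {$')

    build_gn_lines = [
        'rtc_library("audio") {',
        '  sources = [ "audio.cc" ]',
        '}',
    ]
    for line in build_gn_lines:
        match = TARGET_RE.match(line)
        if match:
            # The captured indentation later identifies the matching '}'.
            print('target %s, indent %r' %
                  (match.group('target_name'),
                   match.group('indentation_level')))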


@ -75,10 +75,9 @@ import shutil
import subprocess
import sys
Args = collections.namedtuple('Args',
['gtest_parallel_args', 'test_env', 'output_dir',
'test_artifacts_dir'])
Args = collections.namedtuple(
'Args',
['gtest_parallel_args', 'test_env', 'output_dir', 'test_artifacts_dir'])
def _CatFiles(file_list, output_file):
@ -88,6 +87,7 @@ def _CatFiles(file_list, output_file):
output_file.write(input_file.read())
os.remove(filename)
def _ParseWorkersOption(workers):
"""Interpret Nx syntax as N * cpu_count. Int value is left as is."""
base = float(workers.rstrip('x'))
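
The docstring above defines the 'Nx' worker syntax. A sketch of the full interpretation, matching the behavior exercised by the unit tests further down (the rounding and the at-least-one floor are assumptions based on those tests):

    import multiprocessing

    def ParseWorkersOption(workers):
        # '2x' -> 2 * cpu_count(); a plain integer string is used as-is.
        base = float(workers.rstrip('x'))
        if workers.endswith('x'):
            base *= multiprocessing.cpu_count()
        return int(max(base, 1))

    print(ParseWorkersOption('12'))    # 12
    print(ParseWorkersOption('0.5x'))  # half the cores, never less than 1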
@ -105,6 +105,7 @@ class ReconstructibleArgumentGroup(object):
to it are also kept in a list, so that parsed options from
ArgumentParser.parse_args can be reconstructed back into a command line (list
of args) based on the list of wanted keys."""
def __init__(self, parser, *args, **kwargs):
self._group = parser.add_argument_group(*args, **kwargs)
self._keys = []
@ -136,7 +137,8 @@ def ParseArgs(argv=None):
gtest_group.AddArgument('--gtest_color')
gtest_group.AddArgument('--gtest_filter')
gtest_group.AddArgument('--gtest_also_run_disabled_tests',
action='store_true', default=None)
action='store_true',
default=None)
gtest_group.AddArgument('--timeout')
# Syntax 'Nx' will be interpreted as N * number of cpu cores.
@ -148,7 +150,8 @@ def ParseArgs(argv=None):
# No-sandbox is a Chromium-specific flag, ignore it.
# TODO(oprypin): Remove (bugs.webrtc.org/8115)
parser.add_argument('--no-sandbox', action='store_true',
parser.add_argument('--no-sandbox',
action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('executable')
@ -160,8 +163,10 @@ def ParseArgs(argv=None):
for arg in unrecognized_args:
if arg.startswith('--isolated-script-test-perf-output'):
arg_split = arg.split('=')
assert len(arg_split) == 2, 'You must use the = syntax for this flag.'
args_to_pass.append('--isolated_script_test_perf_output=' + arg_split[1])
assert len(
arg_split) == 2, 'You must use the = syntax for this flag.'
args_to_pass.append('--isolated_script_test_perf_output=' +
arg_split[1])
else:
args_to_pass.append(arg)
@ -172,7 +177,8 @@ def ParseArgs(argv=None):
'--output_dir must be specified for storing test artifacts.')
test_artifacts_dir = os.path.join(options.output_dir, 'test_artifacts')
executable_args.insert(0, '--test_artifacts_dir=%s' % test_artifacts_dir)
executable_args.insert(0,
'--test_artifacts_dir=%s' % test_artifacts_dir)
else:
test_artifacts_dir = None
@ -198,8 +204,8 @@ def ParseArgs(argv=None):
def main():
webrtc_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
gtest_parallel_path = os.path.join(
webrtc_root, 'third_party', 'gtest-parallel', 'gtest-parallel')
gtest_parallel_path = os.path.join(webrtc_root, 'third_party',
'gtest-parallel', 'gtest-parallel')
gtest_parallel_args, test_env, output_dir, test_artifacts_dir = ParseArgs()
@ -220,10 +226,13 @@ def main():
if output_dir:
for test_status in 'passed', 'failed', 'interrupted':
logs_dir = os.path.join(output_dir, 'gtest-parallel-logs', test_status)
logs_dir = os.path.join(output_dir, 'gtest-parallel-logs',
test_status)
if not os.path.isdir(logs_dir):
continue
logs = [os.path.join(logs_dir, log) for log in os.listdir(logs_dir)]
logs = [
os.path.join(logs_dir, log) for log in os.listdir(logs_dir)
]
log_file = os.path.join(output_dir, '%s-tests.log' % test_status)
_CatFiles(logs, log_file)
os.rmdir(logs_dir)


@ -27,7 +27,6 @@ def TemporaryDirectory():
class GtestParallelWrapperHelpersTest(unittest.TestCase):
def testGetWorkersAsIs(self):
# pylint: disable=protected-access
self.assertEqual(gtest_parallel_wrapper._ParseWorkersOption('12'), 12)
@ -35,17 +34,17 @@ class GtestParallelWrapperHelpersTest(unittest.TestCase):
def testGetTwiceWorkers(self):
expected = 2 * multiprocessing.cpu_count()
# pylint: disable=protected-access
self.assertEqual(gtest_parallel_wrapper._ParseWorkersOption('2x'), expected)
self.assertEqual(gtest_parallel_wrapper._ParseWorkersOption('2x'),
expected)
def testGetHalfWorkers(self):
expected = max(multiprocessing.cpu_count() // 2, 1)
# pylint: disable=protected-access
self.assertEqual(
gtest_parallel_wrapper._ParseWorkersOption('0.5x'), expected)
self.assertEqual(gtest_parallel_wrapper._ParseWorkersOption('0.5x'),
expected)
class GtestParallelWrapperTest(unittest.TestCase):
@classmethod
def _Expected(cls, gtest_parallel_args):
return ['--shard_index=0', '--shard_count=1'] + gtest_parallel_args
@ -57,19 +56,21 @@ class GtestParallelWrapperTest(unittest.TestCase):
self.assertEqual(result.gtest_parallel_args, expected)
def testMixing(self):
result = gtest_parallel_wrapper.ParseArgs(
['--timeout=123', '--param1', 'exec', '--param2', '--timeout', '124'])
result = gtest_parallel_wrapper.ParseArgs([
'--timeout=123', '--param1', 'exec', '--param2', '--timeout', '124'
])
expected = self._Expected(
['--timeout=124', 'exec', '--', '--param1', '--param2'])
self.assertEqual(result.gtest_parallel_args, expected)
def testMixingPositional(self):
result = gtest_parallel_wrapper.ParseArgs([
'--timeout=123', 'exec', '--foo1', 'bar1', '--timeout', '124', '--foo2',
'bar2'
'--timeout=123', 'exec', '--foo1', 'bar1', '--timeout', '124',
'--foo2', 'bar2'
])
expected = self._Expected([
'--timeout=124', 'exec', '--', '--foo1', 'bar1', '--foo2', 'bar2'
])
expected = self._Expected(
['--timeout=124', 'exec', '--', '--foo1', 'bar1', '--foo2', 'bar2'])
self.assertEqual(result.gtest_parallel_args, expected)
def testDoubleDash1(self):
@ -82,7 +83,8 @@ class GtestParallelWrapperTest(unittest.TestCase):
def testDoubleDash2(self):
result = gtest_parallel_wrapper.ParseArgs(
['--timeout=123', '--', 'exec', '--timeout=124'])
expected = self._Expected(['--timeout=123', 'exec', '--', '--timeout=124'])
expected = self._Expected(
['--timeout=123', 'exec', '--', '--timeout=124'])
self.assertEqual(result.gtest_parallel_args, expected)
def testArtifacts(self):
@ -133,16 +135,16 @@ class GtestParallelWrapperTest(unittest.TestCase):
result = gtest_parallel_wrapper.ParseArgs([
'some_test', '--some_flag=some_value', '--another_flag',
'--output_dir=' + output_dir, '--store-test-artifacts',
'--isolated-script-test-perf-output=SOME_OTHER_DIR', '--foo=bar',
'--baz'
'--isolated-script-test-perf-output=SOME_OTHER_DIR',
'--foo=bar', '--baz'
])
expected_artifacts_dir = os.path.join(output_dir, 'test_artifacts')
expected = self._Expected([
'--output_dir=' + output_dir,
'some_test', '--', '--test_artifacts_dir=' + expected_artifacts_dir,
'--output_dir=' + output_dir, 'some_test', '--',
'--test_artifacts_dir=' + expected_artifacts_dir,
'--some_flag=some_value', '--another_flag',
'--isolated_script_test_perf_output=SOME_OTHER_DIR', '--foo=bar',
'--baz'
'--isolated_script_test_perf_output=SOME_OTHER_DIR',
'--foo=bar', '--baz'
])
self.assertEqual(result.gtest_parallel_args, expected)
@ -159,7 +161,8 @@ class GtestParallelWrapperTest(unittest.TestCase):
self.assertEqual(result.gtest_parallel_args, expected)
def testUseHalfTheCpuCores(self):
result = gtest_parallel_wrapper.ParseArgs(['--workers', '0.5x', 'exec'])
result = gtest_parallel_wrapper.ParseArgs(
['--workers', '0.5x', 'exec'])
workers = max(multiprocessing.cpu_count() // 2, 1)
expected = self._Expected(['--workers=%s' % workers, 'exec'])
self.assertEqual(result.gtest_parallel_args, expected)


@ -7,7 +7,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""WebRTC iOS FAT libraries build script.
Each architecture is compiled separately before being merged together.
By default, the library is created in out_ios_libs/. (Change with -o.)
@ -21,7 +20,6 @@ import shutil
import subprocess
import sys
os.environ['PATH'] = '/usr/libexec' + os.pathsep + os.environ['PATH']
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
@ -42,30 +40,59 @@ from generate_licenses import LicenseBuilder
def _ParseArgs():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--build_config', default='release',
parser.add_argument('--build_config',
default='release',
choices=['debug', 'release'],
help='The build config. Can be "debug" or "release". '
'Defaults to "release".')
parser.add_argument('--arch', nargs='+', default=DEFAULT_ARCHS,
parser.add_argument(
'--arch',
nargs='+',
default=DEFAULT_ARCHS,
choices=ENABLED_ARCHS,
help='Architectures to build. Defaults to %(default)s.')
parser.add_argument('-c', '--clean', action='store_true', default=False,
parser.add_argument(
'-c',
'--clean',
action='store_true',
default=False,
help='Removes the previously generated build output, if any.')
parser.add_argument('-p', '--purify', action='store_true', default=False,
parser.add_argument(
'-p',
'--purify',
action='store_true',
default=False,
help='Purifies the previously generated build output by '
'removing the temporary results used when (re)building.')
parser.add_argument('-o', '--output-dir', default=SDK_OUTPUT_DIR,
parser.add_argument(
'-o',
'--output-dir',
default=SDK_OUTPUT_DIR,
help='Specifies a directory to output the build artifacts to. '
'If specified together with -c, deletes the dir.')
parser.add_argument('-r', '--revision', type=int, default=0,
parser.add_argument(
'-r',
'--revision',
type=int,
default=0,
help='Specifies a revision number to embed if building the framework.')
parser.add_argument('-e', '--bitcode', action='store_true', default=False,
parser.add_argument('-e',
'--bitcode',
action='store_true',
default=False,
help='Compile with bitcode.')
parser.add_argument('--verbose', action='store_true', default=False,
parser.add_argument('--verbose',
action='store_true',
default=False,
help='Debug logging.')
parser.add_argument('--use-goma', action='store_true', default=False,
parser.add_argument('--use-goma',
action='store_true',
default=False,
help='Use goma to build.')
parser.add_argument('--extra-gn-args', default=[], nargs='*',
parser.add_argument(
'--extra-gn-args',
default=[],
nargs='*',
help='Additional GN args to be used during Ninja generation.')
return parser.parse_args()
@ -92,11 +119,13 @@ def _CleanTemporary(output_dir, architectures):
def BuildWebRTC(output_dir, target_arch, flavor, gn_target_name,
ios_deployment_target, libvpx_build_vp9, use_bitcode,
use_goma, extra_gn_args):
ios_deployment_target, libvpx_build_vp9, use_bitcode, use_goma,
extra_gn_args):
output_dir = os.path.join(output_dir, target_arch + '_libs')
gn_args = ['target_os="ios"', 'ios_enable_code_signing=false',
'use_xcode_clang=true', 'is_component_build=false']
gn_args = [
'target_os="ios"', 'ios_enable_code_signing=false',
'use_xcode_clang=true', 'is_component_build=false'
]
# Add flavor option.
if flavor == 'debug':
@ -140,6 +169,7 @@ def BuildWebRTC(output_dir, target_arch, flavor, gn_target_name,
cmd.extend(['-j', '200'])
_RunCommand(cmd)
def main():
args = _ParseArgs()
@ -161,7 +191,6 @@ def main():
gn_args.append('enable_dsyms=true')
gn_args.append('enable_stripping=true')
# Build all architectures.
for arch in architectures:
BuildWebRTC(args.output_dir, arch, args.build_config, gn_target_name,
@ -169,8 +198,9 @@ def main():
args.use_goma, gn_args)
# Create FAT archive.
lib_paths = [os.path.join(args.output_dir, arch + '_libs')
for arch in architectures]
lib_paths = [
os.path.join(args.output_dir, arch + '_libs') for arch in architectures
]
# Combine the slices.
dylib_path = os.path.join(SDK_FRAMEWORK_NAME, 'WebRTC')
@ -192,11 +222,11 @@ def main():
# Merge the dSYM slices.
lib_dsym_dir_path = os.path.join(lib_paths[0], 'WebRTC.dSYM')
if os.path.isdir(lib_dsym_dir_path):
distutils.dir_util.copy_tree(lib_dsym_dir_path,
os.path.join(args.output_dir, 'WebRTC.dSYM'))
distutils.dir_util.copy_tree(
lib_dsym_dir_path, os.path.join(args.output_dir, 'WebRTC.dSYM'))
logging.info('Merging dSYM slices.')
dsym_path = os.path.join('WebRTC.dSYM', 'Contents', 'Resources', 'DWARF',
'WebRTC')
dsym_path = os.path.join('WebRTC.dSYM', 'Contents', 'Resources',
'DWARF', 'WebRTC')
lib_dsym_paths = [os.path.join(path, dsym_path) for path in lib_paths]
out_dsym_path = os.path.join(args.output_dir, dsym_path)
try:
@ -207,26 +237,31 @@ def main():
_RunCommand(cmd)
# Generate the license file.
ninja_dirs = [os.path.join(args.output_dir, arch + '_libs')
for arch in architectures]
ninja_dirs = [
os.path.join(args.output_dir, arch + '_libs')
for arch in architectures
]
gn_target_full_name = '//sdk:' + gn_target_name
builder = LicenseBuilder(ninja_dirs, [gn_target_full_name])
builder.GenerateLicenseText(
os.path.join(args.output_dir, SDK_FRAMEWORK_NAME))
# Modify the version number.
# Format should be <Branch cut MXX>.<Hotfix #>.<Rev #>.
# e.g. 55.0.14986 means branch cut 55, no hotfixes, and revision 14986.
infoplist_path = os.path.join(args.output_dir, SDK_FRAMEWORK_NAME,
'Info.plist')
cmd = ['PlistBuddy', '-c',
'Print :CFBundleShortVersionString', infoplist_path]
cmd = [
'PlistBuddy', '-c', 'Print :CFBundleShortVersionString',
infoplist_path
]
major_minor = subprocess.check_output(cmd).strip()
version_number = '%s.%s' % (major_minor, args.revision)
logging.info('Substituting revision number: %s', version_number)
cmd = ['PlistBuddy', '-c',
'Set :CFBundleVersion ' + version_number, infoplist_path]
cmd = [
'PlistBuddy', '-c', 'Set :CFBundleVersion ' + version_number,
infoplist_path
]
_RunCommand(cmd)
_RunCommand(['plutil', '-convert', 'binary1', infoplist_path])
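
The per-arch outputs above are merged into one FAT binary ('Combine the slices'). The macOS tool for that step is lipo; a hedged sketch of the merge, with illustrative paths and no claim to match the script's exact invocation:

    import os
    import subprocess

    def CreateFatBinary(output_dir, architectures, dylib_path):
        # One input slice per architecture, merged with lipo -create.
        slices = [os.path.join(output_dir, arch + '_libs', dylib_path)
                  for arch in architectures]
        out_path = os.path.join(output_dir, dylib_path)
        subprocess.check_call(['lipo'] + slices +
                              ['-create', '-output', out_path])

    # Example: CreateFatBinary('out_ios_libs', ['arm64', 'x64'],
    #                          'WebRTC.framework/WebRTC')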


@ -9,6 +9,7 @@
import argparse
import sys
def GenerateModulemap():
parser = argparse.ArgumentParser(description='Generate modulemap')
parser.add_argument("-o", "--out", type=str, help="Output file.")
@ -29,4 +30,3 @@ def GenerateModulemap():
if __name__ == '__main__':
sys.exit(GenerateModulemap())


@ -16,13 +16,18 @@ import textwrap
def GenerateUmbrellaHeader():
parser = argparse.ArgumentParser(description='Generate umbrella header')
parser.add_argument("-o", "--out", type=str, help="Output file.")
parser.add_argument("-s", "--sources", default=[], type=str, nargs='+',
parser.add_argument("-s",
"--sources",
default=[],
type=str,
nargs='+',
help="Headers to include.")
args = parser.parse_args()
with open(args.out, "w") as outfile:
outfile.write(textwrap.dedent("""\
outfile.write(
textwrap.dedent("""\
/*
* Copyright %d The WebRTC project authors. All Rights Reserved.
*


@ -7,7 +7,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Script for merging generated iOS libraries."""
import sys
@ -34,8 +33,7 @@ def MergeLibs(lib_base_dir):
Exit code of libtool.
"""
output_dir_name = 'fat_libs'
archs = [arch for arch in os.listdir(lib_base_dir)
if arch in VALID_ARCHS]
archs = [arch for arch in os.listdir(lib_base_dir) if arch in VALID_ARCHS]
# For each arch, find (library name, libary path) for arch. We will merge
# all libraries with the same name.
libs = {}
@ -92,9 +90,13 @@ def MergeLibs(lib_base_dir):
# Merge libraries using libtool.
libtool_returncode = 0
for library, paths in valid_libs.items():
cmd_list = ['libtool', '-static', '-v', '-o',
os.path.join(output_dir_path, library)] + paths
libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
cmd_list = [
'libtool', '-static', '-v', '-o',
os.path.join(output_dir_path, library)
] + paths
libtoolout = subprocess.Popen(cmd_list,
stderr=subprocess.PIPE,
env=env)
_, err = libtoolout.communicate()
for line in err.splitlines():
if not libtool_re.match(line):
@ -120,5 +122,6 @@ def Main():
lib_base_dir = args.lib_base_dir
MergeLibs(lib_base_dir)
if __name__ == '__main__':
sys.exit(Main())


@ -36,12 +36,16 @@ LIB_TO_LICENSES_DICT = {
'abseil-cpp': ['third_party/abseil-cpp/LICENSE'],
'android_ndk': ['third_party/android_ndk/NOTICE'],
'android_sdk': ['third_party/android_sdk/LICENSE'],
'auto': ['third_party/android_deps/libs/'
'com_google_auto_service_auto_service/LICENSE'],
'auto': [
'third_party/android_deps/libs/'
'com_google_auto_service_auto_service/LICENSE'
],
'bazel': ['third_party/bazel/LICENSE'],
'boringssl': ['third_party/boringssl/src/LICENSE'],
'errorprone': ['third_party/android_deps/libs/'
'com_google_errorprone_error_prone_core/LICENSE'],
'errorprone': [
'third_party/android_deps/libs/'
'com_google_errorprone_error_prone_core/LICENSE'
],
'fiat': ['third_party/boringssl/src/third_party/fiat/LICENSE'],
'guava': ['third_party/guava/LICENSE'],
'ijar': ['third_party/ijar/LICENSE'],
@ -113,7 +117,6 @@ THIRD_PARTY_LIB_REGEX_TEMPLATE = r'^.*/third_party/%s$'
class LicenseBuilder(object):
def __init__(self,
buildfile_dirs,
targets,
@ -184,8 +187,8 @@ class LicenseBuilder(object):
output = json.loads(LicenseBuilder._RunGN(buildfile_dir, target))
libraries = set()
for described_target in output.values():
third_party_libs = (
self._ParseLibrary(dep) for dep in described_target['deps'])
third_party_libs = (self._ParseLibrary(dep)
for dep in described_target['deps'])
libraries |= set(lib for lib in third_party_libs if lib)
return libraries
@ -195,10 +198,12 @@ class LicenseBuilder(object):
third_party_libs = set()
for buildfile in self.buildfile_dirs:
for target in self.targets:
third_party_libs |= self._GetThirdPartyLibraries(buildfile, target)
third_party_libs |= self._GetThirdPartyLibraries(
buildfile, target)
assert len(third_party_libs) > 0
missing_licenses = third_party_libs - set(self.common_licenses_dict.keys())
missing_licenses = third_party_libs - set(
self.common_licenses_dict.keys())
if missing_licenses:
error_msg = 'Missing licenses for following third_party targets: %s' % \
', '.join(missing_licenses)
@ -212,10 +217,12 @@ class LicenseBuilder(object):
logging.info('List of licenses: %s', ', '.join(license_libs))
# Generate markdown.
output_license_file = open(os.path.join(output_dir, 'LICENSE.md'), 'w+')
output_license_file = open(os.path.join(output_dir, 'LICENSE.md'),
'w+')
for license_lib in license_libs:
if len(self.common_licenses_dict[license_lib]) == 0:
logging.info('Skipping compile time or internal dependency: %s',
logging.info(
'Skipping compile time or internal dependency: %s',
license_lib)
continue # Compile time dependency
@ -234,17 +241,18 @@ class LicenseBuilder(object):
def main():
parser = argparse.ArgumentParser(description='Generate WebRTC LICENSE.md')
parser.add_argument(
'--verbose', action='store_true', default=False, help='Debug logging.')
parser.add_argument(
'--target',
parser.add_argument('--verbose',
action='store_true',
default=False,
help='Debug logging.')
parser.add_argument('--target',
required=True,
action='append',
default=[],
help='Name of the GN target to generate a license for')
parser.add_argument('output_dir', help='Directory to output LICENSE.md to.')
parser.add_argument(
'buildfile_dirs',
parser.add_argument('output_dir',
help='Directory to output LICENSE.md to.')
parser.add_argument('buildfile_dirs',
nargs='+',
help='Directories containing gn generated ninja files')
args = parser.parse_args()
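
The builder above fails fast when a dependency has no entry in the license table; the check is a plain set difference. A minimal sketch with one deliberately missing, made-up library:

    common_licenses = {
        'abseil-cpp': ['third_party/abseil-cpp/LICENSE'],
        'boringssl': ['third_party/boringssl/src/LICENSE'],
    }
    third_party_libs = set(['abseil-cpp', 'boringssl', 'somelib'])

    missing_licenses = third_party_libs - set(common_licenses.keys())
    if missing_licenses:
        # Mirrors the error message assembled above.
        print('Missing licenses for following third_party targets: %s' %
              ', '.join(sorted(missing_licenses)))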


@ -16,7 +16,6 @@ from generate_licenses import LicenseBuilder
class TestLicenseBuilder(unittest.TestCase):
@staticmethod
def _FakeRunGN(buildfile_dir, target):
return """
@ -37,18 +36,18 @@ class TestLicenseBuilder(unittest.TestCase):
LicenseBuilder._ParseLibraryName('//a/b/third_party/libname1:c'),
'libname1')
self.assertEquals(
LicenseBuilder._ParseLibraryName('//a/b/third_party/libname2:c(d)'),
'libname2')
LicenseBuilder._ParseLibraryName(
'//a/b/third_party/libname2:c(d)'), 'libname2')
self.assertEquals(
LicenseBuilder._ParseLibraryName('//a/b/third_party/libname3/c:d(e)'),
'libname3')
LicenseBuilder._ParseLibraryName(
'//a/b/third_party/libname3/c:d(e)'), 'libname3')
self.assertEquals(
LicenseBuilder._ParseLibraryName('//a/b/not_third_party/c'), None)
def testParseLibrarySimpleMatch(self):
builder = LicenseBuilder([], [], {}, {})
self.assertEquals(
builder._ParseLibrary('//a/b/third_party/libname:c'), 'libname')
self.assertEquals(builder._ParseLibrary('//a/b/third_party/libname:c'),
'libname')
def testParseLibraryRegExNoMatchFallbacksToDefaultLibname(self):
lib_dict = {
@ -56,7 +55,8 @@ class TestLicenseBuilder(unittest.TestCase):
}
builder = LicenseBuilder([], [], lib_dict, {})
self.assertEquals(
builder._ParseLibrary('//a/b/third_party/libname:bar_java'), 'libname')
builder._ParseLibrary('//a/b/third_party/libname:bar_java'),
'libname')
def testParseLibraryRegExMatch(self):
lib_regex_dict = {
@ -82,7 +82,8 @@ class TestLicenseBuilder(unittest.TestCase):
}
builder = LicenseBuilder([], [], {}, lib_regex_dict)
self.assertEquals(
builder._ParseLibrary('//a/b/third_party/libname/fooHAHA:bar_java'),
builder._ParseLibrary(
'//a/b/third_party/libname/fooHAHA:bar_java'),
'libname/foo.*bar.*')
@mock.patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN)


@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Configuration class for network emulation."""
@ -32,5 +31,6 @@ class ConnectionConfig(object):
"""
left_aligned_name = self.name.ljust(24, ' ')
return '%2s %24s %5s kbps %5s kbps %4s %5s ms %3s %%' % (
self.num, left_aligned_name, self.receive_bw_kbps, self.send_bw_kbps,
self.queue_slots, self.delay_ms, self.packet_loss_percent)
self.num, left_aligned_name, self.receive_bw_kbps,
self.send_bw_kbps, self.queue_slots, self.delay_ms,
self.packet_loss_percent)


@ -6,10 +6,8 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Script for constraining traffic on the local machine."""
import logging
import optparse
import socket
@ -18,7 +16,6 @@ import sys
import config
import network_emulator
_DEFAULT_LOG_LEVEL = logging.INFO
# Default port range to apply network constraints on.
@ -71,31 +68,53 @@ def _ParseArgs():
'ID Name Receive Send Queue Delay loss \n'
'-- ---- --------- -------- ----- ------- ------\n'
'%s\n' % presets_string))
parser.add_option('-p', '--preset', type='int', default=_DEFAULT_PRESET_ID,
parser.add_option('-p',
'--preset',
type='int',
default=_DEFAULT_PRESET_ID,
help=('ConnectionConfig configuration, specified by ID. '
'Default: %default'))
parser.add_option('-r', '--receive-bw', type='int',
parser.add_option(
'-r',
'--receive-bw',
type='int',
default=_DEFAULT_PRESET.receive_bw_kbps,
help=('Receive bandwidth in kilobit/s. Default: %default'))
parser.add_option('-s', '--send-bw', type='int',
parser.add_option('-s',
'--send-bw',
type='int',
default=_DEFAULT_PRESET.send_bw_kbps,
help=('Send bandwidth in kilobit/s. Default: %default'))
parser.add_option('-d', '--delay', type='int',
parser.add_option('-d',
'--delay',
type='int',
default=_DEFAULT_PRESET.delay_ms,
help=('Delay in ms. Default: %default'))
parser.add_option('-l', '--packet-loss', type='float',
parser.add_option('-l',
'--packet-loss',
type='float',
default=_DEFAULT_PRESET.packet_loss_percent,
help=('Packet loss in %. Default: %default'))
parser.add_option('-q', '--queue', type='int',
parser.add_option(
'-q',
'--queue',
type='int',
default=_DEFAULT_PRESET.queue_slots,
help=('Queue size as number of slots. Default: %default'))
parser.add_option('--port-range', default='%s,%s' % _DEFAULT_PORT_RANGE,
parser.add_option(
'--port-range',
default='%s,%s' % _DEFAULT_PORT_RANGE,
help=('Range of ports for constrained network. Specify as '
'two comma separated integers. Default: %default'))
parser.add_option('--target-ip', default=None,
parser.add_option(
'--target-ip',
default=None,
help=('The interface IP address to apply the rules for. '
'Default: the external facing interface IP address.'))
parser.add_option('-v', '--verbose', action='store_true', default=False,
parser.add_option('-v',
'--verbose',
action='store_true',
default=False,
help=('Turn on verbose output. Will print all \'ipfw\' '
'commands that are executed.'))
@ -110,15 +129,17 @@ def _ParseArgs():
try:
socket.inet_aton(options.target_ip)
except socket.error:
parser.error('Invalid IP address specified: %s' % options.target_ip)
parser.error('Invalid IP address specified: %s' %
options.target_ip)
# Convert port range into the desired tuple format.
try:
if isinstance(options.port_range, str):
options.port_range = tuple(int(port) for port in
options.port_range.split(','))
options.port_range = tuple(
int(port) for port in options.port_range.split(','))
if len(options.port_range) != 2:
parser.error('Invalid port range specified, please specify two '
parser.error(
'Invalid port range specified, please specify two '
'integers separated by a comma.')
except ValueError:
parser.error('Invalid port range specified.')
@ -167,18 +188,16 @@ def main():
logging.info('Constraining traffic to/from IP: %s', external_ip)
try:
emulator.Emulate(external_ip)
logging.info('Started network emulation with the following configuration:\n'
logging.info(
'Started network emulation with the following configuration:\n'
' Receive bandwidth: %s kbps (%s kB/s)\n'
' Send bandwidth : %s kbps (%s kB/s)\n'
' Delay : %s ms\n'
' Packet loss : %s %%\n'
' Queue slots : %s',
connection_config.receive_bw_kbps,
' Queue slots : %s', connection_config.receive_bw_kbps,
connection_config.receive_bw_kbps / 8,
connection_config.send_bw_kbps,
connection_config.send_bw_kbps/8,
connection_config.delay_ms,
connection_config.packet_loss_percent,
connection_config.send_bw_kbps, connection_config.send_bw_kbps / 8,
connection_config.delay_ms, connection_config.packet_loss_percent,
connection_config.queue_slots)
logging.info('Affected traffic: IP traffic on ports %s-%s',
options.port_range[0], options.port_range[1])
@ -191,5 +210,6 @@ def main():
logging.error('Error: %s\n\nCause: %s', e.fail_msg, e.error)
return -2
if __name__ == '__main__':
sys.exit(main())
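
The --port-range option above arrives as a 'low,high' string and must become a two-integer tuple before use. A standalone sketch of that conversion and validation, with illustrative port values:

    def ParsePortRange(port_range):
        # 'low,high' -> (low, high); int() raises ValueError on bad input.
        ports = tuple(int(port) for port in port_range.split(','))
        if len(ports) != 2:
            raise ValueError('Specify two integers separated by a comma.')
        return ports

    print(ParsePortRange('32000,32999'))  # (32000, 32999)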


@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Script for constraining traffic on the local machine."""
import ctypes
@ -27,7 +26,11 @@ class NetworkEmulatorError(BaseException):
stderr: Error output of running the command.
"""
def __init__(self, fail_msg, cmd=None, returncode=None, output=None,
def __init__(self,
fail_msg,
cmd=None,
returncode=None,
output=None,
error=None):
BaseException.__init__(self, fail_msg)
self.fail_msg = fail_msg
@ -75,7 +78,8 @@ class NetworkEmulator(object):
# Adding the rules will start the emulation.
incoming_rule_id = self._CreateDummynetRule(receive_pipe_id, 'any',
target_ip, self._port_range)
target_ip,
self._port_range)
logging.debug('Created incoming rule: %s', incoming_rule_id)
outgoing_rule_id = self._CreateDummynetRule(send_pipe_id, target_ip,
'any', self._port_range)
@ -91,12 +95,14 @@ class NetworkEmulator(object):
"""
try:
if os.getuid() != 0:
raise NetworkEmulatorError('You must run this script with sudo.')
raise NetworkEmulatorError(
'You must run this script with sudo.')
except AttributeError:
# AttributeError will be raised on Windows.
if ctypes.windll.shell32.IsUserAnAdmin() == 0:
raise NetworkEmulatorError('You must run this script with administrator'
raise NetworkEmulatorError(
'You must run this script with administrator'
' privileges.')
def _CreateDummynetRule(self, pipe_id, from_address, to_address,
@ -116,16 +122,20 @@ class NetworkEmulator(object):
each rule being added.
"""
self._rule_counter += 100
add_part = ['add', self._rule_counter, 'pipe', pipe_id,
'ip', 'from', from_address, 'to', to_address]
_RunIpfwCommand(add_part + ['src-port', '%s-%s' % port_range],
add_part = [
'add', self._rule_counter, 'pipe', pipe_id, 'ip', 'from',
from_address, 'to', to_address
]
_RunIpfwCommand(add_part +
['src-port', '%s-%s' % port_range],
'Failed to add Dummynet src-port rule.')
_RunIpfwCommand(add_part + ['dst-port', '%s-%s' % port_range],
_RunIpfwCommand(add_part +
['dst-port', '%s-%s' % port_range],
'Failed to add Dummynet dst-port rule.')
return self._rule_counter
def _CreateDummynetPipe(self, bandwidth_kbps, delay_ms, packet_loss_percent,
queue_slots):
def _CreateDummynetPipe(self, bandwidth_kbps, delay_ms,
packet_loss_percent, queue_slots):
"""Creates a Dummynet pipe and return its ID.
Args:
@ -137,28 +147,30 @@ class NetworkEmulator(object):
The ID of the pipe, starting at 1.
"""
self._pipe_counter += 1
cmd = ['pipe', self._pipe_counter, 'config',
'bw', str(bandwidth_kbps/8) + 'KByte/s',
'delay', '%sms' % delay_ms,
'plr', (packet_loss_percent/100.0),
'queue', queue_slots]
cmd = [
'pipe', self._pipe_counter, 'config', 'bw',
str(bandwidth_kbps / 8) + 'KByte/s', 'delay',
'%sms' % delay_ms, 'plr', (packet_loss_percent / 100.0), 'queue',
queue_slots
]
error_message = 'Failed to create Dummynet pipe. '
if sys.platform.startswith('linux'):
error_message += ('Make sure you have loaded the ipfw_mod.ko module to '
error_message += (
'Make sure you have loaded the ipfw_mod.ko module to '
'your kernel (sudo insmod /path/to/ipfw_mod.ko).')
_RunIpfwCommand(cmd, error_message)
return self._pipe_counter
def Cleanup():
"""Stops the network emulation by flushing all Dummynet rules.
Notice that this will flush any rules that may have been created previously
before starting the emulation.
"""
_RunIpfwCommand(['-f', 'flush'],
'Failed to flush Dummynet rules!')
_RunIpfwCommand(['-f', 'pipe', 'flush'],
'Failed to flush Dummynet pipes!')
_RunIpfwCommand(['-f', 'flush'], 'Failed to flush Dummynet rules!')
_RunIpfwCommand(['-f', 'pipe', 'flush'], 'Failed to flush Dummynet pipes!')
def _RunIpfwCommand(command, fail_msg=None):
"""Executes a command and prefixes the appropriate command for
@ -180,10 +192,11 @@ def _RunIpfwCommand(command, fail_msg=None):
cmd_list = ipfw_command[:] + [str(x) for x in command]
cmd_string = ' '.join(cmd_list)
logging.debug('Running command: %s', cmd_string)
process = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
process = subprocess.Popen(cmd_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = process.communicate()
if process.returncode != 0:
raise NetworkEmulatorError(fail_msg, cmd_string, process.returncode, output,
error)
raise NetworkEmulatorError(fail_msg, cmd_string, process.returncode,
output, error)
return output.strip()
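
_RunIpfwCommand above stringifies every element before invoking ipfw, which is why the pipe configuration can mix integers, floats, and strings. A sketch of assembling one Dummynet pipe command; the 'sudo ipfw' prefix is an assumption here, since the real platform-specific prefix is chosen elsewhere in the script:

    def BuildPipeConfigCommand(pipe_id, bandwidth_kbps, delay_ms,
                               packet_loss_percent, queue_slots):
        # ipfw wants bandwidth in KByte/s and loss as a 0..1 fraction.
        cmd = ['pipe', pipe_id, 'config',
               'bw', str(bandwidth_kbps // 8) + 'KByte/s',
               'delay', '%sms' % delay_ms,
               'plr', packet_loss_percent / 100.0,
               'queue', queue_slots]
        return ['sudo', 'ipfw'] + [str(x) for x in cmd]

    print(' '.join(BuildPipeConfigCommand(1, 1000, 100, 1.0, 10)))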


@ -7,7 +7,6 @@
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import httplib2
import json
import subprocess
@ -53,8 +52,10 @@ def _SendHistogramSet(url, histograms, oauth_token):
print 'Sending %d bytes to %s.' % (len(data), url + '/add_histograms')
http = httplib2.Http()
response, content = http.request(url + '/add_histograms', method='POST',
body=data, headers=headers)
response, content = http.request(url + '/add_histograms',
method='POST',
body=data,
headers=headers)
return response, content
@ -63,12 +64,14 @@ def _SendHistogramSet(url, histograms, oauth_token):
def _ApplyHacks(dicts):
for d in dicts:
if 'running' in d:
def _NoInf(value):
if value == float('inf'):
return histogram.JS_MAX_VALUE
if value == float('-inf'):
return -histogram.JS_MAX_VALUE
return value
d['running'] = [_NoInf(value) for value in d['running']]
return dicts
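
_NoInf above clamps infinities because the dashboard's JSON payload cannot carry them. The same guard as a standalone function, with catapult's JS_MAX_VALUE constant written out (it is the largest double a JavaScript Number can hold):

    JS_MAX_VALUE = 1.7976931348623157e+308

    def NoInf(value):
        # Replace +/-inf with the largest finite JS double.
        if value == float('inf'):
            return JS_MAX_VALUE
        if value == float('-inf'):
            return -JS_MAX_VALUE
        return value

    print([NoInf(v) for v in [3, float('inf'), float('-inf')]])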
@ -110,13 +113,13 @@ def UploadToDashboard(options):
_DumpOutput(histograms, options.output_json_file)
oauth_token = _GenerateOauthToken()
response, content = _SendHistogramSet(
options.dashboard_url, histograms, oauth_token)
response, content = _SendHistogramSet(options.dashboard_url, histograms,
oauth_token)
if response.status == 200:
print 'Received 200 from dashboard.'
return 0
else:
print('Upload failed with %d: %s\n\n%s' % (response.status, response.reason,
content))
print('Upload failed with %d: %s\n\n%s' %
(response.status, response.reason, content))
return 1


@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Adds build info to perf results and uploads them.
The tests don't know which bot executed the tests or at what revision, so we
@ -25,31 +24,44 @@ import sys
def _CreateParser():
parser = argparse.ArgumentParser()
parser.add_argument('--perf-dashboard-machine-group', required=True,
parser.add_argument('--perf-dashboard-machine-group',
required=True,
help='The "master" the bots are grouped under. This '
'string is the group in the the perf dashboard path '
'group/bot/perf_id/metric/subtest.')
parser.add_argument('--bot', required=True,
parser.add_argument('--bot',
required=True,
help='The bot running the test (e.g. '
'webrtc-win-large-tests).')
parser.add_argument('--test-suite', required=True,
parser.add_argument(
'--test-suite',
required=True,
help='The key for the test in the dashboard (i.e. what '
'you select in the top-level test suite selector in the '
'dashboard')
parser.add_argument('--webrtc-git-hash', required=True,
parser.add_argument('--webrtc-git-hash',
required=True,
help='webrtc.googlesource.com commit hash.')
parser.add_argument('--commit-position', type=int, required=True,
parser.add_argument('--commit-position',
type=int,
required=True,
help='Commit pos corresponding to the git hash.')
parser.add_argument('--build-page-url', required=True,
parser.add_argument('--build-page-url',
required=True,
help='URL to the build page for this build.')
parser.add_argument('--dashboard-url', required=True,
parser.add_argument('--dashboard-url',
required=True,
help='Which dashboard to use.')
parser.add_argument('--input-results-file', type=argparse.FileType(),
parser.add_argument('--input-results-file',
type=argparse.FileType(),
required=True,
help='A JSON file with output from WebRTC tests.')
parser.add_argument('--output-json-file', type=argparse.FileType('w'),
parser.add_argument('--output-json-file',
type=argparse.FileType('w'),
help='Where to write the output (for debugging).')
parser.add_argument('--outdir', required=True,
parser.add_argument(
'--outdir',
required=True,
help='Path to the local out/ dir (usually out/Default)')
return parser
@ -67,21 +79,22 @@ def _ConfigurePythonPath(options):
checkout_root = os.path.abspath(
os.path.join(script_dir, os.pardir, os.pardir))
sys.path.insert(0, os.path.join(checkout_root, 'third_party', 'catapult',
'tracing'))
sys.path.insert(0, os.path.join(checkout_root, 'third_party', 'protobuf',
'python'))
sys.path.insert(
0, os.path.join(checkout_root, 'third_party', 'catapult', 'tracing'))
sys.path.insert(
0, os.path.join(checkout_root, 'third_party', 'protobuf', 'python'))
# The webrtc_dashboard_upload gn rule will build the protobuf stub for python,
# so put it in the path for this script before we attempt to import it.
histogram_proto_path = os.path.join(
options.outdir, 'pyproto', 'tracing', 'tracing', 'proto')
histogram_proto_path = os.path.join(options.outdir, 'pyproto', 'tracing',
'tracing', 'proto')
sys.path.insert(0, histogram_proto_path)
# Fail early in case the proto hasn't been built.
from tracing.proto import histogram_proto
if not histogram_proto.HAS_PROTO:
raise ImportError('Could not find histogram_pb2. You need to build the '
raise ImportError(
'Could not find histogram_pb2. You need to build the '
'webrtc_dashboard_upload target before invoking this '
'script. Expected to find '
'histogram_pb2.py in %s.' % histogram_proto_path)
@ -97,5 +110,6 @@ def main(args):
return catapult_uploader.UploadToDashboard(options)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))


@ -5,7 +5,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""This script helps to invoke gn and ninja
which lie in depot_tools repository."""
@ -68,11 +67,11 @@ def RunGnCheck(root_dir=None):
def RunNinjaCommand(args, root_dir=None):
"""Runs ninja quietly. Any failure (e.g. clang not found) is
silently discarded, since this is unlikely an error in submitted CL."""
command = [
os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja')
] + args
p = subprocess.Popen(command, cwd=root_dir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
command = [os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja')] + args
p = subprocess.Popen(command,
cwd=root_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, _ = p.communicate()
return out
@ -106,8 +105,7 @@ def GetCompilationCommand(filepath, gn_args, work_dir):
"""
gn_errors = RunGnCommand(['gen'] + gn_args + [work_dir])
if gn_errors:
raise(RuntimeError(
'FYI, cannot complete check due to gn error:\n%s\n'
raise (RuntimeError('FYI, cannot complete check due to gn error:\n%s\n'
'Please open a bug.' % gn_errors))
# Needed for single file compilation.


@ -14,7 +14,6 @@ import unittest
#pylint: disable=relative-import
import build_helpers
TESTDATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'testdata')
@ -22,8 +21,10 @@ TESTDATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
class GnCheckTest(unittest.TestCase):
def testCircularDependencyError(self):
test_dir = os.path.join(TESTDATA_DIR, 'circular_dependency')
expected_errors = ['ERROR Dependency cycle:\n'
' //:bar ->\n //:foo ->\n //:bar']
expected_errors = [
'ERROR Dependency cycle:\n'
' //:bar ->\n //:foo ->\n //:bar'
]
self.assertListEqual(expected_errors,
build_helpers.RunGnCheck(test_dir))


@ -11,12 +11,11 @@ import os
import re
import string
# TARGET_RE matches a GN target, and extracts the target name and the contents.
TARGET_RE = re.compile(r'(?P<indent>\s*)\w+\("(?P<target_name>\w+)"\) {'
TARGET_RE = re.compile(
r'(?P<indent>\s*)\w+\("(?P<target_name>\w+)"\) {'
r'(?P<target_contents>.*?)'
r'(?P=indent)}',
re.MULTILINE | re.DOTALL)
r'(?P=indent)}', re.MULTILINE | re.DOTALL)
# SOURCES_RE matches a block of sources inside a GN target.
SOURCES_RE = re.compile(
@ -68,8 +67,8 @@ def GetBuildGnPathFromFilePath(file_path, file_exists_check, root_dir_path):
if file_exists_check(candidate_build_gn_path):
return candidate_build_gn_path
else:
candidate_dir = os.path.abspath(os.path.join(candidate_dir,
os.pardir))
candidate_dir = os.path.abspath(
os.path.join(candidate_dir, os.pardir))
raise NoBuildGnFoundError(
'No BUILD.gn file found for file: `{}`'.format(file_path))
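
GetBuildGnPathFromFilePath above climbs parent directories until a BUILD.gn turns up, raising once it leaves the checkout. A compact sketch of that ascent (the real function also rejects non-header inputs):

    import os

    def FindBuildGn(file_path, root_dir):
        candidate_dir = os.path.dirname(file_path)
        while candidate_dir.startswith(root_dir):
            candidate = os.path.join(candidate_dir, 'BUILD.gn')
            if os.path.isfile(candidate):
                return candidate
            # Step one directory up and try again.
            candidate_dir = os.path.abspath(
                os.path.join(candidate_dir, os.pardir))
        raise LookupError('No BUILD.gn file found for file: %s' % file_path)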
@ -88,8 +87,8 @@ def IsHeaderInBuildGn(header_path, build_gn_path):
"""
target_abs_path = os.path.dirname(build_gn_path)
build_gn_content = _ReadFile(build_gn_path)
headers_in_build_gn = GetHeadersInBuildGnFileSources(build_gn_content,
target_abs_path)
headers_in_build_gn = GetHeadersInBuildGnFileSources(
build_gn_content, target_abs_path)
return header_path in headers_in_build_gn
@ -114,6 +113,6 @@ def GetHeadersInBuildGnFileSources(file_content, target_abs_path):
source_file = source_file_match.group('source_file')
if source_file.endswith('.h'):
source_file_tokens = string.split(source_file, '/')
headers_in_sources.add(os.path.join(target_abs_path,
*source_file_tokens))
headers_in_sources.add(
os.path.join(target_abs_path, *source_file_tokens))
return headers_in_sources


@ -23,12 +23,10 @@ def _GetRootBasedOnPlatform():
def _GetPath(*path_chunks):
return os.path.join(_GetRootBasedOnPlatform(),
*path_chunks)
return os.path.join(_GetRootBasedOnPlatform(), *path_chunks)
class GetBuildGnPathFromFilePathTest(unittest.TestCase):
def testGetBuildGnFromSameDirectory(self):
file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.h')
expected_build_path = _GetPath('home', 'projects', 'webrtc', 'base',
@ -38,9 +36,8 @@ class GetBuildGnPathFromFilePathTest(unittest.TestCase):
src_dir_path = _GetPath('home', 'projects', 'webrtc')
self.assertEqual(
expected_build_path,
check_orphan_headers.GetBuildGnPathFromFilePath(file_path,
file_exists,
src_dir_path))
check_orphan_headers.GetBuildGnPathFromFilePath(
file_path, file_exists, src_dir_path))
def testGetBuildPathFromParentDirectory(self):
file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.h')
@ -51,31 +48,28 @@ class GetBuildGnPathFromFilePathTest(unittest.TestCase):
src_dir_path = _GetPath('home', 'projects', 'webrtc')
self.assertEqual(
expected_build_path,
check_orphan_headers.GetBuildGnPathFromFilePath(file_path,
file_exists,
src_dir_path))
check_orphan_headers.GetBuildGnPathFromFilePath(
file_path, file_exists, src_dir_path))
def testExceptionIfNoBuildGnFilesAreFound(self):
with self.assertRaises(check_orphan_headers.NoBuildGnFoundError):
file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.h')
file_exists = lambda p: False
src_dir_path = _GetPath('home', 'projects', 'webrtc')
check_orphan_headers.GetBuildGnPathFromFilePath(file_path,
file_exists,
src_dir_path)
check_orphan_headers.GetBuildGnPathFromFilePath(
file_path, file_exists, src_dir_path)
def testExceptionIfFilePathIsNotAnHeader(self):
with self.assertRaises(check_orphan_headers.WrongFileTypeError):
file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.cc')
file_path = _GetPath('home', 'projects', 'webrtc', 'base',
'foo.cc')
file_exists = lambda p: False
src_dir_path = _GetPath('home', 'projects', 'webrtc')
check_orphan_headers.GetBuildGnPathFromFilePath(file_path,
file_exists,
src_dir_path)
check_orphan_headers.GetBuildGnPathFromFilePath(
file_path, file_exists, src_dir_path)
class GetHeadersInBuildGnFileSourcesTest(unittest.TestCase):
def testEmptyFileReturnsEmptySet(self):
self.assertEqual(
set([]),
@ -115,8 +109,8 @@ class GetHeadersInBuildGnFileSourcesTest(unittest.TestCase):
_GetPath('a', 'b', 'public_foo.h'),
_GetPath('a', 'b', 'baz', 'foo.h'),
]),
check_orphan_headers.GetHeadersInBuildGnFileSources(file_content,
target_abs_path))
check_orphan_headers.GetHeadersInBuildGnFileSources(
file_content, target_abs_path))
if __name__ == '__main__':


@ -14,12 +14,11 @@ import os
import re
import sys
# TARGET_RE matches a GN target, and extracts the target name and the contents.
TARGET_RE = re.compile(r'(?P<indent>\s*)\w+\("(?P<target_name>\w+)"\) {'
TARGET_RE = re.compile(
r'(?P<indent>\s*)\w+\("(?P<target_name>\w+)"\) {'
r'(?P<target_contents>.*?)'
r'(?P=indent)}',
re.MULTILINE | re.DOTALL)
r'(?P=indent)}', re.MULTILINE | re.DOTALL)
# SOURCES_RE matches a block of sources inside a GN target.
SOURCES_RE = re.compile(r'sources \+?= \[(?P<sources>.*?)\]',
@ -31,7 +30,8 @@ ERROR_MESSAGE = ("{build_file_path} in target '{target_name}':\n"
class PackageBoundaryViolation(
collections.namedtuple('PackageBoundaryViolation',
collections.namedtuple(
'PackageBoundaryViolation',
'build_file_path target_name source_file subpackage')):
def __str__(self):
return ERROR_MESSAGE.format(**self._asdict())
@ -43,7 +43,8 @@ def _BuildSubpackagesPattern(packages, query):
query += os.path.sep
length = len(query)
pattern = r'\s*"(?P<source_file>(?P<subpackage>'
pattern += '|'.join(re.escape(package[length:].replace(os.path.sep, '/'))
pattern += '|'.join(
re.escape(package[length:].replace(os.path.sep, '/'))
for package in packages if package.startswith(query))
pattern += r')/[\w\./]*)"'
return re.compile(pattern)
@ -74,18 +75,22 @@ def _CheckBuildFile(build_file_path, packages):
source_file = subpackages_match.group('source_file')
if subpackage:
yield PackageBoundaryViolation(build_file_path,
target_name, source_file, subpackage)
target_name, source_file,
subpackage)
def CheckPackageBoundaries(root_dir, build_files=None):
packages = [root for root, _, files in os.walk(root_dir)
if 'BUILD.gn' in files]
packages = [
root for root, _, files in os.walk(root_dir) if 'BUILD.gn' in files
]
if build_files is not None:
for build_file_path in build_files:
assert build_file_path.startswith(root_dir)
else:
build_files = [os.path.join(package, 'BUILD.gn') for package in packages]
build_files = [
os.path.join(package, 'BUILD.gn') for package in packages
]
messages = []
for build_file_path in build_files:
@ -98,14 +103,19 @@ def main(argv):
description='Script that checks package boundary violations in GN '
'build files.')
parser.add_argument('root_dir', metavar='ROOT_DIR',
parser.add_argument('root_dir',
metavar='ROOT_DIR',
help='The root directory that contains all BUILD.gn '
'files to be processed.')
parser.add_argument('build_files', metavar='BUILD_FILE', nargs='*',
parser.add_argument('build_files',
metavar='BUILD_FILE',
nargs='*',
help='A list of BUILD.gn files to be processed. If no '
'files are given, all BUILD.gn files under ROOT_DIR '
'will be processed.')
parser.add_argument('--max_messages', type=int, default=None,
parser.add_argument('--max_messages',
type=int,
default=None,
help='If set, the maximum number of violations to be '
'displayed.')


@ -15,7 +15,6 @@ import unittest
#pylint: disable=relative-import
from check_package_boundaries import CheckPackageBoundaries
MSG_FORMAT = 'ERROR:check_package_boundaries.py: Unexpected %s.'
TESTDATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'testdata')
@ -34,9 +33,11 @@ class UnitTest(unittest.TestCase):
messages = []
for violation in CheckPackageBoundaries(test_dir, build_files):
build_file_path = os.path.relpath(violation.build_file_path, test_dir)
build_file_path = os.path.relpath(violation.build_file_path,
test_dir)
build_file_path = build_file_path.replace(os.path.sep, '/')
messages.append(violation._replace(build_file_path=build_file_path))
messages.append(
violation._replace(build_file_path=build_file_path))
expected_messages = ReadPylFile(os.path.join(test_dir, 'expected.pyl'))
self.assertListEqual(sorted(expected_messages), sorted(messages))
@ -45,11 +46,12 @@ class UnitTest(unittest.TestCase):
self._RunTest(os.path.join(TESTDATA_DIR, 'no_errors'))
def testMultipleErrorsSingleTarget(self):
self._RunTest(os.path.join(TESTDATA_DIR, 'multiple_errors_single_target'))
self._RunTest(
os.path.join(TESTDATA_DIR, 'multiple_errors_single_target'))
def testMultipleErrorsMultipleTargets(self):
self._RunTest(os.path.join(TESTDATA_DIR,
'multiple_errors_multiple_targets'))
self._RunTest(
os.path.join(TESTDATA_DIR, 'multiple_errors_multiple_targets'))
def testCommonPrefix(self):
self._RunTest(os.path.join(TESTDATA_DIR, 'common_prefix'))


@ -6,8 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""This is a tool to transform a crt file into a C/C++ header.
Usage:
@ -44,7 +42,10 @@ def main():
"""The main entrypoint."""
parser = OptionParser('usage %prog FILE')
parser.add_option('-v', '--verbose', dest='verbose', action='store_true')
parser.add_option('-f', '--full_cert', dest='full_cert', action='store_true')
parser.add_option('-f',
'--full_cert',
dest='full_cert',
action='store_true')
options, args = parser.parse_args()
if len(args) < 1:
parser.error('No crt file specified.')
@ -92,8 +93,8 @@ def _GenCFiles(root_dir, options):
_CHAR_TYPE, options)
certificate_list = _CreateArraySectionHeader(_CERTIFICATE_VARIABLE,
_CHAR_TYPE, options)
certificate_size_list = _CreateArraySectionHeader(_CERTIFICATE_SIZE_VARIABLE,
_INT_TYPE, options)
certificate_size_list = _CreateArraySectionHeader(
_CERTIFICATE_SIZE_VARIABLE, _INT_TYPE, options)
for _, _, files in os.walk(root_dir):
for current_file in files:
@ -101,12 +102,14 @@ def _GenCFiles(root_dir, options):
prefix_length = len(_PREFIX)
length = len(current_file) - len(_EXTENSION)
label = current_file[prefix_length:length]
filtered_output, cert_size = _CreateCertSection(root_dir, current_file,
label, options)
filtered_output, cert_size = _CreateCertSection(
root_dir, current_file, label, options)
output_header_file.write(filtered_output + '\n\n\n')
if options.full_cert:
subject_name_list += _AddLabelToArray(label, _SUBJECT_NAME_ARRAY)
public_key_list += _AddLabelToArray(label, _PUBLIC_KEY_ARRAY)
subject_name_list += _AddLabelToArray(
label, _SUBJECT_NAME_ARRAY)
public_key_list += _AddLabelToArray(
label, _PUBLIC_KEY_ARRAY)
certificate_list += _AddLabelToArray(label, _CERTIFICATE_ARRAY)
certificate_size_list += (' %s,\n') % (cert_size)
@ -152,7 +155,8 @@ def _CreateCertSection(root_dir, source_file, label, options):
def _CreateOutputHeader():
output = ('/*\n'
output = (
'/*\n'
' * Copyright 2004 The WebRTC Project Authors. All rights '
'reserved.\n'
' *\n'
@ -176,11 +180,12 @@ def _CreateOutputHeader():
'// also it would breaks subject/issuer lines.\n\n')
return output
def _CreateOutputFooter():
output = ('// clang-format on\n\n'
'#endif // RTC_BASE_SSL_ROOTS_H_\n')
output = ('// clang-format on\n\n' '#endif // RTC_BASE_SSL_ROOTS_H_\n')
return output
def _CreateArraySectionHeader(type_name, type_type, options):
output = ('const %s kSSLCert%sList[] = {\n') % (type_type, type_name)
_PrintOutput(output, options)
@ -208,5 +213,6 @@ def _PrintOutput(output, options):
if options.verbose:
print output
if __name__ == '__main__':
main()


@ -53,7 +53,6 @@
#
# * This has only been tested on gPrecise.
import os
import os.path
import shlex
@ -75,6 +74,7 @@ _EXTENSION_FLAGS = {
'.mm': ['-x', 'objective-c++'],
}
def PathExists(*args):
return os.path.exists(os.path.join(*args))
@ -92,8 +92,8 @@ def FindWebrtcSrcFromFilename(filename):
"""
curdir = os.path.normpath(os.path.dirname(filename))
while not (os.path.basename(curdir) == 'src'
and PathExists(curdir, 'DEPS')
and (PathExists(curdir, '..', '.gclient')
and PathExists(curdir, 'DEPS') and
(PathExists(curdir, '..', '.gclient')
or PathExists(curdir, '.git'))):
nextdir = os.path.normpath(os.path.join(curdir, '..'))
if nextdir == curdir:
@ -140,7 +140,8 @@ def GetNinjaBuildOutputsForSourceFile(out_dir, filename):
rel_filename = os.path.relpath(filename, out_dir)
p = subprocess.Popen(['ninja', '-C', out_dir, '-t', 'query', rel_filename],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
stdout, _ = p.communicate()
if p.returncode != 0:
@ -155,8 +156,10 @@ def GetNinjaBuildOutputsForSourceFile(out_dir, filename):
#
outputs_text = stdout.partition('\n outputs:\n')[2]
output_lines = [line.strip() for line in outputs_text.split('\n')]
return [target for target in output_lines
if target and (target.endswith('.o') or target.endswith('.obj'))]
return [
target for target in output_lines
if target and (target.endswith('.o') or target.endswith('.obj'))
]
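
The new list comprehension is the filtering half of a two-step parse of `ninja -t query` output. A standalone sketch on canned output (the paths are made up; the real code gets `stdout` from the `Popen` call above, and the partition marker must match ninja's exact indentation of the `outputs:` line):

    # Hypothetical query output for one source file.
    stdout = ('rtc_base/logging.cc:\n'
              '  outputs:\n'
              '    obj/rtc_base/logging.o\n'
              '    gen/some_phony_target\n')

    # Everything after the "outputs:" marker, one build target per line.
    outputs_text = stdout.partition('\n  outputs:\n')[2]
    output_lines = [line.strip() for line in outputs_text.split('\n')]
    object_files = [
        target for target in output_lines
        if target and (target.endswith('.o') or target.endswith('.obj'))
    ]
    print(object_files)  # ['obj/rtc_base/logging.o']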
def GetClangCommandLineForNinjaOutput(out_dir, build_target):
@ -173,9 +176,10 @@ def GetClangCommandLineForNinjaOutput(out_dir, build_target):
(String or None) Clang command line or None if a Clang command line couldn't
be determined.
"""
p = subprocess.Popen(['ninja', '-v', '-C', out_dir,
'-t', 'commands', build_target],
stdout=subprocess.PIPE, universal_newlines=True)
p = subprocess.Popen(
['ninja', '-v', '-C', out_dir, '-t', 'commands', build_target],
stdout=subprocess.PIPE,
universal_newlines=True)
stdout, _ = p.communicate()
if p.returncode != 0:
return None
@ -261,7 +265,8 @@ def GetClangOptionsFromCommandLine(clang_commandline, out_dir,
if sysroot_path.startswith('/'):
clang_flags.append(flag)
else:
abs_path = os.path.normpath(os.path.join(out_dir, sysroot_path))
abs_path = os.path.normpath(os.path.join(
out_dir, sysroot_path))
clang_flags.append('--sysroot=' + abs_path)
return clang_flags
@ -326,7 +331,8 @@ def GetClangOptionsFromNinjaForFilename(webrtc_root, filename):
if not clang_line:
return additional_flags
return GetClangOptionsFromCommandLine(clang_line, out_dir, additional_flags)
return GetClangOptionsFromCommandLine(clang_line, out_dir,
additional_flags)
def FlagsForFile(filename):
@ -342,7 +348,8 @@ def FlagsForFile(filename):
"""
abs_filename = os.path.abspath(filename)
webrtc_root = FindWebrtcSrcFromFilename(abs_filename)
clang_flags = GetClangOptionsFromNinjaForFilename(webrtc_root, abs_filename)
clang_flags = GetClangOptionsFromNinjaForFilename(webrtc_root,
abs_filename)
# If clang_flags could not be determined, then assume that was due to a
# transient failure. Preventing YCM from caching the flags allows us to try to
@ -351,7 +358,4 @@ def FlagsForFile(filename):
final_flags = _DEFAULT_FLAGS + clang_flags
return {
'flags': final_flags,
'do_cache': should_cache_flags_for_file
}
return {'flags': final_flags, 'do_cache': should_cache_flags_for_file}
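
For reference, the shape of the value YCM receives from this entry point. `should_cache_flags_for_file` is assigned in an elided hunk; from the comment above it presumably becomes False when no flags were extracted, which the stub below models as `bool(clang_flags)`. The default flags shown are placeholders, not the script's real `_DEFAULT_FLAGS`:

    _DEFAULT_FLAGS = ['-x', 'c++']  # placeholder defaults

    def FlagsForFile(filename):
        clang_flags = []  # pretend the ninja lookup failed for this file
        # Assumed reconstruction of the elided logic: do not let YCM cache
        # an empty flag set, so a transient failure is retried later.
        should_cache_flags_for_file = bool(clang_flags)
        final_flags = _DEFAULT_FLAGS + clang_flags
        return {'flags': final_flags, 'do_cache': should_cache_flags_for_file}

    print(FlagsForFile('/webrtc/src/rtc_base/logging.cc'))
    # {'flags': ['-x', 'c++'], 'do_cache': False}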

View File

@ -6,7 +6,6 @@
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Generate graphs for data generated by loopback tests.
Usage examples:
@ -78,6 +77,7 @@ _FIELDS = [
NAME_TO_ID = {field[1]: field[0] for field in _FIELDS}
ID_TO_TITLE = {field[0]: field[2] for field in _FIELDS}
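
`_FIELDS` itself is not shown in this hunk, but the two comprehensions imply each entry is an (id, flag name, title) triple. A sketch with invented entries:

    # Hypothetical entries; the real _FIELDS table is elided from this diff.
    _FIELDS = [
        (1, 'sender_time', 'Sender time'),
        (2, 'receiver_time', 'Receiver time'),
    ]

    NAME_TO_ID = {field[1]: field[0] for field in _FIELDS}
    ID_TO_TITLE = {field[0]: field[2] for field in _FIELDS}

    print(NAME_TO_ID['receiver_time'])  # 2
    print(ID_TO_TITLE[2])               # 'Receiver time'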
def FieldArgToId(arg):
if arg == "none":
return None
@ -139,7 +139,9 @@ class Data(object):
"""Calculates sender time, receiver time etc. from the raw data."""
s = self.samples
last_render_time = 0
for field_id in [SENDER_TIME, RECEIVER_TIME, END_TO_END, RENDERED_DELTA]:
for field_id in [
SENDER_TIME, RECEIVER_TIME, END_TO_END, RENDERED_DELTA
]:
s[field_id] = [0] * self.length
for k in range(self.length):
@ -159,8 +161,10 @@ class Data(object):
These values are then skipped by the Plot() method.
"""
return [None if self.samples[DROPPED][k] else values[k]
for k in range(len(values))]
return [
None if self.samples[DROPPED][k] else values[k]
for k in range(len(values))
]
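
`_Hide` swaps samples from dropped frames for None so the later plotting step leaves gaps instead of drawing through them. The same comprehension standalone, with made-up data and a placeholder `DROPPED` id:

    DROPPED = 0  # placeholder field id; the real constant is defined elsewhere
    samples = {DROPPED: [False, True, False, True]}
    values = [10, 11, 12, 13]

    hidden = [
        None if samples[DROPPED][k] else values[k]
        for k in range(len(values))
    ]
    print(hidden)  # [10, None, 12, None]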
def AddSamples(self, config, target_lines_list):
"""Creates graph lines from the current data set with given config."""
@ -176,9 +180,9 @@ class Data(object):
if field & HIDE_DROPPED:
values = self._Hide(values)
target_lines_list.append(PlotLine(
self.title + " " + ID_TO_TITLE[field_id],
values, field & ~FIELD_MASK))
target_lines_list.append(
PlotLine(self.title + " " + ID_TO_TITLE[field_id], values,
field & ~FIELD_MASK))
def AverageOverCycle(values, length):
@ -210,8 +214,14 @@ def AverageOverCycle(values, length):
class PlotConfig(object):
"""Object representing a single graph."""
def __init__(self, fields, data_list, cycle_length=None, frames=None,
offset=0, output_filename=None, title="Graph"):
def __init__(self,
fields,
data_list,
cycle_length=None,
frames=None,
offset=0,
output_filename=None,
title="Graph"):
self.fields = fields
self.data_list = data_list
self.cycle_length = cycle_length
@ -269,10 +279,16 @@ class PlotConfig(object):
if self.cycle_length:
x = numpy.array(range(self.cycle_length))
else:
x = numpy.array(range(self.offset, self.offset + len(line.values)))
x = numpy.array(
range(self.offset, self.offset + len(line.values)))
y = numpy.array(line.values)
ax = ax2 if line.flags & RIGHT_Y_AXIS else ax1
ax.Plot(x, y, "o-", label=line.label, markersize=3.0, linewidth=1.0,
ax.Plot(x,
y,
"o-",
label=line.label,
markersize=3.0,
linewidth=1.0,
color=color_iter.next())
ax1.grid(True)
@ -293,6 +309,8 @@ def LoadFiles(filenames):
LoadFiles.cache[filename] = data
result.append(data)
return result
LoadFiles.cache = {}
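
`LoadFiles` memoizes parsed logs in an attribute on the function object itself, so a file named by several graphs is parsed only once. The pattern in isolation, with the real log parsing replaced by a stub:

    def LoadFiles(filenames):
        result = []
        for filename in filenames:
            if filename not in LoadFiles.cache:
                # Stub for the real parsing of a loopback-test log.
                LoadFiles.cache[filename] = 'data for %s' % filename
            result.append(LoadFiles.cache[filename])
        return result

    LoadFiles.cache = {}

    LoadFiles(['a.txt', 'b.txt'])
    LoadFiles(['a.txt'])  # served from the cache, no second parse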
@ -304,33 +322,63 @@ def GetParser():
namespace.ordered_args.append((self.dest, values))
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-c",
"--cycle_length",
nargs=1,
action=CustomAction,
type=int,
help="Cycle length over which to average the values.")
parser.add_argument(
"-c", "--cycle_length", nargs=1, action=CustomAction,
type=int, help="Cycle length over which to average the values.")
parser.add_argument(
"-f", "--field", nargs=1, action=CustomAction,
"-f",
"--field",
nargs=1,
action=CustomAction,
help="Name of the field to show. Use 'none' to skip a color.")
parser.add_argument("-r", "--right", nargs=0, action=CustomAction,
parser.add_argument("-r",
"--right",
nargs=0,
action=CustomAction,
help="Use right Y axis for given field.")
parser.add_argument("-d", "--drop", nargs=0, action=CustomAction,
parser.add_argument("-d",
"--drop",
nargs=0,
action=CustomAction,
help="Hide values for dropped frames.")
parser.add_argument("-o", "--offset", nargs=1, action=CustomAction, type=int,
parser.add_argument("-o",
"--offset",
nargs=1,
action=CustomAction,
type=int,
help="Frame offset.")
parser.add_argument("-n", "--next", nargs=0, action=CustomAction,
parser.add_argument("-n",
"--next",
nargs=0,
action=CustomAction,
help="Separator for multiple graphs.")
parser.add_argument(
"--frames", nargs=1, action=CustomAction, type=int,
"--frames",
nargs=1,
action=CustomAction,
type=int,
help="Frame count to show or take into account while averaging.")
parser.add_argument("-t", "--title", nargs=1, action=CustomAction,
parser.add_argument("-t",
"--title",
nargs=1,
action=CustomAction,
help="Title of the graph.")
parser.add_argument(
"-O", "--output_filename", nargs=1, action=CustomAction,
parser.add_argument("-O",
"--output_filename",
nargs=1,
action=CustomAction,
help="Use to save the graph into a file. "
"Otherwise, a window will be shown.")
parser.add_argument(
"files", nargs="+", action=CustomAction,
"files",
nargs="+",
action=CustomAction,
help="List of text-based files generated by loopback tests.")
return parser
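
`GetParser` routes every flag through `CustomAction` (defined just above this hunk), which records each occurrence in `namespace.ordered_args` so the original command-line order survives; plain argparse would keep only the last value per flag. A reduced sketch, assuming the action initializes `ordered_args` on first use:

    import argparse

    class CustomAction(argparse.Action):
        # Append (dest, values) so the command-line order is preserved.
        def __call__(self, parser, namespace, values, option_string=None):
            if 'ordered_args' not in namespace:
                setattr(namespace, 'ordered_args', [])
            namespace.ordered_args.append((self.dest, values))

    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--field', nargs=1, action=CustomAction)
    parser.add_argument('-n', '--next', nargs=0, action=CustomAction)

    args = parser.parse_args('-f a -n -f b'.split())
    print(args.ordered_args)
    # [('field', ['a']), ('next', []), ('field', ['b'])]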
@ -369,12 +417,18 @@ def _PlotConfigFromArgs(args, graph_num):
files.extend(values)
if not files:
raise Exception("Missing file argument(s) for graph #{}".format(graph_num))
raise Exception(
"Missing file argument(s) for graph #{}".format(graph_num))
if not fields:
raise Exception("Missing field argument(s) for graph #{}".format(graph_num))
raise Exception(
"Missing field argument(s) for graph #{}".format(graph_num))
return PlotConfig(fields, LoadFiles(files), cycle_length=cycle_length,
frames=frames, offset=offset, output_filename=output_filename,
return PlotConfig(fields,
LoadFiles(files),
cycle_length=cycle_length,
frames=frames,
offset=offset,
output_filename=output_filename,
title=title)
@ -410,5 +464,6 @@ def ShowOrSavePlots(plot_configs):
plt.show()
if __name__ == "__main__":
ShowOrSavePlots(PlotConfigsFromArgs(sys.argv[1:]))
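
Tying the pieces together: `PlotConfigsFromArgs` presumably walks `ordered_args` and starts a new graph at each `next` entry, as the `-n` help text above describes. A simplified sketch of that split, on a hypothetical stream (field and file names invented):

    # As produced by a CustomAction like the one sketched earlier.
    ordered_args = [('field', ['sender_time']), ('files', ['test1.txt']),
                    ('next', []),
                    ('field', ['receiver_time']), ('files', ['test2.txt'])]

    graphs = [[]]
    for dest, values in ordered_args:
        if dest == 'next':
            graphs.append([])  # '-n' separates graphs
        else:
            graphs[-1].append((dest, values))
    print(len(graphs))  # 2: one PlotConfig would be built per group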