Use backticks not vertical bars to denote variables in comments for /modules/audio_processing

Bug: webrtc:12338
Change-Id: I85bff694dd2ead83c939c4d1945eff82e1296001
No-Presubmit: True
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/227161
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34690}
This commit is contained in:
Artem Titov
2021-07-28 20:50:03 +02:00
committed by WebRTC LUCI CQ
parent dc6801c618
commit 0b489303d2
102 changed files with 483 additions and 483 deletions

View File

@ -206,7 +206,7 @@ void AudioProcessingSimulator::ProcessStream(bool fixed_interface) {
if (settings_.simulate_mic_gain) {
if (settings_.aec_dump_input_filename) {
// When the analog gain is simulated and an AEC dump is used as input, set
// the undo level to |aec_dump_mic_level_| to virtually restore the
// the undo level to `aec_dump_mic_level_` to virtually restore the
// unmodified microphone signal level.
fake_recording_device_.SetUndoMicLevel(aec_dump_mic_level_);
}
@ -261,7 +261,7 @@ void AudioProcessingSimulator::ProcessStream(bool fixed_interface) {
// Store the mic level suggested by AGC.
// Note that when the analog gain is simulated and an AEC dump is used as
// input, |analog_mic_level_| will not be used with set_stream_analog_level().
// input, `analog_mic_level_` will not be used with set_stream_analog_level().
analog_mic_level_ = ap_->recommended_stream_analog_level();
if (settings_.simulate_mic_gain) {
fake_recording_device_.SetMicLevel(analog_mic_level_);

View File

@ -19,11 +19,11 @@ namespace webrtc {
namespace test {
// This function implements the audio processing simulation utility. Pass
// |input_aecdump| to provide the content of an AEC dump file as a string; if
// |input_aecdump| is not passed, a WAV or AEC input dump file must be specified
// via the |argv| argument. Pass |processed_capture_samples| to write in it the
// samples processed on the capture side; if |processed_capture_samples| is not
// passed, the output file can optionally be specified via the |argv| argument.
// `input_aecdump` to provide the content of an AEC dump file as a string; if
// `input_aecdump` is not passed, a WAV or AEC input dump file must be specified
// via the `argv` argument. Pass `processed_capture_samples` to write in it the
// samples processed on the capture side; if `processed_capture_samples` is not
// passed, the output file can optionally be specified via the `argv` argument.
// Any audio_processing object specified in the input is used for the
// simulation. Note that when the audio_processing object is specified all
// functionality that relies on using the internal builder is deactivated,
@ -34,11 +34,11 @@ int AudioprocFloatImpl(rtc::scoped_refptr<AudioProcessing> audio_processing,
char* argv[]);
// This function implements the audio processing simulation utility. Pass
// |input_aecdump| to provide the content of an AEC dump file as a string; if
// |input_aecdump| is not passed, a WAV or AEC input dump file must be specified
// via the |argv| argument. Pass |processed_capture_samples| to write in it the
// samples processed on the capture side; if |processed_capture_samples| is not
// passed, the output file can optionally be specified via the |argv| argument.
// `input_aecdump` to provide the content of an AEC dump file as a string; if
// `input_aecdump` is not passed, a WAV or AEC input dump file must be specified
// via the `argv` argument. Pass `processed_capture_samples` to write in it the
// samples processed on the capture side; if `processed_capture_samples` is not
// passed, the output file can optionally be specified via the `argv` argument.
int AudioprocFloatImpl(std::unique_ptr<AudioProcessingBuilder> ap_builder,
int argc,
char* argv[],

View File

@ -125,8 +125,8 @@ std::unique_ptr<std::map<std::string, std::vector<int16_t>>> PreloadAudioTracks(
return audiotracks_map;
}
// Writes all the values in |source_samples| via |wav_writer|. If the number of
// previously written samples in |wav_writer| is less than |interval_begin|, it
// Writes all the values in `source_samples` via `wav_writer`. If the number of
// previously written samples in `wav_writer` is less than `interval_begin`, it
// adds zeros as left padding. The padding corresponds to intervals during which
// a speaker is not active.
void PadLeftWriteChunk(rtc::ArrayView<const int16_t> source_samples,
@ -145,9 +145,9 @@ void PadLeftWriteChunk(rtc::ArrayView<const int16_t> source_samples,
wav_writer->WriteSamples(source_samples.data(), source_samples.size());
}
// Appends zeros via |wav_writer|. The number of zeros is always non-negative
// Appends zeros via `wav_writer`. The number of zeros is always non-negative
// and equal to the difference between the previously written samples and
// |pad_samples|.
// `pad_samples`.
void PadRightWrite(WavWriter* wav_writer, size_t pad_samples) {
RTC_CHECK(wav_writer);
RTC_CHECK_GE(pad_samples, wav_writer->num_samples());

View File

@ -52,14 +52,14 @@ class FakeRecordingDevice final {
void SetUndoMicLevel(const int level);
// Simulates the analog gain.
// If |real_device_level| is a valid level, the unmodified mic signal is
// virtually restored. To skip the latter step set |real_device_level| to
// If `real_device_level` is a valid level, the unmodified mic signal is
// virtually restored. To skip the latter step set `real_device_level` to
// an empty value.
void SimulateAnalogGain(rtc::ArrayView<int16_t> buffer);
// Simulates the analog gain.
// If |real_device_level| is a valid level, the unmodified mic signal is
// virtually restored. To skip the latter step set |real_device_level| to
// If `real_device_level` is a valid level, the unmodified mic signal is
// virtually restored. To skip the latter step set `real_device_level` to
// an empty value.
void SimulateAnalogGain(ChannelBuffer<float>* buffer);

View File

@ -75,7 +75,7 @@ void CheckIfMonotoneSamplesModules(const ChannelBuffer<float>* prev,
}
// Checks that the samples in each pair have the same sign unless the sample in
// |dst| is zero (because of zero gain).
// `dst` is zero (because of zero gain).
void CheckSameSign(const ChannelBuffer<float>* src,
const ChannelBuffer<float>* dst) {
RTC_DCHECK_EQ(src->num_channels(), dst->num_channels());

View File

@ -31,7 +31,7 @@ class PerformanceTimer {
double GetDurationStandardDeviation() const;
// These methods are the same as those above, but they ignore the first
// |number_of_warmup_samples| measurements.
// `number_of_warmup_samples` measurements.
double GetDurationAverage(size_t number_of_warmup_samples) const;
double GetDurationStandardDeviation(size_t number_of_warmup_samples) const;

View File

@ -88,7 +88,7 @@ def FilterScoresByParams(data_frame, filter_params, score_name, config_dir):
data_cell_scores = data_with_config[data_with_config.eval_score_name ==
score_name]
# Exactly one of |params_to_plot| must match:
# Exactly one of `params_to_plot` must match:
(matching_param, ) = [
x for x in filter_params if '-' + x in config_json
]

View File

@ -133,7 +133,7 @@ def _FindOptimalParameter(configs_and_scores, score_weighting):
{score1: value1, ...}}] into a numeric
value
Returns:
the config that has the largest values of |score_weighting| applied
the config that has the largest values of `score_weighting` applied
to its scores.
"""

View File

@ -397,7 +397,7 @@ class TotalHarmonicDistorsionScore(EvaluationScore):
# TODO(alessiob): Fix or remove if not needed.
# thd = np.sqrt(np.sum(b_terms[1:]**2)) / b_terms[0]
# TODO(alessiob): Check the range of |thd_plus_noise| and update the class
# TODO(alessiob): Check the range of `thd_plus_noise` and update the class
# docstring above accordingly.
thd_plus_noise = distortion_and_noise / b_terms[0]

View File

@ -363,7 +363,7 @@ class HtmlExport(object):
@classmethod
def _SliceDataForScoreStatsTableCell(cls, scores, capture, render,
echo_simulator):
"""Slices |scores| to extract the data for a tab."""
"""Slices `scores` to extract the data for a tab."""
masks = []
masks.append(scores.capture == capture)
@ -378,7 +378,7 @@ class HtmlExport(object):
@classmethod
def _FindUniqueTuples(cls, data_frame, fields):
"""Slices |data_frame| to a list of fields and finds unique tuples."""
"""Slices `data_frame` to a list of fields and finds unique tuples."""
return data_frame[fields].drop_duplicates().values.tolist()
@classmethod

View File

@ -47,7 +47,7 @@ class ApmInputMixer(object):
Hard-clipping may occur in the mix; a warning is raised when this happens.
If |echo_filepath| is None, nothing is done and |capture_input_filepath| is
If `echo_filepath` is None, nothing is done and `capture_input_filepath` is
returned.
Args:

View File

@ -174,7 +174,7 @@ class SignalProcessingUtils(object):
"""Detects hard clipping.
Hard clipping is simply detected by counting samples that touch either the
lower or upper bound too many times in a row (according to |threshold|).
lower or upper bound too many times in a row (according to `threshold`).
The presence of a single sequence of samples meeting such property is enough
to label the signal as hard clipped.
@ -295,16 +295,16 @@ class SignalProcessingUtils(object):
noise,
target_snr=0.0,
pad_noise=MixPadding.NO_PADDING):
"""Mixes |signal| and |noise| with a target SNR.
"""Mixes `signal` and `noise` with a target SNR.
Mix |signal| and |noise| with a desired SNR by scaling |noise|.
Mix `signal` and `noise` with a desired SNR by scaling `noise`.
If the target SNR is +/- infinite, a copy of signal/noise is returned.
If |signal| is shorter than |noise|, the length of the mix equals that of
|signal|. Otherwise, the mix length depends on whether padding is applied.
When padding is not applied, that is |pad_noise| is set to NO_PADDING
(default), the mix length equals that of |noise| - i.e., |signal| is
truncated. Otherwise, |noise| is extended and the resulting mix has the same
length of |signal|.
If `signal` is shorter than `noise`, the length of the mix equals that of
`signal`. Otherwise, the mix length depends on whether padding is applied.
When padding is not applied, that is `pad_noise` is set to NO_PADDING
(default), the mix length equals that of `noise` - i.e., `signal` is
truncated. Otherwise, `noise` is extended and the resulting mix has the same
length of `signal`.
Args:
signal: AudioSegment instance (signal).
@ -342,18 +342,18 @@ class SignalProcessingUtils(object):
signal_duration = len(signal)
noise_duration = len(noise)
if signal_duration <= noise_duration:
# Ignore |pad_noise|, |noise| is truncated if longer that |signal|, the
# mix will have the same length of |signal|.
# Ignore `pad_noise`, `noise` is truncated if longer than `signal`, the
# mix will have the same length of `signal`.
return signal.overlay(noise.apply_gain(gain_db))
elif pad_noise == cls.MixPadding.NO_PADDING:
# |signal| is longer than |noise|, but no padding is applied to |noise|.
# Truncate |signal|.
# `signal` is longer than `noise`, but no padding is applied to `noise`.
# Truncate `signal`.
return noise.overlay(signal, gain_during_overlay=gain_db)
elif pad_noise == cls.MixPadding.ZERO_PADDING:
# TODO(alessiob): Check that this works as expected.
return signal.overlay(noise.apply_gain(gain_db))
elif pad_noise == cls.MixPadding.LOOP:
# |signal| is longer than |noise|, extend |noise| by looping.
# `signal` is longer than `noise`, extend `noise` by looping.
return signal.overlay(noise.apply_gain(gain_db), loop=True)
else:
raise exceptions.SignalProcessingException('invalid padding type')

View File

@ -264,7 +264,7 @@ class ApmModuleSimulator(object):
The file name is parsed to extract input signal creator and params. If a
creator is matched and the parameters are valid, a new signal is generated
and written in |input_signal_filepath|.
and written in `input_signal_filepath`.
Args:
input_signal_filepath: Path to the input signal audio file to write.

View File

@ -116,7 +116,7 @@ class TestTestDataGenerators(unittest.TestCase):
key = noisy_signal_filepaths.keys()[0]
return noisy_signal_filepaths[key], reference_signal_filepaths[key]
# Test the |copy_with_identity| flag.
# Test the `copy_with_identity` flag.
for copy_with_identity in [False, True]:
# Instance the generator through the factory.
factory = test_data_generation_factory.TestDataGeneratorFactory(
@ -126,7 +126,7 @@ class TestTestDataGenerators(unittest.TestCase):
factory.SetOutputDirectoryPrefix('datagen-')
generator = factory.GetInstance(
test_data_generation.IdentityTestDataGenerator)
# Check |copy_with_identity| is set correctly.
# Check `copy_with_identity` is set correctly.
self.assertEqual(copy_with_identity, generator.copy_with_identity)
# Generate test data and extract the paths to the noise and the reference
@ -137,7 +137,7 @@ class TestTestDataGenerators(unittest.TestCase):
noisy_signal_filepath, reference_signal_filepath = (
GetNoiseReferenceFilePaths(generator))
# Check that a copy is made if and only if |copy_with_identity| is True.
# Check that a copy is made if and only if `copy_with_identity` is True.
if copy_with_identity:
self.assertNotEqual(noisy_signal_filepath,
input_signal_filepath)

View File

@ -63,7 +63,7 @@ int main(int argc, char* argv[]) {
std::unique_ptr<Vad> vad = CreateVad(Vad::Aggressiveness::kVadNormal);
std::array<int16_t, kMaxFrameLen> samples;
char buff = 0; // Buffer to write one bit per frame.
uint8_t next = 0; // Points to the next bit to write in |buff|.
uint8_t next = 0; // Points to the next bit to write in `buff`.
while (true) {
// Process frame.
const auto read_samples =

View File

@ -78,7 +78,7 @@ class ChannelBufferWavReader final {
explicit ChannelBufferWavReader(std::unique_ptr<WavReader> file);
~ChannelBufferWavReader();
// Reads data from the file according to the |buffer| format. Returns false if
// Reads data from the file according to the `buffer` format. Returns false if
// a full buffer can't be read from the file.
bool Read(ChannelBuffer<float>* buffer);
@ -115,7 +115,7 @@ class ChannelBufferVectorWriter final {
delete;
~ChannelBufferVectorWriter();
// Creates an interleaved copy of |buffer|, converts the samples to float S16
// Creates an interleaved copy of `buffer`, converts the samples to float S16
// and appends the result to output_.
void Write(const ChannelBuffer<float>& buffer);