Use backticks not vertical bars to denote variables in comments for /modules/audio_coding

Bug: webrtc:12338
Change-Id: I02613d9fca45d00e2477f334b7a0416e7912e26b
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/227037
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34621}
Author: Artem Titov
Date: 2021-07-28 20:00:17 +02:00
Committed by: WebRTC LUCI CQ
parent 0146a34b3f
commit d00ce747c7
143 changed files with 809 additions and 809 deletions
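The conversion is purely mechanical: every `|name|` inside a comment becomes `name` in backticks, with the surrounding text untouched. An illustrative (made-up) before/after:

// Before: Ownership of |encoder| is transferred to the returned object.
// After:  Ownership of `encoder` is transferred to the returned object.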

View File

@@ -92,8 +92,8 @@ class AudioEncoderCngTest : public ::testing::Test {
 timestamp_ += static_cast<uint32_t>(num_audio_samples_10ms_);
 }
-// Expect |num_calls| calls to the encoder, all successful. The last call
-// claims to have encoded |kMockReturnEncodedBytes| bytes, and all the
+// Expect `num_calls` calls to the encoder, all successful. The last call
+// claims to have encoded `kMockReturnEncodedBytes` bytes, and all the
 // preceding ones 0 bytes.
 void ExpectEncodeCalls(size_t num_calls) {
 InSequence s;
@@ -108,7 +108,7 @@ class AudioEncoderCngTest : public ::testing::Test {
 }
 // Verifies that the cng_ object waits until it has collected
-// |blocks_per_frame| blocks of audio, and then dispatches all of them to
+// `blocks_per_frame` blocks of audio, and then dispatches all of them to
 // the underlying codec (speech or cng).
 void CheckBlockGrouping(size_t blocks_per_frame, bool active_speech) {
 EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
@@ -169,7 +169,7 @@ class AudioEncoderCngTest : public ::testing::Test {
 .WillOnce(Return(Vad::kPassive));
 }
-// With this call to Encode(), |mock_vad_| should be called according to the
+// With this call to Encode(), `mock_vad_` should be called according to the
 // above expectations.
 Encode();
 }
@@ -201,7 +201,7 @@ class AudioEncoderCngTest : public ::testing::Test {
 std::unique_ptr<AudioEncoder> cng_;
 std::unique_ptr<MockAudioEncoder> mock_encoder_owner_;
 MockAudioEncoder* mock_encoder_;
-MockVad* mock_vad_; // Ownership is transferred to |cng_|.
+MockVad* mock_vad_; // Ownership is transferred to `cng_`.
 uint32_t timestamp_;
 int16_t audio_[kMaxNumSamples];
 size_t num_audio_samples_10ms_;
@@ -294,7 +294,7 @@ TEST_F(AudioEncoderCngTest, EncodePassive) {
 for (size_t i = 0; i < 100; ++i) {
 Encode();
 // Check if it was time to call the cng encoder. This is done once every
-// |kBlocksPerFrame| calls.
+// `kBlocksPerFrame` calls.
 if ((i + 1) % kBlocksPerFrame == 0) {
 // Now check if a SID interval has elapsed.
 if ((i % (sid_frame_interval_ms / 10)) < kBlocksPerFrame) {
@@ -334,7 +334,7 @@ TEST_F(AudioEncoderCngTest, MixedActivePassive) {
 EXPECT_TRUE(CheckMixedActivePassive(Vad::kPassive, Vad::kActive));
 EXPECT_TRUE(encoded_info_.speech);
-// All of the frame is passive speech. Expect no calls to |mock_encoder_|.
+// All of the frame is passive speech. Expect no calls to `mock_encoder_`.
 EXPECT_FALSE(CheckMixedActivePassive(Vad::kPassive, Vad::kPassive));
 EXPECT_FALSE(encoded_info_.speech);
 }
@@ -442,7 +442,7 @@ class AudioEncoderCngDeathTest : public AudioEncoderCngTest {
 }
 // Override AudioEncoderCngTest::TearDown, since that one expects a call to
-// the destructor of |mock_vad_|. In this case, that object is already
+// the destructor of `mock_vad_`. In this case, that object is already
 // deleted.
 void TearDown() override { cng_.reset(); }
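For readers unfamiliar with the mocking pattern these comments describe, here is a self-contained sketch (hypothetical Codec/MockCodec types, not part of this CL) of the ordered-expectation style that ExpectEncodeCalls() uses:

#include <cstddef>
#include <gmock/gmock.h>

using ::testing::InSequence;
using ::testing::Return;

class Codec {
 public:
  virtual ~Codec() = default;
  virtual size_t Encode() = 0;
};

class MockCodec : public Codec {
 public:
  MOCK_METHOD(size_t, Encode, (), (override));
};

// Expect `num_calls` calls in order: all but the last report 0 encoded
// bytes; the last claims to have encoded `last_bytes` bytes.
void ExpectEncodeCalls(MockCodec& mock, size_t num_calls, size_t last_bytes) {
  InSequence s;
  for (size_t i = 0; i + 1 < num_calls; ++i)
    EXPECT_CALL(mock, Encode()).WillOnce(Return(size_t{0}));
  EXPECT_CALL(mock, Encode()).WillOnce(Return(last_bytes));
}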

View File

@@ -193,10 +193,10 @@ bool ComfortNoiseDecoder::Generate(rtc::ArrayView<int16_t> out_data,
 WebRtcSpl_ScaleVector(excitation, excitation, dec_used_scale_factor_,
 num_samples, 13);
-/* |lpPoly| - Coefficients in Q12.
- * |excitation| - Speech samples.
+/* `lpPoly` - Coefficients in Q12.
+ * `excitation` - Speech samples.
 * |nst->dec_filtstate| - State preservation.
-* |out_data| - Filtered speech samples. */
+* `out_data` - Filtered speech samples. */
 WebRtcSpl_FilterAR(lpPoly, WEBRTC_CNG_MAX_LPC_ORDER + 1, excitation,
 num_samples, dec_filtstate_, WEBRTC_CNG_MAX_LPC_ORDER,
 dec_filtstateLow_, WEBRTC_CNG_MAX_LPC_ORDER,
@@ -395,7 +395,7 @@ size_t ComfortNoiseEncoder::Encode(rtc::ArrayView<const int16_t> speech,
 }
 namespace {
-/* Values in |k| are Q15, and |a| Q12. */
+/* Values in `k` are Q15, and `a` Q12. */
 void WebRtcCng_K2a16(int16_t* k, int useOrder, int16_t* a) {
 int16_t any[WEBRTC_SPL_MAX_LPC_ORDER + 1];
 int16_t* aptr;
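The Q15/Q12 notation in these comments is ordinary fixed-point scaling. As a quick standalone illustration (not WebRTC code):

#include <cstdint>
#include <cstdio>

// Qn stores a real value x as the integer round(x * 2^n). Reflection
// coefficients `k` (magnitude below 1) fit Q15 in an int16_t, while LPC
// coefficients `a` use Q12 so magnitudes slightly above 1.0 still fit.
int16_t FloatToQ15(float x) { return static_cast<int16_t>(x * 32768.f); }
int16_t FloatToQ12(float x) { return static_cast<int16_t>(x * 4096.f); }

int main() {
  std::printf("0.5 in Q15 = %d\n", FloatToQ15(0.5f));    // Prints 16384.
  std::printf("1.25 in Q12 = %d\n", FloatToQ12(1.25f));  // Prints 5120.
}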

View File

@@ -33,13 +33,13 @@ class ComfortNoiseDecoder {
 void Reset();
 // Updates the CN state when a new SID packet arrives.
-// |sid| is a view of the SID packet without the headers.
+// `sid` is a view of the SID packet without the headers.
 void UpdateSid(rtc::ArrayView<const uint8_t> sid);
 // Generates comfort noise.
-// |out_data| will be filled with samples - its size determines the number of
-// samples generated. When |new_period| is true, CNG history will be reset
-// before any audio is generated. Returns |false| if outData is too large -
+// `out_data` will be filled with samples - its size determines the number of
+// samples generated. When `new_period` is true, CNG history will be reset
+// before any audio is generated. Returns `false` if outData is too large -
 // currently 640 bytes (equalling 10ms at 64kHz).
 // TODO(ossu): Specify better limits for the size of out_data. Either let it
 // be unbounded or limit to 10ms in the current sample rate.
@@ -61,9 +61,9 @@ class ComfortNoiseDecoder {
 class ComfortNoiseEncoder {
  public:
 // Creates a comfort noise encoder.
-// |fs| selects sample rate: 8000 for narrowband or 16000 for wideband.
-// |interval| sets the interval at which to generate SID data (in ms).
-// |quality| selects the number of refl. coeffs. Maximum allowed is 12.
+// `fs` selects sample rate: 8000 for narrowband or 16000 for wideband.
+// `interval` sets the interval at which to generate SID data (in ms).
+// `quality` selects the number of refl. coeffs. Maximum allowed is 12.
 ComfortNoiseEncoder(int fs, int interval, int quality);
 ~ComfortNoiseEncoder() = default;
@@ -74,8 +74,8 @@ class ComfortNoiseEncoder {
 // Parameters are set as during construction.
 void Reset(int fs, int interval, int quality);
-// Analyzes background noise from |speech| and appends coefficients to
-// |output|. Returns the number of coefficients generated. If |force_sid| is
+// Analyzes background noise from `speech` and appends coefficients to
+// `output`. Returns the number of coefficients generated. If `force_sid` is
 // true, a SID frame is forced and the internal sid interval counter is reset.
 // Will fail if the input size is too large (> 640 samples, see
 // ComfortNoiseDecoder::Generate).
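Taken together, the documented API can be exercised roughly as below. This is a sketch only: the output-buffer type is assumed to be rtc::BufferT<uint8_t>, and the signatures should be checked against cng.h.

// 10 ms of silence at 16 kHz, encoded to a SID frame and rendered back.
webrtc::ComfortNoiseEncoder cn_encoder(/*fs=*/16000, /*interval=*/100,
                                       /*quality=*/8);
webrtc::ComfortNoiseDecoder cn_decoder;

int16_t speech[160] = {0};
rtc::BufferT<uint8_t> sid;  // Assumed output type.
size_t coeffs = cn_encoder.Encode(speech, /*force_sid=*/true, &sid);

if (coeffs > 0) {
  cn_decoder.UpdateSid(sid);
  int16_t noise[160];
  cn_decoder.Generate(noise, /*new_period=*/true);
}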

View File

@@ -60,11 +60,11 @@ class AudioDecoderG722StereoImpl final : public AudioDecoder {
 SpeechType* speech_type) override;
  private:
-// Splits the stereo-interleaved payload in |encoded| into separate payloads
+// Splits the stereo-interleaved payload in `encoded` into separate payloads
 // for left and right channels. The separated payloads are written to
-// |encoded_deinterleaved|, which must hold at least |encoded_len| samples.
+// `encoded_deinterleaved`, which must hold at least `encoded_len` samples.
 // The left channel starts at offset 0, while the right channel starts at
-// offset encoded_len / 2 into |encoded_deinterleaved|.
+// offset encoded_len / 2 into `encoded_deinterleaved`.
 void SplitStereoPacket(const uint8_t* encoded,
 size_t encoded_len,
 uint8_t* encoded_deinterleaved);
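The described split is a straightforward byte-wise deinterleave. A minimal sketch (hypothetical helper; the real implementation also deals with odd payload lengths):

// Left samples land in [0, len/2), right samples in [len/2, len).
void SplitStereo(const uint8_t* encoded, size_t encoded_len,
                 uint8_t* encoded_deinterleaved) {
  const size_t half = encoded_len / 2;
  for (size_t i = 0; i < half; ++i) {
    encoded_deinterleaved[i] = encoded[2 * i];             // Left channel.
    encoded_deinterleaved[half + i] = encoded[2 * i + 1];  // Right channel.
  }
}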

View File

@@ -39,7 +39,7 @@ void WebRtcIlbcfix_CreateAugmentedVec(
 const int16_t *ppo, *ppi;
 int16_t cbVecTmp[4];
 /* Interpolation starts 4 elements before cbVec+index, but must not start
-outside |cbVec|; clamping interp_len to stay within |cbVec|.
+outside `cbVec`; clamping interp_len to stay within `cbVec`.
 */
 size_t interp_len = WEBRTC_SPL_MIN(index, 4);
@@ -69,12 +69,12 @@ void WebRtcIlbcfix_CreateAugmentedVec(
 /* copy the second noninterpolated part */
 ppo = buffer - index;
-/* |tempbuff2| is declared in WebRtcIlbcfix_GetCbVec and is SUBL+5 elements
-long. |buffer| points one element past the end of that vector, i.e., at
+/* `tempbuff2` is declared in WebRtcIlbcfix_GetCbVec and is SUBL+5 elements
+long. `buffer` points one element past the end of that vector, i.e., at
 tempbuff2+SUBL+5. Since ppo=buffer-index, we cannot read any more than
-|index| elements from |ppo|.
+`index` elements from `ppo`.
-|cbVec| is declared to be SUBL elements long in WebRtcIlbcfix_CbConstruct.
+`cbVec` is declared to be SUBL elements long in WebRtcIlbcfix_CbConstruct.
 Therefore, we can only write SUBL-index elements to cbVec+index.
 These two conditions limit the number of elements to copy.

View File

@@ -99,7 +99,7 @@ bool WebRtcIlbcfix_GetCbVec(
 // We're going to fill in cbveclen + 5 elements of tempbuff2 in
 // WebRtcSpl_FilterMAFastQ12, less than the SUBL + 5 elements we'll be
 // using in WebRtcIlbcfix_CreateAugmentedVec. This error is caused by
-// bad values in |index| (which come from the encoded stream). Tell the
+// bad values in `index` (which come from the encoded stream). Tell the
 // caller that things went south, and that the decoder state is now
 // corrupt (because it's half-way through an update that we can't
 // complete).

View File

@@ -831,15 +831,15 @@ It first shifts input data of one matrix, determines the right indexes for the
 two matrixes, multiply them, and write the results into an output buffer.
 Note that two factors (or, multipliers) determine the initialization values of
-the variable |matrix1_index| in the code. The relationship is
-|matrix1_index| = |matrix1_index_factor1| * |matrix1_index_factor2|, where
-|matrix1_index_factor1| is given by the argument while |matrix1_index_factor2|
-is determined by the value of argument |matrix1_index_init_case|;
-|matrix1_index_factor2| is the value of the outmost loop counter j (when
-|matrix1_index_init_case| is 0), or the value of the middle loop counter k (when
-|matrix1_index_init_case| is non-zero).
+the variable `matrix1_index` in the code. The relationship is
+`matrix1_index` = `matrix1_index_factor1` * `matrix1_index_factor2`, where
+`matrix1_index_factor1` is given by the argument while `matrix1_index_factor2`
+is determined by the value of argument `matrix1_index_init_case`;
+`matrix1_index_factor2` is the value of the outmost loop counter j (when
+`matrix1_index_init_case` is 0), or the value of the middle loop counter k (when
+`matrix1_index_init_case` is non-zero).
-|matrix0_index| is determined the same way.
+`matrix0_index` is determined the same way.
 Arguments:
 matrix0[]: matrix0 data in Q15 domain.
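In code form, the index relationship described above reads roughly as follows (a skeleton only; the multiply-and-store body is elided):

// j is the outermost loop counter, k the middle one.
const int matrix1_index_factor2 = (matrix1_index_init_case == 0) ? j : k;
int matrix1_index = matrix1_index_factor1 * matrix1_index_factor2;
// matrix0_index is initialized the same way from its own factors.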

View File

@@ -75,7 +75,7 @@ static void AllpassFilterForDec32(int16_t *InOut16, //Q0
 a = WEBRTC_SPL_MUL_16_32_RSFT16(InOut16[n], APSectionFactors[j]); //Q0*Q31=Q31 shifted 16 gives Q15
 a <<= 1; // Q15 -> Q16
 b = WebRtcSpl_AddSatW32(a, FilterState[j]); //Q16+Q16=Q16
-// |a| in Q15 (Q0*Q31=Q31 shifted 16 gives Q15).
+// `a` in Q15 (Q0*Q31=Q31 shifted 16 gives Q15).
 a = WEBRTC_SPL_MUL_16_32_RSFT16(b >> 16, -APSectionFactors[j]);
 // FilterState[j]: Q15<<1 + Q0<<16 = Q16 + Q16 = Q16
 FilterState[j] = WebRtcSpl_AddSatW32(a << 1, (uint32_t)InOut16[n] << 16);

View File

@@ -34,7 +34,7 @@ Time2Spec WebRtcIsacfix_Time2Spec;
 MatrixProduct1 WebRtcIsacfix_MatrixProduct1;
 MatrixProduct2 WebRtcIsacfix_MatrixProduct2;
-/* This method assumes that |stream_size_bytes| is in valid range,
+/* This method assumes that `stream_size_bytes` is in valid range,
 * i.e. >= 0 && <= STREAM_MAXW16_60MS
 */
 static void InitializeDecoderBitstream(size_t stream_size_bytes,
@@ -294,8 +294,8 @@ int16_t WebRtcIsacfix_EncoderInit(ISACFIX_MainStruct *ISAC_main_inst,
 return statusInit;
 }
-/* Read the given number of bytes of big-endian 16-bit integers from |src| and
-write them to |dest| in host endian. If |nbytes| is odd, the number of
+/* Read the given number of bytes of big-endian 16-bit integers from `src` and
+write them to `dest` in host endian. If `nbytes` is odd, the number of
 output elements is rounded up, and the least significant byte of the last
 element is set to 0. */
 static void read_be16(const uint8_t* src, size_t nbytes, uint16_t* dest) {
@@ -306,8 +306,8 @@ static void read_be16(const uint8_t* src, size_t nbytes, uint16_t* dest) {
 dest[nbytes / 2] = src[nbytes - 1] << 8;
 }
-/* Read the given number of bytes of host-endian 16-bit integers from |src| and
-write them to |dest| in big endian. If |nbytes| is odd, the number of source
+/* Read the given number of bytes of host-endian 16-bit integers from `src` and
+write them to `dest` in big endian. If `nbytes` is odd, the number of source
 elements is rounded up (but only the most significant byte of the last
 element is used), and the number of output bytes written will be
 nbytes + 1. */
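A sketch consistent with the read_be16() description (write_be16() mirrors it in the other direction):

void ReadBe16(const uint8_t* src, size_t nbytes, uint16_t* dest) {
  for (size_t i = 0; i < nbytes / 2; ++i)
    dest[i] = static_cast<uint16_t>((src[2 * i] << 8) | src[2 * i + 1]);
  if (nbytes % 2 == 1)  // Odd input: zero-pad the low byte of the last element.
    dest[nbytes / 2] = static_cast<uint16_t>(src[nbytes - 1] << 8);
}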

View File

@@ -663,7 +663,7 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
 /* less noise for lower frequencies, by filtering/scaling autocorrelation sequences */
 /* Calculate corrlo2[0] = tmpQQlo * corrlo[0] - 2.0*tmpQQlo * corrlo[1];*/
-// |corrlo2QQ| in Q(QdomLO-5).
+// `corrlo2QQ` in Q(QdomLO-5).
 corrlo2QQ[0] = (WEBRTC_SPL_MUL_16_32_RSFT16(tmpQQlo, corrloQQ[0]) >> 1) -
 (WEBRTC_SPL_MUL_16_32_RSFT16(aaQ14, corrloQQ[1]) >> 2);
@@ -721,12 +721,12 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
 tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha, tmp);
 } else if ((sh-shMem)<7){
 tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], shMem); // Shift up CorrBufLoQQ as much as possible
-// Shift |alpha| the number of times required to get |tmp| in QdomLO.
+// Shift `alpha` the number of times required to get `tmp` in QdomLO.
 tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << (sh - shMem), tmp);
 } else {
 tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible
-// Shift |alpha| as much as possible without overflow the number of
-// times required to get |tmp| in QdomLO.
+// Shift `alpha` as much as possible without overflow the number of
+// times required to get `tmp` in QdomLO.
 tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << 6, tmp);
 tmpCorr = corrloQQ[n] >> (sh - shMem - 6);
 tmp = tmp + tmpCorr;
@@ -774,7 +774,7 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
 maskdata->CorrBufHiQdom[n] = QdomHI;
 } else if ((sh-shMem)<7) {
 tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible
-// Shift |alpha| the number of times required to get |tmp| in QdomHI.
+// Shift `alpha` the number of times required to get `tmp` in QdomHI.
 tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << (sh - shMem), tmp);
 tmpCorr = corrhiQQ[n];
 tmp = tmp + tmpCorr;
@@ -782,8 +782,8 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
 maskdata->CorrBufHiQdom[n] = QdomHI;
 } else {
 tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible
-// Shift |alpha| as much as possible without overflow the number of
-// times required to get |tmp| in QdomHI.
+// Shift `alpha` as much as possible without overflow the number of
+// times required to get `tmp` in QdomHI.
 tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << 6, tmp);
 tmpCorr = corrhiQQ[n] >> (sh - shMem - 6);
 tmp = tmp + tmpCorr;
@@ -919,7 +919,7 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
 tmp32a = varscaleQ14 >> 1; // H_T_HQ19=65536 (16-17=-1)
-ssh = sh_hi >> 1; // |sqrt_nrg| is in Qssh.
+ssh = sh_hi >> 1; // `sqrt_nrg` is in Qssh.
 sh = ssh - 14;
 tmp32b = WEBRTC_SPL_SHIFT_W32(tmp32a, sh); // Q14->Qssh
 tmp32c = sqrt_nrg + tmp32b; // Qssh (denominator)

View File

@@ -203,7 +203,7 @@ TEST_P(EncoderTest, DoNotOvershootTargetBitrate) {
 e->Encode(/*rtp_timestamp=*/0, AudioFrameToView(in), &encoded);
 num_bytes += encoded.size();
 }
-// Inverse of the duration of |kNumFrames| 10 ms frames (unit: seconds^-1).
+// Inverse of the duration of `kNumFrames` 10 ms frames (unit: seconds^-1).
 constexpr float kAudioDurationInv = 100.f / kNumFrames;
 const int measured_bitrate_bps = 8 * num_bytes * kAudioDurationInv;
 EXPECT_LT(measured_bitrate_bps, bitrate_bps + 2000); // Max 2 kbps extra.
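As a worked example with hypothetical numbers: kNumFrames = 500 ten-millisecond frames span 5 seconds, so kAudioDurationInv = 100.f / 500 = 0.2 s^-1; if the encoder emitted num_bytes = 6250, the measured bitrate is 8 * 6250 * 0.2 = 10000 bps.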

View File

@@ -606,7 +606,7 @@ int WebRtcIsac_DecodeRcu(ISACStruct* ISAC_main_inst,
 int16_t* decoded,
 int16_t* speechType);
-/* If |inst| is a decoder but not an encoder: tell it what sample rate the
+/* If `inst` is a decoder but not an encoder: tell it what sample rate the
 encoder is using, for bandwidth estimation purposes. */
 void WebRtcIsac_SetEncSampRateInDecoder(ISACStruct* inst, int sample_rate_hz);

View File

@@ -1446,7 +1446,7 @@ void WebRtcIsac_EncodeRc(int16_t* RCQ15, Bitstr* streamdata) {
 index[k] = WebRtcIsac_kQArRcInitIndex[k];
 // The safe-guards in following while conditions are to suppress gcc 4.8.3
 // warnings, Issue 2888. Otherwise, first and last elements of
-// |WebRtcIsac_kQArBoundaryLevels| are such that the following search
+// `WebRtcIsac_kQArBoundaryLevels` are such that the following search
 // *never* cause an out-of-boundary read.
 if (RCQ15[k] > WebRtcIsac_kQArBoundaryLevels[index[k]]) {
 while (index[k] + 1 < NUM_AR_RC_QUANT_BAUNDARY &&

View File

@@ -25,8 +25,8 @@
 * Post-filtering:
 * y(z) = x(z) - damper(z) * gain * (x(z) + y(z)) * z ^ (-lag);
 *
-* Note that |lag| is a floating number so we perform an interpolation to
-* obtain the correct |lag|.
+* Note that `lag` is a floating number so we perform an interpolation to
+* obtain the correct `lag`.
 *
 */
@@ -86,7 +86,7 @@ typedef enum {
 * buffer : a buffer where the sum of previous inputs and outputs
 * are stored.
 * damper_state : the state of the damping filter. The filter is defined by
-* |kDampFilter|.
+* `kDampFilter`.
 * interpol_coeff : pointer to a set of coefficient which are used to utilize
 * fractional pitch by interpolation.
 * gain : pitch-gain to be applied to the current segment of input.
@@ -353,7 +353,7 @@ static void FilterFrame(const double* in_data, PitchFiltstr* filter_state,
 if ((mode == kPitchFilterPreGain) || (mode == kPitchFilterPreLa)) {
 /* Filter the lookahead segment, this is treated as the last sub-frame. So
-* set |pf_param| to last sub-frame. */
+* set `pf_param` to last sub-frame. */
 filter_parameters.sub_frame = PITCH_SUBFRAMES - 1;
 filter_parameters.num_samples = QLOOKAHEAD;
 FilterSegment(in_data, &filter_parameters, out_data, out_dg);
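The fractional `lag` can be pictured with plain linear interpolation; the real code uses a longer interpolation filter (`interpol_coeff`), so this is only a sketch:

// Returns y[n - lag] for a non-integer lag by blending the two
// neighboring integer delays.
float DelayedSample(const float* y, int n, float lag) {
  const int whole = static_cast<int>(lag);
  const float frac = lag - whole;
  return (1.f - frac) * y[n - whole] + frac * y[n - whole - 1];
}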

View File

@@ -59,7 +59,7 @@ std::vector<AudioDecoder::ParseResult> LegacyEncodedAudioFrame::SplitBySamples(
 new LegacyEncodedAudioFrame(decoder, std::move(payload)));
 results.emplace_back(timestamp, 0, std::move(frame));
 } else {
-// Reduce the split size by half as long as |split_size_bytes| is at least
+// Reduce the split size by half as long as `split_size_bytes` is at least
 // twice the minimum chunk size (so that the resulting size is at least as
 // large as the minimum chunk size).
 while (split_size_bytes >= 2 * min_chunk_size) {

View File

@@ -564,9 +564,9 @@ void AudioEncoderOpusImpl::OnReceivedOverhead(
 void AudioEncoderOpusImpl::SetReceiverFrameLengthRange(
 int min_frame_length_ms,
 int max_frame_length_ms) {
-// Ensure that |SetReceiverFrameLengthRange| is called before
-// |EnableAudioNetworkAdaptor|, otherwise we need to recreate
-// |audio_network_adaptor_|, which is not a needed use case.
+// Ensure that `SetReceiverFrameLengthRange` is called before
+// `EnableAudioNetworkAdaptor`, otherwise we need to recreate
+// `audio_network_adaptor_`, which is not a needed use case.
 RTC_DCHECK(!audio_network_adaptor_);
 FindSupportedFrameLengths(min_frame_length_ms, max_frame_length_ms,
 &config_.supported_frame_lengths_ms);

View File

@@ -228,8 +228,8 @@ TEST_P(AudioEncoderOpusTest,
 TEST_P(AudioEncoderOpusTest, SetReceiverFrameLengthRange) {
 auto states = CreateCodec(sample_rate_hz_, 2);
-// Before calling to |SetReceiverFrameLengthRange|,
-// |supported_frame_lengths_ms| should contain only the frame length being
+// Before calling to `SetReceiverFrameLengthRange`,
+// `supported_frame_lengths_ms` should contain only the frame length being
 // used.
 using ::testing::ElementsAre;
 EXPECT_THAT(states->encoder->supported_frame_lengths_ms(),
@@ -348,7 +348,7 @@ TEST_P(AudioEncoderOpusTest,
 // will fail.
 constexpr float kPacketLossFraction_1 = 0.02f;
 constexpr float kPacketLossFraction_2 = 0.198f;
-// |kSecondSampleTimeMs| is chosen to ease the calculation since
+// `kSecondSampleTimeMs` is chosen to ease the calculation since
 // 0.9999 ^ 6931 = 0.5.
 constexpr int64_t kSecondSampleTimeMs = 6931;
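(The identity holds because ln 0.9999 ≈ -1.00005e-4, so 0.9999^6931 = exp(6931 * ln 0.9999) ≈ exp(-0.6931) ≈ 0.5, with ln 2 ≈ 0.6931.)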
@@ -380,7 +380,7 @@ TEST_P(AudioEncoderOpusTest, DoNotInvokeSetTargetBitrateIfOverheadUnknown) {
 states->encoder->OnReceivedUplinkBandwidth(kDefaultOpusRate * 2,
 absl::nullopt);
-// Since |OnReceivedOverhead| has not been called, the codec bitrate should
+// Since `OnReceivedOverhead` has not been called, the codec bitrate should
 // not change.
 EXPECT_EQ(kDefaultOpusRate, states->encoder->GetTargetBitrate());
 }

View File

@@ -218,8 +218,8 @@ TEST_P(OpusFecTest, RandomPacketLossTest) {
 time_now_ms += block_duration_ms_;
-// |data_pointer_| is incremented and wrapped across
-// |loop_length_samples_|.
+// `data_pointer_` is incremented and wrapped across
+// `loop_length_samples_`.
 data_pointer_ = (data_pointer_ + block_length_sample_ * channels_) %
 loop_length_samples_;
 }

View File

@@ -574,8 +574,8 @@ void WebRtcOpus_DecoderInit(OpusDecInst* inst) {
 /* For decoder to determine if it is to output speech or comfort noise. */
 static int16_t DetermineAudioType(OpusDecInst* inst, size_t encoded_bytes) {
-// Audio type becomes comfort noise if |encoded_byte| is 1 and keeps
-// to be so if the following |encoded_byte| are 0 or 1.
+// Audio type becomes comfort noise if `encoded_byte` is 1 and keeps
+// to be so if the following `encoded_byte` are 0 or 1.
 if (encoded_bytes == 0 && inst->in_dtx_mode) {
 return 2; // Comfort noise.
 } else if (encoded_bytes == 1 || encoded_bytes == 2) {
@@ -595,7 +595,7 @@ static int16_t DetermineAudioType(OpusDecInst* inst, size_t encoded_bytes) {
 }
 }
-/* |frame_size| is set to maximum Opus frame size in the normal case, and
+/* `frame_size` is set to maximum Opus frame size in the normal case, and
 * is set to the number of samples needed for PLC in case of losses.
 * It is up to the caller to make sure the value is correct. */
 static int DecodeNative(OpusDecInst* inst,
@@ -632,9 +632,9 @@ static int DecodePlc(OpusDecInst* inst, int16_t* decoded) {
 FrameSizePerChannel(kWebRtcOpusPlcFrameSizeMs, inst->sample_rate_hz);
 if (inst->plc_use_prev_decoded_samples) {
-/* The number of samples we ask for is |number_of_lost_frames| times
- * |prev_decoded_samples_|. Limit the number of samples to maximum
- * |MaxFrameSizePerChannel()|. */
+/* The number of samples we ask for is `number_of_lost_frames` times
+ * `prev_decoded_samples_`. Limit the number of samples to maximum
+ * `MaxFrameSizePerChannel()`. */
 plc_samples = inst->prev_decoded_samples;
 const int max_samples_per_channel =
 MaxFrameSizePerChannel(inst->sample_rate_hz);
@@ -729,9 +729,9 @@ int WebRtcOpus_DurationEst(OpusDecInst* inst,
 int WebRtcOpus_PlcDuration(OpusDecInst* inst) {
 if (inst->plc_use_prev_decoded_samples) {
-/* The number of samples we ask for is |number_of_lost_frames| times
- * |prev_decoded_samples_|. Limit the number of samples to maximum
- * |MaxFrameSizePerChannel()|. */
+/* The number of samples we ask for is `number_of_lost_frames` times
+ * `prev_decoded_samples_`. Limit the number of samples to maximum
+ * `MaxFrameSizePerChannel()`. */
 const int plc_samples = inst->prev_decoded_samples;
 const int max_samples_per_channel =
 MaxFrameSizePerChannel(inst->sample_rate_hz);
@@ -826,8 +826,8 @@ int WebRtcOpus_PacketHasFec(const uint8_t* payload,
 // as binary values with uniform probability, they can be extracted directly
 // from the most significant bits of the first byte of compressed data.
 for (int n = 0; n < channels; n++) {
-// The LBRR bit for channel 1 is on the (|silk_frames| + 1)-th bit, and
-// that of channel 2 is on the |(|silk_frames| + 1) * 2 + 1|-th bit.
+// The LBRR bit for channel 1 is on the (`silk_frames` + 1)-th bit, and
+// that of channel 2 is on the |(`silk_frames` + 1) * 2 + 1|-th bit.
 if (frame_data[0][0] & (0x80 >> ((n + 1) * (silk_frames + 1) - 1)))
 return 1;
 }
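The clamping that both PLC comments above describe amounts to this sketch (variable names taken from the comments):

// Ask for number_of_lost_frames * prev_decoded_samples samples, but never
// more than the codec's maximum frame size per channel.
const int plc_samples = std::min(number_of_lost_frames * prev_decoded_samples,
                                 MaxFrameSizePerChannel(sample_rate_hz));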

View File

@@ -115,10 +115,10 @@ class OpusTest
 void TestCbrEffect(bool dtx, int block_length_ms);
-// Prepare |speech_data_| for encoding, read from a hard-coded file.
+// Prepare `speech_data_` for encoding, read from a hard-coded file.
 // After preparation, |speech_data_.GetNextBlock()| returns a pointer to a
-// block of |block_length_ms| milliseconds. The data is looped every
-// |loop_length_ms| milliseconds.
+// block of `block_length_ms` milliseconds. The data is looped every
+// `loop_length_ms` milliseconds.
 void PrepareSpeechData(int block_length_ms, int loop_length_ms);
 int EncodeDecode(WebRtcOpusEncInst* encoder,
@@ -310,24 +310,24 @@ void OpusTest::TestDtxEffect(bool dtx, int block_length_ms) {
 // one with an arbitrary size and the other of 1-byte, then stops sending for
 // a certain number of frames.
-// |max_dtx_frames| is the maximum number of frames Opus can stay in DTX.
+// `max_dtx_frames` is the maximum number of frames Opus can stay in DTX.
 // TODO(kwiberg): Why does this number depend on the encoding sample rate?
 const int max_dtx_frames =
 (encoder_sample_rate_hz_ == 16000 ? 800 : 400) / block_length_ms + 1;
-// We run |kRunTimeMs| milliseconds of pure silence.
+// We run `kRunTimeMs` milliseconds of pure silence.
 const int kRunTimeMs = 4500;
-// We check that, after a |kCheckTimeMs| milliseconds (given that the CNG in
+// We check that, after a `kCheckTimeMs` milliseconds (given that the CNG in
 // Opus needs time to adapt), the absolute values of DTX decoded signal are
-// bounded by |kOutputValueBound|.
+// bounded by `kOutputValueBound`.
 const int kCheckTimeMs = 4000;
 #if defined(OPUS_FIXED_POINT)
 // Fixed-point Opus generates a random (comfort) noise, which has a less
 // predictable value bound than its floating-point Opus. This value depends on
 // input signal, and the time window for checking the output values (between
-// |kCheckTimeMs| and |kRunTimeMs|).
+// `kCheckTimeMs` and `kRunTimeMs`).
 const uint16_t kOutputValueBound = 30;
 #else
@@ -336,7 +336,7 @@ void OpusTest::TestDtxEffect(bool dtx, int block_length_ms) {
 int time = 0;
 while (time < kRunTimeMs) {
-// DTX mode is maintained for maximum |max_dtx_frames| frames.
+// DTX mode is maintained for maximum `max_dtx_frames` frames.
 int i = 0;
 for (; i < max_dtx_frames; ++i) {
 time += block_length_ms;

View File

@@ -29,11 +29,11 @@ class AudioRingBuffer final {
 AudioRingBuffer(size_t channels, size_t max_frames);
 ~AudioRingBuffer();
-// Copies |data| to the buffer and advances the write pointer. |channels| must
+// Copies `data` to the buffer and advances the write pointer. `channels` must
 // be the same as at creation time.
 void Write(const float* const* data, size_t channels, size_t frames);
-// Copies from the buffer to |data| and advances the read pointer. |channels|
+// Copies from the buffer to `data` and advances the read pointer. `channels`
 // must be the same as at creation time.
 void Read(float* const* data, size_t channels, size_t frames);
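Based on the declarations above, typical use looks like this (sketch; sizes are illustrative):

AudioRingBuffer ring(/*channels=*/2, /*max_frames=*/480);

float left[160] = {0};
float right[160] = {0};
float* channels[] = {left, right};
ring.Write(channels, /*channels=*/2, /*frames=*/160);  // Enqueue 160 frames.
ring.Read(channels, /*channels=*/2, /*frames=*/160);   // Dequeue them again.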

View File

@@ -16,7 +16,7 @@
 namespace {
-// Adds |a| and |b| frame by frame into |result| (basically matrix addition).
+// Adds `a` and `b` frame by frame into `result` (basically matrix addition).
 void AddFrames(const float* const* a,
 size_t a_start_index,
 const float* const* b,
@@ -33,7 +33,7 @@ void AddFrames(const float* const* a,
 }
 }
-// Copies |src| into |dst| channel by channel.
+// Copies `src` into `dst` channel by channel.
 void CopyFrames(const float* const* src,
 size_t src_start_index,
 size_t num_frames,
@@ -46,7 +46,7 @@ void CopyFrames(const float* const* src,
 }
 }
-// Moves |src| into |dst| channel by channel.
+// Moves `src` into `dst` channel by channel.
 void MoveFrames(const float* const* src,
 size_t src_start_index,
 size_t num_frames,
@@ -69,8 +69,8 @@ void ZeroOut(float* const* buffer,
 }
 }
-// Pointwise multiplies each channel of |frames| with |window|. Results are
-// stored in |frames|.
+// Pointwise multiplies each channel of `frames` with `window`. Results are
+// stored in `frames`.
 void ApplyWindow(const float* window,
 size_t num_frames,
 size_t num_channels,
@@ -134,7 +134,7 @@ Blocker::~Blocker() = default;
 // On each call to ProcessChunk():
 // 1. New input gets read into sections _b_ and _c_ of the input buffer.
 // 2. We block starting from frame_offset.
-// 3. We block until we reach a block |bl| that doesn't contain any frames
+// 3. We block until we reach a block `bl` that doesn't contain any frames
 // from sections _a_ or _b_ of the input buffer.
 // 4. We window the current block, fire the callback for processing, window
 // again, and overlap/add to the output buffer.
@@ -142,7 +142,7 @@ Blocker::~Blocker() = default;
 // 6. For both the input and the output buffers, we copy section _c_ into
 // section _a_.
 // 7. We set the new frame_offset to be the difference between the first frame
-// of |bl| and the border between sections _b_ and _c_.
+// of `bl` and the border between sections _b_ and _c_.
 //
 // When block_size > chunk_size the input and output buffers look like this:
 //
@@ -153,13 +153,13 @@ Blocker::~Blocker() = default;
 // On each call to ProcessChunk():
 // The procedure is the same as above, except for:
 // 1. New input gets read into section _c_ of the input buffer.
-// 3. We block until we reach a block |bl| that doesn't contain any frames
+// 3. We block until we reach a block `bl` that doesn't contain any frames
 // from section _a_ of the input buffer.
 // 5. We copy section _a_ of the output buffer into output.
 // 6. For both the input and the output buffers, we copy sections _b_ and _c_
 // into section _a_ and _b_.
 // 7. We set the new frame_offset to be the difference between the first frame
-// of |bl| and the border between sections _a_ and _b_.
+// of `bl` and the border between sections _a_ and _b_.
 //
 // * delay here refers to inintial_delay_
 //
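Of the helpers above, ApplyWindow() is the simplest to show in full; its documented behavior is essentially this sketch:

// Pointwise in-place multiply of each channel of `frames` by `window`.
void ApplyWindowSketch(const float* window, size_t num_frames,
                       size_t num_channels, float* const* frames) {
  for (size_t c = 0; c < num_channels; ++c)
    for (size_t i = 0; i < num_frames; ++i)
      frames[c][i] *= window[i];
}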

View File

@@ -39,7 +39,7 @@ class BlockerCallback {
 // of audio, which is not a power of 2. Blocker allows us to specify the
 // transform and all other necessary processing via the Process() callback
 // function without any constraints on the transform-size
-// (read: |block_size_|) or received-audio-size (read: |chunk_size_|).
+// (read: `block_size_`) or received-audio-size (read: `chunk_size_`).
 // We handle this for the multichannel audio case, allowing for different
 // numbers of input and output channels (for example, beamforming takes 2 or
 // more input channels and returns 1 output channel). Audio signals are
@@ -53,8 +53,8 @@ class BlockerCallback {
 // sending back a processed chunk
 //
 // To use blocker:
-// 1. Impelment a BlockerCallback object |bc|.
-// 2. Instantiate a Blocker object |b|, passing in |bc|.
+// 1. Impelment a BlockerCallback object `bc`.
+// 2. Instantiate a Blocker object `b`, passing in `bc`.
 // 3. As you receive audio, call b.ProcessChunk() to get processed audio.
 //
 // A small amount of delay is added to the first received chunk to deal with
@@ -101,7 +101,7 @@ class Blocker {
 // input and output buffers are responsible for saving those frames between
 // calls to ProcessChunk().
 //
-// Both contain |initial delay| + |chunk_size| frames. The input is a fairly
+// Both contain |initial delay| + `chunk_size` frames. The input is a fairly
 // standard FIFO, but due to the overlap-add it's harder to use an
 // AudioRingBuffer for the output.
 AudioRingBuffer input_buffer_;
@@ -116,7 +116,7 @@ class Blocker {
 std::unique_ptr<float[]> window_;
 // The amount of frames between the start of contiguous blocks. For example,
-// |shift_amount_| = |block_size_| / 2 for a Hann window.
+// `shift_amount_` = `block_size_` / 2 for a Hann window.
 size_t shift_amount_;
 BlockerCallback* callback_;
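A sketch of steps 1-3 from the usage comment above. The ProcessBlock() signature and the constructor argument order are assumptions to verify against blocker.h:

#include <algorithm>

// Step 1: implement a BlockerCallback (here it just copies input to output).
class PassthroughCallback : public webrtc::BlockerCallback {
 public:
  void ProcessBlock(const float* const* input, size_t num_frames,
                    size_t num_input_channels, size_t num_output_channels,
                    float* const* output) override {
    for (size_t c = 0; c < num_output_channels; ++c)
      std::copy(input[c], input[c] + num_frames, output[c]);
  }
};

// Step 2: webrtc::Blocker b(chunk_size, block_size, num_in_ch, num_out_ch,
//                           window, shift_amount, &callback);  // Order assumed.
// Step 3: for each received chunk: b.ProcessChunk(input_chunk, output_chunk);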

View File

@@ -84,11 +84,11 @@ class LappedTransform {
 std::complex<float>* const* out_block) = 0;
 };
-// Construct a transform instance. |chunk_length| is the number of samples in
-// each channel. |window| defines the window, owned by the caller (a copy is
-// made internally); |window| should have length equal to |block_length|.
-// |block_length| defines the length of a block, in samples.
-// |shift_amount| is in samples. |callback| is the caller-owned audio
+// Construct a transform instance. `chunk_length` is the number of samples in
+// each channel. `window` defines the window, owned by the caller (a copy is
+// made internally); `window` should have length equal to `block_length`.
+// `block_length` defines the length of a block, in samples.
+// `shift_amount` is in samples. `callback` is the caller-owned audio
 // processing function called for each block of the input chunk.
 LappedTransform(size_t num_in_channels,
 size_t num_out_channels,
@@ -99,10 +99,10 @@ class LappedTransform {
 Callback* callback);
 ~LappedTransform();
-// Main audio processing helper method. Internally slices |in_chunk| into
+// Main audio processing helper method. Internally slices `in_chunk` into
 // blocks, transforms them to frequency domain, calls the callback for each
 // block and returns a de-blocked time domain chunk of audio through
-// |out_chunk|. Both buffers are caller-owned.
+// `out_chunk`. Both buffers are caller-owned.
 void ProcessChunk(const float* const* in_chunk, float* const* out_chunk);
 // Get the chunk length.
@@ -132,8 +132,8 @@ class LappedTransform {
 // Returns the initial delay.
 //
-// This is the delay introduced by the |blocker_| to be able to get and return
-// chunks of |chunk_length|, but process blocks of |block_length|.
+// This is the delay introduced by the `blocker_` to be able to get and return
+// chunks of `chunk_length`, but process blocks of `block_length`.
 size_t initial_delay() const { return blocker_.initial_delay(); }
  private:
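Usage is symmetric to Blocker: implement Callback, construct, then feed chunks through ProcessChunk(). A sketch of the callback (signature reconstructed from the declaration above; verify against lapped_transform.h):

#include <algorithm>
#include <complex>

// The callback sees frequency-domain blocks; this one passes them through.
class IdentityCallback : public webrtc::LappedTransform::Callback {
 public:
  void ProcessAudioBlock(const std::complex<float>* const* in_block,
                         size_t num_in_channels, size_t frames,
                         size_t num_out_channels,
                         std::complex<float>* const* out_block) override {
    for (size_t c = 0; c < num_out_channels; ++c)
      std::copy(in_block[c], in_block[c] + frames, out_block[c]);
  }
};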

View File

@@ -145,7 +145,7 @@ AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeImpl(
 info.redundant.push_back(it->first);
 }
-// |info| will be implicitly cast to an EncodedInfoLeaf struct, effectively
+// `info` will be implicitly cast to an EncodedInfoLeaf struct, effectively
 // discarding the (empty) vector of redundant information. This is
 // intentional.
 if (header_length_bytes > 0) {
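The implicit cast mentioned here is ordinary C++ object slicing. In miniature (stand-in types, not the real EncodedInfo structs):

#include <cstddef>
#include <vector>

struct Leaf { size_t encoded_bytes = 0; };
struct Info : Leaf { std::vector<Leaf> redundant; };

Info info;
info.redundant.resize(2);
Leaf leaf = info;  // Slices: only Leaf's members survive; `redundant` is dropped.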

View File

@@ -31,9 +31,9 @@ class AudioCodecSpeedTest : public ::testing::TestWithParam<coding_param> {
 virtual void TearDown();
 // EncodeABlock(...) does the following:
-// 1. encodes a block of audio, saved in |in_data|,
-// 2. save the bit stream to |bit_stream| of |max_bytes| bytes in size,
-// 3. assign |encoded_bytes| with the length of the bit stream (in bytes),
+// 1. encodes a block of audio, saved in `in_data`,
+// 2. save the bit stream to `bit_stream` of `max_bytes` bytes in size,
+// 3. assign `encoded_bytes` with the length of the bit stream (in bytes),
 // 4. return the cost of time (in millisecond) spent on actual encoding.
 virtual float EncodeABlock(int16_t* in_data,
 uint8_t* bit_stream,
@@ -41,15 +41,15 @@ class AudioCodecSpeedTest : public ::testing::TestWithParam<coding_param> {
 size_t* encoded_bytes) = 0;
 // DecodeABlock(...) does the following:
-// 1. decodes the bit stream in |bit_stream| with a length of |encoded_bytes|
+// 1. decodes the bit stream in `bit_stream` with a length of `encoded_bytes`
 // (in bytes),
-// 2. save the decoded audio in |out_data|,
+// 2. save the decoded audio in `out_data`,
 // 3. return the cost of time (in millisecond) spent on actual decoding.
 virtual float DecodeABlock(const uint8_t* bit_stream,
 size_t encoded_bytes,
 int16_t* out_data) = 0;
-// Encoding and decode an audio of |audio_duration| (in seconds) and
+// Encoding and decode an audio of `audio_duration` (in seconds) and
 // record the runtime for encoding and decoding separately.
 void EncodeDecode(size_t audio_duration);
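A subclass might satisfy the "return the cost of time" contract like this (sketch; DoEncode() stands in for the actual codec call):

#include <chrono>

// Inside a hypothetical AudioCodecSpeedTest subclass:
float EncodeABlock(int16_t* in_data, uint8_t* bit_stream, size_t max_bytes,
                   size_t* encoded_bytes) override {
  const auto start = std::chrono::steady_clock::now();
  *encoded_bytes = DoEncode(in_data, bit_stream, max_bytes);  // Hypothetical.
  const auto stop = std::chrono::steady_clock::now();
  return std::chrono::duration<float, std::milli>(stop - start).count();
}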