Refactor audio_coding/codecs/isac/fix: Remove usage of the macro WEBRTC_SPL_MUL_16_16_RSFT

The macro is defined as
#define WEBRTC_SPL_MUL_16_16_RSFT(a, b, c) \
(WEBRTC_SPL_MUL_16_16(a, b) >> (c))

where the latter macro is defined in C as
#define WEBRTC_SPL_MUL_16_16(a, b) \
((int32_t) (((int16_t)(a)) * ((int16_t)(b))))
(For definitions on ARMv7 and MIPS, see common_audio/signal_processing/include/spl_inl_{armv7,mips}.h)

The replacement consists of (a short sketch of the pattern follows this list):
- avoiding casts to int16_t when the inputs already are int16_t
- adding an explicit cast to <type> when the result is assigned to <type> (other than int or int32_t)
- minor cleanups, such as removing unnecessary parentheses, and style changes
- removing commented-out code lines used during development
- excluding fft.c, since it uses NEON optimizations and removing the macro there may cause a performance regression

BUG=3348, 3353
TESTED=locally on Linux and trybots
R=kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/48799004

Cr-Commit-Position: refs/heads/master@{#8967}
Author: Bjorn Volcker
Date:   2015-04-10 08:06:45 +02:00
Parent: f6a99e63b6
Commit: f2822edf61

9 changed files with 86 additions and 106 deletions

View File

@ -1005,13 +1005,17 @@ int16_t WebRtcIsacfix_GetSnr(int16_t bottle_neck, int16_t framesamples)
/* find new SNR value */
//consider BottleNeck to be in Q10 ( * 1 in Q10)
switch(framesamples) {
// TODO(bjornv): The comments below confuses me. I don't know if there is a
// difference between frame lengths (in which case the implementation is
// wrong), or if it is frame length independent in which case we should
// correct the comment and simplify the implementation.
case 480:
/*s2nr = -1*(a_30 << 10) + ((b_30 * bottle_neck) >> 10);*/
s2nr = -22500 + (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(500, bottle_neck, 10); //* 0.001; //+ c_30 * bottle_neck * bottle_neck * 0.000001;
s2nr = -22500 + (int16_t)(500 * bottle_neck >> 10);
break;
case 960:
/*s2nr = -1*(a_60 << 10) + ((b_60 * bottle_neck) >> 10);*/
s2nr = -22500 + (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(500, bottle_neck, 10); //* 0.001; //+ c_30 * bottle_neck * bottle_neck * 0.000001;
s2nr = -22500 + (int16_t)(500 * bottle_neck >> 10);
break;
default:
s2nr = -1; /* Error */

View File

@ -130,14 +130,15 @@ int16_t WebRtcIsacfix_DecodeImpl(int16_t *signal_out16,
ISACdec_obj->plcstr_obj.decayCoeffNoise = WEBRTC_SPL_WORD16_MAX; /* DECAY_RATE is in Q15 */
ISACdec_obj->plcstr_obj.pitchCycles = 0;
PitchGains_Q12[0] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(PitchGains_Q12[0], 700, 10 );
PitchGains_Q12[0] = (int16_t)(PitchGains_Q12[0] * 700 >> 10);
/* ---- Add-overlap ---- */
WebRtcSpl_GetHanningWindow( overlapWin, RECOVERY_OVERLAP );
for( k = 0; k < RECOVERY_OVERLAP; k++ )
Vector_Word16_1[k] = WebRtcSpl_AddSatW16(
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT( (ISACdec_obj->plcstr_obj).overlapLP[k], overlapWin[RECOVERY_OVERLAP - k - 1], 14),
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT( Vector_Word16_1[k], overlapWin[k], 14) );
(int16_t)(ISACdec_obj->plcstr_obj.overlapLP[k] *
overlapWin[RECOVERY_OVERLAP - k - 1] >> 14),
(int16_t)(Vector_Word16_1[k] * overlapWin[k] >> 14));
@ -176,7 +177,7 @@ int16_t WebRtcIsacfix_DecodeImpl(int16_t *signal_out16,
/* reduce gain to compensate for pitch enhancer */
/* gain = 1.0f - 0.45f * AvgPitchGain; */
tmp32a = WEBRTC_SPL_MUL_16_16_RSFT(AvgPitchGain_Q12, 29, 0); // Q18
tmp32a = AvgPitchGain_Q12 * 29; // Q18
gainQ13 = (int16_t)((262144 - tmp32a) >> 5); // Q18 -> Q13.
for (k = 0; k < FRAMESAMPLES/2; k++)

View File

@ -72,7 +72,7 @@ static int16_t plc_filterma_Fast(
o >>= rshift;
/* decay the output signal; this is specific to plc */
*Out++ = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT( (int16_t)o, decay, 15); // ((o + (int32_t)2048) >> 12);
*Out++ = (int16_t)((int16_t)o * decay >> 15);
/* change the decay */
decay -= reduceDecay;
@ -139,7 +139,7 @@ static void MemshipValQ15( int16_t in, int16_t *A, int16_t *B )
x*15 + (x*983)/(2^12); note that 983/2^12 = 0.23999 */
/* we are sure that x is in the range of int16_t */
x = (int16_t)(in * 15 + WEBRTC_SPL_MUL_16_16_RSFT(in, 983, 12));
x = (int16_t)(in * 15 + (in * 983 >> 12));
/* b = x^2 / 2 {in Q15} so a shift of 16 is required to
be in correct domain and one more for the division by 2 */
*B = (int16_t)((x * x + 0x00010000) >> 17);
@ -157,7 +157,7 @@ static void MemshipValQ15( int16_t in, int16_t *A, int16_t *B )
{
/* This is a mirror case of the above */
in = 4300 - in;
x = (int16_t)(in * 15 + WEBRTC_SPL_MUL_16_16_RSFT(in, 983, 12));
x = (int16_t)(in * 15 + (in * 983 >> 12));
/* b = x^2 / 2 {in Q15} so a shift of 16 is required to
be in correct domain and one more for the division by 2 */
*A = (int16_t)((x * x + 0x00010000) >> 17);
@ -220,7 +220,7 @@ static void LinearResampler( int16_t *in, int16_t *out, int16_t lenIn, int16_t l
else
{
diff = in[ j + 1 ] - in[ j ];
out[ i ] = in[ j ] + (int16_t)WEBRTC_SPL_MUL_16_16_RSFT( diff, relativePos, RESAMP_RES_BIT );
out[i] = in[j] + (int16_t)(diff * relativePos >> RESAMP_RES_BIT);
}
}
}
@ -449,12 +449,9 @@ int16_t WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
pitchLags_Q7[0] = pitchLags_Q7[1] = pitchLags_Q7[2] = pitchLags_Q7[3] =
((ISACdec_obj->plcstr_obj).stretchLag<<7);
pitchGains_Q12[3] = ( (ISACdec_obj->plcstr_obj).lastPitchGain_Q12);
pitchGains_Q12[2] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
pitchGains_Q12[3], 1010, 10 );
pitchGains_Q12[1] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
pitchGains_Q12[2], 1010, 10 );
pitchGains_Q12[0] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
pitchGains_Q12[1], 1010, 10 );
pitchGains_Q12[2] = (int16_t)(pitchGains_Q12[3] * 1010 >> 10);
pitchGains_Q12[1] = (int16_t)(pitchGains_Q12[2] * 1010 >> 10);
pitchGains_Q12[0] = (int16_t)(pitchGains_Q12[1] * 1010 >> 10);
/* most of the time either B or A are zero so seperating */
@ -527,9 +524,8 @@ int16_t WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
for( i = 0, noiseIndex = 0; i < FRAMESAMPLES_HALF; i++, noiseIndex++ )
{
/* --- Lowpass */
pLP = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
stretchPitchLP[(ISACdec_obj->plcstr_obj).pitchIndex],
(ISACdec_obj->plcstr_obj).decayCoeffPriodic, 15 );
pLP = (int16_t)(stretchPitchLP[ISACdec_obj->plcstr_obj.pitchIndex] *
ISACdec_obj->plcstr_obj.decayCoeffPriodic >> 15);
/* --- Highpass */
pHP = (int32_t)WEBRTC_SPL_MUL_16_32_RSFT15(
@ -626,9 +622,8 @@ int16_t WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
noise1 = (ISACdec_obj->plcstr_obj.seed >> 10) - 16;
nLP = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
(int16_t)((noise1)*(ISACdec_obj->plcstr_obj).std),
(ISACdec_obj->plcstr_obj).decayCoeffNoise, 15 );
nLP = (int16_t)((int16_t)(noise1 * ISACdec_obj->plcstr_obj.std) *
ISACdec_obj->plcstr_obj.decayCoeffNoise >> 15);
/* --- Highpass */
(ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
@ -646,9 +641,8 @@ int16_t WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
/* ------ Periodic Vector --- */
/* --- Lowpass */
pLP = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
stretchPitchLP[(ISACdec_obj->plcstr_obj).pitchIndex],
(ISACdec_obj->plcstr_obj).decayCoeffPriodic, 15 );
pLP = (int16_t)(stretchPitchLP[ISACdec_obj->plcstr_obj.pitchIndex] *
ISACdec_obj->plcstr_obj.decayCoeffPriodic >> 15);
/* --- Highpass */
pHP = (int32_t)WEBRTC_SPL_MUL_16_32_RSFT15(
@ -665,13 +659,11 @@ int16_t WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
}
/* ------ Weighting the noisy and periodic vectors ------- */
wNoisyLP = (int16_t)(WEBRTC_SPL_MUL_16_16_RSFT(
(ISACdec_obj->plcstr_obj).A, nLP, 15 ) );
wNoisyLP = (int16_t)(ISACdec_obj->plcstr_obj.A * nLP >> 15);
wNoisyHP = (int32_t)(WEBRTC_SPL_MUL_16_32_RSFT15(
(ISACdec_obj->plcstr_obj).A, (nHP) ) );
wPriodicLP = (int16_t)(WEBRTC_SPL_MUL_16_16_RSFT(
(ISACdec_obj->plcstr_obj).B, pLP, 15));
wPriodicLP = (int16_t)(ISACdec_obj->plcstr_obj.B * pLP >> 15);
wPriodicHP = (int32_t)(WEBRTC_SPL_MUL_16_32_RSFT15(
(ISACdec_obj->plcstr_obj).B, pHP));
@ -752,9 +744,8 @@ int16_t WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
for( i = 0; i < RECOVERY_OVERLAP; i++ )
{
(ISACdec_obj->plcstr_obj).overlapLP[i] = (int16_t)(
WEBRTC_SPL_MUL_16_16_RSFT(stretchPitchLP[k],
(ISACdec_obj->plcstr_obj).decayCoeffPriodic, 15) );
ISACdec_obj->plcstr_obj.overlapLP[i] = (int16_t)(
stretchPitchLP[k] * ISACdec_obj->plcstr_obj.decayCoeffPriodic >> 15);
k = ( k < ((ISACdec_obj->plcstr_obj).stretchLag - 1) )? (k+1):0;
}
@ -767,8 +758,7 @@ int16_t WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
/* reduce gain to compensate for pitch enhancer */
/* gain = 1.0f - 0.45f * AvgPitchGain; */
tmp32a = WEBRTC_SPL_MUL_16_16_RSFT((ISACdec_obj->plcstr_obj).AvgPitchGain_Q12,
29, 0); // Q18
tmp32a = ISACdec_obj->plcstr_obj.AvgPitchGain_Q12 * 29; // Q18
tmp32b = 262144 - tmp32a; // Q18
gainQ13 = (int16_t) (tmp32b >> 5); // Q13

View File

@ -115,8 +115,9 @@ int WebRtcIsacfix_EncodeImpl(int16_t *in,
// multiply the bottleneck by 0.88 before computing SNR, 0.88 is tuned by experimenting on TIMIT
// 901/1024 is 0.87988281250000
ISACenc_obj->s2nr = WebRtcIsacfix_GetSnr((int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ISACenc_obj->BottleNeck, 901, 10),
ISACenc_obj->current_framesamples);
ISACenc_obj->s2nr = WebRtcIsacfix_GetSnr(
(int16_t)(ISACenc_obj->BottleNeck * 901 >> 10),
ISACenc_obj->current_framesamples);
/* encode frame length */
status = WebRtcIsacfix_EncodeFrameLen(ISACenc_obj->current_framesamples, &ISACenc_obj->bitstr_obj);
@ -352,8 +353,8 @@ int WebRtcIsacfix_EncodeImpl(int16_t *in,
// scale FFT coefficients to reduce the bit-rate
for(k = 0; k < FRAMESAMPLES_HALF; k++)
{
LP16a[k] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(LP16a[k], scaleQ14[idx], 14);
LPandHP[k] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(LPandHP[k], scaleQ14[idx], 14);
LP16a[k] = (int16_t)(LP16a[k] * scaleQ14[idx] >> 14);
LPandHP[k] = (int16_t)(LPandHP[k] * scaleQ14[idx] >> 14);
}
// Save data for multiple packets memory

View File

@ -110,7 +110,7 @@ static int16_t CalcLogN(int32_t arg) {
zeros=WebRtcSpl_NormU32(arg);
frac = (int16_t)((uint32_t)((arg << zeros) & 0x7FFFFFFF) >> 23);
log2 = (int16_t)(((31 - zeros) << 8) + frac); // log2(x) in Q8
logN=(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(log2,22713,15); //Q8*Q15 log(2) = 0.693147 = 22713 in Q15
logN = (int16_t)(log2 * 22713 >> 15); // log(2) = 0.693147 = 22713 in Q15
logN=logN+11; //Scalar compensation which minimizes the (log(x)-logN(x))^2 error over all x.
return logN;
@ -129,13 +129,12 @@ static int16_t CalcLogN(int32_t arg) {
*/
static int32_t CalcExpN(int16_t x) {
int16_t ax, axINT, axFRAC;
int16_t axINT, axFRAC;
int16_t exp16;
int32_t exp;
int16_t ax = (int16_t)(x * 23637 >> 14); // Q8
if (x>=0) {
// ax=(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(x, 23637-700, 14); //Q8
ax=(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(x, 23637, 14); //Q8
axINT = ax >> 8; //Q0
axFRAC = ax&0x00FF;
exp16 = 1 << axINT; // Q0
@ -143,8 +142,6 @@ static int32_t CalcExpN(int16_t x) {
exp = exp16 * axFRAC; // Q0*Q8 = Q8
exp <<= 9; // Q17
} else {
// ax=(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(x, 23637+700, 14); //Q8
ax=(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(x, 23637, 14); //Q8
ax = -ax;
axINT = 1 + (ax >> 8); //Q0
axFRAC = 0x00FF - (ax&0x00FF);
@ -679,16 +676,16 @@ static void Rc2LarFix(const int16_t *rcQ15, int32_t *larQ17, int16_t order) {
if (rc<24956) { //0.7615966 in Q15
// (Q15*Q13)>>11 = Q17
larAbsQ17 = WEBRTC_SPL_MUL_16_16_RSFT(rc, 21512, 11);
larAbsQ17 = rc * 21512 >> 11;
} else if (rc<30000) { //0.91552734375 in Q15
// Q17 + (Q15*Q12)>>10 = Q17
larAbsQ17 = -465024 + WEBRTC_SPL_MUL_16_16_RSFT(rc, 29837, 10);
larAbsQ17 = -465024 + (rc * 29837 >> 10);
} else if (rc<32500) { //0.99182128906250 in Q15
// Q17 + (Q15*Q10)>>8 = Q17
larAbsQ17 = -3324784 + WEBRTC_SPL_MUL_16_16_RSFT(rc, 31863, 8);
larAbsQ17 = -3324784 + (rc * 31863 >> 8);
} else {
// Q17 + (Q15*Q5)>>3 = Q17
larAbsQ17 = -88546020 + WEBRTC_SPL_MUL_16_16_RSFT(rc, 21973, 3);
larAbsQ17 = -88546020 + (rc * 21973 >> 3);
}
if (rcQ15[k]>0) {
@ -717,7 +714,7 @@ static void Lar2RcFix(const int32_t *larQ17, int16_t *rcQ15, int16_t order) {
if (larAbsQ11<4097) { //2.000012018559 in Q11
// Q11*Q16>>12 = Q15
rc = WEBRTC_SPL_MUL_16_16_RSFT(larAbsQ11, 24957, 12);
rc = larAbsQ11 * 24957 >> 12;
} else if (larAbsQ11<6393) { //3.121320351712 in Q11
// (Q11*Q17 + Q13)>>13 = Q15
rc = (larAbsQ11 * 17993 + 130738688) >> 13;
@ -995,7 +992,8 @@ int WebRtcIsacfix_DecodeLpcCoef(Bitstr_dec *streamdata,
pos = LPC_SHAPE_ORDER * j;
pos2 = LPC_SHAPE_ORDER * k;
for (n=0; n<LPC_SHAPE_ORDER; n++) {
sumQQ += WEBRTC_SPL_MUL_16_16_RSFT(tmpcoeffs_sQ10[pos], WebRtcIsacfix_kT1ShapeQ15[model][pos2], 7); // (Q10*Q15)>>7 = Q18
sumQQ += tmpcoeffs_sQ10[pos] *
WebRtcIsacfix_kT1ShapeQ15[model][pos2] >> 7; // (Q10*Q15)>>7 = Q18
pos++;
pos2++;
}
@ -1609,7 +1607,7 @@ int WebRtcIsacfix_EncodePitchGain(int16_t* PitchGains_Q12,
/* get the approximate arcsine (almost linear)*/
for (k=0; k<PITCH_SUBFRAMES; k++)
SQ15[k] = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(PitchGains_Q12[k],33,2); //Q15
SQ15[k] = (int16_t)(PitchGains_Q12[k] * 33 >> 2); // Q15
/* find quantization index; only for the first three transform coefficients */
@ -1618,7 +1616,7 @@ int WebRtcIsacfix_EncodePitchGain(int16_t* PitchGains_Q12,
/* transform */
CQ17=0;
for (j=0; j<PITCH_SUBFRAMES; j++) {
CQ17 += WEBRTC_SPL_MUL_16_16_RSFT(WebRtcIsacfix_kTransform[k][j], SQ15[j],10); // Q17
CQ17 += WebRtcIsacfix_kTransform[k][j] * SQ15[j] >> 10; // Q17
}
index[k] = (int16_t)((CQ17 + 8192)>>14); // Rounding and scaling with stepsize (=1/0.125=8)
@ -1733,14 +1731,14 @@ int WebRtcIsacfix_DecodePitchLag(Bitstr_dec *streamdata,
CQ10 = mean_val2Q10[index[1]];
for (k=0; k<PITCH_SUBFRAMES; k++) {
tmp32b = (int32_t) WEBRTC_SPL_MUL_16_16_RSFT((int16_t) WebRtcIsacfix_kTransform[1][k], (int16_t) CQ10,10);
tmp32b = WebRtcIsacfix_kTransform[1][k] * (int16_t)CQ10 >> 10;
tmp16c = (int16_t)(tmp32b >> 5);
PitchLags_Q7[k] += tmp16c;
}
CQ10 = mean_val4Q10[index[3]];
for (k=0; k<PITCH_SUBFRAMES; k++) {
tmp32b = (int32_t) WEBRTC_SPL_MUL_16_16_RSFT((int16_t) WebRtcIsacfix_kTransform[3][k], (int16_t) CQ10,10);
tmp32b = WebRtcIsacfix_kTransform[3][k] * (int16_t)CQ10 >> 10;
tmp16c = (int16_t)(tmp32b >> 5);
PitchLags_Q7[k] += tmp16c;
}
@ -1809,7 +1807,7 @@ int WebRtcIsacfix_EncodePitchLag(int16_t* PitchLagsQ7,
/* transform */
CQ17=0;
for (j=0; j<PITCH_SUBFRAMES; j++)
CQ17 += WEBRTC_SPL_MUL_16_16_RSFT(WebRtcIsacfix_kTransform[k][j], PitchLagsQ7[j],2); // Q17
CQ17 += WebRtcIsacfix_kTransform[k][j] * PitchLagsQ7[j] >> 2; // Q17
CQ17 = WEBRTC_SPL_SHIFT_W32(CQ17,shft); // Scale with StepSize
@ -1840,14 +1838,14 @@ int WebRtcIsacfix_EncodePitchLag(int16_t* PitchLagsQ7,
CQ10 = mean_val2Q10[index[1]];
for (k=0; k<PITCH_SUBFRAMES; k++) {
tmp32b = (int32_t) WEBRTC_SPL_MUL_16_16_RSFT((int16_t) WebRtcIsacfix_kTransform[1][k], (int16_t) CQ10,10);
tmp32b = WebRtcIsacfix_kTransform[1][k] * (int16_t)CQ10 >> 10;
tmp16c = (int16_t)(tmp32b >> 5); // Q7.
PitchLagsQ7[k] += tmp16c;
}
CQ10 = mean_val4Q10[index[3]];
for (k=0; k<PITCH_SUBFRAMES; k++) {
tmp32b = (int32_t) WEBRTC_SPL_MUL_16_16_RSFT((int16_t) WebRtcIsacfix_kTransform[3][k], (int16_t) CQ10,10);
tmp32b = WebRtcIsacfix_kTransform[3][k] * (int16_t)CQ10 >> 10;
tmp16c = (int16_t)(tmp32b >> 5); // Q7.
PitchLagsQ7[k] += tmp16c;
}

View File

@ -393,7 +393,7 @@ void WebRtcIsacfix_GetVars(const int16_t *input, const int16_t *pitchGains_Q12,
chng3 = WEBRTC_SPL_ABS_W16(nrgQlog[1]-nrgQlog[0]);
chng4 = WEBRTC_SPL_ABS_W16(nrgQlog[0]-oldNrgQlog);
tmp = chng1+chng2+chng3+chng4;
chngQ = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp, kChngFactor, 10); /* Q12 */
chngQ = (int16_t)(tmp * kChngFactor >> 10); /* Q12 */
chngQ += 2926; /* + 1.0/1.4 in Q12 */
/* Find average pitch gain */
@ -403,10 +403,10 @@ void WebRtcIsacfix_GetVars(const int16_t *input, const int16_t *pitchGains_Q12,
pgQ += pitchGains_Q12[k];
}
pg3 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(pgQ, pgQ,11); /* pgQ in Q(12+2)=Q14. Q14*Q14>>11 => Q17 */
pg3 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(pgQ, pg3,13); /* Q17*Q14>>13 =>Q18 */
pg3 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(pg3, kMulPitchGain ,5); /* Q10 kMulPitchGain = -25 = -200 in Q-3. */
pg3 = (int16_t)(pgQ * pgQ >> 11); // pgQ in Q(12+2)=Q14. Q14*Q14>>11 => Q17
pg3 = (int16_t)(pgQ * pg3 >> 13); /* Q14*Q17>>13 =>Q18 */
/* kMulPitchGain = -25 = -200 in Q-3. */
pg3 = (int16_t)(pg3 * kMulPitchGain >> 5); // Q10
tmp16=(int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(kExp2,pg3,13);/* Q13*Q10>>13 => Q10*/
if (tmp16<0) {
tmp16_2 = (0x0400 | (tmp16 & 0x03FF));
@ -580,9 +580,9 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
snrq=snrQ10;
/* SNR= C * 2 ^ (D * snrq) ; C=0.289, D=0.05*log2(10)=0.166 (~=172 in Q10)*/
tmp16 = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(snrq, 172, 10); // Q10
tmp16 = (int16_t)(snrq * 172 >> 10); // Q10
tmp16b = exp2_Q10_T(tmp16); // Q10
snrq = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(tmp16b, 285, 10); // Q10
snrq = (int16_t)(tmp16b * 285 >> 10); // Q10
/* change quallevel depending on pitch gains and level fluctuations */
WebRtcIsacfix_GetVars(inLoQ0, pitchGains_Q12, &(maskdata->OldEnergy), &varscaleQ14);
@ -595,12 +595,12 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
aaQ14 = (int16_t)((22938 * (8192 + (varscaleQ14 >> 1)) + 32768) >> 16);
/* Calculate tmp = (1.0 + aa*aa); in Q12 */
tmp16 = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(aaQ14, aaQ14, 15); //Q14*Q14>>15 = Q13
tmp16 = (int16_t)(aaQ14 * aaQ14 >> 15); // Q14*Q14>>15 = Q13
tmpQQlo = 4096 + (tmp16 >> 1); // Q12 + Q13>>1 = Q12.
/* Calculate tmp = (1.0+aa) * (1.0+aa); */
tmp16 = 8192 + (aaQ14 >> 1); // 1+a in Q13.
tmpQQhi = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(tmp16, tmp16, 14); //Q13*Q13>>14 = Q12
tmpQQhi = (int16_t)(tmp16 * tmp16 >> 14); // Q13*Q13>>14 = Q12
/* replace data in buffer by new look-ahead data */
for (pos1 = 0; pos1 < QLOOKAHEAD; pos1++) {
@ -613,19 +613,19 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
for (pos1 = 0; pos1 < WINLEN - UPDATE/2; pos1++) {
maskdata->DataBufferLoQ0[pos1] = maskdata->DataBufferLoQ0[pos1 + UPDATE/2];
maskdata->DataBufferHiQ0[pos1] = maskdata->DataBufferHiQ0[pos1 + UPDATE/2];
DataLoQ6[pos1] = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(
maskdata->DataBufferLoQ0[pos1], kWindowAutocorr[pos1], 15); // Q0*Q21>>15 = Q6
DataHiQ6[pos1] = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(
maskdata->DataBufferHiQ0[pos1], kWindowAutocorr[pos1], 15); // Q0*Q21>>15 = Q6
DataLoQ6[pos1] = (int16_t)(maskdata->DataBufferLoQ0[pos1] *
kWindowAutocorr[pos1] >> 15); // Q0*Q21>>15 = Q6
DataHiQ6[pos1] = (int16_t)(maskdata->DataBufferHiQ0[pos1] *
kWindowAutocorr[pos1] >> 15); // Q0*Q21>>15 = Q6
}
pos2 = (int16_t)(k * UPDATE / 2);
for (n = 0; n < UPDATE/2; n++, pos1++) {
maskdata->DataBufferLoQ0[pos1] = inLoQ0[QLOOKAHEAD + pos2];
maskdata->DataBufferHiQ0[pos1] = inHiQ0[pos2++];
DataLoQ6[pos1] = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(
maskdata->DataBufferLoQ0[pos1], kWindowAutocorr[pos1], 15); // Q0*Q21>>15 = Q6
DataHiQ6[pos1] = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(
maskdata->DataBufferHiQ0[pos1], kWindowAutocorr[pos1], 15); // Q0*Q21>>15 = Q6
DataLoQ6[pos1] = (int16_t)(maskdata->DataBufferLoQ0[pos1] *
kWindowAutocorr[pos1] >> 15); // Q0*Q21>>15 = Q6
DataHiQ6[pos1] = (int16_t)(maskdata->DataBufferHiQ0[pos1] *
kWindowAutocorr[pos1] >> 15); // Q0*Q21>>15 = Q6
}
/* Get correlation coefficients */
@ -868,14 +868,12 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
/* add hearing threshold and compute the gain */
/* lo_coeff = varscale * S_N_R / (sqrt_nrg + varscale * H_T_H); */
//tmp32a=WEBRTC_SPL_MUL_16_16_RSFT(varscaleQ14, H_T_HQ19, 17); // Q14
tmp32a = varscaleQ14 >> 1; // H_T_HQ19=65536 (16-17=-1)
ssh = sh_lo >> 1; // sqrt_nrg is in Qssh.
sh = ssh - 14;
tmp32b = WEBRTC_SPL_SHIFT_W32(tmp32a, sh); // Q14->Qssh
tmp32c = sqrt_nrg + tmp32b; // Qssh (denominator)
tmp32a = WEBRTC_SPL_MUL_16_16_RSFT(varscaleQ14, snrq, 0); //Q24 (numerator)
tmp32a = varscaleQ14 * snrq; // Q24 (numerator)
sh = WebRtcSpl_NormW32(tmp32c);
shft = 16 - sh;
@ -918,14 +916,13 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
/* add hearing threshold and compute the gain */
/* hi_coeff = varscale * S_N_R / (sqrt_nrg + varscale * H_T_H); */
//tmp32a=WEBRTC_SPL_MUL_16_16_RSFT(varscaleQ14, H_T_HQ19, 17); // Q14
tmp32a = varscaleQ14 >> 1; // H_T_HQ19=65536 (16-17=-1)
ssh = sh_hi >> 1; // |sqrt_nrg| is in Qssh.
sh = ssh - 14;
tmp32b = WEBRTC_SPL_SHIFT_W32(tmp32a, sh); // Q14->Qssh
tmp32c = sqrt_nrg + tmp32b; // Qssh (denominator)
tmp32a = WEBRTC_SPL_MUL_16_16_RSFT(varscaleQ14, snrq, 0); //Q24 (numerator)
tmp32a = varscaleQ14 * snrq; // Q24 (numerator)
sh = WebRtcSpl_NormW32(tmp32c);
shft = 16 - sh;

View File

@ -212,7 +212,7 @@ void WebRtcIsacfix_InitialPitch(const int16_t *in, /* Q0 */
/* bias towards pitch lag of previous frame */
tmp32a = WebRtcIsacfix_Log2Q8((uint32_t) old_lagQ8) - 2304;
// log2(0.5*oldlag) in Q8
tmp32b = WEBRTC_SPL_MUL_16_16_RSFT(oldgQ12,oldgQ12, 10); //Q12 & * 4.0;
tmp32b = oldgQ12 * oldgQ12 >> 10; // Q12 & * 4.0;
gain_bias16 = (int16_t) tmp32b; //Q12
if (gain_bias16 > 3276) gain_bias16 = 3276; // 0.8 in Q12
@ -222,12 +222,12 @@ void WebRtcIsacfix_InitialPitch(const int16_t *in, /* Q0 */
if (crrvecQ8_1[k]>0) {
tmp32b = WebRtcIsacfix_Log2Q8((uint32_t) (k + (PITCH_MIN_LAG/2-2)));
tmp16a = (int16_t) (tmp32b - tmp32a); // Q8 & fabs(ratio)<4
tmp32c = WEBRTC_SPL_MUL_16_16_RSFT(tmp16a,tmp16a, 6); //Q10
tmp32c = tmp16a * tmp16a >> 6; // Q10
tmp16b = (int16_t) tmp32c; // Q10 & <8
tmp32d = WEBRTC_SPL_MUL_16_16_RSFT(tmp16b, 177 , 8); // mult with ln2 in Q8
tmp32d = tmp16b * 177 >> 8; // mult with ln2 in Q8
tmp16c = (int16_t) tmp32d; // Q10 & <4
tmp16d = Exp2Q10((int16_t) -tmp16c); //Q10
tmp32c = WEBRTC_SPL_MUL_16_16_RSFT(gain_bias16,tmp16d,13); // Q10 & * 0.5
tmp32c = gain_bias16 * tmp16d >> 13; // Q10 & * 0.5
bias16 = (int16_t) (1024 + tmp32c); // Q10
tmp32b = WebRtcIsacfix_Log2Q8((uint32_t)bias16) - 2560;
// Q10 in -> Q8 out with 10*2^8 offset
@ -306,7 +306,7 @@ void WebRtcIsacfix_InitialPitch(const int16_t *in, /* Q0 */
tmp32a= WebRtcIsacfix_Log2Q8((uint32_t) *yq) - 2048; // offset 8*2^8
/* Bias towards short lags */
/* log(pow(0.8, log(2.0 * *y )))/log(2.0) */
tmp32b= WEBRTC_SPL_MUL_16_16_RSFT((int16_t) tmp32a, -42, 8);
tmp32b = (int16_t)tmp32a * -42 >> 8;
tmp32c= tmp32b + 256;
*fyq += tmp32c;
if (*fyq > corr_max32) {
@ -330,7 +330,7 @@ void WebRtcIsacfix_InitialPitch(const int16_t *in, /* Q0 */
{
tmp32a = k << 7; // 0.5*k Q8
tmp32b = tmp32a * 2 - ratq; // Q8
tmp32c = WEBRTC_SPL_MUL_16_16_RSFT((int16_t) tmp32b, (int16_t) tmp32b, 8); // Q8
tmp32c = (int16_t)tmp32b * (int16_t)tmp32b >> 8; // Q8
tmp32b = tmp32c + (ratq >> 1);
// (k-r)^2 + 0.5 * r Q8
@ -380,7 +380,7 @@ void WebRtcIsacfix_InitialPitch(const int16_t *in, /* Q0 */
/* Bias towards short lags */
/* log(pow(0.8, log(2.0f * *y )))/log(2.0f) */
tmp32a= WebRtcIsacfix_Log2Q8((uint32_t) *yq) - 2048; // offset 8*2^8
tmp32b= WEBRTC_SPL_MUL_16_16_RSFT((int16_t) tmp32a, -82, 8);
tmp32b = (int16_t)tmp32a * -82 >> 8;
tmp32c= tmp32b + 256;
*fyq += tmp32c;
if (*fyq > corr_max32) {

View File

@ -34,12 +34,8 @@ void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8) {
csum32 = 0;
x = in + PITCH_MAX_LAG / 2 + 2;
for (n = 0; n < PITCH_CORR_LEN2; n++) {
ysum32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t)in[n],
(int16_t)in[n],
scaling); // Q0
csum32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t)x[n],
(int16_t)in[n],
scaling); // Q0
ysum32 += in[n] * in[n] >> scaling; // Q0
csum32 += x[n] * in[n] >> scaling; // Q0
}
logcorQ8 += PITCH_LAG_SPAN2 - 1;
lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32) >> 1; // Q8, sqrt(ysum)
@ -57,12 +53,9 @@ void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8) {
for (k = 1; k < PITCH_LAG_SPAN2; k++) {
inptr = &in[k];
ysum32 -= WEBRTC_SPL_MUL_16_16_RSFT((int16_t)in[k - 1],
(int16_t)in[k - 1],
scaling);
ysum32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t)in[PITCH_CORR_LEN2 + k - 1],
(int16_t)in[PITCH_CORR_LEN2 + k - 1],
scaling);
ysum32 -= in[k - 1] * in[k - 1] >> scaling;
ysum32 += in[PITCH_CORR_LEN2 + k - 1] * in[PITCH_CORR_LEN2 + k - 1] >>
scaling;
#ifdef WEBRTC_ARCH_ARM_NEON
{
int32_t vbuff[4];

View File

@ -89,14 +89,12 @@ void WebRtcIsacfix_PitchFilter(int16_t* indatQQ, // Q10 if type is 1 or 4,
// Make output more periodic.
for (k = 0; k < PITCH_SUBFRAMES; k++) {
gainsQ12[k] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
gainsQ12[k], Gain, 14);
gainsQ12[k] = (int16_t)(gainsQ12[k] * Gain >> 14);
}
}
// No interpolation if pitch lag step is big.
if ((WEBRTC_SPL_MUL_16_16_RSFT(lagsQ7[0], 3, 1) < oldLagQ7) ||
(lagsQ7[0] > WEBRTC_SPL_MUL_16_16_RSFT(oldLagQ7, 3, 1))) {
if (((lagsQ7[0] * 3 >> 1) < oldLagQ7) || (lagsQ7[0] > (oldLagQ7 * 3 >> 1))) {
oldLagQ7 = lagsQ7[0];
oldGainQ12 = gainsQ12[0];
}
@ -110,8 +108,7 @@ void WebRtcIsacfix_PitchFilter(int16_t* indatQQ, // Q10 if type is 1 or 4,
lagdeltaQ7, kDivFactor, 15);
curLagQ7 = oldLagQ7;
gaindeltaQ12 = gainsQ12[k] - oldGainQ12;
gaindeltaQ12 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
gaindeltaQ12, kDivFactor, 15);
gaindeltaQ12 = (int16_t)(gaindeltaQ12 * kDivFactor >> 15);
curGainQ12 = oldGainQ12;
oldLagQ7 = lagsQ7[k];
@ -173,8 +170,7 @@ void WebRtcIsacfix_PitchFilterGains(const int16_t* indatQ0,
oldLagQ7 = pfp->oldlagQ7;
// No interpolation if pitch lag step is big.
if ((WEBRTC_SPL_MUL_16_16_RSFT(lagsQ7[0], 3, 1) < oldLagQ7) ||
(lagsQ7[0] > WEBRTC_SPL_MUL_16_16_RSFT(oldLagQ7, 3, 1))) {
if (((lagsQ7[0] * 3 >> 1) < oldLagQ7) || (lagsQ7[0] > (oldLagQ7 * 3 >> 1))) {
oldLagQ7 = lagsQ7[0];
}