diff --git a/webrtc/common_audio/signal_processing/levinson_durbin.c b/webrtc/common_audio/signal_processing/levinson_durbin.c
index d46e551367..e890c806cb 100644
--- a/webrtc/common_audio/signal_processing/levinson_durbin.c
+++ b/webrtc/common_audio/signal_processing/levinson_durbin.c
@@ -43,10 +43,10 @@ int16_t WebRtcSpl_LevinsonDurbin(const int32_t* R, int16_t* A, int16_t* K,
 
     for (i = 0; i <= order; ++i)
     {
-        temp1W32 = WEBRTC_SPL_LSHIFT_W32(R[i], norm);
+        temp1W32 = R[i] * (1 << norm);
         // Put R in hi and low format
         R_hi[i] = (int16_t)(temp1W32 >> 16);
-        R_low[i] = (int16_t)((temp1W32 - ((int32_t)R_hi[i] << 16)) >> 1);
+        R_low[i] = (int16_t)((temp1W32 - ((int32_t)R_hi[i] * 65536)) >> 1);
     }
 
     // K = A[1] = -R[1] / R[0]
@@ -63,7 +63,7 @@ int16_t WebRtcSpl_LevinsonDurbin(const int32_t* R, int16_t* A, int16_t* K,
 
     // Put K in hi and low format
     K_hi = (int16_t)(temp1W32 >> 16);
-    K_low = (int16_t)((temp1W32 - ((int32_t)K_hi << 16)) >> 1);
+    K_low = (int16_t)((temp1W32 - ((int32_t)K_hi * 65536)) >> 1);
 
     // Store first reflection coefficient
     K[0] = K_hi;
@@ -72,7 +72,7 @@ int16_t WebRtcSpl_LevinsonDurbin(const int32_t* R, int16_t* A, int16_t* K,
 
     // Put A[1] in hi and low format
     A_hi[1] = (int16_t)(temp1W32 >> 16);
-    A_low[1] = (int16_t)((temp1W32 - ((int32_t)A_hi[1] << 16)) >> 1);
+    A_low[1] = (int16_t)((temp1W32 - ((int32_t)A_hi[1] * 65536)) >> 1);
 
     // Alpha = R[0] * (1-K^2)
@@ -112,14 +112,14 @@ int16_t WebRtcSpl_LevinsonDurbin(const int32_t* R, int16_t* A, int16_t* K,
         for (j = 1; j < i; j++)
         {
             // temp1W32 is in Q31
-            temp1W32 += (R_hi[j] * A_hi[i - j] << 1) +
+            temp1W32 += (R_hi[j] * A_hi[i - j] * 2) +
                     (((R_hi[j] * A_low[i - j] >> 15) +
-                            (R_low[j] * A_hi[i - j] >> 15)) << 1);
+                            (R_low[j] * A_hi[i - j] >> 15)) * 2);
         }
 
-        temp1W32 = WEBRTC_SPL_LSHIFT_W32(temp1W32, 4);
-        temp1W32 += (WEBRTC_SPL_LSHIFT_W32((int32_t)R_hi[i], 16)
-                + WEBRTC_SPL_LSHIFT_W32((int32_t)R_low[i], 1));
+        temp1W32 = temp1W32 * 16;
+        temp1W32 += ((int32_t)R_hi[i] * 65536)
+                + WEBRTC_SPL_LSHIFT_W32((int32_t)R_low[i], 1);
 
         // K = -temp1W32 / Alpha
         temp2W32 = WEBRTC_SPL_ABS_W32(temp1W32); // abs(temp1W32)
@@ -135,7 +135,7 @@ int16_t WebRtcSpl_LevinsonDurbin(const int32_t* R, int16_t* A, int16_t* K,
         norm = WebRtcSpl_NormW32(temp3W32);
         if ((Alpha_exp <= norm) || (temp3W32 == 0))
         {
-            temp3W32 = WEBRTC_SPL_LSHIFT_W32(temp3W32, Alpha_exp);
+            temp3W32 = temp3W32 * (1 << Alpha_exp);
         } else
         {
             if (temp3W32 > 0)
@@ -149,7 +149,7 @@ int16_t WebRtcSpl_LevinsonDurbin(const int32_t* R, int16_t* A, int16_t* K,
 
         // Put K on hi and low format
         K_hi = (int16_t)(temp3W32 >> 16);
-        K_low = (int16_t)((temp3W32 - ((int32_t)K_hi << 16)) >> 1);
+        K_low = (int16_t)((temp3W32 - ((int32_t)K_hi * 65536)) >> 1);
 
         // Store Reflection coefficient in Q15
         K[i - 1] = K_hi;
@@ -171,17 +171,17 @@ int16_t WebRtcSpl_LevinsonDurbin(const int32_t* R, int16_t* A, int16_t* K,
         for (j = 1; j < i; j++)
        {
             // temp1W32 = A[j] in Q27
-            temp1W32 = WEBRTC_SPL_LSHIFT_W32((int32_t)A_hi[j],16)
+            temp1W32 = (int32_t)A_hi[j] * 65536
                     + WEBRTC_SPL_LSHIFT_W32((int32_t)A_low[j],1);
 
             // temp1W32 += K*A[i-j] in Q27
             temp1W32 += (K_hi * A_hi[i - j] + (K_hi * A_low[i - j] >> 15) +
-                    (K_low * A_hi[i - j] >> 15)) << 1;
+                    (K_low * A_hi[i - j] >> 15)) * 2;
 
             // Put Anew in hi and low format
             A_upd_hi[j] = (int16_t)(temp1W32 >> 16);
             A_upd_low[j] = (int16_t)(
-                (temp1W32 - ((int32_t)A_upd_hi[j] << 16)) >> 1);
+                (temp1W32 - ((int32_t)A_upd_hi[j] * 65536)) >> 1);
         }
 
         // temp3W32 = K in Q27 (Convert from Q31 to Q27)
@@ -190,7 +190,7 @@ int16_t WebRtcSpl_LevinsonDurbin(const int32_t* R, int16_t* A, int16_t* K,
 
         // Store Anew in hi and low format
         A_upd_hi[i] = (int16_t)(temp3W32 >> 16);
         A_upd_low[i] = (int16_t)(
-            (temp3W32 - ((int32_t)A_upd_hi[i] << 16)) >> 1);
+            (temp3W32 - ((int32_t)A_upd_hi[i] * 65536)) >> 1);
 
         // Alpha = Alpha * (1-K^2)
@@ -237,10 +237,10 @@ int16_t WebRtcSpl_LevinsonDurbin(const int32_t* R, int16_t* A, int16_t* K,
     for (i = 1; i <= order; i++)
     {
         // temp1W32 in Q27
-        temp1W32 = WEBRTC_SPL_LSHIFT_W32((int32_t)A_hi[i], 16)
+        temp1W32 = (int32_t)A_hi[i] * 65536
                 + WEBRTC_SPL_LSHIFT_W32((int32_t)A_low[i], 1);
         // Round and store upper word
-        A[i] = (int16_t)(((temp1W32 << 1) + 32768) >> 16);
+        A[i] = (int16_t)(((temp1W32 * 2) + 32768) >> 16);
     }
     return 1; // Stable filters
 }
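Note on the hi/low arithmetic above (commentary, not part of the patch): levinson_durbin.c keeps 32-bit fixed-point values split into a 16-bit high word and a 15-bit low word. The rewritten expressions compute exactly the same bits as before; `hi * 65536` differs from `hi << 16` only in that it stays defined in C when `hi` is negative. The shifts that survive the patch, such as `WEBRTC_SPL_LSHIFT_W32((int32_t)R_low[i], 1)`, operate on low words, which are always non-negative. A minimal sketch of the split/recombine pattern; the helper names are ours, not WebRTC APIs:

#include <stdint.h>

/* Split a 32-bit fixed-point value into a 16-bit high word and a 15-bit
 * low word, mirroring the pattern in levinson_durbin.c. The right shift
 * of a negative value is implementation-defined (arithmetic on the
 * targets WebRTC supports), which the original code relies on as well. */
static void split_hi_low(int32_t x, int16_t* hi, int16_t* low)
{
    *hi = (int16_t)(x >> 16);
    /* x - hi * 2^16 is the remainder in [0, 65535]; >> 1 keeps 15 bits.
     * hi * 65536 is well defined even for negative hi, unlike hi << 16. */
    *low = (int16_t)((x - ((int32_t)*hi * 65536)) >> 1);
}

/* Inverse: reassemble hi * 2^16 + low * 2. The left shift on low is safe
 * because low is always non-negative. */
static int32_t combine_hi_low(int16_t hi, int16_t low)
{
    return (int32_t)hi * 65536 + ((int32_t)low << 1);
}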
diff --git a/webrtc/common_audio/signal_processing/vector_scaling_operations.c b/webrtc/common_audio/signal_processing/vector_scaling_operations.c
index fdefd06760..e1f391d10a 100644
--- a/webrtc/common_audio/signal_processing/vector_scaling_operations.c
+++ b/webrtc/common_audio/signal_processing/vector_scaling_operations.c
@@ -37,7 +37,7 @@ void WebRtcSpl_VectorBitShiftW16(int16_t *res, size_t length,
     {
         for (i = length; i > 0; i--)
         {
-            (*res++) = ((*in++) << (-right_shifts));
+            (*res++) = ((*in++) * (1 << (-right_shifts)));
         }
     }
 }
diff --git a/webrtc/modules/audio_coding/neteq/expand.cc b/webrtc/modules/audio_coding/neteq/expand.cc
index 963f4bdb6c..ffe4370e4e 100644
--- a/webrtc/modules/audio_coding/neteq/expand.cc
+++ b/webrtc/modules/audio_coding/neteq/expand.cc
@@ -712,7 +712,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
       x2 = (x1 * x1) >> 14;  // Shift 14 to keep result in Q14.
       x3 = (x1 * x2) >> 14;
       static const int kCoefficients[4] = { -5179, 19931, -16422, 5776 };
-      int32_t temp_sum = kCoefficients[0] << 14;
+      int32_t temp_sum = kCoefficients[0] * 16384;
       temp_sum += kCoefficients[1] * x1;
       temp_sum += kCoefficients[2] * x2;
       temp_sum += kCoefficients[3] * x3;
@@ -751,7 +751,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
       // Calculate (1 - slope) / distortion_lag.
      // Shift |slope| by 7 to Q20 before the division. The result is in Q20.
      parameters.mute_slope = WebRtcSpl_DivW32W16(
-          (8192 - slope) << 7, static_cast<int16_t>(distortion_lag));
+          (8192 - slope) * 128, static_cast<int16_t>(distortion_lag));
      if (parameters.voice_mix_factor <= 13107) {
        // Make sure the mute factor decreases from 1.0 to 0.9 in no more than
        // 6.25 ms.
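Why these rewrites matter (commentary, not part of the patch): C and C++ leave `x << n` undefined when `x` is negative (C11 6.5.7p4), and several of the shifted quantities here can be negative, for example `kCoefficients[0]` and `8192 - slope`, so the old code trips sanitizers such as UBSan. Multiplying by the matching power of two yields the same value, is well defined as long as the product does not overflow, and optimizing compilers emit the same shift instruction for it. A standalone sketch of the expand.cc case, with a made-up `slope` value:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t slope = 9000;  /* hypothetical; anything above 8192 makes the
                              shifted operand negative */

    /* Old form: (8192 - slope) << 7 is undefined behavior here, because
     * the left operand is -808. */

    /* New form: multiplication by 128 (= 1 << 7) is well defined and
     * produces the intended Q20 value. */
    int32_t q20 = (8192 - slope) * 128;

    printf("%" PRId32 "\n", q20);  /* prints -103424 */
    return 0;
}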