Match existing type usage better.

This makes a variety of small changes to synchronize bits of code that use different types, remove useless code or casts, and add explicit casts in some places that previously converted implicitly.  For example:

* Change a few type declarations to better match how the majority of code uses those objects.
* Eliminate "< 0" checks on unsigned values, which are always false.
* Replace "(float)sin(x)", where |x| is also a float, with "sinf(x)", and similar.
* Add casts to uint32_t in many places where timestamps are used and the existing code stored signed values into unsigned objects.
* Remove downcasts when the result would be passed to a larger type anyway, e.g. removing the cast in "foo((int16_t)x)" when |x| is an int and foo() takes an int, not an int16_t.
* Similarly, add casts when passing a larger type to a function taking a smaller one.
* Add casts to int16_t when doing something like "int16_t = int16_t + int16_t", since the "+" operation implicitly promotes its operands to int, and similar.
* Use "false" instead of "0" for setting a bool.
* Shift the types of a few temporaries in multi-stage calculations involving typecasts, so as to put the most logical/semantically-correct type possible into the temps.  For example, when doing "int foo = int + int; size_t bar = (size_t)foo + size_t;", we might change |foo| to a size_t and move the cast if it makes more sense for |foo| to be represented as a size_t.  (A short sketch illustrating several of these patterns follows this list.)
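
To make the bullets concrete, here is a minimal C++ sketch. It is not taken from the patch itself; the helper names (ProcessSamples, StoreLag) and all values are hypothetical stand-ins for the kinds of functions the diffs below touch.

#include <math.h>    // For sinf().
#include <stddef.h>
#include <stdint.h>

void ProcessSamples(int) {}      // Hypothetical: takes int, so no downcast needed.
void StoreLag(int16_t) {}        // Hypothetical: takes int16_t, so callers must narrow.

void TypeUsageExamples(float phase, size_t num_samples) {
  // "int16_t = int16_t + int16_t": "+" promotes both operands to int, so
  // assigning the result back to int16_t narrows. Make the narrowing explicit.
  int16_t a = 3;
  int16_t b = 4;
  int16_t sum = static_cast<int16_t>(a + b);

  // Unsigned values are never "< 0"; such checks are dead code and get removed.
  // if (num_samples < 0) { ... }  // Always false for a size_t.

  // When the argument and result are both float, call sinf() directly
  // instead of casting the double-precision sin().
  float s = sinf(phase);  // Was: (float)sin(phase).

  // Timestamps are uint32_t; wrap signed increments in an explicit cast.
  uint32_t timestamp = 0;
  int samples_per_channel = 480;
  timestamp += static_cast<uint32_t>(samples_per_channel);

  // Remove a useless downcast: ProcessSamples() takes an int anyway.
  int x = 17;
  ProcessSamples(x);  // Was: ProcessSamples((int16_t)x).

  // Conversely, add an explicit cast when passing a larger type to a
  // function taking a smaller one.
  StoreLag(static_cast<int16_t>(x));

  // Use false, not 0, to set a bool.
  bool lost_frame = false;  // Was: int16_t lostFrame = 0;

  // Shift a temp's type in a multi-stage calculation: a byte count is
  // semantically a size_t, so type the temp as size_t and cast the inputs
  // rather than casting the finished int result.
  int frame_size_ms = 20;
  int bytes_per_ms = 8;
  size_t header_bytes = 12;
  size_t frame_bytes =
      static_cast<size_t>(frame_size_ms) * static_cast<size_t>(bytes_per_ms);
  size_t total_bytes = frame_bytes + header_bytes;

  (void)sum; (void)num_samples; (void)s; (void)timestamp;
  (void)lost_frame; (void)total_bytes;
}

None of this changes behavior on the values these variables actually hold; it only makes the existing conversions visible and consistent, which is the point of the patch.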

BUG=none
R=andrew@webrtc.org, asapersson@webrtc.org, henrika@webrtc.org, juberti@webrtc.org, kwiberg@webrtc.org
TBR=andrew, asapersson, henrika

Review URL: https://codereview.webrtc.org/1168753002

Cr-Commit-Position: refs/heads/master@{#9419}
Author: Peter Kasting
Date: 2015-06-11 12:55:50 -07:00
Parent: cb180976dd
Commit: b7e5054414
57 changed files with 175 additions and 151 deletions

View File

@ -77,7 +77,7 @@ class AudioEncoderCngTest : public ::testing::Test {
ASSERT_TRUE(cng_) << "Must call CreateCng() first.";
encoded_info_ = cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
encoded_.size(), &encoded_[0]);
timestamp_ += num_audio_samples_10ms_;
timestamp_ += static_cast<uint32_t>(num_audio_samples_10ms_);
}
// Expect |num_calls| calls to the encoder, all successful. The last call

View File

@ -370,7 +370,7 @@ int16_t WebRtcCng_Encode(CNG_enc_inst* cng_inst, int16_t* speech,
}
if ((i == 93) && (index == 0))
index = 94;
SIDdata[0] = index;
SIDdata[0] = (uint8_t)index;
/* Quantize coefficients with tweak for WebRtc implementation of RFC3389. */
if (inst->enc_nrOfCoefs == WEBRTC_CNG_MAX_LPC_ORDER) {

View File

@ -108,8 +108,8 @@ void WebRtcIlbcfix_CbSearch(
/* Find the highest absolute value to calculate proper
vector scale factor (so that it uses 12 bits) */
temp1 = WebRtcSpl_MaxAbsValueW16(buf, (int16_t)lMem);
temp2 = WebRtcSpl_MaxAbsValueW16(target, (int16_t)lTarget);
temp1 = WebRtcSpl_MaxAbsValueW16(buf, lMem);
temp2 = WebRtcSpl_MaxAbsValueW16(target, lTarget);
if ((temp1>0)&&(temp2>0)) {
temp1 = WEBRTC_SPL_MAX(temp1, temp2);
@ -332,7 +332,8 @@ void WebRtcIlbcfix_CbSearch(
/* Subtract the best codebook vector, according
to measure, from the target vector */
WebRtcSpl_AddAffineVectorToVector(target, pp, (int16_t)(-bestGain), (int32_t)8192, (int16_t)14, (int)lTarget);
WebRtcSpl_AddAffineVectorToVector(target, pp, (int16_t)(-bestGain),
(int32_t)8192, (int16_t)14, lTarget);
/* record quantized gain */
gains[stage+1] = bestGain;

View File

@ -206,7 +206,7 @@ void WebRtcIlbcfix_DecodeImpl(
}
/* Store lag (it is needed if next packet is lost) */
(*iLBCdec_inst).last_lag = (int)lag;
(*iLBCdec_inst).last_lag = lag;
/* copy data and run synthesis filter */
WEBRTC_SPL_MEMCPY_W16(data, decresidual, iLBCdec_inst->blockl);

View File

@ -66,7 +66,7 @@ void WebRtcIlbcfix_DecodeResidual(
/* setup memory */
WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-iLBCdec_inst->state_short_len));
WebRtcSpl_MemSetW16(mem, 0, CB_MEML - iLBCdec_inst->state_short_len);
WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-iLBCdec_inst->state_short_len, decresidual+start_pos,
iLBCdec_inst->state_short_len);
@ -76,8 +76,7 @@ void WebRtcIlbcfix_DecodeResidual(
&decresidual[start_pos+iLBCdec_inst->state_short_len],
iLBC_encbits->cb_index, iLBC_encbits->gain_index,
mem+CB_MEML-ST_MEM_L_TBL,
ST_MEM_L_TBL, (int16_t)diff
);
ST_MEM_L_TBL, diff);
}
else {/* put adaptive part in the beginning */
@ -87,7 +86,7 @@ void WebRtcIlbcfix_DecodeResidual(
meml_gotten = iLBCdec_inst->state_short_len;
WebRtcSpl_MemCpyReversedOrder(mem+CB_MEML-1,
decresidual+start_pos, meml_gotten);
WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-meml_gotten));
WebRtcSpl_MemSetW16(mem, 0, CB_MEML - meml_gotten);
/* construct decoded vector */
@ -153,7 +152,7 @@ void WebRtcIlbcfix_DecodeResidual(
WebRtcSpl_MemCpyReversedOrder(mem+CB_MEML-1,
decresidual+(iLBC_encbits->startIdx-1)*SUBL, meml_gotten);
WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-meml_gotten));
WebRtcSpl_MemSetW16(mem, 0, CB_MEML - meml_gotten);
/* loop over subframes to decode */

View File

@ -193,7 +193,7 @@ void WebRtcIlbcfix_EncodeImpl(
/* setup memory */
WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-iLBCenc_inst->state_short_len));
WebRtcSpl_MemSetW16(mem, 0, CB_MEML - iLBCenc_inst->state_short_len);
WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-iLBCenc_inst->state_short_len,
decresidual+start_pos, iLBCenc_inst->state_short_len);
@ -224,7 +224,7 @@ void WebRtcIlbcfix_EncodeImpl(
meml_gotten = iLBCenc_inst->state_short_len;
WebRtcSpl_MemCpyReversedOrder(&mem[CB_MEML-1], &decresidual[start_pos], meml_gotten);
WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-iLBCenc_inst->state_short_len));
WebRtcSpl_MemSetW16(mem, 0, CB_MEML - iLBCenc_inst->state_short_len);
/* encode subframes */
WebRtcIlbcfix_CbSearch(iLBCenc_inst, iLBCbits_inst->cb_index, iLBCbits_inst->gain_index,
@ -397,7 +397,7 @@ void WebRtcIlbcfix_EncodeImpl(
}
WebRtcSpl_MemCpyReversedOrder(&mem[CB_MEML-1], &decresidual[Nback*SUBL], meml_gotten);
WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-meml_gotten));
WebRtcSpl_MemSetW16(mem, 0, CB_MEML - meml_gotten);
#ifdef SPLIT_10MS
if (iLBCenc_inst->Nback_flag > 0)

View File

@ -96,11 +96,11 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
memmove(enh_period, &enh_period[new_blocks],
(ENH_NBLOCKS_TOT - new_blocks) * sizeof(*enh_period));
k=WebRtcSpl_DownsampleFast(
k = WebRtcSpl_DownsampleFast(
enh_buf+ENH_BUFL-inLen, /* Input samples */
(int16_t)(inLen+ENH_BUFL_FILTEROVERHEAD),
inLen + ENH_BUFL_FILTEROVERHEAD,
downsampled,
(int16_t)(inLen / 2),
inLen / 2,
(int16_t*)WebRtcIlbcfix_kLpFiltCoefs, /* Coefficients in Q12 */
FILTERORDER_DS_PLUS1, /* Length of filter (order-1) */
FACTOR_DS,
@ -114,8 +114,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
regressor = target - 10;
/* scaling */
max16=WebRtcSpl_MaxAbsValueW16(&regressor[-50],
(int16_t)(ENH_BLOCKL_HALF+50-1));
max16 = WebRtcSpl_MaxAbsValueW16(&regressor[-50], ENH_BLOCKL_HALF + 50 - 1);
shifts = WebRtcSpl_GetSizeInBits((uint32_t)(max16 * max16)) - 25;
shifts = WEBRTC_SPL_MAX(0, shifts);
@ -199,7 +198,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
regressor=in+tlag-1;
/* scaling */
max16=WebRtcSpl_MaxAbsValueW16(regressor, (int16_t)(plc_blockl+3-1));
max16 = WebRtcSpl_MaxAbsValueW16(regressor, plc_blockl + 3 - 1);
if (max16>5000)
shifts=2;
else
@ -338,7 +337,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
synt,
&iLBCdec_inst->old_syntdenum[
(iLBCdec_inst->nsub-1)*(LPC_FILTERORDER+1)],
LPC_FILTERORDER+1, (int16_t)lag);
LPC_FILTERORDER+1, lag);
WEBRTC_SPL_MEMCPY_W16(&synt[-LPC_FILTERORDER], &synt[lag-LPC_FILTERORDER],
LPC_FILTERORDER);
@ -349,7 +348,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
enh_bufPtr1, synt,
&iLBCdec_inst->old_syntdenum[
(iLBCdec_inst->nsub-1)*(LPC_FILTERORDER+1)],
LPC_FILTERORDER+1, (int16_t)lag);
LPC_FILTERORDER+1, lag);
WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->syntMem, &synt[lag-LPC_FILTERORDER],
LPC_FILTERORDER);

View File

@ -62,7 +62,7 @@ int16_t WebRtcIlbcfix_FrameClassify(
}
/* Scale to maximum 20 bits in order to allow for the 11 bit window */
maxW32 = WebRtcSpl_MaxValueW32(ssqEn, (int16_t)(iLBCenc_inst->nsub-1));
maxW32 = WebRtcSpl_MaxValueW32(ssqEn, iLBCenc_inst->nsub - 1);
scale = WebRtcSpl_GetSizeInBits(maxW32) - 20;
scale1 = WEBRTC_SPL_MAX(0, scale);
@ -82,7 +82,7 @@ int16_t WebRtcIlbcfix_FrameClassify(
}
/* Extract the best choise of start state */
pos = WebRtcSpl_MaxIndexW32(ssqEn, (int16_t)(iLBCenc_inst->nsub-1)) + 1;
pos = WebRtcSpl_MaxIndexW32(ssqEn, iLBCenc_inst->nsub - 1) + 1;
return(pos);
}

View File

@ -45,7 +45,7 @@ void WebRtcIlbcfix_MyCorr(
loops=dim1-dim2+1;
/* Calculate the cross correlations */
WebRtcSpl_CrossCorrelation(corr, (int16_t*)seq2, seq1, dim2, loops, scale, 1);
WebRtcSpl_CrossCorrelation(corr, seq2, seq1, dim2, loops, scale, 1);
return;
}

View File

@ -42,5 +42,5 @@ void WebRtcIlbcfix_NearestNeighbor(
}
/* Find the minimum square distance */
*index=WebRtcSpl_MinIndexW32(crit, (int16_t)arlength);
*index=WebRtcSpl_MinIndexW32(crit, arlength);
}

View File

@ -75,7 +75,7 @@ void WebRtcIlbcfix_Refiner(
/* Calculate the rescaling factor for the correlation in order to
put the correlation in a int16_t vector instead */
maxtemp=WebRtcSpl_MaxAbsValueW32(corrVecTemp, (int16_t)corrdim);
maxtemp=WebRtcSpl_MaxAbsValueW32(corrVecTemp, corrdim);
scalefact=WebRtcSpl_GetSizeInBits(maxtemp)-15;
@ -97,7 +97,7 @@ void WebRtcIlbcfix_Refiner(
WebRtcIlbcfix_EnhUpsample(corrVecUps,corrVec);
/* Find maximum */
tloc=WebRtcSpl_MaxIndexW32(corrVecUps, (int16_t) (ENH_UPS0*corrdim));
tloc=WebRtcSpl_MaxIndexW32(corrVecUps, ENH_UPS0 * corrdim);
/* make vector can be upsampled without ever running outside
bounds */

View File

@ -100,7 +100,7 @@ void WebRtcIlbcfix_StateConstruct(
WebRtcSpl_MemSetW16(&sampleMa[len + LPC_FILTERORDER], 0, (len - LPC_FILTERORDER));
WebRtcSpl_FilterARFastQ12(
sampleMa, sampleAr,
syntDenum, LPC_FILTERORDER+1, (int16_t)(2*len));
syntDenum, LPC_FILTERORDER+1, 2 * len);
tmp1 = &sampleAr[len-1];
tmp2 = &sampleAr[2*len-1];

View File

@ -71,7 +71,7 @@ void WebRtcIlbcfix_StateSearch(
WebRtcSpl_FilterARFastQ12(
sampleMa, sampleAr,
syntDenum, LPC_FILTERORDER+1, (int16_t)(2*iLBCenc_inst->state_short_len));
syntDenum, LPC_FILTERORDER+1, 2 * iLBCenc_inst->state_short_len);
for(k=0;k<iLBCenc_inst->state_short_len;k++){
sampleAr[k] += sampleAr[k+iLBCenc_inst->state_short_len];

View File

@ -55,11 +55,11 @@ int WebRtcIlbcfix_XcorrCoef(
/* Find scale value and start position */
if (step==1) {
max=WebRtcSpl_MaxAbsValueW16(regressor, (int16_t)(subl+searchLen-1));
max=WebRtcSpl_MaxAbsValueW16(regressor, subl + searchLen - 1);
rp_beg = regressor;
rp_end = &regressor[subl];
} else { /* step==-1 */
max=WebRtcSpl_MaxAbsValueW16(&regressor[-searchLen], (int16_t)(subl+searchLen-1));
max=WebRtcSpl_MaxAbsValueW16(&regressor[-searchLen], subl + searchLen - 1);
rp_beg = &regressor[-1];
rp_end = &regressor[subl-1];
}

View File

@ -374,7 +374,7 @@ int32_t WebRtcIsacfix_UpdateUplinkBwImpl(BwEstimatorstr *bweStr,
/* compute inverse receiving rate for last packet, in Q19 */
numBytesInv = (uint16_t) WebRtcSpl_DivW32W16(
524288 + ((pksize + HEADER_SIZE) >> 1),
pksize + HEADER_SIZE);
(int16_t)(pksize + HEADER_SIZE));
/* 8389 is ~ 1/128000 in Q30 */
byteSecondsPerBit = (uint32_t)(arrTimeDiff * 8389);

View File

@ -447,7 +447,7 @@ int16_t WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
/* inverse pitch filter */
pitchLags_Q7[0] = pitchLags_Q7[1] = pitchLags_Q7[2] = pitchLags_Q7[3] =
((ISACdec_obj->plcstr_obj).stretchLag<<7);
(int16_t)((ISACdec_obj->plcstr_obj).stretchLag<<7);
pitchGains_Q12[3] = ( (ISACdec_obj->plcstr_obj).lastPitchGain_Q12);
pitchGains_Q12[2] = (int16_t)(pitchGains_Q12[3] * 1010 >> 10);
pitchGains_Q12[1] = (int16_t)(pitchGains_Q12[2] * 1010 >> 10);
@ -749,7 +749,8 @@ int16_t WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
k = ( k < ((ISACdec_obj->plcstr_obj).stretchLag - 1) )? (k+1):0;
}
(ISACdec_obj->plcstr_obj).lastPitchLag_Q7 = (ISACdec_obj->plcstr_obj).stretchLag << 7;
(ISACdec_obj->plcstr_obj).lastPitchLag_Q7 =
(int16_t)((ISACdec_obj->plcstr_obj).stretchLag << 7);
/* --- Inverse Pitch Filter --- */

View File

@ -498,7 +498,7 @@ int WebRtcIsacfix_EncodeStoredData(IsacFixEncoderInstance *ISACenc_obj,
{
int ii;
int status;
int16_t BWno = BWnumber;
int16_t BWno = (int16_t)BWnumber;
int stream_length = 0;
int16_t model;

View File

@ -425,7 +425,8 @@ int16_t WebRtcIsacfix_Encode(ISACFIX_MainStruct *ISAC_main_inst,
return -1;
}
write_be16(ISAC_inst->ISACenc_obj.bitstr_obj.stream, stream_len, encoded);
write_be16(ISAC_inst->ISACenc_obj.bitstr_obj.stream, (size_t)stream_len,
encoded);
return stream_len;
}

View File

@ -62,7 +62,8 @@ void get_arrival_time(int current_framesamples, /* samples */
/* everything in samples */
BN_data->sample_count = BN_data->sample_count + current_framesamples;
BN_data->arrival_time += ((packet_size + HeaderSize) * 8 * FS) / (bottleneck + HeaderRate);
BN_data->arrival_time += static_cast<uint32_t>(
((packet_size + HeaderSize) * 8 * FS) / (bottleneck + HeaderRate));
BN_data->send_time += current_framesamples;
if (BN_data->arrival_time < BN_data->sample_count)

View File

@ -68,8 +68,8 @@ void get_arrival_time(int current_framesamples, /* samples */
/* everything in samples */
BN_data->sample_count = BN_data->sample_count + current_framesamples;
BN_data->arrival_time +=
((packet_size + HeaderSize) * 8 * FS) / (bottleneck + HeaderRate);
BN_data->arrival_time += (uint32_t)
(((packet_size + HeaderSize) * 8 * FS) / (bottleneck + HeaderRate));
BN_data->send_time += current_framesamples;
if (BN_data->arrival_time < BN_data->sample_count)

View File

@ -504,7 +504,7 @@ int16_t WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
int16_t streamLenUB = 0;
int16_t streamLen = 0;
int16_t k = 0;
int garbageLen = 0;
uint8_t garbageLen = 0;
int32_t bottleneck = 0;
int16_t bottleneckIdx = 0;
int16_t jitterInfo = 0;
@ -645,7 +645,7 @@ int16_t WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
memcpy(encoded, instLB->ISACencLB_obj.bitstr_obj.stream, streamLenLB);
streamLen = streamLenLB;
if (streamLenUB > 0) {
encoded[streamLenLB] = streamLenUB + 1 + LEN_CHECK_SUM_WORD8;
encoded[streamLenLB] = (uint8_t)(streamLenUB + 1 + LEN_CHECK_SUM_WORD8);
memcpy(&encoded[streamLenLB + 1],
instUB->ISACencUB_obj.bitstr_obj.stream,
streamLenUB);
@ -703,7 +703,7 @@ int16_t WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
}
minBytes = (minBytes > limit) ? limit : minBytes;
garbageLen = (minBytes > streamLen) ? (minBytes - streamLen) : 0;
garbageLen = (minBytes > streamLen) ? (uint8_t)(minBytes - streamLen) : 0;
/* Save data for creation of multiple bit-streams. */
/* If bit-stream too short then add garbage at the end. */

View File

@ -52,7 +52,8 @@ int main(int argc, char* argv[]) {
double starttime, runtime, length_file;
int16_t stream_len = 0;
int16_t declen = 0, lostFrame = 0, declenTC = 0;
int16_t declen = 0, declenTC = 0;
bool lostFrame = false;
int16_t shortdata[SWBFRAMESAMPLES_10ms];
int16_t vaddata[SWBFRAMESAMPLES_10ms * 3];
@ -696,7 +697,7 @@ int main(int argc, char* argv[]) {
if (!lostFrame) {
lostFrame = ((rand() % 100) < packetLossPercent);
} else {
lostFrame = 0;
lostFrame = false;
}
// RED.

View File

@ -98,7 +98,7 @@ int main(int argc, char* argv[]) {
char histFileName[500];
char averageFileName[500];
unsigned int hist[600];
unsigned int tmpSumStreamLen = 0;
double tmpSumStreamLen = 0;
unsigned int packetCntr = 0;
unsigned int lostPacketCntr = 0;
uint8_t payload[1200];
@ -374,7 +374,7 @@ int main(int argc, char* argv[]) {
if (packetCntr == 100) {
// kbps
fprintf(averageFile, "%8.3f ",
(double)tmpSumStreamLen * 8.0 / (30.0 * packetCntr));
tmpSumStreamLen * 8.0 / (30.0 * packetCntr));
packetCntr = 0;
tmpSumStreamLen = 0;
}
@ -493,7 +493,7 @@ int main(int argc, char* argv[]) {
if (averageFile != NULL) {
if (packetCntr > 0) {
fprintf(averageFile, "%8.3f ",
(double)tmpSumStreamLen * 8.0 / (30.0 * packetCntr));
tmpSumStreamLen * 8.0 / (30.0 * packetCntr));
}
fprintf(averageFile, "\n");
fclose(averageFile);

View File

@ -115,9 +115,9 @@ size_t AudioEncoderOpus::MaxEncodedBytes() const {
// Calculate the number of bytes we expect the encoder to produce,
// then multiply by two to give a wide margin for error.
int frame_size_ms = num_10ms_frames_per_packet_ * 10;
int bytes_per_millisecond = bitrate_bps_ / (1000 * 8) + 1;
size_t approx_encoded_bytes =
static_cast<size_t>(frame_size_ms * bytes_per_millisecond);
size_t bytes_per_millisecond =
static_cast<size_t>(bitrate_bps_ / (1000 * 8) + 1);
size_t approx_encoded_bytes = frame_size_ms * bytes_per_millisecond;
return 2 * approx_encoded_bytes;
}
@ -206,7 +206,7 @@ AudioEncoder::EncodedInfo AudioEncoderOpus::EncodeInternal(
CHECK_GE(status, 0); // Fails only if fed invalid data.
input_buffer_.clear();
EncodedInfo info;
info.encoded_bytes = status;
info.encoded_bytes = static_cast<size_t>(status);
info.encoded_timestamp = first_timestamp_in_buffer_;
info.payload_type = payload_type_;
info.send_even_if_empty = true; // Allows Opus to send empty packets.

View File

@ -196,7 +196,7 @@ TEST_P(OpusFecTest, RandomPacketLossTest) {
EncodeABlock();
// Check if payload has FEC.
int16_t fec = WebRtcOpus_PacketHasFec(&bit_stream_[0], encoded_bytes_);
int fec = WebRtcOpus_PacketHasFec(&bit_stream_[0], encoded_bytes_);
// If FEC is disabled or the target packet loss rate is set to 0, there
// should be no FEC in the bit stream.

View File

@ -461,8 +461,8 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
// |audio_frame|.
uint32_t playout_timestamp = 0;
if (GetPlayoutTimestamp(&playout_timestamp)) {
audio_frame->timestamp_ =
playout_timestamp - audio_frame->samples_per_channel_;
audio_frame->timestamp_ = playout_timestamp -
static_cast<uint32_t>(audio_frame->samples_per_channel_);
} else {
// Remain 0 until we have a valid |playout_timestamp|.
audio_frame->timestamp_ = 0;

View File

@ -79,7 +79,7 @@ Packet* AcmSendTest::NextPacket() {
}
int32_t encoded_bytes = acm_->Add10MsAudio(input_frame_);
EXPECT_GE(encoded_bytes, 0);
input_frame_.timestamp_ += input_block_size_samples_;
input_frame_.timestamp_ += static_cast<uint32_t>(input_block_size_samples_);
if (encoded_bytes > 0) {
// Encoded packet received.
return CreatePacket();

View File

@ -92,7 +92,7 @@ Packet* AcmSendTestOldApi::NextPacket() {
}
data_to_send_ = false;
CHECK_GE(acm_->Add10MsData(input_frame_), 0);
input_frame_.timestamp_ += input_block_size_samples_;
input_frame_.timestamp_ += static_cast<uint32_t>(input_block_size_samples_);
if (data_to_send_) {
// Encoded packet received.
return CreatePacket();

View File

@ -431,8 +431,8 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
if (!down_mix && !resample) {
// No pre-processing is required.
expected_in_ts_ += in_frame.samples_per_channel_;
expected_codec_ts_ += in_frame.samples_per_channel_;
expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
expected_codec_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
*ptr_out = &in_frame;
return 0;
}
@ -477,8 +477,9 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
codec_manager_.CurrentEncoder()->SampleRateHz();
}
expected_codec_ts_ += preprocess_frame_.samples_per_channel_;
expected_in_ts_ += in_frame.samples_per_channel_;
expected_codec_ts_ +=
static_cast<uint32_t>(preprocess_frame_.samples_per_channel_);
expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
return 0;
}

View File

@ -144,7 +144,7 @@ class InitialPlayoutDelayTest : public ::testing::Test {
acm_b_->SetInitialPlayoutDelay(initial_delay_ms);
while (rms < kAmp / 2) {
in_audio_frame.timestamp_ = timestamp;
timestamp += in_audio_frame.samples_per_channel_;
timestamp += static_cast<uint32_t>(in_audio_frame.samples_per_channel_);
ASSERT_GE(acm_a_->Add10MsData(in_audio_frame), 0);
ASSERT_EQ(0, acm_b_->PlayoutData10Ms(codec.plfreq, &out_audio_frame));
rms = FrameRms(out_audio_frame);

View File

@ -239,7 +239,7 @@ void BackgroundNoise::SaveParameters(size_t channel,
parameters.low_energy_update_threshold = 0;
// Normalize residual_energy to 29 or 30 bits before sqrt.
int norm_shift = WebRtcSpl_NormW32(residual_energy) - 1;
int16_t norm_shift = WebRtcSpl_NormW32(residual_energy) - 1;
if (norm_shift & 0x1) {
norm_shift -= 1; // Even number of shifts required.
}
@ -251,7 +251,8 @@ void BackgroundNoise::SaveParameters(size_t channel,
// Add 13 to the |scale_shift_|, since the random numbers table is in
// Q13.
// TODO(hlundin): Move the "13" to where the |scale_shift_| is used?
parameters.scale_shift = 13 + ((kLogResidualLength + norm_shift) / 2);
parameters.scale_shift =
static_cast<int16_t>(13 + ((kLogResidualLength + norm_shift) / 2));
initialized_ = true;
}

View File

@ -79,7 +79,7 @@ class BackgroundNoise {
static const int kVecLen = 256;
static const int kLogVecLen = 8; // log2(kVecLen).
static const int kResidualLength = 64;
static const int kLogResidualLength = 6; // log2(kResidualLength)
static const int16_t kLogResidualLength = 6; // log2(kResidualLength)
struct ChannelParameters {
// Constructor.

View File

@ -67,7 +67,8 @@ Operations DecisionLogicNormal::GetDecisionSpecialized(
return kNormal;
}
const uint32_t five_seconds_samples = 5 * 8000 * fs_mult_;
const uint32_t five_seconds_samples =
static_cast<uint32_t>(5 * 8000 * fs_mult_);
// Check if the required packet is available.
if (target_timestamp == available_timestamp) {
return ExpectedPacketAvailable(prev_mode, play_dtmf);
@ -87,10 +88,11 @@ Operations DecisionLogicNormal::CngOperation(Modes prev_mode,
uint32_t target_timestamp,
uint32_t available_timestamp) {
// Signed difference between target and available timestamp.
int32_t timestamp_diff = (generated_noise_samples_ + target_timestamp) -
available_timestamp;
int32_t optimal_level_samp =
(delay_manager_->TargetLevel() * packet_length_samples_) >> 8;
int32_t timestamp_diff = static_cast<int32_t>(
static_cast<uint32_t>(generated_noise_samples_ + target_timestamp) -
available_timestamp);
int32_t optimal_level_samp = static_cast<int32_t>(
(delay_manager_->TargetLevel() * packet_length_samples_) >> 8);
int32_t excess_waiting_time_samp = -timestamp_diff - optimal_level_samp;
if (excess_waiting_time_samp > optimal_level_samp / 2) {
@ -182,11 +184,11 @@ Operations DecisionLogicNormal::FuturePacketAvailable(
// safety precaution), but make sure that the number of samples in buffer
// is no higher than 4 times the optimal level. (Note that TargetLevel()
// is in Q8.)
int32_t timestamp_diff = (generated_noise_samples_ + target_timestamp) -
available_timestamp;
if (timestamp_diff >= 0 ||
if (static_cast<uint32_t>(generated_noise_samples_ + target_timestamp) >=
available_timestamp ||
cur_size_samples >
4 * ((delay_manager_->TargetLevel() * packet_length_samples_) >> 8)) {
((delay_manager_->TargetLevel() * packet_length_samples_) >> 8) *
4) {
// Time to play this new packet.
return kNormal;
} else {

View File

@ -227,7 +227,7 @@ int Expand::Process(AudioMultiVector* output) {
if (mix_factor_increment != 0) {
parameters.current_voice_mix_factor = parameters.voice_mix_factor;
}
int temp_scale = 16384 - parameters.current_voice_mix_factor;
int16_t temp_scale = 16384 - parameters.current_voice_mix_factor;
WebRtcSpl_ScaleAndAddVectorsWithRound(
voiced_vector + temp_lenght, parameters.current_voice_mix_factor,
unvoiced_vector + temp_lenght, temp_scale, 14,
@ -669,7 +669,8 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
// even, which is suitable for the sqrt.
unvoiced_scale += ((unvoiced_scale & 0x1) ^ 0x1);
unvoiced_energy = WEBRTC_SPL_SHIFT_W32(unvoiced_energy, unvoiced_scale);
int32_t unvoiced_gain = WebRtcSpl_SqrtFloor(unvoiced_energy);
int16_t unvoiced_gain =
static_cast<int16_t>(WebRtcSpl_SqrtFloor(unvoiced_energy));
parameters.ar_gain_scale = 13
+ (unvoiced_scale + 7 - unvoiced_prescale) / 2;
parameters.ar_gain = unvoiced_gain;
@ -709,8 +710,9 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
// the division.
// Shift the denominator from Q13 to Q5 before the division. The result of
// the division will then be in Q20.
int16_t temp_ratio = WebRtcSpl_DivW32W16((slope - 8192) << 12,
(distortion_lag * slope) >> 8);
int16_t temp_ratio = WebRtcSpl_DivW32W16(
(slope - 8192) << 12,
static_cast<int16_t>((distortion_lag * slope) >> 8));
if (slope > 14746) {
// slope > 1.8.
// Divide by 2, with proper rounding.
@ -723,8 +725,8 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
} else {
// Calculate (1 - slope) / distortion_lag.
// Shift |slope| by 7 to Q20 before the division. The result is in Q20.
parameters.mute_slope = WebRtcSpl_DivW32W16((8192 - slope) << 7,
distortion_lag);
parameters.mute_slope = WebRtcSpl_DivW32W16(
(8192 - slope) << 7, static_cast<int16_t>(distortion_lag));
if (parameters.voice_mix_factor <= 13107) {
// Make sure the mute factor decreases from 1.0 to 0.9 in no more than
// 6.25 ms.
@ -810,7 +812,8 @@ int16_t Expand::Correlation(const int16_t* input, size_t input_length,
// Normalize and move data from 32-bit to 16-bit vector.
int32_t max_correlation = WebRtcSpl_MaxAbsValueW32(correlation,
kNumCorrelationLags);
int16_t norm_shift2 = std::max(18 - WebRtcSpl_NormW32(max_correlation), 0);
int16_t norm_shift2 = static_cast<int16_t>(
std::max(18 - WebRtcSpl_NormW32(max_correlation), 0));
WebRtcSpl_VectorBitShiftW32ToW16(output, kNumCorrelationLags, correlation,
norm_shift2);
// Total scale factor (right shifts) of correlation value.
@ -928,7 +931,7 @@ void Expand::GenerateBackgroundNoise(int16_t* random_vector,
}
}
void Expand::GenerateRandomVector(int seed_increment,
void Expand::GenerateRandomVector(int16_t seed_increment,
size_t length,
int16_t* random_vector) {
// TODO(turajs): According to hlundin The loop should not be needed. Should be

View File

@ -66,7 +66,7 @@ class Expand {
protected:
static const int kMaxConsecutiveExpands = 200;
void GenerateRandomVector(int seed_increment,
void GenerateRandomVector(int16_t seed_increment,
size_t length,
int16_t* random_vector);

View File

@ -108,10 +108,11 @@ int Merge::Process(int16_t* input, size_t input_length,
// Set a suitable muting slope (Q20). 0.004 for NB, 0.002 for WB,
// and so on.
int increment = 4194 / fs_mult_;
*external_mute_factor = DspHelper::RampSignal(input_channel,
interpolation_length,
*external_mute_factor,
increment);
*external_mute_factor =
static_cast<int16_t>(DspHelper::RampSignal(input_channel,
interpolation_length,
*external_mute_factor,
increment));
DspHelper::UnmuteSignal(&input_channel[interpolation_length],
input_length_per_channel - interpolation_length,
external_mute_factor, increment,
@ -125,7 +126,8 @@ int Merge::Process(int16_t* input, size_t input_length,
}
// Do overlap and mix linearly.
int increment = 16384 / (interpolation_length + 1); // In Q14.
int16_t increment =
static_cast<int16_t>(16384 / (interpolation_length + 1)); // In Q14.
int16_t mute_factor = 16384 - increment;
memmove(temp_data, expanded_channel,
sizeof(int16_t) * best_correlation_index);
@ -246,7 +248,8 @@ int16_t Merge::SignalScaling(const int16_t* input, int input_length,
// energy_expanded / energy_input is in Q14.
energy_expanded = WEBRTC_SPL_SHIFT_W32(energy_expanded, temp_shift + 14);
// Calculate sqrt(energy_expanded / energy_input) in Q14.
mute_factor = WebRtcSpl_SqrtFloor((energy_expanded / energy_input) << 14);
mute_factor = static_cast<int16_t>(
WebRtcSpl_SqrtFloor((energy_expanded / energy_input) << 14));
} else {
// Set to 1 (in Q14) when |expanded| has higher energy than |input|.
mute_factor = 16384;

View File

@ -788,7 +788,8 @@ int NetEqImpl::GetAudioInternal(size_t max_length, int16_t* output,
}
case kAudioRepetitionIncreaseTimestamp: {
// TODO(hlundin): Write test for this.
sync_buffer_->IncreaseEndTimestamp(output_size_samples_);
sync_buffer_->IncreaseEndTimestamp(
static_cast<uint32_t>(output_size_samples_));
// Skipping break on purpose. Execution should move on into the
// next case.
FALLTHROUGH();
@ -881,7 +882,7 @@ int NetEqImpl::GetAudioInternal(size_t max_length, int16_t* output,
}
} else {
// Use dead reckoning to estimate the |playout_timestamp_|.
playout_timestamp_ += output_size_samples_;
playout_timestamp_ += static_cast<uint32_t>(output_size_samples_);
}
if (decode_return_value) return decode_return_value;
@ -940,9 +941,10 @@ int NetEqImpl::GetDecision(Operations* operation,
}
// Check if it is time to play a DTMF event.
if (dtmf_buffer_->GetEvent(end_timestamp +
decision_logic_->generated_noise_samples(),
dtmf_event)) {
if (dtmf_buffer_->GetEvent(
static_cast<uint32_t>(
end_timestamp + decision_logic_->generated_noise_samples()),
dtmf_event)) {
*play_dtmf = true;
}
@ -1030,7 +1032,8 @@ int NetEqImpl::GetDecision(Operations* operation,
if (decision_logic_->generated_noise_samples() > 0 &&
last_mode_ != kModeDtmf) {
// Make a jump in timestamp due to the recently played comfort noise.
uint32_t timestamp_jump = decision_logic_->generated_noise_samples();
uint32_t timestamp_jump =
static_cast<uint32_t>(decision_logic_->generated_noise_samples());
sync_buffer_->IncreaseEndTimestamp(timestamp_jump);
timestamp_ += timestamp_jump;
}
@ -1224,7 +1227,8 @@ int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
if (*decoded_length < 0) {
// Error returned from the decoder.
*decoded_length = 0;
sync_buffer_->IncreaseEndTimestamp(decoder_frame_length_);
sync_buffer_->IncreaseEndTimestamp(
static_cast<uint32_t>(decoder_frame_length_));
int error_code = 0;
if (decoder)
error_code = decoder->ErrorCode();
@ -1719,7 +1723,8 @@ int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf) {
// algorithm_buffer_->PopFront(sync_buffer_->FutureLength());
// }
sync_buffer_->IncreaseEndTimestamp(output_size_samples_);
sync_buffer_->IncreaseEndTimestamp(
static_cast<uint32_t>(output_size_samples_));
expand_->Reset();
last_mode_ = kModeDtmf;
@ -1749,7 +1754,7 @@ void NetEqImpl::DoAlternativePlc(bool increase_timestamp) {
stats_.AddZeros(length);
}
if (increase_timestamp) {
sync_buffer_->IncreaseEndTimestamp(length);
sync_buffer_->IncreaseEndTimestamp(static_cast<uint32_t>(length));
}
expand_->Reset();
}

View File

@ -343,7 +343,8 @@ void NetEqDecodingTest::Process(int* out_len) {
ASSERT_EQ(0, neteq_->InsertPacket(
rtp_header, packet_->payload(),
packet_->payload_length_bytes(),
packet_->time_ms() * (output_sample_rate_ / 1000)));
static_cast<uint32_t>(
packet_->time_ms() * (output_sample_rate_ / 1000))));
}
// Get next packet.
packet_.reset(rtp_source_->NextPacket());

View File

@ -50,7 +50,7 @@ int Normal::Process(const int16_t* input,
// fs_shift = log2(fs_mult), rounded down.
// Note that |fs_shift| is not "exact" for 48 kHz.
// TODO(hlundin): Investigate this further.
const int fs_shift = 30 - WebRtcSpl_NormW32(fs_mult);
const int fs_shift = 30 - WebRtcSpl_NormW32(static_cast<int32_t>(fs_mult));
// Check if last RecOut call resulted in an Expand. If so, we have to take
// care of some cross-fading and unmuting.
@ -99,14 +99,15 @@ int Normal::Process(const int16_t* input,
// We want background_noise_.energy() / energy in Q14.
int32_t bgn_energy =
background_noise_.Energy(channel_ix) << (scaling+14);
int16_t energy_scaled = energy << scaling;
int16_t ratio = WebRtcSpl_DivW32W16(bgn_energy, energy_scaled);
mute_factor = WebRtcSpl_SqrtFloor(static_cast<int32_t>(ratio) << 14);
int16_t energy_scaled = static_cast<int16_t>(energy << scaling);
int32_t ratio = WebRtcSpl_DivW32W16(bgn_energy, energy_scaled);
mute_factor = WebRtcSpl_SqrtFloor(ratio << 14);
} else {
mute_factor = 16384; // 1.0 in Q14.
}
if (mute_factor > external_mute_factor_array[channel_ix]) {
external_mute_factor_array[channel_ix] = std::min(mute_factor, 16384);
external_mute_factor_array[channel_ix] =
static_cast<int16_t>(std::min(mute_factor, 16384));
}
// If muted increase by 0.64 for every 20 ms (NB/WB 0.0040/0.0020 in Q14).
@ -118,10 +119,11 @@ int Normal::Process(const int16_t* input,
int32_t scaled_signal = (*output)[channel_ix][i] *
external_mute_factor_array[channel_ix];
// Shift 14 with proper rounding.
(*output)[channel_ix][i] = (scaled_signal + 8192) >> 14;
(*output)[channel_ix][i] =
static_cast<int16_t>((scaled_signal + 8192) >> 14);
// Increase mute_factor towards 16384.
external_mute_factor_array[channel_ix] =
std::min(external_mute_factor_array[channel_ix] + increment, 16384);
external_mute_factor_array[channel_ix] = static_cast<int16_t>(std::min(
external_mute_factor_array[channel_ix] + increment, 16384));
}
// Interpolate the expanded data into the new vector.
@ -135,8 +137,8 @@ int Normal::Process(const int16_t* input,
assert(channel_ix < output->Channels());
assert(i < output->Size());
(*output)[channel_ix][i] =
(fraction * (*output)[channel_ix][i] +
(32 - fraction) * expanded[channel_ix][i] + 8) >> 5;
static_cast<int16_t>((fraction * (*output)[channel_ix][i] +
(32 - fraction) * expanded[channel_ix][i] + 8) >> 5);
fraction += increment;
}
}
@ -187,10 +189,11 @@ int Normal::Process(const int16_t* input,
int32_t scaled_signal = (*output)[channel_ix][i] *
external_mute_factor_array[channel_ix];
// Shift 14 with proper rounding.
(*output)[channel_ix][i] = (scaled_signal + 8192) >> 14;
(*output)[channel_ix][i] =
static_cast<int16_t>((scaled_signal + 8192) >> 14);
// Increase mute_factor towards 16384.
external_mute_factor_array[channel_ix] =
std::min(16384, external_mute_factor_array[channel_ix] + increment);
external_mute_factor_array[channel_ix] = static_cast<int16_t>(std::min(
16384, external_mute_factor_array[channel_ix] + increment));
}
}
}

View File

@ -83,7 +83,7 @@ void StatisticsCalculator::LostSamples(int num_samples) {
}
void StatisticsCalculator::IncreaseCounter(int num_samples, int fs_hz) {
timestamps_since_last_report_ += num_samples;
timestamps_since_last_report_ += static_cast<uint32_t>(num_samples);
if (timestamps_since_last_report_ >
static_cast<uint32_t>(fs_hz * kMaxReportPeriod)) {
lost_timestamps_ = 0;
@ -121,7 +121,8 @@ void StatisticsCalculator::GetNetworkStatistics(
}
stats->added_zero_samples = added_zero_samples_;
stats->current_buffer_size_ms = num_samples_in_buffers * 1000 / fs_hz;
stats->current_buffer_size_ms =
static_cast<uint16_t>(num_samples_in_buffers * 1000 / fs_hz);
const int ms_per_packet = decision_logic.packet_length_samples() /
(fs_hz / 1000);
stats->preferred_buffer_size_ms = (delay_manager.TargetLevel() >> 8) *
@ -167,14 +168,14 @@ void StatisticsCalculator::WaitingTimes(std::vector<int>* waiting_times) {
ResetWaitingTimeStatistics();
}
int StatisticsCalculator::CalculateQ14Ratio(uint32_t numerator,
uint32_t denominator) {
uint16_t StatisticsCalculator::CalculateQ14Ratio(uint32_t numerator,
uint32_t denominator) {
if (numerator == 0) {
return 0;
} else if (numerator < denominator) {
// Ratio must be smaller than 1 in Q14.
assert((numerator << 14) / denominator < (1 << 14));
return (numerator << 14) / denominator;
return static_cast<uint16_t>((numerator << 14) / denominator);
} else {
// Will not produce a ratio larger than 1, since this is probably an error.
return 1 << 14;

View File

@ -91,7 +91,7 @@ class StatisticsCalculator {
static const int kLenWaitingTimes = 100;
// Calculates numerator / denominator, and returns the value in Q14.
static int CalculateQ14Ratio(uint32_t numerator, uint32_t denominator);
static uint16_t CalculateQ14Ratio(uint32_t numerator, uint32_t denominator);
uint32_t preemptive_samples_;
uint32_t accelerate_samples_;

View File

@ -621,8 +621,8 @@ int main(int argc, char* argv[]) {
}
/* write RTP packet to file */
length = htons(12 + enc_len + 8);
plen = htons(12 + enc_len);
length = htons(static_cast<unsigned short>(12 + enc_len + 8));
plen = htons(static_cast<unsigned short>(12 + enc_len));
offset = (uint32_t)sendtime; //(timestamp/(fs/1000));
offset = htonl(offset);
if (fwrite(&length, 2, 1, out_file) != 1) {
@ -673,7 +673,7 @@ int main(int argc, char* argv[]) {
memmove(&rtp_data[RTPheaderLen + red_len[0]], &rtp_data[12], enc_len);
memcpy(&rtp_data[RTPheaderLen], red_data, red_len[0]);
red_len[1] = enc_len;
red_len[1] = static_cast<uint16_t>(enc_len);
red_TS[1] = timestamp;
if (vad)
red_PT[1] = payloadType;
@ -689,7 +689,7 @@ int main(int argc, char* argv[]) {
memmove(&rtp_data[RTPheaderLen - 4], &rtp_data[12], enc_len);
// memcpy(&rtp_data[RTPheaderLen], red_data, red_len[0]);
red_len[1] = enc_len;
red_len[1] = static_cast<uint16_t>(enc_len);
red_TS[1] = timestamp;
if (vad)
red_PT[1] = payloadType;
@ -714,8 +714,8 @@ int main(int argc, char* argv[]) {
do {
#endif // MULTIPLE_SAME_TIMESTAMP
/* write RTP packet to file */
length = htons(12 + enc_len + 8);
plen = htons(12 + enc_len);
length = htons(static_cast<unsigned short>(12 + enc_len + 8));
plen = htons(static_cast<unsigned short>(12 + enc_len));
offset = (uint32_t)sendtime;
//(timestamp/(fs/1000));
offset = htonl(offset);