Match existing type usage better.

This makes a variety of small changes to synchronize bits of code using different types, remove useless code or casts, and add explicit casts in some places previously doing implicit ones.  For example:

* Change a few type declarations to better match how the majority of code uses those objects.
* Eliminate "< 0" checks for unsigned values.
* Replace "(float)sin(x)", where |x| is also a float, with "sinf(x)", and similar.
* Add casts to uint32_t in many places timestamps were used and the existing code stored signed values into the unsigned objects.
* Remove downcasts when the results would be passed to a larger type, e.g. calling "foo((int16_t)x)" with an int |x| when foo() takes an int instead of an int16_t.
* Similarly, add casts when passing a larger type to a function taking a smaller one.
* Add casts to int16_t when doing something like "int16_t = int16_t + int16_t" as the "+" operation would implicitly upconvert to int, and similar.
* Use "false" instead of "0" for setting a bool.
* Shift a few temp types when doing a multi-stage calculation involving typecasts, so as to put the most logical/semantically correct type possible into the temps.  For example, when doing "int foo = int + int; size_t bar = (size_t)foo + size_t;", we might change |foo| to a size_t and move the cast if it makes more sense for |foo| to be represented as a size_t.

BUG=none
R=andrew@webrtc.org, asapersson@webrtc.org, henrika@webrtc.org, juberti@webrtc.org, kwiberg@webrtc.org
TBR=andrew, asapersson, henrika

Review URL: https://codereview.webrtc.org/1168753002

Cr-Commit-Position: refs/heads/master@{#9419}
This commit is contained in:
Peter Kasting
2015-06-11 12:55:50 -07:00
parent cb180976dd
commit b7e5054414
57 changed files with 175 additions and 151 deletions

View File

@ -77,7 +77,7 @@ class AudioEncoderCngTest : public ::testing::Test {
ASSERT_TRUE(cng_) << "Must call CreateCng() first.";
encoded_info_ = cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
encoded_.size(), &encoded_[0]);
timestamp_ += num_audio_samples_10ms_;
timestamp_ += static_cast<uint32_t>(num_audio_samples_10ms_);
}
// Expect |num_calls| calls to the encoder, all successful. The last call

View File

@ -370,7 +370,7 @@ int16_t WebRtcCng_Encode(CNG_enc_inst* cng_inst, int16_t* speech,
}
if ((i == 93) && (index == 0))
index = 94;
SIDdata[0] = index;
SIDdata[0] = (uint8_t)index;
/* Quantize coefficients with tweak for WebRtc implementation of RFC3389. */
if (inst->enc_nrOfCoefs == WEBRTC_CNG_MAX_LPC_ORDER) {

View File

@ -108,8 +108,8 @@ void WebRtcIlbcfix_CbSearch(
/* Find the highest absolute value to calculate proper
vector scale factor (so that it uses 12 bits) */
temp1 = WebRtcSpl_MaxAbsValueW16(buf, (int16_t)lMem);
temp2 = WebRtcSpl_MaxAbsValueW16(target, (int16_t)lTarget);
temp1 = WebRtcSpl_MaxAbsValueW16(buf, lMem);
temp2 = WebRtcSpl_MaxAbsValueW16(target, lTarget);
if ((temp1>0)&&(temp2>0)) {
temp1 = WEBRTC_SPL_MAX(temp1, temp2);
@ -332,7 +332,8 @@ void WebRtcIlbcfix_CbSearch(
/* Subtract the best codebook vector, according
to measure, from the target vector */
WebRtcSpl_AddAffineVectorToVector(target, pp, (int16_t)(-bestGain), (int32_t)8192, (int16_t)14, (int)lTarget);
WebRtcSpl_AddAffineVectorToVector(target, pp, (int16_t)(-bestGain),
(int32_t)8192, (int16_t)14, lTarget);
/* record quantized gain */
gains[stage+1] = bestGain;

View File

@ -206,7 +206,7 @@ void WebRtcIlbcfix_DecodeImpl(
}
/* Store lag (it is needed if next packet is lost) */
(*iLBCdec_inst).last_lag = (int)lag;
(*iLBCdec_inst).last_lag = lag;
/* copy data and run synthesis filter */
WEBRTC_SPL_MEMCPY_W16(data, decresidual, iLBCdec_inst->blockl);

View File

@ -66,7 +66,7 @@ void WebRtcIlbcfix_DecodeResidual(
/* setup memory */
WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-iLBCdec_inst->state_short_len));
WebRtcSpl_MemSetW16(mem, 0, CB_MEML - iLBCdec_inst->state_short_len);
WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-iLBCdec_inst->state_short_len, decresidual+start_pos,
iLBCdec_inst->state_short_len);
@ -76,8 +76,7 @@ void WebRtcIlbcfix_DecodeResidual(
&decresidual[start_pos+iLBCdec_inst->state_short_len],
iLBC_encbits->cb_index, iLBC_encbits->gain_index,
mem+CB_MEML-ST_MEM_L_TBL,
ST_MEM_L_TBL, (int16_t)diff
);
ST_MEM_L_TBL, diff);
}
else {/* put adaptive part in the beginning */
@ -87,7 +86,7 @@ void WebRtcIlbcfix_DecodeResidual(
meml_gotten = iLBCdec_inst->state_short_len;
WebRtcSpl_MemCpyReversedOrder(mem+CB_MEML-1,
decresidual+start_pos, meml_gotten);
WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-meml_gotten));
WebRtcSpl_MemSetW16(mem, 0, CB_MEML - meml_gotten);
/* construct decoded vector */
@ -153,7 +152,7 @@ void WebRtcIlbcfix_DecodeResidual(
WebRtcSpl_MemCpyReversedOrder(mem+CB_MEML-1,
decresidual+(iLBC_encbits->startIdx-1)*SUBL, meml_gotten);
WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-meml_gotten));
WebRtcSpl_MemSetW16(mem, 0, CB_MEML - meml_gotten);
/* loop over subframes to decode */

View File

@ -193,7 +193,7 @@ void WebRtcIlbcfix_EncodeImpl(
/* setup memory */
WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-iLBCenc_inst->state_short_len));
WebRtcSpl_MemSetW16(mem, 0, CB_MEML - iLBCenc_inst->state_short_len);
WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-iLBCenc_inst->state_short_len,
decresidual+start_pos, iLBCenc_inst->state_short_len);
@ -224,7 +224,7 @@ void WebRtcIlbcfix_EncodeImpl(
meml_gotten = iLBCenc_inst->state_short_len;
WebRtcSpl_MemCpyReversedOrder(&mem[CB_MEML-1], &decresidual[start_pos], meml_gotten);
WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-iLBCenc_inst->state_short_len));
WebRtcSpl_MemSetW16(mem, 0, CB_MEML - iLBCenc_inst->state_short_len);
/* encode subframes */
WebRtcIlbcfix_CbSearch(iLBCenc_inst, iLBCbits_inst->cb_index, iLBCbits_inst->gain_index,
@ -397,7 +397,7 @@ void WebRtcIlbcfix_EncodeImpl(
}
WebRtcSpl_MemCpyReversedOrder(&mem[CB_MEML-1], &decresidual[Nback*SUBL], meml_gotten);
WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-meml_gotten));
WebRtcSpl_MemSetW16(mem, 0, CB_MEML - meml_gotten);
#ifdef SPLIT_10MS
if (iLBCenc_inst->Nback_flag > 0)

View File

@ -96,11 +96,11 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
memmove(enh_period, &enh_period[new_blocks],
(ENH_NBLOCKS_TOT - new_blocks) * sizeof(*enh_period));
k=WebRtcSpl_DownsampleFast(
k = WebRtcSpl_DownsampleFast(
enh_buf+ENH_BUFL-inLen, /* Input samples */
(int16_t)(inLen+ENH_BUFL_FILTEROVERHEAD),
inLen + ENH_BUFL_FILTEROVERHEAD,
downsampled,
(int16_t)(inLen / 2),
inLen / 2,
(int16_t*)WebRtcIlbcfix_kLpFiltCoefs, /* Coefficients in Q12 */
FILTERORDER_DS_PLUS1, /* Length of filter (order-1) */
FACTOR_DS,
@ -114,8 +114,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
regressor = target - 10;
/* scaling */
max16=WebRtcSpl_MaxAbsValueW16(&regressor[-50],
(int16_t)(ENH_BLOCKL_HALF+50-1));
max16 = WebRtcSpl_MaxAbsValueW16(&regressor[-50], ENH_BLOCKL_HALF + 50 - 1);
shifts = WebRtcSpl_GetSizeInBits((uint32_t)(max16 * max16)) - 25;
shifts = WEBRTC_SPL_MAX(0, shifts);
@ -199,7 +198,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
regressor=in+tlag-1;
/* scaling */
max16=WebRtcSpl_MaxAbsValueW16(regressor, (int16_t)(plc_blockl+3-1));
max16 = WebRtcSpl_MaxAbsValueW16(regressor, plc_blockl + 3 - 1);
if (max16>5000)
shifts=2;
else
@ -338,7 +337,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
synt,
&iLBCdec_inst->old_syntdenum[
(iLBCdec_inst->nsub-1)*(LPC_FILTERORDER+1)],
LPC_FILTERORDER+1, (int16_t)lag);
LPC_FILTERORDER+1, lag);
WEBRTC_SPL_MEMCPY_W16(&synt[-LPC_FILTERORDER], &synt[lag-LPC_FILTERORDER],
LPC_FILTERORDER);
@ -349,7 +348,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
enh_bufPtr1, synt,
&iLBCdec_inst->old_syntdenum[
(iLBCdec_inst->nsub-1)*(LPC_FILTERORDER+1)],
LPC_FILTERORDER+1, (int16_t)lag);
LPC_FILTERORDER+1, lag);
WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->syntMem, &synt[lag-LPC_FILTERORDER],
LPC_FILTERORDER);

View File

@ -62,7 +62,7 @@ int16_t WebRtcIlbcfix_FrameClassify(
}
/* Scale to maximum 20 bits in order to allow for the 11 bit window */
maxW32 = WebRtcSpl_MaxValueW32(ssqEn, (int16_t)(iLBCenc_inst->nsub-1));
maxW32 = WebRtcSpl_MaxValueW32(ssqEn, iLBCenc_inst->nsub - 1);
scale = WebRtcSpl_GetSizeInBits(maxW32) - 20;
scale1 = WEBRTC_SPL_MAX(0, scale);
@ -82,7 +82,7 @@ int16_t WebRtcIlbcfix_FrameClassify(
}
/* Extract the best choise of start state */
pos = WebRtcSpl_MaxIndexW32(ssqEn, (int16_t)(iLBCenc_inst->nsub-1)) + 1;
pos = WebRtcSpl_MaxIndexW32(ssqEn, iLBCenc_inst->nsub - 1) + 1;
return(pos);
}

View File

@ -45,7 +45,7 @@ void WebRtcIlbcfix_MyCorr(
loops=dim1-dim2+1;
/* Calculate the cross correlations */
WebRtcSpl_CrossCorrelation(corr, (int16_t*)seq2, seq1, dim2, loops, scale, 1);
WebRtcSpl_CrossCorrelation(corr, seq2, seq1, dim2, loops, scale, 1);
return;
}

View File

@ -42,5 +42,5 @@ void WebRtcIlbcfix_NearestNeighbor(
}
/* Find the minimum square distance */
*index=WebRtcSpl_MinIndexW32(crit, (int16_t)arlength);
*index=WebRtcSpl_MinIndexW32(crit, arlength);
}

View File

@ -75,7 +75,7 @@ void WebRtcIlbcfix_Refiner(
/* Calculate the rescaling factor for the correlation in order to
put the correlation in a int16_t vector instead */
maxtemp=WebRtcSpl_MaxAbsValueW32(corrVecTemp, (int16_t)corrdim);
maxtemp=WebRtcSpl_MaxAbsValueW32(corrVecTemp, corrdim);
scalefact=WebRtcSpl_GetSizeInBits(maxtemp)-15;
@ -97,7 +97,7 @@ void WebRtcIlbcfix_Refiner(
WebRtcIlbcfix_EnhUpsample(corrVecUps,corrVec);
/* Find maximum */
tloc=WebRtcSpl_MaxIndexW32(corrVecUps, (int16_t) (ENH_UPS0*corrdim));
tloc=WebRtcSpl_MaxIndexW32(corrVecUps, ENH_UPS0 * corrdim);
/* make vector can be upsampled without ever running outside
bounds */

View File

@ -100,7 +100,7 @@ void WebRtcIlbcfix_StateConstruct(
WebRtcSpl_MemSetW16(&sampleMa[len + LPC_FILTERORDER], 0, (len - LPC_FILTERORDER));
WebRtcSpl_FilterARFastQ12(
sampleMa, sampleAr,
syntDenum, LPC_FILTERORDER+1, (int16_t)(2*len));
syntDenum, LPC_FILTERORDER+1, 2 * len);
tmp1 = &sampleAr[len-1];
tmp2 = &sampleAr[2*len-1];

View File

@ -71,7 +71,7 @@ void WebRtcIlbcfix_StateSearch(
WebRtcSpl_FilterARFastQ12(
sampleMa, sampleAr,
syntDenum, LPC_FILTERORDER+1, (int16_t)(2*iLBCenc_inst->state_short_len));
syntDenum, LPC_FILTERORDER+1, 2 * iLBCenc_inst->state_short_len);
for(k=0;k<iLBCenc_inst->state_short_len;k++){
sampleAr[k] += sampleAr[k+iLBCenc_inst->state_short_len];

View File

@ -55,11 +55,11 @@ int WebRtcIlbcfix_XcorrCoef(
/* Find scale value and start position */
if (step==1) {
max=WebRtcSpl_MaxAbsValueW16(regressor, (int16_t)(subl+searchLen-1));
max=WebRtcSpl_MaxAbsValueW16(regressor, subl + searchLen - 1);
rp_beg = regressor;
rp_end = &regressor[subl];
} else { /* step==-1 */
max=WebRtcSpl_MaxAbsValueW16(&regressor[-searchLen], (int16_t)(subl+searchLen-1));
max=WebRtcSpl_MaxAbsValueW16(&regressor[-searchLen], subl + searchLen - 1);
rp_beg = &regressor[-1];
rp_end = &regressor[subl-1];
}

View File

@ -374,7 +374,7 @@ int32_t WebRtcIsacfix_UpdateUplinkBwImpl(BwEstimatorstr *bweStr,
/* compute inverse receiving rate for last packet, in Q19 */
numBytesInv = (uint16_t) WebRtcSpl_DivW32W16(
524288 + ((pksize + HEADER_SIZE) >> 1),
pksize + HEADER_SIZE);
(int16_t)(pksize + HEADER_SIZE));
/* 8389 is ~ 1/128000 in Q30 */
byteSecondsPerBit = (uint32_t)(arrTimeDiff * 8389);

View File

@ -447,7 +447,7 @@ int16_t WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
/* inverse pitch filter */
pitchLags_Q7[0] = pitchLags_Q7[1] = pitchLags_Q7[2] = pitchLags_Q7[3] =
((ISACdec_obj->plcstr_obj).stretchLag<<7);
(int16_t)((ISACdec_obj->plcstr_obj).stretchLag<<7);
pitchGains_Q12[3] = ( (ISACdec_obj->plcstr_obj).lastPitchGain_Q12);
pitchGains_Q12[2] = (int16_t)(pitchGains_Q12[3] * 1010 >> 10);
pitchGains_Q12[1] = (int16_t)(pitchGains_Q12[2] * 1010 >> 10);
@ -749,7 +749,8 @@ int16_t WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
k = ( k < ((ISACdec_obj->plcstr_obj).stretchLag - 1) )? (k+1):0;
}
(ISACdec_obj->plcstr_obj).lastPitchLag_Q7 = (ISACdec_obj->plcstr_obj).stretchLag << 7;
(ISACdec_obj->plcstr_obj).lastPitchLag_Q7 =
(int16_t)((ISACdec_obj->plcstr_obj).stretchLag << 7);
/* --- Inverse Pitch Filter --- */

View File

@ -498,7 +498,7 @@ int WebRtcIsacfix_EncodeStoredData(IsacFixEncoderInstance *ISACenc_obj,
{
int ii;
int status;
int16_t BWno = BWnumber;
int16_t BWno = (int16_t)BWnumber;
int stream_length = 0;
int16_t model;

View File

@ -425,7 +425,8 @@ int16_t WebRtcIsacfix_Encode(ISACFIX_MainStruct *ISAC_main_inst,
return -1;
}
write_be16(ISAC_inst->ISACenc_obj.bitstr_obj.stream, stream_len, encoded);
write_be16(ISAC_inst->ISACenc_obj.bitstr_obj.stream, (size_t)stream_len,
encoded);
return stream_len;
}

View File

@ -62,7 +62,8 @@ void get_arrival_time(int current_framesamples, /* samples */
/* everything in samples */
BN_data->sample_count = BN_data->sample_count + current_framesamples;
BN_data->arrival_time += ((packet_size + HeaderSize) * 8 * FS) / (bottleneck + HeaderRate);
BN_data->arrival_time += static_cast<uint32_t>(
((packet_size + HeaderSize) * 8 * FS) / (bottleneck + HeaderRate));
BN_data->send_time += current_framesamples;
if (BN_data->arrival_time < BN_data->sample_count)

View File

@ -68,8 +68,8 @@ void get_arrival_time(int current_framesamples, /* samples */
/* everything in samples */
BN_data->sample_count = BN_data->sample_count + current_framesamples;
BN_data->arrival_time +=
((packet_size + HeaderSize) * 8 * FS) / (bottleneck + HeaderRate);
BN_data->arrival_time += (uint32_t)
(((packet_size + HeaderSize) * 8 * FS) / (bottleneck + HeaderRate));
BN_data->send_time += current_framesamples;
if (BN_data->arrival_time < BN_data->sample_count)

View File

@ -504,7 +504,7 @@ int16_t WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
int16_t streamLenUB = 0;
int16_t streamLen = 0;
int16_t k = 0;
int garbageLen = 0;
uint8_t garbageLen = 0;
int32_t bottleneck = 0;
int16_t bottleneckIdx = 0;
int16_t jitterInfo = 0;
@ -645,7 +645,7 @@ int16_t WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
memcpy(encoded, instLB->ISACencLB_obj.bitstr_obj.stream, streamLenLB);
streamLen = streamLenLB;
if (streamLenUB > 0) {
encoded[streamLenLB] = streamLenUB + 1 + LEN_CHECK_SUM_WORD8;
encoded[streamLenLB] = (uint8_t)(streamLenUB + 1 + LEN_CHECK_SUM_WORD8);
memcpy(&encoded[streamLenLB + 1],
instUB->ISACencUB_obj.bitstr_obj.stream,
streamLenUB);
@ -703,7 +703,7 @@ int16_t WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
}
minBytes = (minBytes > limit) ? limit : minBytes;
garbageLen = (minBytes > streamLen) ? (minBytes - streamLen) : 0;
garbageLen = (minBytes > streamLen) ? (uint8_t)(minBytes - streamLen) : 0;
/* Save data for creation of multiple bit-streams. */
/* If bit-stream too short then add garbage at the end. */

View File

@ -52,7 +52,8 @@ int main(int argc, char* argv[]) {
double starttime, runtime, length_file;
int16_t stream_len = 0;
int16_t declen = 0, lostFrame = 0, declenTC = 0;
int16_t declen = 0, declenTC = 0;
bool lostFrame = false;
int16_t shortdata[SWBFRAMESAMPLES_10ms];
int16_t vaddata[SWBFRAMESAMPLES_10ms * 3];
@ -696,7 +697,7 @@ int main(int argc, char* argv[]) {
if (!lostFrame) {
lostFrame = ((rand() % 100) < packetLossPercent);
} else {
lostFrame = 0;
lostFrame = false;
}
// RED.

View File

@ -98,7 +98,7 @@ int main(int argc, char* argv[]) {
char histFileName[500];
char averageFileName[500];
unsigned int hist[600];
unsigned int tmpSumStreamLen = 0;
double tmpSumStreamLen = 0;
unsigned int packetCntr = 0;
unsigned int lostPacketCntr = 0;
uint8_t payload[1200];
@ -374,7 +374,7 @@ int main(int argc, char* argv[]) {
if (packetCntr == 100) {
// kbps
fprintf(averageFile, "%8.3f ",
(double)tmpSumStreamLen * 8.0 / (30.0 * packetCntr));
tmpSumStreamLen * 8.0 / (30.0 * packetCntr));
packetCntr = 0;
tmpSumStreamLen = 0;
}
@ -493,7 +493,7 @@ int main(int argc, char* argv[]) {
if (averageFile != NULL) {
if (packetCntr > 0) {
fprintf(averageFile, "%8.3f ",
(double)tmpSumStreamLen * 8.0 / (30.0 * packetCntr));
tmpSumStreamLen * 8.0 / (30.0 * packetCntr));
}
fprintf(averageFile, "\n");
fclose(averageFile);

View File

@ -115,9 +115,9 @@ size_t AudioEncoderOpus::MaxEncodedBytes() const {
// Calculate the number of bytes we expect the encoder to produce,
// then multiply by two to give a wide margin for error.
int frame_size_ms = num_10ms_frames_per_packet_ * 10;
int bytes_per_millisecond = bitrate_bps_ / (1000 * 8) + 1;
size_t approx_encoded_bytes =
static_cast<size_t>(frame_size_ms * bytes_per_millisecond);
size_t bytes_per_millisecond =
static_cast<size_t>(bitrate_bps_ / (1000 * 8) + 1);
size_t approx_encoded_bytes = frame_size_ms * bytes_per_millisecond;
return 2 * approx_encoded_bytes;
}
@ -206,7 +206,7 @@ AudioEncoder::EncodedInfo AudioEncoderOpus::EncodeInternal(
CHECK_GE(status, 0); // Fails only if fed invalid data.
input_buffer_.clear();
EncodedInfo info;
info.encoded_bytes = status;
info.encoded_bytes = static_cast<size_t>(status);
info.encoded_timestamp = first_timestamp_in_buffer_;
info.payload_type = payload_type_;
info.send_even_if_empty = true; // Allows Opus to send empty packets.

View File

@ -196,7 +196,7 @@ TEST_P(OpusFecTest, RandomPacketLossTest) {
EncodeABlock();
// Check if payload has FEC.
int16_t fec = WebRtcOpus_PacketHasFec(&bit_stream_[0], encoded_bytes_);
int fec = WebRtcOpus_PacketHasFec(&bit_stream_[0], encoded_bytes_);
// If FEC is disabled or the target packet loss rate is set to 0, there
// should be no FEC in the bit stream.