Address Windows 64-bit warnings.

R=henrik.lundin@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/2203004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@4803 4adac7df-926f-26a2-2b94-8c16560cd09d
Author: turaj@webrtc.org
Date:   2013-09-20 16:25:28 +00:00
Parent: 0e63e76781
Commit: 362a55e7b0
30 changed files with 179 additions and 170 deletions
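The change is mechanical throughout the diff below: wherever a 64-bit size_t value was implicitly narrowed to int (or int16_t), either the conversion is made explicit with a static_cast or the variable/parameter type is widened to size_t, so that MSVC warning C4267 (size_t to int truncation) no longer fires in Win64 builds and the blanket msvs_disabled_warnings suppression of 4267 can be dropped from the gyp targets. As a rough illustration only (the function and types below are hypothetical and not part of this commit), the pattern looks like this:

#include <cstdint>
#include <vector>

// Hypothetical example of the pattern applied in this commit: v.size() is a
// 64-bit size_t on Win64, so returning it from an int-returning function
// triggers MSVC warning C4267. Making the narrowing explicit silences the
// warning and documents the intent.
int NumSamples(const std::vector<std::int16_t>& v) {
  return static_cast<int>(v.size());
}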


@@ -16,13 +16,13 @@ namespace webrtc {
 
 Accelerate::ReturnCodes Accelerate::Process(
     const int16_t* input,
-    int input_length,
+    size_t input_length,
     AudioMultiVector<int16_t>* output,
     int16_t* length_change_samples) {
   // Input length must be (almost) 30 ms.
   static const int k15ms = 120;  // 15 ms = 120 samples at 8 kHz sample rate.
-  if (num_channels_ == 0 ||
-      input_length / num_channels_ < (2 * k15ms - 1) * fs_mult_) {
+  if (num_channels_ == 0 || static_cast<int>(input_length) / num_channels_ <
+      (2 * k15ms - 1) * fs_mult_) {
     // Length of input data too short to do accelerate. Simply move all data
     // from input to output.
     output->PushBackInterleaved(input, input_length);
@@ -32,7 +32,7 @@ Accelerate::ReturnCodes Accelerate::Process(
                             length_change_samples);
 }
 
-void Accelerate::SetParametersForPassiveSpeech(int /*len*/,
+void Accelerate::SetParametersForPassiveSpeech(size_t /*len*/,
                                                int16_t* best_correlation,
                                                int* /*peak_index*/) const {
   // When the signal does not contain any active speech, the correlation does
@@ -41,7 +41,7 @@ void Accelerate::SetParametersForPassiveSpeech(int /*len*/,
 }
 
 Accelerate::ReturnCodes Accelerate::CheckCriteriaAndStretch(
-    const int16_t* input, int input_length, size_t peak_index,
+    const int16_t* input, size_t input_length, size_t peak_index,
     int16_t best_correlation, bool active_speech,
     AudioMultiVector<int16_t>* output) const {
   // Check for strong correlation or passive speech.


@@ -42,21 +42,21 @@ class Accelerate : public TimeStretch {
   // is provided in the output |length_change_samples|. The method returns
   // the outcome of the operation as an enumerator value.
   ReturnCodes Process(const int16_t* input,
-                      int input_length,
+                      size_t input_length,
                       AudioMultiVector<int16_t>* output,
                       int16_t* length_change_samples);
 
  protected:
   // Sets the parameters |best_correlation| and |peak_index| to suitable
   // values when the signal contains no active speech.
-  virtual void SetParametersForPassiveSpeech(int len,
+  virtual void SetParametersForPassiveSpeech(size_t len,
                                              int16_t* best_correlation,
                                              int* peak_index) const OVERRIDE;
 
   // Checks the criteria for performing the time-stretching operation and,
   // if possible, performs the time-stretching.
   virtual ReturnCodes CheckCriteriaAndStretch(
-      const int16_t* input, int input_length, size_t peak_index,
+      const int16_t* input, size_t input_length, size_t peak_index,
       int16_t best_correlation, bool active_speech,
       AudioMultiVector<int16_t>* output) const OVERRIDE;


@@ -49,7 +49,8 @@ int AudioDecoderPcmU::Decode(const uint8_t* encoded, size_t encoded_len,
 
 int AudioDecoderPcmU::PacketDuration(const uint8_t* encoded,
                                      size_t encoded_len) {
-  return encoded_len / channels_;  // One encoded byte per sample per channel.
+  // One encoded byte per sample per channel.
+  return static_cast<int>(encoded_len / channels_);
 }
 
 // PCMa
@@ -65,7 +66,8 @@ int AudioDecoderPcmA::Decode(const uint8_t* encoded, size_t encoded_len,
 
 int AudioDecoderPcmA::PacketDuration(const uint8_t* encoded,
                                      size_t encoded_len) {
-  return encoded_len / channels_;  // One encoded byte per sample per channel.
+  // One encoded byte per sample per channel.
+  return static_cast<int>(encoded_len / channels_);
 }
 
 // PCM16B
@@ -91,7 +93,7 @@ int AudioDecoderPcm16B::Decode(const uint8_t* encoded, size_t encoded_len,
 int AudioDecoderPcm16B::PacketDuration(const uint8_t* encoded,
                                        size_t encoded_len) {
   // Two encoded byte per sample per channel.
-  return encoded_len / (2 * channels_);
+  return static_cast<int>(encoded_len / (2 * channels_));
 }
 
 AudioDecoderPcm16BMultiCh::AudioDecoderPcm16BMultiCh(
@@ -195,7 +197,7 @@ int AudioDecoderIsac::IncomingPacket(const uint8_t* payload,
                                      uint32_t arrival_timestamp) {
   return WebRtcIsac_UpdateBwEstimate(static_cast<ISACStruct*>(state_),
                                      reinterpret_cast<const uint16_t*>(payload),
-                                     payload_len,
+                                     static_cast<int32_t>(payload_len),
                                      rtp_sequence_number,
                                      rtp_timestamp,
                                      arrival_timestamp);
@@ -249,7 +251,8 @@ int AudioDecoderIsacFix::IncomingPacket(const uint8_t* payload,
                                         uint32_t arrival_timestamp) {
   return WebRtcIsacfix_UpdateBwEstimate(
       static_cast<ISACFIX_MainStruct*>(state_),
-      reinterpret_cast<const uint16_t*>(payload), payload_len,
+      reinterpret_cast<const uint16_t*>(payload),
+      static_cast<int32_t>(payload_len),
       rtp_sequence_number, rtp_timestamp, arrival_timestamp);
 }
 
@@ -286,7 +289,7 @@ int AudioDecoderG722::Init() {
 int AudioDecoderG722::PacketDuration(const uint8_t* encoded,
                                      size_t encoded_len) {
   // 1/2 encoded byte per sample per channel.
-  return 2 * encoded_len / channels_;
+  return static_cast<int>(2 * encoded_len / channels_);
 }
 
 AudioDecoderG722Stereo::AudioDecoderG722Stereo()
@@ -383,7 +386,8 @@ AudioDecoderOpus::AudioDecoderOpus(enum NetEqDecoder type)
   } else {
     channels_ = 1;
   }
-  WebRtcOpus_DecoderCreate(reinterpret_cast<OpusDecInst**>(&state_), channels_);
+  WebRtcOpus_DecoderCreate(reinterpret_cast<OpusDecInst**>(&state_),
+                           static_cast<int>(channels_));
 }
 
 AudioDecoderOpus::~AudioDecoderOpus() {
@@ -397,7 +401,7 @@ int AudioDecoderOpus::Decode(const uint8_t* encoded, size_t encoded_len,
                                static_cast<int16_t>(encoded_len), decoded,
                                &temp_type);
   if (ret > 0)
-    ret *= channels_;  // Return total number of samples.
+    ret *= static_cast<int16_t>(channels_);  // Return total number of samples.
   *speech_type = ConvertSpeechType(temp_type);
   return ret;
 }
@@ -409,7 +413,7 @@ int AudioDecoderOpus::Init() {
 int AudioDecoderOpus::PacketDuration(const uint8_t* encoded,
                                      size_t encoded_len) {
   return WebRtcOpus_DurationEst(static_cast<OpusDecInst*>(state_),
-                                encoded, encoded_len);
+                                encoded, static_cast<int>(encoded_len));
 }
 
 #endif


@@ -231,7 +231,8 @@ class AudioDecoderPcmUTest : public AudioDecoderTest {
   virtual int EncodeFrame(const int16_t* input, size_t input_len_samples,
                           uint8_t* output) {
     int enc_len_bytes =
-        WebRtcG711_EncodeU(NULL, const_cast<int16_t*>(input), input_len_samples,
+        WebRtcG711_EncodeU(NULL, const_cast<int16_t*>(input),
+                           static_cast<int>(input_len_samples),
                            reinterpret_cast<int16_t*>(output));
     EXPECT_EQ(input_len_samples, static_cast<size_t>(enc_len_bytes));
     return enc_len_bytes;
@@ -250,7 +251,8 @@ class AudioDecoderPcmATest : public AudioDecoderTest {
   virtual int EncodeFrame(const int16_t* input, size_t input_len_samples,
                           uint8_t* output) {
     int enc_len_bytes =
-        WebRtcG711_EncodeA(NULL, const_cast<int16_t*>(input), input_len_samples,
+        WebRtcG711_EncodeA(NULL, const_cast<int16_t*>(input),
+                           static_cast<int>(input_len_samples),
                            reinterpret_cast<int16_t*>(output));
     EXPECT_EQ(input_len_samples, static_cast<size_t>(enc_len_bytes));
     return enc_len_bytes;
@@ -269,7 +271,7 @@ class AudioDecoderPcm16BTest : public AudioDecoderTest {
   virtual int EncodeFrame(const int16_t* input, size_t input_len_samples,
                           uint8_t* output) {
     int enc_len_bytes = WebRtcPcm16b_EncodeW16(
-        const_cast<int16_t*>(input), input_len_samples,
+        const_cast<int16_t*>(input), static_cast<int>(input_len_samples),
         reinterpret_cast<int16_t*>(output));
     EXPECT_EQ(2 * input_len_samples, static_cast<size_t>(enc_len_bytes));
     return enc_len_bytes;
@@ -297,7 +299,8 @@ class AudioDecoderIlbcTest : public AudioDecoderTest {
   virtual int EncodeFrame(const int16_t* input, size_t input_len_samples,
                           uint8_t* output) {
     int enc_len_bytes =
-        WebRtcIlbcfix_Encode(encoder_, input, input_len_samples,
+        WebRtcIlbcfix_Encode(encoder_, input,
+                             static_cast<int>(input_len_samples),
                              reinterpret_cast<int16_t*>(output));
     EXPECT_EQ(50, enc_len_bytes);
     return enc_len_bytes;
@@ -475,7 +478,7 @@ class AudioDecoderG722Test : public AudioDecoderTest {
                           uint8_t* output) {
     int enc_len_bytes =
         WebRtcG722_Encode(encoder_, const_cast<int16_t*>(input),
-                          input_len_samples,
+                          static_cast<int>(input_len_samples),
                           reinterpret_cast<int16_t*>(output));
     EXPECT_EQ(80, enc_len_bytes);
     return enc_len_bytes;
@@ -545,15 +548,17 @@ class AudioDecoderOpusTest : public AudioDecoderTest {
     // Upsample from 32 to 48 kHz.
     Resampler rs;
     rs.Reset(32000, 48000, kResamplerSynchronous);
-    const int max_resamp_len_samples = input_len_samples * 3 / 2;
+    const int max_resamp_len_samples = static_cast<int>(input_len_samples) *
+        3 / 2;
     int16_t* resamp_input = new int16_t[max_resamp_len_samples];
     int resamp_len_samples;
-    EXPECT_EQ(0, rs.Push(input, input_len_samples, resamp_input,
-                         max_resamp_len_samples, resamp_len_samples));
+    EXPECT_EQ(0, rs.Push(input, static_cast<int>(input_len_samples),
+                         resamp_input, max_resamp_len_samples,
+                         resamp_len_samples));
     EXPECT_EQ(max_resamp_len_samples, resamp_len_samples);
     int enc_len_bytes =
-        WebRtcOpus_Encode(encoder_, resamp_input,
-                          resamp_len_samples, data_length_, output);
+        WebRtcOpus_Encode(encoder_, resamp_input, resamp_len_samples,
+                          static_cast<int>(data_length_), output);
     EXPECT_GT(enc_len_bytes, 0);
     delete [] resamp_input;
     return enc_len_bytes;
@@ -582,7 +587,7 @@ class AudioDecoderOpusStereoTest : public AudioDecoderTest {
   virtual int EncodeFrame(const int16_t* input, size_t input_len_samples,
                           uint8_t* output) {
     // Create stereo by duplicating each sample in |input|.
-    const int input_stereo_samples = input_len_samples * 2;
+    const int input_stereo_samples = static_cast<int>(input_len_samples) * 2;
     int16_t* input_stereo = new int16_t[input_stereo_samples];
     for (size_t i = 0; i < input_len_samples; i++)
       input_stereo[i * 2] = input_stereo[i * 2 + 1] = input[i];
@@ -597,7 +602,7 @@ class AudioDecoderOpusStereoTest : public AudioDecoderTest {
     EXPECT_EQ(max_resamp_len_samples, resamp_len_samples);
     int enc_len_bytes =
         WebRtcOpus_Encode(encoder_, resamp_input, resamp_len_samples / 2,
-                          data_length_, output);
+                          static_cast<int16_t>(data_length_), output);
     EXPECT_GT(enc_len_bytes, 0);
     delete [] resamp_input;
     delete [] input_stereo;


@@ -144,7 +144,7 @@ void AudioVector<T>::CrossFade(const AudioVector<T>& append_this,
   // |alpha| is the mixing factor in Q14.
   // TODO(hlundin): Consider skipping +1 in the denominator to produce a
   // smoother cross-fade, in particular at the end of the fade.
-  int alpha_step = 16384 / (fade_length + 1);
+  int alpha_step = 16384 / (static_cast<int>(fade_length) + 1);
   int alpha = 16384;
   for (size_t i = 0; i < fade_length; ++i) {
     alpha -= alpha_step;
@@ -174,7 +174,7 @@ void AudioVector<double>::CrossFade(const AudioVector<double>& append_this,
   // |alpha| is the mixing factor in Q14.
   // TODO(hlundin): Consider skipping +1 in the denominator to produce a
   // smoother cross-fade, in particular at the end of the fade.
-  int alpha_step = 16384 / (fade_length + 1);
+  int alpha_step = 16384 / (static_cast<int>(fade_length) + 1);
   int alpha = 16384;
   for (size_t i = 0; i < fade_length; ++i) {
     alpha -= alpha_step;


@@ -165,15 +165,15 @@ int16_t BackgroundNoise::ScaleShift(size_t channel) const {
 }
 
 int32_t BackgroundNoise::CalculateAutoCorrelation(
-    const int16_t* signal, size_t length, int32_t* auto_correlation) const {
+    const int16_t* signal, int length, int32_t* auto_correlation) const {
   int16_t signal_max = WebRtcSpl_MaxAbsValueW16(signal, length);
   int correlation_scale = kLogVecLen -
       WebRtcSpl_NormW32(signal_max * signal_max);
   correlation_scale = std::max(0, correlation_scale);
 
   static const int kCorrelationStep = -1;
-  WebRtcSpl_CrossCorrelation(auto_correlation, signal, signal,
-                             length, kMaxLpcOrder + 1, correlation_scale,
-                             kCorrelationStep);
+  WebRtcSpl_CrossCorrelation(auto_correlation, signal, signal, length,
+                             kMaxLpcOrder + 1, correlation_scale,
+                             kCorrelationStep);
 
   // Number of shifts to normalize energy to energy/sample.


@@ -113,7 +113,7 @@ class BackgroundNoise {
   };
 
   int32_t CalculateAutoCorrelation(const int16_t* signal,
-                                   size_t length,
+                                   int length,
                                    int32_t* auto_correlation) const;
 
   // Increments the energy threshold by a factor 1 + |kThresholdIncrement|.


@@ -59,7 +59,7 @@ int ComfortNoise::Generate(size_t requested_length,
     return kMultiChannelNotSupported;
   }
 
-  int16_t number_of_samples = requested_length;
+  size_t number_of_samples = requested_length;
   int16_t new_period = 0;
   if (first_call_) {
     // Generate noise and overlap slightly with old data.
@@ -75,7 +75,8 @@ int ComfortNoise::Generate(size_t requested_length,
   CNG_dec_inst* cng_inst = static_cast<CNG_dec_inst*>(cng_decoder->state());
   // The expression &(*output)[0][0] is a pointer to the first element in
   // the first channel.
-  if (WebRtcCng_Generate(cng_inst, &(*output)[0][0], number_of_samples,
+  if (WebRtcCng_Generate(cng_inst, &(*output)[0][0],
+                         static_cast<int16_t>(number_of_samples),
                          new_period) < 0) {
     // Error returned.
     output->Zeros(requested_length);


@@ -123,7 +123,8 @@ Operations DecisionLogic::GetDecision(const SyncBuffer& sync_buffer,
     }
   }
 
-  const int samples_left = sync_buffer.FutureLength() - expand.overlap_length();
+  const int samples_left = static_cast<int>(
+      sync_buffer.FutureLength() - expand.overlap_length());
   const int cur_size_samples =
       samples_left + packet_buffer_.NumSamplesInBuffer(decoder_database_,
                                                        decoder_frame_length);


@@ -171,8 +171,8 @@ Operations DecisionLogicNormal::FuturePacketAvailable(
     }
   }
 
-  const int samples_left = sync_buffer.FutureLength() -
-      expand.overlap_length();
+  const int samples_left = static_cast<int>(sync_buffer.FutureLength() -
+      expand.overlap_length());
   const int cur_size_samples = samples_left +
       packet_buffer_.NumPacketsInBuffer() * decoder_frame_length;


@@ -28,7 +28,7 @@ DecoderDatabase::DecoderInfo::~DecoderInfo() {
 
 bool DecoderDatabase::Empty() const { return decoders_.empty(); }
 
-int DecoderDatabase::Size() const { return decoders_.size(); }
+int DecoderDatabase::Size() const { return static_cast<int>(decoders_.size()); }
 
 void DecoderDatabase::Reset() {
   decoders_.clear();


@@ -277,14 +277,13 @@ int DelayManager::CalculateTargetLevel(int iat_packets) {
   } while ((sum > limit_probability) && (index < iat_vector_.size() - 1));
 
   // This is the base value for the target buffer level.
-  int target_level = index;
-  base_target_level_ = index;
+  int target_level = static_cast<int>(index);
+  base_target_level_ = static_cast<int>(index);
 
   // Update detector for delay peaks.
   bool delay_peak_found = peak_detector_.Update(iat_packets, target_level);
   if (delay_peak_found) {
-    target_level = std::max(static_cast<int>(target_level),
-                            peak_detector_.MaxPeakHeight());
+    target_level = std::max(target_level, peak_detector_.MaxPeakHeight());
   }
 
   // Sanity check. |target_level| must be strictly positive.
@@ -322,8 +321,12 @@ void DelayManager::Reset() {
 
 int DelayManager::AverageIAT() const {
   int32_t sum_q24 = 0;
+  // Using an int for the upper limit of the following for-loop so the
+  // loop-counter can be int. Otherwise we need a cast where |sum_q24| is
+  // updated.
+  const int iat_vec_size = static_cast<int>(iat_vector_.size());
   assert(iat_vector_.size() == 65);  // Algorithm is hard-coded for this size.
-  for (size_t i = 0; i < iat_vector_.size(); ++i) {
+  for (int i = 0; i < iat_vec_size; ++i) {
     // Shift 6 to fit worst case: 2^30 * 64.
     sum_q24 += (iat_vector_[i] >> 6) * i;
   }


@@ -258,11 +258,11 @@ int DspHelper::MinDistortion(const int16_t* signal, int min_lag,
 }
 
 void DspHelper::CrossFade(const int16_t* input1, const int16_t* input2,
-                          int length, int16_t* mix_factor,
+                          size_t length, int16_t* mix_factor,
                           int16_t factor_decrement, int16_t* output) {
   int16_t factor = *mix_factor;
   int16_t complement_factor = 16384 - factor;
-  for (int i = 0; i < length; i++) {
+  for (size_t i = 0; i < length; i++) {
     output[i] =
         (factor * input1[i] + complement_factor * input2[i] + 8192) >> 14;
     factor -= factor_decrement;
@@ -271,11 +271,12 @@ void DspHelper::CrossFade(const int16_t* input1, const int16_t* input2,
   *mix_factor = factor;
 }
 
-void DspHelper::UnmuteSignal(const int16_t* input, int length, int16_t* factor,
-                             int16_t increment, int16_t* output) {
+void DspHelper::UnmuteSignal(const int16_t* input, size_t length,
+                             int16_t* factor, int16_t increment,
+                             int16_t* output) {
   uint16_t factor_16b = *factor;
   int32_t factor_32b = (static_cast<int32_t>(factor_16b) << 6) + 32;
-  for (int i = 0; i < length; i++) {
+  for (size_t i = 0; i < length; i++) {
     output[i] = (factor_16b * input[i] + 8192) >> 14;
     factor_32b = std::max(factor_32b + increment, 0);
     factor_16b = std::min(16384, factor_32b >> 6);
@@ -283,15 +284,15 @@ void DspHelper::UnmuteSignal(const int16_t* input, int length, int16_t* factor,
   *factor = factor_16b;
 }
 
-void DspHelper::MuteSignal(int16_t* signal, int16_t mute_slope, int length) {
+void DspHelper::MuteSignal(int16_t* signal, int16_t mute_slope, size_t length) {
   int32_t factor = (16384 << 6) + 32;
-  for (int i = 0; i < length; i++) {
+  for (size_t i = 0; i < length; i++) {
     signal[i] = ((factor >> 6) * signal[i] + 8192) >> 14;
     factor -= mute_slope;
   }
 }
 
-int DspHelper::DownsampleTo4kHz(const int16_t* input, int input_length,
+int DspHelper::DownsampleTo4kHz(const int16_t* input, size_t input_length,
                                 int output_length, int input_rate_hz,
                                 bool compensate_delay, int16_t* output) {
   // Set filter parameters depending on input frequency.
@@ -343,10 +344,10 @@ int DspHelper::DownsampleTo4kHz(const int16_t* input, int input_length,
   }
 
   // Returns -1 if input signal is too short; 0 otherwise.
-  return WebRtcSpl_DownsampleFast(&input[filter_length - 1],
-                                  input_length - (filter_length - 1), output,
-                                  output_length, filter_coefficients,
-                                  filter_length, factor, filter_delay);
+  return WebRtcSpl_DownsampleFast(
+      &input[filter_length - 1], static_cast<int>(input_length) -
+      (filter_length - 1), output, output_length, filter_coefficients,
+      filter_length, factor, filter_delay);
 }
 
 }  // namespace webrtc


@@ -103,25 +103,25 @@ class DspHelper {
   // is decreased by |factor_decrement| (Q14) for each sample. The gain for
   // |input2| is the complement 16384 - mix_factor.
   static void CrossFade(const int16_t* input1, const int16_t* input2,
-                        int length, int16_t* mix_factor,
+                        size_t length, int16_t* mix_factor,
                         int16_t factor_decrement, int16_t* output);
 
   // Scales |input| with an increasing gain. Applies |factor| (Q14) to the first
   // sample and increases the gain by |increment| (Q20) for each sample. The
   // result is written to |output|. |length| samples are processed.
-  static void UnmuteSignal(const int16_t* input, int length, int16_t* factor,
+  static void UnmuteSignal(const int16_t* input, size_t length, int16_t* factor,
                            int16_t increment, int16_t* output);
 
   // Starts at unity gain and gradually fades out |signal|. For each sample,
   // the gain is reduced by |mute_slope| (Q14). |length| samples are processed.
-  static void MuteSignal(int16_t* signal, int16_t mute_slope, int length);
+  static void MuteSignal(int16_t* signal, int16_t mute_slope, size_t length);
 
   // Downsamples |input| from |sample_rate_hz| to 4 kHz sample rate. The input
   // has |input_length| samples, and the method will write |output_length|
   // samples to |output|. Compensates for the phase delay of the downsampling
   // filters if |compensate_delay| is true. Returns -1 if the input is too short
   // to produce |output_length| samples, otherwise 0.
-  static int DownsampleTo4kHz(const int16_t* input, int input_length,
+  static int DownsampleTo4kHz(const int16_t* input, size_t input_length,
                               int output_length, int input_rate_hz,
                               bool compensate_delay, int16_t* output);


@@ -98,7 +98,7 @@ int Expand::Process(AudioMultiVector<int16_t>* output) {
       WebRtcSpl_ScaleAndAddVectorsWithRound(
           &parameters.expand_vector0[expansion_vector_position], 3,
           &parameters.expand_vector1[expansion_vector_position], 1, 2,
-          voiced_vector_storage, temp_length);
+          voiced_vector_storage, static_cast<int>(temp_length));
     } else if (current_lag_index_ == 2) {
       // Mix 1/2 of expand_vector0 with 1/2 of expand_vector1.
       assert(expansion_vector_position + temp_length <=
@@ -108,7 +108,7 @@ int Expand::Process(AudioMultiVector<int16_t>* output) {
       WebRtcSpl_ScaleAndAddVectorsWithRound(
           &parameters.expand_vector0[expansion_vector_position], 1,
           &parameters.expand_vector1[expansion_vector_position], 1, 1,
-          voiced_vector_storage, temp_length);
+          voiced_vector_storage, static_cast<int>(temp_length));
     }
 
     // Get tapering window parameters. Values are in Q15.
@@ -174,10 +174,11 @@ int Expand::Process(AudioMultiVector<int16_t>* output) {
     }
 
     WebRtcSpl_AffineTransformVector(scaled_random_vector, random_vector,
                                     parameters.ar_gain, add_constant,
-                                    parameters.ar_gain_scale, current_lag);
+                                    parameters.ar_gain_scale,
+                                    static_cast<int>(current_lag));
     WebRtcSpl_FilterARFastQ12(scaled_random_vector, unvoiced_vector,
-                              parameters.ar_filter,
-                              kUnvoicedLpcOrder + 1, current_lag);
+                              parameters.ar_filter, kUnvoicedLpcOrder + 1,
+                              static_cast<int>(current_lag));
     memcpy(parameters.ar_filter_state,
            &(unvoiced_vector[current_lag - kUnvoicedLpcOrder]),
            sizeof(int16_t) * kUnvoicedLpcOrder);
@@ -215,7 +216,7 @@ int Expand::Process(AudioMultiVector<int16_t>* output) {
     WebRtcSpl_ScaleAndAddVectorsWithRound(
         voiced_vector + temp_lenght, parameters.current_voice_mix_factor,
         unvoiced_vector + temp_lenght, temp_scale, 14,
-        temp_data + temp_lenght, current_lag - temp_lenght);
+        temp_data + temp_lenght, static_cast<int>(current_lag - temp_lenght));
     }
 
     // Select muting slope depending on how many consecutive expands we have
@@ -238,7 +239,7 @@ int Expand::Process(AudioMultiVector<int16_t>* output) {
       // Mute to the previous level, then continue with the muting.
       WebRtcSpl_AffineTransformVector(temp_data, temp_data,
                                       parameters.mute_factor, 8192,
-                                      14, current_lag);
+                                      14, static_cast<int>(current_lag));
 
       if (!stop_muting_) {
         DspHelper::MuteSignal(temp_data, parameters.mute_slope, current_lag);
@@ -246,8 +247,8 @@ int Expand::Process(AudioMultiVector<int16_t>* output) {
       // Shift by 6 to go from Q20 to Q14.
       // TODO(hlundin): Adding 8192 before shifting 6 steps seems wrong.
      // Legacy.
-      int16_t gain = 16384 -
-          (((current_lag * parameters.mute_slope) + 8192) >> 6);
+      int16_t gain = static_cast<int16_t>(16384 -
+          (((current_lag * parameters.mute_slope) + 8192) >> 6));
       gain = ((gain * parameters.mute_factor) + 8192) >> 14;
 
       // Guard against getting stuck with very small (but sometimes audible)
@@ -278,12 +279,13 @@ int Expand::Process(AudioMultiVector<int16_t>* output) {
 
       WebRtcSpl_AffineTransformVector(
           scaled_random_vector, random_vector,
          background_noise_->Scale(channel_ix), add_constant,
-          background_noise_->ScaleShift(channel_ix), current_lag);
+          background_noise_->ScaleShift(channel_ix),
+          static_cast<int>(current_lag));
       WebRtcSpl_FilterARFastQ12(scaled_random_vector, noise_vector,
                                 background_noise_->Filter(channel_ix),
                                 kNoiseLpcOrder + 1,
-                                current_lag);
+                                static_cast<int>(current_lag));
 
       background_noise_->SetFilterState(
           channel_ix,
@@ -318,15 +320,16 @@ int Expand::Process(AudioMultiVector<int16_t>* output) {
     if (!stop_muting_ && bgn_mode != BackgroundNoise::kBgnOff &&
         !(bgn_mode == BackgroundNoise::kBgnFade &&
          consecutive_expands_ >= kMaxConsecutiveExpands)) {
-      DspHelper::UnmuteSignal(noise_vector, current_lag, &bgn_mute_factor,
-                              parameters.mute_slope, noise_vector);
+      DspHelper::UnmuteSignal(noise_vector, static_cast<int>(current_lag),
+                              &bgn_mute_factor, parameters.mute_slope,
+                              noise_vector);
     } else {
      // kBgnOn and stop muting, or
      // kBgnOff (mute factor is always 0), or
      // kBgnFade has reached 0.
       WebRtcSpl_AffineTransformVector(noise_vector, noise_vector,
                                       bgn_mute_factor, 8192, 14,
-                                      current_lag);
+                                      static_cast<int>(current_lag));
     }
   }
   // Update mute_factor in BackgroundNoise class.
@@ -540,7 +543,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
 
   // Extract the two vectors expand_vector0 and expand_vector1 from
   // |audio_history|.
-  int16_t expansion_length = max_lag_ + overlap_length_;
+  int16_t expansion_length = static_cast<int16_t>(max_lag_ + overlap_length_);
   const int16_t* vector1 = &(audio_history[signal_length - expansion_length]);
   const int16_t* vector2 = vector1 - distortion_lag;
   // Normalize the second vector to the same energy as the first.
@@ -781,7 +784,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
   }
 }
 
-int16_t Expand::Correlation(const int16_t* input, int16_t input_length,
+int16_t Expand::Correlation(const int16_t* input, size_t input_length,
                             int16_t* output, int16_t* output_scale) const {
   // Set parameters depending on sample rate.
   const int16_t* filter_coefficients;


@@ -130,7 +130,7 @@ class Expand {
   // samples. The correlation is calculated from a downsampled version of
   // |input|, and is written to |output|. The scale factor is written to
   // |output_scale|. Returns the length of the correlation vector.
-  int16_t Correlation(const int16_t* input, int16_t input_length,
+  int16_t Correlation(const int16_t* input, size_t input_length,
                       int16_t* output, int16_t* output_scale) const;
 
   void UpdateLagIndex();


@@ -23,7 +23,7 @@
 
 namespace webrtc {
 
-int Merge::Process(int16_t* input, int input_length,
+int Merge::Process(int16_t* input, size_t input_length,
                    int16_t* external_mute_factor_array,
                    AudioMultiVector<int16_t>* output) {
   // TODO(hlundin): Change to an enumerator and skip assert.
@@ -49,10 +49,9 @@ int Merge::Process(int16_t* input, int input_length,
     int16_t* input_channel = &input_vector[channel][0];
     int16_t* expanded_channel = &expanded_[channel][0];
     int16_t expanded_max, input_max;
-    int16_t new_mute_factor = SignalScaling(input_channel,
-                                            input_length_per_channel,
-                                            expanded_channel, &expanded_max,
-                                            &input_max);
+    int16_t new_mute_factor = SignalScaling(
+        input_channel, static_cast<int>(input_length_per_channel),
+        expanded_channel, &expanded_max, &input_max);
 
     // Adjust muting factor (product of "main" muting factor and expand muting
     // factor).
@@ -70,15 +69,13 @@ int Merge::Process(int16_t* input, int input_length,
       // Downsample, correlate, and find strongest correlation period for the
      // master (i.e., first) channel only.
      // Downsample to 4kHz sample rate.
-      Downsample(input_channel, input_length_per_channel, expanded_channel,
-                 expanded_length);
+      Downsample(input_channel, static_cast<int>(input_length_per_channel),
+                 expanded_channel, expanded_length);
 
       // Calculate the lag of the strongest correlation period.
-      best_correlation_index = CorrelateAndPeakSearch(expanded_max,
-                                                      input_max,
-                                                      old_length,
-                                                      input_length_per_channel,
-                                                      expand_period);
+      best_correlation_index = CorrelateAndPeakSearch(
+          expanded_max, input_max, old_length,
+          static_cast<int>(input_length_per_channel), expand_period);
     }
 
     static const int kTempDataSize = 3600;
@@ -139,12 +136,12 @@ int Merge::Process(int16_t* input, int input_length,
 
   // Return new added length. |old_length| samples were borrowed from
   // |sync_buffer_|.
-  return output_length - old_length;
+  return static_cast<int>(output_length) - old_length;
 }
 
 int Merge::GetExpandedSignal(int* old_length, int* expand_period) {
   // Check how much data that is left since earlier.
-  *old_length = sync_buffer_->FutureLength();
+  *old_length = static_cast<int>(sync_buffer_->FutureLength());
   // Should never be less than overlap_length.
   assert(*old_length >= static_cast<int>(expand_->overlap_length()));
   // Generate data to merge the overlap with using expand.
@@ -167,7 +164,8 @@ int Merge::GetExpandedSignal(int* old_length, int* expand_period) {
 
     AudioMultiVector<int16_t> expanded_temp(num_channels_);
     expand_->Process(&expanded_temp);
-    *expand_period = expanded_temp.Size();  // Samples per channel.
+    *expand_period = static_cast<int>(expanded_temp.Size());  // Samples per
+                                                              // channel.
 
     expanded_.Clear();
     // Copy what is left since earlier into the expanded vector.
@@ -325,7 +323,8 @@ int16_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
   // (1) w16_bestIndex + input_length <
   //     timestamps_per_call_ + expand_->overlap_length();
   // (2) w16_bestIndex + input_length < start_position.
-  int start_index = timestamps_per_call_ + expand_->overlap_length();
+  int start_index = timestamps_per_call_ +
+      static_cast<int>(expand_->overlap_length());
   start_index = std::max(start_position, start_index);
   start_index = std::max(start_index - input_length, 0);
   // Downscale starting index to 4kHz domain. (fs_mult_ * 2 = fs_hz_ / 4000.)
@@ -333,7 +332,7 @@ int16_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
 
   // Calculate a modified |stop_position_downsamp| to account for the increased
   // start index |start_index_downsamp| and the effective array length.
-  int16_t modified_stop_pos =
+  int modified_stop_pos =
      std::min(stop_position_downsamp,
               kMaxCorrelationLength + kPadLength - start_index_downsamp);
   int best_correlation_index;


@@ -51,7 +51,7 @@ class Merge {
   // de-interleaving |input|. The values in |external_mute_factor_array| (Q14)
   // will be used to scale the audio, and is updated in the process. The array
   // must have |num_channels_| elements.
-  int Process(int16_t* input, int input_length,
+  int Process(int16_t* input, size_t input_length,
               int16_t* external_mute_factor_array,
               AudioMultiVector<int16_t>* output);


@@ -112,10 +112,6 @@
         'time_stretch.cc',
         'time_stretch.h',
       ],
-      # Disable warnings to enable Win64 build, issue 1323.
-      'msvs_disabled_warnings': [
-        4267,  # size_t to int truncation.
-      ],
     },
   ],  # targets
   'conditions': [
@@ -156,10 +152,6 @@
           ],
         }],
       ],
-      # Disable warnings to enable Win64 build, issue 1323.
-      'msvs_disabled_warnings': [
-        4267,  # size_t to int truncation.
-      ],
     },  # audio_decoder_unittests
 
     {
@@ -186,10 +178,6 @@
         'tools/rtp_generator.cc',
         'tools/rtp_generator.h',
       ],
-      # Disable warnings to enable Win64 build, issue 1323.
-      'msvs_disabled_warnings': [
-        4267,  # size_t to int truncation.
-      ],
     },  # neteq_unittest_tools
   ],  # targets
   'conditions': [


@@ -286,7 +286,7 @@ int NetEqImpl::NetworkStatistics(NetEqNetworkStatistics* stats) {
   assert(decoder_database_.get());
   const int total_samples_in_buffers = packet_buffer_->NumSamplesInBuffer(
       decoder_database_.get(), decoder_frame_length_) +
-      sync_buffer_->FutureLength();
+      static_cast<int>(sync_buffer_->FutureLength());
   assert(delay_manager_.get());
   assert(decision_logic_.get());
   stats_.GetNetworkStatistics(fs_hz_, total_samples_in_buffers,
@@ -716,17 +716,19 @@ int NetEqImpl::GetAudioInternal(size_t max_length, int16_t* output,
   sync_buffer_->PushBack(*algorithm_buffer_);
 
   // Extract data from |sync_buffer_| to |output|.
-  int num_output_samples_per_channel = output_size_samples_;
-  int num_output_samples = output_size_samples_ * sync_buffer_->Channels();
-  if (num_output_samples > static_cast<int>(max_length)) {
+  size_t num_output_samples_per_channel = output_size_samples_;
+  size_t num_output_samples = output_size_samples_ * sync_buffer_->Channels();
+  if (num_output_samples > max_length) {
     LOG(LS_WARNING) << "Output array is too short. " << max_length << " < " <<
         output_size_samples_ << " * " << sync_buffer_->Channels();
     num_output_samples = max_length;
-    num_output_samples_per_channel = max_length / sync_buffer_->Channels();
+    num_output_samples_per_channel = static_cast<int>(
+        max_length / sync_buffer_->Channels());
   }
-  int samples_from_sync = sync_buffer_->GetNextAudioInterleaved(
-      num_output_samples_per_channel, output);
-  *num_channels = sync_buffer_->Channels();
+  int samples_from_sync = static_cast<int>(
+      sync_buffer_->GetNextAudioInterleaved(num_output_samples_per_channel,
+                                            output));
+  *num_channels = static_cast<int>(sync_buffer_->Channels());
   NETEQ_LOG_VERBOSE << "Sync buffer (" << *num_channels << " channel(s)):" <<
       " insert " << algorithm_buffer_->Size() << " samples, extract " <<
       samples_from_sync << " samples";
@@ -768,7 +770,7 @@ int NetEqImpl::GetAudioInternal(size_t max_length, int16_t* output,
   // |playout_timestamp_| from the |sync_buffer_|. However, do not update the
   // |playout_timestamp_| if it would be moved "backwards".
   uint32_t temp_timestamp = sync_buffer_->end_timestamp() -
-      sync_buffer_->FutureLength();
+      static_cast<uint32_t>(sync_buffer_->FutureLength());
   if (static_cast<int32_t>(temp_timestamp - playout_timestamp_) > 0) {
     playout_timestamp_ = temp_timestamp;
   }
@@ -821,8 +823,8 @@ int NetEqImpl::GetDecision(Operations* operation,
   }
 
   assert(expand_.get());
-  const int samples_left = sync_buffer_->FutureLength() -
-      expand_->overlap_length();
+  const int samples_left = static_cast<int>(sync_buffer_->FutureLength() -
+      expand_->overlap_length());
   if (last_mode_ == kModeAccelerateSuccess ||
       last_mode_ == kModeAccelerateLowEnergy ||
       last_mode_ == kModePreemptiveExpandSuccess ||
@@ -1126,8 +1128,8 @@ int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
     // Increase with number of samples per channel.
     assert(*decoded_length == 0 ||
            (decoder && decoder->channels() == sync_buffer_->Channels()));
-    sync_buffer_->IncreaseEndTimestamp(*decoded_length /
-                                       sync_buffer_->Channels());
+    sync_buffer_->IncreaseEndTimestamp(
+        *decoded_length / static_cast<int>(sync_buffer_->Channels()));
   }
   return return_value;
 }
@@ -1180,7 +1182,8 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list, Operations* operation,
     if (decode_length > 0) {
      *decoded_length += decode_length;
      // Update |decoder_frame_length_| with number of samples per channel.
-      decoder_frame_length_ = decode_length / decoder->channels();
+      decoder_frame_length_ = decode_length /
+          static_cast<int>(decoder->channels());
       NETEQ_LOG_VERBOSE << "Decoded " << decode_length << " samples (" <<
          decoder->channels() << " channel(s) -> " << decoder_frame_length_ <<
          " samples per channel)";
@@ -1246,10 +1249,10 @@ void NetEqImpl::DoMerge(int16_t* decoded_buffer, size_t decoded_length,
   // Update in-call and post-call statistics.
   if (expand_->MuteFactor(0) == 0) {
     // Expand generates only noise.
-    stats_.ExpandedNoiseSamples(new_length - decoded_length);
+    stats_.ExpandedNoiseSamples(new_length - static_cast<int>(decoded_length));
   } else {
     // Expansion generates more than only noise.
-    stats_.ExpandedVoiceSamples(new_length - decoded_length);
+    stats_.ExpandedVoiceSamples(new_length - static_cast<int>(decoded_length));
   }
 
   last_mode_ = kModeMerge;
@@ -1268,7 +1271,7 @@ int NetEqImpl::DoExpand(bool play_dtmf) {
          static_cast<size_t>(output_size_samples_)) {
     algorithm_buffer_->Clear();
     int return_value = expand_->Process(algorithm_buffer_.get());
-    int length = algorithm_buffer_->Size();
+    int length = static_cast<int>(algorithm_buffer_->Size());
 
     // Update in-call and post-call statistics.
     if (expand_->MuteFactor(0) == 0) {
@@ -1298,13 +1301,13 @@ int NetEqImpl::DoAccelerate(int16_t* decoded_buffer, size_t decoded_length,
                             AudioDecoder::SpeechType speech_type,
                             bool play_dtmf) {
   const size_t required_samples = 240 * fs_mult_;  // Must have 30 ms.
-  int borrowed_samples_per_channel = 0;
+  size_t borrowed_samples_per_channel = 0;
   size_t num_channels = algorithm_buffer_->Channels();
   size_t decoded_length_per_channel = decoded_length / num_channels;
   if (decoded_length_per_channel < required_samples) {
     // Must move data from the |sync_buffer_| in order to get 30 ms.
-    borrowed_samples_per_channel = required_samples -
-        decoded_length_per_channel;
+    borrowed_samples_per_channel = static_cast<int>(required_samples -
+        decoded_length_per_channel);
     memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels],
             decoded_buffer,
             sizeof(int16_t) * decoded_length);
@@ -1336,7 +1339,7 @@ int NetEqImpl::DoAccelerate(int16_t* decoded_buffer, size_t decoded_length,
 
   if (borrowed_samples_per_channel > 0) {
     // Copy borrowed samples back to the |sync_buffer_|.
-    int length = algorithm_buffer_->Size();
+    size_t length = algorithm_buffer_->Size();
     if (length < borrowed_samples_per_channel) {
       // This destroys the beginning of the buffer, but will not cause any
      // problems.
@@ -1377,11 +1380,11 @@ int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer,
   size_t decoded_length_per_channel = decoded_length / num_channels;
   if (decoded_length_per_channel < required_samples) {
     // Must move data from the |sync_buffer_| in order to get 30 ms.
-    borrowed_samples_per_channel = required_samples -
-        decoded_length_per_channel;
+    borrowed_samples_per_channel = static_cast<int>(required_samples -
+        decoded_length_per_channel);
     // Calculate how many of these were already played out.
-    old_borrowed_samples_per_channel = borrowed_samples_per_channel -
-        sync_buffer_->FutureLength();
+    old_borrowed_samples_per_channel = static_cast<int>(
+        borrowed_samples_per_channel - sync_buffer_->FutureLength());
     old_borrowed_samples_per_channel = std::max(
         0, old_borrowed_samples_per_channel);
     memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels],
@@ -1394,7 +1397,8 @@ int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer,
 
   int16_t samples_added;
   PreemptiveExpand::ReturnCodes return_code = preemptive_expand_->Process(
-      decoded_buffer, decoded_length, old_borrowed_samples_per_channel,
+      decoded_buffer, static_cast<int>(decoded_length),
+      old_borrowed_samples_per_channel,
       algorithm_buffer_.get(), &samples_added);
   stats_.PreemptiveExpandedSamples(samples_added);
   switch (return_code) {
@@ -1621,7 +1625,7 @@ int NetEqImpl::DtmfOverdub(const DtmfEvent& dtmf_event, size_t num_channels,
     out_index = std::min(
         sync_buffer_->dtmf_index() - sync_buffer_->next_index(),
         static_cast<size_t>(output_size_samples_));
-    overdub_length = output_size_samples_ - out_index;
+    overdub_length = output_size_samples_ - static_cast<int>(out_index);
   }
 
   AudioMultiVector<int16_t> dtmf_output(num_channels);


@@ -32,7 +32,7 @@ int Normal::Process(const int16_t* input,
   if (length == 0) {
     // Nothing to process.
     output->Clear();
-    return length;
+    return static_cast<int>(length);
   }
 
   assert(output->Empty());
@@ -68,11 +68,11 @@ int Normal::Process(const int16_t* input,
       int16_t* signal = &(*output)[channel_ix][0];
       size_t length_per_channel = length / output->Channels();
      // Find largest absolute value in new data.
-      int16_t decoded_max = WebRtcSpl_MaxAbsValueW16(signal,
-                                                     length_per_channel);
+      int16_t decoded_max = WebRtcSpl_MaxAbsValueW16(
+          signal, static_cast<int>(length_per_channel));
      // Adjust muting factor if needed (to BGN level).
-      int energy_length = std::min(static_cast<size_t>(fs_mult * 64),
-                                   length_per_channel);
+      int energy_length = std::min(static_cast<int>(fs_mult * 64),
+                                   static_cast<int>(length_per_channel));
       int scaling = 6 + fs_shift
          - WebRtcSpl_NormW32(decoded_max * decoded_max);
       scaling = std::max(scaling, 0);  // |scaling| should always be >= 0.
@@ -184,7 +184,7 @@ int Normal::Process(const int16_t* input,
     }
   }
 
-  return length;
+  return static_cast<int>(length);
 }
 
 }  // namespace webrtc


@@ -275,14 +275,14 @@ void PacketBuffer::DeleteAllPackets(PacketList* packet_list) {
   }
 }
 
-void PacketBuffer::BufferStat(int* num_packest,
+void PacketBuffer::BufferStat(int* num_packets,
                               int* max_num_packets,
                               int* current_memory_bytes,
                               int* max_memory_bytes) const {
-  *num_packest = buffer_.size();
-  *max_num_packets = max_number_of_packets_;
+  *num_packets = static_cast<int>(buffer_.size());
+  *max_num_packets = static_cast<int>(max_number_of_packets_);
   *current_memory_bytes = current_memory_bytes_;
-  *max_memory_bytes = max_memory_bytes_;
+  *max_memory_bytes = static_cast<int>(max_memory_bytes_);
 }
 
 }  // namespace webrtc


@@ -116,7 +116,7 @@ class PacketBuffer {
   // The default value for |inc| is 1.
   virtual void IncrementWaitingTimes(int inc = 1);
 
-  virtual void BufferStat(int* num_packest,
+  virtual void BufferStat(int* num_packets,
                           int* max_num_packets,
                           int* current_memory_bytes,
                           int* max_memory_bytes) const;

View File

@@ -44,7 +44,7 @@ void PostDecodeVad::Init() {
   }
 }
-void PostDecodeVad::Update(int16_t* signal, size_t length,
+void PostDecodeVad::Update(int16_t* signal, int length,
                            AudioDecoder::SpeechType speech_type,
                            bool sid_frame,
                            int fs_hz) {
@@ -67,16 +67,16 @@ void PostDecodeVad::Update(int16_t* signal, size_t length,
   }
   if (length > 0 && running_) {
-    size_t vad_sample_index = 0;
+    int vad_sample_index = 0;
     active_speech_ = false;
     // Loop through frame sizes 30, 20, and 10 ms.
-    for (size_t vad_frame_size_ms = 30; vad_frame_size_ms >= 10;
+    for (int vad_frame_size_ms = 30; vad_frame_size_ms >= 10;
         vad_frame_size_ms -= 10) {
-      size_t vad_frame_size_samples = vad_frame_size_ms * fs_hz / 1000;
+      int vad_frame_size_samples = vad_frame_size_ms * fs_hz / 1000;
       while (length - vad_sample_index >= vad_frame_size_samples) {
-        int vad_return = WebRtcVad_Process(vad_instance_, fs_hz,
-                                           &signal[vad_sample_index],
-                                           vad_frame_size_samples);
+        int vad_return = WebRtcVad_Process(
+            vad_instance_, fs_hz, &signal[vad_sample_index],
+            vad_frame_size_samples);
         active_speech_ |= (vad_return == 1);
         vad_sample_index += vad_frame_size_samples;
       }
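Switching the VAD bookkeeping above from size_t to int keeps the loop variables in the same int type that is passed straight into WebRtcVad_Process, so no per-call narrowing cast is needed inside the frame loop. The following standalone sketch shows the same 30/20/10 ms frame-slicing loop in all-int arithmetic; ProcessFrame is a hypothetical stand-in for the real VAD call.

// Sketch, assuming only a hypothetical frame processor with int lengths.
#include <cstdint>
#include <vector>

// Stand-in for a callee such as WebRtcVad_Process that takes int lengths.
int ProcessFrame(const int16_t* frame, int frame_length_samples) {
  return frame_length_samples > 0 ? 1 : 0;  // Pretend "active speech".
}

int main() {
  const int fs_hz = 8000;
  std::vector<int16_t> signal(250);  // Not a whole number of 30 ms frames.
  int length = static_cast<int>(signal.size());
  bool active = false;
  int sample_index = 0;
  // All-int arithmetic: try 30, 20, then 10 ms frames, as in the loop above.
  for (int frame_size_ms = 30; frame_size_ms >= 10; frame_size_ms -= 10) {
    int frame_size_samples = frame_size_ms * fs_hz / 1000;
    while (length - sample_index >= frame_size_samples) {
      active |= (ProcessFrame(&signal[sample_index], frame_size_samples) == 1);
      sample_index += frame_size_samples;
    }
  }
  return active ? 0 : 1;
}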

View File

@@ -46,7 +46,7 @@ class PostDecodeVad {
   // Updates post-decode VAD with the audio data in |signal| having |length|
   // samples. The data is of type |speech_type|, at the sample rate |fs_hz|.
-  void Update(int16_t* signal, size_t length,
+  void Update(int16_t* signal, int length,
               AudioDecoder::SpeechType speech_type, bool sid_frame, int fs_hz);
   // Accessors.

View File

@@ -38,7 +38,7 @@ PreemptiveExpand::ReturnCodes PreemptiveExpand::Process(
       length_change_samples);
 }
-void PreemptiveExpand::SetParametersForPassiveSpeech(int len,
+void PreemptiveExpand::SetParametersForPassiveSpeech(size_t len,
                                                      int16_t* best_correlation,
                                                      int* peak_index) const {
   // When the signal does not contain any active speech, the correlation does
@@ -49,11 +49,12 @@ void PreemptiveExpand::SetParametersForPassiveSpeech(int len,
   // but we must ensure that best_correlation is not larger than the length of
   // the new data.
   // but we must ensure that best_correlation is not larger than the new data.
-  *peak_index = std::min(*peak_index, len - old_data_length_per_channel_);
+  *peak_index = std::min(*peak_index,
+                         static_cast<int>(len - old_data_length_per_channel_));
 }
 PreemptiveExpand::ReturnCodes PreemptiveExpand::CheckCriteriaAndStretch(
-    const int16_t *input, int input_length, size_t peak_index,
+    const int16_t *input, size_t input_length, size_t peak_index,
     int16_t best_correlation, bool active_speech,
     AudioMultiVector<int16_t>* output) const {
   // Pre-calculate common multiplication with |fs_mult_|.
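One detail worth noting in the new std::min expression above: with |len| now a size_t, the difference len - old_data_length_per_channel_ is computed in unsigned arithmetic, so it is only meaningful while len is at least old_data_length_per_channel_; if that invariant ever broke, the subtraction would wrap to a huge positive value rather than go negative, and the cast to int would then be lossy. A tiny standalone sketch of the wrap behaviour, with illustrative values only:

// Sketch, assuming nothing beyond the standard library.
#include <cstddef>
#include <iostream>

int main() {
  size_t len = 120;
  size_t old_data_length_per_channel = 30;

  // Invariant holds: the difference is small and the cast to int is exact.
  std::cout << static_cast<int>(len - old_data_length_per_channel)
            << std::endl;  // Prints 90.

  // If the invariant were violated, unsigned subtraction would wrap:
  size_t bad_len = 10;
  std::cout << (bad_len - old_data_length_per_channel)
            << std::endl;  // Prints 2^64 - 20 on a 64-bit build.
  return 0;
}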

View File

@@ -45,21 +45,21 @@ class PreemptiveExpand : public TimeStretch {
   // the outcome of the operation as an enumerator value.
   ReturnCodes Process(const int16_t *pw16_decoded,
                       int len,
-                      int oldDataLen,
+                      int old_data_len,
                       AudioMultiVector<int16_t>* output,
                       int16_t* length_change_samples);
 protected:
   // Sets the parameters |best_correlation| and |peak_index| to suitable
   // values when the signal contains no active speech.
-  virtual void SetParametersForPassiveSpeech(int len,
+  virtual void SetParametersForPassiveSpeech(size_t len,
                                              int16_t* w16_bestCorr,
                                              int* w16_bestIndex) const;
   // Checks the criteria for performing the time-stretching operation and,
   // if possible, performs the time-stretching.
   virtual ReturnCodes CheckCriteriaAndStretch(
-      const int16_t *pw16_decoded, int len, size_t w16_bestIndex,
+      const int16_t *pw16_decoded, size_t len, size_t w16_bestIndex,
       int16_t w16_bestCorr, bool w16_VAD,
       AudioMultiVector<int16_t>* output) const;

View File

@@ -49,7 +49,8 @@ TimeStretch::ReturnCodes TimeStretch::Process(
   }
   // Find maximum absolute value of input signal.
-  max_input_value_ = WebRtcSpl_MaxAbsValueW16(signal, signal_len);
+  max_input_value_ = WebRtcSpl_MaxAbsValueW16(signal,
+                                              static_cast<int>(signal_len));
   // Downsample to 4 kHz sample rate and calculate auto-correlation.
   DspHelper::DownsampleTo4kHz(signal, signal_len, kDownsampledLen,
@@ -139,10 +140,8 @@ TimeStretch::ReturnCodes TimeStretch::Process(
   // Check accelerate criteria and stretch the signal.
-  ReturnCodes return_value = CheckCriteriaAndStretch(input, input_len,
-                                                     peak_index,
-                                                     best_correlation,
-                                                     active_speech, output);
+  ReturnCodes return_value = CheckCriteriaAndStretch(
+      input, input_len, peak_index, best_correlation, active_speech, output);
   switch (return_value) {
     case kSuccess:
       *length_change_samples = peak_index;

View File

@@ -39,7 +39,7 @@ class TimeStretch {
               const BackgroundNoise& background_noise)
       : sample_rate_hz_(sample_rate_hz),
         fs_mult_(sample_rate_hz / 8000),
-        num_channels_(num_channels),
+        num_channels_(static_cast<int>(num_channels)),
         master_channel_(0),  // First channel is master.
         background_noise_(background_noise),
         max_input_value_(0) {
@@ -65,7 +65,7 @@ class TimeStretch {
   // Sets the parameters |best_correlation| and |peak_index| to suitable
   // values when the signal contains no active speech. This method must be
   // implemented by the sub-classes.
-  virtual void SetParametersForPassiveSpeech(int input_length,
+  virtual void SetParametersForPassiveSpeech(size_t input_length,
                                              int16_t* best_correlation,
                                              int* peak_index) const = 0;
@@ -73,7 +73,7 @@ class TimeStretch {
   // if possible, performs the time-stretching. This method must be implemented
   // by the sub-classes.
   virtual ReturnCodes CheckCriteriaAndStretch(
-      const int16_t* input, int input_length, size_t peak_index,
+      const int16_t* input, size_t input_length, size_t peak_index,
       int16_t best_correlation, bool active_speech,
       AudioMultiVector<int16_t>* output) const = 0;
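Because SetParametersForPassiveSpeech and CheckCriteriaAndStretch are pure virtuals, the signature changes above only take effect if every subclass (Accelerate and PreemptiveExpand in this commit) is updated in lock-step; a subclass that kept the old int parameter would merely declare a different function and remain abstract. A reduced standalone sketch of that situation, using hypothetical class names and the C++11 'override' keyword, which turns such a mismatch into an immediate compile error:

// Sketch, assuming C++11 or later for the override keyword.
#include <cstddef>

class Stretcher {
 public:
  virtual ~Stretcher() {}
  // The interface now takes its length as size_t, as in TimeStretch above.
  virtual void SetParams(size_t input_length, int* peak_index) const = 0;
};

class GoodStretcher : public Stretcher {
 public:
  // Matches the base signature, so this really overrides the pure virtual.
  virtual void SetParams(size_t input_length, int* peak_index) const override {
    *peak_index = static_cast<int>(input_length) / 2;
  }
};

// class StaleStretcher : public Stretcher {
//  public:
//   // Kept the old int parameter: without 'override' this would only hide
//   // the base function and leave the class abstract; with 'override' the
//   // compiler rejects the declaration outright.
//   virtual void SetParams(int input_length, int* peak_index) const override;
// };

int main() {
  GoodStretcher s;
  int peak = 0;
  s.SetParams(240, &peak);
  return peak == 120 ? 0 : 1;
}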

View File

@@ -24,7 +24,7 @@ uint32_t RtpGenerator::GetRtpHeader(uint8_t payload_type,
   }
   rtp_header->header.sequenceNumber = seq_number_++;
   rtp_header->header.timestamp = timestamp_;
-  timestamp_ += payload_length_samples;
+  timestamp_ += static_cast<uint32_t>(payload_length_samples);
   rtp_header->header.payloadType = payload_type;
   rtp_header->header.markerBit = false;
   rtp_header->header.ssrc = ssrc_;
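The cast above differs slightly from the others in this commit: the RTP timestamp field is 32 bits by definition and wraps modulo 2^32, so accumulating a size_t sample count into it is intentional truncation, and the explicit static_cast<uint32_t> records that instead of leaving an implicit-narrowing warning on Win64. A small standalone sketch of the wrap-around, with illustrative values rather than the test helper itself:

// Sketch, assuming only <cstdint>; values are illustrative.
#include <cstdint>
#include <cstddef>
#include <iostream>

int main() {
  uint32_t timestamp = 0xFFFFFF00u;      // Close to the 32-bit limit.
  size_t payload_length_samples = 960;   // 64 bits on Win64.
  // Unsigned arithmetic wraps modulo 2^32, which is the defined and
  // expected behaviour for RTP timestamps.
  timestamp += static_cast<uint32_t>(payload_length_samples);
  std::cout << timestamp << std::endl;   // Prints 704 (wrapped past zero).
  return 0;
}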