Update a ton of audio code to use size_t more correctly and in general reduce use of int16_t/uint16_t.

This is the upshot of a recommendation by henrik.lundin and kwiberg on an original small change ( https://webrtc-codereview.appspot.com/42569004/#ps1 ) to stop using int16_t just because values could fit in it. It is similar in nature to a previous "mass change to use size_t more" ( https://webrtc-codereview.appspot.com/23129004/ ), which also needed to be split up for review but landed all at once, since, like adding "const", such changes tend to cause a lot of transitive effects.
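To illustrate the pattern (a sketch distilled from the G.711 interface change further down, not new API surface): a length that can only be a non-negative element count moves to size_t in the parameter, the loop index, and the return value, so callers no longer need narrowing casts.

  // Before: element counts squeezed into int16_t just because they fit.
  int16_t WebRtcG711_EncodeA(const int16_t* speechIn, int16_t len, uint8_t* encoded);

  // After: element counts carried as size_t end to end.
  size_t WebRtcG711_EncodeA(const int16_t* speechIn, size_t len, uint8_t* encoded);

  // A caller keeps the byte count in size_t with no cast:
  // size_t stream_len = WebRtcG711_EncodeA(shortdata, framelength, streamdata);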

This was reviewed and approved in pieces:
https://codereview.webrtc.org/1224093003
https://codereview.webrtc.org/1224123002
https://codereview.webrtc.org/1224163002
https://codereview.webrtc.org/1225133003
https://codereview.webrtc.org/1225173002
https://codereview.webrtc.org/1227163003
https://codereview.webrtc.org/1227203003
https://codereview.webrtc.org/1227213002
https://codereview.webrtc.org/1227893002
https://codereview.webrtc.org/1228793004
https://codereview.webrtc.org/1228803003
https://codereview.webrtc.org/1228823002
https://codereview.webrtc.org/1228823003
https://codereview.webrtc.org/1228843002
https://codereview.webrtc.org/1230693002
https://codereview.webrtc.org/1231713002

The change is being landed as TBR to all the folks who reviewed the above.

BUG=chromium:81439
TEST=none
R=andrew@webrtc.org, pbos@webrtc.org
TBR=aluebs, andrew, asapersson, henrika, hlundin, jan.skoglund, kwiberg, minyue, pbos, pthatcher

Review URL: https://codereview.webrtc.org/1230503003 .

Cr-Commit-Position: refs/heads/master@{#9768}
Peter Kasting
2015-08-24 14:52:23 -07:00
parent b594041ec8
commit dce40cf804
471 changed files with 3716 additions and 3499 deletions

View File

@ -56,7 +56,9 @@ int AudioDecoder::DecodeRedundantInternal(const uint8_t* encoded,
bool AudioDecoder::HasDecodePlc() const { return false; }
int AudioDecoder::DecodePlc(int num_frames, int16_t* decoded) { return 0; }
size_t AudioDecoder::DecodePlc(size_t num_frames, int16_t* decoded) {
return 0;
}
int AudioDecoder::IncomingPacket(const uint8_t* payload,
size_t payload_len,

View File

@ -62,7 +62,7 @@ class AudioDecoder {
// Calls the packet-loss concealment of the decoder to update the state after
// one or several lost packets.
virtual int DecodePlc(int num_frames, int16_t* decoded);
virtual size_t DecodePlc(size_t num_frames, int16_t* decoded);
// Initializes the decoder.
virtual int Init() = 0;
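As a hedged usage sketch of the new signature (MyDecoder is a made-up name, and the remaining pure-virtual members of AudioDecoder are omitted), an override now reports the concealed frame count as size_t:

  // Sketch only: a subclass overriding the size_t-based PLC entry point.
  class MyDecoder : public AudioDecoder {
   public:
    size_t DecodePlc(size_t num_frames, int16_t* decoded) override {
      // Produce up to |num_frames| frames of concealment audio into |decoded|
      // and return how many frames were actually written (0 = no PLC).
      return 0;
    }
    // ... other AudioDecoder members elided for brevity.
  };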

View File

@ -90,11 +90,11 @@ class AudioEncoder {
// the encoder may vary the number of 10 ms frames from packet to packet, but
// it must decide the length of the next packet no later than when outputting
// the preceding packet.
virtual int Num10MsFramesInNextPacket() const = 0;
virtual size_t Num10MsFramesInNextPacket() const = 0;
// Returns the maximum value that can be returned by
// Num10MsFramesInNextPacket().
virtual int Max10MsFramesInAPacket() const = 0;
virtual size_t Max10MsFramesInAPacket() const = 0;
// Returns the current target bitrate in bits/s. The value -1 means that the
// codec adapts the target automatically, and a current target cannot be

View File

@ -74,11 +74,11 @@ class AudioEncoderMutableImpl : public P {
CriticalSectionScoped cs(encoder_lock_.get());
return encoder_->RtpTimestampRateHz();
}
int Num10MsFramesInNextPacket() const override {
size_t Num10MsFramesInNextPacket() const override {
CriticalSectionScoped cs(encoder_lock_.get());
return encoder_->Num10MsFramesInNextPacket();
}
int Max10MsFramesInAPacket() const override {
size_t Max10MsFramesInAPacket() const override {
CriticalSectionScoped cs(encoder_lock_.get());
return encoder_->Max10MsFramesInAPacket();
}
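A hedged caller-side sketch (names are illustrative, assuming the usual webrtc namespace) of why returning size_t here is convenient: buffer-sizing arithmetic stays unsigned, with no casts against size_t-based containers.

  #include <vector>
  // Sketch: reserve room for the next packet's worth of 10 ms frames.
  void ReserveForNextPacket(const webrtc::AudioEncoder& encoder,
                            size_t samples_per_10ms_frame,
                            std::vector<int16_t>* speech_buffer) {
    const size_t frames = encoder.Num10MsFramesInNextPacket();
    speech_buffer->reserve(frames * samples_per_10ms_frame);  // no static_cast
  }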

View File

@ -38,7 +38,8 @@ bool AudioEncoderCng::Config::IsOk() const {
return false;
if (num_channels != speech_encoder->NumChannels())
return false;
if (sid_frame_interval_ms < speech_encoder->Max10MsFramesInAPacket() * 10)
if (sid_frame_interval_ms <
static_cast<int>(speech_encoder->Max10MsFramesInAPacket() * 10))
return false;
if (num_cng_coefficients > WEBRTC_CNG_MAX_LPC_ORDER ||
num_cng_coefficients <= 0)
@ -89,11 +90,11 @@ size_t AudioEncoderCng::MaxEncodedBytes() const {
return std::max(max_encoded_bytes_active, max_encoded_bytes_passive);
}
int AudioEncoderCng::Num10MsFramesInNextPacket() const {
size_t AudioEncoderCng::Num10MsFramesInNextPacket() const {
return speech_encoder_->Num10MsFramesInNextPacket();
}
int AudioEncoderCng::Max10MsFramesInAPacket() const {
size_t AudioEncoderCng::Max10MsFramesInAPacket() const {
return speech_encoder_->Max10MsFramesInAPacket();
}
@ -124,11 +125,11 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
for (size_t i = 0; i < samples_per_10ms_frame; ++i) {
speech_buffer_.push_back(audio[i]);
}
const int frames_to_encode = speech_encoder_->Num10MsFramesInNextPacket();
if (rtp_timestamps_.size() < static_cast<size_t>(frames_to_encode)) {
const size_t frames_to_encode = speech_encoder_->Num10MsFramesInNextPacket();
if (rtp_timestamps_.size() < frames_to_encode) {
return EncodedInfo();
}
CHECK_LE(frames_to_encode * 10, kMaxFrameSizeMs)
CHECK_LE(static_cast<int>(frames_to_encode * 10), kMaxFrameSizeMs)
<< "Frame size cannot be larger than " << kMaxFrameSizeMs
<< " ms when using VAD/CNG.";
@ -136,12 +137,12 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
// following split sizes:
// 10 ms = 10 + 0 ms; 20 ms = 20 + 0 ms; 30 ms = 30 + 0 ms;
// 40 ms = 20 + 20 ms; 50 ms = 30 + 20 ms; 60 ms = 30 + 30 ms.
int blocks_in_first_vad_call =
size_t blocks_in_first_vad_call =
(frames_to_encode > 3 ? 3 : frames_to_encode);
if (frames_to_encode == 4)
blocks_in_first_vad_call = 2;
CHECK_GE(frames_to_encode, blocks_in_first_vad_call);
const int blocks_in_second_vad_call =
const size_t blocks_in_second_vad_call =
frames_to_encode - blocks_in_first_vad_call;
// Check if all of the buffer is passive speech. Start with checking the first
@ -183,7 +184,7 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
}
AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
int frames_to_encode,
size_t frames_to_encode,
size_t max_encoded_bytes,
uint8_t* encoded) {
bool force_sid = last_frame_active_;
@ -191,15 +192,19 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
const size_t samples_per_10ms_frame = SamplesPer10msFrame();
CHECK_GE(max_encoded_bytes, frames_to_encode * samples_per_10ms_frame);
AudioEncoder::EncodedInfo info;
for (int i = 0; i < frames_to_encode; ++i) {
int16_t encoded_bytes_tmp = 0;
for (size_t i = 0; i < frames_to_encode; ++i) {
// It's important not to pass &info.encoded_bytes directly to
// WebRtcCng_Encode(), since later loop iterations may return zero in that
// value, in which case we don't want to overwrite any value from an earlier
// iteration.
size_t encoded_bytes_tmp = 0;
CHECK_GE(WebRtcCng_Encode(cng_inst_.get(),
&speech_buffer_[i * samples_per_10ms_frame],
static_cast<int16_t>(samples_per_10ms_frame),
samples_per_10ms_frame,
encoded, &encoded_bytes_tmp, force_sid), 0);
if (encoded_bytes_tmp > 0) {
CHECK(!output_produced);
info.encoded_bytes = static_cast<size_t>(encoded_bytes_tmp);
info.encoded_bytes = encoded_bytes_tmp;
output_produced = true;
force_sid = false;
}
@ -212,12 +217,12 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
}
AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive(
int frames_to_encode,
size_t frames_to_encode,
size_t max_encoded_bytes,
uint8_t* encoded) {
const size_t samples_per_10ms_frame = SamplesPer10msFrame();
AudioEncoder::EncodedInfo info;
for (int i = 0; i < frames_to_encode; ++i) {
for (size_t i = 0; i < frames_to_encode; ++i) {
info = speech_encoder_->Encode(
rtp_timestamps_.front(), &speech_buffer_[i * samples_per_10ms_frame],
samples_per_10ms_frame, max_encoded_bytes, encoded);
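The split-size comment above is the whole algorithm for feeding the VAD in at most 30 ms chunks: 10/20/30 ms packets go in one call, 40 ms splits 20 + 20, 50 ms splits 30 + 20, 60 ms splits 30 + 30. A hedged standalone sketch of that rule (SplitForVad is a made-up helper mirroring blocks_in_first_vad_call above):

  #include <cstddef>
  #include <utility>
  // Returns {first, second} block counts for a packet of |frames| 10 ms blocks.
  std::pair<size_t, size_t> SplitForVad(size_t frames) {
    size_t first = frames > 3 ? 3 : frames;  // at most 30 ms per VAD call
    if (frames == 4)
      first = 2;                             // 40 ms = 20 + 20 ms
    return {first, frames - first};
  }
  // SplitForVad(1..6) -> {1,0} {2,0} {3,0} {2,2} {3,2} {3,3}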

View File

@ -59,14 +59,14 @@ class AudioEncoderCngTest : public ::testing::Test {
void CreateCng() {
// The config_ parameters may be changed by the TEST_Fs up until CreateCng()
// is called, thus we cannot use the values until now.
num_audio_samples_10ms_ = 10 * sample_rate_hz_ / 1000;
num_audio_samples_10ms_ = static_cast<size_t>(10 * sample_rate_hz_ / 1000);
ASSERT_LE(num_audio_samples_10ms_, kMaxNumSamples);
EXPECT_CALL(mock_encoder_, SampleRateHz())
.WillRepeatedly(Return(sample_rate_hz_));
// Max10MsFramesInAPacket() is just used to verify that the SID frame period
// is not too small. The return value does not matter that much, as long as
// it is smaller than 10.
EXPECT_CALL(mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(1));
EXPECT_CALL(mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(1u));
EXPECT_CALL(mock_encoder_, MaxEncodedBytes())
.WillRepeatedly(Return(kMockMaxEncodedBytes));
cng_.reset(new AudioEncoderCng(config_));
@ -83,10 +83,10 @@ class AudioEncoderCngTest : public ::testing::Test {
// Expect |num_calls| calls to the encoder, all successful. The last call
// claims to have encoded |kMockMaxEncodedBytes| bytes, and all the preceding
// ones 0 bytes.
void ExpectEncodeCalls(int num_calls) {
void ExpectEncodeCalls(size_t num_calls) {
InSequence s;
AudioEncoder::EncodedInfo info;
for (int j = 0; j < num_calls - 1; ++j) {
for (size_t j = 0; j < num_calls - 1; ++j) {
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
.WillOnce(Return(info));
}
@ -98,7 +98,7 @@ class AudioEncoderCngTest : public ::testing::Test {
// Verifies that the cng_ object waits until it has collected
// |blocks_per_frame| blocks of audio, and then dispatches all of them to
// the underlying codec (speech or cng).
void CheckBlockGrouping(int blocks_per_frame, bool active_speech) {
void CheckBlockGrouping(size_t blocks_per_frame, bool active_speech) {
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
.WillRepeatedly(Return(blocks_per_frame));
CreateCng();
@ -107,7 +107,7 @@ class AudioEncoderCngTest : public ::testing::Test {
// Don't expect any calls to the encoder yet.
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
for (int i = 0; i < blocks_per_frame - 1; ++i) {
for (size_t i = 0; i < blocks_per_frame - 1; ++i) {
Encode();
EXPECT_EQ(0u, encoded_info_.encoded_bytes);
}
@ -127,14 +127,15 @@ class AudioEncoderCngTest : public ::testing::Test {
void CheckVadInputSize(int input_frame_size_ms,
int expected_first_block_size_ms,
int expected_second_block_size_ms) {
const int blocks_per_frame = input_frame_size_ms / 10;
const size_t blocks_per_frame =
static_cast<size_t>(input_frame_size_ms / 10);
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
.WillRepeatedly(Return(blocks_per_frame));
// Expect nothing to happen before the last block is sent to cng_.
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _)).Times(0);
for (int i = 0; i < blocks_per_frame - 1; ++i) {
for (size_t i = 0; i < blocks_per_frame - 1; ++i) {
Encode();
}
@ -163,7 +164,7 @@ class AudioEncoderCngTest : public ::testing::Test {
Vad::Activity second_type) {
// Set the speech encoder frame size to 60 ms, to ensure that the VAD will
// be called twice.
const int blocks_per_frame = 6;
const size_t blocks_per_frame = 6;
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
.WillRepeatedly(Return(blocks_per_frame));
InSequence s;
@ -175,7 +176,7 @@ class AudioEncoderCngTest : public ::testing::Test {
.WillOnce(Return(second_type));
}
encoded_info_.payload_type = 0;
for (int i = 0; i < blocks_per_frame; ++i) {
for (size_t i = 0; i < blocks_per_frame; ++i) {
Encode();
}
return encoded_info_.payload_type != kCngPayloadType;
@ -199,8 +200,8 @@ TEST_F(AudioEncoderCngTest, CreateAndDestroy) {
TEST_F(AudioEncoderCngTest, CheckFrameSizePropagation) {
CreateCng();
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(17));
EXPECT_EQ(17, cng_->Num10MsFramesInNextPacket());
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(17U));
EXPECT_EQ(17U, cng_->Num10MsFramesInNextPacket());
}
TEST_F(AudioEncoderCngTest, CheckChangeBitratePropagation) {
@ -217,7 +218,7 @@ TEST_F(AudioEncoderCngTest, CheckProjectedPacketLossRatePropagation) {
TEST_F(AudioEncoderCngTest, EncodeCallsVad) {
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
.WillRepeatedly(Return(1));
.WillRepeatedly(Return(1U));
CreateCng();
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
.WillOnce(Return(Vad::kPassive));
@ -249,7 +250,7 @@ TEST_F(AudioEncoderCngTest, EncodeCollects3BlocksActiveSpeech) {
}
TEST_F(AudioEncoderCngTest, EncodePassive) {
const int kBlocksPerFrame = 3;
const size_t kBlocksPerFrame = 3;
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
.WillRepeatedly(Return(kBlocksPerFrame));
CreateCng();
@ -258,7 +259,7 @@ TEST_F(AudioEncoderCngTest, EncodePassive) {
// Expect no calls at all to the speech encoder mock.
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
uint32_t expected_timestamp = timestamp_;
for (int i = 0; i < 100; ++i) {
for (size_t i = 0; i < 100; ++i) {
Encode();
// Check if it was time to call the cng encoder. This is done once every
// |kBlocksPerFrame| calls.
@ -339,7 +340,7 @@ TEST_F(AudioEncoderCngTest, VadInputSize60Ms) {
TEST_F(AudioEncoderCngTest, VerifyCngPayloadType) {
CreateCng();
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1U));
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
.WillOnce(Return(Vad::kPassive));
encoded_info_.payload_type = 0;
@ -352,7 +353,7 @@ TEST_F(AudioEncoderCngTest, VerifyCngPayloadType) {
TEST_F(AudioEncoderCngTest, VerifySidFrameAfterSpeech) {
CreateCng();
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
.WillRepeatedly(Return(1));
.WillRepeatedly(Return(1U));
// Start with encoding noise.
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
.Times(2)
@ -443,7 +444,7 @@ TEST_F(AudioEncoderCngDeathTest, Stereo) {
TEST_F(AudioEncoderCngDeathTest, EncoderFrameSizeTooLarge) {
CreateCng();
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
.WillRepeatedly(Return(7));
.WillRepeatedly(Return(7U));
for (int i = 0; i < 6; ++i)
Encode();
EXPECT_DEATH(Encode(),

View File

@ -99,7 +99,7 @@ TEST_F(CngTest, CngInitFail) {
TEST_F(CngTest, CngEncode) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
int16_t number_bytes;
size_t number_bytes;
// Create encoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@ -151,7 +151,7 @@ TEST_F(CngTest, CngEncode) {
// Encode Cng with too long input vector.
TEST_F(CngTest, CngEncodeTooLong) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
int16_t number_bytes;
size_t number_bytes;
// Create and init encoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@ -170,7 +170,7 @@ TEST_F(CngTest, CngEncodeTooLong) {
// Call encode without calling init.
TEST_F(CngTest, CngEncodeNoInit) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
int16_t number_bytes;
size_t number_bytes;
// Create encoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@ -187,7 +187,7 @@ TEST_F(CngTest, CngEncodeNoInit) {
// Update SID parameters, for both 9 and 16 parameters.
TEST_F(CngTest, CngUpdateSid) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
int16_t number_bytes;
size_t number_bytes;
// Create and initialize encoder and decoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@ -224,7 +224,7 @@ TEST_F(CngTest, CngUpdateSid) {
// Update SID parameters, with wrong parameters or without calling decode.
TEST_F(CngTest, CngUpdateSidErroneous) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
int16_t number_bytes;
size_t number_bytes;
// Create encoder and decoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@ -261,7 +261,7 @@ TEST_F(CngTest, CngUpdateSidErroneous) {
TEST_F(CngTest, CngGenerate) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
int16_t out_data[640];
int16_t number_bytes;
size_t number_bytes;
// Create and initialize encoder and decoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@ -294,7 +294,7 @@ TEST_F(CngTest, CngGenerate) {
// Test automatic SID.
TEST_F(CngTest, CngAutoSid) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
int16_t number_bytes;
size_t number_bytes;
// Create and initialize encoder and decoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@ -321,7 +321,7 @@ TEST_F(CngTest, CngAutoSid) {
// Test automatic SID, with very short interval.
TEST_F(CngTest, CngAutoSidShort) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
int16_t number_bytes;
size_t number_bytes;
// Create and initialize encoder and decoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
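A hedged fragment showing a call against the new C interface (one 10 ms block at 8 kHz; the size_t-based signature appears in webrtc_cng.h below, and the buffer names mirror the tests above):

  int16_t speech_10ms[80] = {0};
  uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
  size_t number_bytes = 0;  // was int16_t before this change
  if (WebRtcCng_Encode(cng_enc_inst_, speech_10ms, 80, sid_data,
                       &number_bytes, 1 /* forceSID */) >= 0) {
    // number_bytes holds the SID payload length if a SID frame was produced.
  }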

View File

@ -50,8 +50,8 @@ class AudioEncoderCng final : public AudioEncoder {
int NumChannels() const override;
size_t MaxEncodedBytes() const override;
int RtpTimestampRateHz() const override;
int Num10MsFramesInNextPacket() const override;
int Max10MsFramesInAPacket() const override;
size_t Num10MsFramesInNextPacket() const override;
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
void SetTargetBitrate(int bits_per_second) override;
void SetProjectedPacketLossRate(double fraction) override;
@ -67,10 +67,10 @@ class AudioEncoderCng final : public AudioEncoder {
inline void operator()(CNG_enc_inst* ptr) const { WebRtcCng_FreeEnc(ptr); }
};
EncodedInfo EncodePassive(int frames_to_encode,
EncodedInfo EncodePassive(size_t frames_to_encode,
size_t max_encoded_bytes,
uint8_t* encoded);
EncodedInfo EncodeActive(int frames_to_encode,
EncodedInfo EncodeActive(size_t frames_to_encode,
size_t max_encoded_bytes,
uint8_t* encoded);
size_t SamplesPer10msFrame() const;

View File

@ -104,8 +104,8 @@ int16_t WebRtcCng_FreeDec(CNG_dec_inst* cng_inst);
* -1 - Error
*/
int WebRtcCng_Encode(CNG_enc_inst* cng_inst, int16_t* speech,
int16_t nrOfSamples, uint8_t* SIDdata,
int16_t* bytesOut, int16_t forceSID);
size_t nrOfSamples, uint8_t* SIDdata,
size_t* bytesOut, int16_t forceSID);
/****************************************************************************
* WebRtcCng_UpdateSid(...)
@ -138,7 +138,7 @@ int16_t WebRtcCng_UpdateSid(CNG_dec_inst* cng_inst, uint8_t* SID,
* -1 - Error
*/
int16_t WebRtcCng_Generate(CNG_dec_inst* cng_inst, int16_t* outData,
int16_t nrOfSamples, int16_t new_period);
size_t nrOfSamples, int16_t new_period);
/*****************************************************************************
* WebRtcCng_GetErrorCodeEnc/Dec(...)

View File

@ -35,7 +35,7 @@ typedef struct WebRtcCngDecoder_ {
} WebRtcCngDecoder;
typedef struct WebRtcCngEncoder_ {
int16_t enc_nrOfCoefs;
size_t enc_nrOfCoefs;
int enc_sampfreq;
int16_t enc_interval;
int16_t enc_msSinceSID;
@ -228,8 +228,8 @@ int16_t WebRtcCng_FreeDec(CNG_dec_inst* cng_inst) {
* -1 - Error
*/
int WebRtcCng_Encode(CNG_enc_inst* cng_inst, int16_t* speech,
int16_t nrOfSamples, uint8_t* SIDdata,
int16_t* bytesOut, int16_t forceSID) {
size_t nrOfSamples, uint8_t* SIDdata,
size_t* bytesOut, int16_t forceSID) {
WebRtcCngEncoder* inst = (WebRtcCngEncoder*) cng_inst;
int16_t arCoefs[WEBRTC_CNG_MAX_LPC_ORDER + 1];
@ -240,10 +240,11 @@ int WebRtcCng_Encode(CNG_enc_inst* cng_inst, int16_t* speech,
int16_t ReflBetaComp = 13107; /* 0.4 in q15. */
int32_t outEnergy;
int outShifts;
int i, stab;
size_t i;
int stab;
int acorrScale;
int index;
int16_t ind, factor;
size_t index;
size_t ind, factor;
int32_t* bptr;
int32_t blo, bhi;
int16_t negate;
@ -281,7 +282,7 @@ int WebRtcCng_Encode(CNG_enc_inst* cng_inst, int16_t* speech,
outShifts--;
}
}
outEnergy = WebRtcSpl_DivW32W16(outEnergy, factor);
outEnergy = WebRtcSpl_DivW32W16(outEnergy, (int16_t)factor);
if (outEnergy > 1) {
/* Create Hanning Window. */
@ -390,7 +391,7 @@ int WebRtcCng_Encode(CNG_enc_inst* cng_inst, int16_t* speech,
inst->enc_msSinceSID +=
(int16_t)((1000 * nrOfSamples) / inst->enc_sampfreq);
return inst->enc_nrOfCoefs + 1;
return (int)(inst->enc_nrOfCoefs + 1);
} else {
inst->enc_msSinceSID +=
(int16_t)((1000 * nrOfSamples) / inst->enc_sampfreq);
@ -475,10 +476,10 @@ int16_t WebRtcCng_UpdateSid(CNG_dec_inst* cng_inst, uint8_t* SID,
* -1 - Error
*/
int16_t WebRtcCng_Generate(CNG_dec_inst* cng_inst, int16_t* outData,
int16_t nrOfSamples, int16_t new_period) {
size_t nrOfSamples, int16_t new_period) {
WebRtcCngDecoder* inst = (WebRtcCngDecoder*) cng_inst;
int i;
size_t i;
int16_t excitation[WEBRTC_CNG_MAX_OUTSIZE_ORDER];
int16_t low[WEBRTC_CNG_MAX_OUTSIZE_ORDER];
int16_t lpPoly[WEBRTC_CNG_MAX_LPC_ORDER + 1];

View File

@ -37,7 +37,8 @@ AudioEncoderPcm::AudioEncoderPcm(const Config& config, int sample_rate_hz)
: sample_rate_hz_(sample_rate_hz),
num_channels_(config.num_channels),
payload_type_(config.payload_type),
num_10ms_frames_per_packet_(config.frame_size_ms / 10),
num_10ms_frames_per_packet_(
static_cast<size_t>(config.frame_size_ms / 10)),
full_frame_samples_(NumSamplesPerFrame(config.num_channels,
config.frame_size_ms,
sample_rate_hz_)),
@ -63,11 +64,11 @@ size_t AudioEncoderPcm::MaxEncodedBytes() const {
return full_frame_samples_ * BytesPerSample();
}
int AudioEncoderPcm::Num10MsFramesInNextPacket() const {
size_t AudioEncoderPcm::Num10MsFramesInNextPacket() const {
return num_10ms_frames_per_packet_;
}
int AudioEncoderPcm::Max10MsFramesInAPacket() const {
size_t AudioEncoderPcm::Max10MsFramesInAPacket() const {
return num_10ms_frames_per_packet_;
}
@ -95,27 +96,26 @@ AudioEncoder::EncodedInfo AudioEncoderPcm::EncodeInternal(
EncodedInfo info;
info.encoded_timestamp = first_timestamp_in_buffer_;
info.payload_type = payload_type_;
int16_t ret = EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
CHECK_GE(ret, 0);
info.encoded_bytes = static_cast<size_t>(ret);
info.encoded_bytes =
EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
speech_buffer_.clear();
return info;
}
int16_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
size_t input_len,
uint8_t* encoded) {
return WebRtcG711_EncodeA(audio, static_cast<int16_t>(input_len), encoded);
size_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
size_t input_len,
uint8_t* encoded) {
return WebRtcG711_EncodeA(audio, input_len, encoded);
}
int AudioEncoderPcmA::BytesPerSample() const {
return 1;
}
int16_t AudioEncoderPcmU::EncodeCall(const int16_t* audio,
size_t input_len,
uint8_t* encoded) {
return WebRtcG711_EncodeU(audio, static_cast<int16_t>(input_len), encoded);
size_t AudioEncoderPcmU::EncodeCall(const int16_t* audio,
size_t input_len,
uint8_t* encoded) {
return WebRtcG711_EncodeU(audio, input_len, encoded);
}
int AudioEncoderPcmU::BytesPerSample() const {

View File

@ -12,40 +12,40 @@
#include "g711_interface.h"
#include "webrtc/typedefs.h"
int16_t WebRtcG711_EncodeA(const int16_t* speechIn,
int16_t len,
uint8_t* encoded) {
int n;
size_t WebRtcG711_EncodeA(const int16_t* speechIn,
size_t len,
uint8_t* encoded) {
size_t n;
for (n = 0; n < len; n++)
encoded[n] = linear_to_alaw(speechIn[n]);
return len;
}
int16_t WebRtcG711_EncodeU(const int16_t* speechIn,
int16_t len,
uint8_t* encoded) {
int n;
size_t WebRtcG711_EncodeU(const int16_t* speechIn,
size_t len,
uint8_t* encoded) {
size_t n;
for (n = 0; n < len; n++)
encoded[n] = linear_to_ulaw(speechIn[n]);
return len;
}
int16_t WebRtcG711_DecodeA(const uint8_t* encoded,
int16_t len,
int16_t* decoded,
int16_t* speechType) {
int n;
size_t WebRtcG711_DecodeA(const uint8_t* encoded,
size_t len,
int16_t* decoded,
int16_t* speechType) {
size_t n;
for (n = 0; n < len; n++)
decoded[n] = alaw_to_linear(encoded[n]);
*speechType = 1;
return len;
}
int16_t WebRtcG711_DecodeU(const uint8_t* encoded,
int16_t len,
int16_t* decoded,
int16_t* speechType) {
int n;
size_t WebRtcG711_DecodeU(const uint8_t* encoded,
size_t len,
int16_t* decoded,
int16_t* speechType) {
size_t n;
for (n = 0; n < len; n++)
decoded[n] = ulaw_to_linear(encoded[n]);
*speechType = 1;

View File

@ -39,8 +39,8 @@ class AudioEncoderPcm : public AudioEncoder {
int SampleRateHz() const override;
int NumChannels() const override;
size_t MaxEncodedBytes() const override;
int Num10MsFramesInNextPacket() const override;
int Max10MsFramesInAPacket() const override;
size_t Num10MsFramesInNextPacket() const override;
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
@ -50,9 +50,9 @@ class AudioEncoderPcm : public AudioEncoder {
protected:
AudioEncoderPcm(const Config& config, int sample_rate_hz);
virtual int16_t EncodeCall(const int16_t* audio,
size_t input_len,
uint8_t* encoded) = 0;
virtual size_t EncodeCall(const int16_t* audio,
size_t input_len,
uint8_t* encoded) = 0;
virtual int BytesPerSample() const = 0;
@ -60,7 +60,7 @@ class AudioEncoderPcm : public AudioEncoder {
const int sample_rate_hz_;
const int num_channels_;
const int payload_type_;
const int num_10ms_frames_per_packet_;
const size_t num_10ms_frames_per_packet_;
const size_t full_frame_samples_;
std::vector<int16_t> speech_buffer_;
uint32_t first_timestamp_in_buffer_;
@ -76,9 +76,9 @@ class AudioEncoderPcmA final : public AudioEncoderPcm {
: AudioEncoderPcm(config, kSampleRateHz) {}
protected:
int16_t EncodeCall(const int16_t* audio,
size_t input_len,
uint8_t* encoded) override;
size_t EncodeCall(const int16_t* audio,
size_t input_len,
uint8_t* encoded) override;
int BytesPerSample() const override;
@ -96,9 +96,9 @@ class AudioEncoderPcmU final : public AudioEncoderPcm {
: AudioEncoderPcm(config, kSampleRateHz) {}
protected:
int16_t EncodeCall(const int16_t* audio,
size_t input_len,
uint8_t* encoded) override;
size_t EncodeCall(const int16_t* audio,
size_t input_len,
uint8_t* encoded) override;
int BytesPerSample() const override;

View File

@ -38,9 +38,9 @@ extern "C" {
* Always equal to len input parameter.
*/
int16_t WebRtcG711_EncodeA(const int16_t* speechIn,
int16_t len,
uint8_t* encoded);
size_t WebRtcG711_EncodeA(const int16_t* speechIn,
size_t len,
uint8_t* encoded);
/****************************************************************************
* WebRtcG711_EncodeU(...)
@ -59,9 +59,9 @@ int16_t WebRtcG711_EncodeA(const int16_t* speechIn,
* Always equal to len input parameter.
*/
int16_t WebRtcG711_EncodeU(const int16_t* speechIn,
int16_t len,
uint8_t* encoded);
size_t WebRtcG711_EncodeU(const int16_t* speechIn,
size_t len,
uint8_t* encoded);
/****************************************************************************
* WebRtcG711_DecodeA(...)
@ -82,10 +82,10 @@ int16_t WebRtcG711_EncodeU(const int16_t* speechIn,
* -1 - Error
*/
int16_t WebRtcG711_DecodeA(const uint8_t* encoded,
int16_t len,
int16_t* decoded,
int16_t* speechType);
size_t WebRtcG711_DecodeA(const uint8_t* encoded,
size_t len,
int16_t* decoded,
int16_t* speechType);
/****************************************************************************
* WebRtcG711_DecodeU(...)
@ -106,10 +106,10 @@ int16_t WebRtcG711_DecodeA(const uint8_t* encoded,
* -1 - Error
*/
int16_t WebRtcG711_DecodeU(const uint8_t* encoded,
int16_t len,
int16_t* decoded,
int16_t* speechType);
size_t WebRtcG711_DecodeU(const uint8_t* encoded,
size_t len,
int16_t* decoded,
int16_t* speechType);
/**********************************************************************
* WebRtcG711_Version(...)

View File

@ -24,8 +24,8 @@
#define CLOCKS_PER_SEC_G711 1000
/* function for reading audio data from PCM file */
bool readframe(int16_t* data, FILE* inp, int length) {
short rlen = (short) fread(data, sizeof(int16_t), length, inp);
bool readframe(int16_t* data, FILE* inp, size_t length) {
size_t rlen = fread(data, sizeof(int16_t), length, inp);
if (rlen >= length)
return false;
memset(data + rlen, 0, (length - rlen) * sizeof(int16_t));
@ -40,16 +40,14 @@ int main(int argc, char* argv[]) {
int framecnt;
bool endfile;
int16_t framelength = 80;
int err;
size_t framelength = 80;
/* Runtime statistics */
double starttime;
double runtime;
double length_file;
int16_t stream_len = 0;
size_t stream_len = 0;
int16_t shortdata[480];
int16_t decoded[480];
uint8_t streamdata[1000];
@ -80,11 +78,12 @@ int main(int argc, char* argv[]) {
printf("-----------------------------------\n");
printf("G.711 version: %s\n\n", versionNumber);
/* Get frame length */
framelength = atoi(argv[1]);
if (framelength < 0) {
printf(" G.711: Invalid framelength %d.\n", framelength);
exit(1);
int framelength_int = atoi(argv[1]);
if (framelength_int < 0) {
printf(" G.722: Invalid framelength %d.\n", framelength_int);
exit(1);
}
framelength = static_cast<size_t>(framelength_int);
/* Get compression law */
strcpy(law, argv[2]);
@ -130,36 +129,29 @@ int main(int argc, char* argv[]) {
if (argc == 6) {
/* Write bits to file */
if (fwrite(streamdata, sizeof(unsigned char), stream_len, bitp) !=
static_cast<size_t>(stream_len)) {
stream_len) {
return -1;
}
}
err = WebRtcG711_DecodeA(streamdata, stream_len, decoded,
speechType);
WebRtcG711_DecodeA(streamdata, stream_len, decoded, speechType);
} else if (!strcmp(law, "u")) {
/* u-law encoding */
stream_len = WebRtcG711_EncodeU(shortdata, framelength, streamdata);
if (argc == 6) {
/* Write bits to file */
if (fwrite(streamdata, sizeof(unsigned char), stream_len, bitp) !=
static_cast<size_t>(stream_len)) {
stream_len) {
return -1;
}
}
err = WebRtcG711_DecodeU(streamdata, stream_len, decoded, speechType);
WebRtcG711_DecodeU(streamdata, stream_len, decoded, speechType);
} else {
printf("Wrong law mode\n");
exit(1);
}
if (stream_len < 0 || err < 0) {
/* exit if returned with error */
printf("Error in encoder/decoder\n");
} else {
/* Write coded speech to file */
if (fwrite(decoded, sizeof(short), framelength, outp) !=
static_cast<size_t>(framelength)) {
return -1;
}
/* Write coded speech to file */
if (fwrite(decoded, sizeof(short), framelength, outp) != framelength) {
return -1;
}
}
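The test programs in this change repeat the same small pattern for reading a frame length from argv: parse as int, reject negatives, then widen to size_t. A hedged helper-style sketch of that pattern (ParseFrameLength is a made-up name):

  #include <cstdio>
  #include <cstdlib>
  // Sketch: validate a command-line frame length before storing it as size_t.
  static size_t ParseFrameLength(const char* arg) {
    const int value = atoi(arg);
    if (value < 0) {
      printf("Invalid framelength %d.\n", value);
      exit(1);
    }
    return static_cast<size_t>(value);
  }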

View File

@ -19,7 +19,7 @@ namespace webrtc {
namespace {
const int kSampleRateHz = 16000;
const size_t kSampleRateHz = 16000;
} // namespace
@ -40,13 +40,14 @@ AudioEncoderG722::EncoderState::~EncoderState() {
AudioEncoderG722::AudioEncoderG722(const Config& config)
: num_channels_(config.num_channels),
payload_type_(config.payload_type),
num_10ms_frames_per_packet_(config.frame_size_ms / 10),
num_10ms_frames_per_packet_(
static_cast<size_t>(config.frame_size_ms / 10)),
num_10ms_frames_buffered_(0),
first_timestamp_in_buffer_(0),
encoders_(new EncoderState[num_channels_]),
interleave_buffer_(2 * num_channels_) {
CHECK(config.IsOk());
const int samples_per_channel =
const size_t samples_per_channel =
kSampleRateHz / 100 * num_10ms_frames_per_packet_;
for (int i = 0; i < num_channels_; ++i) {
encoders_[i].speech_buffer.reset(new int16_t[samples_per_channel]);
@ -71,14 +72,14 @@ int AudioEncoderG722::NumChannels() const {
}
size_t AudioEncoderG722::MaxEncodedBytes() const {
return static_cast<size_t>(SamplesPerChannel() / 2 * num_channels_);
return SamplesPerChannel() / 2 * num_channels_;
}
int AudioEncoderG722::Num10MsFramesInNextPacket() const {
size_t AudioEncoderG722::Num10MsFramesInNextPacket() const {
return num_10ms_frames_per_packet_;
}
int AudioEncoderG722::Max10MsFramesInAPacket() const {
size_t AudioEncoderG722::Max10MsFramesInAPacket() const {
return num_10ms_frames_per_packet_;
}
@ -98,8 +99,8 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
first_timestamp_in_buffer_ = rtp_timestamp;
// Deinterleave samples and save them in each channel's buffer.
const int start = kSampleRateHz / 100 * num_10ms_frames_buffered_;
for (int i = 0; i < kSampleRateHz / 100; ++i)
const size_t start = kSampleRateHz / 100 * num_10ms_frames_buffered_;
for (size_t i = 0; i < kSampleRateHz / 100; ++i)
for (int j = 0; j < num_channels_; ++j)
encoders_[j].speech_buffer[start + i] = audio[i * num_channels_ + j];
@ -111,19 +112,18 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
// Encode each channel separately.
CHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
num_10ms_frames_buffered_ = 0;
const int samples_per_channel = SamplesPerChannel();
const size_t samples_per_channel = SamplesPerChannel();
for (int i = 0; i < num_channels_; ++i) {
const int encoded = WebRtcG722_Encode(
const size_t encoded = WebRtcG722_Encode(
encoders_[i].encoder, encoders_[i].speech_buffer.get(),
samples_per_channel, encoders_[i].encoded_buffer.data<uint8_t>());
CHECK_GE(encoded, 0);
CHECK_EQ(encoded, samples_per_channel / 2);
}
// Interleave the encoded bytes of the different channels. Each separate
// channel and the interleaved stream encodes two samples per byte, most
// significant half first.
for (int i = 0; i < samples_per_channel / 2; ++i) {
for (size_t i = 0; i < samples_per_channel / 2; ++i) {
for (int j = 0; j < num_channels_; ++j) {
uint8_t two_samples = encoders_[j].encoded_buffer.data()[i];
interleave_buffer_.data()[j] = two_samples >> 4;
@ -140,7 +140,7 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
return info;
}
int AudioEncoderG722::SamplesPerChannel() const {
size_t AudioEncoderG722::SamplesPerChannel() const {
return kSampleRateHz / 100 * num_10ms_frames_per_packet_;
}

View File

@ -188,8 +188,8 @@ int WebRtc_g722_decode_release(G722DecoderState *s)
}
/*- End of function --------------------------------------------------------*/
int WebRtc_g722_decode(G722DecoderState *s, int16_t amp[],
const uint8_t g722_data[], int len)
size_t WebRtc_g722_decode(G722DecoderState *s, int16_t amp[],
const uint8_t g722_data[], size_t len)
{
static const int wl[8] = {-60, -30, 58, 172, 334, 538, 1198, 3042 };
static const int rl42[16] = {0, 7, 6, 5, 4, 3, 2, 1,
@ -258,9 +258,9 @@ int WebRtc_g722_decode(G722DecoderState *s, int16_t amp[],
int wd2;
int wd3;
int code;
int outlen;
size_t outlen;
int i;
int j;
size_t j;
outlen = 0;
rhigh = 0;

View File

@ -139,19 +139,19 @@ G722EncoderState* WebRtc_g722_encode_init(G722EncoderState* s,
int rate,
int options);
int WebRtc_g722_encode_release(G722EncoderState *s);
int WebRtc_g722_encode(G722EncoderState *s,
uint8_t g722_data[],
const int16_t amp[],
int len);
size_t WebRtc_g722_encode(G722EncoderState *s,
uint8_t g722_data[],
const int16_t amp[],
size_t len);
G722DecoderState* WebRtc_g722_decode_init(G722DecoderState* s,
int rate,
int options);
int WebRtc_g722_decode_release(G722DecoderState *s);
int WebRtc_g722_decode(G722DecoderState *s,
int16_t amp[],
const uint8_t g722_data[],
int len);
size_t WebRtc_g722_decode(G722DecoderState *s,
int16_t amp[],
const uint8_t g722_data[],
size_t len);
#ifdef __cplusplus
}

View File

@ -202,8 +202,8 @@ int16_t limitValues (int16_t rl)
}
#endif
int WebRtc_g722_encode(G722EncoderState *s, uint8_t g722_data[],
const int16_t amp[], int len)
size_t WebRtc_g722_encode(G722EncoderState *s, uint8_t g722_data[],
const int16_t amp[], size_t len)
{
static const int q6[32] =
{
@ -275,11 +275,11 @@ int WebRtc_g722_encode(G722EncoderState *s, uint8_t g722_data[],
int eh;
int mih;
int i;
int j;
size_t j;
/* Low and high band PCM from the QMF */
int xlow;
int xhigh;
int g722_bytes;
size_t g722_bytes;
/* Even and odd tap accumulators */
int sumeven;
int sumodd;

View File

@ -45,10 +45,10 @@ int WebRtcG722_FreeEncoder(G722EncInst *G722enc_inst)
return WebRtc_g722_encode_release((G722EncoderState*) G722enc_inst);
}
int16_t WebRtcG722_Encode(G722EncInst *G722enc_inst,
const int16_t* speechIn,
int16_t len,
uint8_t* encoded)
size_t WebRtcG722_Encode(G722EncInst *G722enc_inst,
const int16_t* speechIn,
size_t len,
uint8_t* encoded)
{
unsigned char *codechar = (unsigned char*) encoded;
// Encode the input speech vector
@ -85,11 +85,11 @@ int WebRtcG722_FreeDecoder(G722DecInst *G722dec_inst)
return WebRtc_g722_decode_release((G722DecoderState*) G722dec_inst);
}
int16_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
const uint8_t *encoded,
int16_t len,
int16_t *decoded,
int16_t *speechType)
size_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
const uint8_t *encoded,
size_t len,
int16_t *decoded,
int16_t *speechType)
{
// Decode the G.722 encoder stream
*speechType=G722_WEBRTC_SPEECH;

View File

@ -37,8 +37,8 @@ class AudioEncoderG722 final : public AudioEncoder {
int NumChannels() const override;
size_t MaxEncodedBytes() const override;
int RtpTimestampRateHz() const override;
int Num10MsFramesInNextPacket() const override;
int Max10MsFramesInAPacket() const override;
size_t Num10MsFramesInNextPacket() const override;
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
@ -55,12 +55,12 @@ class AudioEncoderG722 final : public AudioEncoder {
~EncoderState();
};
int SamplesPerChannel() const;
size_t SamplesPerChannel() const;
const int num_channels_;
const int payload_type_;
const int num_10ms_frames_per_packet_;
int num_10ms_frames_buffered_;
const size_t num_10ms_frames_per_packet_;
size_t num_10ms_frames_buffered_;
uint32_t first_timestamp_in_buffer_;
const rtc::scoped_ptr<EncoderState[]> encoders_;
rtc::Buffer interleave_buffer_;

View File

@ -94,10 +94,10 @@ int WebRtcG722_FreeEncoder(G722EncInst *G722enc_inst);
* Return value : Length (in bytes) of coded data
*/
int16_t WebRtcG722_Encode(G722EncInst* G722enc_inst,
const int16_t* speechIn,
int16_t len,
uint8_t* encoded);
size_t WebRtcG722_Encode(G722EncInst* G722enc_inst,
const int16_t* speechIn,
size_t len,
uint8_t* encoded);
/****************************************************************************
@ -162,15 +162,14 @@ int WebRtcG722_FreeDecoder(G722DecInst *G722dec_inst);
* - speechType : 1 normal, 2 CNG (Since G722 does not have its own
* DTX/CNG scheme it should always return 1)
*
* Return value : >0 - Samples in decoded vector
* -1 - Error
* Return value : Samples in decoded vector
*/
int16_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
const uint8_t* encoded,
int16_t len,
int16_t *decoded,
int16_t *speechType);
size_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
const uint8_t* encoded,
size_t len,
int16_t *decoded,
int16_t *speechType);
/****************************************************************************
* WebRtcG722_Version(...)

View File

@ -29,9 +29,9 @@ typedef struct WebRtcG722EncInst G722EncInst;
typedef struct WebRtcG722DecInst G722DecInst;
/* function for reading audio data from PCM file */
bool readframe(int16_t *data, FILE *inp, int length)
bool readframe(int16_t *data, FILE *inp, size_t length)
{
short rlen = (short)fread(data, sizeof(int16_t), length, inp);
size_t rlen = fread(data, sizeof(int16_t), length, inp);
if (rlen >= length)
return false;
memset(data + rlen, 0, (length - rlen) * sizeof(int16_t));
@ -45,17 +45,16 @@ int main(int argc, char* argv[])
int framecnt;
bool endfile;
int16_t framelength = 160;
size_t framelength = 160;
G722EncInst *G722enc_inst;
G722DecInst *G722dec_inst;
int err;
/* Runtime statistics */
double starttime;
double runtime = 0;
double length_file;
int16_t stream_len = 0;
size_t stream_len = 0;
int16_t shortdata[960];
int16_t decoded[960];
uint8_t streamdata[80 * 6];
@ -78,11 +77,12 @@ int main(int argc, char* argv[])
}
/* Get frame length */
framelength = atoi(argv[1]);
if (framelength < 0) {
printf(" G.722: Invalid framelength %d.\n", framelength);
int framelength_int = atoi(argv[1]);
if (framelength_int < 0) {
printf(" G.722: Invalid framelength %d.\n", framelength_int);
exit(1);
}
framelength = static_cast<size_t>(framelength_int);
/* Get Input and Output files */
sscanf(argv[2], "%s", inname);
@ -124,26 +124,21 @@ int main(int argc, char* argv[])
/* G.722 encoding + decoding */
stream_len = WebRtcG722_Encode((G722EncInst *)G722enc_inst, shortdata, framelength, streamdata);
err = WebRtcG722_Decode(G722dec_inst, streamdata, stream_len, decoded,
speechType);
WebRtcG722_Decode(G722dec_inst, streamdata, stream_len, decoded,
speechType);
/* Stop clock after call to encoder and decoder */
runtime += (double)((clock()/(double)CLOCKS_PER_SEC_G722)-starttime);
if (stream_len < 0 || err < 0) {
/* exit if returned with error */
printf("Error in encoder/decoder\n");
} else {
/* Write coded bits to file */
if (fwrite(streamdata, sizeof(short), stream_len / 2, outbitp) !=
static_cast<size_t>(stream_len / 2)) {
return -1;
}
/* Write coded speech to file */
if (fwrite(decoded, sizeof(short), framelength, outp) !=
static_cast<size_t>(framelength)) {
return -1;
}
/* Write coded bits to file */
if (fwrite(streamdata, sizeof(short), stream_len / 2, outbitp) !=
stream_len / 2) {
return -1;
}
/* Write coded speech to file */
if (fwrite(decoded, sizeof(short), framelength, outp) !=
framelength) {
return -1;
}
}

View File

@ -36,7 +36,7 @@ void WebRtcIlbcfix_AbsQuant(
int16_t *weightDenum /* (i) denominator of synthesis filter */
) {
int16_t *syntOut;
int16_t quantLen[2];
size_t quantLen[2];
/* Stack based */
int16_t syntOutBuf[LPC_FILTERORDER+STATE_SHORT_LEN_30MS];

View File

@ -21,9 +21,9 @@
#include "sort_sq.h"
void WebRtcIlbcfix_AbsQuantLoop(int16_t *syntOutIN, int16_t *in_weightedIN,
int16_t *weightDenumIN, int16_t *quantLenIN,
int16_t *weightDenumIN, size_t *quantLenIN,
int16_t *idxVecIN ) {
int k1, k2;
size_t k1, k2;
int16_t index;
int32_t toQW32;
int32_t toQ32;
@ -33,7 +33,7 @@ void WebRtcIlbcfix_AbsQuantLoop(int16_t *syntOutIN, int16_t *in_weightedIN,
int16_t *syntOut = syntOutIN;
int16_t *in_weighted = in_weightedIN;
int16_t *weightDenum = weightDenumIN;
int16_t *quantLen = quantLenIN;
size_t *quantLen = quantLenIN;
int16_t *idxVec = idxVecIN;
for(k1=0;k1<2;k1++) {

View File

@ -27,7 +27,7 @@
*---------------------------------------------------------------*/
void WebRtcIlbcfix_AbsQuantLoop(int16_t *syntOutIN, int16_t *in_weightedIN,
int16_t *weightDenumIN, int16_t *quantLenIN,
int16_t *weightDenumIN, size_t *quantLenIN,
int16_t *idxVecIN);
#endif

View File

@ -24,15 +24,20 @@ const int kSampleRateHz = 8000;
} // namespace
// static
const size_t AudioEncoderIlbc::kMaxSamplesPerPacket;
bool AudioEncoderIlbc::Config::IsOk() const {
return (frame_size_ms == 20 || frame_size_ms == 30 || frame_size_ms == 40 ||
frame_size_ms == 60) &&
(kSampleRateHz / 100 * (frame_size_ms / 10)) <= kMaxSamplesPerPacket;
static_cast<size_t>(kSampleRateHz / 100 * (frame_size_ms / 10)) <=
kMaxSamplesPerPacket;
}
AudioEncoderIlbc::AudioEncoderIlbc(const Config& config)
: payload_type_(config.payload_type),
num_10ms_frames_per_packet_(config.frame_size_ms / 10),
num_10ms_frames_per_packet_(
static_cast<size_t>(config.frame_size_ms / 10)),
num_10ms_frames_buffered_(0) {
CHECK(config.IsOk());
CHECK_EQ(0, WebRtcIlbcfix_EncoderCreate(&encoder_));
@ -58,11 +63,11 @@ size_t AudioEncoderIlbc::MaxEncodedBytes() const {
return RequiredOutputSizeBytes();
}
int AudioEncoderIlbc::Num10MsFramesInNextPacket() const {
size_t AudioEncoderIlbc::Num10MsFramesInNextPacket() const {
return num_10ms_frames_per_packet_;
}
int AudioEncoderIlbc::Max10MsFramesInAPacket() const {
size_t AudioEncoderIlbc::Max10MsFramesInAPacket() const {
return num_10ms_frames_per_packet_;
}
@ -111,7 +116,7 @@ AudioEncoder::EncodedInfo AudioEncoderIlbc::EncodeInternal(
encoded);
CHECK_GE(output_len, 0);
EncodedInfo info;
info.encoded_bytes = output_len;
info.encoded_bytes = static_cast<size_t>(output_len);
DCHECK_EQ(info.encoded_bytes, RequiredOutputSizeBytes());
info.encoded_timestamp = first_timestamp_in_buffer_;
info.payload_type = payload_type_;
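Where a legacy C call still reports its output length as a signed int (as in the iLBC encode call above, where CHECK_GE(output_len, 0) precedes the cast), the pattern is to range-check before widening into the size_t field. A hedged distilled sketch (ToSizeT is a made-up helper; the production code uses CHECK_GE rather than assert):

  #include <cassert>
  #include <cstddef>
  // Sketch: widen a signed legacy return value into size_t after checking it.
  static size_t ToSizeT(int signed_len) {
    assert(signed_len >= 0);  // negative means error in the legacy C API
    return static_cast<size_t>(signed_len);
  }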

View File

@ -28,14 +28,14 @@ void WebRtcIlbcfix_AugmentedCbCorr(
int32_t *crossDot, /* (o) The cross correlation between
the target and the Augmented
vector */
int16_t low, /* (i) Lag to start from (typically
size_t low, /* (i) Lag to start from (typically
20) */
int16_t high, /* (i) Lag to end at (typically 39) */
size_t high, /* (i) Lag to end at (typically 39) */
int scale) /* (i) Scale factor to use for
the crossDot */
{
int lagcount;
int16_t ilow;
size_t lagcount;
size_t ilow;
int16_t *targetPtr;
int32_t *crossDotPtr;
int16_t *iSPtr=interpSamples;
@ -46,7 +46,7 @@ void WebRtcIlbcfix_AugmentedCbCorr(
crossDotPtr=crossDot;
for (lagcount=low; lagcount<=high; lagcount++) {
ilow = (int16_t) (lagcount-4);
ilow = lagcount - 4;
/* Compute dot product for the first (lagcount-4) samples */
(*crossDotPtr) = WebRtcSpl_DotProductWithScale(target, buffer-lagcount, ilow, scale);

View File

@ -33,9 +33,9 @@ void WebRtcIlbcfix_AugmentedCbCorr(
int32_t *crossDot, /* (o) The cross correlation between
the target and the Augmented
vector */
int16_t low, /* (i) Lag to start from (typically
size_t low, /* (i) Lag to start from (typically
20) */
int16_t high, /* (i) Lag to end at (typically 39 */
size_t high, /* (i) Lag to end at (typically 39 */
int scale); /* (i) Scale factor to use for the crossDot */
#endif

View File

@ -29,10 +29,10 @@ void WebRtcIlbcfix_CbConstruct(
int16_t *index, /* (i) Codebook indices */
int16_t *gain_index, /* (i) Gain quantization indices */
int16_t *mem, /* (i) Buffer for codevector construction */
int16_t lMem, /* (i) Length of buffer */
int16_t veclen /* (i) Length of vector */
size_t lMem, /* (i) Length of buffer */
size_t veclen /* (i) Length of vector */
){
int j;
size_t j;
int16_t gain[CB_NSTAGES];
/* Stack based */
int16_t cbvec0[SUBL];
@ -50,9 +50,9 @@ void WebRtcIlbcfix_CbConstruct(
/* codebook vector construction and construction of total vector */
/* Stack based */
WebRtcIlbcfix_GetCbVec(cbvec0, mem, index[0], lMem, veclen);
WebRtcIlbcfix_GetCbVec(cbvec1, mem, index[1], lMem, veclen);
WebRtcIlbcfix_GetCbVec(cbvec2, mem, index[2], lMem, veclen);
WebRtcIlbcfix_GetCbVec(cbvec0, mem, (size_t)index[0], lMem, veclen);
WebRtcIlbcfix_GetCbVec(cbvec1, mem, (size_t)index[1], lMem, veclen);
WebRtcIlbcfix_GetCbVec(cbvec2, mem, (size_t)index[2], lMem, veclen);
gainPtr = &gain[0];
for (j=0;j<veclen;j++) {

View File

@ -30,8 +30,8 @@ void WebRtcIlbcfix_CbConstruct(
int16_t *index, /* (i) Codebook indices */
int16_t *gain_index, /* (i) Gain quantization indices */
int16_t *mem, /* (i) Buffer for codevector construction */
int16_t lMem, /* (i) Length of buffer */
int16_t veclen /* (i) Length of vector */
size_t lMem, /* (i) Length of buffer */
size_t veclen /* (i) Length of vector */
);

View File

@ -27,15 +27,15 @@
*----------------------------------------------------------------*/
void WebRtcIlbcfix_CbMemEnergy(
int16_t range,
size_t range,
int16_t *CB, /* (i) The CB memory (1:st section) */
int16_t *filteredCB, /* (i) The filtered CB memory (2:nd section) */
int16_t lMem, /* (i) Length of the CB memory */
int16_t lTarget, /* (i) Length of the target vector */
size_t lMem, /* (i) Length of the CB memory */
size_t lTarget, /* (i) Length of the target vector */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts, /* (o) Shift value of the energy */
int scale, /* (i) The scaling of all energy values */
int16_t base_size /* (i) Index to where energy values should be stored */
size_t base_size /* (i) Index to where energy values should be stored */
) {
int16_t *ppi, *ppo, *pp;
int32_t energy, tmp32;

View File

@ -20,15 +20,15 @@
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_H_
void WebRtcIlbcfix_CbMemEnergy(
int16_t range,
size_t range,
int16_t *CB, /* (i) The CB memory (1:st section) */
int16_t *filteredCB, /* (i) The filtered CB memory (2:nd section) */
int16_t lMem, /* (i) Length of the CB memory */
int16_t lTarget, /* (i) Length of the target vector */
size_t lMem, /* (i) Length of the CB memory */
size_t lTarget, /* (i) Length of the target vector */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts, /* (o) Shift value of the energy */
int scale, /* (i) The scaling of all energy values */
int16_t base_size /* (i) Index to where energy values should be stored */
size_t base_size /* (i) Index to where energy values should be stored */
);
#endif

View File

@ -23,13 +23,14 @@ void WebRtcIlbcfix_CbMemEnergyAugmentation(
int16_t *interpSamples, /* (i) The interpolated samples */
int16_t *CBmem, /* (i) The CB memory */
int scale, /* (i) The scaling of all energy values */
int16_t base_size, /* (i) Index to where energy values should be stored */
size_t base_size, /* (i) Index to where energy values should be stored */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts /* (o) Shift value of the energy */
){
int32_t energy, tmp32;
int16_t *ppe, *pp, *interpSamplesPtr;
int16_t *CBmemPtr, lagcount;
int16_t *CBmemPtr;
size_t lagcount;
int16_t *enPtr=&energyW16[base_size-20];
int16_t *enShPtr=&energyShifts[base_size-20];
int32_t nrjRecursive;

View File

@ -23,7 +23,7 @@ void WebRtcIlbcfix_CbMemEnergyAugmentation(
int16_t *interpSamples, /* (i) The interpolated samples */
int16_t *CBmem, /* (i) The CB memory */
int scale, /* (i) The scaling of all energy values */
int16_t base_size, /* (i) Index to where energy values should be stored */
size_t base_size, /* (i) Index to where energy values should be stored */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts /* (o) Shift value of the energy */
);

View File

@ -23,16 +23,17 @@
* sample and the last sample respectively */
void WebRtcIlbcfix_CbMemEnergyCalc(
int32_t energy, /* (i) input start energy */
int16_t range, /* (i) number of iterations */
size_t range, /* (i) number of iterations */
int16_t *ppi, /* (i) input pointer 1 */
int16_t *ppo, /* (i) input pointer 2 */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts, /* (o) Shift value of the energy */
int scale, /* (i) The scaling of all energy values */
int16_t base_size /* (i) Index to where energy values should be stored */
size_t base_size /* (i) Index to where energy values should be stored */
)
{
int16_t j,shft;
size_t j;
int16_t shft;
int32_t tmp;
int16_t *eSh_ptr;
int16_t *eW16_ptr;

View File

@ -21,13 +21,13 @@
void WebRtcIlbcfix_CbMemEnergyCalc(
int32_t energy, /* (i) input start energy */
int16_t range, /* (i) number of iterations */
size_t range, /* (i) number of iterations */
int16_t *ppi, /* (i) input pointer 1 */
int16_t *ppo, /* (i) input pointer 2 */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts, /* (o) Shift value of the energy */
int scale, /* (i) The scaling of all energy values */
int16_t base_size /* (i) Index to where energy values should be stored */
size_t base_size /* (i) Index to where energy values should be stored */
);
#endif

View File

@ -40,29 +40,31 @@ void WebRtcIlbcfix_CbSearch(
int16_t *gain_index, /* (o) Gain quantization indices */
int16_t *intarget, /* (i) Target vector for encoding */
int16_t *decResidual,/* (i) Decoded residual for codebook construction */
int16_t lMem, /* (i) Length of buffer */
int16_t lTarget, /* (i) Length of vector */
size_t lMem, /* (i) Length of buffer */
size_t lTarget, /* (i) Length of vector */
int16_t *weightDenum,/* (i) weighting filter coefficients in Q12 */
int16_t block /* (i) the subblock number */
size_t block /* (i) the subblock number */
) {
int16_t i, j, stage, range;
size_t i, range;
int16_t ii, j, stage;
int16_t *pp;
int16_t tmp;
int scale;
int16_t bits, temp1, temp2;
int16_t base_size;
size_t base_size;
int32_t codedEner, targetEner;
int16_t gains[CB_NSTAGES+1];
int16_t *cb_vecPtr;
int16_t indexOffset, sInd, eInd;
size_t indexOffset, sInd, eInd;
int32_t CritMax=0;
int16_t shTotMax=WEBRTC_SPL_WORD16_MIN;
int16_t bestIndex=0;
size_t bestIndex=0;
int16_t bestGain=0;
int16_t indexNew, CritNewSh;
size_t indexNew;
int16_t CritNewSh;
int32_t CritNew;
int32_t *cDotPtr;
int16_t noOfZeros;
size_t noOfZeros;
int16_t *gainPtr;
int32_t t32, tmpW32;
int16_t *WebRtcIlbcfix_kGainSq5_ptr;
@ -148,9 +150,9 @@ void WebRtcIlbcfix_CbSearch(
scale, 20, energyW16, energyShifts);
/* Compute the CB vectors' energies for the second cb section (filtered cb) */
WebRtcIlbcfix_CbMemEnergyAugmentation(interpSamplesFilt, cbvectors,
scale, (int16_t)(base_size + 20),
energyW16, energyShifts);
WebRtcIlbcfix_CbMemEnergyAugmentation(interpSamplesFilt, cbvectors, scale,
base_size + 20, energyW16,
energyShifts);
/* Compute the CB vectors' energies and store them in the vector
* energyW16. Also the corresponding shift values are stored. The
@ -224,7 +226,7 @@ void WebRtcIlbcfix_CbSearch(
/* Update the global best index and the corresponding gain */
WebRtcIlbcfix_CbUpdateBestIndex(
CritNew, CritNewSh, (int16_t)(indexNew+indexOffset), cDot[indexNew+indexOffset],
CritNew, CritNewSh, indexNew+indexOffset, cDot[indexNew+indexOffset],
inverseEnergy[indexNew+indexOffset], inverseEnergyShifts[indexNew+indexOffset],
&CritMax, &shTotMax, &bestIndex, &bestGain);
@ -242,11 +244,8 @@ void WebRtcIlbcfix_CbSearch(
i=sInd;
if (sInd<20) {
WebRtcIlbcfix_AugmentedCbCorr(target, cbvectors + lMem,
interpSamplesFilt, cDot,
(int16_t)(sInd + 20),
(int16_t)(WEBRTC_SPL_MIN(39,
(eInd + 20))),
scale);
interpSamplesFilt, cDot, sInd + 20,
WEBRTC_SPL_MIN(39, (eInd + 20)), scale);
i=20;
cDotPtr = &cDot[20 - sInd];
} else {
@ -257,7 +256,7 @@ void WebRtcIlbcfix_CbSearch(
/* Calculate the cross correlations (main part of the filtered CB) */
WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget,
(int16_t)(eInd - i + 1), scale, -1);
eInd - i + 1, scale, -1);
} else {
cDotPtr = cDot;
@ -265,7 +264,7 @@ void WebRtcIlbcfix_CbSearch(
/* Calculate the cross correlations (main part of the filtered CB) */
WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget,
(int16_t)(eInd - sInd + 1), scale, -1);
eInd - sInd + 1, scale, -1);
}
@ -274,17 +273,17 @@ void WebRtcIlbcfix_CbSearch(
/* Search for best index in this part of the vector */
WebRtcIlbcfix_CbSearchCore(
cDot, (int16_t)(eInd-sInd+1), stage, inverseEnergy+indexOffset,
cDot, eInd-sInd+1, stage, inverseEnergy+indexOffset,
inverseEnergyShifts+indexOffset, Crit,
&indexNew, &CritNew, &CritNewSh);
/* Update the global best index and the corresponding gain */
WebRtcIlbcfix_CbUpdateBestIndex(
CritNew, CritNewSh, (int16_t)(indexNew+indexOffset), cDot[indexNew],
CritNew, CritNewSh, indexNew+indexOffset, cDot[indexNew],
inverseEnergy[indexNew+indexOffset], inverseEnergyShifts[indexNew+indexOffset],
&CritMax, &shTotMax, &bestIndex, &bestGain);
index[stage] = bestIndex;
index[stage] = (int16_t)bestIndex;
bestGain = WebRtcIlbcfix_GainQuant(bestGain,
@ -297,7 +296,7 @@ void WebRtcIlbcfix_CbSearch(
if(lTarget==(STATE_LEN-iLBCenc_inst->state_short_len)) {
if(index[stage]<base_size) {
if((size_t)index[stage]<base_size) {
pp=buf+lMem-lTarget-index[stage];
} else {
pp=cbvectors+lMem-lTarget-
@ -306,16 +305,16 @@ void WebRtcIlbcfix_CbSearch(
} else {
if (index[stage]<base_size) {
if ((size_t)index[stage]<base_size) {
if (index[stage]>=20) {
/* Adjust index and extract vector */
index[stage]-=20;
pp=buf+lMem-lTarget-index[stage];
} else {
/* Adjust index and extract vector */
index[stage]+=(base_size-20);
index[stage]+=(int16_t)(base_size-20);
WebRtcIlbcfix_CreateAugmentedVec((int16_t)(index[stage]-base_size+40),
WebRtcIlbcfix_CreateAugmentedVec(index[stage]-base_size+40,
buf+lMem, aug_vec);
pp = aug_vec;
@ -329,8 +328,8 @@ void WebRtcIlbcfix_CbSearch(
index[stage]+base_size;
} else {
/* Adjust index and extract vector */
index[stage]+=(base_size-20);
WebRtcIlbcfix_CreateAugmentedVec((int16_t)(index[stage]-2*base_size+40),
index[stage]+=(int16_t)(base_size-20);
WebRtcIlbcfix_CreateAugmentedVec(index[stage]-2*base_size+40,
cbvectors+lMem, aug_vec);
pp = aug_vec;
}
@ -381,7 +380,7 @@ void WebRtcIlbcfix_CbSearch(
WebRtcIlbcfix_kGainSq5_ptr = (int16_t*)&WebRtcIlbcfix_kGainSq5[j];
/* targetEner and codedEner are in Q(-2*scale) */
for (i=gain_index[0];i<32;i++) {
for (ii=gain_index[0];ii<32;ii++) {
/* Change the index if
(codedEnergy*gainTbl[i]*gainTbl[i])<(targetEn*gain[0]*gain[0]) AND
@ -392,8 +391,8 @@ void WebRtcIlbcfix_CbSearch(
t32 = t32 - targetEner;
if (t32 < 0) {
if ((*WebRtcIlbcfix_kGainSq5_ptr) < tmpW32) {
j=i;
WebRtcIlbcfix_kGainSq5_ptr = (int16_t*)&WebRtcIlbcfix_kGainSq5[i];
j=ii;
WebRtcIlbcfix_kGainSq5_ptr = (int16_t*)&WebRtcIlbcfix_kGainSq5[ii];
}
}
gainPtr++;

View File

@ -26,10 +26,10 @@ void WebRtcIlbcfix_CbSearch(
int16_t *gain_index, /* (o) Gain quantization indices */
int16_t *intarget, /* (i) Target vector for encoding */
int16_t *decResidual,/* (i) Decoded residual for codebook construction */
int16_t lMem, /* (i) Length of buffer */
int16_t lTarget, /* (i) Length of vector */
size_t lMem, /* (i) Length of buffer */
size_t lTarget, /* (i) Length of vector */
int16_t *weightDenum,/* (i) weighting filter coefficients in Q12 */
int16_t block /* (i) the subblock number */
size_t block /* (i) the subblock number */
);
#endif

View File

@ -21,13 +21,13 @@
void WebRtcIlbcfix_CbSearchCore(
int32_t *cDot, /* (i) Cross Correlation */
int16_t range, /* (i) Search range */
size_t range, /* (i) Search range */
int16_t stage, /* (i) Stage of this search */
int16_t *inverseEnergy, /* (i) Inversed energy */
int16_t *inverseEnergyShift, /* (i) Shifts of inversed energy
with the offset 2*16-29 */
int32_t *Crit, /* (o) The criteria */
int16_t *bestIndex, /* (o) Index that corresponds to
size_t *bestIndex, /* (o) Index that corresponds to
maximum criteria (in this
vector) */
int32_t *bestCrit, /* (o) Value of critera for the
@ -37,7 +37,7 @@ void WebRtcIlbcfix_CbSearchCore(
{
int32_t maxW32, tmp32;
int16_t max, sh, tmp16;
int i;
size_t i;
int32_t *cDotPtr;
int16_t cDotSqW16;
int16_t *inverseEnergyPtr;
@ -103,7 +103,7 @@ void WebRtcIlbcfix_CbSearchCore(
}
/* Find the index of the best value */
*bestIndex = WebRtcSpl_MaxIndexW32(Crit, range);
*bestIndex = (size_t)WebRtcSpl_MaxIndexW32(Crit, range);
*bestCrit = Crit[*bestIndex];
/* Calculate total shifts of this criteria */

View File

@ -23,13 +23,13 @@
void WebRtcIlbcfix_CbSearchCore(
int32_t *cDot, /* (i) Cross Correlation */
int16_t range, /* (i) Search range */
size_t range, /* (i) Search range */
int16_t stage, /* (i) Stage of this search */
int16_t *inverseEnergy, /* (i) Inversed energy */
int16_t *inverseEnergyShift, /* (i) Shifts of inversed energy
with the offset 2*16-29 */
int32_t *Crit, /* (o) The criteria */
int16_t *bestIndex, /* (o) Index that corresponds to
size_t *bestIndex, /* (o) Index that corresponds to
maximum criteria (in this
vector) */
int32_t *bestCrit, /* (o) Value of criteria for the

View File

@ -23,13 +23,13 @@
void WebRtcIlbcfix_CbUpdateBestIndex(
int32_t CritNew, /* (i) New Potentially best Criteria */
int16_t CritNewSh, /* (i) Shift value of above Criteria */
int16_t IndexNew, /* (i) Index of new Criteria */
size_t IndexNew, /* (i) Index of new Criteria */
int32_t cDotNew, /* (i) Cross dot of new index */
int16_t invEnergyNew, /* (i) Inversed energy new index */
int16_t energyShiftNew, /* (i) Energy shifts of new index */
int32_t *CritMax, /* (i/o) Maximum Criteria (so far) */
int16_t *shTotMax, /* (i/o) Shifts of maximum criteria */
int16_t *bestIndex, /* (i/o) Index that corresponds to
size_t *bestIndex, /* (i/o) Index that corresponds to
maximum criteria */
int16_t *bestGain) /* (i/o) Gain in Q14 that corresponds
to maximum criteria */

View File

@ -24,13 +24,13 @@
void WebRtcIlbcfix_CbUpdateBestIndex(
int32_t CritNew, /* (i) New Potentially best Criteria */
int16_t CritNewSh, /* (i) Shift value of above Criteria */
int16_t IndexNew, /* (i) Index of new Criteria */
size_t IndexNew, /* (i) Index of new Criteria */
int32_t cDotNew, /* (i) Cross dot of new index */
int16_t invEnergyNew, /* (i) Inversed energy new index */
int16_t energyShiftNew, /* (i) Energy shifts of new index */
int32_t *CritMax, /* (i/o) Maximum Criteria (so far) */
int16_t *shTotMax, /* (i/o) Shifts of maximum criteria */
int16_t *bestIndex, /* (i/o) Index that corresponds to
size_t *bestIndex, /* (i/o) Index that corresponds to
maximum criteria */
int16_t *bestGain); /* (i/o) Gain in Q14 that corresponds
to maximum criteria */

View File

@ -27,9 +27,9 @@ void WebRtcIlbcfix_CompCorr(
int32_t *corr, /* (o) cross correlation */
int32_t *ener, /* (o) energy */
int16_t *buffer, /* (i) signal buffer */
int16_t lag, /* (i) pitch lag */
int16_t bLen, /* (i) length of buffer */
int16_t sRange, /* (i) correlation search length */
size_t lag, /* (i) pitch lag */
size_t bLen, /* (i) length of buffer */
size_t sRange, /* (i) correlation search length */
int16_t scale /* (i) number of rightshifts to use */
){
int16_t *w16ptr;

View File

@ -30,9 +30,9 @@ void WebRtcIlbcfix_CompCorr(
int32_t *corr, /* (o) cross correlation */
int32_t *ener, /* (o) energy */
int16_t *buffer, /* (i) signal buffer */
int16_t lag, /* (i) pitch lag */
int16_t bLen, /* (i) length of buffer */
int16_t sRange, /* (i) correlation search length */
size_t lag, /* (i) pitch lag */
size_t bLen, /* (i) length of buffer */
size_t sRange, /* (i) correlation search length */
int16_t scale /* (i) number of rightshifts to use */
);

View File

@ -593,10 +593,10 @@ const int16_t WebRtcIlbcfix_kAlpha[4]={
/* Ranges for search and filters at different subframes */
const int16_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES]={
const size_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES]={
{58,58,58}, {108,44,44}, {108,108,108}, {108,108,108}, {108,108,108}};
const int16_t WebRtcIlbcfix_kFilterRange[5]={63, 85, 125, 147, 147};
const size_t WebRtcIlbcfix_kFilterRange[5]={63, 85, 125, 147, 147};
/* Gain Quantization for the codebook gains of the 3 stages */

View File

@ -61,8 +61,8 @@ extern const int16_t WebRtcIlbcfix_kFrgQuantMod[];
/* Ranges for search and filters at different subframes */
extern const int16_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES];
extern const int16_t WebRtcIlbcfix_kFilterRange[];
extern const size_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES];
extern const size_t WebRtcIlbcfix_kFilterRange[];
/* gain quantization tables */

View File

@ -25,12 +25,12 @@
*----------------------------------------------------------------*/
void WebRtcIlbcfix_CreateAugmentedVec(
int16_t index, /* (i) Index for the augmented vector to be created */
size_t index, /* (i) Index for the augmented vector to be created */
int16_t *buffer, /* (i) Pointer to the end of the codebook memory that
is used for creation of the augmented codebook */
int16_t *cbVec /* (o) The constructed codebook vector */
) {
int16_t ilow;
size_t ilow;
int16_t *ppo, *ppi;
int16_t cbVecTmp[4];

View File

@ -27,7 +27,7 @@
*----------------------------------------------------------------*/
void WebRtcIlbcfix_CreateAugmentedVec(
int16_t index, /* (i) Index for the augmented vector to be created */
size_t index, /* (i) Index for the augmented vector to be created */
int16_t *buffer, /* (i) Pointer to the end of the codebook memory that
is used for creation of the augmented codebook */
int16_t *cbVec /* (o) The constructed codebook vector */

View File

@ -44,7 +44,7 @@ void WebRtcIlbcfix_DecodeImpl(
int16_t mode /* (i) 0: bad packet, PLC,
1: normal */
) {
int i;
size_t i;
int16_t order_plus_one;
int16_t last_bit;
@ -106,7 +106,7 @@ void WebRtcIlbcfix_DecodeImpl(
WebRtcIlbcfix_DoThePlc(
PLCresidual, PLClpc, 0, decresidual,
syntdenum + (LPC_FILTERORDER + 1) * (iLBCdec_inst->nsub - 1),
(int16_t)(iLBCdec_inst->last_lag), iLBCdec_inst);
iLBCdec_inst->last_lag, iLBCdec_inst);
/* Use the output from doThePLC */
WEBRTC_SPL_MEMCPY_W16(decresidual, PLCresidual, iLBCdec_inst->blockl);
@ -122,7 +122,7 @@ void WebRtcIlbcfix_DecodeImpl(
/* packet loss conceal */
WebRtcIlbcfix_DoThePlc(PLCresidual, PLClpc, 1, decresidual, syntdenum,
(int16_t)(iLBCdec_inst->last_lag), iLBCdec_inst);
iLBCdec_inst->last_lag, iLBCdec_inst);
WEBRTC_SPL_MEMCPY_W16(decresidual, PLCresidual, iLBCdec_inst->blockl);
@ -188,18 +188,18 @@ void WebRtcIlbcfix_DecodeImpl(
WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->syntMem, &data[iLBCdec_inst->blockl-LPC_FILTERORDER], LPC_FILTERORDER);
} else { /* Enhancer not activated */
int16_t lag;
size_t lag;
/* Find last lag (since the enhancer is not called to give this info) */
lag = 20;
if (iLBCdec_inst->mode==20) {
lag = (int16_t)WebRtcIlbcfix_XcorrCoef(
lag = WebRtcIlbcfix_XcorrCoef(
&decresidual[iLBCdec_inst->blockl-60],
&decresidual[iLBCdec_inst->blockl-60-lag],
60,
80, lag, -1);
} else {
lag = (int16_t)WebRtcIlbcfix_XcorrCoef(
lag = WebRtcIlbcfix_XcorrCoef(
&decresidual[iLBCdec_inst->blockl-ENH_BLOCKL],
&decresidual[iLBCdec_inst->blockl-ENH_BLOCKL-lag],
ENH_BLOCKL,

View File

@ -41,8 +41,8 @@ void WebRtcIlbcfix_DecodeResidual(
int16_t *syntdenum /* (i) the decoded synthesis filter
coefficients */
) {
int16_t meml_gotten, diff, start_pos;
int16_t subcount, subframe;
size_t meml_gotten, diff, start_pos;
size_t subcount, subframe;
int16_t *reverseDecresidual = iLBCdec_inst->enh_buf; /* Reversed decoded data, used for decoding backwards in time (reuse memory in state) */
int16_t *memVec = iLBCdec_inst->prevResidual; /* Memory for codebook and filter state (reuse memory in state) */
int16_t *mem = &memVec[CB_HALFFILTERLEN]; /* Memory for codebook */
@ -118,7 +118,7 @@ void WebRtcIlbcfix_DecodeResidual(
/* loop over subframes to encode */
int16_t Nfor = iLBCdec_inst->nsub - iLBC_encbits->startIdx - 1;
size_t Nfor = iLBCdec_inst->nsub - iLBC_encbits->startIdx - 1;
for (subframe=0; subframe<Nfor; subframe++) {
/* construct decoded vector */
@ -156,7 +156,7 @@ void WebRtcIlbcfix_DecodeResidual(
/* loop over subframes to decode */
int16_t Nback = iLBC_encbits->startIdx - 1;
size_t Nback = iLBC_encbits->startIdx - 1;
for (subframe=0; subframe<Nback; subframe++) {
/* construct decoded vector */

View File

@ -34,7 +34,8 @@ void WebRtcIlbcfix_DecoderInterpolateLsp(
IlbcDecoder *iLBCdec_inst
/* (i) the decoder state structure */
){
int i, pos, lp_length;
size_t i;
int pos, lp_length;
int16_t lp[LPC_FILTERORDER + 1], *lsfdeq2;
lsfdeq2 = lsfdeq + length;

View File

@ -121,11 +121,11 @@ typedef struct iLBC_bits_t_ {
int16_t lsf[LSF_NSPLIT*LPC_N_MAX];
int16_t cb_index[CB_NSTAGES*(NASUB_MAX+1)]; /* First CB_NSTAGES values contain extra CB index */
int16_t gain_index[CB_NSTAGES*(NASUB_MAX+1)]; /* First CB_NSTAGES values contain extra CB gain */
int16_t idxForMax;
size_t idxForMax;
int16_t state_first;
int16_t idxVec[STATE_SHORT_LEN_30MS];
int16_t firstbits;
int16_t startIdx;
size_t startIdx;
} iLBC_bits;
/* type definition encoder instance */
@ -135,12 +135,12 @@ typedef struct IlbcEncoder_ {
int16_t mode;
/* basic parameters for different frame sizes */
int16_t blockl;
int16_t nsub;
size_t blockl;
size_t nsub;
int16_t nasub;
int16_t no_of_bytes, no_of_words;
size_t no_of_bytes, no_of_words;
int16_t lpc_n;
int16_t state_short_len;
size_t state_short_len;
/* analysis filter state */
int16_t anaMem[LPC_FILTERORDER];
@ -164,7 +164,7 @@ typedef struct IlbcEncoder_ {
int16_t Nfor_flag;
int16_t Nback_flag;
int16_t start_pos;
int16_t diff;
size_t diff;
#endif
} IlbcEncoder;
@ -176,12 +176,12 @@ typedef struct IlbcDecoder_ {
int16_t mode;
/* basic parameters for different frame sizes */
int16_t blockl;
int16_t nsub;
size_t blockl;
size_t nsub;
int16_t nasub;
int16_t no_of_bytes, no_of_words;
size_t no_of_bytes, no_of_words;
int16_t lpc_n;
int16_t state_short_len;
size_t state_short_len;
/* synthesis filter state */
int16_t syntMem[LPC_FILTERORDER];
@ -190,14 +190,15 @@ typedef struct IlbcDecoder_ {
int16_t lsfdeqold[LPC_FILTERORDER];
/* pitch lag estimated in enhancer and used in PLC */
int last_lag;
size_t last_lag;
/* PLC state information */
int consPLICount, prev_enh_pl;
int16_t perSquare;
int16_t prevScale, prevPLI;
int16_t prevLag, prevLpc[LPC_FILTERORDER+1];
size_t prevLag;
int16_t prevLpc[LPC_FILTERORDER+1];
int16_t prevResidual[NSUB_MAX*SUBL];
int16_t seed;

View File

@ -33,18 +33,19 @@ void WebRtcIlbcfix_DoThePlc(
0 - no PL, 1 = PL */
int16_t *decresidual, /* (i) decoded residual */
int16_t *lpc, /* (i) decoded LPC (only used for no PL) */
int16_t inlag, /* (i) pitch lag */
size_t inlag, /* (i) pitch lag */
IlbcDecoder *iLBCdec_inst
/* (i/o) decoder instance */
){
int16_t i;
size_t i;
int32_t cross, ener, cross_comp, ener_comp = 0;
int32_t measure, maxMeasure, energy;
int16_t max, crossSquareMax, crossSquare;
int16_t j, lag, tmp1, tmp2, randlag;
size_t j, lag, randlag;
int16_t tmp1, tmp2;
int16_t shift1, shift2, shift3, shiftMax;
int16_t scale3;
int16_t corrLen;
size_t corrLen;
int32_t tmpW32, tmp2W32;
int16_t use_gain;
int16_t tot_gain;
@ -54,7 +55,7 @@ void WebRtcIlbcfix_DoThePlc(
int32_t nom;
int16_t denom;
int16_t pitchfact;
int16_t use_lag;
size_t use_lag;
int ind;
int16_t randvec[BLOCKL_MAX];
@ -71,7 +72,7 @@ void WebRtcIlbcfix_DoThePlc(
/* Maximum 60 samples are correlated, preserve as high accuracy
as possible without getting overflow */
max = WebRtcSpl_MaxAbsValueW16((*iLBCdec_inst).prevResidual,
(int16_t)iLBCdec_inst->blockl);
iLBCdec_inst->blockl);
scale3 = (WebRtcSpl_GetSizeInBits(max)<<1) - 25;
if (scale3 < 0) {
scale3 = 0;
@ -86,7 +87,7 @@ void WebRtcIlbcfix_DoThePlc(
lag = inlag - 3;
/* Guard against getting outside the frame */
corrLen = WEBRTC_SPL_MIN(60, iLBCdec_inst->blockl-(inlag+3));
corrLen = (size_t)WEBRTC_SPL_MIN(60, iLBCdec_inst->blockl-(inlag+3));
WebRtcIlbcfix_CompCorr( &cross, &ener,
iLBCdec_inst->prevResidual, lag, iLBCdec_inst->blockl, corrLen, scale3);
@ -234,7 +235,7 @@ void WebRtcIlbcfix_DoThePlc(
/* noise component - 52 < randlagFIX < 117 */
iLBCdec_inst->seed = (int16_t)(iLBCdec_inst->seed * 31821 + 13849);
randlag = 53 + (int16_t)(iLBCdec_inst->seed & 63);
randlag = 53 + (iLBCdec_inst->seed & 63);
if (randlag > i) {
randvec[i] =
iLBCdec_inst->prevResidual[iLBCdec_inst->blockl + i - randlag];

View File

@ -33,7 +33,7 @@ void WebRtcIlbcfix_DoThePlc(
0 - no PL, 1 = PL */
int16_t *decresidual, /* (i) decoded residual */
int16_t *lpc, /* (i) decoded LPC (only used for no PL) */
int16_t inlag, /* (i) pitch lag */
size_t inlag, /* (i) pitch lag */
IlbcDecoder *iLBCdec_inst
/* (i/o) decoder instance */
);

View File

@ -48,11 +48,11 @@ void WebRtcIlbcfix_EncodeImpl(
IlbcEncoder *iLBCenc_inst /* (i/o) the general encoder
state */
){
int n, meml_gotten, Nfor;
int16_t diff, start_pos;
int index;
int subcount, subframe;
int16_t start_count, end_count;
size_t n, meml_gotten, Nfor;
size_t diff, start_pos;
size_t index;
size_t subcount, subframe;
size_t start_count, end_count;
int16_t *residual;
int32_t en1, en2;
int16_t scale, max;
@ -86,7 +86,7 @@ void WebRtcIlbcfix_EncodeImpl(
#ifdef SPLIT_10MS
WebRtcSpl_MemSetW16 ( (int16_t *) iLBCbits_inst, 0,
(int16_t) (sizeof(iLBC_bits) / sizeof(int16_t)) );
sizeof(iLBC_bits) / sizeof(int16_t) );
start_pos = iLBCenc_inst->start_pos;
diff = iLBCenc_inst->diff;
@ -317,17 +317,17 @@ void WebRtcIlbcfix_EncodeImpl(
if (iLBCenc_inst->section == 1)
{
start_count = 0;
end_count = WEBRTC_SPL_MIN (Nfor, 2);
end_count = WEBRTC_SPL_MIN (Nfor, (size_t)2);
}
if (iLBCenc_inst->section == 2)
{
start_count = WEBRTC_SPL_MIN (Nfor, 2);
start_count = WEBRTC_SPL_MIN (Nfor, (size_t)2);
end_count = Nfor;
}
}
#else
start_count = 0;
end_count = (int16_t)Nfor;
end_count = Nfor;
#endif
/* loop over subframes to encode */
@ -341,7 +341,7 @@ void WebRtcIlbcfix_EncodeImpl(
&residual[(iLBCbits_inst->startIdx+1+subframe)*SUBL],
mem, MEM_LF_TBL, SUBL,
&weightdenum[(iLBCbits_inst->startIdx+1+subframe)*(LPC_FILTERORDER+1)],
(int16_t)subcount);
subcount);
/* construct decoded vector */
@ -386,7 +386,7 @@ void WebRtcIlbcfix_EncodeImpl(
contained in the same vector as the residual)
*/
int Nback = iLBCbits_inst->startIdx - 1;
size_t Nback = iLBCbits_inst->startIdx - 1;
WebRtcSpl_MemCpyReversedOrder(&reverseResidual[Nback*SUBL-1], residual, Nback*SUBL);
/* setup memory */
@ -434,7 +434,7 @@ void WebRtcIlbcfix_EncodeImpl(
}
#else
start_count = 0;
end_count = (int16_t)Nback;
end_count = Nback;
#endif
/* loop over subframes to encode */
@ -447,7 +447,7 @@ void WebRtcIlbcfix_EncodeImpl(
iLBCbits_inst->gain_index+subcount*CB_NSTAGES, &reverseResidual[subframe*SUBL],
mem, MEM_LF_TBL, SUBL,
&weightdenum[(iLBCbits_inst->startIdx-2-subframe)*(LPC_FILTERORDER+1)],
(int16_t)subcount);
subcount);
/* construct decoded vector */

View File

@ -23,12 +23,12 @@
void WebRtcIlbcfix_EnergyInverse(
int16_t *energy, /* (i/o) Energy and inverse
energy (in Q29) */
int noOfEnergies) /* (i) The length of the energy
size_t noOfEnergies) /* (i) The length of the energy
vector */
{
int32_t Nom=(int32_t)0x1FFFFFFF;
int16_t *energyPtr;
int i;
size_t i;
/* Set the minimum energy value to 16384 to avoid overflow */
energyPtr=energy;

View File

@ -26,7 +26,7 @@
void WebRtcIlbcfix_EnergyInverse(
int16_t *energy, /* (i/o) Energy and inverse
energy (in Q29) */
int noOfEnergies); /* (i) The length of the energy
size_t noOfEnergies); /* (i) The length of the energy
vector */
#endif

View File

@ -33,7 +33,7 @@ void WebRtcIlbcfix_Enhancer(
int16_t centerStartPos, /* (i) first sample current block within idata */
int16_t *period, /* (i) pitch period array (pitch backward in time) */
int16_t *plocs, /* (i) locations where period array values valid */
int16_t periodl /* (i) dimension of period and plocs */
size_t periodl /* (i) dimension of period and plocs */
){
/* Stack based */
int16_t surround[ENH_BLOCKL];

View File

@ -33,7 +33,7 @@ void WebRtcIlbcfix_Enhancer(
int16_t centerStartPos, /* (i) first sample current block within idata */
int16_t *period, /* (i) pitch period array (pitch backward in time) */
int16_t *plocs, /* (i) locations where period array values valid */
int16_t periodl /* (i) dimension of period and plocs */
size_t periodl /* (i) dimension of period and plocs */
);
#endif

View File

@ -30,19 +30,21 @@
* interface for enhancer
*---------------------------------------------------------------*/
int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
size_t WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
int16_t *out, /* (o) enhanced signal */
int16_t *in, /* (i) unenhanced signal */
IlbcDecoder *iLBCdec_inst /* (i) buffers etc */
){
int iblock;
int lag=20, tlag=20;
int inLen=iLBCdec_inst->blockl+120;
int16_t scale, scale1, plc_blockl;
size_t lag=20, tlag=20;
size_t inLen=iLBCdec_inst->blockl+120;
int16_t scale, scale1;
size_t plc_blockl;
int16_t *enh_buf, *enh_period;
int32_t tmp1, tmp2, max, new_blocks;
int16_t *enh_bufPtr1;
int i, k;
size_t i;
int k;
int16_t EnChange;
int16_t SqrtEnChange;
int16_t inc;
@ -56,7 +58,8 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
int32_t ener;
int16_t enerSh;
int16_t corrSh;
int16_t ind, sh;
size_t ind;
int16_t sh;
int16_t start, stop;
/* Stack based */
int16_t totsh[3];
@ -168,7 +171,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
}
}
lag = lagmax[ind] + 10;
lag = (size_t)(lagmax[ind] + 10);
/* Store the estimated lag in the non-downsampled domain */
enh_period[ENH_NBLOCKS_TOT - new_blocks + iblock] = (int16_t)(lag * 8);
@ -224,7 +227,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
(plc_blockl-lag));
}
} else {
int pos;
size_t pos;
pos = plc_blockl;
@ -280,8 +283,8 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
/* Multiply first part of vector with 2*SqrtEnChange */
WebRtcSpl_ScaleVector(plc_pred, plc_pred, SqrtEnChange,
(int16_t)(plc_blockl-16), 14);
WebRtcSpl_ScaleVector(plc_pred, plc_pred, SqrtEnChange, plc_blockl-16,
14);
/* Calculate increase parameter for window part (16 last samples) */
/* (1-2*SqrtEnChange)/16 in Q15 */
@ -343,7 +346,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
LPC_FILTERORDER);
WebRtcIlbcfix_HpOutput(synt, (int16_t*)WebRtcIlbcfix_kHpOutCoefs,
iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
(int16_t)lag);
lag);
WebRtcSpl_FilterARFastQ12(
enh_bufPtr1, synt,
&iLBCdec_inst->old_syntdenum[
@ -354,7 +357,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
LPC_FILTERORDER);
WebRtcIlbcfix_HpOutput(synt, (int16_t*)WebRtcIlbcfix_kHpOutCoefs,
iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
(int16_t)lag);
lag);
}
}

View File

@ -25,7 +25,7 @@
* interface for enhancer
*---------------------------------------------------------------*/
int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
size_t WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
int16_t *out, /* (o) enhanced signal */
int16_t *in, /* (i) unenhanced signal */
IlbcDecoder *iLBCdec_inst /* (i) buffers etc */

View File

@ -29,8 +29,8 @@ void WebRtcIlbcfix_FilteredCbVecs(
int16_t *cbvectors, /* (o) Codebook vector for the higher section */
int16_t *CBmem, /* (i) Codebook memory that is filtered to create a
second CB section */
int lMem, /* (i) Length of codebook memory */
int16_t samples /* (i) Number of samples to filter */
size_t lMem, /* (i) Length of codebook memory */
size_t samples /* (i) Number of samples to filter */
) {
/* Set up the memory, start with zero state */

View File

@ -31,8 +31,8 @@ void WebRtcIlbcfix_FilteredCbVecs(
int16_t *cbvectors, /* (o) Codebook vector for the higher section */
int16_t *CBmem, /* (i) Codebook memory that is filtered to create a
second CB section */
int lMem, /* (i) Length of codebook memory */
int16_t samples /* (i) Number of samples to filter */
size_t lMem, /* (i) Length of codebook memory */
size_t samples /* (i) Number of samples to filter */
);
#endif

View File

@ -23,7 +23,7 @@
* Classification of subframes to localize start state
*---------------------------------------------------------------*/
int16_t WebRtcIlbcfix_FrameClassify(
size_t WebRtcIlbcfix_FrameClassify(
/* (o) Index to the max-energy sub frame */
IlbcEncoder *iLBCenc_inst,
/* (i/o) the encoder state structure */
@ -35,8 +35,8 @@ int16_t WebRtcIlbcfix_FrameClassify(
int32_t *seqEnPtr;
int32_t maxW32;
int16_t scale1;
int16_t pos;
int n;
size_t pos;
size_t n;
/*
Calculate the energy of each of the 80 sample blocks
@ -82,7 +82,7 @@ int16_t WebRtcIlbcfix_FrameClassify(
}
/* Extract the best choice of start state */
pos = WebRtcSpl_MaxIndexW32(ssqEn, iLBCenc_inst->nsub - 1) + 1;
pos = (size_t)WebRtcSpl_MaxIndexW32(ssqEn, iLBCenc_inst->nsub - 1) + 1;
return(pos);
}

View File

@ -19,7 +19,7 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FRAME_CLASSIFY_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FRAME_CLASSIFY_H_
int16_t WebRtcIlbcfix_FrameClassify(
size_t WebRtcIlbcfix_FrameClassify(
/* (o) Index to the max-energy sub frame */
IlbcEncoder *iLBCenc_inst,
/* (i/o) the encoder state structure */

View File

@ -27,12 +27,12 @@
void WebRtcIlbcfix_GetCbVec(
int16_t *cbvec, /* (o) Constructed codebook vector */
int16_t *mem, /* (i) Codebook buffer */
int16_t index, /* (i) Codebook index */
int16_t lMem, /* (i) Length of codebook buffer */
int16_t cbveclen /* (i) Codebook vector length */
size_t index, /* (i) Codebook index */
size_t lMem, /* (i) Length of codebook buffer */
size_t cbveclen /* (i) Codebook vector length */
){
int16_t k, base_size;
int16_t lag;
size_t k, base_size;
size_t lag;
/* Stack based */
int16_t tempbuff2[SUBL+5];
@ -58,7 +58,7 @@ void WebRtcIlbcfix_GetCbVec(
/* Calculate lag */
k = (int16_t)(2 * (index - (lMem - cbveclen + 1))) + cbveclen;
k = (2 * (index - (lMem - cbveclen + 1))) + cbveclen;
lag = k / 2;
@ -70,7 +70,7 @@ void WebRtcIlbcfix_GetCbVec(
else {
int16_t memIndTest;
size_t memIndTest;
/* first non-interpolated vectors */
@ -100,7 +100,7 @@ void WebRtcIlbcfix_GetCbVec(
/* do filtering */
WebRtcSpl_FilterMAFastQ12(
&mem[memIndTest+7], tempbuff2, (int16_t*)WebRtcIlbcfix_kCbFiltersRev,
CB_FILTERLEN, (int16_t)(cbveclen+5));
CB_FILTERLEN, cbveclen+5);
/* Calculate lag index */
lag = (cbveclen<<1)-20+index-base_size-lMem-1;

View File

@ -22,9 +22,9 @@
void WebRtcIlbcfix_GetCbVec(
int16_t *cbvec, /* (o) Constructed codebook vector */
int16_t *mem, /* (i) Codebook buffer */
int16_t index, /* (i) Codebook index */
int16_t lMem, /* (i) Length of codebook buffer */
int16_t cbveclen /* (i) Codebook vector length */
size_t index, /* (i) Codebook index */
size_t lMem, /* (i) Length of codebook buffer */
size_t cbveclen /* (i) Codebook vector length */
);
#endif

View File

@ -31,12 +31,13 @@ void WebRtcIlbcfix_GetSyncSeq(
int16_t centerStartPos, /* (i) where current block starts */
int16_t *period, /* (i) rough-pitch-period array (Q-2) */
int16_t *plocs, /* (i) where periods of period array are taken (Q-2) */
int16_t periodl, /* (i) dimension period array */
size_t periodl, /* (i) dimension period array */
int16_t hl, /* (i) 2*hl+1 is the number of sequences */
int16_t *surround /* (i/o) The contribution from this sequence
summed with earlier contributions */
){
int16_t i,centerEndPos,q;
size_t i;
int16_t centerEndPos,q;
/* Stack based */
int16_t lagBlock[2*ENH_HL+1];
int16_t blockStartPos[2*ENH_HL+1]; /* Defines the position to search around (Q2) */

View File

@ -31,7 +31,7 @@ void WebRtcIlbcfix_GetSyncSeq(
int16_t centerStartPos, /* (i) where current block starts */
int16_t *period, /* (i) rough-pitch-period array (Q-2) */
int16_t *plocs, /* (i) where periods of period array are taken (Q-2) */
int16_t periodl, /* (i) dimension period array */
size_t periodl, /* (i) dimension period array */
int16_t hl, /* (i) 2*hl+1 is the number of sequences */
int16_t *surround /* (i/o) The contribution from this sequence
summed with earlier contributions */

View File

@ -30,9 +30,9 @@ void WebRtcIlbcfix_HpInput(
int16_t *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
yhi[n-2] ylow[n-2] */
int16_t *x, /* (i/o) Filter state x[n-1] x[n-2] */
int16_t len) /* (i) Number of samples to filter */
size_t len) /* (i) Number of samples to filter */
{
int i;
size_t i;
int32_t tmpW32;
int32_t tmpW32b;

View File

@ -29,6 +29,6 @@ void WebRtcIlbcfix_HpInput(
int16_t *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
yhi[n-2] ylow[n-2] */
int16_t *x, /* (i/o) Filter state x[n-1] x[n-2] */
int16_t len); /* (i) Number of samples to filter */
size_t len); /* (i) Number of samples to filter */
#endif

View File

@ -30,9 +30,9 @@ void WebRtcIlbcfix_HpOutput(
int16_t *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
yhi[n-2] ylow[n-2] */
int16_t *x, /* (i/o) Filter state x[n-1] x[n-2] */
int16_t len) /* (i) Number of samples to filter */
size_t len) /* (i) Number of samples to filter */
{
int i;
size_t i;
int32_t tmpW32;
int32_t tmpW32b;

View File

@ -29,6 +29,6 @@ void WebRtcIlbcfix_HpOutput(
int16_t *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
yhi[n-2] ylow[n-2] */
int16_t *x, /* (i/o) Filter state x[n-1] x[n-2] */
int16_t len); /* (i) Number of samples to filter */
size_t len); /* (i) Number of samples to filter */
#endif

View File

@ -90,10 +90,10 @@ int16_t WebRtcIlbcfix_EncoderInit(IlbcEncoderInstance* iLBCenc_inst,
int WebRtcIlbcfix_Encode(IlbcEncoderInstance* iLBCenc_inst,
const int16_t* speechIn,
int16_t len,
size_t len,
uint8_t* encoded) {
int16_t pos = 0;
int16_t encpos = 0;
size_t pos = 0;
size_t encpos = 0;
if ((len != ((IlbcEncoder*)iLBCenc_inst)->blockl) &&
#ifdef SPLIT_10MS
@ -118,7 +118,7 @@ int WebRtcIlbcfix_Encode(IlbcEncoderInstance* iLBCenc_inst,
#endif
encpos += ((IlbcEncoder*)iLBCenc_inst)->no_of_words;
}
return (encpos*2);
return (int)(encpos*2);
}
}
@ -143,11 +143,11 @@ int16_t WebRtcIlbcfix_Decoderinit30Ms(IlbcDecoderInstance *iLBCdec_inst) {
int WebRtcIlbcfix_Decode(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
int16_t len,
size_t len,
int16_t* decoded,
int16_t* speechType)
{
int i=0;
size_t i=0;
/* Allow for automatic switching between the frame sizes
(although you do get some discontinuity) */
if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
@ -191,16 +191,16 @@ int WebRtcIlbcfix_Decode(IlbcDecoderInstance* iLBCdec_inst,
}
/* iLBC does not support VAD/CNG yet */
*speechType=1;
return(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
}
int WebRtcIlbcfix_Decode20Ms(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
int16_t len,
size_t len,
int16_t* decoded,
int16_t* speechType)
{
int i=0;
size_t i=0;
if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
(len==2*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
(len==3*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)) {
@ -219,16 +219,16 @@ int WebRtcIlbcfix_Decode20Ms(IlbcDecoderInstance* iLBCdec_inst,
}
/* iLBC does not support VAD/CNG yet */
*speechType=1;
return(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
}
int WebRtcIlbcfix_Decode30Ms(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
int16_t len,
size_t len,
int16_t* decoded,
int16_t* speechType)
{
int i=0;
size_t i=0;
if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
(len==2*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
(len==3*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)) {
@ -247,13 +247,13 @@ int WebRtcIlbcfix_Decode30Ms(IlbcDecoderInstance* iLBCdec_inst,
}
/* iLBC does not support VAD/CNG yet */
*speechType=1;
return(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
}
int16_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance* iLBCdec_inst,
int16_t* decoded,
int16_t noOfLostFrames) {
int i;
size_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance* iLBCdec_inst,
int16_t* decoded,
size_t noOfLostFrames) {
size_t i;
uint16_t dummy;
for (i=0;i<noOfLostFrames;i++) {
@ -265,9 +265,9 @@ int16_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance* iLBCdec_inst,
return (noOfLostFrames*((IlbcDecoder*)iLBCdec_inst)->blockl);
}
int16_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance* iLBCdec_inst,
int16_t* decoded,
int16_t noOfLostFrames) {
size_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance* iLBCdec_inst,
int16_t* decoded,
size_t noOfLostFrames) {
/* Two input parameters not used, but needed for function pointers in NetEQ */
(void)(decoded = NULL);
(void)(noOfLostFrames = 0);
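A minimal caller sketch for illustration of the size_t-based PLC call above, assuming a decoder instance created and initialized elsewhere; the helper name and include path are placeholders.

#include <stddef.h>
#include <stdint.h>

#include "ilbc.h"  /* iLBC interface header; include path assumed */

/* Illustrative helper: conceal |lost_frames| lost frames with the
   size_t-based PLC call. The return value is now simply the number of
   samples written (lost_frames * blockl); there is no error return. */
static size_t ConcealLostFrames(IlbcDecoderInstance* dec,
                                int16_t* out, /* >= lost_frames * BLOCKL_MAX samples */
                                size_t lost_frames) {
  return WebRtcIlbcfix_DecodePlc(dec, out, lost_frames);
}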

View File

@ -92,5 +92,5 @@ int WebRtcIlbcfix_InitDecode( /* (o) Number of decoded samples */
iLBCdec_inst->prev_enh_pl = 0;
return (iLBCdec_inst->blockl);
return (int)(iLBCdec_inst->blockl);
}

View File

@ -67,5 +67,5 @@ int WebRtcIlbcfix_InitEncode( /* (o) Number of bytes encoded */
iLBCenc_inst->section = 0;
#endif
return (iLBCenc_inst->no_of_bytes);
return (int)(iLBCenc_inst->no_of_bytes);
}

View File

@ -36,8 +36,8 @@ class AudioEncoderIlbc final : public AudioEncoder {
int SampleRateHz() const override;
int NumChannels() const override;
size_t MaxEncodedBytes() const override;
int Num10MsFramesInNextPacket() const override;
int Max10MsFramesInAPacket() const override;
size_t Num10MsFramesInNextPacket() const override;
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
@ -47,10 +47,10 @@ class AudioEncoderIlbc final : public AudioEncoder {
private:
size_t RequiredOutputSizeBytes() const;
static const int kMaxSamplesPerPacket = 480;
static const size_t kMaxSamplesPerPacket = 480;
const int payload_type_;
const int num_10ms_frames_per_packet_;
int num_10ms_frames_buffered_;
const size_t num_10ms_frames_per_packet_;
size_t num_10ms_frames_buffered_;
uint32_t first_timestamp_in_buffer_;
int16_t input_buffer_[kMaxSamplesPerPacket];
IlbcEncoderInstance* encoder_;

View File

@ -18,6 +18,8 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_INTERFACE_ILBC_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_INTERFACE_ILBC_H_
#include <stddef.h>
/*
* Define the fixpoint numeric formats
*/
@ -137,7 +139,7 @@ extern "C" {
int WebRtcIlbcfix_Encode(IlbcEncoderInstance *iLBCenc_inst,
const int16_t *speechIn,
int16_t len,
size_t len,
uint8_t* encoded);
/****************************************************************************
@ -182,17 +184,17 @@ extern "C" {
int WebRtcIlbcfix_Decode(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
int16_t len,
size_t len,
int16_t* decoded,
int16_t* speechType);
int WebRtcIlbcfix_Decode20Ms(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
int16_t len,
size_t len,
int16_t* decoded,
int16_t* speechType);
int WebRtcIlbcfix_Decode30Ms(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
int16_t len,
size_t len,
int16_t* decoded,
int16_t* speechType);
@ -210,13 +212,12 @@ extern "C" {
* Output:
* - decoded : The "decoded" vector
*
* Return value : >0 - Samples in decoded PLC vector
* -1 - Error
* Return value : Samples in decoded PLC vector
*/
int16_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance *iLBCdec_inst,
int16_t *decoded,
int16_t noOfLostFrames);
size_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance *iLBCdec_inst,
int16_t *decoded,
size_t noOfLostFrames);
/****************************************************************************
* WebRtcIlbcfix_NetEqPlc(...)
@ -232,13 +233,12 @@ extern "C" {
* Output:
* - decoded : The "decoded" vector (nothing in this case)
*
* Return value : >0 - Samples in decoded PLC vector
* -1 - Error
* Return value : Samples in decoded PLC vector
*/
int16_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance *iLBCdec_inst,
int16_t *decoded,
int16_t noOfLostFrames);
size_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance *iLBCdec_inst,
int16_t *decoded,
size_t noOfLostFrames);
/****************************************************************************
* WebRtcIlbcfix_version(...)
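A minimal encode/decode round-trip sketch for illustration of the updated size_t signatures in this header, assuming encoder/decoder instances created and initialized as in the test programs later in this diff; the helper name and include path are placeholders.

#include <stddef.h>
#include <stdint.h>

#include "ilbc.h"  /* iLBC interface header; include path assumed */

/* Illustrative round trip. |mode| is 20 or 30 (ms), so one frame is
   mode*8 samples at 8 kHz; |out| must hold at least BLOCKL_MAX samples.
   Returns the number of decoded samples, or -1 on error. */
static int EncodeDecodeOneFrame(IlbcEncoderInstance* enc,
                                IlbcDecoderInstance* dec,
                                const int16_t* speech,
                                int16_t mode,
                                int16_t* out) {
  uint8_t payload[2 * ILBCNOOFWORDS_MAX];
  int16_t speech_type;
  size_t frame_len = (size_t)(mode * 8);  /* samples per frame */
  int enc_bytes = WebRtcIlbcfix_Encode(enc, speech, frame_len, payload);
  if (enc_bytes < 0)
    return -1;
  return WebRtcIlbcfix_Decode(dec, payload, (size_t)enc_bytes, out,
                              &speech_type);
}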

View File

@ -22,7 +22,7 @@
void WebRtcIlbcfix_InterpolateSamples(
int16_t *interpSamples, /* (o) The interpolated samples */
int16_t *CBmem, /* (i) The CB memory */
int16_t lMem /* (i) Length of the CB memory */
size_t lMem /* (i) Length of the CB memory */
) {
int16_t *ppi, *ppo, i, j, temp1, temp2;
int16_t *tmpPtr;

View File

@ -28,7 +28,7 @@
void WebRtcIlbcfix_InterpolateSamples(
int16_t *interpSamples, /* (o) The interpolated samples */
int16_t *CBmem, /* (i) The CB memory */
int16_t lMem /* (i) Length of the CB memory */
size_t lMem /* (i) Length of the CB memory */
);
#endif

View File

@ -25,11 +25,12 @@
void WebRtcIlbcfix_MyCorr(
int32_t* corr, /* (o) correlation of seq1 and seq2 */
const int16_t* seq1, /* (i) first sequence */
int16_t dim1, /* (i) dimension first seq1 */
size_t dim1, /* (i) dimension first seq1 */
const int16_t* seq2, /* (i) second sequence */
int16_t dim2 /* (i) dimension seq2 */
size_t dim2 /* (i) dimension seq2 */
){
int16_t max, loops;
int16_t max;
size_t loops;
int scale;
/* Calculate correlation between the two sequences. Scale the

View File

@ -28,9 +28,9 @@
void WebRtcIlbcfix_MyCorr(
int32_t* corr, /* (o) correlation of seq1 and seq2 */
const int16_t* seq1, /* (i) first sequence */
int16_t dim1, /* (i) dimension first seq1 */
size_t dim1, /* (i) dimension first seq1 */
const int16_t* seq2, /* (i) second sequence */
int16_t dim2 /* (i) dimension seq2 */
size_t dim2 /* (i) dimension seq2 */
);
#endif

View File

@ -28,9 +28,9 @@ void WebRtcIlbcfix_NearestNeighbor(
int16_t *index, /* (o) index of array element closest to value */
int16_t *array, /* (i) data array (Q2) */
int16_t value, /* (i) value (Q2) */
int16_t arlength /* (i) dimension of data array (==8) */
size_t arlength /* (i) dimension of data array (==8) */
){
int i;
size_t i;
int16_t diff;
/* Stack based */
int32_t crit[8];

View File

@ -31,7 +31,7 @@ void WebRtcIlbcfix_NearestNeighbor(
int16_t *index, /* (o) index of array element closest to value */
int16_t *array, /* (i) data array (Q2) */
int16_t value, /* (i) value (Q2) */
int16_t arlength /* (i) dimension of data array (==8) */
size_t arlength /* (i) dimension of data array (==8) */
);
#endif

View File

@ -39,8 +39,9 @@ void WebRtcIlbcfix_Refiner(
summed with earlier contributions */
int16_t gain /* (i) Gain to use for this sequence */
){
int16_t estSegPosRounded,searchSegStartPos,searchSegEndPos,corrdim;
int16_t tloc,tloc2,i,st,en,fraction;
int16_t estSegPosRounded,searchSegStartPos,searchSegEndPos;
size_t corrdim,i;
int16_t tloc,tloc2,st,en,fraction;
int32_t maxtemp, scalefact;
int16_t *filtStatePtr, *polyPtr;
@ -65,13 +66,13 @@ void WebRtcIlbcfix_Refiner(
if(searchSegEndPos+ENH_BLOCKL >= idatal) {
searchSegEndPos=idatal-ENH_BLOCKL-1;
}
corrdim=searchSegEndPos-searchSegStartPos+1;
corrdim=(size_t)(searchSegEndPos-searchSegStartPos+1);
/* compute upsampled correlation and find
location of max */
WebRtcIlbcfix_MyCorr(corrVecTemp,idata+searchSegStartPos,
(int16_t)(corrdim+ENH_BLOCKL-1),idata+centerStartPos,ENH_BLOCKL);
corrdim+ENH_BLOCKL-1,idata+centerStartPos,ENH_BLOCKL);
/* Calculate the rescaling factor for the correlation in order to
put the correlation in a int16_t vector instead */
@ -110,7 +111,7 @@ void WebRtcIlbcfix_Refiner(
/* initialize the vector to be filtered, stuff with zeros
when data is outside idata buffer */
if(st<0){
WebRtcSpl_MemSetW16(vect, 0, (int16_t)(-st));
WebRtcSpl_MemSetW16(vect, 0, (size_t)(-st));
WEBRTC_SPL_MEMCPY_W16(&vect[-st], idata, (ENH_VECTL+st));
}
else{
@ -120,7 +121,7 @@ void WebRtcIlbcfix_Refiner(
WEBRTC_SPL_MEMCPY_W16(vect, &idata[st],
(ENH_VECTL-(en-idatal)));
WebRtcSpl_MemSetW16(&vect[ENH_VECTL-(en-idatal)], 0,
(int16_t)(en-idatal));
(size_t)(en-idatal));
}
else {
WEBRTC_SPL_MEMCPY_W16(vect, &idata[st], ENH_VECTL);

View File

@ -42,7 +42,8 @@ void WebRtcIlbcfix_SimpleInterpolateLsf(
IlbcEncoder *iLBCenc_inst
/* (i/o) the encoder state structure */
) {
int i, pos, lp_length;
size_t i;
int pos, lp_length;
int16_t *lsf2, *lsfdeq2;
/* Stack based */

View File

@ -34,7 +34,7 @@ void WebRtcIlbcfix_SimpleLpcAnalysis(
) {
int k;
int scale;
int16_t is;
size_t is;
int16_t stability;
/* Stack based */
int16_t A[LPC_FILTERORDER + 1];

View File

@ -24,14 +24,14 @@
*---------------------------------------------------------------*/
void WebRtcIlbcfix_StateConstruct(
int16_t idxForMax, /* (i) 6-bit index for the quantization of
size_t idxForMax, /* (i) 6-bit index for the quantization of
max amplitude */
int16_t *idxVec, /* (i) vector of quantization indexes */
int16_t *syntDenum, /* (i) synthesis filter denumerator */
int16_t *Out_fix, /* (o) the decoded state vector */
int16_t len /* (i) length of a state vector */
size_t len /* (i) length of a state vector */
) {
int k;
size_t k;
int16_t maxVal;
int16_t *tmp1, *tmp2, *tmp3;
/* Stack based */
@ -96,7 +96,7 @@ void WebRtcIlbcfix_StateConstruct(
/* Run MA filter + AR filter */
WebRtcSpl_FilterMAFastQ12(
sampleVal, sampleMa,
numerator, LPC_FILTERORDER+1, (int16_t)(len + LPC_FILTERORDER));
numerator, LPC_FILTERORDER+1, len + LPC_FILTERORDER);
WebRtcSpl_MemSetW16(&sampleMa[len + LPC_FILTERORDER], 0, (len - LPC_FILTERORDER));
WebRtcSpl_FilterARFastQ12(
sampleMa, sampleAr,

View File

@ -24,12 +24,12 @@
*---------------------------------------------------------------*/
void WebRtcIlbcfix_StateConstruct(
int16_t idxForMax, /* (i) 6-bit index for the quantization of
size_t idxForMax, /* (i) 6-bit index for the quantization of
max amplitude */
int16_t *idxVec, /* (i) vector of quantization indexes */
int16_t *syntDenum, /* (i) synthesis filter denumerator */
int16_t *Out_fix, /* (o) the decoded state vector */
int16_t len /* (i) length of a state vector */
size_t len /* (i) length of a state vector */
);
#endif

View File

@ -33,7 +33,7 @@ void WebRtcIlbcfix_StateSearch(
int16_t *syntDenum, /* (i) lpc synthesis filter */
int16_t *weightDenum /* (i) weighting filter denominator */
) {
int16_t k, index;
size_t k, index;
int16_t maxVal;
int16_t scale, shift;
int32_t maxValsq;
@ -64,9 +64,9 @@ void WebRtcIlbcfix_StateSearch(
/* Run the Zero-Pole filter (Circular convolution) */
WebRtcSpl_MemSetW16(residualLongVec, 0, LPC_FILTERORDER);
WebRtcSpl_FilterMAFastQ12(
residualLong, sampleMa,
numerator, LPC_FILTERORDER+1, (int16_t)(iLBCenc_inst->state_short_len + LPC_FILTERORDER));
WebRtcSpl_FilterMAFastQ12(residualLong, sampleMa, numerator,
LPC_FILTERORDER + 1,
iLBCenc_inst->state_short_len + LPC_FILTERORDER);
WebRtcSpl_MemSetW16(&sampleMa[iLBCenc_inst->state_short_len + LPC_FILTERORDER], 0, iLBCenc_inst->state_short_len - LPC_FILTERORDER);
WebRtcSpl_FilterARFastQ12(

View File

@ -24,10 +24,10 @@
void WebRtcIlbcfix_SwapBytes(
const uint16_t* input, /* (i) the sequence to swap */
int16_t wordLength, /* (i) number of uint16_t to swap */
size_t wordLength, /* (i) number of uint16_t to swap */
uint16_t* output /* (o) the swapped sequence */
) {
int k;
size_t k;
for (k = wordLength; k > 0; k--) {
*output++ = (*input >> 8)|(*input << 8);
input++;

View File

@ -27,7 +27,7 @@
void WebRtcIlbcfix_SwapBytes(
const uint16_t* input, /* (i) the sequence to swap */
int16_t wordLength, /* (i) number of uint16_t to swap */
size_t wordLength, /* (i) number of uint16_t to swap */
uint16_t* output /* (o) the swapped sequence */
);

View File

@ -47,12 +47,11 @@ int main(int argc, char* argv[])
int16_t data[BLOCKL_MAX];
uint8_t encoded_data[2 * ILBCNOOFWORDS_MAX];
int16_t decoded_data[BLOCKL_MAX];
int len;
short pli, mode;
int len_int, mode;
short pli;
int blockcount = 0;
int packetlosscount = 0;
int frameLen;
size_t len_i16s;
size_t frameLen, len, len_i16s;
int16_t speechType;
IlbcEncoderInstance *Enc_Inst;
IlbcDecoderInstance *Dec_Inst;
@ -153,23 +152,23 @@ int main(int argc, char* argv[])
WebRtcIlbcfix_EncoderInit(Enc_Inst, mode);
WebRtcIlbcfix_DecoderInit(Dec_Inst, mode);
frameLen = mode*8;
frameLen = (size_t)(mode*8);
/* loop over input blocks */
while (((int16_t)fread(data,sizeof(int16_t),frameLen,ifileid))==
frameLen) {
while (fread(data,sizeof(int16_t),frameLen,ifileid) == frameLen) {
blockcount++;
/* encoding */
fprintf(stderr, "--- Encoding block %i --- ",blockcount);
len = WebRtcIlbcfix_Encode(Enc_Inst, data, (int16_t)frameLen, encoded_data);
if (len < 0) {
len_int = WebRtcIlbcfix_Encode(Enc_Inst, data, frameLen, encoded_data);
if (len_int < 0) {
fprintf(stderr, "Error encoding\n");
exit(0);
}
len = (size_t)len_int;
fprintf(stderr, "\r");
/* write byte file */
@ -204,12 +203,13 @@ int main(int argc, char* argv[])
fprintf(stderr, "--- Decoding block %i --- ",blockcount);
if (pli==1) {
len=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data,
(int16_t)len, decoded_data,&speechType);
if (len < 0) {
len_int=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data,
len, decoded_data,&speechType);
if (len_int < 0) {
fprintf(stderr, "Error decoding\n");
exit(0);
}
len = (size_t)len_int;
} else {
len=WebRtcIlbcfix_DecodePlc(Dec_Inst, decoded_data, 1);
}
@ -217,8 +217,7 @@ int main(int argc, char* argv[])
/* write output file */
if (fwrite(decoded_data, sizeof(int16_t), len,
ofileid) != (size_t)len) {
if (fwrite(decoded_data, sizeof(int16_t), len, ofileid) != len) {
return -1;
}
}

View File

@ -41,15 +41,15 @@ int main(int argc, char* argv[])
{
FILE *ifileid,*efileid,*ofileid, *chfileid;
short encoded_data[55], data[240], speechType;
int len;
short mode, pli;
size_t readlen;
int len_int, mode;
short pli;
size_t len, readlen;
int blockcount = 0;
IlbcEncoderInstance *Enc_Inst;
IlbcDecoderInstance *Dec_Inst;
#ifdef JUNK_DATA
int i;
size_t i;
FILE *seedfile;
unsigned int random_seed = (unsigned int) time(NULL);//1196764538
#endif
@ -136,11 +136,12 @@ int main(int argc, char* argv[])
/* encoding */
fprintf(stderr, "--- Encoding block %i --- ",blockcount);
len=WebRtcIlbcfix_Encode(Enc_Inst, data, (short)readlen, encoded_data);
if (len < 0) {
len_int=WebRtcIlbcfix_Encode(Enc_Inst, data, readlen, encoded_data);
if (len_int < 0) {
fprintf(stderr, "Error encoding\n");
exit(0);
}
len = (size_t)len_int;
fprintf(stderr, "\r");
#ifdef JUNK_DATA
@ -174,12 +175,13 @@ int main(int argc, char* argv[])
/* decoding */
fprintf(stderr, "--- Decoding block %i --- ",blockcount);
if (pli==1) {
len=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data, (int16_t)len, data,
&speechType);
if (len < 0) {
len_int = WebRtcIlbcfix_Decode(Dec_Inst, encoded_data, len, data,
&speechType);
if (len_int < 0) {
fprintf(stderr, "Error decoding\n");
exit(0);
}
len = (size_t)len_int;
} else {
len=WebRtcIlbcfix_DecodePlc(Dec_Inst, data, 1);
}

View File

@ -26,9 +26,9 @@ void WebRtcIlbcfix_Window32W32(
int32_t *z, /* Output */
int32_t *x, /* Input (same domain as Output)*/
const int32_t *y, /* Q31 Window */
int16_t N /* length to process */
size_t N /* length to process */
) {
int16_t i;
size_t i;
int16_t x_low, x_hi, y_low, y_hi;
int16_t left_shifts;
int32_t temp;

View File

@ -29,7 +29,7 @@ void WebRtcIlbcfix_Window32W32(
int32_t *z, /* Output */
int32_t *x, /* Input (same domain as Output)*/
const int32_t *y, /* Q31 Window */
int16_t N /* length to process */
size_t N /* length to process */
);
#endif

View File

@ -23,16 +23,16 @@
* crossCorr*crossCorr/(energy) criteria
*---------------------------------------------------------------*/
int WebRtcIlbcfix_XcorrCoef(
size_t WebRtcIlbcfix_XcorrCoef(
int16_t *target, /* (i) first array */
int16_t *regressor, /* (i) second array */
int16_t subl, /* (i) dimension arrays */
int16_t searchLen, /* (i) the search length */
int16_t offset, /* (i) samples offset between arrays */
size_t subl, /* (i) dimension arrays */
size_t searchLen, /* (i) the search length */
size_t offset, /* (i) samples offset between arrays */
int16_t step /* (i) +1 or -1 */
){
int k;
int16_t maxlag;
size_t k;
size_t maxlag;
int16_t pos;
int16_t max;
int16_t crossCorrScale, Energyscale;

View File

@ -26,12 +26,12 @@
* crossCorr*crossCorr/(energy) criteria
*---------------------------------------------------------------*/
int WebRtcIlbcfix_XcorrCoef(
size_t WebRtcIlbcfix_XcorrCoef(
int16_t *target, /* (i) first array */
int16_t *regressor, /* (i) second array */
int16_t subl, /* (i) dimension arrays */
int16_t searchLen, /* (i) the search length */
int16_t offset, /* (i) samples offset between arrays */
size_t subl, /* (i) dimension arrays */
size_t searchLen, /* (i) the search length */
size_t offset, /* (i) samples offset between arrays */
int16_t step /* (i) +1 or -1 */
);

View File

@ -55,8 +55,8 @@ class AudioEncoderIsacT final : public AudioEncoder {
int SampleRateHz() const override;
int NumChannels() const override;
size_t MaxEncodedBytes() const override;
int Num10MsFramesInNextPacket() const override;
int Max10MsFramesInAPacket() const override;
size_t Num10MsFramesInNextPacket() const override;
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
@ -94,7 +94,7 @@ class AudioDecoderIsacT final : public AudioDecoder {
~AudioDecoderIsacT() override;
bool HasDecodePlc() const override;
int DecodePlc(int num_frames, int16_t* decoded) override;
size_t DecodePlc(size_t num_frames, int16_t* decoded) override;
int Init() override;
int IncomingPacket(const uint8_t* payload,
size_t payload_len,

Some files were not shown because too many files have changed in this diff.