Remove the state_ member from AudioDecoder

The subclasses that need a state pointer should declare it themselves---with
the right type, not void*, to get rid of all those casts.

Two small but not quite trivial cleanups are included because they
blocked the state_ removal:

  - AudioDecoderG722Stereo now inherits directly from AudioDecoder
    instead of being a subclass of AudioDecoderG722.

  - AudioDecoder now has a CngDecoderInstance member function, which
    is implemented only by AudioDecoderCng. This replaces the previous
    practice of calling AudioDecoder::state() and casting the result
    to a CNG_dec_inst*. It still isn't pretty, but now the blemish is
    plainly visible in the AudioDecoder class declaration.

R=henrik.lundin@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/24169005

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7644 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
kwiberg@webrtc.org
2014-11-06 07:54:31 +00:00
parent 32022c6fb1
commit 8b2058e733
7 changed files with 86 additions and 68 deletions

View File

@ -103,17 +103,17 @@ AudioDecoderPcm16BMultiCh::AudioDecoderPcm16BMultiCh(int num_channels) {
// iLBC
#ifdef WEBRTC_CODEC_ILBC
AudioDecoderIlbc::AudioDecoderIlbc() {
WebRtcIlbcfix_DecoderCreate(reinterpret_cast<iLBC_decinst_t**>(&state_));
WebRtcIlbcfix_DecoderCreate(&dec_state_);
}
AudioDecoderIlbc::~AudioDecoderIlbc() {
WebRtcIlbcfix_DecoderFree(static_cast<iLBC_decinst_t*>(state_));
WebRtcIlbcfix_DecoderFree(dec_state_);
}
int AudioDecoderIlbc::Decode(const uint8_t* encoded, size_t encoded_len,
int16_t* decoded, SpeechType* speech_type) {
int16_t temp_type = 1; // Default is speech.
int16_t ret = WebRtcIlbcfix_Decode(static_cast<iLBC_decinst_t*>(state_),
int16_t ret = WebRtcIlbcfix_Decode(dec_state_,
reinterpret_cast<const int16_t*>(encoded),
static_cast<int16_t>(encoded_len), decoded,
&temp_type);
@ -122,12 +122,11 @@ int AudioDecoderIlbc::Decode(const uint8_t* encoded, size_t encoded_len,
}
int AudioDecoderIlbc::DecodePlc(int num_frames, int16_t* decoded) {
return WebRtcIlbcfix_NetEqPlc(static_cast<iLBC_decinst_t*>(state_),
decoded, num_frames);
return WebRtcIlbcfix_NetEqPlc(dec_state_, decoded, num_frames);
}
int AudioDecoderIlbc::Init() {
return WebRtcIlbcfix_Decoderinit30Ms(static_cast<iLBC_decinst_t*>(state_));
return WebRtcIlbcfix_Decoderinit30Ms(dec_state_);
}
#endif
@ -135,19 +134,18 @@ int AudioDecoderIlbc::Init() {
#ifdef WEBRTC_CODEC_ISAC
AudioDecoderIsac::AudioDecoderIsac(int decode_sample_rate_hz) {
DCHECK(decode_sample_rate_hz == 16000 || decode_sample_rate_hz == 32000);
WebRtcIsac_Create(reinterpret_cast<ISACStruct**>(&state_));
WebRtcIsac_SetDecSampRate(static_cast<ISACStruct*>(state_),
decode_sample_rate_hz);
WebRtcIsac_Create(&isac_state_);
WebRtcIsac_SetDecSampRate(isac_state_, decode_sample_rate_hz);
}
AudioDecoderIsac::~AudioDecoderIsac() {
WebRtcIsac_Free(static_cast<ISACStruct*>(state_));
WebRtcIsac_Free(isac_state_);
}
int AudioDecoderIsac::Decode(const uint8_t* encoded, size_t encoded_len,
int16_t* decoded, SpeechType* speech_type) {
int16_t temp_type = 1; // Default is speech.
int16_t ret = WebRtcIsac_Decode(static_cast<ISACStruct*>(state_),
int16_t ret = WebRtcIsac_Decode(isac_state_,
encoded,
static_cast<int16_t>(encoded_len), decoded,
&temp_type);
@ -159,7 +157,7 @@ int AudioDecoderIsac::DecodeRedundant(const uint8_t* encoded,
size_t encoded_len, int16_t* decoded,
SpeechType* speech_type) {
int16_t temp_type = 1; // Default is speech.
int16_t ret = WebRtcIsac_DecodeRcu(static_cast<ISACStruct*>(state_),
int16_t ret = WebRtcIsac_DecodeRcu(isac_state_,
encoded,
static_cast<int16_t>(encoded_len), decoded,
&temp_type);
@ -168,12 +166,11 @@ int AudioDecoderIsac::DecodeRedundant(const uint8_t* encoded,
}
int AudioDecoderIsac::DecodePlc(int num_frames, int16_t* decoded) {
return WebRtcIsac_DecodePlc(static_cast<ISACStruct*>(state_),
decoded, num_frames);
return WebRtcIsac_DecodePlc(isac_state_, decoded, num_frames);
}
int AudioDecoderIsac::Init() {
return WebRtcIsac_DecoderInit(static_cast<ISACStruct*>(state_));
return WebRtcIsac_DecoderInit(isac_state_);
}
int AudioDecoderIsac::IncomingPacket(const uint8_t* payload,
@ -181,7 +178,7 @@ int AudioDecoderIsac::IncomingPacket(const uint8_t* payload,
uint16_t rtp_sequence_number,
uint32_t rtp_timestamp,
uint32_t arrival_timestamp) {
return WebRtcIsac_UpdateBwEstimate(static_cast<ISACStruct*>(state_),
return WebRtcIsac_UpdateBwEstimate(isac_state_,
payload,
static_cast<int32_t>(payload_len),
rtp_sequence_number,
@ -190,24 +187,24 @@ int AudioDecoderIsac::IncomingPacket(const uint8_t* payload,
}
int AudioDecoderIsac::ErrorCode() {
return WebRtcIsac_GetErrorCode(static_cast<ISACStruct*>(state_));
return WebRtcIsac_GetErrorCode(isac_state_);
}
#endif
// iSAC fix
#ifdef WEBRTC_CODEC_ISACFX
AudioDecoderIsacFix::AudioDecoderIsacFix() {
WebRtcIsacfix_Create(reinterpret_cast<ISACFIX_MainStruct**>(&state_));
WebRtcIsacfix_Create(&isac_state_);
}
AudioDecoderIsacFix::~AudioDecoderIsacFix() {
WebRtcIsacfix_Free(static_cast<ISACFIX_MainStruct*>(state_));
WebRtcIsacfix_Free(isac_state_);
}
int AudioDecoderIsacFix::Decode(const uint8_t* encoded, size_t encoded_len,
int16_t* decoded, SpeechType* speech_type) {
int16_t temp_type = 1; // Default is speech.
int16_t ret = WebRtcIsacfix_Decode(static_cast<ISACFIX_MainStruct*>(state_),
int16_t ret = WebRtcIsacfix_Decode(isac_state_,
encoded,
static_cast<int16_t>(encoded_len), decoded,
&temp_type);
@ -216,7 +213,7 @@ int AudioDecoderIsacFix::Decode(const uint8_t* encoded, size_t encoded_len,
}
int AudioDecoderIsacFix::Init() {
return WebRtcIsacfix_DecoderInit(static_cast<ISACFIX_MainStruct*>(state_));
return WebRtcIsacfix_DecoderInit(isac_state_);
}
int AudioDecoderIsacFix::IncomingPacket(const uint8_t* payload,
@ -225,32 +222,32 @@ int AudioDecoderIsacFix::IncomingPacket(const uint8_t* payload,
uint32_t rtp_timestamp,
uint32_t arrival_timestamp) {
return WebRtcIsacfix_UpdateBwEstimate(
static_cast<ISACFIX_MainStruct*>(state_),
isac_state_,
payload,
static_cast<int32_t>(payload_len),
rtp_sequence_number, rtp_timestamp, arrival_timestamp);
}
int AudioDecoderIsacFix::ErrorCode() {
return WebRtcIsacfix_GetErrorCode(static_cast<ISACFIX_MainStruct*>(state_));
return WebRtcIsacfix_GetErrorCode(isac_state_);
}
#endif
// G.722
#ifdef WEBRTC_CODEC_G722
AudioDecoderG722::AudioDecoderG722() {
WebRtcG722_CreateDecoder(reinterpret_cast<G722DecInst**>(&state_));
WebRtcG722_CreateDecoder(&dec_state_);
}
AudioDecoderG722::~AudioDecoderG722() {
WebRtcG722_FreeDecoder(static_cast<G722DecInst*>(state_));
WebRtcG722_FreeDecoder(dec_state_);
}
int AudioDecoderG722::Decode(const uint8_t* encoded, size_t encoded_len,
int16_t* decoded, SpeechType* speech_type) {
int16_t temp_type = 1; // Default is speech.
int16_t ret = WebRtcG722_Decode(
static_cast<G722DecInst*>(state_),
dec_state_,
const_cast<int16_t*>(reinterpret_cast<const int16_t*>(encoded)),
static_cast<int16_t>(encoded_len), decoded, &temp_type);
*speech_type = ConvertSpeechType(temp_type);
@ -258,7 +255,7 @@ int AudioDecoderG722::Decode(const uint8_t* encoded, size_t encoded_len,
}
int AudioDecoderG722::Init() {
return WebRtcG722_DecoderInit(static_cast<G722DecInst*>(state_));
return WebRtcG722_DecoderInit(dec_state_);
}
int AudioDecoderG722::PacketDuration(const uint8_t* encoded,
@ -267,18 +264,15 @@ int AudioDecoderG722::PacketDuration(const uint8_t* encoded,
return static_cast<int>(2 * encoded_len / channels_);
}
AudioDecoderG722Stereo::AudioDecoderG722Stereo()
: AudioDecoderG722(),
state_left_(state_), // Base member |state_| is used for left channel.
state_right_(NULL) {
AudioDecoderG722Stereo::AudioDecoderG722Stereo() {
channels_ = 2;
// |state_left_| already created by the base class AudioDecoderG722.
WebRtcG722_CreateDecoder(reinterpret_cast<G722DecInst**>(&state_right_));
WebRtcG722_CreateDecoder(&dec_state_left_);
WebRtcG722_CreateDecoder(&dec_state_right_);
}
AudioDecoderG722Stereo::~AudioDecoderG722Stereo() {
// |state_left_| will be freed by the base class AudioDecoderG722.
WebRtcG722_FreeDecoder(static_cast<G722DecInst*>(state_right_));
WebRtcG722_FreeDecoder(dec_state_left_);
WebRtcG722_FreeDecoder(dec_state_right_);
}
int AudioDecoderG722Stereo::Decode(const uint8_t* encoded, size_t encoded_len,
@ -289,13 +283,13 @@ int AudioDecoderG722Stereo::Decode(const uint8_t* encoded, size_t encoded_len,
SplitStereoPacket(encoded, encoded_len, encoded_deinterleaved);
// Decode left and right.
int16_t ret = WebRtcG722_Decode(
static_cast<G722DecInst*>(state_left_),
dec_state_left_,
reinterpret_cast<int16_t*>(encoded_deinterleaved),
static_cast<int16_t>(encoded_len / 2), decoded, &temp_type);
if (ret >= 0) {
int decoded_len = ret;
ret = WebRtcG722_Decode(
static_cast<G722DecInst*>(state_right_),
dec_state_right_,
reinterpret_cast<int16_t*>(&encoded_deinterleaved[encoded_len / 2]),
static_cast<int16_t>(encoded_len / 2), &decoded[decoded_len], &temp_type);
if (ret == decoded_len) {
@ -317,11 +311,10 @@ int AudioDecoderG722Stereo::Decode(const uint8_t* encoded, size_t encoded_len,
}
int AudioDecoderG722Stereo::Init() {
int ret = WebRtcG722_DecoderInit(static_cast<G722DecInst*>(state_right_));
if (ret != 0) {
return ret;
}
return AudioDecoderG722::Init();
int r = WebRtcG722_DecoderInit(dec_state_left_);
if (r != 0)
return r;
return WebRtcG722_DecoderInit(dec_state_right_);
}
// Split the stereo packet and place left and right channel after each other
@ -401,18 +394,17 @@ int AudioDecoderCelt::DecodePlc(int num_frames, int16_t* decoded) {
AudioDecoderOpus::AudioDecoderOpus(int num_channels) {
DCHECK(num_channels == 1 || num_channels == 2);
channels_ = num_channels;
WebRtcOpus_DecoderCreate(reinterpret_cast<OpusDecInst**>(&state_),
static_cast<int>(channels_));
WebRtcOpus_DecoderCreate(&dec_state_, static_cast<int>(channels_));
}
AudioDecoderOpus::~AudioDecoderOpus() {
WebRtcOpus_DecoderFree(static_cast<OpusDecInst*>(state_));
WebRtcOpus_DecoderFree(dec_state_);
}
int AudioDecoderOpus::Decode(const uint8_t* encoded, size_t encoded_len,
int16_t* decoded, SpeechType* speech_type) {
int16_t temp_type = 1; // Default is speech.
int16_t ret = WebRtcOpus_DecodeNew(static_cast<OpusDecInst*>(state_), encoded,
int16_t ret = WebRtcOpus_DecodeNew(dec_state_, encoded,
static_cast<int16_t>(encoded_len), decoded,
&temp_type);
if (ret > 0)
@ -425,7 +417,7 @@ int AudioDecoderOpus::DecodeRedundant(const uint8_t* encoded,
size_t encoded_len, int16_t* decoded,
SpeechType* speech_type) {
int16_t temp_type = 1; // Default is speech.
int16_t ret = WebRtcOpus_DecodeFec(static_cast<OpusDecInst*>(state_), encoded,
int16_t ret = WebRtcOpus_DecodeFec(dec_state_, encoded,
static_cast<int16_t>(encoded_len), decoded,
&temp_type);
if (ret > 0)
@ -435,12 +427,12 @@ int AudioDecoderOpus::DecodeRedundant(const uint8_t* encoded,
}
int AudioDecoderOpus::Init() {
return WebRtcOpus_DecoderInitNew(static_cast<OpusDecInst*>(state_));
return WebRtcOpus_DecoderInitNew(dec_state_);
}
int AudioDecoderOpus::PacketDuration(const uint8_t* encoded,
size_t encoded_len) {
return WebRtcOpus_DurationEst(static_cast<OpusDecInst*>(state_),
return WebRtcOpus_DurationEst(dec_state_,
encoded, static_cast<int>(encoded_len));
}
@ -458,19 +450,15 @@ bool AudioDecoderOpus::PacketHasFec(const uint8_t* encoded,
#endif
AudioDecoderCng::AudioDecoderCng() {
WebRtcCng_CreateDec(reinterpret_cast<CNG_dec_inst**>(&state_));
assert(state_);
CHECK_EQ(0, WebRtcCng_CreateDec(&dec_state_));
}
AudioDecoderCng::~AudioDecoderCng() {
if (state_) {
WebRtcCng_FreeDec(static_cast<CNG_dec_inst*>(state_));
}
WebRtcCng_FreeDec(dec_state_);
}
int AudioDecoderCng::Init() {
assert(state_);
return WebRtcCng_InitDec(static_cast<CNG_dec_inst*>(state_));
return WebRtcCng_InitDec(dec_state_);
}
} // namespace webrtc