Move Rent-A-Codec out of CodecManager

So that the two of them sit next to each other at the top level of
AudioCodingModuleImpl. CodecManager now manages the specifications for
Rent-A-Codec, rather than managing encoders directly.

BUG=webrtc:5028

Review URL: https://codereview.webrtc.org/1520283006

Cr-Commit-Position: refs/heads/master@{#11048}
Author: kwiberg
Date: 2015-12-16 04:19:08 -08:00
Committed by: Commit bot
Parent: a29386c26d
Commit: a6db4958c9

6 changed files with 167 additions and 221 deletions
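
The new division of labor is easiest to see in the send-codec registration path of AudioCodingModuleImpl. The following is a condensed sketch assembled from the diff below (locking and trace calls omitted; not a verbatim excerpt):

    // Sketch of the new RegisterSendCodec flow: CodecManager only validates
    // and records the specification; Rent-A-Codec creates the encoders.
    if (!codec_manager_.RegisterEncoder(send_codec))
      return -1;  // Invalid or unsupported specification.
    auto* sp = codec_manager_.GetStackParams();
    if (!sp->speech_encoder && codec_manager_.GetCodecInst()) {
      // No speech encoder yet, but a specification for making one.
      sp->speech_encoder =
          rent_a_codec_.RentEncoder(*codec_manager_.GetCodecInst());
      if (!sp->speech_encoder)
        return -1;
    }
    if (sp->speech_encoder)
      rent_a_codec_.RentEncoderStack(sp);  // Build the full encoder stack.
    return 0;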


@@ -133,7 +133,7 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
   if (!HaveValidEncoder("Process"))
     return -1;
 
-  AudioEncoder* audio_encoder = codec_manager_.CurrentEncoder();
+  AudioEncoder* audio_encoder = rent_a_codec_.GetEncoderStack();
   // Scale the timestamp to the codec's RTP timestamp rate.
   uint32_t rtp_timestamp =
       first_frame_ ? input_data.input_timestamp
@@ -198,19 +198,43 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
 // Can be called multiple times for Codec, CNG, RED.
 int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  return codec_manager_.RegisterEncoder(send_codec);
+  if (!codec_manager_.RegisterEncoder(send_codec)) {
+    return -1;
+  }
+  auto* sp = codec_manager_.GetStackParams();
+  if (!sp->speech_encoder && codec_manager_.GetCodecInst()) {
+    // We have no speech encoder, but we have a specification for making one.
+    AudioEncoder* enc =
+        rent_a_codec_.RentEncoder(*codec_manager_.GetCodecInst());
+    if (!enc)
+      return -1;
+    sp->speech_encoder = enc;
+  }
+  if (sp->speech_encoder)
+    rent_a_codec_.RentEncoderStack(sp);
+  return 0;
 }
 
 void AudioCodingModuleImpl::RegisterExternalSendCodec(
     AudioEncoder* external_speech_encoder) {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  codec_manager_.RegisterEncoder(external_speech_encoder);
+  auto* sp = codec_manager_.GetStackParams();
+  sp->speech_encoder = external_speech_encoder;
+  rent_a_codec_.RentEncoderStack(sp);
 }
 
 // Get current send codec.
 rtc::Optional<CodecInst> AudioCodingModuleImpl::SendCodec() const {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  return codec_manager_.GetCodecInst();
+  auto* ci = codec_manager_.GetCodecInst();
+  if (ci) {
+    return rtc::Optional<CodecInst>(*ci);
+  }
+  auto* enc = codec_manager_.GetStackParams()->speech_encoder;
+  if (enc) {
+    return rtc::Optional<CodecInst>(CodecManager::ForgeCodecInst(enc));
+  }
+  return rtc::Optional<CodecInst>();
 }
 
 // Get current send frequency.
@@ -219,19 +243,21 @@ int AudioCodingModuleImpl::SendFrequency() const {
                "SendFrequency()");
   CriticalSectionScoped lock(acm_crit_sect_.get());
 
-  if (!codec_manager_.CurrentEncoder()) {
+  const auto* enc = rent_a_codec_.GetEncoderStack();
+  if (!enc) {
     WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
                  "SendFrequency Failed, no codec is registered");
     return -1;
   }
 
-  return codec_manager_.CurrentEncoder()->SampleRateHz();
+  return enc->SampleRateHz();
 }
 
 void AudioCodingModuleImpl::SetBitRate(int bitrate_bps) {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  if (codec_manager_.CurrentEncoder()) {
-    codec_manager_.CurrentEncoder()->SetTargetBitrate(bitrate_bps);
+  auto* enc = rent_a_codec_.GetEncoderStack();
+  if (enc) {
+    enc->SetTargetBitrate(bitrate_bps);
   }
 }
 
@@ -298,10 +324,12 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
   }
 
   // Check whether we need an up-mix or down-mix?
-  bool remix = ptr_frame->num_channels_ !=
-               codec_manager_.CurrentEncoder()->NumChannels();
+  const int current_num_channels =
+      rent_a_codec_.GetEncoderStack()->NumChannels();
+  const bool same_num_channels =
+      ptr_frame->num_channels_ == current_num_channels;
 
-  if (remix) {
+  if (!same_num_channels) {
     if (ptr_frame->num_channels_ == 1) {
       if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0)
         return -1;
@@ -316,14 +344,13 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
   const int16_t* ptr_audio = ptr_frame->data_;
 
   // For pushing data to primary, point the |ptr_audio| to correct buffer.
-  if (codec_manager_.CurrentEncoder()->NumChannels() !=
-      ptr_frame->num_channels_)
+  if (!same_num_channels)
     ptr_audio = input_data->buffer;
 
   input_data->input_timestamp = ptr_frame->timestamp_;
   input_data->audio = ptr_audio;
   input_data->length_per_channel = ptr_frame->samples_per_channel_;
-  input_data->audio_channel = codec_manager_.CurrentEncoder()->NumChannels();
+  input_data->audio_channel = current_num_channels;
 
   return 0;
 }
@@ -335,13 +362,14 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
 // is required, |*ptr_out| points to |in_frame|.
 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
                                                const AudioFrame** ptr_out) {
-  bool resample = (in_frame.sample_rate_hz_ !=
-                   codec_manager_.CurrentEncoder()->SampleRateHz());
+  const auto* enc = rent_a_codec_.GetEncoderStack();
+  const bool resample = in_frame.sample_rate_hz_ != enc->SampleRateHz();
 
   // This variable is true if primary codec and secondary codec (if exists)
   // are both mono and input is stereo.
-  bool down_mix = (in_frame.num_channels_ == 2) &&
-                  (codec_manager_.CurrentEncoder()->NumChannels() == 1);
+  // TODO(henrik.lundin): This condition should probably be
+  // in_frame.num_channels_ > enc->NumChannels()
+  const bool down_mix = in_frame.num_channels_ == 2 && enc->NumChannels() == 1;
 
   if (!first_10ms_data_) {
     expected_in_ts_ = in_frame.timestamp_;
@@ -351,10 +379,8 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
     // TODO(turajs): Do we need a warning here.
     expected_codec_ts_ +=
         (in_frame.timestamp_ - expected_in_ts_) *
-        static_cast<uint32_t>(
-            (static_cast<double>(
-                 codec_manager_.CurrentEncoder()->SampleRateHz()) /
-             static_cast<double>(in_frame.sample_rate_hz_)));
+        static_cast<uint32_t>(static_cast<double>(enc->SampleRateHz()) /
+                              static_cast<double>(in_frame.sample_rate_hz_));
     expected_in_ts_ = in_frame.timestamp_;
   }
 
@@ -393,8 +419,7 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
     dest_ptr_audio = preprocess_frame_.data_;
 
     int samples_per_channel = resampler_.Resample10Msec(
-        src_ptr_audio, in_frame.sample_rate_hz_,
-        codec_manager_.CurrentEncoder()->SampleRateHz(),
+        src_ptr_audio, in_frame.sample_rate_hz_, enc->SampleRateHz(),
         preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples,
         dest_ptr_audio);
 
@@ -405,8 +430,7 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
     }
     preprocess_frame_.samples_per_channel_ =
         static_cast<size_t>(samples_per_channel);
-    preprocess_frame_.sample_rate_hz_ =
-        codec_manager_.CurrentEncoder()->SampleRateHz();
+    preprocess_frame_.sample_rate_hz_ = enc->SampleRateHz();
   }
 
   expected_codec_ts_ +=
@@ -422,17 +446,21 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
 bool AudioCodingModuleImpl::REDStatus() const {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  return codec_manager_.red_enabled();
+  return codec_manager_.GetStackParams()->use_red;
 }
 
 // Configure RED status i.e on/off.
-int AudioCodingModuleImpl::SetREDStatus(
+int AudioCodingModuleImpl::SetREDStatus(bool enable_red) {
 #ifdef WEBRTC_CODEC_RED
-    bool enable_red) {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  return codec_manager_.SetCopyRed(enable_red) ? 0 : -1;
+  if (!codec_manager_.SetCopyRed(enable_red)) {
+    return -1;
+  }
+  auto* sp = codec_manager_.GetStackParams();
+  if (sp->speech_encoder)
+    rent_a_codec_.RentEncoderStack(sp);
+  return 0;
 #else
-    bool /* enable_red */) {
   WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
                " WEBRTC_CODEC_RED is undefined");
   return -1;
@@ -445,18 +473,29 @@ int AudioCodingModuleImpl::SetREDStatus(
 bool AudioCodingModuleImpl::CodecFEC() const {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  return codec_manager_.codec_fec_enabled();
+  return codec_manager_.GetStackParams()->use_codec_fec;
 }
 
 int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  return codec_manager_.SetCodecFEC(enable_codec_fec);
+  if (!codec_manager_.SetCodecFEC(enable_codec_fec)) {
+    return -1;
+  }
+  auto* sp = codec_manager_.GetStackParams();
+  if (sp->speech_encoder)
+    rent_a_codec_.RentEncoderStack(sp);
+  if (enable_codec_fec) {
+    return sp->use_codec_fec ? 0 : -1;
+  } else {
+    RTC_DCHECK(!sp->use_codec_fec);
+    return 0;
+  }
 }
 
 int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
   CriticalSectionScoped lock(acm_crit_sect_.get());
   if (HaveValidEncoder("SetPacketLossRate")) {
-    codec_manager_.CurrentEncoder()->SetProjectedPacketLossRate(loss_rate /
-                                                                100.0);
+    rent_a_codec_.GetEncoderStack()->SetProjectedPacketLossRate(loss_rate /
+                                                                100.0);
   }
   return 0;
@@ -471,14 +510,22 @@ int AudioCodingModuleImpl::SetVAD(bool enable_dtx,
   // Note: |enable_vad| is not used; VAD is enabled based on the DTX setting.
   RTC_DCHECK_EQ(enable_dtx, enable_vad);
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  return codec_manager_.SetVAD(enable_dtx, mode);
+  if (!codec_manager_.SetVAD(enable_dtx, mode)) {
+    return -1;
+  }
+  auto* sp = codec_manager_.GetStackParams();
+  if (sp->speech_encoder)
+    rent_a_codec_.RentEncoderStack(sp);
+  return 0;
 }
 
 // Get VAD/DTX settings.
 int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled,
                                ACMVADMode* mode) const {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  codec_manager_.VAD(dtx_enabled, vad_enabled, mode);
+  const auto* sp = codec_manager_.GetStackParams();
+  *dtx_enabled = *vad_enabled = sp->use_cng;
+  *mode = sp->vad_mode;
   return 0;
 }
 
@@ -565,9 +612,11 @@ int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
   // Get |decoder| associated with |codec|. |decoder| is NULL if |codec| does
   // not own its decoder.
-  return receiver_.AddCodec(*codec_index, codec.pltype, codec.channels,
-                            codec.plfreq, codec_manager_.GetAudioDecoder(codec),
-                            codec.plname);
+  return receiver_.AddCodec(
+      *codec_index, codec.pltype, codec.channels, codec.plfreq,
+      STR_CASE_CMP(codec.plname, "isac") == 0 ? rent_a_codec_.RentIsacDecoder()
+                                              : nullptr,
+      codec.plname);
 }
 
 int AudioCodingModuleImpl::RegisterExternalReceiveCodec(
@@ -709,7 +758,7 @@ int AudioCodingModuleImpl::SetOpusApplication(OpusApplicationMode application) {
     FATAL();
     return 0;
   }
-  return codec_manager_.CurrentEncoder()->SetApplication(app) ? 0 : -1;
+  return rent_a_codec_.GetEncoderStack()->SetApplication(app) ? 0 : -1;
 }
 
 // Informs Opus encoder of the maximum playback rate the receiver will render.
@@ -720,7 +769,7 @@ int AudioCodingModuleImpl::SetOpusMaxPlaybackRate(int frequency_hz) {
   }
   if (!codec_manager_.CurrentEncoderIsOpus())
     return -1;
-  codec_manager_.CurrentEncoder()->SetMaxPlaybackRate(frequency_hz);
+  rent_a_codec_.GetEncoderStack()->SetMaxPlaybackRate(frequency_hz);
   return 0;
 }
 
@@ -731,7 +780,7 @@ int AudioCodingModuleImpl::EnableOpusDtx() {
   }
   if (!codec_manager_.CurrentEncoderIsOpus())
     return -1;
-  return codec_manager_.CurrentEncoder()->SetDtx(true) ? 0 : -1;
+  return rent_a_codec_.GetEncoderStack()->SetDtx(true) ? 0 : -1;
 }
 
 int AudioCodingModuleImpl::DisableOpusDtx() {
@@ -741,7 +790,7 @@ int AudioCodingModuleImpl::DisableOpusDtx() {
   }
   if (!codec_manager_.CurrentEncoderIsOpus())
     return -1;
-  return codec_manager_.CurrentEncoder()->SetDtx(false) ? 0 : -1;
+  return rent_a_codec_.GetEncoderStack()->SetDtx(false) ? 0 : -1;
 }
 
 int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) {
@@ -749,7 +798,7 @@ int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) {
 }
 
 bool AudioCodingModuleImpl::HaveValidEncoder(const char* caller_name) const {
-  if (!codec_manager_.CurrentEncoder()) {
+  if (!rent_a_codec_.GetEncoderStack()) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                  "%s failed: No send codec is registered.", caller_name);
     return false;


@@ -249,6 +249,7 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
   AcmReceiver receiver_;  // AcmReceiver has it's own internal lock.
   ChangeLogger bitrate_logger_ GUARDED_BY(acm_crit_sect_);
   CodecManager codec_manager_ GUARDED_BY(acm_crit_sect_);
+  RentACodec rent_a_codec_ GUARDED_BY(acm_crit_sect_);
 
   // This is to keep track of CN instances where we can send DTMFs.
   uint8_t previous_pltype_ GUARDED_BY(acm_crit_sect_);


@@ -1619,10 +1619,6 @@ TEST_F(AcmSenderBitExactnessOldApi, External_Pcmu_20ms) {
       .Times(AtLeast(1))
       .WillRepeatedly(
           Invoke(&encoder, &AudioEncoderPcmU::Num10MsFramesInNextPacket));
-  EXPECT_CALL(mock_encoder, Max10MsFramesInAPacket())
-      .Times(AtLeast(1))
-      .WillRepeatedly(
-          Invoke(&encoder, &AudioEncoderPcmU::Max10MsFramesInAPacket));
   EXPECT_CALL(mock_encoder, GetTargetBitrate())
       .Times(AtLeast(1))
       .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::GetTargetBitrate));


@@ -55,14 +55,6 @@ int IsValidSendCodec(const CodecInst& send_codec) {
   return RentACodec::CodecIndexFromId(*maybe_codec_id).value_or(-1);
 }
 
-bool IsIsac(const CodecInst& codec) {
-  return
-#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
-      !STR_CASE_CMP(codec.plname, "isac") ||
-#endif
-      false;
-}
-
 bool IsOpus(const CodecInst& codec) {
   return
 #ifdef WEBRTC_CODEC_OPUS
@@ -71,138 +63,73 @@ bool IsOpus(const CodecInst& codec) {
       false;
 }
 
-bool IsPcmU(const CodecInst& codec) {
-  return !STR_CASE_CMP(codec.plname, "pcmu");
-}
-
-bool IsPcmA(const CodecInst& codec) {
-  return !STR_CASE_CMP(codec.plname, "pcma");
-}
-
-bool IsPcm16B(const CodecInst& codec) {
-  return !STR_CASE_CMP(codec.plname, "l16");
-}
-
-bool IsIlbc(const CodecInst& codec) {
-  return
-#ifdef WEBRTC_CODEC_ILBC
-      !STR_CASE_CMP(codec.plname, "ilbc") ||
-#endif
-      false;
-}
-
-bool IsG722(const CodecInst& codec) {
-  return
-#ifdef WEBRTC_CODEC_G722
-      !STR_CASE_CMP(codec.plname, "g722") ||
-#endif
-      false;
-}
-
-bool CodecSupported(const CodecInst& codec) {
-  return IsOpus(codec) || IsPcmU(codec) || IsPcmA(codec) || IsPcm16B(codec) ||
-         IsIlbc(codec) || IsG722(codec) || IsIsac(codec);
-}
-
-const CodecInst kEmptyCodecInst = {-1, "noCodecRegistered", 0, 0, 0, 0};
-
 }  // namespace
 
-CodecManager::CodecManager()
-    : send_codec_inst_(kEmptyCodecInst), encoder_is_opus_(false) {
+CodecManager::CodecManager() {
   thread_checker_.DetachFromThread();
 }
 
 CodecManager::~CodecManager() = default;
 
-int CodecManager::RegisterEncoder(const CodecInst& send_codec) {
+bool CodecManager::RegisterEncoder(const CodecInst& send_codec) {
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   int codec_id = IsValidSendCodec(send_codec);
 
   // Check for reported errors from function IsValidSendCodec().
   if (codec_id < 0) {
-    return -1;
+    return false;
   }
 
   int dummy_id = 0;
   switch (RentACodec::RegisterRedPayloadType(
       &codec_stack_params_.red_payload_types, send_codec)) {
     case RentACodec::RegistrationResult::kOk:
-      return 0;
+      return true;
     case RentACodec::RegistrationResult::kBadFreq:
       WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
                    "RegisterSendCodec() failed, invalid frequency for RED"
                    " registration");
-      return -1;
+      return false;
     case RentACodec::RegistrationResult::kSkip:
       break;
   }
   switch (RentACodec::RegisterCngPayloadType(
       &codec_stack_params_.cng_payload_types, send_codec)) {
    case RentACodec::RegistrationResult::kOk:
-      return 0;
+      return true;
    case RentACodec::RegistrationResult::kBadFreq:
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
                   "RegisterSendCodec() failed, invalid frequency for CNG"
                   " registration");
-      return -1;
    case RentACodec::RegistrationResult::kSkip:
      break;
  }
 
-  encoder_is_opus_ = IsOpus(send_codec);
-
-  if (encoder_is_opus_) {
+  if (IsOpus(send_codec)) {
     // VAD/DTX not supported.
     codec_stack_params_.use_cng = false;
   }
 
-  // Recreate the encoder if anything except the send bitrate has changed.
-  if (!CurrentEncoder() || send_codec_inst_.pltype != send_codec.pltype ||
-      STR_CASE_CMP(send_codec_inst_.plname, send_codec.plname) != 0 ||
-      send_codec_inst_.plfreq != send_codec.plfreq ||
-      send_codec_inst_.pacsize != send_codec.pacsize ||
-      send_codec_inst_.channels != send_codec.channels) {
-    RTC_DCHECK(CodecSupported(send_codec));
-    AudioEncoder* enc = rent_a_codec_.RentEncoder(send_codec);
-    if (!enc)
-      return -1;
-    codec_stack_params_.speech_encoder = enc;
-    rent_a_codec_.RentEncoderStack(&codec_stack_params_);
-    RTC_DCHECK(CurrentEncoder());
-  }
-
-  send_codec_inst_ = send_codec;
-  CurrentEncoder()->SetTargetBitrate(send_codec_inst_.rate);
-  return 0;
+  send_codec_inst_ = rtc::Optional<CodecInst>(send_codec);
+  codec_stack_params_.speech_encoder = nullptr;  // Caller must recreate it.
+  return true;
 }
 
-void CodecManager::RegisterEncoder(AudioEncoder* external_speech_encoder) {
-  // Make up a CodecInst.
-  send_codec_inst_.channels = external_speech_encoder->NumChannels();
-  send_codec_inst_.plfreq = external_speech_encoder->SampleRateHz();
-  send_codec_inst_.pacsize = rtc::CheckedDivExact(
+CodecInst CodecManager::ForgeCodecInst(
+    const AudioEncoder* external_speech_encoder) {
+  CodecInst ci;
+  ci.channels = external_speech_encoder->NumChannels();
+  ci.plfreq = external_speech_encoder->SampleRateHz();
+  ci.pacsize = rtc::CheckedDivExact(
       static_cast<int>(external_speech_encoder->Max10MsFramesInAPacket() *
-                       send_codec_inst_.plfreq),
+                       ci.plfreq),
       100);
-  send_codec_inst_.pltype = -1;  // Not valid.
-  send_codec_inst_.rate = -1;    // Not valid.
+  ci.pltype = -1;  // Not valid.
+  ci.rate = -1;    // Not valid.
   static const char kName[] = "external";
-  memcpy(send_codec_inst_.plname, kName, sizeof(kName));
-
-  codec_stack_params_.speech_encoder = external_speech_encoder;
-  rent_a_codec_.RentEncoderStack(&codec_stack_params_);
-}
-
-rtc::Optional<CodecInst> CodecManager::GetCodecInst() const {
-  int dummy_id = 0;
-  WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, dummy_id,
-               "SendCodec()");
-  if (!CurrentEncoder()) {
-    WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, dummy_id,
-                 "SendCodec Failed, no codec is registered");
-    return rtc::Optional<CodecInst>();
-  }
-  return rtc::Optional<CodecInst>(send_codec_inst_);
+  memcpy(ci.plname, kName, sizeof(kName));
+  return ci;
 }
 
 bool CodecManager::SetCopyRed(bool enable) {
@@ -211,22 +138,18 @@ bool CodecManager::SetCopyRed(bool enable) {
                  "Codec internal FEC and RED cannot be co-enabled.");
     return false;
   }
-  if (enable &&
-      codec_stack_params_.red_payload_types.count(send_codec_inst_.plfreq) <
+  if (enable && send_codec_inst_ &&
+      codec_stack_params_.red_payload_types.count(send_codec_inst_->plfreq) <
           1) {
     WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, 0,
-                 "Cannot enable RED at %i Hz.", send_codec_inst_.plfreq);
+                 "Cannot enable RED at %i Hz.", send_codec_inst_->plfreq);
     return false;
   }
-  if (codec_stack_params_.use_red != enable) {
-    codec_stack_params_.use_red = enable;
-    if (CurrentEncoder())
-      rent_a_codec_.RentEncoderStack(&codec_stack_params_);
-  }
+  codec_stack_params_.use_red = enable;
   return true;
 }
 
-int CodecManager::SetVAD(bool enable, ACMVADMode mode) {
+bool CodecManager::SetVAD(bool enable, ACMVADMode mode) {
   // Sanity check of the mode.
   RTC_DCHECK(mode == VADNormal || mode == VADLowBitrate || mode == VADAggr ||
              mode == VADVeryAggr);
@@ -240,49 +163,32 @@ int CodecManager::SetVAD(bool enable, ACMVADMode mode) {
   if (enable && stereo_send) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, 0,
                  "VAD/DTX not supported for stereo sending");
-    codec_stack_params_.use_cng = false;
-    return -1;
+    return false;
   }
 
-  // If a send codec is registered, set VAD/DTX for the codec.
-  if (IsOpus(send_codec_inst_)) {
-    // VAD/DTX not supported.
-    codec_stack_params_.use_cng = false;
-    return 0;
+  if (CurrentEncoderIsOpus()) {
+    // VAD/DTX not supported, but don't fail.
+    enable = false;
   }
 
-  if (codec_stack_params_.use_cng != enable ||
-      codec_stack_params_.vad_mode != mode) {
-    codec_stack_params_.use_cng = enable;
-    codec_stack_params_.vad_mode = mode;
-    if (codec_stack_params_.speech_encoder)
-      rent_a_codec_.RentEncoderStack(&codec_stack_params_);
-  }
-  return 0;
+  codec_stack_params_.use_cng = enable;
+  codec_stack_params_.vad_mode = mode;
+  return true;
 }
 
-void CodecManager::VAD(bool* dtx_enabled,
-                       bool* vad_enabled,
-                       ACMVADMode* mode) const {
-  *dtx_enabled = *vad_enabled = codec_stack_params_.use_cng;
-  *mode = codec_stack_params_.vad_mode;
-}
-
-int CodecManager::SetCodecFEC(bool enable_codec_fec) {
+bool CodecManager::SetCodecFEC(bool enable_codec_fec) {
   if (enable_codec_fec && codec_stack_params_.use_red) {
     WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, 0,
                  "Codec internal FEC and RED cannot be co-enabled.");
-    return -1;
+    return false;
   }
 
-  RTC_CHECK(CurrentEncoder());
-  codec_stack_params_.use_codec_fec =
-      CurrentEncoder()->SetFec(enable_codec_fec) && enable_codec_fec;
-  return codec_stack_params_.use_codec_fec == enable_codec_fec ? 0 : -1;
+  codec_stack_params_.use_codec_fec = enable_codec_fec;
+  return true;
 }
 
-AudioDecoder* CodecManager::GetAudioDecoder(const CodecInst& codec) {
-  return IsIsac(codec) ? rent_a_codec_.RentIsacDecoder() : nullptr;
+bool CodecManager::CurrentEncoderIsOpus() const {
+  return send_codec_inst_ ? IsOpus(*send_codec_inst_) : false;
 }
 
 }  // namespace acm2


@@ -33,46 +33,33 @@ class CodecManager final {
   CodecManager();
   ~CodecManager();
 
-  int RegisterEncoder(const CodecInst& send_codec);
+  // Parses the given specification. On success, returns true and updates the
+  // stored CodecInst and stack parameters; on error, returns false.
+  bool RegisterEncoder(const CodecInst& send_codec);
 
-  void RegisterEncoder(AudioEncoder* external_speech_encoder);
+  static CodecInst ForgeCodecInst(const AudioEncoder* external_speech_encoder);
 
-  rtc::Optional<CodecInst> GetCodecInst() const;
+  const CodecInst* GetCodecInst() const {
+    return send_codec_inst_ ? &*send_codec_inst_ : nullptr;
+  }
+
+  const RentACodec::StackParameters* GetStackParams() const {
+    return &codec_stack_params_;
+  }
+  RentACodec::StackParameters* GetStackParams() { return &codec_stack_params_; }
 
   bool SetCopyRed(bool enable);
 
-  int SetVAD(bool enable, ACMVADMode mode);
-
-  void VAD(bool* dtx_enabled, bool* vad_enabled, ACMVADMode* mode) const;
+  bool SetVAD(bool enable, ACMVADMode mode);
 
-  int SetCodecFEC(bool enable_codec_fec);
+  bool SetCodecFEC(bool enable_codec_fec);
 
-  // Returns a pointer to AudioDecoder of the given codec. For iSAC, encoding
-  // and decoding have to be performed on a shared codec instance. By calling
-  // this method, we get the codec instance that ACM owns.
-  // If |codec| does not share an instance between encoder and decoder, returns
-  // null.
-  AudioDecoder* GetAudioDecoder(const CodecInst& codec);
-
-  bool red_enabled() const { return codec_stack_params_.use_red; }
-  bool codec_fec_enabled() const { return codec_stack_params_.use_codec_fec; }
-
-  AudioEncoder* CurrentEncoder() { return rent_a_codec_.GetEncoderStack(); }
-  const AudioEncoder* CurrentEncoder() const {
-    return rent_a_codec_.GetEncoderStack();
-  }
-
-  bool CurrentEncoderIsOpus() const { return encoder_is_opus_; }
+  bool CurrentEncoderIsOpus() const;
 
  private:
   rtc::ThreadChecker thread_checker_;
-  CodecInst send_codec_inst_;
-  RentACodec rent_a_codec_;
+  rtc::Optional<CodecInst> send_codec_inst_;
   RentACodec::StackParameters codec_stack_params_;
-  bool encoder_is_opus_;
 
   RTC_DISALLOW_COPY_AND_ASSIGN(CodecManager);
 };


@@ -11,6 +11,7 @@
 #include "testing/gtest/include/gtest/gtest.h"
 #include "webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h"
 #include "webrtc/modules/audio_coding/acm2/codec_manager.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
 
 namespace webrtc {
 namespace acm2 {
@@ -46,20 +47,26 @@ TEST(CodecManagerTest, ExternalEncoderFec) {
   }
 
   CodecManager cm;
-  EXPECT_FALSE(cm.codec_fec_enabled());
-  cm.RegisterEncoder(enc0.get());
-  EXPECT_FALSE(cm.codec_fec_enabled());
+  RentACodec rac;
+  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
+  cm.GetStackParams()->speech_encoder = enc0.get();
+  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
+  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
 
   enc0->Mark("A");
-  EXPECT_EQ(0, cm.SetCodecFEC(true));
-  EXPECT_TRUE(cm.codec_fec_enabled());
-  cm.RegisterEncoder(enc1.get());
-  EXPECT_TRUE(cm.codec_fec_enabled());
+  EXPECT_EQ(true, cm.SetCodecFEC(true));
+  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
+  EXPECT_TRUE(cm.GetStackParams()->use_codec_fec);
+  cm.GetStackParams()->speech_encoder = enc1.get();
+  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
+  EXPECT_TRUE(cm.GetStackParams()->use_codec_fec);
 
-  EXPECT_EQ(0, cm.SetCodecFEC(false));
+  EXPECT_EQ(true, cm.SetCodecFEC(false));
+  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
   enc0->Mark("B");
-  EXPECT_FALSE(cm.codec_fec_enabled());
-  cm.RegisterEncoder(enc0.get());
-  EXPECT_FALSE(cm.codec_fec_enabled());
+  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
+  cm.GetStackParams()->speech_encoder = enc0.get();
+  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
+  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
 }
 
 }  // namespace acm2