Move Rent-A-Codec out of CodecManager

This puts the two of them next to each other at the top level of
AudioCodingModuleImpl. CodecManager now manages the specifications for
Rent-A-Codec, rather than managing encoders directly.

BUG=webrtc:5028

Review URL: https://codereview.webrtc.org/1520283006

Cr-Commit-Position: refs/heads/master@{#11048}
Author:    kwiberg
Date:      2015-12-16 04:19:08 -08:00
Committed: Commit bot
Parent:    a29386c26d
Commit:    a6db4958c9
6 changed files with 167 additions and 221 deletions
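
For orientation before the per-file diffs, here is a minimal sketch of the new
division of labour, distilled from the RegisterSendCodec() changes below. The
free-function wrapper and explicit pointer parameters are illustrative only; in
the CL itself the two objects are members of AudioCodingModuleImpl and the call
runs under acm_crit_sect_.

// Sketch only: CodecManager validates and stores the codec specification plus
// the stack parameters; RentACodec turns them into the actual encoder stack.
int RegisterSendCodecSketch(CodecManager* codec_manager,
                            RentACodec* rent_a_codec,
                            const CodecInst& send_codec) {
  if (!codec_manager->RegisterEncoder(send_codec))  // spec bookkeeping only
    return -1;
  RentACodec::StackParameters* sp = codec_manager->GetStackParams();
  if (!sp->speech_encoder && codec_manager->GetCodecInst()) {
    // A specification is stored but no encoder exists yet; rent one.
    AudioEncoder* enc =
        rent_a_codec->RentEncoder(*codec_manager->GetCodecInst());
    if (!enc)
      return -1;
    sp->speech_encoder = enc;
  }
  if (sp->speech_encoder)
    rent_a_codec->RentEncoderStack(sp);  // wraps CNG/RED/FEC as configured
  return 0;
}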

View File

@@ -133,7 +133,7 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
   if (!HaveValidEncoder("Process"))
     return -1;
-  AudioEncoder* audio_encoder = codec_manager_.CurrentEncoder();
+  AudioEncoder* audio_encoder = rent_a_codec_.GetEncoderStack();
   // Scale the timestamp to the codec's RTP timestamp rate.
   uint32_t rtp_timestamp =
       first_frame_ ? input_data.input_timestamp
@@ -198,19 +198,43 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
 // Can be called multiple times for Codec, CNG, RED.
 int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  return codec_manager_.RegisterEncoder(send_codec);
+  if (!codec_manager_.RegisterEncoder(send_codec)) {
+    return -1;
+  }
+  auto* sp = codec_manager_.GetStackParams();
+  if (!sp->speech_encoder && codec_manager_.GetCodecInst()) {
+    // We have no speech encoder, but we have a specification for making one.
+    AudioEncoder* enc =
+        rent_a_codec_.RentEncoder(*codec_manager_.GetCodecInst());
+    if (!enc)
+      return -1;
+    sp->speech_encoder = enc;
+  }
+  if (sp->speech_encoder)
+    rent_a_codec_.RentEncoderStack(sp);
+  return 0;
 }
 
 void AudioCodingModuleImpl::RegisterExternalSendCodec(
     AudioEncoder* external_speech_encoder) {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  codec_manager_.RegisterEncoder(external_speech_encoder);
+  auto* sp = codec_manager_.GetStackParams();
+  sp->speech_encoder = external_speech_encoder;
+  rent_a_codec_.RentEncoderStack(sp);
 }
 
 // Get current send codec.
 rtc::Optional<CodecInst> AudioCodingModuleImpl::SendCodec() const {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  return codec_manager_.GetCodecInst();
+  auto* ci = codec_manager_.GetCodecInst();
+  if (ci) {
+    return rtc::Optional<CodecInst>(*ci);
+  }
+  auto* enc = codec_manager_.GetStackParams()->speech_encoder;
+  if (enc) {
+    return rtc::Optional<CodecInst>(CodecManager::ForgeCodecInst(enc));
+  }
+  return rtc::Optional<CodecInst>();
 }
 
 // Get current send frequency.
@@ -219,19 +243,21 @@ int AudioCodingModuleImpl::SendFrequency() const {
                "SendFrequency()");
   CriticalSectionScoped lock(acm_crit_sect_.get());
 
-  if (!codec_manager_.CurrentEncoder()) {
+  const auto* enc = rent_a_codec_.GetEncoderStack();
+  if (!enc) {
     WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
                  "SendFrequency Failed, no codec is registered");
     return -1;
   }
 
-  return codec_manager_.CurrentEncoder()->SampleRateHz();
+  return enc->SampleRateHz();
 }
 
 void AudioCodingModuleImpl::SetBitRate(int bitrate_bps) {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  if (codec_manager_.CurrentEncoder()) {
-    codec_manager_.CurrentEncoder()->SetTargetBitrate(bitrate_bps);
+  auto* enc = rent_a_codec_.GetEncoderStack();
+  if (enc) {
+    enc->SetTargetBitrate(bitrate_bps);
   }
 }
@@ -298,10 +324,12 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
   }
 
   // Check whether we need an up-mix or down-mix?
-  bool remix = ptr_frame->num_channels_ !=
-               codec_manager_.CurrentEncoder()->NumChannels();
+  const int current_num_channels =
+      rent_a_codec_.GetEncoderStack()->NumChannels();
+  const bool same_num_channels =
+      ptr_frame->num_channels_ == current_num_channels;
 
-  if (remix) {
+  if (!same_num_channels) {
     if (ptr_frame->num_channels_ == 1) {
       if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0)
         return -1;
@@ -316,14 +344,13 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
   const int16_t* ptr_audio = ptr_frame->data_;
 
   // For pushing data to primary, point the |ptr_audio| to correct buffer.
-  if (codec_manager_.CurrentEncoder()->NumChannels() !=
-      ptr_frame->num_channels_)
+  if (!same_num_channels)
     ptr_audio = input_data->buffer;
 
   input_data->input_timestamp = ptr_frame->timestamp_;
   input_data->audio = ptr_audio;
   input_data->length_per_channel = ptr_frame->samples_per_channel_;
-  input_data->audio_channel = codec_manager_.CurrentEncoder()->NumChannels();
+  input_data->audio_channel = current_num_channels;
 
   return 0;
 }
@@ -335,13 +362,14 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
 // is required, |*ptr_out| points to |in_frame|.
 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
                                                const AudioFrame** ptr_out) {
-  bool resample = (in_frame.sample_rate_hz_ !=
-                   codec_manager_.CurrentEncoder()->SampleRateHz());
+  const auto* enc = rent_a_codec_.GetEncoderStack();
+  const bool resample = in_frame.sample_rate_hz_ != enc->SampleRateHz();
 
   // This variable is true if primary codec and secondary codec (if exists)
   // are both mono and input is stereo.
-  bool down_mix = (in_frame.num_channels_ == 2) &&
-                  (codec_manager_.CurrentEncoder()->NumChannels() == 1);
+  // TODO(henrik.lundin): This condition should probably be
+  //   in_frame.num_channels_ > enc->NumChannels()
+  const bool down_mix = in_frame.num_channels_ == 2 && enc->NumChannels() == 1;
 
   if (!first_10ms_data_) {
     expected_in_ts_ = in_frame.timestamp_;
@@ -351,10 +379,8 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
     // TODO(turajs): Do we need a warning here.
     expected_codec_ts_ +=
         (in_frame.timestamp_ - expected_in_ts_) *
-        static_cast<uint32_t>(
-            (static_cast<double>(
-                 codec_manager_.CurrentEncoder()->SampleRateHz()) /
-             static_cast<double>(in_frame.sample_rate_hz_)));
+        static_cast<uint32_t>(static_cast<double>(enc->SampleRateHz()) /
                              static_cast<double>(in_frame.sample_rate_hz_));
     expected_in_ts_ = in_frame.timestamp_;
   }
@@ -393,8 +419,7 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
     dest_ptr_audio = preprocess_frame_.data_;
 
     int samples_per_channel = resampler_.Resample10Msec(
-        src_ptr_audio, in_frame.sample_rate_hz_,
-        codec_manager_.CurrentEncoder()->SampleRateHz(),
+        src_ptr_audio, in_frame.sample_rate_hz_, enc->SampleRateHz(),
         preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples,
         dest_ptr_audio);
@@ -405,8 +430,7 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
     }
     preprocess_frame_.samples_per_channel_ =
         static_cast<size_t>(samples_per_channel);
-    preprocess_frame_.sample_rate_hz_ =
-        codec_manager_.CurrentEncoder()->SampleRateHz();
+    preprocess_frame_.sample_rate_hz_ = enc->SampleRateHz();
   }
 
   expected_codec_ts_ +=
@@ -422,17 +446,21 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
 bool AudioCodingModuleImpl::REDStatus() const {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  return codec_manager_.red_enabled();
+  return codec_manager_.GetStackParams()->use_red;
 }
 
 // Configure RED status i.e on/off.
-int AudioCodingModuleImpl::SetREDStatus(
+int AudioCodingModuleImpl::SetREDStatus(bool enable_red) {
 #ifdef WEBRTC_CODEC_RED
-    bool enable_red) {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  return codec_manager_.SetCopyRed(enable_red) ? 0 : -1;
+  if (!codec_manager_.SetCopyRed(enable_red)) {
+    return -1;
+  }
+  auto* sp = codec_manager_.GetStackParams();
+  if (sp->speech_encoder)
+    rent_a_codec_.RentEncoderStack(sp);
+  return 0;
 #else
-    bool /* enable_red */) {
   WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
                " WEBRTC_CODEC_RED is undefined");
   return -1;
@@ -445,18 +473,29 @@ int AudioCodingModuleImpl::SetREDStatus(
 bool AudioCodingModuleImpl::CodecFEC() const {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  return codec_manager_.codec_fec_enabled();
+  return codec_manager_.GetStackParams()->use_codec_fec;
 }
 
 int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  return codec_manager_.SetCodecFEC(enable_codec_fec);
+  if (!codec_manager_.SetCodecFEC(enable_codec_fec)) {
+    return -1;
+  }
+  auto* sp = codec_manager_.GetStackParams();
+  if (sp->speech_encoder)
+    rent_a_codec_.RentEncoderStack(sp);
+  if (enable_codec_fec) {
+    return sp->use_codec_fec ? 0 : -1;
+  } else {
+    RTC_DCHECK(!sp->use_codec_fec);
+    return 0;
+  }
 }
 
 int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
   CriticalSectionScoped lock(acm_crit_sect_.get());
   if (HaveValidEncoder("SetPacketLossRate")) {
-    codec_manager_.CurrentEncoder()->SetProjectedPacketLossRate(loss_rate /
+    rent_a_codec_.GetEncoderStack()->SetProjectedPacketLossRate(loss_rate /
                                                                 100.0);
   }
   return 0;
@@ -471,14 +510,22 @@ int AudioCodingModuleImpl::SetVAD(bool enable_dtx,
   // Note: |enable_vad| is not used; VAD is enabled based on the DTX setting.
   RTC_DCHECK_EQ(enable_dtx, enable_vad);
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  return codec_manager_.SetVAD(enable_dtx, mode);
+  if (!codec_manager_.SetVAD(enable_dtx, mode)) {
+    return -1;
+  }
+  auto* sp = codec_manager_.GetStackParams();
+  if (sp->speech_encoder)
+    rent_a_codec_.RentEncoderStack(sp);
+  return 0;
 }
 
 // Get VAD/DTX settings.
 int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled,
                                ACMVADMode* mode) const {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  codec_manager_.VAD(dtx_enabled, vad_enabled, mode);
+  const auto* sp = codec_manager_.GetStackParams();
+  *dtx_enabled = *vad_enabled = sp->use_cng;
+  *mode = sp->vad_mode;
   return 0;
 }
@@ -565,9 +612,11 @@ int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
   // Get |decoder| associated with |codec|. |decoder| is NULL if |codec| does
   // not own its decoder.
-  return receiver_.AddCodec(*codec_index, codec.pltype, codec.channels,
-                            codec.plfreq, codec_manager_.GetAudioDecoder(codec),
-                            codec.plname);
+  return receiver_.AddCodec(
+      *codec_index, codec.pltype, codec.channels, codec.plfreq,
+      STR_CASE_CMP(codec.plname, "isac") == 0 ? rent_a_codec_.RentIsacDecoder()
+                                              : nullptr,
+      codec.plname);
 }
 
 int AudioCodingModuleImpl::RegisterExternalReceiveCodec(
@@ -709,7 +758,7 @@ int AudioCodingModuleImpl::SetOpusApplication(OpusApplicationMode application) {
       FATAL();
       return 0;
   }
-  return codec_manager_.CurrentEncoder()->SetApplication(app) ? 0 : -1;
+  return rent_a_codec_.GetEncoderStack()->SetApplication(app) ? 0 : -1;
 }
 
 // Informs Opus encoder of the maximum playback rate the receiver will render.
@@ -720,7 +769,7 @@ int AudioCodingModuleImpl::SetOpusMaxPlaybackRate(int frequency_hz) {
   }
   if (!codec_manager_.CurrentEncoderIsOpus())
     return -1;
-  codec_manager_.CurrentEncoder()->SetMaxPlaybackRate(frequency_hz);
+  rent_a_codec_.GetEncoderStack()->SetMaxPlaybackRate(frequency_hz);
   return 0;
 }
@@ -731,7 +780,7 @@ int AudioCodingModuleImpl::EnableOpusDtx() {
   }
   if (!codec_manager_.CurrentEncoderIsOpus())
     return -1;
-  return codec_manager_.CurrentEncoder()->SetDtx(true) ? 0 : -1;
+  return rent_a_codec_.GetEncoderStack()->SetDtx(true) ? 0 : -1;
 }
 
 int AudioCodingModuleImpl::DisableOpusDtx() {
@@ -741,7 +790,7 @@ int AudioCodingModuleImpl::DisableOpusDtx() {
   }
   if (!codec_manager_.CurrentEncoderIsOpus())
     return -1;
-  return codec_manager_.CurrentEncoder()->SetDtx(false) ? 0 : -1;
+  return rent_a_codec_.GetEncoderStack()->SetDtx(false) ? 0 : -1;
 }
 
 int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) {
@@ -749,7 +798,7 @@ int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) {
 }
 
 bool AudioCodingModuleImpl::HaveValidEncoder(const char* caller_name) const {
-  if (!codec_manager_.CurrentEncoder()) {
+  if (!rent_a_codec_.GetEncoderStack()) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                  "%s failed: No send codec is registered.", caller_name);
     return false;

View File

@@ -249,6 +249,7 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
   AcmReceiver receiver_;  // AcmReceiver has it's own internal lock.
   ChangeLogger bitrate_logger_ GUARDED_BY(acm_crit_sect_);
   CodecManager codec_manager_ GUARDED_BY(acm_crit_sect_);
+  RentACodec rent_a_codec_ GUARDED_BY(acm_crit_sect_);
 
   // This is to keep track of CN instances where we can send DTMFs.
   uint8_t previous_pltype_ GUARDED_BY(acm_crit_sect_);

View File

@@ -1619,10 +1619,6 @@ TEST_F(AcmSenderBitExactnessOldApi, External_Pcmu_20ms) {
       .Times(AtLeast(1))
       .WillRepeatedly(
           Invoke(&encoder, &AudioEncoderPcmU::Num10MsFramesInNextPacket));
-  EXPECT_CALL(mock_encoder, Max10MsFramesInAPacket())
-      .Times(AtLeast(1))
-      .WillRepeatedly(
-          Invoke(&encoder, &AudioEncoderPcmU::Max10MsFramesInAPacket));
   EXPECT_CALL(mock_encoder, GetTargetBitrate())
       .Times(AtLeast(1))
       .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::GetTargetBitrate));

View File

@@ -55,14 +55,6 @@ int IsValidSendCodec(const CodecInst& send_codec) {
   return RentACodec::CodecIndexFromId(*maybe_codec_id).value_or(-1);
 }
 
-bool IsIsac(const CodecInst& codec) {
-  return
-#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
-      !STR_CASE_CMP(codec.plname, "isac") ||
-#endif
-      false;
-}
-
 bool IsOpus(const CodecInst& codec) {
   return
 #ifdef WEBRTC_CODEC_OPUS
@@ -71,138 +63,73 @@ bool IsOpus(const CodecInst& codec) {
       false;
 }
 
-bool IsPcmU(const CodecInst& codec) {
-  return !STR_CASE_CMP(codec.plname, "pcmu");
-}
-
-bool IsPcmA(const CodecInst& codec) {
-  return !STR_CASE_CMP(codec.plname, "pcma");
-}
-
-bool IsPcm16B(const CodecInst& codec) {
-  return !STR_CASE_CMP(codec.plname, "l16");
-}
-
-bool IsIlbc(const CodecInst& codec) {
-  return
-#ifdef WEBRTC_CODEC_ILBC
-      !STR_CASE_CMP(codec.plname, "ilbc") ||
-#endif
-      false;
-}
-
-bool IsG722(const CodecInst& codec) {
-  return
-#ifdef WEBRTC_CODEC_G722
-      !STR_CASE_CMP(codec.plname, "g722") ||
-#endif
-      false;
-}
-
-bool CodecSupported(const CodecInst& codec) {
-  return IsOpus(codec) || IsPcmU(codec) || IsPcmA(codec) || IsPcm16B(codec) ||
-         IsIlbc(codec) || IsG722(codec) || IsIsac(codec);
-}
-
-const CodecInst kEmptyCodecInst = {-1, "noCodecRegistered", 0, 0, 0, 0};
 }  // namespace
 
-CodecManager::CodecManager()
-    : send_codec_inst_(kEmptyCodecInst), encoder_is_opus_(false) {
+CodecManager::CodecManager() {
   thread_checker_.DetachFromThread();
 }
 
 CodecManager::~CodecManager() = default;
 
-int CodecManager::RegisterEncoder(const CodecInst& send_codec) {
+bool CodecManager::RegisterEncoder(const CodecInst& send_codec) {
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   int codec_id = IsValidSendCodec(send_codec);
 
   // Check for reported errors from function IsValidSendCodec().
   if (codec_id < 0) {
-    return -1;
+    return false;
   }
 
   int dummy_id = 0;
   switch (RentACodec::RegisterRedPayloadType(
       &codec_stack_params_.red_payload_types, send_codec)) {
     case RentACodec::RegistrationResult::kOk:
-      return 0;
+      return true;
    case RentACodec::RegistrationResult::kBadFreq:
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
                   "RegisterSendCodec() failed, invalid frequency for RED"
                   " registration");
-      return -1;
    case RentACodec::RegistrationResult::kSkip:
      break;
  }
  switch (RentACodec::RegisterCngPayloadType(
      &codec_stack_params_.cng_payload_types, send_codec)) {
    case RentACodec::RegistrationResult::kOk:
-      return 0;
+      return true;
    case RentACodec::RegistrationResult::kBadFreq:
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
                   "RegisterSendCodec() failed, invalid frequency for CNG"
                   " registration");
-      return -1;
+      return false;
    case RentACodec::RegistrationResult::kSkip:
      break;
  }
 
-  encoder_is_opus_ = IsOpus(send_codec);
-  if (encoder_is_opus_) {
+  if (IsOpus(send_codec)) {
     // VAD/DTX not supported.
     codec_stack_params_.use_cng = false;
   }
 
-  // Recreate the encoder if anything except the send bitrate has changed.
-  if (!CurrentEncoder() || send_codec_inst_.pltype != send_codec.pltype ||
-      STR_CASE_CMP(send_codec_inst_.plname, send_codec.plname) != 0 ||
-      send_codec_inst_.plfreq != send_codec.plfreq ||
-      send_codec_inst_.pacsize != send_codec.pacsize ||
-      send_codec_inst_.channels != send_codec.channels) {
-    RTC_DCHECK(CodecSupported(send_codec));
-    AudioEncoder* enc = rent_a_codec_.RentEncoder(send_codec);
-    if (!enc)
-      return -1;
-    codec_stack_params_.speech_encoder = enc;
-    rent_a_codec_.RentEncoderStack(&codec_stack_params_);
-    RTC_DCHECK(CurrentEncoder());
-  }
-
-  send_codec_inst_ = send_codec;
-  CurrentEncoder()->SetTargetBitrate(send_codec_inst_.rate);
-  return 0;
+  send_codec_inst_ = rtc::Optional<CodecInst>(send_codec);
+  codec_stack_params_.speech_encoder = nullptr;  // Caller must recreate it.
+  return true;
 }
 
-void CodecManager::RegisterEncoder(AudioEncoder* external_speech_encoder) {
-  // Make up a CodecInst.
-  send_codec_inst_.channels = external_speech_encoder->NumChannels();
-  send_codec_inst_.plfreq = external_speech_encoder->SampleRateHz();
-  send_codec_inst_.pacsize = rtc::CheckedDivExact(
+CodecInst CodecManager::ForgeCodecInst(
+    const AudioEncoder* external_speech_encoder) {
+  CodecInst ci;
+  ci.channels = external_speech_encoder->NumChannels();
+  ci.plfreq = external_speech_encoder->SampleRateHz();
+  ci.pacsize = rtc::CheckedDivExact(
       static_cast<int>(external_speech_encoder->Max10MsFramesInAPacket() *
-                       send_codec_inst_.plfreq),
+                       ci.plfreq),
       100);
-  send_codec_inst_.pltype = -1;  // Not valid.
-  send_codec_inst_.rate = -1;    // Not valid.
+  ci.pltype = -1;  // Not valid.
+  ci.rate = -1;    // Not valid.
   static const char kName[] = "external";
-  memcpy(send_codec_inst_.plname, kName, sizeof(kName));
-  codec_stack_params_.speech_encoder = external_speech_encoder;
-  rent_a_codec_.RentEncoderStack(&codec_stack_params_);
-}
-
-rtc::Optional<CodecInst> CodecManager::GetCodecInst() const {
-  int dummy_id = 0;
-  WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, dummy_id,
-               "SendCodec()");
-  if (!CurrentEncoder()) {
-    WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, dummy_id,
-                 "SendCodec Failed, no codec is registered");
-    return rtc::Optional<CodecInst>();
-  }
-  return rtc::Optional<CodecInst>(send_codec_inst_);
+  memcpy(ci.plname, kName, sizeof(kName));
+  return ci;
 }
 
 bool CodecManager::SetCopyRed(bool enable) {
@@ -211,22 +138,18 @@ bool CodecManager::SetCopyRed(bool enable) {
                  "Codec internal FEC and RED cannot be co-enabled.");
     return false;
   }
-  if (enable &&
-      codec_stack_params_.red_payload_types.count(send_codec_inst_.plfreq) <
+  if (enable && send_codec_inst_ &&
+      codec_stack_params_.red_payload_types.count(send_codec_inst_->plfreq) <
           1) {
     WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, 0,
-                 "Cannot enable RED at %i Hz.", send_codec_inst_.plfreq);
+                 "Cannot enable RED at %i Hz.", send_codec_inst_->plfreq);
     return false;
   }
-  if (codec_stack_params_.use_red != enable) {
-    codec_stack_params_.use_red = enable;
-    if (CurrentEncoder())
-      rent_a_codec_.RentEncoderStack(&codec_stack_params_);
-  }
+  codec_stack_params_.use_red = enable;
   return true;
 }
 
-int CodecManager::SetVAD(bool enable, ACMVADMode mode) {
+bool CodecManager::SetVAD(bool enable, ACMVADMode mode) {
   // Sanity check of the mode.
   RTC_DCHECK(mode == VADNormal || mode == VADLowBitrate || mode == VADAggr ||
              mode == VADVeryAggr);
@@ -240,49 +163,32 @@ int CodecManager::SetVAD(bool enable, ACMVADMode mode) {
   if (enable && stereo_send) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, 0,
                  "VAD/DTX not supported for stereo sending");
-    codec_stack_params_.use_cng = false;
-    return -1;
+    return false;
   }
 
   // If a send codec is registered, set VAD/DTX for the codec.
-  if (IsOpus(send_codec_inst_)) {
-    // VAD/DTX not supported.
-    codec_stack_params_.use_cng = false;
-    return 0;
+  if (CurrentEncoderIsOpus()) {
+    // VAD/DTX not supported, but don't fail.
+    enable = false;
   }
 
-  if (codec_stack_params_.use_cng != enable ||
-      codec_stack_params_.vad_mode != mode) {
-    codec_stack_params_.use_cng = enable;
-    codec_stack_params_.vad_mode = mode;
-    if (codec_stack_params_.speech_encoder)
-      rent_a_codec_.RentEncoderStack(&codec_stack_params_);
-  }
-  return 0;
+  codec_stack_params_.use_cng = enable;
+  codec_stack_params_.vad_mode = mode;
+  return true;
 }
 
-void CodecManager::VAD(bool* dtx_enabled,
-                       bool* vad_enabled,
-                       ACMVADMode* mode) const {
-  *dtx_enabled = *vad_enabled = codec_stack_params_.use_cng;
-  *mode = codec_stack_params_.vad_mode;
-}
-
-int CodecManager::SetCodecFEC(bool enable_codec_fec) {
+bool CodecManager::SetCodecFEC(bool enable_codec_fec) {
   if (enable_codec_fec && codec_stack_params_.use_red) {
     WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, 0,
                  "Codec internal FEC and RED cannot be co-enabled.");
-    return -1;
+    return false;
   }
 
-  RTC_CHECK(CurrentEncoder());
-  codec_stack_params_.use_codec_fec =
-      CurrentEncoder()->SetFec(enable_codec_fec) && enable_codec_fec;
-  return codec_stack_params_.use_codec_fec == enable_codec_fec ? 0 : -1;
+  codec_stack_params_.use_codec_fec = enable_codec_fec;
+  return true;
 }
 
-AudioDecoder* CodecManager::GetAudioDecoder(const CodecInst& codec) {
-  return IsIsac(codec) ? rent_a_codec_.RentIsacDecoder() : nullptr;
+bool CodecManager::CurrentEncoderIsOpus() const {
+  return send_codec_inst_ ? IsOpus(*send_codec_inst_) : false;
 }
 
 }  // namespace acm2

View File

@@ -33,46 +33,33 @@ class CodecManager final {
   CodecManager();
   ~CodecManager();
 
-  int RegisterEncoder(const CodecInst& send_codec);
+  // Parses the given specification. On success, returns true and updates the
+  // stored CodecInst and stack parameters; on error, returns false.
+  bool RegisterEncoder(const CodecInst& send_codec);
 
-  void RegisterEncoder(AudioEncoder* external_speech_encoder);
+  static CodecInst ForgeCodecInst(const AudioEncoder* external_speech_encoder);
 
-  rtc::Optional<CodecInst> GetCodecInst() const;
+  const CodecInst* GetCodecInst() const {
+    return send_codec_inst_ ? &*send_codec_inst_ : nullptr;
+  }
+
+  const RentACodec::StackParameters* GetStackParams() const {
+    return &codec_stack_params_;
+  }
+
+  RentACodec::StackParameters* GetStackParams() { return &codec_stack_params_; }
 
   bool SetCopyRed(bool enable);
 
-  int SetVAD(bool enable, ACMVADMode mode);
+  bool SetVAD(bool enable, ACMVADMode mode);
 
-  void VAD(bool* dtx_enabled, bool* vad_enabled, ACMVADMode* mode) const;
+  bool SetCodecFEC(bool enable_codec_fec);
 
-  int SetCodecFEC(bool enable_codec_fec);
 
-  // Returns a pointer to AudioDecoder of the given codec. For iSAC, encoding
-  // and decoding have to be performed on a shared codec instance. By calling
-  // this method, we get the codec instance that ACM owns.
-  // If |codec| does not share an instance between encoder and decoder, returns
-  // null.
-  AudioDecoder* GetAudioDecoder(const CodecInst& codec);
-
-  bool red_enabled() const { return codec_stack_params_.use_red; }
-  bool codec_fec_enabled() const { return codec_stack_params_.use_codec_fec; }
-
-  AudioEncoder* CurrentEncoder() { return rent_a_codec_.GetEncoderStack(); }
-  const AudioEncoder* CurrentEncoder() const {
-    return rent_a_codec_.GetEncoderStack();
-  }
-
-  bool CurrentEncoderIsOpus() const { return encoder_is_opus_; }
+  bool CurrentEncoderIsOpus() const;
 
  private:
   rtc::ThreadChecker thread_checker_;
-  CodecInst send_codec_inst_;
-  RentACodec rent_a_codec_;
+  rtc::Optional<CodecInst> send_codec_inst_;
   RentACodec::StackParameters codec_stack_params_;
-  bool encoder_is_opus_;
 
   RTC_DISALLOW_COPY_AND_ASSIGN(CodecManager);
 };

View File

@@ -11,6 +11,7 @@
 #include "testing/gtest/include/gtest/gtest.h"
 #include "webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h"
 #include "webrtc/modules/audio_coding/acm2/codec_manager.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
 
 namespace webrtc {
 namespace acm2 {
@@ -46,20 +47,26 @@ TEST(CodecManagerTest, ExternalEncoderFec) {
   }
 
   CodecManager cm;
-  EXPECT_FALSE(cm.codec_fec_enabled());
-  cm.RegisterEncoder(enc0.get());
-  EXPECT_FALSE(cm.codec_fec_enabled());
+  RentACodec rac;
+  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
+  cm.GetStackParams()->speech_encoder = enc0.get();
+  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
+  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
 
   enc0->Mark("A");
-  EXPECT_EQ(0, cm.SetCodecFEC(true));
-  EXPECT_TRUE(cm.codec_fec_enabled());
-  cm.RegisterEncoder(enc1.get());
-  EXPECT_TRUE(cm.codec_fec_enabled());
+  EXPECT_EQ(true, cm.SetCodecFEC(true));
+  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
+  EXPECT_TRUE(cm.GetStackParams()->use_codec_fec);
+  cm.GetStackParams()->speech_encoder = enc1.get();
+  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
+  EXPECT_TRUE(cm.GetStackParams()->use_codec_fec);
 
-  EXPECT_EQ(0, cm.SetCodecFEC(false));
+  EXPECT_EQ(true, cm.SetCodecFEC(false));
+  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
   enc0->Mark("B");
-  EXPECT_FALSE(cm.codec_fec_enabled());
-  cm.RegisterEncoder(enc0.get());
-  EXPECT_FALSE(cm.codec_fec_enabled());
+  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
+  cm.GetStackParams()->speech_encoder = enc0.get();
+  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
+  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
 }
} // namespace acm2