Remove dual stream functionality in ACM

This is old code that is no longer in use. The clean-up is part of the
ACM redesign work. With this change there is no longer any need for the
ProcessDualStream method, so it is removed. Consequently, the method
ProcessSingleStream is renamed to Process.
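
For reference, a minimal sketch of the send path after this change
(illustrative only; assumes a registered CodecInst |send_codec| and a
10 ms |audio_frame|, with error handling omitted):

  scoped_ptr<AudioCodingModule> acm(AudioCodingModule::Create(0));
  acm->InitializeSender();
  acm->RegisterSendCodec(send_codec);  // Primary codec only.
  acm->Add10MsData(audio_frame);       // Buffer 10 ms of input audio.
  acm->Process();                      // Previously ProcessSingleStream().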

BUG=3520
R=kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/39489004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@8074 4adac7df-926f-26a2-2b94-8c16560cd09d
Author: henrik.lundin@webrtc.org
Date: 2015-01-15 09:36:30 +0000
Parent: 9ce01e6416
Commit: 1f67b53c88
5 changed files with 8 additions and 965 deletions


@@ -96,23 +96,6 @@ int UpMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) {
return 0;
}
// Return 1 if timestamp t1 is less than timestamp t2, while compensating for
// wrap-around.
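// Illustrative example (added; not part of the original comment): a
// timestamp just past a 32-bit wrap compares as later, e.g.
// TimestampLessThan(0xFFFFFFF0u, 0x10u) returns 1, because the wrapped
// distance 0x10 - 0xFFFFFFF0 == 0x20 is less than kHalfFullRange.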
static int TimestampLessThan(uint32_t t1, uint32_t t2) {
uint32_t kHalfFullRange = static_cast<uint32_t>(0xFFFFFFFF) / 2;
if (t1 == t2) {
return 0;
} else if (t1 < t2) {
if (t2 - t1 < kHalfFullRange)
return 1;
return 0;
} else {
if (t1 - t2 < kHalfFullRange)
return 0;
return 1;
}
}
} // namespace
AudioCodingModuleImpl::AudioCodingModuleImpl(
@@ -141,7 +124,6 @@ AudioCodingModuleImpl::AudioCodingModuleImpl(
previous_pltype_(255),
aux_rtp_header_(NULL),
receiver_initialized_(false),
secondary_send_codec_inst_(),
codec_timestamp_(expected_codec_ts_),
first_10ms_data_(false),
callback_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
@@ -154,10 +136,6 @@ AudioCodingModuleImpl::AudioCodingModuleImpl(
strncpy(send_codec_inst_.plname, no_name, RTP_PAYLOAD_NAME_SIZE - 1);
send_codec_inst_.pltype = -1;
strncpy(secondary_send_codec_inst_.plname, no_name,
RTP_PAYLOAD_NAME_SIZE - 1);
secondary_send_codec_inst_.pltype = -1;
for (int i = 0; i < ACMCodecDB::kMaxNumCodecs; i++) {
codecs_[i] = NULL;
mirror_codec_idx_[i] = -1;
@@ -272,218 +250,8 @@ int64_t AudioCodingModuleImpl::TimeUntilNextProcess() {
(send_codec_inst_.plfreq / 1000);
}
int32_t AudioCodingModuleImpl::Process() {
bool dual_stream;
{
CriticalSectionScoped lock(acm_crit_sect_);
dual_stream = (secondary_encoder_.get() != NULL);
}
if (dual_stream) {
return ProcessDualStream();
}
return ProcessSingleStream();
}
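// Encodes one frame with |encoder| into |stream|, and records the payload
// length, payload type, and timestamp offset (relative to
// |current_timestamp|) in the fragmentation slot given by
// |fragmentation_index|. Returns the payload length in bytes, or -1 on
// failure. (Descriptive comment added for clarity; not in the original code.)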
int AudioCodingModuleImpl::EncodeFragmentation(int fragmentation_index,
int payload_type,
uint32_t current_timestamp,
ACMGenericCodec* encoder,
uint8_t* stream) {
int16_t len_bytes = MAX_PAYLOAD_SIZE_BYTE;
uint32_t rtp_timestamp;
WebRtcACMEncodingType encoding_type;
if (encoder->Encode(stream, &len_bytes, &rtp_timestamp, &encoding_type) < 0) {
return -1;
}
assert(encoding_type == kActiveNormalEncoded);
assert(len_bytes > 0);
fragmentation_.fragmentationLength[fragmentation_index] = len_bytes;
fragmentation_.fragmentationPlType[fragmentation_index] = payload_type;
fragmentation_.fragmentationTimeDiff[fragmentation_index] =
static_cast<uint16_t>(current_timestamp - rtp_timestamp);
fragmentation_.fragmentationVectorSize++;
return len_bytes;
}
// Primary payloads are sent immediately, whereas a single secondary payload is
// buffered to be combined with "the next payload."
// Normally "the next payload" would be a primary payload. In case two
// consecutive secondary payloads are generated with no primary payload in
// between, then two secondary payloads are packed in one RED.
int AudioCodingModuleImpl::ProcessDualStream() {
uint8_t stream[kMaxNumFragmentationVectors * MAX_PAYLOAD_SIZE_BYTE];
uint32_t current_timestamp;
size_t length_bytes = 0;
RTPFragmentationHeader my_fragmentation;
uint8_t my_red_payload_type;
{
CriticalSectionScoped lock(acm_crit_sect_);
// Check that there is a valid encoder before proceeding.
if (!HaveValidEncoder("ProcessDualStream") ||
secondary_encoder_.get() == NULL) {
return -1;
}
ACMGenericCodec* primary_encoder = codecs_[current_send_codec_idx_];
// Whether the primary encoder has a full frame of audio to generate a
// payload.
bool primary_ready_to_encode = primary_encoder->HasFrameToEncode();
// Whether the secondary encoder has a frame of audio to generate a payload.
bool secondary_ready_to_encode = secondary_encoder_->HasFrameToEncode();
if (!primary_ready_to_encode && !secondary_ready_to_encode) {
// Nothing to send.
return 0;
}
size_t len_bytes_previous_secondary = fragmentation_.fragmentationLength[2];
assert(len_bytes_previous_secondary <= MAX_PAYLOAD_SIZE_BYTE);
bool has_previous_payload = len_bytes_previous_secondary > 0;
uint32_t primary_timestamp = primary_encoder->EarliestTimestamp();
uint32_t secondary_timestamp = secondary_encoder_->EarliestTimestamp();
if (!has_previous_payload && !primary_ready_to_encode &&
secondary_ready_to_encode) {
// The secondary payload will be the ONLY bit-stream. Encode with the
// secondary encoder, store the payload, and return. No packet is sent.
int16_t len_bytes = MAX_PAYLOAD_SIZE_BYTE;
WebRtcACMEncodingType encoding_type;
if (secondary_encoder_->Encode(red_buffer_, &len_bytes,
&last_red_timestamp_,
&encoding_type) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"ProcessDual(): Encoding of secondary encoder Failed");
return -1;
}
assert(len_bytes > 0);
assert(encoding_type == kActiveNormalEncoded);
assert(len_bytes <= MAX_PAYLOAD_SIZE_BYTE);
fragmentation_.fragmentationLength[2] = len_bytes;
return 0;
}
// Initialize with distinct invalid values, so that we can sanity-check later
// that they are different.
int index_primary = -1;
int index_secondary = -2;
int index_previous_secondary = -3;
if (primary_ready_to_encode) {
index_primary = secondary_ready_to_encode ?
TimestampLessThan(primary_timestamp, secondary_timestamp) : 0;
index_primary += has_previous_payload ?
TimestampLessThan(primary_timestamp, last_red_timestamp_) : 0;
}
if (secondary_ready_to_encode) {
// The timestamp of the secondary payload can only be less than that of the
// primary payload, but is always larger than the timestamp of the previous
// secondary payload.
index_secondary = primary_ready_to_encode ?
(1 - TimestampLessThan(primary_timestamp, secondary_timestamp)) : 0;
}
if (has_previous_payload) {
index_previous_secondary = primary_ready_to_encode ?
(1 - TimestampLessThan(primary_timestamp, last_red_timestamp_)) : 0;
// If the secondary is ready, its timestamp is always larger than the
// previous secondary's, so the previous payload's index is bumped up by one.
index_previous_secondary += secondary_ready_to_encode ? 1 : 0;
}
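// Illustrative example (added; not in the original code): with a primary
// timestamp of 1000, a secondary timestamp of 1160, and a stored previous
// secondary timestamp of 840, the primary is older than the secondary but
// newer than the stored payload, giving index_secondary = 0,
// index_primary = 1 and index_previous_secondary = 2 (newest payload at
// index 0, oldest at the highest index).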
// Indices must not be equal.
assert(index_primary != index_secondary);
assert(index_primary != index_previous_secondary);
assert(index_secondary != index_previous_secondary);
// One of the payloads has to be at position zero.
assert(index_primary == 0 || index_secondary == 0 ||
index_previous_secondary == 0);
// Timestamp of the RED payload.
if (index_primary == 0) {
current_timestamp = primary_timestamp;
} else if (index_secondary == 0) {
current_timestamp = secondary_timestamp;
} else {
current_timestamp = last_red_timestamp_;
}
fragmentation_.fragmentationVectorSize = 0;
if (has_previous_payload) {
assert(index_previous_secondary >= 0 &&
index_previous_secondary < kMaxNumFragmentationVectors);
assert(len_bytes_previous_secondary <= MAX_PAYLOAD_SIZE_BYTE);
memcpy(&stream[index_previous_secondary * MAX_PAYLOAD_SIZE_BYTE],
red_buffer_, sizeof(stream[0]) * len_bytes_previous_secondary);
fragmentation_.fragmentationLength[index_previous_secondary] =
len_bytes_previous_secondary;
fragmentation_.fragmentationPlType[index_previous_secondary] =
secondary_send_codec_inst_.pltype;
fragmentation_.fragmentationTimeDiff[index_previous_secondary] =
static_cast<uint16_t>(current_timestamp - last_red_timestamp_);
fragmentation_.fragmentationVectorSize++;
}
if (primary_ready_to_encode) {
assert(index_primary >= 0 && index_primary < kMaxNumFragmentationVectors);
int i = index_primary * MAX_PAYLOAD_SIZE_BYTE;
if (EncodeFragmentation(index_primary, send_codec_inst_.pltype,
current_timestamp, primary_encoder,
&stream[i]) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"ProcessDualStream(): Encoding of primary encoder Failed");
return -1;
}
}
if (secondary_ready_to_encode) {
assert(index_secondary >= 0 &&
index_secondary < kMaxNumFragmentationVectors - 1);
int i = index_secondary * MAX_PAYLOAD_SIZE_BYTE;
if (EncodeFragmentation(index_secondary,
secondary_send_codec_inst_.pltype,
current_timestamp, secondary_encoder_.get(),
&stream[i]) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"ProcessDualStream(): Encoding of secondary encoder "
"Failed");
return -1;
}
}
// Copy to local variable, as it will be used outside the ACM lock.
my_fragmentation.CopyFrom(fragmentation_);
my_red_payload_type = red_pltype_;
length_bytes = 0;
for (int n = 0; n < fragmentation_.fragmentationVectorSize; n++) {
length_bytes += fragmentation_.fragmentationLength[n];
}
}
{
CriticalSectionScoped lock(callback_crit_sect_);
if (packetization_callback_ != NULL) {
// Callback with payload data, including redundant data (RED).
if (packetization_callback_->SendData(kAudioFrameSpeech,
my_red_payload_type,
current_timestamp, stream,
length_bytes,
&my_fragmentation) < 0) {
return -1;
}
}
}
{
CriticalSectionScoped lock(acm_crit_sect_);
// Now that data is sent, clean up fragmentation.
ResetFragmentation(0);
}
return 0;
}
// Process any pending tasks such as timeouts.
int AudioCodingModuleImpl::ProcessSingleStream() {
int32_t AudioCodingModuleImpl::Process() {
// Make room for 1 RED payload.
uint8_t stream[2 * MAX_PAYLOAD_SIZE_BYTE];
// TODO(turajs): |length_bytes| & |red_length_bytes| can be of type int if
@@ -503,7 +271,7 @@ int AudioCodingModuleImpl::ProcessSingleStream() {
{
CriticalSectionScoped lock(acm_crit_sect_);
// Check that there is a valid encoder before proceeding.
if (!HaveValidEncoder("ProcessSingleStream")) {
if (!HaveValidEncoder("Process")) {
return -1;
}
status = codecs_[current_send_codec_idx_]->Encode(stream, &length_bytes,
@@ -512,7 +280,7 @@ int AudioCodingModuleImpl::ProcessSingleStream() {
if (status < 0) {
// Encode failed.
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"ProcessSingleStream(): Encoding Failed");
"Process(): Encoding Failed");
length_bytes = 0;
return -1;
} else if (status == 0) {
@@ -716,15 +484,11 @@ int AudioCodingModuleImpl::InitializeSender() {
// Initialize RED.
is_first_red_ = true;
if (red_enabled_ || secondary_encoder_.get() != NULL) {
if (red_enabled_) {
if (red_buffer_ != NULL) {
memset(red_buffer_, 0, MAX_PAYLOAD_SIZE_BYTE);
}
if (red_enabled_) {
ResetFragmentation(kNumRedFragmentationVectors);
} else {
ResetFragmentation(0);
}
ResetFragmentation(kNumRedFragmentationVectors);
}
return 0;
@@ -820,69 +584,6 @@ static int IsValidSendCodec(const CodecInst& send_codec,
return codec_id;
}
int AudioCodingModuleImpl::RegisterSecondarySendCodec(
const CodecInst& send_codec) {
CriticalSectionScoped lock(acm_crit_sect_);
if (!send_codec_registered_) {
return -1;
}
// Primary and secondary codecs should have the same sampling rate.
if (send_codec.plfreq != send_codec_inst_.plfreq) {
return -1;
}
int mirror_id;
int codec_id = IsValidSendCodec(send_codec, false, id_, &mirror_id);
if (codec_id < 0) {
return -1;
}
ACMGenericCodec* encoder = CreateCodec(send_codec);
WebRtcACMCodecParams codec_params;
// Initialize the codec before registering. For the secondary codec, VAD and
// DTX are disabled.
memcpy(&(codec_params.codec_inst), &send_codec, sizeof(CodecInst));
codec_params.enable_vad = false;
codec_params.enable_dtx = false;
codec_params.vad_mode = VADNormal;
// Force initialization.
if (encoder->InitEncoder(&codec_params, true) < 0) {
// Could not initialize, therefore cannot be registered.
delete encoder;
return -1;
}
secondary_encoder_.reset(encoder);
memcpy(&secondary_send_codec_inst_, &send_codec, sizeof(send_codec));
// Disable VAD & DTX.
SetVADSafe(false, false, VADNormal);
// Clean up.
if (red_buffer_) {
memset(red_buffer_, 0, MAX_PAYLOAD_SIZE_BYTE);
}
ResetFragmentation(0);
return 0;
}
void AudioCodingModuleImpl::UnregisterSecondarySendCodec() {
CriticalSectionScoped lock(acm_crit_sect_);
if (secondary_encoder_.get() == NULL) {
return;
}
secondary_encoder_.reset();
ResetFragmentation(0);
}
int AudioCodingModuleImpl::SecondarySendCodec(
CodecInst* secondary_codec) const {
CriticalSectionScoped lock(acm_crit_sect_);
if (secondary_encoder_.get() == NULL) {
return -1;
}
memcpy(secondary_codec, &secondary_send_codec_inst_,
sizeof(secondary_send_codec_inst_));
return 0;
}
// Can be called multiple times for Codec, CNG, RED.
int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {
int mirror_id;
@@ -973,15 +674,6 @@ int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {
is_send_codec = false;
}
// If there is a secondary codec registered, and the new send codec has a
// sampling rate different from that of the secondary codec, unregister the
// secondary codec.
if (secondary_encoder_.get() != NULL &&
secondary_send_codec_inst_.plfreq != send_codec.plfreq) {
secondary_encoder_.reset();
ResetFragmentation(0);
}
// If new codec, or new settings, register.
if (!is_send_codec) {
if (codecs_[mirror_id] == NULL) {
@@ -1265,10 +957,6 @@ int AudioCodingModuleImpl::Add10MsData(
// Check whether we need an up-mix or down-mix.
bool remix = ptr_frame->num_channels_ != send_codec_inst_.channels;
if (secondary_encoder_.get() != NULL) {
remix = remix ||
(ptr_frame->num_channels_ != secondary_send_codec_inst_.channels);
}
// If a re-mix is required (up or down), this buffer will store the re-mixed
// version of the input.
@@ -1296,18 +984,6 @@ int AudioCodingModuleImpl::Add10MsData(
send_codec_inst_.channels) < 0)
return -1;
if (secondary_encoder_.get() != NULL) {
// For pushing data to the secondary encoder, point |ptr_audio| to the
// correct buffer.
ptr_audio = ptr_frame->data_;
if (secondary_send_codec_inst_.channels != ptr_frame->num_channels_)
ptr_audio = buffer;
if (secondary_encoder_->Add10MsData(
ptr_frame->timestamp_, ptr_audio, ptr_frame->samples_per_channel_,
secondary_send_codec_inst_.channels) < 0)
return -1;
}
return 0;
}
@@ -1318,23 +994,12 @@ int AudioCodingModuleImpl::Add10MsData(
// is required, |*ptr_out| points to |in_frame|.
int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
const AudioFrame** ptr_out) {
// Primary and secondary (if it exists) should have the same sampling rate.
assert((secondary_encoder_.get() != NULL) ?
secondary_send_codec_inst_.plfreq == send_codec_inst_.plfreq : true);
bool resample = (in_frame.sample_rate_hz_ != send_codec_inst_.plfreq);
// This variable is true if the primary codec and the secondary codec (if it
// exists) are both mono and the input is stereo.
bool down_mix;
if (secondary_encoder_.get() != NULL) {
down_mix = (in_frame.num_channels_ == 2) &&
(send_codec_inst_.channels == 1) &&
(secondary_send_codec_inst_.channels == 1);
} else {
down_mix = (in_frame.num_channels_ == 2) &&
(send_codec_inst_.channels == 1);
}
bool down_mix =
(in_frame.num_channels_ == 2) && (send_codec_inst_.channels == 1);
if (!first_10ms_data_) {
expected_in_ts_ = in_frame.timestamp_;
@@ -1520,17 +1185,6 @@ int AudioCodingModuleImpl::SetVADSafe(bool enable_dtx,
return -1;
}
// We don't support VAD/DTX when dual-streaming is enabled, i.e. when a
// secondary encoder is registered.
if ((enable_dtx || enable_vad) && secondary_encoder_.get() != NULL) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"VAD/DTX not supported when dual-streaming is enabled.");
dtx_enabled_ = false;
vad_enabled_ = false;
vad_mode_ = mode;
return -1;
}
// Store VAD/DTX settings. Values can be changed in the call to "SetVAD"
// below.
dtx_enabled_ = enable_dtx;


@@ -58,17 +58,6 @@ class AudioCodingModuleImpl : public AudioCodingModule {
// Can be called multiple times for Codec, CNG, RED.
virtual int RegisterSendCodec(const CodecInst& send_codec) OVERRIDE;
// Register a secondary codec for dual-streaming. Dual-streaming is activated
// right after the secondary codec is registered.
virtual int RegisterSecondarySendCodec(const CodecInst& send_codec) OVERRIDE;
// Unregister the secondary codec. Dual-streaming is deactivated right after
// the secondary codec is deregistered.
virtual void UnregisterSecondarySendCodec() OVERRIDE;
// Get the secondary codec.
virtual int SecondarySendCodec(CodecInst* secondary_codec) const OVERRIDE;
// Get current send codec.
virtual int SendCodec(CodecInst* current_codec) const OVERRIDE;
@@ -266,14 +255,6 @@ class AudioCodingModuleImpl : public AudioCodingModule {
int SetVADSafe(bool enable_dtx, bool enable_vad, ACMVADMode mode)
EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
// Process buffered audio when dual-streaming is not enabled. (This function
// is still used when RED is enabled.)
int ProcessSingleStream();
// Process buffered audio when dual-streaming is enabled, i.e. a secondary
// send codec is registered.
int ProcessDualStream();
// Preprocessing of input audio, including resampling and down-mixing if
// required, before pushing audio into the encoder's buffer.
//
@@ -293,13 +274,6 @@ class AudioCodingModuleImpl : public AudioCodingModule {
// to |index|.
int UpdateUponReceivingCodec(int index);
int EncodeFragmentation(int fragmentation_index,
int payload_type,
uint32_t current_timestamp,
ACMGenericCodec* encoder,
uint8_t* stream)
EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
void ResetFragmentation(int vector_size)
EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
@@ -348,7 +322,7 @@ class AudioCodingModuleImpl : public AudioCodingModule {
// TODO(turajs): |red_buffer_| is allocated in the constructor; why have it
// as a pointer and not an array? If concerned about the memory, then make a
// set-up function to allocate it only when it is going to be used, i.e.
// RED or Dual-streaming is enabled.
// RED is enabled.
uint8_t* red_buffer_ GUARDED_BY(acm_crit_sect_);
// TODO(turajs): we actually don't need |fragmentation_| as a member variable.
@@ -374,8 +348,6 @@ class AudioCodingModuleImpl : public AudioCodingModule {
bool receiver_initialized_ GUARDED_BY(acm_crit_sect_);
AudioFrame preprocess_frame_ GUARDED_BY(acm_crit_sect_);
CodecInst secondary_send_codec_inst_ GUARDED_BY(acm_crit_sect_);
scoped_ptr<ACMGenericCodec> secondary_encoder_ GUARDED_BY(acm_crit_sect_);
uint32_t codec_timestamp_ GUARDED_BY(acm_crit_sect_);
bool first_10ms_data_ GUARDED_BY(acm_crit_sect_);


@@ -244,33 +244,6 @@ class AudioCodingModule: public Module {
//
virtual int32_t RegisterSendCodec(const CodecInst& send_codec) = 0;
///////////////////////////////////////////////////////////////////////////
// int RegisterSecondarySendCodec()
// Register a secondary encoder to enable dual-streaming. If a secondary
// codec is already registered, it will be removed before the new one is
// registered.
//
// Note: The secondary encoder will be unregistered if a primary codec
// is set with a sampling rate which does not match that of the existing
// secondary codec.
//
// Input:
// -send_codec : Parameters of the codec to be registered, cf.
// common_types.h for the definition of
// CodecInst.
//
// Return value:
// -1 if failed to register,
// 0 if succeeded.
//
virtual int RegisterSecondarySendCodec(const CodecInst& send_codec) = 0;
///////////////////////////////////////////////////////////////////////////
// void UnregisterSecondarySendCodec()
// Unregister the secondary encoder to disable dual-streaming.
//
virtual void UnregisterSecondarySendCodec() = 0;
///////////////////////////////////////////////////////////////////////////
// int32_t SendCodec()
// Get parameters for the codec currently registered as send codec.
@@ -284,19 +257,6 @@ class AudioCodingModule: public Module {
//
virtual int32_t SendCodec(CodecInst* current_send_codec) const = 0;
///////////////////////////////////////////////////////////////////////////
// int SecondarySendCodec()
// Get the codec parameters for the current secondary send codec.
//
// Output:
// -secondary_codec : parameters of the secondary send codec.
//
// Return value:
// -1 if failed to get send codec,
// 0 if succeeded.
//
virtual int SecondarySendCodec(CodecInst* secondary_codec) const = 0;
///////////////////////////////////////////////////////////////////////////
// int32_t SendFrequency()
// Get the sampling frequency of the current encoder in Hertz.


@@ -1,542 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/typedefs.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
class DualStreamTest : public AudioPacketizationCallback,
public ::testing::Test {
protected:
DualStreamTest();
~DualStreamTest();
void RunTest(int frame_size_primary_samples,
int num_channels_primary,
int sampling_rate,
bool start_in_sync,
int num_channels_input);
void ApiTest();
virtual int32_t SendData(
FrameType frameType,
uint8_t payload_type,
uint32_t timestamp,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation) OVERRIDE;
void Perform(bool start_in_sync, int num_channels_input);
void InitializeSender(int frame_size_primary_samples,
int num_channels_primary, int sampling_rate);
void PopulateCodecInstances(int frame_size_primary_ms,
int num_channels_primary, int sampling_rate);
void Validate(bool start_in_sync, size_t tolerance);
bool EqualTimestamp(int stream, int position);
size_t EqualPayloadLength(int stream, int position);
bool EqualPayloadData(int stream, int position);
static const int kMaxNumStoredPayloads = 2;
enum {
kPrimary = 0,
kSecondary,
kMaxNumStreams
};
scoped_ptr<AudioCodingModule> acm_dual_stream_;
scoped_ptr<AudioCodingModule> acm_ref_primary_;
scoped_ptr<AudioCodingModule> acm_ref_secondary_;
CodecInst primary_encoder_;
CodecInst secondary_encoder_;
CodecInst red_encoder_;
int payload_ref_is_stored_[kMaxNumStreams][kMaxNumStoredPayloads];
int payload_dual_is_stored_[kMaxNumStreams][kMaxNumStoredPayloads];
uint32_t timestamp_ref_[kMaxNumStreams][kMaxNumStoredPayloads];
uint32_t timestamp_dual_[kMaxNumStreams][kMaxNumStoredPayloads];
size_t payload_len_ref_[kMaxNumStreams][kMaxNumStoredPayloads];
size_t payload_len_dual_[kMaxNumStreams][kMaxNumStoredPayloads];
uint8_t payload_data_ref_[kMaxNumStreams][MAX_PAYLOAD_SIZE_BYTE
* kMaxNumStoredPayloads];
uint8_t payload_data_dual_[kMaxNumStreams][MAX_PAYLOAD_SIZE_BYTE
* kMaxNumStoredPayloads];
int num_received_payloads_dual_[kMaxNumStreams];
int num_received_payloads_ref_[kMaxNumStreams];
int num_compared_payloads_[kMaxNumStreams];
uint32_t last_timestamp_[kMaxNumStreams];
bool received_payload_[kMaxNumStreams];
};
DualStreamTest::DualStreamTest()
: acm_dual_stream_(AudioCodingModule::Create(0)),
acm_ref_primary_(AudioCodingModule::Create(1)),
acm_ref_secondary_(AudioCodingModule::Create(2)),
payload_ref_is_stored_(),
payload_dual_is_stored_(),
timestamp_ref_(),
num_received_payloads_dual_(),
num_received_payloads_ref_(),
num_compared_payloads_(),
last_timestamp_(),
received_payload_() {}
DualStreamTest::~DualStreamTest() {}
void DualStreamTest::PopulateCodecInstances(int frame_size_primary_ms,
int num_channels_primary,
int sampling_rate) {
CodecInst my_codec;
// Invalid values, used to check later whether the codecs are found in the
// database.
primary_encoder_.pltype = -1;
secondary_encoder_.pltype = -1;
red_encoder_.pltype = -1;
for (int n = 0; n < AudioCodingModule::NumberOfCodecs(); n++) {
AudioCodingModule::Codec(n, &my_codec);
if (strcmp(my_codec.plname, "ISAC") == 0
&& my_codec.plfreq == sampling_rate) {
my_codec.rate = 32000;
my_codec.pacsize = 30 * sampling_rate / 1000;
memcpy(&secondary_encoder_, &my_codec, sizeof(my_codec));
} else if (strcmp(my_codec.plname, "L16") == 0
&& my_codec.channels == num_channels_primary
&& my_codec.plfreq == sampling_rate) {
my_codec.pacsize = frame_size_primary_ms * sampling_rate / 1000;
memcpy(&primary_encoder_, &my_codec, sizeof(my_codec));
} else if (strcmp(my_codec.plname, "red") == 0) {
memcpy(&red_encoder_, &my_codec, sizeof(my_codec));
}
}
ASSERT_GE(primary_encoder_.pltype, 0);
ASSERT_GE(secondary_encoder_.pltype, 0);
ASSERT_GE(red_encoder_.pltype, 0);
}
void DualStreamTest::InitializeSender(int frame_size_primary_samples,
int num_channels_primary,
int sampling_rate) {
ASSERT_TRUE(acm_dual_stream_.get() != NULL);
ASSERT_TRUE(acm_ref_primary_.get() != NULL);
ASSERT_TRUE(acm_ref_secondary_.get() != NULL);
ASSERT_EQ(0, acm_dual_stream_->InitializeSender());
ASSERT_EQ(0, acm_ref_primary_->InitializeSender());
ASSERT_EQ(0, acm_ref_secondary_->InitializeSender());
PopulateCodecInstances(frame_size_primary_samples, num_channels_primary,
sampling_rate);
ASSERT_EQ(0, acm_ref_primary_->RegisterSendCodec(primary_encoder_));
ASSERT_EQ(0, acm_ref_secondary_->RegisterSendCodec(secondary_encoder_));
ASSERT_EQ(0, acm_dual_stream_->RegisterSendCodec(primary_encoder_));
ASSERT_EQ(0,
acm_dual_stream_->RegisterSecondarySendCodec(secondary_encoder_));
ASSERT_EQ(0, acm_ref_primary_->RegisterTransportCallback(this));
ASSERT_EQ(0, acm_ref_secondary_->RegisterTransportCallback(this));
ASSERT_EQ(0, acm_dual_stream_->RegisterTransportCallback(this));
}
void DualStreamTest::Perform(bool start_in_sync, int num_channels_input) {
PCMFile pcm_file;
std::string file_name = test::ResourcePath(
(num_channels_input == 1) ?
"audio_coding/testfile32kHz" : "audio_coding/teststereo32kHz",
"pcm");
pcm_file.Open(file_name, 32000, "rb");
pcm_file.ReadStereo(num_channels_input == 2);
AudioFrame audio_frame;
size_t tolerance = 0;
if (num_channels_input == 2 && primary_encoder_.channels == 2
&& secondary_encoder_.channels == 1) {
tolerance = 12;
}
if (!start_in_sync) {
pcm_file.Read10MsData(audio_frame);
// Unregister the secondary codec and feed only the primary.
acm_dual_stream_->UnregisterSecondarySendCodec();
EXPECT_EQ(0, acm_dual_stream_->Add10MsData(audio_frame));
EXPECT_EQ(0, acm_ref_primary_->Add10MsData(audio_frame));
ASSERT_EQ(0,
acm_dual_stream_->RegisterSecondarySendCodec(secondary_encoder_));
}
const int kNumFramesToProcess = 100;
int frame_cntr = 0;
while (!pcm_file.EndOfFile() && frame_cntr < kNumFramesToProcess) {
pcm_file.Read10MsData(audio_frame);
frame_cntr++;
EXPECT_EQ(0, acm_dual_stream_->Add10MsData(audio_frame));
EXPECT_EQ(0, acm_ref_primary_->Add10MsData(audio_frame));
EXPECT_EQ(0, acm_ref_secondary_->Add10MsData(audio_frame));
EXPECT_GE(acm_dual_stream_->Process(), 0);
EXPECT_GE(acm_ref_primary_->Process(), 0);
EXPECT_GE(acm_ref_secondary_->Process(), 0);
if (start_in_sync || frame_cntr > 7) {
// If we haven't started in sync the first few audio frames might
// slightly differ due to the difference in the state of the resamplers
// of dual-ACM and reference-ACM.
Validate(start_in_sync, tolerance);
} else {
// SendData stores the payloads; if we are not comparing, we have to free
// the space by resetting these flags.
memset(payload_ref_is_stored_, 0, sizeof(payload_ref_is_stored_));
memset(payload_dual_is_stored_, 0, sizeof(payload_dual_is_stored_));
}
}
pcm_file.Close();
// Make sure that the numbers of received payloads match. In the case of the
// secondary encoder, the dual-stream might deliver one payload fewer. The
// reason is that some secondary payloads are stored to be sent with a payload
// generated later, and the input file may end before the "next" payload.
EXPECT_EQ(num_received_payloads_ref_[kPrimary],
num_received_payloads_dual_[kPrimary]);
EXPECT_TRUE(
num_received_payloads_ref_[kSecondary]
== num_received_payloads_dual_[kSecondary]
|| num_received_payloads_ref_[kSecondary]
== (num_received_payloads_dual_[kSecondary] + 1));
// Make sure all received payloads are compared.
if (start_in_sync) {
EXPECT_EQ(num_received_payloads_dual_[kPrimary],
num_compared_payloads_[kPrimary]);
EXPECT_EQ(num_received_payloads_dual_[kSecondary],
num_compared_payloads_[kSecondary]);
} else {
// In the asynchronous test we don't compare the first couple of frames, so
// we should account for them in our counting.
EXPECT_GE(num_compared_payloads_[kPrimary],
num_received_payloads_dual_[kPrimary] - 4);
EXPECT_GE(num_compared_payloads_[kSecondary],
num_received_payloads_dual_[kSecondary] - 4);
}
}
bool DualStreamTest::EqualTimestamp(int stream_index, int position) {
if (timestamp_dual_[stream_index][position]
!= timestamp_ref_[stream_index][position]) {
return false;
}
return true;
}
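// Returns the absolute difference between the dual-stream and reference
// payload lengths for the given stream and position. (Descriptive comment
// added for clarity; not in the original code.)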
size_t DualStreamTest::EqualPayloadLength(int stream_index, int position) {
size_t dual = payload_len_dual_[stream_index][position];
size_t ref = payload_len_ref_[stream_index][position];
return (dual > ref) ? (dual - ref) : (ref - dual);
}
bool DualStreamTest::EqualPayloadData(int stream_index, int position) {
assert(
payload_len_dual_[stream_index][position]
== payload_len_ref_[stream_index][position]);
int offset = position * MAX_PAYLOAD_SIZE_BYTE;
for (size_t n = 0; n < payload_len_dual_[stream_index][position]; n++) {
if (payload_data_dual_[stream_index][offset + n]
!= payload_data_ref_[stream_index][offset + n]) {
return false;
}
}
return true;
}
void DualStreamTest::Validate(bool start_in_sync, size_t tolerance) {
for (int stream_index = 0; stream_index < kMaxNumStreams; stream_index++) {
size_t my_tolerance = stream_index == kPrimary ? 0 : tolerance;
for (int position = 0; position < kMaxNumStoredPayloads; position++) {
if (payload_ref_is_stored_[stream_index][position] == 1
&& payload_dual_is_stored_[stream_index][position] == 1) {
// Check timestamps only if the codecs started in sync or this is the
// primary stream.
if (start_in_sync || stream_index == 0)
EXPECT_TRUE(EqualTimestamp(stream_index, position));
EXPECT_LE(EqualPayloadLength(stream_index, position), my_tolerance);
if (my_tolerance == 0)
EXPECT_TRUE(EqualPayloadData(stream_index, position));
num_compared_payloads_[stream_index]++;
payload_ref_is_stored_[stream_index][position] = 0;
payload_dual_is_stored_[stream_index][position] = 0;
}
}
}
}
int32_t DualStreamTest::SendData(FrameType frameType, uint8_t payload_type,
uint32_t timestamp,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation) {
int position;
int stream_index;
if (payload_type == red_encoder_.pltype) {
if (fragmentation == NULL) {
assert(false);
return -1;
}
// As the oldest payloads are at the higher indices of the fragmentation, we
// loop backwards to be able to check that the timestamp increments are
// correct.
for (int n = fragmentation->fragmentationVectorSize - 1; n >= 0; --n) {
if (fragmentation->fragmentationPlType[n] == primary_encoder_.pltype) {
// Received primary payload from dual stream.
stream_index = kPrimary;
} else if (fragmentation->fragmentationPlType[n]
== secondary_encoder_.pltype) {
// Received secondary payload from dual stream.
stream_index = kSecondary;
} else {
assert(false);
return -1;
}
num_received_payloads_dual_[stream_index]++;
if (payload_dual_is_stored_[stream_index][0] == 0) {
position = 0;
} else if (payload_dual_is_stored_[stream_index][1] == 0) {
position = 1;
} else {
assert(false);
return -1;
}
timestamp_dual_[stream_index][position] = timestamp
- fragmentation->fragmentationTimeDiff[n];
payload_len_dual_[stream_index][position] = fragmentation
->fragmentationLength[n];
memcpy(
&payload_data_dual_[stream_index][position * MAX_PAYLOAD_SIZE_BYTE],
&payload_data[fragmentation->fragmentationOffset[n]],
fragmentation->fragmentationLength[n]);
payload_dual_is_stored_[stream_index][position] = 1;
// Check if timestamps are incremented correctly.
if (received_payload_[stream_index]) {
int t = timestamp_dual_[stream_index][position]
- last_timestamp_[stream_index];
if ((stream_index == kPrimary) && (t != primary_encoder_.pacsize)) {
assert(false);
return -1;
}
if ((stream_index == kSecondary) && (t != secondary_encoder_.pacsize)) {
assert(false);
return -1;
}
} else {
received_payload_[stream_index] = true;
}
last_timestamp_[stream_index] = timestamp_dual_[stream_index][position];
}
} else {
if (fragmentation != NULL) {
assert(false);
return -1;
}
if (payload_type == primary_encoder_.pltype) {
stream_index = kPrimary;
} else if (payload_type == secondary_encoder_.pltype) {
stream_index = kSecondary;
} else {
assert(false);
return -1;
}
num_received_payloads_ref_[stream_index]++;
if (payload_ref_is_stored_[stream_index][0] == 0) {
position = 0;
} else if (payload_ref_is_stored_[stream_index][1] == 0) {
position = 1;
} else {
assert(false);
return -1;
}
timestamp_ref_[stream_index][position] = timestamp;
payload_len_ref_[stream_index][position] = payload_size;
memcpy(&payload_data_ref_[stream_index][position * MAX_PAYLOAD_SIZE_BYTE],
payload_data, payload_size);
payload_ref_is_stored_[stream_index][position] = 1;
}
return 0;
}
// Mono input, mono primary WB 20 ms frame.
TEST_F(DualStreamTest,
DISABLED_ON_ANDROID(BitExactSyncMonoInputMonoPrimaryWb20Ms)) {
InitializeSender(20, 1, 16000);
Perform(true, 1);
}
// Mono input, stereo primary WB 20 ms frame.
TEST_F(DualStreamTest,
DISABLED_ON_ANDROID(BitExactSyncMonoInput_StereoPrimaryWb20Ms)) {
InitializeSender(20, 2, 16000);
Perform(true, 1);
}
// Mono input, mono primary SWB 20 ms frame.
TEST_F(DualStreamTest,
DISABLED_ON_ANDROID(BitExactSyncMonoInputMonoPrimarySwb20Ms)) {
InitializeSender(20, 1, 32000);
Perform(true, 1);
}
// Mono input, stereo primary SWB 20 ms frame.
TEST_F(DualStreamTest,
DISABLED_ON_ANDROID(BitExactSyncMonoInputStereoPrimarySwb20Ms)) {
InitializeSender(20, 2, 32000);
Perform(true, 1);
}
// Mono input, mono primary WB 40 ms frame.
TEST_F(DualStreamTest,
DISABLED_ON_ANDROID(BitExactSyncMonoInputMonoPrimaryWb40Ms)) {
InitializeSender(40, 1, 16000);
Perform(true, 1);
}
// Mono input, stereo primary WB 40 ms frame.
TEST_F(DualStreamTest,
DISABLED_ON_ANDROID(BitExactSyncMonoInputStereoPrimaryWb40Ms)) {
InitializeSender(40, 2, 16000);
Perform(true, 1);
}
// Stereo input, mono primary WB 20 ms frame.
TEST_F(DualStreamTest,
DISABLED_ON_ANDROID(BitExactSyncStereoInputMonoPrimaryWb20Ms)) {
InitializeSender(20, 1, 16000);
Perform(true, 2);
}
// Stereo input, stereo primary WB 20 ms frame.
TEST_F(DualStreamTest,
DISABLED_ON_ANDROID(BitExactSyncStereoInputStereoPrimaryWb20Ms)) {
InitializeSender(20, 2, 16000);
Perform(true, 2);
}
// Stereo input, mono primary SWB 20 ms frame.
TEST_F(DualStreamTest,
DISABLED_ON_ANDROID(BitExactSyncStereoInputMonoPrimarySwb20Ms)) {
InitializeSender(20, 1, 32000);
Perform(true, 2);
}
// Stereo input, stereo primary SWB 20 ms frame.
TEST_F(DualStreamTest,
DISABLED_ON_ANDROID(BitExactSyncStereoInputStereoPrimarySwb20Ms)) {
InitializeSender(20, 2, 32000);
Perform(true, 2);
}
// Stereo input, mono primary WB 40 ms frame.
TEST_F(DualStreamTest,
DISABLED_ON_ANDROID(BitExactSyncStereoInputMonoPrimaryWb40Ms)) {
InitializeSender(40, 1, 16000);
Perform(true, 2);
}
// Stereo input, stereo primary WB 40 ms frame.
TEST_F(DualStreamTest,
DISABLED_ON_ANDROID(BitExactSyncStereoInputStereoPrimaryWb40Ms)) {
InitializeSender(40, 2, 16000);
Perform(true, 2);
}
// Asynchronous test: the ACM is fed with data, then the secondary coder is
// registered.
// Mono input, mono primary WB 20 ms frame.
TEST_F(DualStreamTest,
DISABLED_ON_ANDROID(BitExactAsyncMonoInputMonoPrimaryWb20Ms)) {
InitializeSender(20, 1, 16000);
Perform(false, 1);
}
// Mono input, mono primary WB 40 ms frame.
TEST_F(DualStreamTest,
DISABLED_ON_ANDROID(BitExactAsyncMonoInputMonoPrimaryWb40Ms)) {
InitializeSender(40, 1, 16000);
Perform(false, 1);
}
TEST_F(DualStreamTest, DISABLED_ON_ANDROID(Api)) {
PopulateCodecInstances(20, 1, 16000);
CodecInst my_codec;
ASSERT_EQ(0, acm_dual_stream_->InitializeSender());
ASSERT_EQ(-1, acm_dual_stream_->SecondarySendCodec(&my_codec));
// Not allowed to register secondary codec if primary is not registered yet.
ASSERT_EQ(-1,
acm_dual_stream_->RegisterSecondarySendCodec(secondary_encoder_));
ASSERT_EQ(-1, acm_dual_stream_->SecondarySendCodec(&my_codec));
ASSERT_EQ(0, acm_dual_stream_->RegisterSendCodec(primary_encoder_));
ASSERT_EQ(0, acm_dual_stream_->SetVAD(true, true, VADNormal));
// Make sure VAD is activated.
bool vad_status;
bool dtx_status;
ACMVADMode vad_mode;
EXPECT_EQ(0, acm_dual_stream_->VAD(&vad_status, &dtx_status, &vad_mode));
EXPECT_TRUE(vad_status);
EXPECT_TRUE(dtx_status);
EXPECT_EQ(VADNormal, vad_mode);
ASSERT_EQ(0,
acm_dual_stream_->RegisterSecondarySendCodec(secondary_encoder_));
ASSERT_EQ(0, acm_dual_stream_->SecondarySendCodec(&my_codec));
ASSERT_EQ(0, memcmp(&my_codec, &secondary_encoder_, sizeof(my_codec)));
// Test that VAD gets disabled after registering the secondary codec.
EXPECT_EQ(0, acm_dual_stream_->VAD(&vad_status, &dtx_status, &vad_mode));
EXPECT_FALSE(vad_status);
EXPECT_FALSE(dtx_status);
// Activating VAD should fail.
ASSERT_EQ(-1, acm_dual_stream_->SetVAD(true, true, VADNormal));
// After unregistering the secondary encoder, it should be possible to
// activate VAD.
acm_dual_stream_->UnregisterSecondarySendCodec();
// Should fail.
ASSERT_EQ(-1, acm_dual_stream_->SecondarySendCodec(&my_codec));
ASSERT_EQ(0, acm_dual_stream_->SetVAD(true, true, VADVeryAggr));
// Make sure VAD is activated.
EXPECT_EQ(0, acm_dual_stream_->VAD(&vad_status, &dtx_status, &vad_mode));
EXPECT_TRUE(vad_status);
EXPECT_TRUE(dtx_status);
EXPECT_EQ(VADVeryAggr, vad_mode);
}
} // namespace webrtc