Delete a chain of methods in ViE, VoE and ACM

The end goal is to remove AcmReceiver::SetInitialDelay. This change is
in preparation for that goal. It turns out that
AcmReceiver::SetInitialDelay was only invoked through the following
call chain, where each method in the chain is never referenced from
anywhere else (except from tests in some cases):

ViEChannel::SetReceiverBufferingMode
-> ViESyncModule::SetTargetBufferingDelay
-> VoEVideoSync::SetInitialPlayoutDelay
-> Channel::SetInitialPlayoutDelay
-> AudioCodingModule::SetInitialPlayoutDelay
-> AcmReceiver::SetInitialDelay

The start of the chain, ViEChannel::SetReceiverBufferingMode, was never
referenced.

This change deletes all the methods above except
AcmReceiver::SetInitialDelay itself, which will be handled in a
follow-up change.

BUG=webrtc:3520

Review URL: https://codereview.webrtc.org/1421013006

Cr-Commit-Position: refs/heads/master@{#10471}
This commit is contained in:
henrik.lundin
2015-11-01 11:43:30 -08:00
committed by Commit bot
parent e502bbe138
commit 74f0f3551e
18 changed files with 1 addition and 341 deletions

View File

@ -765,17 +765,6 @@ int AudioCodingModuleImpl::UnregisterReceiveCodec(uint8_t payload_type) {
return receiver_.RemoveCodec(payload_type);
}
int AudioCodingModuleImpl::SetInitialPlayoutDelay(int delay_ms) {
{
CriticalSectionScoped lock(acm_crit_sect_.get());
// Initialize receiver, if it is not initialized. Otherwise, initial delay
// is reset upon initialization of the receiver.
if (!receiver_initialized_)
InitializeReceiverSafe();
}
return receiver_.SetInitialDelay(delay_ms);
}
int AudioCodingModuleImpl::EnableNack(size_t max_nack_list_size) {
return receiver_.EnableNack(max_nack_list_size);
}

View File

@ -150,10 +150,6 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
// Smallest latency NetEq will maintain.
int LeastRequiredDelayMs() const override;
// Impose an initial delay on playout. ACM plays silence until |delay_ms|
// audio is accumulated in NetEq buffer, then starts decoding payloads.
int SetInitialPlayoutDelay(int delay_ms) override;
// Get playout timestamp.
int PlayoutTimestamp(uint32_t* timestamp) override;

View File

@ -249,30 +249,6 @@ TEST_F(AudioCodingModuleTestOldApi, DISABLED_ON_ANDROID(InitializedToZero)) {
EXPECT_EQ(0, stats.decoded_plc_cng);
}
// Apply an initial playout delay. Calls to AudioCodingModule::PlayoutData10ms()
// should result in generating silence, check the associated field.
TEST_F(AudioCodingModuleTestOldApi,
DISABLED_ON_ANDROID(SilenceGeneratorCalled)) {
RegisterCodec();
AudioDecodingCallStats stats;
const int kInitialDelay = 100;
acm_->SetInitialPlayoutDelay(kInitialDelay);
int num_calls = 0;
for (int time_ms = 0; time_ms < kInitialDelay;
time_ms += kFrameSizeMs, ++num_calls) {
InsertPacketAndPullAudio();
}
acm_->GetDecodingCallStatistics(&stats);
EXPECT_EQ(0, stats.calls_to_neteq);
EXPECT_EQ(num_calls, stats.calls_to_silence_generator);
EXPECT_EQ(0, stats.decoded_normal);
EXPECT_EQ(0, stats.decoded_cng);
EXPECT_EQ(0, stats.decoded_plc);
EXPECT_EQ(0, stats.decoded_plc_cng);
}
// Insert some packets and pull audio. Check statistics are valid. Then,
// simulate packet loss and check if PLC and PLC-to-CNG statistics are
// correctly updated.

View File

@ -704,23 +704,6 @@ class AudioCodingModule {
virtual int32_t GetNetworkStatistics(
NetworkStatistics* network_statistics) = 0;
//
// Set an initial delay for playout.
// An initial delay yields ACM playout silence until equivalent of |delay_ms|
// audio payload is accumulated in NetEq jitter. Thereafter, ACM pulls audio
// from NetEq in its regular fashion, and the given delay is maintained
// through out the call, unless channel conditions yield to a higher jitter
// buffer delay.
//
// Input:
// -delay_ms : delay in milliseconds.
//
// Return values:
// -1 if failed to set the delay.
// 0 if delay is set successfully.
//
virtual int SetInitialPlayoutDelay(int delay_ms) = 0;
//
// Enable NACK and set the maximum size of the NACK list. If NACK is already
// enable then the maximum NACK list size is modified accordingly.

View File

@ -33,7 +33,6 @@ DEFINE_int32(sample_rate_hz, 16000, "Sampling rate in Hertz.");
DEFINE_int32(num_channels, 1, "Number of Channels.");
DEFINE_string(input_file, "", "Input file, PCM16 32 kHz, optional.");
DEFINE_int32(delay, 0, "Delay in millisecond.");
DEFINE_int32(init_delay, 0, "Initial delay in millisecond.");
DEFINE_bool(dtx, false, "Enable DTX at the sender side.");
DEFINE_bool(packet_loss, false, "Apply packet loss, c.f. Channel{.cc, .h}.");
DEFINE_bool(fec, false, "Use Forward Error Correction (FEC).");
@ -89,10 +88,6 @@ class DelayTest {
"Couldn't initialize receiver.\n";
ASSERT_EQ(0, acm_b_->InitializeReceiver()) <<
"Couldn't initialize receiver.\n";
if (FLAGS_init_delay > 0) {
ASSERT_EQ(0, acm_b_->SetInitialPlayoutDelay(FLAGS_init_delay)) <<
"Failed to set initial delay.\n";
}
if (FLAGS_delay > 0) {
ASSERT_EQ(0, acm_b_->SetMinimumPlayoutDelay(FLAGS_delay)) <<
@ -172,7 +167,7 @@ class DelayTest {
void OpenOutFile(const char* output_id) {
std::stringstream file_stream;
file_stream << "delay_test_" << FLAGS_codec << "_" << FLAGS_sample_rate_hz
<< "Hz" << "_" << FLAGS_init_delay << "ms_" << FLAGS_delay << "ms.pcm";
<< "Hz" << "_" << FLAGS_delay << "ms.pcm";
std::cout << "Output file: " << file_stream.str() << std::endl << std::endl;
std::string file_name = webrtc::test::OutputPath() + file_stream.str();
out_file_b_.Open(file_name.c_str(), 32000, "wb");

View File

@ -1,175 +0,0 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
#include <assert.h>
#include <math.h>
#include <iostream>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h"
#include "webrtc/modules/audio_coding/main/test/Channel.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
namespace {
double FrameRms(AudioFrame& frame) {
size_t samples = frame.num_channels_ * frame.samples_per_channel_;
double rms = 0;
for (size_t n = 0; n < samples; ++n)
rms += frame.data_[n] * frame.data_[n];
rms /= samples;
rms = sqrt(rms);
return rms;
}
}
class InitialPlayoutDelayTest : public ::testing::Test {
protected:
InitialPlayoutDelayTest()
: acm_a_(AudioCodingModule::Create(0)),
acm_b_(AudioCodingModule::Create(1)),
channel_a2b_(NULL) {}
~InitialPlayoutDelayTest() {
if (channel_a2b_ != NULL) {
delete channel_a2b_;
channel_a2b_ = NULL;
}
}
void SetUp() {
ASSERT_TRUE(acm_a_.get() != NULL);
ASSERT_TRUE(acm_b_.get() != NULL);
EXPECT_EQ(0, acm_b_->InitializeReceiver());
EXPECT_EQ(0, acm_a_->InitializeReceiver());
// Register all L16 codecs in receiver.
CodecInst codec;
const int kFsHz[3] = { 8000, 16000, 32000 };
const int kChannels[2] = { 1, 2 };
for (int n = 0; n < 3; ++n) {
for (int k = 0; k < 2; ++k) {
AudioCodingModule::Codec("L16", &codec, kFsHz[n], kChannels[k]);
acm_b_->RegisterReceiveCodec(codec);
}
}
// Create and connect the channel
channel_a2b_ = new Channel;
acm_a_->RegisterTransportCallback(channel_a2b_);
channel_a2b_->RegisterReceiverACM(acm_b_.get());
}
void NbMono() {
CodecInst codec;
AudioCodingModule::Codec("L16", &codec, 8000, 1);
codec.pacsize = codec.plfreq * 30 / 1000; // 30 ms packets.
Run(codec, 1000);
}
void WbMono() {
CodecInst codec;
AudioCodingModule::Codec("L16", &codec, 16000, 1);
codec.pacsize = codec.plfreq * 30 / 1000; // 30 ms packets.
Run(codec, 1000);
}
void SwbMono() {
CodecInst codec;
AudioCodingModule::Codec("L16", &codec, 32000, 1);
codec.pacsize = codec.plfreq * 10 / 1000; // 10 ms packets.
Run(codec, 400); // Memory constraints limit the buffer at <500 ms.
}
void NbStereo() {
CodecInst codec;
AudioCodingModule::Codec("L16", &codec, 8000, 2);
codec.pacsize = codec.plfreq * 30 / 1000; // 30 ms packets.
Run(codec, 1000);
}
void WbStereo() {
CodecInst codec;
AudioCodingModule::Codec("L16", &codec, 16000, 2);
codec.pacsize = codec.plfreq * 30 / 1000; // 30 ms packets.
Run(codec, 1000);
}
void SwbStereo() {
CodecInst codec;
AudioCodingModule::Codec("L16", &codec, 32000, 2);
codec.pacsize = codec.plfreq * 10 / 1000; // 10 ms packets.
Run(codec, 400); // Memory constraints limit the buffer at <500 ms.
}
private:
void Run(CodecInst codec, int initial_delay_ms) {
AudioFrame in_audio_frame;
AudioFrame out_audio_frame;
int num_frames = 0;
const int kAmp = 10000;
in_audio_frame.sample_rate_hz_ = codec.plfreq;
in_audio_frame.num_channels_ = codec.channels;
in_audio_frame.samples_per_channel_ = codec.plfreq / 100; // 10 ms.
size_t samples = in_audio_frame.num_channels_ *
in_audio_frame.samples_per_channel_;
for (size_t n = 0; n < samples; ++n) {
in_audio_frame.data_[n] = kAmp;
}
uint32_t timestamp = 0;
double rms = 0;
ASSERT_EQ(0, acm_a_->RegisterSendCodec(codec));
acm_b_->SetInitialPlayoutDelay(initial_delay_ms);
while (rms < kAmp / 2) {
in_audio_frame.timestamp_ = timestamp;
timestamp += static_cast<uint32_t>(in_audio_frame.samples_per_channel_);
ASSERT_GE(acm_a_->Add10MsData(in_audio_frame), 0);
ASSERT_EQ(0, acm_b_->PlayoutData10Ms(codec.plfreq, &out_audio_frame));
rms = FrameRms(out_audio_frame);
++num_frames;
}
ASSERT_GE(num_frames * 10, initial_delay_ms);
ASSERT_LE(num_frames * 10, initial_delay_ms + 100);
}
rtc::scoped_ptr<AudioCodingModule> acm_a_;
rtc::scoped_ptr<AudioCodingModule> acm_b_;
Channel* channel_a2b_;
};
TEST_F(InitialPlayoutDelayTest, NbMono) { NbMono(); }
TEST_F(InitialPlayoutDelayTest, WbMono) { WbMono(); }
TEST_F(InitialPlayoutDelayTest, SwbMono) { SwbMono(); }
TEST_F(InitialPlayoutDelayTest, NbStereo) { NbStereo(); }
TEST_F(InitialPlayoutDelayTest, WbStereo) { WbStereo(); }
TEST_F(InitialPlayoutDelayTest, SwbStereo) { SwbStereo(); }
} // namespace webrtc

View File

@ -42,7 +42,6 @@ DEFINE_string(receive_ts, "last_rec_timestamp", "Receive timestamp file");
DEFINE_string(delay, "", "Log for delay.");
// Other setups
DEFINE_int32(init_delay, 0, "Initial delay.");
DEFINE_bool(verbose, false, "Verbosity.");
DEFINE_double(loss_rate, 0, "Rate of packet loss < 1");
@ -122,9 +121,6 @@ class InsertPacketWithTiming {
<< " Hz." << std::endl;
// Other setups
if (FLAGS_init_delay > 0)
EXPECT_EQ(0, receive_acm_->SetInitialPlayoutDelay(FLAGS_init_delay));
if (FLAGS_loss_rate > 0)
loss_threshold_ = RAND_MAX * FLAGS_loss_rate;
else

View File

@ -432,7 +432,6 @@
'audio_coding/main/test/TimedTrace.cc',
'audio_coding/main/test/TwoWayCommunication.cc',
'audio_coding/main/test/iSACTest.cc',
'audio_coding/main/test/initial_delay_unittest.cc',
'audio_coding/main/test/opus_test.cc',
'audio_coding/main/test/target_delay_unittest.cc',
'audio_coding/main/test/utility.cc',

View File

@ -441,7 +441,6 @@ class FakeVoiceEngine final : public VoiceEngineImpl {
// VoEVideoSync
int GetPlayoutBufferSize(int& buffer_ms) override { return -1; }
int SetMinimumPlayoutDelay(int channel, int delay_ms) override { return -1; }
int SetInitialPlayoutDelay(int channel, int delay_ms) override { return -1; }
int GetDelayEstimate(int channel,
int* jitter_buffer_delay_ms,
int* playout_buffer_delay_ms) override {

View File

@ -40,7 +40,6 @@ namespace webrtc {
const int kMaxDecodeWaitTimeMs = 50;
static const int kMaxTargetDelayMs = 10000;
static const float kMaxIncompleteTimeMultiplier = 3.5f;
// Helper class receiving statistics callbacks.
class ChannelStatsObserver : public CallStatsObserver {
@ -575,33 +574,6 @@ int ViEChannel::SetSenderBufferingMode(int target_delay_ms) {
return 0;
}
int ViEChannel::SetReceiverBufferingMode(int target_delay_ms) {
if ((target_delay_ms < 0) || (target_delay_ms > kMaxTargetDelayMs)) {
LOG(LS_ERROR) << "Invalid receive buffer delay value.";
return -1;
}
int max_nack_list_size;
int max_incomplete_time_ms;
if (target_delay_ms == 0) {
// Real-time mode - restore default settings.
max_nack_reordering_threshold_ = kMaxPacketAgeToNack;
max_nack_list_size = kMaxNackListSize;
max_incomplete_time_ms = 0;
} else {
max_nack_list_size = 3 * GetRequiredNackListSize(target_delay_ms) / 4;
max_nack_reordering_threshold_ = max_nack_list_size;
// Calculate the max incomplete time and round to int.
max_incomplete_time_ms = static_cast<int>(kMaxIncompleteTimeMultiplier *
target_delay_ms + 0.5f);
}
vcm_->SetNackSettings(max_nack_list_size, max_nack_reordering_threshold_,
max_incomplete_time_ms);
vcm_->SetMinReceiverDelay(target_delay_ms);
if (vie_sync_.SetTargetBufferingDelay(target_delay_ms) < 0)
return -1;
return 0;
}
int ViEChannel::GetRequiredNackListSize(int target_delay_ms) {
// The max size of the nack list should be large enough to accommodate the
// the number of packets (frames) resulting from the increased delay.

View File

@ -108,7 +108,6 @@ class ViEChannel : public VCMFrameTypeCallback,
int payload_type_fec);
bool IsSendingFecEnabled();
int SetSenderBufferingMode(int target_delay_ms);
int SetReceiverBufferingMode(int target_delay_ms);
int SetSendTimestampOffsetStatus(bool enable, int id);
int SetReceiveTimestampOffsetStatus(bool enable, int id);
int SetSendAbsoluteSendTimeStatus(bool enable, int id);

View File

@ -171,18 +171,4 @@ int32_t ViESyncModule::Process() {
return 0;
}
int ViESyncModule::SetTargetBufferingDelay(int target_delay_ms) {
CriticalSectionScoped cs(data_cs_.get());
if (!voe_sync_interface_) {
LOG(LS_ERROR) << "voe_sync_interface_ NULL, can't set playout delay.";
return -1;
}
sync_->SetTargetBufferingDelay(target_delay_ms);
// Setting initial playout delay to voice engine (video engine is updated via
// the VCM interface).
voe_sync_interface_->SetInitialPlayoutDelay(voe_channel_id_,
target_delay_ms);
return 0;
}
} // namespace webrtc

View File

@ -40,9 +40,6 @@ class ViESyncModule : public Module {
int VoiceChannel();
// Set target delay for buffering mode (0 = real-time mode).
int SetTargetBufferingDelay(int target_delay_ms);
// Implements Module.
int64_t TimeUntilNextProcess() override;
int32_t Process() override;

View File

@ -3414,29 +3414,6 @@ int Channel::LeastRequiredDelayMs() const {
return audio_coding_->LeastRequiredDelayMs();
}
int Channel::SetInitialPlayoutDelay(int delay_ms)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::SetInitialPlayoutDelay()");
if ((delay_ms < kVoiceEngineMinMinPlayoutDelayMs) ||
(delay_ms > kVoiceEngineMaxMinPlayoutDelayMs))
{
_engineStatisticsPtr->SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"SetInitialPlayoutDelay() invalid min delay");
return -1;
}
if (audio_coding_->SetInitialPlayoutDelay(delay_ms) != 0)
{
_engineStatisticsPtr->SetLastError(
VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
"SetInitialPlayoutDelay() failed to set min playout delay");
return -1;
}
return 0;
}
int
Channel::SetMinimumPlayoutDelay(int delayMs)
{

View File

@ -280,7 +280,6 @@ public:
bool GetDelayEstimate(int* jitter_buffer_delay_ms,
int* playout_buffer_delay_ms) const;
int LeastRequiredDelayMs() const;
int SetInitialPlayoutDelay(int delay_ms);
int SetMinimumPlayoutDelay(int delayMs);
int GetPlayoutTimestamp(unsigned int& timestamp);
int SetInitTimestamp(unsigned int timestamp);

View File

@ -64,13 +64,6 @@ class WEBRTC_DLLEXPORT VoEVideoSync {
// computes based on inter-arrival times and its playout mode.
virtual int SetMinimumPlayoutDelay(int channel, int delay_ms) = 0;
// Sets an initial delay for the playout jitter buffer. The playout of the
// audio is delayed by |delay_ms| in milliseconds. Thereafter, the delay is
// maintained, unless NetEq's internal mechanism requires a higher latency.
// Such a latency is computed based on inter-arrival times and NetEq's
// playout mode.
virtual int SetInitialPlayoutDelay(int channel, int delay_ms) = 0;
// Gets the |jitter_buffer_delay_ms| (including the algorithmic delay), and
// the |playout_buffer_delay_ms| for a specified |channel|.
virtual int GetDelayEstimate(int channel,

View File

@ -116,25 +116,6 @@ int VoEVideoSyncImpl::SetMinimumPlayoutDelay(int channel, int delayMs) {
return channelPtr->SetMinimumPlayoutDelay(delayMs);
}
int VoEVideoSyncImpl::SetInitialPlayoutDelay(int channel, int delay_ms) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetInitialPlayoutDelay(channel=%d, delay_ms=%d)", channel,
delay_ms);
if (!_shared->statistics().Initialized()) {
_shared->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetInitialPlayoutDelay() failed to locate channel");
return -1;
}
return channelPtr->SetInitialPlayoutDelay(delay_ms);
}
int VoEVideoSyncImpl::GetDelayEstimate(int channel,
int* jitter_buffer_delay_ms,
int* playout_buffer_delay_ms) {

View File

@ -23,8 +23,6 @@ class VoEVideoSyncImpl : public VoEVideoSync {
int SetMinimumPlayoutDelay(int channel, int delayMs) override;
int SetInitialPlayoutDelay(int channel, int delay_ms) override;
int GetDelayEstimate(int channel,
int* jitter_buffer_delay_ms,
int* playout_buffer_delay_ms) override;