Remove various IDs:

- AudioFrame
- AudioCodingModule

BUG=webrtc:4690
TBR=kwiberg@webrtc.org

Review-Url: https://codereview.webrtc.org/3019543002
Cr-Commit-Position: refs/heads/master@{#20005}
Author: solenberg
Date: 2017-09-27 10:33:57 -07:00
Committed by: Commit Bot
Parent: 94286cb25c
Commit: 2d0f77585d
25 changed files with 52 additions and 90 deletions
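In practice the change boils down to dropping the id argument at every call site. A minimal before/after sketch of a creation helper, assuming the usual include path (the helper name is illustrative, not part of this change):

    #include <memory>

    #include "modules/audio_coding/include/audio_coding_module.h"  // assumed path

    std::unique_ptr<webrtc::AudioCodingModule> CreateAcm() {
      // Before this change callers had to invent a numeric id:
      //   AudioCodingModule::Create(0);
      // Now there is no id argument at all:
      return std::unique_ptr<webrtc::AudioCodingModule>(
          webrtc::AudioCodingModule::Create());
    }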


@ -110,7 +110,6 @@ AudioCodingModule::Config MakeAcmConfig(
Clock* clock,
rtc::scoped_refptr<AudioDecoderFactory> decoder_factory) {
AudioCodingModule::Config config;
config.id = 0;
config.clock = clock;
config.decoder_factory = std::move(decoder_factory);
return config;
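The id-free config construction above, as a standalone sketch (header path assumed; MakeConfig is an illustrative name):

    #include <utility>

    #include "modules/audio_coding/include/audio_coding_module.h"  // assumed path

    namespace {

    // Mirrors MakeAcmConfig above: only a clock and a decoder factory are injected.
    webrtc::AudioCodingModule::Config MakeConfig(
        webrtc::Clock* clock,
        rtc::scoped_refptr<webrtc::AudioDecoderFactory> decoder_factory) {
      webrtc::AudioCodingModule::Config config;
      config.clock = clock;
      config.decoder_factory = std::move(decoder_factory);
      return config;  // no config.id to set anymore
    }

    }  // namespace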


@ -28,7 +28,7 @@ AcmSendTestOldApi::AcmSendTestOldApi(InputAudioFile* audio_source,
int source_rate_hz,
int test_duration_ms)
: clock_(0),
acm_(webrtc::AudioCodingModule::Create(0, &clock_)),
acm_(webrtc::AudioCodingModule::Create(&clock_)),
audio_source_(audio_source),
source_rate_hz_(source_rate_hz),
input_block_size_samples_(


@ -269,7 +269,6 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
rtc::CriticalSection acm_crit_sect_;
rtc::Buffer encode_buffer_ RTC_GUARDED_BY(acm_crit_sect_);
int id_; // TODO(henrik.lundin) Make const.
uint32_t expected_codec_ts_ RTC_GUARDED_BY(acm_crit_sect_);
uint32_t expected_in_ts_ RTC_GUARDED_BY(acm_crit_sect_);
acm2::ACMResampler resampler_ RTC_GUARDED_BY(acm_crit_sect_);
@ -456,8 +455,7 @@ void AudioCodingModuleImpl::ChangeLogger::MaybeLog(int value) {
AudioCodingModuleImpl::AudioCodingModuleImpl(
const AudioCodingModule::Config& config)
: id_(config.id),
expected_codec_ts_(0xD87F3F9F),
: expected_codec_ts_(0xD87F3F9F),
expected_in_ts_(0xD87F3F9F),
receiver_(config),
bitrate_logger_("WebRTC.Audio.TargetBitrateInKbps"),
@ -1120,7 +1118,6 @@ int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
LOG(LS_ERROR) << "PlayoutData failed, RecOut Failed";
return -1;
}
audio_frame->id_ = id_;
return 0;
}
@ -1286,7 +1283,7 @@ ANAStats AudioCodingModuleImpl::GetANAStats() const {
} // namespace
AudioCodingModule::Config::Config()
: id(0), neteq_config(), clock(Clock::GetRealTimeClock()) {
: neteq_config(), clock(Clock::GetRealTimeClock()) {
// Post-decode VAD is disabled by default in NetEq, however, Audio
// Conference Mixer relies on VAD decisions and fails without them.
neteq_config.enable_post_decode_vad = true;
@ -1296,17 +1293,15 @@ AudioCodingModule::Config::Config(const Config&) = default;
AudioCodingModule::Config::~Config() = default;
// Create module
AudioCodingModule* AudioCodingModule::Create(int id) {
AudioCodingModule* AudioCodingModule::Create() {
Config config;
config.id = id;
config.clock = Clock::GetRealTimeClock();
config.decoder_factory = CreateBuiltinAudioDecoderFactory();
return Create(config);
}
AudioCodingModule* AudioCodingModule::Create(int id, Clock* clock) {
AudioCodingModule* AudioCodingModule::Create(Clock* clock) {
Config config;
config.id = id;
config.clock = clock;
config.decoder_factory = CreateBuiltinAudioDecoderFactory();
return Create(config);


@ -157,8 +157,7 @@ class PacketizationCallbackStubOldApi : public AudioPacketizationCallback {
class AudioCodingModuleTestOldApi : public ::testing::Test {
protected:
AudioCodingModuleTestOldApi()
: id_(1),
rtp_utility_(new RtpUtility(kFrameSizeSamples, kPayloadType)),
: rtp_utility_(new RtpUtility(kFrameSizeSamples, kPayloadType)),
clock_(Clock::GetRealTimeClock()) {}
~AudioCodingModuleTestOldApi() {}
@ -166,7 +165,7 @@ class AudioCodingModuleTestOldApi : public ::testing::Test {
void TearDown() {}
void SetUp() {
acm_.reset(AudioCodingModule::Create(id_, clock_));
acm_.reset(AudioCodingModule::Create(clock_));
rtp_utility_->Populate(&rtp_header_);
@ -230,7 +229,6 @@ class AudioCodingModuleTestOldApi : public ::testing::Test {
VerifyEncoding();
}
const int id_;
std::unique_ptr<RtpUtility> rtp_utility_;
std::unique_ptr<AudioCodingModule> acm_;
PacketizationCallbackStubOldApi packet_cb_;
@ -314,7 +312,6 @@ TEST_F(AudioCodingModuleTestOldApi, VerifyOutputFrame) {
bool muted;
EXPECT_EQ(0, acm_->PlayoutData10Ms(kSampleRateHz, &audio_frame, &muted));
ASSERT_FALSE(muted);
EXPECT_EQ(id_, audio_frame.id_);
EXPECT_EQ(0u, audio_frame.timestamp_);
EXPECT_GT(audio_frame.num_channels_, 0u);
EXPECT_EQ(static_cast<size_t>(kSampleRateHz / 100),


@ -70,7 +70,6 @@ class AudioCodingModule {
Config(const Config&);
~Config();
int id;
NetEq::Config neteq_config;
Clock* clock;
rtc::scoped_refptr<AudioDecoderFactory> decoder_factory;
@ -83,8 +82,8 @@ class AudioCodingModule {
// injected into ACM. ACM will take the ownership of the object clock and
// delete it when destroyed.
//
static AudioCodingModule* Create(int id);
static AudioCodingModule* Create(int id, Clock* clock);
static AudioCodingModule* Create();
static AudioCodingModule* Create(Clock* clock);
static AudioCodingModule* Create(const Config& config);
virtual ~AudioCodingModule() = default;
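Embedders that still want a per-instance number for their own bookkeeping can keep it next to the module instead of inside it. One possible shape, purely illustrative and not prescribed by this change (header path assumed):

    #include <memory>

    #include "modules/audio_coding/include/audio_coding_module.h"  // assumed path

    // Hypothetical wrapper: the identifier lives in the owning code, not in ACM.
    struct OwnedAcm {
      int local_id;  // whatever the embedder uses for logging or bookkeeping
      std::unique_ptr<webrtc::AudioCodingModule> acm;
    };

    OwnedAcm MakeOwnedAcm(int local_id) {
      return {local_id, std::unique_ptr<webrtc::AudioCodingModule>(
                            webrtc::AudioCodingModule::Create())};
    }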


@ -48,8 +48,8 @@ void APITest::Wait(uint32_t waitLengthMs) {
}
APITest::APITest()
: _acmA(AudioCodingModule::Create(1)),
_acmB(AudioCodingModule::Create(2)),
: _acmA(AudioCodingModule::Create()),
_acmB(AudioCodingModule::Create()),
_channel_A2B(NULL),
_channel_B2A(NULL),
_writeToFile(true),

View File

@ -281,7 +281,7 @@ void EncodeDecodeTest::Perform() {
codePars[1] = 0;
codePars[2] = 0;
std::unique_ptr<AudioCodingModule> acm(AudioCodingModule::Create(0));
std::unique_ptr<AudioCodingModule> acm(AudioCodingModule::Create());
struct CodecInst sendCodecTmp;
numCodecs = acm->NumberOfCodecs();
@ -337,7 +337,7 @@ std::string EncodeDecodeTest::EncodeToFile(int fileType,
int codeId,
int* codePars,
int testMode) {
std::unique_ptr<AudioCodingModule> acm(AudioCodingModule::Create(1));
std::unique_ptr<AudioCodingModule> acm(AudioCodingModule::Create());
RTPFile rtpFile;
std::string fileName = webrtc::test::TempFilename(webrtc::test::OutputPath(),
"encode_decode_rtp");


@ -127,7 +127,7 @@ void PacketLossTest::Perform() {
#ifndef WEBRTC_CODEC_OPUS
return;
#else
std::unique_ptr<AudioCodingModule> acm(AudioCodingModule::Create(0));
std::unique_ptr<AudioCodingModule> acm(AudioCodingModule::Create());
int codec_id = acm->Codec("opus", 48000, channels_);


@ -104,8 +104,8 @@ void TestPack::reset_payload_size() {
}
TestAllCodecs::TestAllCodecs(int test_mode)
: acm_a_(AudioCodingModule::Create(0)),
acm_b_(AudioCodingModule::Create(1)),
: acm_a_(AudioCodingModule::Create()),
acm_b_(AudioCodingModule::Create()),
channel_a_to_b_(NULL),
test_count_(0),
packet_size_samples_(0),

View File

@ -48,8 +48,8 @@ namespace {
}
TestRedFec::TestRedFec()
: _acmA(AudioCodingModule::Create(0)),
_acmB(AudioCodingModule::Create(1)),
: _acmA(AudioCodingModule::Create()),
_acmB(AudioCodingModule::Create()),
_channelA2B(NULL),
_testCntr(0) {
}

View File

@ -108,8 +108,8 @@ void TestPackStereo::set_lost_packet(bool lost) {
}
TestStereo::TestStereo(int test_mode)
: acm_a_(AudioCodingModule::Create(0)),
acm_b_(AudioCodingModule::Create(1)),
: acm_a_(AudioCodingModule::Create()),
acm_b_(AudioCodingModule::Create()),
channel_a2b_(NULL),
test_cntr_(0),
pack_size_samp_(0),


@ -62,8 +62,8 @@ void ActivityMonitor::GetStatistics(uint32_t* counter) {
}
TestVadDtx::TestVadDtx()
: acm_send_(AudioCodingModule::Create(0)),
acm_receive_(AudioCodingModule::Create(1)),
: acm_send_(AudioCodingModule::Create()),
acm_receive_(AudioCodingModule::Create()),
channel_(new Channel),
monitor_(new ActivityMonitor) {
EXPECT_EQ(0, acm_send_->RegisterTransportCallback(channel_.get()));


@ -34,16 +34,14 @@ namespace webrtc {
#define MAX_FILE_NAME_LENGTH_BYTE 500
TwoWayCommunication::TwoWayCommunication(int testMode)
: _acmA(AudioCodingModule::Create(1)),
_acmRefA(AudioCodingModule::Create(3)),
: _acmA(AudioCodingModule::Create()),
_acmRefA(AudioCodingModule::Create()),
_testMode(testMode) {
AudioCodingModule::Config config;
// The clicks will be more obvious in FAX mode. TODO(henrik.lundin) Really?
config.neteq_config.playout_mode = kPlayoutFax;
config.id = 2;
config.decoder_factory = CreateBuiltinAudioDecoderFactory();
_acmB.reset(AudioCodingModule::Create(config));
config.id = 4;
_acmRefB.reset(AudioCodingModule::Create(config));
}
@ -62,7 +60,7 @@ TwoWayCommunication::~TwoWayCommunication() {
void TwoWayCommunication::ChooseCodec(uint8_t* codecID_A,
uint8_t* codecID_B) {
std::unique_ptr<AudioCodingModule> tmpACM(AudioCodingModule::Create(0));
std::unique_ptr<AudioCodingModule> tmpACM(AudioCodingModule::Create());
uint8_t noCodec = tmpACM->NumberOfCodecs();
CodecInst codecInst;
printf("List of Supported Codecs\n");
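Because the config no longer carries an id, both B-side modules above can be built from one unmodified Config. In isolation that looks roughly like this (sketch; header paths assumed):

    #include <memory>

    #include "api/audio_codecs/builtin_audio_decoder_factory.h"    // assumed path
    #include "modules/audio_coding/include/audio_coding_module.h"  // assumed path

    void CreateBSide(std::unique_ptr<webrtc::AudioCodingModule>* acm_b,
                     std::unique_ptr<webrtc::AudioCodingModule>* acm_ref_b) {
      webrtc::AudioCodingModule::Config config;
      config.decoder_factory = webrtc::CreateBuiltinAudioDecoderFactory();
      // Previously config.id had to be patched between the two Create() calls.
      acm_b->reset(webrtc::AudioCodingModule::Create(config));
      acm_ref_b->reset(webrtc::AudioCodingModule::Create(config));
    }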


@ -64,8 +64,8 @@ struct TestSettings {
class DelayTest {
public:
DelayTest()
: acm_a_(AudioCodingModule::Create(0)),
acm_b_(AudioCodingModule::Create(1)),
: acm_a_(AudioCodingModule::Create()),
acm_b_(AudioCodingModule::Create()),
channel_a2b_(new Channel),
test_cntr_(0),
encoding_sample_rate_hz_(8000) {}


@ -67,8 +67,8 @@ int16_t SetISAConfig(ACMTestISACConfig& isacConfig, AudioCodingModule* acm,
}
ISACTest::ISACTest(int testMode)
: _acmA(AudioCodingModule::Create(1)),
_acmB(AudioCodingModule::Create(2)),
: _acmA(AudioCodingModule::Create()),
_acmB(AudioCodingModule::Create()),
_testMode(testMode) {}
ISACTest::~ISACTest() {}


@ -61,8 +61,8 @@ class InsertPacketWithTiming {
InsertPacketWithTiming()
: sender_clock_(new SimulatedClock(0)),
receiver_clock_(new SimulatedClock(0)),
send_acm_(AudioCodingModule::Create(0, sender_clock_)),
receive_acm_(AudioCodingModule::Create(0, receiver_clock_)),
send_acm_(AudioCodingModule::Create(sender_clock_)),
receive_acm_(AudioCodingModule::Create(receiver_clock_)),
channel_(new Channel),
seq_num_fid_(fopen(FLAG_seq_num, "rt")),
send_ts_fid_(fopen(FLAG_send_ts, "rt")),


@ -27,7 +27,7 @@
namespace webrtc {
OpusTest::OpusTest()
: acm_receiver_(AudioCodingModule::Create(0)),
: acm_receiver_(AudioCodingModule::Create()),
channel_a2b_(NULL),
counter_(0),
payload_type_(255),


@ -22,7 +22,7 @@ namespace webrtc {
class TargetDelayTest : public ::testing::Test {
protected:
TargetDelayTest() : acm_(AudioCodingModule::Create(0)) {}
TargetDelayTest() : acm_(AudioCodingModule::Create()) {}
~TargetDelayTest() {}


@ -35,12 +35,10 @@ namespace webrtc {
namespace {
constexpr int kDefaultSampleRateHz = 48000;
constexpr int kId = 1;
// Utility function that resets the frame member variables with
// sensible defaults.
void ResetFrame(AudioFrame* frame) {
frame->id_ = kId;
frame->sample_rate_hz_ = kDefaultSampleRateHz;
frame->num_channels_ = 1;
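A complete id-free version of such a reset helper might look like this (sketch; the AudioFrame header has moved over time, so the include path is an assumption):

    #include "modules/include/module_common_types.h"  // assumed AudioFrame header

    namespace {

    constexpr int kDefaultSampleRateHz = 48000;

    // Resets only audio-related members; there is no id_ left to set.
    void ResetTestFrame(webrtc::AudioFrame* frame) {
      frame->sample_rate_hz_ = kDefaultSampleRateHz;
      frame->num_channels_ = 1;
      frame->samples_per_channel_ = kDefaultSampleRateHz / 100;  // 10 ms
      frame->speech_type_ = webrtc::AudioFrame::kNormalSpeech;
      frame->vad_activity_ = webrtc::AudioFrame::kVadActive;
    }

    }  // namespace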


@ -193,7 +193,7 @@ void FrameCombiner::Combine(const std::vector<AudioFrame*>& mix_list,
// value '0', because it is only supported in the one channel case and
// is then updated in the helper functions.
audio_frame_for_mixing->UpdateFrame(
-1, 0, nullptr, samples_per_channel, sample_rate, AudioFrame::kUndefined,
0, nullptr, samples_per_channel, sample_rate, AudioFrame::kUndefined,
AudioFrame::kVadUnknown, number_of_channels);
const bool use_limiter_this_round = use_apm_limiter_ && number_of_streams > 1;
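The new UpdateFrame() signature used above, shown on its own: no leading id, and a null data pointer keeps the frame muted (sketch; header path assumed as before):

    #include <cstddef>

    #include "modules/include/module_common_types.h"  // assumed AudioFrame header

    // Primes a mixing output frame for 10 ms of audio at the given rate.
    void PrimeMixFrame(webrtc::AudioFrame* frame, int sample_rate_hz,
                       size_t number_of_channels) {
      frame->UpdateFrame(/*timestamp=*/0, /*data=*/nullptr,
                         static_cast<size_t>(sample_rate_hz / 100), sample_rate_hz,
                         webrtc::AudioFrame::kUndefined,
                         webrtc::AudioFrame::kVadUnknown, number_of_channels);
    }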


@ -53,8 +53,7 @@ AudioFrame audio_frame_for_mixing;
void SetUpFrames(int sample_rate_hz, int number_of_channels) {
for (auto* frame : {&frame1, &frame2}) {
frame->UpdateFrame(-1, 0, nullptr,
rtc::CheckedDivExact(sample_rate_hz, 100),
frame->UpdateFrame(0, nullptr, rtc::CheckedDivExact(sample_rate_hz, 100),
sample_rate_hz, AudioFrame::kNormalSpeech,
AudioFrame::kVadActive, number_of_channels);
}


@ -330,7 +330,18 @@ class AudioFrame {
// ResetWithoutMuting() to skip this wasteful zeroing.
void ResetWithoutMuting();
// TODO(solenberg): Remove once downstream users of AudioFrame have updated.
RTC_DEPRECATED
void UpdateFrame(int id, uint32_t timestamp, const int16_t* data,
size_t samples_per_channel, int sample_rate_hz,
SpeechType speech_type, VADActivity vad_activity,
size_t num_channels = 1) {
RTC_UNUSED(id);
UpdateFrame(timestamp, data, samples_per_channel, sample_rate_hz,
speech_type, vad_activity, num_channels);
}
void UpdateFrame(uint32_t timestamp, const int16_t* data,
size_t samples_per_channel, int sample_rate_hz,
SpeechType speech_type, VADActivity vad_activity,
size_t num_channels = 1);
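Call sites migrate by dropping the leading id argument; until they do, the RTC_DEPRECATED forwarding overload above keeps them compiling. An illustrative caller of the new signature (the helper name and PCM source are assumptions):

    #include <cstdint>
    #include <vector>

    #include "modules/include/module_common_types.h"  // assumed AudioFrame header

    // Fills a frame from 10 ms of decoded mono PCM; note there is no id argument.
    void FillFromPcm(webrtc::AudioFrame* frame, uint32_t rtp_timestamp,
                     const std::vector<int16_t>& pcm, int sample_rate_hz) {
      frame->UpdateFrame(rtp_timestamp, pcm.data(), pcm.size(), sample_rate_hz,
                         webrtc::AudioFrame::kNormalSpeech,
                         webrtc::AudioFrame::kVadUnknown,
                         /*num_channels=*/1);
    }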
@ -366,7 +377,6 @@ class AudioFrame {
RTC_DEPRECATED AudioFrame& operator>>=(const int rhs);
RTC_DEPRECATED AudioFrame& operator+=(const AudioFrame& rhs);
int id_;
// RTP timestamp of the first sample in the AudioFrame.
uint32_t timestamp_ = 0;
// Time since the first frame in milliseconds.
@ -414,7 +424,6 @@ inline void AudioFrame::Reset() {
}
inline void AudioFrame::ResetWithoutMuting() {
id_ = -1;
// TODO(wu): Zero is a valid value for |timestamp_|. We should initialize
// to an invalid value, or add a new member to indicate invalidity.
timestamp_ = 0;
@ -428,15 +437,13 @@ inline void AudioFrame::ResetWithoutMuting() {
profile_timestamp_ms_ = 0;
}
inline void AudioFrame::UpdateFrame(int id,
uint32_t timestamp,
inline void AudioFrame::UpdateFrame(uint32_t timestamp,
const int16_t* data,
size_t samples_per_channel,
int sample_rate_hz,
SpeechType speech_type,
VADActivity vad_activity,
size_t num_channels) {
id_ = id;
timestamp_ = timestamp;
samples_per_channel_ = samples_per_channel;
sample_rate_hz_ = sample_rate_hz;
@ -457,7 +464,6 @@ inline void AudioFrame::UpdateFrame(int id,
inline void AudioFrame::CopyFrom(const AudioFrame& src) {
if (this == &src) return;
id_ = src.id_;
timestamp_ = src.timestamp_;
elapsed_time_ms_ = src.elapsed_time_ms_;
ntp_time_ms_ = src.ntp_time_ms_;


@ -28,7 +28,6 @@ bool AllSamplesAre(int16_t sample, const AudioFrame& frame) {
return true;
}
constexpr int kId = 16;
constexpr uint32_t kTimestamp = 27;
constexpr int kSampleRateHz = 16000;
constexpr size_t kNumChannels = 1;
@ -64,10 +63,9 @@ TEST(AudioFrameTest, MutedFrameBufferIsZeroed) {
TEST(AudioFrameTest, UpdateFrame) {
AudioFrame frame;
int16_t samples[kNumChannels * kSamplesPerChannel] = {17};
frame.UpdateFrame(kId, kTimestamp, samples, kSamplesPerChannel, kSampleRateHz,
frame.UpdateFrame(kTimestamp, samples, kSamplesPerChannel, kSampleRateHz,
AudioFrame::kPLC, AudioFrame::kVadActive, kNumChannels);
EXPECT_EQ(kId, frame.id_);
EXPECT_EQ(kTimestamp, frame.timestamp_);
EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel_);
EXPECT_EQ(kSampleRateHz, frame.sample_rate_hz_);
@ -78,7 +76,7 @@ TEST(AudioFrameTest, UpdateFrame) {
EXPECT_FALSE(frame.muted());
EXPECT_EQ(0, memcmp(samples, frame.data(), sizeof(samples)));
frame.UpdateFrame(kId, kTimestamp, nullptr /* data*/, kSamplesPerChannel,
frame.UpdateFrame(kTimestamp, nullptr /* data*/, kSamplesPerChannel,
kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
kNumChannels);
EXPECT_TRUE(frame.muted());
@ -90,12 +88,11 @@ TEST(AudioFrameTest, CopyFrom) {
AudioFrame frame2;
int16_t samples[kNumChannels * kSamplesPerChannel] = {17};
frame2.UpdateFrame(kId, kTimestamp, samples, kSamplesPerChannel,
frame2.UpdateFrame(kTimestamp, samples, kSamplesPerChannel,
kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
kNumChannels);
frame1.CopyFrom(frame2);
EXPECT_EQ(frame2.id_, frame1.id_);
EXPECT_EQ(frame2.timestamp_, frame1.timestamp_);
EXPECT_EQ(frame2.samples_per_channel_, frame1.samples_per_channel_);
EXPECT_EQ(frame2.sample_rate_hz_, frame1.sample_rate_hz_);
@ -106,7 +103,7 @@ TEST(AudioFrameTest, CopyFrom) {
EXPECT_EQ(frame2.muted(), frame1.muted());
EXPECT_EQ(0, memcmp(frame2.data(), frame1.data(), sizeof(samples)));
frame2.UpdateFrame(kId, kTimestamp, nullptr /* data */, kSamplesPerChannel,
frame2.UpdateFrame(kTimestamp, nullptr /* data */, kSamplesPerChannel,
kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
kNumChannels);
frame1.CopyFrom(frame2);
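The same two properties outside the test fixture: a null data pointer leaves the frame muted, and CopyFrom() carries over every remaining member (sketch; RTC_CHECK and the header paths are assumptions):

    #include "modules/include/module_common_types.h"  // assumed AudioFrame header
    #include "rtc_base/checks.h"                      // assumed path for RTC_CHECK

    void MutedCopyDemo() {
      webrtc::AudioFrame src;
      src.UpdateFrame(/*timestamp=*/27, /*data=*/nullptr,
                      /*samples_per_channel=*/160, /*sample_rate_hz=*/16000,
                      webrtc::AudioFrame::kPLC, webrtc::AudioFrame::kVadActive,
                      /*num_channels=*/1);
      RTC_CHECK(src.muted());  // no samples were provided

      webrtc::AudioFrame dst;
      dst.CopyFrom(src);
      RTC_CHECK_EQ(src.timestamp_, dst.timestamp_);  // likewise for the other members
    }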


@ -646,8 +646,6 @@ AudioMixer::Source::AudioFrameInfo Channel::GetAudioFrameWithInfo(
AudioFrameOperations::Mute(audio_frame);
}
// Convert module ID to internal VoE channel ID
audio_frame->id_ = VoEChannelId(audio_frame->id_);
// Store speech type for dead-or-alive detection
_outputSpeechType = audio_frame->speech_type_;
@ -796,7 +794,6 @@ Channel::Channel(int32_t channelId,
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, _channelId),
"Channel::Channel() - ctor");
AudioCodingModule::Config acm_config(config.acm_config);
acm_config.id = VoEModuleId(instanceId, channelId);
acm_config.neteq_config.enable_muted_state = true;
audio_coding_.reset(AudioCodingModule::Create(acm_config));
@ -1642,7 +1639,6 @@ void Channel::ProcessAndEncodeAudio(const AudioFrame& audio_input) {
// TODO(henrika): try to avoid copying by moving ownership of audio frame
// either into pool of frames or into the task itself.
audio_frame->CopyFrom(audio_input);
audio_frame->id_ = ChannelId();
encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
new ProcessAndEncodeAudioTask(std::move(audio_frame), this)));
}
@ -1659,7 +1655,6 @@ void Channel::ProcessAndEncodeAudio(const int16_t* audio_data,
CodecInst codec;
const int result = GetSendCodec(codec);
std::unique_ptr<AudioFrame> audio_frame(new AudioFrame());
audio_frame->id_ = ChannelId();
// TODO(ossu): Investigate how this could happen. b/62909493
if (result == 0) {
audio_frame->sample_rate_hz_ = std::min(codec.plfreq, sample_rate);
@ -1680,7 +1675,6 @@ void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) {
RTC_DCHECK_RUN_ON(encoder_queue_);
RTC_DCHECK_GT(audio_input->samples_per_channel_, 0);
RTC_DCHECK_LE(audio_input->num_channels_, 2);
RTC_DCHECK_EQ(audio_input->id_, ChannelId());
bool is_muted = InputMute();
AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted);


@ -16,23 +16,13 @@
#ifndef VOICE_ENGINE_VOICE_ENGINE_DEFINES_H_
#define VOICE_ENGINE_VOICE_ENGINE_DEFINES_H_
#include "common_types.h" // NOLINT(build/include)
#include "modules/audio_processing/include/audio_processing.h"
#include "typedefs.h" // NOLINT(build/include)
namespace webrtc {
// VolumeControl
enum { kMinVolumeLevel = 0 };
enum { kMaxVolumeLevel = 255 };
// Min scale factor for per-channel volume scaling
const float kMinOutputVolumeScaling = 0.0f;
// Max scale factor for per-channel volume scaling
const float kMaxOutputVolumeScaling = 10.0f;
// Min scale factor for output volume panning
const float kMinOutputVolumePanning = 0.0f;
// Max scale factor for output volume panning
const float kMaxOutputVolumePanning = 1.0f;
// Audio processing
const NoiseSuppression::Level kDefaultNsMode = NoiseSuppression::kModerate;
@ -48,7 +38,6 @@ const bool kDefaultAgcState =
#else
true;
#endif
const GainControl::Mode kDefaultRxAgcMode = GainControl::kAdaptiveDigital;
// VideoSync
// Lowest minimum playout delay
@ -68,15 +57,6 @@ inline int VoEId(int veId, int chId) {
return (int)((veId << 16) + chId);
}
inline int VoEModuleId(int veId, int chId) {
return (int)((veId << 16) + chId);
}
// Convert module ID to internal VoE channel ID
inline int VoEChannelId(int moduleId) {
return (int)(moduleId & 0xffff);
}
} // namespace webrtc
#if defined(_WIN32)
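For context, the packing scheme behind these helpers: VoEId() (which stays) puts the engine id in the high 16 bits and the channel id in the low 16 bits, and the removed VoEChannelId() simply masked the channel id back out. A small worked example, written as standalone functions for illustration:

    // Illustrative mirrors of VoEId() and the removed VoEChannelId().
    constexpr int PackVoEId(int engine_id, int channel_id) {
      return (engine_id << 16) + channel_id;
    }

    constexpr int UnpackVoEChannelId(int module_id) {
      return module_id & 0xffff;
    }

    // Engine 3, channel 7 packs to 0x00030007 and unpacks back to channel 7.
    static_assert(PackVoEId(3, 7) == 0x00030007, "pack");
    static_assert(UnpackVoEChannelId(PackVoEId(3, 7)) == 7, "round trip");

With AudioFrame::id_ gone there is no module id left to round-trip, which is why the two conversion helpers can be deleted.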