Remove unused members from AudioFrame:

- The interleaved_ field. It is never set to anything other than true, and AudioFrame data appears to always be treated as interleaved.
- The Append() method (a manual replacement is sketched after this list).
- operator-=().
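
Callers that still need concatenation can do it by hand. A minimal sketch, not part of this change: AppendFrames is a hypothetical helper, the include path is an assumption, and it presumes both frames share a channel count.

    #include <cassert>
    #include <cstring>

    #include "webrtc/modules/include/module_common_types.h"  // Path is an assumption.

    // Concatenates |rhs| onto |lhs| the way the removed AudioFrame::Append()
    // did, for interleaved int16_t samples.
    void AppendFrames(webrtc::AudioFrame* lhs, const webrtc::AudioFrame& rhs) {
      assert(lhs->num_channels_ == rhs.num_channels_);
      const size_t offset = lhs->samples_per_channel_ * lhs->num_channels_;
      const size_t count = rhs.samples_per_channel_ * rhs.num_channels_;
      // Stay within the fixed-size data_ buffer.
      assert(offset + count <= webrtc::AudioFrame::kMaxDataSizeSamples);
      std::memcpy(&lhs->data_[offset], rhs.data_, count * sizeof(int16_t));
      lhs->samples_per_channel_ += rhs.samples_per_channel_;
    }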

BUG=

Review URL: https://codereview.webrtc.org/1830713003

Cr-Commit-Position: refs/heads/master@{#12152}
Author: solenberg
Date: 2016-03-29 16:42:06 -07:00
Committed by: Commit bot
Parent: 059dadf0c4
Commit: 0d343fa39d

3 changed files with 0 additions and 57 deletions


@@ -79,7 +79,6 @@ void SyncBuffer::GetNextAudioInterleaved(size_t requested_len,
ReadInterleavedFromIndex(next_index_, samples_to_read, output->data_);
const size_t samples_read_per_channel = tot_samples_read / Channels();
next_index_ += samples_read_per_channel;
output->interleaved_ = true;
output->num_channels_ = Channels();
output->samples_per_channel_ = samples_read_per_channel;
}


@@ -505,21 +505,17 @@ class AudioFrame {
// contents of |data_|).
void Reset();
// |interleaved_| is not changed by this method.
void UpdateFrame(int id, uint32_t timestamp, const int16_t* data,
size_t samples_per_channel, int sample_rate_hz,
SpeechType speech_type, VADActivity vad_activity,
size_t num_channels = 1);
AudioFrame& Append(const AudioFrame& rhs);
void CopyFrom(const AudioFrame& src);
void Mute();
AudioFrame& operator>>=(const int rhs);
AudioFrame& operator+=(const AudioFrame& rhs);
AudioFrame& operator-=(const AudioFrame& rhs);
int id_;
// RTP timestamp of the first sample in the AudioFrame.
@@ -536,7 +532,6 @@ class AudioFrame {
size_t num_channels_;
SpeechType speech_type_;
VADActivity vad_activity_;
bool interleaved_;
private:
RTC_DISALLOW_COPY_AND_ASSIGN(AudioFrame);
@@ -561,7 +556,6 @@ inline void AudioFrame::Reset() {
num_channels_ = 0;
speech_type_ = kUndefined;
vad_activity_ = kVadUnknown;
interleaved_ = true;
}
inline void AudioFrame::UpdateFrame(int id,
@@ -601,7 +595,6 @@ inline void AudioFrame::CopyFrom(const AudioFrame& src) {
speech_type_ = src.speech_type_;
vad_activity_ = src.vad_activity_;
num_channels_ = src.num_channels_;
interleaved_ = src.interleaved_;
const size_t length = samples_per_channel_ * num_channels_;
assert(length <= kMaxDataSizeSamples);
@@ -622,30 +615,6 @@ inline AudioFrame& AudioFrame::operator>>=(const int rhs) {
return *this;
}
inline AudioFrame& AudioFrame::Append(const AudioFrame& rhs) {
// Sanity check
assert((num_channels_ > 0) && (num_channels_ < 3));
assert(interleaved_ == rhs.interleaved_);
if ((num_channels_ > 2) || (num_channels_ < 1)) return *this;
if (num_channels_ != rhs.num_channels_) return *this;
if ((vad_activity_ == kVadActive) || rhs.vad_activity_ == kVadActive) {
vad_activity_ = kVadActive;
} else if (vad_activity_ == kVadUnknown || rhs.vad_activity_ == kVadUnknown) {
vad_activity_ = kVadUnknown;
}
if (speech_type_ != rhs.speech_type_) {
speech_type_ = kUndefined;
}
size_t offset = samples_per_channel_ * num_channels_;
for (size_t i = 0; i < rhs.samples_per_channel_ * rhs.num_channels_; i++) {
data_[offset + i] = rhs.data_[i];
}
samples_per_channel_ += rhs.samples_per_channel_;
return *this;
}
namespace {
inline int16_t ClampToInt16(int32_t input) {
if (input < -0x00008000) {
@@ -661,7 +630,6 @@ inline int16_t ClampToInt16(int32_t input) {
inline AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) {
// Sanity check
assert((num_channels_ > 0) && (num_channels_ < 3));
assert(interleaved_ == rhs.interleaved_);
if ((num_channels_ > 2) || (num_channels_ < 1)) return *this;
if (num_channels_ != rhs.num_channels_) return *this;
@@ -698,29 +666,6 @@ inline AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) {
return *this;
}
inline AudioFrame& AudioFrame::operator-=(const AudioFrame& rhs) {
// Sanity check
assert((num_channels_ > 0) && (num_channels_ < 3));
assert(interleaved_ == rhs.interleaved_);
if ((num_channels_ > 2) || (num_channels_ < 1)) return *this;
if ((samples_per_channel_ != rhs.samples_per_channel_) ||
(num_channels_ != rhs.num_channels_)) {
return *this;
}
if ((vad_activity_ != kVadPassive) || rhs.vad_activity_ != kVadPassive) {
vad_activity_ = kVadUnknown;
}
speech_type_ = kUndefined;
for (size_t i = 0; i < samples_per_channel_ * num_channels_; i++) {
int32_t wrap_guard =
static_cast<int32_t>(data_[i]) - static_cast<int32_t>(rhs.data_[i]);
data_[i] = ClampToInt16(wrap_guard);
}
return *this;
}
inline bool IsNewerSequenceNumber(uint16_t sequence_number,
uint16_t prev_sequence_number) {
// Distinguish between elements that are exactly 0x8000 apart.


@@ -80,7 +80,6 @@ void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
void AudioFrameOperations::Mute(AudioFrame* frame, bool previous_frame_muted,
bool current_frame_muted) {
RTC_DCHECK(frame);
RTC_DCHECK(frame->interleaved_);
if (!previous_frame_muted && !current_frame_muted) {
// Not muted, don't touch.
} else if (previous_frame_muted && current_frame_muted) {