Revert "Revert part of r7561, "Refactor audio conversion functions.""

This restores the conversion changes to AudioProcessing originally
added in r7561, with minor alterations to ensure it passes all tests.

TBR=kwiberg

Review URL: https://webrtc-codereview.appspot.com/28899004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7574 4adac7df-926f-26a2-2b94-8c16560cd09d
Author: andrew@webrtc.org
Date: 2014-10-31 04:58:14 +00:00
parent 14146e40aa
commit 8328e7c44d
6 changed files with 67 additions and 49 deletions


@@ -51,18 +51,11 @@ int KeyboardChannelIndex(AudioProcessing::ChannelLayout layout) {
   return -1;
 }
 
-void StereoToMono(const float* left, const float* right, float* out,
+template <typename T>
+void StereoToMono(const T* left, const T* right, T* out,
                   int samples_per_channel) {
-  for (int i = 0; i < samples_per_channel; ++i) {
+  for (int i = 0; i < samples_per_channel; ++i)
     out[i] = (left[i] + right[i]) / 2;
-  }
-}
-
-void StereoToMono(const int16_t* left, const int16_t* right, int16_t* out,
-                  int samples_per_channel) {
-  for (int i = 0; i < samples_per_channel; ++i) {
-    out[i] = (left[i] + right[i]) >> 1;
-  }
 }
 
 }  // namespace
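
For reference, a minimal sketch of the merged template in use for both sample types; the demo buffers, values, and the Demo() wrapper are illustrative and not part of this change:

#include <cstdint>

// Same shape as the templated helper above: average two channels into one.
template <typename T>
void StereoToMono(const T* left, const T* right, T* out,
                  int samples_per_channel) {
  for (int i = 0; i < samples_per_channel; ++i)
    out[i] = (left[i] + right[i]) / 2;
}

void Demo() {
  float lf[3] = {0.5f, -0.25f, 1.0f}, rf[3] = {0.5f, 0.25f, 0.0f}, mf[3];
  int16_t li[3] = {100, -101, 32767}, ri[3] = {100, 100, 32767}, mi[3];
  StereoToMono(lf, rf, mf, 3);  // T = float: true average
  StereoToMono(li, ri, mi, 3);  // T = int16_t: integer division truncates toward zero
}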
@@ -114,13 +107,7 @@ class IFChannelBuffer {
   void RefreshI() {
     if (!ivalid_) {
       assert(fvalid_);
-      const float* const float_data = fbuf_.data();
-      int16_t* const int_data = ibuf_.data();
-      const int length = ibuf_.length();
-      for (int i = 0; i < length; ++i)
-        int_data[i] = WEBRTC_SPL_SAT(std::numeric_limits<int16_t>::max(),
-                                     float_data[i],
-                                     std::numeric_limits<int16_t>::min());
+      FloatS16ToS16(fbuf_.data(), ibuf_.length(), ibuf_.data());
       ivalid_ = true;
     }
   }
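
FloatS16ToS16 is expected to perform the same per-sample saturation as the removed WEBRTC_SPL_SAT loop; a hedged sketch of that idea follows (the Sketch suffix marks it as an illustration, not the audio_util implementation, which may also round):

#include <cstdint>
#include <limits>

// Sketch: clamp floats already expressed in the S16 range to int16_t,
// mirroring what the deleted loop computed (rounding details may differ).
void FloatS16ToS16Sketch(const float* src, int length, int16_t* dest) {
  for (int i = 0; i < length; ++i) {
    float v = src[i];
    if (v > std::numeric_limits<int16_t>::max())
      v = std::numeric_limits<int16_t>::max();
    if (v < std::numeric_limits<int16_t>::min())
      v = std::numeric_limits<int16_t>::min();
    dest[i] = static_cast<int16_t>(v);
  }
}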
@@ -228,10 +215,10 @@ void AudioBuffer::CopyFrom(const float* const* data,
     data_ptr = process_buffer_->channels();
   }
 
-  // Convert to int16.
+  // Convert to the S16 range.
   for (int i = 0; i < num_proc_channels_; ++i) {
-    FloatToS16(data_ptr[i], proc_samples_per_channel_,
-               channels_->ibuf()->channel(i));
+    FloatToFloatS16(data_ptr[i], proc_samples_per_channel_,
+                    channels_->fbuf()->channel(i));
   }
 }
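
CopyFrom now keeps the data in float but rescaled to S16-range values; a sketch of the presumed mapping (the 32768 scale factor and the Sketch-suffixed name are assumptions, not copied from audio_util):

// Sketch: map [-1.0, 1.0] float samples onto S16-valued floats, so later
// stages can convert to int16_t lazily via IFChannelBuffer.
void FloatToFloatS16Sketch(const float* src, int length, float* dest) {
  for (int i = 0; i < length; ++i)
    dest[i] = src[i] * 32768.0f;  // assumed scale; the real helper may treat the positive edge differently
}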
@@ -241,16 +228,15 @@ void AudioBuffer::CopyTo(int samples_per_channel,
   assert(samples_per_channel == output_samples_per_channel_);
   assert(ChannelsFromLayout(layout) == num_proc_channels_);
 
-  // Convert to float.
+  // Convert to the float range.
   float* const* data_ptr = data;
   if (output_samples_per_channel_ != proc_samples_per_channel_) {
     // Convert to an intermediate buffer for subsequent resampling.
     data_ptr = process_buffer_->channels();
   }
   for (int i = 0; i < num_proc_channels_; ++i) {
-    S16ToFloat(channels_->ibuf()->channel(i),
-               proc_samples_per_channel_,
-               data_ptr[i]);
+    FloatS16ToFloat(channels_->fbuf()->channel(i), proc_samples_per_channel_,
+                    data_ptr[i]);
   }
 
   // Resample.
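
And the inverse direction used by CopyTo, again only as a hedged sketch with an assumed symmetric scale:

// Sketch: S16-range floats back to [-1.0, 1.0] on the way out.
void FloatS16ToFloatSketch(const float* src, int length, float* dest) {
  for (int i = 0; i < length; ++i)
    dest[i] = src[i] / 32768.0f;  // assumed scale, symmetric for simplicity
}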
@@ -449,12 +435,7 @@ void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
     // Downmix directly; no explicit deinterleaving needed.
     int16_t* downmixed = channels_->ibuf()->channel(0);
     for (int i = 0; i < input_samples_per_channel_; ++i) {
-      // HACK(ajm): The downmixing in the int16_t path is in practice never
-      // called from production code. We do this weird scaling to and from float
-      // to satisfy tests checking for bit-exactness with the float path.
-      float downmix_float = (S16ToFloat(frame->data_[i * 2]) +
-                             S16ToFloat(frame->data_[i * 2 + 1])) / 2;
-      downmixed[i] = FloatToS16(downmix_float);
+      downmixed[i] = (frame->data_[i * 2] + frame->data_[i * 2 + 1]) / 2;
     }
   } else {
     assert(num_proc_channels_ == num_input_channels_);
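
A self-contained sketch of the direct int16_t downmix the hunk above switches to (the function name and the notion of a separate mono buffer are illustrative):

#include <cstdint>

// Sketch: average each interleaved L/R pair into a mono buffer. The sum is
// done in int after integer promotion, so it cannot overflow int16_t.
void DownmixInterleavedStereoToMonoSketch(const int16_t* interleaved,
                                          int samples_per_channel,
                                          int16_t* mono) {
  for (int i = 0; i < samples_per_channel; ++i)
    mono[i] = (interleaved[i * 2] + interleaved[i * 2 + 1]) / 2;
}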