Revert "Simplification and refactoring of the AudioBuffer code"
This reverts commit 81c0cf287c8514cb1cd6f3baca484d668c6eb128.

Reason for revert: internal test failures

Original change's description:
> Simplification and refactoring of the AudioBuffer code
>
> This CL performs a major refactoring and simplification
> of the AudioBuffer code that:
> -Removes 7 of the 9 internal buffers of the AudioBuffer.
> -Avoids the implicit copying required to keep the
> internal buffers in sync.
> -Removes all code relating to handling of fixed-point
> sample data in the AudioBuffer.
> -Changes the naming of the class methods to reflect
> that only floating point is handled.
> -Corrects some bugs in the code.
> -Extends the handling of internal downmixing to be
> more generic.
>
> Bug: webrtc:10882
> Change-Id: I12c8af156fbe366b154744a0a1b3d926bf7be572
> Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/149828
> Commit-Queue: Per Åhgren <peah@webrtc.org>
> Reviewed-by: Gustaf Ullberg <gustaf@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#28928}

TBR=gustaf@webrtc.org,peah@webrtc.org

Change-Id: I2729e3ad24b3a9b40b368b84cb565c859e79b51e
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: webrtc:10882
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/150084
Reviewed-by: Steve Anton <steveanton@webrtc.org>
Commit-Queue: Steve Anton <steveanton@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#28931}
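The downmixing and naming changes described above map directly onto the interface visible in the audio_buffer.h diff below. The following is a minimal sketch (not part of this CL) of how the refactored, here reverted, interface is driven; it assumes a caller that already holds a constructed webrtc::AudioBuffer, and the include path, channel index, and sample write are illustrative only.

// Sketch only: exercises the generic downmixing controls and the float-only
// accessor naming from the refactored AudioBuffer declared below.
#include "modules/audio_processing/audio_buffer.h"

void ConfigureDownmixAndTouchData(webrtc::AudioBuffer& buffer) {
  // Either downmix by averaging all input channels ...
  buffer.set_downmixing_by_averaging();
  // ... or by selecting one specific input channel instead (index assumed):
  // buffer.set_downmixing_to_specific_channel(0);

  // Float-only naming: channels() replaces the deprecated channels_f().
  float* const* chans = buffer.channels();
  chans[0][0] = 0.0f;  // indexed as channels()[channel][sample]
}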
@@ -23,142 +23,114 @@

namespace webrtc {

class IFChannelBuffer;
class PushSincResampler;
class SplittingFilter;

enum Band { kBand0To8kHz = 0, kBand8To16kHz = 1, kBand16To24kHz = 2 };

// Stores any audio data in a way that allows the audio processing module to
// operate on it in a controlled manner.
class AudioBuffer {
 public:
  AudioBuffer(size_t input_rate,
              size_t input_num_channels,
              size_t buffer_rate,
              size_t buffer_num_channels,
              size_t output_rate);
  // TODO(ajm): Switch to take ChannelLayouts.
  AudioBuffer(size_t input_num_frames,
              size_t num_input_channels,
              size_t process_num_frames,
              size_t num_process_channels,
              size_t output_num_frames);
  virtual ~AudioBuffer();

  AudioBuffer(const AudioBuffer&) = delete;
  AudioBuffer& operator=(const AudioBuffer&) = delete;

  // Specify that downmixing should be done by selecting a single channel.
  void set_downmixing_to_specific_channel(size_t channel);

  // Specify that downmixing should be done by averaging all channels,.
  void set_downmixing_by_averaging();

  // Set the number of channels in the buffer. The specified number of channels
  // cannot be larger than the specified buffer_num_channels. The number is also
  // reset at each call to CopyFrom or InterleaveFrom.
  size_t num_channels() const;
  size_t num_proc_channels() const { return num_proc_channels_; }
  void set_num_channels(size_t num_channels);
  size_t num_frames() const;
  size_t num_frames_per_band() const;
  size_t num_bands() const;

  size_t num_channels() const { return num_channels_; }
  size_t num_frames() const { return buffer_num_frames_; }
  size_t num_frames_per_band() const { return num_split_frames_; }
  size_t num_bands() const { return num_bands_; }

  // Returns pointer arrays to the full-band channels.
  // Returns a pointer array to the full-band channels.
  // Usage:
  // channels()[channel][sample].
  // Where:
  // 0 <= channel < |buffer_num_channels_|
  // 0 <= sample < |buffer_num_frames_|
  float* const* channels() { return data_->channels(); }
  const float* const* channels_const() const { return data_->channels(); }
  // 0 <= channel < |num_proc_channels_|
  // 0 <= sample < |proc_num_frames_|
  float* const* channels_f();
  const float* const* channels_const_f() const;

  // Returns pointer arrays to the bands for a specific channel.
  // Returns a pointer array to the bands for a specific channel.
  // Usage:
  // split_bands(channel)[band][sample].
  // Where:
  // 0 <= channel < |buffer_num_channels_|
  // 0 <= channel < |num_proc_channels_|
  // 0 <= band < |num_bands_|
  // 0 <= sample < |num_split_frames_|
  const float* const* split_bands_const(size_t channel) const {
    return split_data_.get() ? split_data_->bands(channel)
                             : data_->bands(channel);
  }
  float* const* split_bands(size_t channel) {
    return split_data_.get() ? split_data_->bands(channel)
                             : data_->bands(channel);
  }
  float* const* split_bands_f(size_t channel);
  const float* const* split_bands_const_f(size_t channel) const;

  // Returns a pointer array to the channels for a specific band.
  // Usage:
  // split_channels(band)[channel][sample].
  // Where:
  // 0 <= band < |num_bands_|
  // 0 <= channel < |buffer_num_channels_|
  // 0 <= channel < |num_proc_channels_|
  // 0 <= sample < |num_split_frames_|
  const float* const* split_channels_const(Band band) const {
    if (split_data_.get()) {
      return split_data_->channels(band);
    } else {
      return band == kBand0To8kHz ? data_->channels() : nullptr;
    }
  }
  const float* const* split_channels_const_f(Band band) const;

  // Copies data into the buffer.
  void CopyFrom(const AudioFrame* frame);
  // Use for int16 interleaved data.
  void DeinterleaveFrom(const AudioFrame* audioFrame);
  // If |data_changed| is false, only the non-audio data members will be copied
  // to |frame|.
  void InterleaveTo(AudioFrame* frame) const;

  // Use for float deinterleaved data.
  void CopyFrom(const float* const* data, const StreamConfig& stream_config);

  // Copies data from the buffer.
  void CopyTo(AudioFrame* frame) const;
  void CopyTo(const StreamConfig& stream_config, float* const* data);

  // Splits the buffer data into frequency bands.
  // Splits the signal into different bands.
  void SplitIntoFrequencyBands();

  // Recombines the frequency bands into a full-band signal.
  // Recombine the different bands into one signal.
  void MergeFrequencyBands();

  // Copies the split bands data into the integer two-dimensional array.
  void ExportSplitChannelData(size_t channel, int16_t* const* split_band_data);
  void CopySplitChannelDataTo(size_t channel, int16_t* const* split_band_data);

  // Copies the data in the integer two-dimensional array into the split_bands
  // data.
  void ImportSplitChannelData(size_t channel,
                              const int16_t* const* split_band_data);
  void CopySplitChannelDataFrom(size_t channel,
                                const int16_t* const* split_band_data);

  static const size_t kMaxSplitFrameLength = 160;
  static const size_t kMaxNumBands = 3;

  // Deprecated methods, will be removed soon.
  float* const* channels_f() { return channels(); }
  const float* const* channels_const_f() const { return channels_const(); }
  const float* const* split_bands_const_f(size_t channel) const {
    return split_bands_const(channel);
  }
  float* const* split_bands_f(size_t channel) { return split_bands(channel); }
  const float* const* split_channels_const_f(Band band) const {
    return split_channels_const(band);
  }
  void DeinterleaveFrom(const AudioFrame* frame) { CopyFrom(frame); }
  void InterleaveTo(AudioFrame* frame) const { CopyTo(frame); }

 private:
  FRIEND_TEST_ALL_PREFIXES(AudioBufferTest,
                           SetNumChannelsSetsChannelBuffersNumChannels);
  void RestoreNumChannels();
  // Called from DeinterleaveFrom() and CopyFrom().
  void InitForNewData();

  // The audio is passed into DeinterleaveFrom() or CopyFrom() with input
  // format (samples per channel and number of channels).
  const size_t input_num_frames_;
  const size_t input_num_channels_;
  const size_t buffer_num_frames_;
  const size_t buffer_num_channels_;
  const size_t num_input_channels_;
  // The audio is stored by DeinterleaveFrom() or CopyFrom() with processing
  // format.
  const size_t proc_num_frames_;
  const size_t num_proc_channels_;
  // The audio is returned by InterleaveTo() and CopyTo() with output samples
  // per channels and the current number of channels. This last one can be
  // changed at any time using set_num_channels().
  const size_t output_num_frames_;

  size_t num_channels_;

  size_t num_bands_;
  size_t num_split_frames_;

  std::unique_ptr<ChannelBuffer<float>> data_;
  std::unique_ptr<ChannelBuffer<float>> split_data_;
  std::unique_ptr<IFChannelBuffer> data_;
  std::unique_ptr<IFChannelBuffer> split_data_;
  std::unique_ptr<SplittingFilter> splitting_filter_;
  std::unique_ptr<ChannelBuffer<float>> output_buffer_;
  std::unique_ptr<IFChannelBuffer> input_buffer_;
  std::unique_ptr<IFChannelBuffer> output_buffer_;
  std::unique_ptr<ChannelBuffer<float>> process_buffer_;
  std::vector<std::unique_ptr<PushSincResampler>> input_resamplers_;
  std::vector<std::unique_ptr<PushSincResampler>> output_resamplers_;
  bool downmix_by_averaging_ = true;
  size_t channel_for_downmixing_ = 0;
};

}  // namespace webrtc
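Read together, the accessor comments above describe the intended round trip: copy audio in, split it into frequency bands, operate on the band data, merge, and copy back out. The sketch below (not from this CL) illustrates that flow using the refactored method names; the buffer and a matching StreamConfig are assumed to be set up elsewhere, and the 0.5f gain and the choice of channel 0 are illustrative only.

// Sketch of one processing pass through AudioBuffer, based on the
// declarations above. Construction of the buffer and of a StreamConfig with
// matching rate and channel count is assumed to happen elsewhere.
#include <cstddef>

#include "modules/audio_processing/audio_buffer.h"
#include "modules/audio_processing/include/audio_processing.h"  // StreamConfig

void ProcessOneFrame(webrtc::AudioBuffer& buffer,
                     const webrtc::StreamConfig& config,
                     const float* const* input,  // deinterleaved float audio
                     float* const* output) {
  buffer.CopyFrom(input, config);     // copy data into the buffer

  buffer.SplitIntoFrequencyBands();   // split the signal into bands
  // split_bands(channel)[band][sample]: attenuate the 0-8 kHz band of channel 0.
  float* low_band = buffer.split_bands(0)[webrtc::kBand0To8kHz];
  for (size_t i = 0; i < buffer.num_frames_per_band(); ++i) {
    low_band[i] *= 0.5f;
  }
  buffer.MergeFrequencyBands();       // recombine the bands into one signal

  buffer.CopyTo(config, output);      // copy the processed data back out
}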