Add a deinterleaved float interface to AudioProcessing.

This is mainly to support the native audio format in Chrome. For now the
implementation just moves the float->int conversion under the hood, but we
will transition AudioProcessing towards supporting this format
throughout.

- Add a test which verifies we get identical output with the float and
int interfaces.
- The float and int wrappers are tasked with conversion to the
AudioBuffer format. A new shared Process/Analyze method does most of
the work.
- Add a new field to the debug.proto to hold deinterleaved data.
- Add helpers to audio_utils.cc, and start using numeric_limits (a sketch
of such a conversion helper follows this list).
- Note that there was no performance difference between numeric_limits
and a literal value when measured on Linux using gcc or clang.
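
As an aside, here is a minimal sketch of what a numeric_limits-based
float->int16 conversion helper might look like; the name FloatToS16 and the
exact rounding behavior are illustrative assumptions, not necessarily the
committed code:

  #include <cstdint>
  #include <limits>

  typedef std::numeric_limits<int16_t> limits_int16;

  // Scale a float sample in [-1, 1] to the full int16_t range: positive
  // samples scale by max() (32767), negative samples by min() (-32768),
  // and the +/-0.5f term rounds to the nearest integer.
  static inline int16_t FloatToS16(float v) {
    if (v > 0)
      return v >= 1 ? limits_int16::max()
                    : static_cast<int16_t>(v * limits_int16::max() + 0.5f);
    return v <= -1 ? limits_int16::min()
                   : static_cast<int16_t>(-v * limits_int16::min() - 0.5f);
  }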

BUG=2894
R=aluebs@webrtc.org, bjornv@webrtc.org, henrikg@webrtc.org, tommi@webrtc.org, turaj@webrtc.org, xians@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/9179004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@5641 4adac7df-926f-26a2-2b94-8c16560cd09d
Author: andrew@webrtc.org
Date:   2014-03-04 20:58:13 +00:00
Parent: b90991dade
Commit: 17e40641b3
12 changed files with 660 additions and 250 deletions


@@ -135,6 +135,16 @@ struct ExperimentalAgc {
//
class AudioProcessing {
 public:
  enum ChannelLayout {
    kMono,
    // Left, right.
    kStereo,
    // Mono, keyboard mic.
    kMonoAndKeyboard,
    // Left, right, keyboard mic.
    kStereoAndKeyboard
  };
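
  // Illustration only (a hypothetical helper, not part of this change):
  // the channel count implied by each layout, with the keyboard mic
  // counting as one extra channel.
  //
  //   int ChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
  //     switch (layout) {
  //       case AudioProcessing::kMono:
  //         return 1;
  //       case AudioProcessing::kStereo:
  //       case AudioProcessing::kMonoAndKeyboard:
  //         return 2;
  //       case AudioProcessing::kStereoAndKeyboard:
  //         return 3;
  //     }
  //     return -1;
  //   }
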
  // Creates an APM instance. Use one instance for every primary audio stream
  // requiring processing. On the client-side, this would typically be one
  // instance for the near-end stream, and additional instances for each far-end
@@ -205,6 +215,17 @@ class AudioProcessing {
  // method, it will trigger an initialization.
  virtual int ProcessStream(AudioFrame* frame) = 0;

  // Accepts deinterleaved float audio with the range [-1, 1]. Each element
  // of |data| points to a channel buffer, arranged according to
  // |input_layout|. At output, the channels will be arranged according to
  // |output_layout|.
  // TODO(ajm): Output layout conversion does not yet work.
  virtual int ProcessStream(float* const* data,
                            int samples_per_channel,
                            int sample_rate_hz,
                            ChannelLayout input_layout,
                            ChannelLayout output_layout) = 0;
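
  // A hypothetical caller of this overload (illustration only): process
  // 10 ms of 48 kHz stereo capture audio held in two deinterleaved float
  // buffers with samples in [-1, 1], given an initialized
  // AudioProcessing* |apm|.
  //
  //   const int kSampleRateHz = 48000;
  //   const int kSamplesPerChannel = kSampleRateHz / 100;  // 10 ms.
  //   float left[kSamplesPerChannel];
  //   float right[kSamplesPerChannel];
  //   // ... fill |left| and |right| with capture samples ...
  //   float* channels[] = {left, right};
  //   int err = apm->ProcessStream(channels, kSamplesPerChannel,
  //                                kSampleRateHz, AudioProcessing::kStereo,
  //                                AudioProcessing::kStereo);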
  // Analyzes a 10 ms |frame| of the reverse direction audio stream. The frame
  // will not be modified. On the client-side, this is the far-end (or to be
  // rendered) audio.
@@ -222,6 +243,13 @@ class AudioProcessing {
  // TODO(ajm): add const to input; requires an implementation fix.
  virtual int AnalyzeReverseStream(AudioFrame* frame) = 0;

  // Accepts deinterleaved float audio with the range [-1, 1]. Each element
  // of |data| points to a channel buffer, arranged according to |layout|.
  virtual int AnalyzeReverseStream(const float* const* data,
                                   int samples_per_channel,
                                   int sample_rate_hz,
                                   ChannelLayout layout) = 0;
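
  // Similarly, a hypothetical caller feeding 10 ms of mono far-end
  // (render) audio; the data is only analyzed, never modified, hence the
  // const pointers (same illustrative |apm| and constants as above).
  //
  //   float render[kSamplesPerChannel];
  //   const float* const render_channels[] = {render};
  //   int err = apm->AnalyzeReverseStream(render_channels,
  //                                       kSamplesPerChannel,
  //                                       kSampleRateHz,
  //                                       AudioProcessing::kMono);
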
  // This must be called if and only if echo processing is enabled.
  //
  // Sets the |delay| in ms between AnalyzeReverseStream() receiving a far-end