
Summary:
- Removes the dependency on the 'enable_android_opensl' compiler flag.
  Instead, OpenSL ES is always supported, and will be enabled for devices
  that support low-latency output.
- WebRTC no longer supports OpenSL ES for the input/recording side.
- Removes old code and demos using OpenSL ES for audio input.
- Improves accuracy of total delay estimates (better AEC performance).
- Reduces round-trip audio latency, especially when OpenSL can be used.

Performance verified on: Nexus 5, 6, 7 and 9; Samsung Galaxy S4 and S6;
Android One device.

BUG=4573,2982,2175,3590
TEST=modules_unittests --gtest_filter=AudioDevice*, AppRTCDemo and WebRTCDemo

R=magjed@webrtc.org, phoglund@webrtc.org, tommi@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/51759004

Cr-Commit-Position: refs/heads/master@{#9208}

/*
 *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_FINE_AUDIO_BUFFER_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_FINE_AUDIO_BUFFER_H_

#include "webrtc/base/scoped_ptr.h"
#include "webrtc/typedefs.h"

namespace webrtc {

class AudioDeviceBuffer;

// FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
// corresponding to 10ms of data. It then allows for this data to be pulled at
// a finer or coarser granularity. I.e., by interacting with this class instead
// of directly with the AudioDeviceBuffer, one can ask for any number of audio
// data samples.
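//
// A minimal usage sketch (illustrative only; the AudioDeviceBuffer pointer
// |adb|, the 48000 Hz rate, 5ms buffers and 16-bit mono PCM are assumptions
// made for this example, not requirements of the class):
//
//   const int sample_rate = 48000;
//   const int frames_per_buffer = 240;  // 5ms at 48000 Hz (assumed).
//   const int bytes_per_frame = 2;      // 16-bit mono PCM (assumed).
//   FineAudioBuffer fine_buffer(adb,
//                               frames_per_buffer * bytes_per_frame,
//                               sample_rate);
//   rtc::scoped_ptr<int8_t[]> audio(
//       new int8_t[fine_buffer.RequiredBufferSizeBytes()]);
//   // In each audio callback, pull exactly |desired_frame_size_bytes| bytes:
//   fine_buffer.GetBufferData(audio.get());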
class FineAudioBuffer {
 public:
  // |device_buffer| is a buffer that provides 10ms of audio data.
  // |desired_frame_size_bytes| is the number of bytes of audio data
  // (not samples) that |GetBufferData| should return on success.
  // |sample_rate| is the sample rate of the audio data. This is needed because
  // |device_buffer| delivers 10ms of data. Given the sample rate, the number
  // of samples can be calculated.
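  // For example (illustrative numbers only): at 48000 Hz, one 10ms chunk
  // corresponds to 480 samples, i.e. 960 bytes assuming 16-bit mono PCM.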
  FineAudioBuffer(AudioDeviceBuffer* device_buffer,
                  int desired_frame_size_bytes,
                  int sample_rate);
  ~FineAudioBuffer();

  // Returns the required size of |buffer| when calling GetBufferData. If the
  // buffer is smaller, memory trampling will happen.
  // |desired_frame_size_bytes| and |sample_rate| are as described in the
  // constructor.
  int RequiredBufferSizeBytes();

  // |buffer| must be of equal or greater size than what is returned by
  // RequiredBufferSizeBytes. This is to avoid unnecessary memcpy.
  void GetBufferData(int8_t* buffer);

 private:
  // Device buffer that provides 10ms chunks of audio data.
  AudioDeviceBuffer* device_buffer_;
  // Number of bytes delivered per GetBufferData call.
  int desired_frame_size_bytes_;
  int sample_rate_;
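  // Number of audio samples in one 10ms chunk, derived from |sample_rate_|
  // (presumably sample_rate_ * 10 / 1000; the exact derivation lives in the
  // .cc file, which is not shown here).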
  int samples_per_10_ms_;
  // Convenience parameter to avoid converting from samples to bytes.
  int bytes_per_10_ms_;

  // Storage for samples that are not yet asked for.
  rtc::scoped_ptr<int8_t[]> cache_buffer_;
  // Location of the first unread sample.
  int cached_buffer_start_;
  // Number of bytes stored in the cache.
  int cached_bytes_;
};

}  // namespace webrtc

#endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_FINE_AUDIO_BUFFER_H_