Cleanup of iOS AudioDevice implementation

TBR=tkchin
BUG=webrtc:4789
TEST=modules_unittests --gtest_filter=AudioDeviceTest* and AppRTCDemo

Review URL: https://codereview.webrtc.org/1206783002 .

Cr-Commit-Position: refs/heads/master@{#9578}
This commit is contained in:
henrika
2015-07-14 17:04:08 +02:00
parent d6f1a38165
commit ba35d05a49
18 changed files with 2790 additions and 2388 deletions

View File

@ -37,6 +37,7 @@ static const int kMaxLogLineSize = 1024 - 60;
#include <vector>
#include "webrtc/base/logging.h"
#include "webrtc/base/platform_thread.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/stringencode.h"
#include "webrtc/base/stringutils.h"
@ -111,10 +112,8 @@ LogMessage::LogMessage(const char* file, int line, LoggingSeverity sev,
}
if (thread_) {
#if defined(WEBRTC_WIN)
DWORD id = GetCurrentThreadId();
print_stream_ << "[" << std::hex << id << std::dec << "] ";
#endif // WEBRTC_WIN
PlatformThreadId id = CurrentThreadId();
print_stream_ << "[" << std::dec << id << "] ";
}
if (err_ctx != ERRCTX_NONE) {

View File

@ -16,6 +16,7 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
#include "webrtc/modules/audio_device/audio_device_config.h"
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/modules/utility/interface/helpers_android.h"
@ -23,60 +24,6 @@
namespace webrtc {
// Value type holding the parameters of an audio stream: sample rate,
// channel count and buffer sizes. All sizes are measured in frames
// unless a method name says otherwise.
class AudioParameters {
 public:
  enum { kBitsPerSample = 16 };

  AudioParameters()
      : sample_rate_(0),
        channels_(0),
        frames_per_buffer_(0),
        frames_per_10ms_buffer_(0),
        bits_per_sample_(kBitsPerSample) {}

  AudioParameters(int sample_rate, int channels, int frames_per_buffer)
      : sample_rate_(sample_rate),
        channels_(channels),
        frames_per_buffer_(frames_per_buffer),
        frames_per_10ms_buffer_(sample_rate / 100),
        bits_per_sample_(kBitsPerSample) {}

  // Re-initializes all stream parameters. The 10 ms buffer size is derived
  // from the sample rate (one such buffer holds 10 ms of audio).
  void reset(int sample_rate, int channels, int frames_per_buffer) {
    sample_rate_ = sample_rate;
    channels_ = channels;
    frames_per_buffer_ = frames_per_buffer;
    frames_per_10ms_buffer_ = sample_rate / 100;
  }

  int sample_rate() const { return sample_rate_; }
  int channels() const { return channels_; }
  int frames_per_buffer() const { return frames_per_buffer_; }
  int frames_per_10ms_buffer() const { return frames_per_10ms_buffer_; }
  int bits_per_sample() const { return bits_per_sample_; }

  // True once sample rate, channel count and buffer size are all positive.
  bool is_valid() const {
    return sample_rate_ > 0 && channels_ > 0 && frames_per_buffer_ > 0;
  }

  int GetBytesPerFrame() const { return channels_ * bits_per_sample_ / 8; }
  int GetBytesPerBuffer() const {
    return frames_per_buffer_ * GetBytesPerFrame();
  }
  int GetBytesPer10msBuffer() const {
    return frames_per_10ms_buffer_ * GetBytesPerFrame();
  }
  // Buffer duration in milliseconds; 0.0f when the sample rate is unset
  // (avoids division by zero).
  float GetBufferSizeInMilliseconds() const {
    return sample_rate_ == 0
               ? 0.0f
               : frames_per_buffer_ / (sample_rate_ / 1000.0f);
  }

 private:
  int sample_rate_;
  int channels_;
  // Lowest possible size of the native audio buffer, in frames. This size
  // is injected into the OpenSL ES output (since it does not "talk Java")
  // implementation but is currently not utilized by the Java implementation
  // since it acquires the same value internally.
  int frames_per_buffer_;
  int frames_per_10ms_buffer_;
  int bits_per_sample_;
};
// Implements support for functions in the WebRTC audio stack for Android that
// relies on the AudioManager in android.media. It also populates an
// AudioParameter structure with native audio parameters detected at

View File

@ -103,6 +103,7 @@
'audio_device_impl.h',
'ios/audio_device_ios.h',
'ios/audio_device_ios.mm',
'ios/audio_device_not_implemented_ios.mm',
'linux/alsasymboltable_linux.cc',
'linux/alsasymboltable_linux.h',
'linux/audio_device_alsa_linux.cc',
@ -177,6 +178,7 @@
'-framework AudioToolbox',
'-framework AVFoundation',
'-framework Foundation',
'-framework UIKit',
],
},
},

View File

@ -130,8 +130,6 @@ int32_t AudioDeviceBuffer::InitRecording()
int32_t AudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz)
{
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "AudioDeviceBuffer::SetRecordingSampleRate(fsHz=%u)", fsHz);
CriticalSectionScoped lock(&_critSect);
_recSampleRate = fsHz;
return 0;
@ -143,8 +141,6 @@ int32_t AudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz)
int32_t AudioDeviceBuffer::SetPlayoutSampleRate(uint32_t fsHz)
{
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "AudioDeviceBuffer::SetPlayoutSampleRate(fsHz=%u)", fsHz);
CriticalSectionScoped lock(&_critSect);
_playSampleRate = fsHz;
return 0;
@ -174,8 +170,6 @@ int32_t AudioDeviceBuffer::PlayoutSampleRate() const
int32_t AudioDeviceBuffer::SetRecordingChannels(uint8_t channels)
{
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "AudioDeviceBuffer::SetRecordingChannels(channels=%u)", channels);
CriticalSectionScoped lock(&_critSect);
_recChannels = channels;
_recBytesPerSample = 2*channels; // 16 bits per sample in mono, 32 bits in stereo
@ -188,8 +182,6 @@ int32_t AudioDeviceBuffer::SetRecordingChannels(uint8_t channels)
int32_t AudioDeviceBuffer::SetPlayoutChannels(uint8_t channels)
{
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "AudioDeviceBuffer::SetPlayoutChannels(channels=%u)", channels);
CriticalSectionScoped lock(&_critSect);
_playChannels = channels;
// 16 bits per sample in mono, 32 bits in stereo

View File

@ -9,73 +9,68 @@
*/
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/base/logging.h"
namespace webrtc {
int32_t AudioDeviceGeneric::SetRecordingSampleRate(
const uint32_t samplesPerSec)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
"Set recording sample rate not supported on this platform");
return -1;
const uint32_t samplesPerSec) {
LOG_F(LS_ERROR) << "Not supported on this platform";
return -1;
}
int32_t AudioDeviceGeneric::SetPlayoutSampleRate(
const uint32_t samplesPerSec)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
"Set playout sample rate not supported on this platform");
return -1;
int32_t AudioDeviceGeneric::SetPlayoutSampleRate(const uint32_t samplesPerSec) {
LOG_F(LS_ERROR) << "Not supported on this platform";
return -1;
}
int32_t AudioDeviceGeneric::SetLoudspeakerStatus(bool enable)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
"Set loudspeaker status not supported on this platform");
return -1;
int32_t AudioDeviceGeneric::SetLoudspeakerStatus(bool enable) {
LOG_F(LS_ERROR) << "Not supported on this platform";
return -1;
}
int32_t AudioDeviceGeneric::GetLoudspeakerStatus(bool& enable) const
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
"Get loudspeaker status not supported on this platform");
return -1;
int32_t AudioDeviceGeneric::GetLoudspeakerStatus(bool& enable) const {
LOG_F(LS_ERROR) << "Not supported on this platform";
return -1;
}
int32_t AudioDeviceGeneric::ResetAudioDevice()
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
"Reset audio device not supported on this platform");
return -1;
int32_t AudioDeviceGeneric::ResetAudioDevice() {
LOG_F(LS_ERROR) << "Not supported on this platform";
return -1;
}
int32_t AudioDeviceGeneric::SoundDeviceControl(unsigned int par1,
unsigned int par2, unsigned int par3, unsigned int par4)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
"Sound device control not supported on this platform");
return -1;
unsigned int par2,
unsigned int par3,
unsigned int par4) {
LOG_F(LS_ERROR) << "Not supported on this platform";
return -1;
}
bool AudioDeviceGeneric::BuiltInAECIsAvailable() const {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
"Built-in AEC not supported on this platform");
LOG_F(LS_ERROR) << "Not supported on this platform";
return false;
}
int32_t AudioDeviceGeneric::EnableBuiltInAEC(bool enable)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
"Built-in AEC not supported on this platform");
return -1;
int32_t AudioDeviceGeneric::EnableBuiltInAEC(bool enable) {
LOG_F(LS_ERROR) << "Not supported on this platform";
return -1;
}
bool AudioDeviceGeneric::BuiltInAECIsEnabled() const
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
"Windows AEC not supported on this platform");
return false;
bool AudioDeviceGeneric::BuiltInAECIsEnabled() const {
LOG_F(LS_ERROR) << "Not supported on this platform";
return false;
}
int AudioDeviceGeneric::GetPlayoutAudioParameters(
AudioParameters* params) const {
LOG_F(LS_ERROR) << "Not supported on this platform";
return -1;
}
int AudioDeviceGeneric::GetRecordAudioParameters(
AudioParameters* params) const {
LOG_F(LS_ERROR) << "Not supported on this platform";
return -1;
}
} // namespace webrtc

View File

@ -16,168 +16,160 @@
namespace webrtc {
class AudioDeviceGeneric
{
class AudioDeviceGeneric {
public:
// Retrieve the currently utilized audio layer
virtual int32_t ActiveAudioLayer(
AudioDeviceModule::AudioLayer& audioLayer) const = 0;
// Retrieve the currently utilized audio layer
virtual int32_t ActiveAudioLayer(
AudioDeviceModule::AudioLayer& audioLayer) const = 0;
// Main initialization and termination
virtual int32_t Init() = 0;
virtual int32_t Terminate() = 0;
virtual bool Initialized() const = 0;
// Main initialization and termination
virtual int32_t Init() = 0;
virtual int32_t Terminate() = 0;
virtual bool Initialized() const = 0;
// Device enumeration
virtual int16_t PlayoutDevices() = 0;
virtual int16_t RecordingDevices() = 0;
virtual int32_t PlayoutDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) = 0;
virtual int32_t RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) = 0;
// Device enumeration
virtual int16_t PlayoutDevices() = 0;
virtual int16_t RecordingDevices() = 0;
virtual int32_t PlayoutDeviceName(
uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) = 0;
virtual int32_t RecordingDeviceName(
uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) = 0;
// Device selection
virtual int32_t SetPlayoutDevice(uint16_t index) = 0;
virtual int32_t SetPlayoutDevice(
AudioDeviceModule::WindowsDeviceType device) = 0;
virtual int32_t SetRecordingDevice(uint16_t index) = 0;
virtual int32_t SetRecordingDevice(
AudioDeviceModule::WindowsDeviceType device) = 0;
// Device selection
virtual int32_t SetPlayoutDevice(uint16_t index) = 0;
virtual int32_t SetPlayoutDevice(
AudioDeviceModule::WindowsDeviceType device) = 0;
virtual int32_t SetRecordingDevice(uint16_t index) = 0;
virtual int32_t SetRecordingDevice(
AudioDeviceModule::WindowsDeviceType device) = 0;
// Audio transport initialization
virtual int32_t PlayoutIsAvailable(bool& available) = 0;
virtual int32_t InitPlayout() = 0;
virtual bool PlayoutIsInitialized() const = 0;
virtual int32_t RecordingIsAvailable(bool& available) = 0;
virtual int32_t InitRecording() = 0;
virtual bool RecordingIsInitialized() const = 0;
// Audio transport initialization
virtual int32_t PlayoutIsAvailable(bool& available) = 0;
virtual int32_t InitPlayout() = 0;
virtual bool PlayoutIsInitialized() const = 0;
virtual int32_t RecordingIsAvailable(bool& available) = 0;
virtual int32_t InitRecording() = 0;
virtual bool RecordingIsInitialized() const = 0;
// Audio transport control
virtual int32_t StartPlayout() = 0;
virtual int32_t StopPlayout() = 0;
virtual bool Playing() const = 0;
virtual int32_t StartRecording() = 0;
virtual int32_t StopRecording() = 0;
virtual bool Recording() const = 0;
// Audio transport control
virtual int32_t StartPlayout() = 0;
virtual int32_t StopPlayout() = 0;
virtual bool Playing() const = 0;
virtual int32_t StartRecording() = 0;
virtual int32_t StopRecording() = 0;
virtual bool Recording() const = 0;
// Microphone Automatic Gain Control (AGC)
virtual int32_t SetAGC(bool enable) = 0;
virtual bool AGC() const = 0;
// Microphone Automatic Gain Control (AGC)
virtual int32_t SetAGC(bool enable) = 0;
virtual bool AGC() const = 0;
// Volume control based on the Windows Wave API (Windows only)
virtual int32_t SetWaveOutVolume(uint16_t volumeLeft,
uint16_t volumeRight) = 0;
virtual int32_t WaveOutVolume(uint16_t& volumeLeft,
uint16_t& volumeRight) const = 0;
// Volume control based on the Windows Wave API (Windows only)
virtual int32_t SetWaveOutVolume(uint16_t volumeLeft,
uint16_t volumeRight) = 0;
virtual int32_t WaveOutVolume(uint16_t& volumeLeft,
uint16_t& volumeRight) const = 0;
// Audio mixer initialization
virtual int32_t InitSpeaker() = 0;
virtual bool SpeakerIsInitialized() const = 0;
virtual int32_t InitMicrophone() = 0;
virtual bool MicrophoneIsInitialized() const = 0;
// Audio mixer initialization
virtual int32_t InitSpeaker() = 0;
virtual bool SpeakerIsInitialized() const = 0;
virtual int32_t InitMicrophone() = 0;
virtual bool MicrophoneIsInitialized() const = 0;
// Speaker volume controls
virtual int32_t SpeakerVolumeIsAvailable(bool& available) = 0;
virtual int32_t SetSpeakerVolume(uint32_t volume) = 0;
virtual int32_t SpeakerVolume(uint32_t& volume) const = 0;
virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const = 0;
virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const = 0;
virtual int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const = 0;
// Speaker volume controls
virtual int32_t SpeakerVolumeIsAvailable(bool& available) = 0;
virtual int32_t SetSpeakerVolume(uint32_t volume) = 0;
virtual int32_t SpeakerVolume(uint32_t& volume) const = 0;
virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const = 0;
virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const = 0;
virtual int32_t SpeakerVolumeStepSize(
uint16_t& stepSize) const = 0;
// Microphone volume controls
virtual int32_t MicrophoneVolumeIsAvailable(bool& available) = 0;
virtual int32_t SetMicrophoneVolume(uint32_t volume) = 0;
virtual int32_t MicrophoneVolume(uint32_t& volume) const = 0;
virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const = 0;
virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const = 0;
virtual int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const = 0;
// Microphone volume controls
virtual int32_t MicrophoneVolumeIsAvailable(bool& available) = 0;
virtual int32_t SetMicrophoneVolume(uint32_t volume) = 0;
virtual int32_t MicrophoneVolume(uint32_t& volume) const = 0;
virtual int32_t MaxMicrophoneVolume(
uint32_t& maxVolume) const = 0;
virtual int32_t MinMicrophoneVolume(
uint32_t& minVolume) const = 0;
virtual int32_t MicrophoneVolumeStepSize(
uint16_t& stepSize) const = 0;
// Speaker mute control
virtual int32_t SpeakerMuteIsAvailable(bool& available) = 0;
virtual int32_t SetSpeakerMute(bool enable) = 0;
virtual int32_t SpeakerMute(bool& enabled) const = 0;
// Speaker mute control
virtual int32_t SpeakerMuteIsAvailable(bool& available) = 0;
virtual int32_t SetSpeakerMute(bool enable) = 0;
virtual int32_t SpeakerMute(bool& enabled) const = 0;
// Microphone mute control
virtual int32_t MicrophoneMuteIsAvailable(bool& available) = 0;
virtual int32_t SetMicrophoneMute(bool enable) = 0;
virtual int32_t MicrophoneMute(bool& enabled) const = 0;
// Microphone mute control
virtual int32_t MicrophoneMuteIsAvailable(bool& available) = 0;
virtual int32_t SetMicrophoneMute(bool enable) = 0;
virtual int32_t MicrophoneMute(bool& enabled) const = 0;
// Microphone boost control
virtual int32_t MicrophoneBoostIsAvailable(bool& available) = 0;
virtual int32_t SetMicrophoneBoost(bool enable) = 0;
virtual int32_t MicrophoneBoost(bool& enabled) const = 0;
// Microphone boost control
virtual int32_t MicrophoneBoostIsAvailable(bool& available) = 0;
virtual int32_t SetMicrophoneBoost(bool enable) = 0;
virtual int32_t MicrophoneBoost(bool& enabled) const = 0;
// Stereo support
virtual int32_t StereoPlayoutIsAvailable(bool& available) = 0;
virtual int32_t SetStereoPlayout(bool enable) = 0;
virtual int32_t StereoPlayout(bool& enabled) const = 0;
virtual int32_t StereoRecordingIsAvailable(bool& available) = 0;
virtual int32_t SetStereoRecording(bool enable) = 0;
virtual int32_t StereoRecording(bool& enabled) const = 0;
// Stereo support
virtual int32_t StereoPlayoutIsAvailable(bool& available) = 0;
virtual int32_t SetStereoPlayout(bool enable) = 0;
virtual int32_t StereoPlayout(bool& enabled) const = 0;
virtual int32_t StereoRecordingIsAvailable(bool& available) = 0;
virtual int32_t SetStereoRecording(bool enable) = 0;
virtual int32_t StereoRecording(bool& enabled) const = 0;
// Delay information and control
virtual int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
uint16_t sizeMS = 0) = 0;
virtual int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
uint16_t& sizeMS) const = 0;
virtual int32_t PlayoutDelay(uint16_t& delayMS) const = 0;
virtual int32_t RecordingDelay(uint16_t& delayMS) const = 0;
// Delay information and control
virtual int32_t SetPlayoutBuffer(
const AudioDeviceModule::BufferType type,
uint16_t sizeMS = 0) = 0;
virtual int32_t PlayoutBuffer(
AudioDeviceModule::BufferType& type, uint16_t& sizeMS) const = 0;
virtual int32_t PlayoutDelay(uint16_t& delayMS) const = 0;
virtual int32_t RecordingDelay(uint16_t& delayMS) const = 0;
// CPU load
virtual int32_t CPULoad(uint16_t& load) const = 0;
// CPU load
virtual int32_t CPULoad(uint16_t& load) const = 0;
// Native sample rate controls (samples/sec)
virtual int32_t SetRecordingSampleRate(const uint32_t samplesPerSec);
virtual int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec);
// Native sample rate controls (samples/sec)
virtual int32_t SetRecordingSampleRate(
const uint32_t samplesPerSec);
virtual int32_t SetPlayoutSampleRate(
const uint32_t samplesPerSec);
// Speaker audio routing (for mobile devices)
virtual int32_t SetLoudspeakerStatus(bool enable);
virtual int32_t GetLoudspeakerStatus(bool& enable) const;
// Speaker audio routing (for mobile devices)
virtual int32_t SetLoudspeakerStatus(bool enable);
virtual int32_t GetLoudspeakerStatus(bool& enable) const;
// Reset Audio Device (for mobile devices)
virtual int32_t ResetAudioDevice();
// Reset Audio Device (for mobile devices)
virtual int32_t ResetAudioDevice();
// Sound Audio Device control (for WinCE only)
virtual int32_t SoundDeviceControl(unsigned int par1 = 0,
unsigned int par2 = 0,
unsigned int par3 = 0,
unsigned int par4 = 0);
// Sound Audio Device control (for WinCE only)
virtual int32_t SoundDeviceControl(unsigned int par1 = 0,
unsigned int par2 = 0,
unsigned int par3 = 0,
unsigned int par4 = 0);
// Android only
virtual bool BuiltInAECIsAvailable() const;
// Android only
virtual bool BuiltInAECIsAvailable() const;
// Windows Core Audio and Android only.
virtual int32_t EnableBuiltInAEC(bool enable);
// Windows Core Audio and Android only.
virtual int32_t EnableBuiltInAEC(bool enable);
// Windows Core Audio only.
virtual bool BuiltInAECIsEnabled() const;
// Windows Core Audio only.
virtual bool BuiltInAECIsEnabled() const;
// iOS only.
// TODO(henrika): add Android support.
virtual int GetPlayoutAudioParameters(AudioParameters* params) const;
virtual int GetRecordAudioParameters(AudioParameters* params) const;
public:
virtual bool PlayoutWarning() const = 0;
virtual bool PlayoutError() const = 0;
virtual bool RecordingWarning() const = 0;
virtual bool RecordingError() const = 0;
virtual void ClearPlayoutWarning() = 0;
virtual void ClearPlayoutError() = 0;
virtual void ClearRecordingWarning() = 0;
virtual void ClearRecordingError() = 0;
virtual bool PlayoutWarning() const = 0;
virtual bool PlayoutError() const = 0;
virtual bool RecordingWarning() const = 0;
virtual bool RecordingError() const = 0;
virtual void ClearPlayoutWarning() = 0;
virtual void ClearPlayoutError() = 0;
virtual void ClearRecordingWarning() = 0;
virtual void ClearRecordingError() = 0;
public:
virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) = 0;
virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) = 0;
virtual ~AudioDeviceGeneric() {}
virtual ~AudioDeviceGeneric() {}
};
} // namespace webrtc

View File

@ -325,7 +325,7 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects()
if (audioLayer == kPlatformDefaultAudio)
{
// Create iOS Audio Device implementation.
ptrAudioDevice = new AudioDeviceIOS(Id());
ptrAudioDevice = new AudioDeviceIOS();
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "iPhone Audio APIs will be utilized");
}
// END #if defined(WEBRTC_IOS)
@ -1899,6 +1899,16 @@ bool AudioDeviceModuleImpl::BuiltInAECIsAvailable() const {
return _ptrAudioDevice->BuiltInAECIsAvailable();
}
int AudioDeviceModuleImpl::GetPlayoutAudioParameters(
AudioParameters* params) const {
return _ptrAudioDevice->GetPlayoutAudioParameters(params);
}
int AudioDeviceModuleImpl::GetRecordAudioParameters(
AudioParameters* params) const {
return _ptrAudioDevice->GetRecordAudioParameters(params);
}
// ============================================================================
// Private Methods
// ============================================================================

View File

@ -18,217 +18,209 @@
#include "webrtc/modules/audio_device/audio_device_buffer.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
namespace webrtc
{
namespace webrtc {
class AudioDeviceGeneric;
class AudioManager;
class CriticalSectionWrapper;
class AudioDeviceModuleImpl : public AudioDeviceModule
{
public:
enum PlatformType
{
kPlatformNotSupported = 0,
kPlatformWin32 = 1,
kPlatformWinCe = 2,
kPlatformLinux = 3,
kPlatformMac = 4,
kPlatformAndroid = 5,
kPlatformIOS = 6
};
class AudioDeviceModuleImpl : public AudioDeviceModule {
public:
enum PlatformType {
kPlatformNotSupported = 0,
kPlatformWin32 = 1,
kPlatformWinCe = 2,
kPlatformLinux = 3,
kPlatformMac = 4,
kPlatformAndroid = 5,
kPlatformIOS = 6
};
int32_t CheckPlatform();
int32_t CreatePlatformSpecificObjects();
int32_t AttachAudioBuffer();
int32_t CheckPlatform();
int32_t CreatePlatformSpecificObjects();
int32_t AttachAudioBuffer();
AudioDeviceModuleImpl(const int32_t id, const AudioLayer audioLayer);
virtual ~AudioDeviceModuleImpl();
AudioDeviceModuleImpl(const int32_t id, const AudioLayer audioLayer);
virtual ~AudioDeviceModuleImpl();
public: // RefCountedModule
int64_t TimeUntilNextProcess() override;
int32_t Process() override;
int64_t TimeUntilNextProcess() override;
int32_t Process() override;
public:
// Factory methods (resource allocation/deallocation)
static AudioDeviceModule* Create(
const int32_t id,
const AudioLayer audioLayer = kPlatformDefaultAudio);
// Factory methods (resource allocation/deallocation)
static AudioDeviceModule* Create(
const int32_t id,
const AudioLayer audioLayer = kPlatformDefaultAudio);
// Retrieve the currently utilized audio layer
int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override;
// Retrieve the currently utilized audio layer
int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override;
// Error handling
ErrorCode LastError() const override;
int32_t RegisterEventObserver(AudioDeviceObserver* eventCallback) override;
// Error handling
ErrorCode LastError() const override;
int32_t RegisterEventObserver(AudioDeviceObserver* eventCallback) override;
// Full-duplex transportation of PCM audio
int32_t RegisterAudioCallback(AudioTransport* audioCallback) override;
// Full-duplex transportation of PCM audio
int32_t RegisterAudioCallback(AudioTransport* audioCallback) override;
// Main initialization and termination
int32_t Init() override;
int32_t Terminate() override;
bool Initialized() const override;
// Main initialization and termination
int32_t Init() override;
int32_t Terminate() override;
bool Initialized() const override;
// Device enumeration
int16_t PlayoutDevices() override;
int16_t RecordingDevices() override;
int32_t PlayoutDeviceName(uint16_t index,
// Device enumeration
int16_t PlayoutDevices() override;
int16_t RecordingDevices() override;
int32_t PlayoutDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) override;
int32_t RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) override;
int32_t RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) override;
// Device selection
int32_t SetPlayoutDevice(uint16_t index) override;
int32_t SetPlayoutDevice(WindowsDeviceType device) override;
int32_t SetRecordingDevice(uint16_t index) override;
int32_t SetRecordingDevice(WindowsDeviceType device) override;
// Device selection
int32_t SetPlayoutDevice(uint16_t index) override;
int32_t SetPlayoutDevice(WindowsDeviceType device) override;
int32_t SetRecordingDevice(uint16_t index) override;
int32_t SetRecordingDevice(WindowsDeviceType device) override;
// Audio transport initialization
int32_t PlayoutIsAvailable(bool* available) override;
int32_t InitPlayout() override;
bool PlayoutIsInitialized() const override;
int32_t RecordingIsAvailable(bool* available) override;
int32_t InitRecording() override;
bool RecordingIsInitialized() const override;
// Audio transport initialization
int32_t PlayoutIsAvailable(bool* available) override;
int32_t InitPlayout() override;
bool PlayoutIsInitialized() const override;
int32_t RecordingIsAvailable(bool* available) override;
int32_t InitRecording() override;
bool RecordingIsInitialized() const override;
// Audio transport control
int32_t StartPlayout() override;
int32_t StopPlayout() override;
bool Playing() const override;
int32_t StartRecording() override;
int32_t StopRecording() override;
bool Recording() const override;
// Audio transport control
int32_t StartPlayout() override;
int32_t StopPlayout() override;
bool Playing() const override;
int32_t StartRecording() override;
int32_t StopRecording() override;
bool Recording() const override;
// Microphone Automatic Gain Control (AGC)
int32_t SetAGC(bool enable) override;
bool AGC() const override;
// Microphone Automatic Gain Control (AGC)
int32_t SetAGC(bool enable) override;
bool AGC() const override;
// Volume control based on the Windows Wave API (Windows only)
int32_t SetWaveOutVolume(uint16_t volumeLeft,
uint16_t volumeRight) override;
int32_t WaveOutVolume(uint16_t* volumeLeft,
uint16_t* volumeRight) const override;
// Volume control based on the Windows Wave API (Windows only)
int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight) override;
int32_t WaveOutVolume(uint16_t* volumeLeft,
uint16_t* volumeRight) const override;
// Audio mixer initialization
int32_t InitSpeaker() override;
bool SpeakerIsInitialized() const override;
int32_t InitMicrophone() override;
bool MicrophoneIsInitialized() const override;
// Audio mixer initialization
int32_t InitSpeaker() override;
bool SpeakerIsInitialized() const override;
int32_t InitMicrophone() override;
bool MicrophoneIsInitialized() const override;
// Speaker volume controls
int32_t SpeakerVolumeIsAvailable(bool* available) override;
int32_t SetSpeakerVolume(uint32_t volume) override;
int32_t SpeakerVolume(uint32_t* volume) const override;
int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override;
int32_t MinSpeakerVolume(uint32_t* minVolume) const override;
int32_t SpeakerVolumeStepSize(uint16_t* stepSize) const override;
// Speaker volume controls
int32_t SpeakerVolumeIsAvailable(bool* available) override;
int32_t SetSpeakerVolume(uint32_t volume) override;
int32_t SpeakerVolume(uint32_t* volume) const override;
int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override;
int32_t MinSpeakerVolume(uint32_t* minVolume) const override;
int32_t SpeakerVolumeStepSize(uint16_t* stepSize) const override;
// Microphone volume controls
int32_t MicrophoneVolumeIsAvailable(bool* available) override;
int32_t SetMicrophoneVolume(uint32_t volume) override;
int32_t MicrophoneVolume(uint32_t* volume) const override;
int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override;
int32_t MinMicrophoneVolume(uint32_t* minVolume) const override;
int32_t MicrophoneVolumeStepSize(uint16_t* stepSize) const override;
// Microphone volume controls
int32_t MicrophoneVolumeIsAvailable(bool* available) override;
int32_t SetMicrophoneVolume(uint32_t volume) override;
int32_t MicrophoneVolume(uint32_t* volume) const override;
int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override;
int32_t MinMicrophoneVolume(uint32_t* minVolume) const override;
int32_t MicrophoneVolumeStepSize(uint16_t* stepSize) const override;
// Speaker mute control
int32_t SpeakerMuteIsAvailable(bool* available) override;
int32_t SetSpeakerMute(bool enable) override;
int32_t SpeakerMute(bool* enabled) const override;
// Speaker mute control
int32_t SpeakerMuteIsAvailable(bool* available) override;
int32_t SetSpeakerMute(bool enable) override;
int32_t SpeakerMute(bool* enabled) const override;
// Microphone mute control
int32_t MicrophoneMuteIsAvailable(bool* available) override;
int32_t SetMicrophoneMute(bool enable) override;
int32_t MicrophoneMute(bool* enabled) const override;
// Microphone mute control
int32_t MicrophoneMuteIsAvailable(bool* available) override;
int32_t SetMicrophoneMute(bool enable) override;
int32_t MicrophoneMute(bool* enabled) const override;
// Microphone boost control
int32_t MicrophoneBoostIsAvailable(bool* available) override;
int32_t SetMicrophoneBoost(bool enable) override;
int32_t MicrophoneBoost(bool* enabled) const override;
// Microphone boost control
int32_t MicrophoneBoostIsAvailable(bool* available) override;
int32_t SetMicrophoneBoost(bool enable) override;
int32_t MicrophoneBoost(bool* enabled) const override;
// Stereo support
int32_t StereoPlayoutIsAvailable(bool* available) const override;
int32_t SetStereoPlayout(bool enable) override;
int32_t StereoPlayout(bool* enabled) const override;
int32_t StereoRecordingIsAvailable(bool* available) const override;
int32_t SetStereoRecording(bool enable) override;
int32_t StereoRecording(bool* enabled) const override;
int32_t SetRecordingChannel(const ChannelType channel) override;
int32_t RecordingChannel(ChannelType* channel) const override;
// Stereo support
int32_t StereoPlayoutIsAvailable(bool* available) const override;
int32_t SetStereoPlayout(bool enable) override;
int32_t StereoPlayout(bool* enabled) const override;
int32_t StereoRecordingIsAvailable(bool* available) const override;
int32_t SetStereoRecording(bool enable) override;
int32_t StereoRecording(bool* enabled) const override;
int32_t SetRecordingChannel(const ChannelType channel) override;
int32_t RecordingChannel(ChannelType* channel) const override;
// Delay information and control
int32_t SetPlayoutBuffer(const BufferType type,
uint16_t sizeMS = 0) override;
int32_t PlayoutBuffer(BufferType* type, uint16_t* sizeMS) const override;
int32_t PlayoutDelay(uint16_t* delayMS) const override;
int32_t RecordingDelay(uint16_t* delayMS) const override;
// Delay information and control
int32_t SetPlayoutBuffer(const BufferType type, uint16_t sizeMS = 0) override;
int32_t PlayoutBuffer(BufferType* type, uint16_t* sizeMS) const override;
int32_t PlayoutDelay(uint16_t* delayMS) const override;
int32_t RecordingDelay(uint16_t* delayMS) const override;
// CPU load
int32_t CPULoad(uint16_t* load) const override;
// CPU load
int32_t CPULoad(uint16_t* load) const override;
// Recording of raw PCM data
int32_t StartRawOutputFileRecording(
const char pcmFileNameUTF8[kAdmMaxFileNameSize]) override;
int32_t StopRawOutputFileRecording() override;
int32_t StartRawInputFileRecording(
const char pcmFileNameUTF8[kAdmMaxFileNameSize]) override;
int32_t StopRawInputFileRecording() override;
// Recording of raw PCM data
int32_t StartRawOutputFileRecording(
const char pcmFileNameUTF8[kAdmMaxFileNameSize]) override;
int32_t StopRawOutputFileRecording() override;
int32_t StartRawInputFileRecording(
const char pcmFileNameUTF8[kAdmMaxFileNameSize]) override;
int32_t StopRawInputFileRecording() override;
// Native sample rate controls (samples/sec)
int32_t SetRecordingSampleRate(const uint32_t samplesPerSec) override;
int32_t RecordingSampleRate(uint32_t* samplesPerSec) const override;
int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec) override;
int32_t PlayoutSampleRate(uint32_t* samplesPerSec) const override;
// Native sample rate controls (samples/sec)
int32_t SetRecordingSampleRate(const uint32_t samplesPerSec) override;
int32_t RecordingSampleRate(uint32_t* samplesPerSec) const override;
int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec) override;
int32_t PlayoutSampleRate(uint32_t* samplesPerSec) const override;
// Mobile device specific functions
int32_t ResetAudioDevice() override;
int32_t SetLoudspeakerStatus(bool enable) override;
int32_t GetLoudspeakerStatus(bool* enabled) const override;
// Mobile device specific functions
int32_t ResetAudioDevice() override;
int32_t SetLoudspeakerStatus(bool enable) override;
int32_t GetLoudspeakerStatus(bool* enabled) const override;
bool BuiltInAECIsAvailable() const override;
bool BuiltInAECIsAvailable() const override;
int32_t EnableBuiltInAEC(bool enable) override;
bool BuiltInAECIsEnabled() const override;
int32_t EnableBuiltInAEC(bool enable) override;
bool BuiltInAECIsEnabled() const override;
public:
int32_t Id() {return _id;}
int GetPlayoutAudioParameters(AudioParameters* params) const override;
int GetRecordAudioParameters(AudioParameters* params) const override;
int32_t Id() { return _id; }
#if defined(WEBRTC_ANDROID)
// Only use this accessor for test purposes on Android.
AudioManager* GetAndroidAudioManagerForTest() {
return _audioManagerAndroid.get();
}
// Only use this accessor for test purposes on Android.
AudioManager* GetAndroidAudioManagerForTest() {
return _audioManagerAndroid.get();
}
#endif
AudioDeviceBuffer* GetAudioDeviceBuffer() {
return &_audioDeviceBuffer;
}
AudioDeviceBuffer* GetAudioDeviceBuffer() { return &_audioDeviceBuffer; }
private:
PlatformType Platform() const;
AudioLayer PlatformAudioLayer() const;
private:
PlatformType Platform() const;
AudioLayer PlatformAudioLayer() const;
private:
CriticalSectionWrapper& _critSect;
CriticalSectionWrapper& _critSectEventCb;
CriticalSectionWrapper& _critSectAudioCb;
CriticalSectionWrapper& _critSect;
CriticalSectionWrapper& _critSectEventCb;
CriticalSectionWrapper& _critSectAudioCb;
AudioDeviceObserver* _ptrCbAudioDeviceObserver;
AudioDeviceObserver* _ptrCbAudioDeviceObserver;
AudioDeviceGeneric* _ptrAudioDevice;
AudioDeviceGeneric* _ptrAudioDevice;
AudioDeviceBuffer _audioDeviceBuffer;
AudioDeviceBuffer _audioDeviceBuffer;
#if defined(WEBRTC_ANDROID)
rtc::scoped_ptr<AudioManager> _audioManagerAndroid;
rtc::scoped_ptr<AudioManager> _audioManagerAndroid;
#endif
int32_t _id;
AudioLayer _platformAudioLayer;
int64_t _lastProcessTime;
PlatformType _platformType;
bool _initialized;
mutable ErrorCode _lastError;
int32_t _id;
AudioLayer _platformAudioLayer;
int64_t _lastProcessTime;
PlatformType _platformType;
bool _initialized;
mutable ErrorCode _lastError;
};
} // namespace webrtc

View File

@ -202,8 +202,17 @@ class AudioDeviceModule : public RefCountedModule {
// Don't use.
virtual bool BuiltInAECIsEnabled() const { return false; }
// Only supported on iOS.
// TODO(henrika): Make pure virtual after updating Chromium.
virtual int GetPlayoutAudioParameters(AudioParameters* params) const {
return -1;
}
virtual int GetRecordAudioParameters(AudioParameters* params) const {
return -1;
}
protected:
virtual ~AudioDeviceModule() {};
virtual ~AudioDeviceModule() {}
};
AudioDeviceModule* CreateAudioDeviceModule(

View File

@ -26,113 +26,164 @@ static const int kAdmMaxPlayoutBufferSizeMs = 250;
// AudioDeviceObserver
// ----------------------------------------------------------------------------
class AudioDeviceObserver
{
public:
enum ErrorCode
{
kRecordingError = 0,
kPlayoutError = 1
};
enum WarningCode
{
kRecordingWarning = 0,
kPlayoutWarning = 1
};
class AudioDeviceObserver {
public:
enum ErrorCode { kRecordingError = 0, kPlayoutError = 1 };
enum WarningCode { kRecordingWarning = 0, kPlayoutWarning = 1 };
virtual void OnErrorIsReported(const ErrorCode error) = 0;
virtual void OnWarningIsReported(const WarningCode warning) = 0;
virtual void OnErrorIsReported(const ErrorCode error) = 0;
virtual void OnWarningIsReported(const WarningCode warning) = 0;
protected:
virtual ~AudioDeviceObserver() {}
protected:
virtual ~AudioDeviceObserver() {}
};
// ----------------------------------------------------------------------------
// AudioTransport
// ----------------------------------------------------------------------------
class AudioTransport
{
public:
virtual int32_t RecordedDataIsAvailable(const void* audioSamples,
const uint32_t nSamples,
const uint8_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
const int32_t clockDrift,
const uint32_t currentMicLevel,
const bool keyPressed,
uint32_t& newMicLevel) = 0;
class AudioTransport {
public:
virtual int32_t RecordedDataIsAvailable(const void* audioSamples,
const uint32_t nSamples,
const uint8_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
const int32_t clockDrift,
const uint32_t currentMicLevel,
const bool keyPressed,
uint32_t& newMicLevel) = 0;
virtual int32_t NeedMorePlayData(const uint32_t nSamples,
const uint8_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
uint32_t& nSamplesOut,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) = 0;
virtual int32_t NeedMorePlayData(const uint32_t nSamples,
const uint8_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
uint32_t& nSamplesOut,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) = 0;
// Method to pass captured data directly and unmixed to network channels.
// |channel_ids| contains a list of VoE channels which are the
// sinks to the capture data. |audio_delay_milliseconds| is the sum of
// recording delay and playout delay of the hardware. |current_volume| is
// in the range of [0, 255], representing the current microphone analog
// volume. |key_pressed| is used by the typing detection.
// |need_audio_processing| specify if the data needs to be processed by APM.
// Currently WebRtc supports only one APM, and Chrome will make sure only
// one stream goes through APM. When |need_audio_processing| is false, the
// values of |audio_delay_milliseconds|, |current_volume| and |key_pressed|
// will be ignored.
// The return value is the new microphone volume, in the range of [0, 255].
// When the volume does not need to be updated, it returns 0.
// TODO(xians): Remove this interface after Chrome and Libjingle switches
// to OnData().
virtual int OnDataAvailable(const int voe_channels[],
int number_of_voe_channels,
const int16_t* audio_data,
int sample_rate,
int number_of_channels,
int number_of_frames,
int audio_delay_milliseconds,
int current_volume,
bool key_pressed,
bool need_audio_processing) { return 0; }
// Method to pass captured data directly and unmixed to network channels.
// |channel_ids| contains a list of VoE channels which are the
// sinks to the capture data. |audio_delay_milliseconds| is the sum of
// recording delay and playout delay of the hardware. |current_volume| is
// in the range of [0, 255], representing the current microphone analog
// volume. |key_pressed| is used by the typing detection.
// |need_audio_processing| specify if the data needs to be processed by APM.
// Currently WebRtc supports only one APM, and Chrome will make sure only
// one stream goes through APM. When |need_audio_processing| is false, the
// values of |audio_delay_milliseconds|, |current_volume| and |key_pressed|
// will be ignored.
// The return value is the new microphone volume, in the range of [0, 255].
// When the volume does not need to be updated, it returns 0.
// TODO(xians): Remove this interface after Chrome and Libjingle switches
// to OnData().
virtual int OnDataAvailable(const int voe_channels[],
int number_of_voe_channels,
const int16_t* audio_data,
int sample_rate,
int number_of_channels,
int number_of_frames,
int audio_delay_milliseconds,
int current_volume,
bool key_pressed,
bool need_audio_processing) {
return 0;
}
// Method to pass the captured audio data to the specific VoE channel.
// |voe_channel| is the id of the VoE channel which is the sink to the
// capture data.
// TODO(xians): Remove this interface after Libjingle switches to
// PushCaptureData().
virtual void OnData(int voe_channel, const void* audio_data,
int bits_per_sample, int sample_rate,
int number_of_channels,
int number_of_frames) {}
// Method to pass the captured audio data to the specific VoE channel.
// |voe_channel| is the id of the VoE channel which is the sink to the
// capture data.
// TODO(xians): Remove this interface after Libjingle switches to
// PushCaptureData().
virtual void OnData(int voe_channel,
const void* audio_data,
int bits_per_sample,
int sample_rate,
int number_of_channels,
int number_of_frames) {}
// Method to push the captured audio data to the specific VoE channel.
// The data will not undergo audio processing.
// |voe_channel| is the id of the VoE channel which is the sink to the
// capture data.
// TODO(xians): Make the interface pure virtual after Libjingle
// has its implementation.
virtual void PushCaptureData(int voe_channel, const void* audio_data,
int bits_per_sample, int sample_rate,
int number_of_channels,
int number_of_frames) {}
// Method to push the captured audio data to the specific VoE channel.
// The data will not undergo audio processing.
// |voe_channel| is the id of the VoE channel which is the sink to the
// capture data.
// TODO(xians): Make the interface pure virtual after Libjingle
// has its implementation.
virtual void PushCaptureData(int voe_channel,
const void* audio_data,
int bits_per_sample,
int sample_rate,
int number_of_channels,
int number_of_frames) {}
// Method to pull mixed render audio data from all active VoE channels.
// The data will not be passed as reference for audio processing internally.
// TODO(xians): Support getting the unmixed render data from specific VoE
// channel.
virtual void PullRenderData(int bits_per_sample, int sample_rate,
int number_of_channels, int number_of_frames,
void* audio_data,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) {}
// Method to pull mixed render audio data from all active VoE channels.
// The data will not be passed as reference for audio processing internally.
// TODO(xians): Support getting the unmixed render data from specific VoE
// channel.
virtual void PullRenderData(int bits_per_sample,
int sample_rate,
int number_of_channels,
int number_of_frames,
void* audio_data,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) {}
protected:
virtual ~AudioTransport() {}
protected:
virtual ~AudioTransport() {}
};
// Helper class for storage of fundamental audio parameters such as sample rate,
// number of channels, native buffer size etc.
// Note that one audio frame can contain more than one channel sample and each
// sample is assumed to be a 16-bit PCM sample. Hence, one audio frame in
// stereo contains 2 * (16/8) = 4 bytes of data.
// Value type bundling the fundamental audio parameters: sample rate, channel
// count and native buffer size. One audio frame holds one 16-bit PCM sample
// per channel, so a stereo frame occupies 2 * (16 / 8) = 4 bytes.
class AudioParameters {
 public:
  // Only 16-bit PCM samples are supported by this implementation.
  enum { kBitsPerSample = 16 };

  // Default-constructed parameters are empty and invalid until reset() is
  // called with real values.
  AudioParameters() : AudioParameters(0, 0, 0) {}

  AudioParameters(int sample_rate, int channels, int frames_per_buffer)
      : sample_rate_(sample_rate),
        channels_(channels),
        frames_per_buffer_(frames_per_buffer),
        frames_per_10ms_buffer_(sample_rate / 100) {}

  // Replaces all stored values; the 10ms buffer size is derived from the
  // sample rate (one hundredth of a second worth of frames).
  void reset(int sample_rate, int channels, int frames_per_buffer) {
    sample_rate_ = sample_rate;
    channels_ = channels;
    frames_per_buffer_ = frames_per_buffer;
    frames_per_10ms_buffer_ = sample_rate / 100;
  }

  // Trivial accessors.
  int bits_per_sample() const { return kBitsPerSample; }
  int sample_rate() const { return sample_rate_; }
  int channels() const { return channels_; }
  int frames_per_buffer() const { return frames_per_buffer_; }
  int frames_per_10ms_buffer() const { return frames_per_10ms_buffer_; }

  // True once every field has been given a positive value.
  bool is_valid() const {
    return sample_rate_ > 0 && channels_ > 0 && frames_per_buffer_ > 0;
  }

  // Byte-size helpers derived from the stored parameters.
  int GetBytesPerFrame() const { return channels_ * kBitsPerSample / 8; }
  int GetBytesPerBuffer() const {
    return frames_per_buffer_ * GetBytesPerFrame();
  }
  int GetBytesPer10msBuffer() const {
    return frames_per_10ms_buffer_ * GetBytesPerFrame();
  }

  // Buffer duration in milliseconds; 0.0f when no sample rate is set (avoids
  // division by zero).
  float GetBufferSizeInMilliseconds() const {
    return sample_rate_ == 0 ? 0.0f
                             : frames_per_buffer_ / (sample_rate_ / 1000.0f);
  }

 private:
  int sample_rate_;
  int channels_;
  int frames_per_buffer_;
  int frames_per_10ms_buffer_;
};
} // namespace webrtc

View File

@ -13,17 +13,14 @@
#include <AudioUnit/AudioUnit.h>
#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/thread_wrapper.h"
namespace webrtc {
const uint32_t N_REC_SAMPLES_PER_SEC = 44000;
const uint32_t N_PLAY_SAMPLES_PER_SEC = 44000;
const uint32_t N_REC_CHANNELS = 1; // default is mono recording
const uint32_t N_PLAY_CHANNELS = 1; // default is mono playout
const uint32_t N_DEVICE_CHANNELS = 8;
const uint32_t N_REC_SAMPLES_PER_SEC = 44100;
const uint32_t N_PLAY_SAMPLES_PER_SEC = 44100;
const uint32_t ENGINE_REC_BUF_SIZE_IN_SAMPLES = (N_REC_SAMPLES_PER_SEC / 100);
const uint32_t ENGINE_PLAY_BUF_SIZE_IN_SAMPLES = (N_PLAY_SAMPLES_PER_SEC / 100);
@ -33,137 +30,117 @@ const uint16_t N_REC_BUFFERS = 20;
class AudioDeviceIOS : public AudioDeviceGeneric {
public:
AudioDeviceIOS(const int32_t id);
AudioDeviceIOS();
~AudioDeviceIOS();
// Retrieve the currently utilized audio layer
virtual int32_t ActiveAudioLayer(
AudioDeviceModule::AudioLayer& audioLayer) const;
void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
// Main initializaton and termination
virtual int32_t Init();
virtual int32_t Terminate();
virtual bool Initialized() const;
int32_t Init() override;
int32_t Terminate() override;
bool Initialized() const override { return _initialized; }
// Device enumeration
virtual int16_t PlayoutDevices();
virtual int16_t RecordingDevices();
virtual int32_t PlayoutDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]);
virtual int32_t RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]);
int32_t InitPlayout() override;
bool PlayoutIsInitialized() const override { return _playIsInitialized; }
// Device selection
virtual int32_t SetPlayoutDevice(uint16_t index);
virtual int32_t SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device);
virtual int32_t SetRecordingDevice(uint16_t index);
virtual int32_t SetRecordingDevice(
AudioDeviceModule::WindowsDeviceType device);
int32_t InitRecording() override;
bool RecordingIsInitialized() const override { return _recIsInitialized; }
// Audio transport initialization
virtual int32_t PlayoutIsAvailable(bool& available);
virtual int32_t InitPlayout();
virtual bool PlayoutIsInitialized() const;
virtual int32_t RecordingIsAvailable(bool& available);
virtual int32_t InitRecording();
virtual bool RecordingIsInitialized() const;
int32_t StartPlayout() override;
int32_t StopPlayout() override;
bool Playing() const override { return _playing; }
// Audio transport control
virtual int32_t StartPlayout();
virtual int32_t StopPlayout();
virtual bool Playing() const;
virtual int32_t StartRecording();
virtual int32_t StopRecording();
virtual bool Recording() const;
int32_t StartRecording() override;
int32_t StopRecording() override;
bool Recording() const override { return _recording; }
// Microphone Automatic Gain Control (AGC)
virtual int32_t SetAGC(bool enable);
virtual bool AGC() const;
int32_t SetLoudspeakerStatus(bool enable) override;
int32_t GetLoudspeakerStatus(bool& enabled) const override;
// Volume control based on the Windows Wave API (Windows only)
virtual int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight);
virtual int32_t WaveOutVolume(uint16_t& volumeLeft,
uint16_t& volumeRight) const;
// TODO(henrika): investigate if we can reduce the complexity here.
// Do we even need delay estimates?
int32_t PlayoutDelay(uint16_t& delayMS) const override;
int32_t RecordingDelay(uint16_t& delayMS) const override;
// Audio mixer initialization
virtual int32_t InitSpeaker();
virtual bool SpeakerIsInitialized() const;
virtual int32_t InitMicrophone();
virtual bool MicrophoneIsInitialized() const;
int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
uint16_t& sizeMS) const override;
// Speaker volume controls
virtual int32_t SpeakerVolumeIsAvailable(bool& available);
virtual int32_t SetSpeakerVolume(uint32_t volume);
virtual int32_t SpeakerVolume(uint32_t& volume) const;
virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const;
virtual int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;
// These methods are unique for the iOS implementation.
// Microphone volume controls
virtual int32_t MicrophoneVolumeIsAvailable(bool& available);
virtual int32_t SetMicrophoneVolume(uint32_t volume);
virtual int32_t MicrophoneVolume(uint32_t& volume) const;
virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
virtual int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const;
// Native audio parameters stored during construction.
int GetPlayoutAudioParameters(AudioParameters* params) const override;
int GetRecordAudioParameters(AudioParameters* params) const override;
// Microphone mute control
virtual int32_t MicrophoneMuteIsAvailable(bool& available);
virtual int32_t SetMicrophoneMute(bool enable);
virtual int32_t MicrophoneMute(bool& enabled) const;
// These methods are currently not implemented on iOS.
// See audio_device_not_implemented_ios.mm for dummy implementations.
// Speaker mute control
virtual int32_t SpeakerMuteIsAvailable(bool& available);
virtual int32_t SetSpeakerMute(bool enable);
virtual int32_t SpeakerMute(bool& enabled) const;
// Microphone boost control
virtual int32_t MicrophoneBoostIsAvailable(bool& available);
virtual int32_t SetMicrophoneBoost(bool enable);
virtual int32_t MicrophoneBoost(bool& enabled) const;
// Stereo support
virtual int32_t StereoPlayoutIsAvailable(bool& available);
virtual int32_t SetStereoPlayout(bool enable);
virtual int32_t StereoPlayout(bool& enabled) const;
virtual int32_t StereoRecordingIsAvailable(bool& available);
virtual int32_t SetStereoRecording(bool enable);
virtual int32_t StereoRecording(bool& enabled) const;
// Delay information and control
virtual int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
uint16_t sizeMS);
virtual int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
uint16_t& sizeMS) const;
virtual int32_t PlayoutDelay(uint16_t& delayMS) const;
virtual int32_t RecordingDelay(uint16_t& delayMS) const;
// CPU load
virtual int32_t CPULoad(uint16_t& load) const;
public:
virtual bool PlayoutWarning() const;
virtual bool PlayoutError() const;
virtual bool RecordingWarning() const;
virtual bool RecordingError() const;
virtual void ClearPlayoutWarning();
virtual void ClearPlayoutError();
virtual void ClearRecordingWarning();
virtual void ClearRecordingError();
public:
virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
// Reset Audio Device (for mobile devices only)
virtual int32_t ResetAudioDevice();
// enable or disable loud speaker (for iphone only)
virtual int32_t SetLoudspeakerStatus(bool enable);
virtual int32_t GetLoudspeakerStatus(bool& enabled) const;
int32_t ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const;
int32_t ResetAudioDevice() override;
int32_t PlayoutIsAvailable(bool& available) override;
int32_t RecordingIsAvailable(bool& available) override;
int32_t SetAGC(bool enable) override;
bool AGC() const override;
int16_t PlayoutDevices() override;
int16_t RecordingDevices() override;
int32_t PlayoutDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) override;
int32_t RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) override;
int32_t SetPlayoutDevice(uint16_t index) override;
int32_t SetPlayoutDevice(
AudioDeviceModule::WindowsDeviceType device) override;
int32_t SetRecordingDevice(uint16_t index) override;
int32_t SetRecordingDevice(
AudioDeviceModule::WindowsDeviceType device) override;
int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight) override;
int32_t WaveOutVolume(uint16_t& volumeLeft,
uint16_t& volumeRight) const override;
int32_t InitSpeaker() override;
bool SpeakerIsInitialized() const override;
int32_t InitMicrophone() override;
bool MicrophoneIsInitialized() const override;
int32_t SpeakerVolumeIsAvailable(bool& available) override;
int32_t SetSpeakerVolume(uint32_t volume) override;
int32_t SpeakerVolume(uint32_t& volume) const override;
int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
int32_t MinSpeakerVolume(uint32_t& minVolume) const override;
int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const override;
int32_t MicrophoneVolumeIsAvailable(bool& available) override;
int32_t SetMicrophoneVolume(uint32_t volume) override;
int32_t MicrophoneVolume(uint32_t& volume) const override;
int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;
int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const override;
int32_t MicrophoneMuteIsAvailable(bool& available) override;
int32_t SetMicrophoneMute(bool enable) override;
int32_t MicrophoneMute(bool& enabled) const override;
int32_t SpeakerMuteIsAvailable(bool& available) override;
int32_t SetSpeakerMute(bool enable) override;
int32_t SpeakerMute(bool& enabled) const override;
int32_t MicrophoneBoostIsAvailable(bool& available) override;
int32_t SetMicrophoneBoost(bool enable) override;
int32_t MicrophoneBoost(bool& enabled) const override;
int32_t StereoPlayoutIsAvailable(bool& available) override;
int32_t SetStereoPlayout(bool enable) override;
int32_t StereoPlayout(bool& enabled) const override;
int32_t StereoRecordingIsAvailable(bool& available) override;
int32_t SetStereoRecording(bool enable) override;
int32_t StereoRecording(bool& enabled) const override;
int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
uint16_t sizeMS) override;
int32_t CPULoad(uint16_t& load) const override;
bool PlayoutWarning() const override;
bool PlayoutError() const override;
bool RecordingWarning() const override;
bool RecordingError() const override;
void ClearPlayoutWarning() override{};
void ClearPlayoutError() override{};
void ClearRecordingWarning() override{};
void ClearRecordingError() override{};
private:
// TODO(henrika): try to remove these.
void Lock() {
_critSect.Enter();
}
@ -172,10 +149,6 @@ class AudioDeviceIOS : public AudioDeviceGeneric {
_critSect.Leave();
}
int32_t Id() {
return _id;
}
// Init and shutdown
int32_t InitPlayOrRecord();
int32_t ShutdownPlayOrRecord();
@ -209,18 +182,24 @@ class AudioDeviceIOS : public AudioDeviceGeneric {
bool CaptureWorkerThread();
private:
AudioDeviceBuffer* _ptrAudioBuffer;
rtc::ThreadChecker thread_checker_;
// Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
// AudioDeviceModuleImpl class and called by AudioDeviceModuleImpl::Create().
// The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance
// and therefore outlives this object.
AudioDeviceBuffer* audio_device_buffer_;
CriticalSectionWrapper& _critSect;
rtc::scoped_ptr<ThreadWrapper> _captureWorkerThread;
AudioParameters playout_parameters_;
AudioParameters record_parameters_;
int32_t _id;
rtc::scoped_ptr<ThreadWrapper> _captureWorkerThread;
AudioUnit _auVoiceProcessing;
void* _audioInterruptionObserver;
private:
bool _initialized;
bool _isShutDown;
bool _recording;
@ -228,15 +207,8 @@ class AudioDeviceIOS : public AudioDeviceGeneric {
bool _recIsInitialized;
bool _playIsInitialized;
bool _recordingDeviceIsSpecified;
bool _playoutDeviceIsSpecified;
bool _micIsInitialized;
bool _speakerIsInitialized;
bool _AGC;
// The sampling rate to use with Audio Device Buffer
uint32_t _adbSampFreq;
int _adbSampFreq;
// Delay calculation
uint32_t _recordingDelay;
@ -245,12 +217,6 @@ class AudioDeviceIOS : public AudioDeviceGeneric {
uint32_t _recordingDelayHWAndOS;
uint32_t _recordingDelayMeasurementCounter;
// Errors and warnings count
uint16_t _playWarning;
uint16_t _playError;
uint16_t _recWarning;
uint16_t _recError;
// Playout buffer, needed for 44.0 / 44.1 kHz mismatch
int16_t _playoutBuffer[ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
uint32_t _playoutBufferUsed; // How much is filled

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,286 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_device/ios/audio_device_ios.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
namespace webrtc {

// This translation unit collects the AudioDeviceGeneric methods that are not
// (or not yet) implemented for iOS. The stubs fall into three groups:
//  - fixed defaults: return 0/true/false and fill out-parameters with
//    hard-coded values;
//  - LOG_F(LS_WARNING) stubs: callable, but perform no real work;
//  - RTC_NOTREACHED() stubs: APIs that must never be called on this platform.

// iOS supports only the platform-default audio layer.
int32_t AudioDeviceIOS::ActiveAudioLayer(
    AudioDeviceModule::AudioLayer& audioLayer) const {
  audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
  return 0;
}

int32_t AudioDeviceIOS::ResetAudioDevice() {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

// Device enumeration is not supported; report exactly one playout device and
// one recording device.
int16_t AudioDeviceIOS::PlayoutDevices() {
  // TODO(henrika): improve.
  LOG_F(LS_WARNING) << "Not implemented";
  return (int16_t)1;
}

int16_t AudioDeviceIOS::RecordingDevices() {
  // TODO(henrika): improve.
  LOG_F(LS_WARNING) << "Not implemented";
  return (int16_t)1;
}

// Speaker initialization is a no-op; the speaker always reports initialized.
int32_t AudioDeviceIOS::InitSpeaker() {
  return 0;
}

bool AudioDeviceIOS::SpeakerIsInitialized() const {
  return true;
}

// Software speaker-volume control is unavailable; the setters/getters below
// must therefore never be reached.
int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

// Windows Wave API volume controls do not apply to iOS.
int32_t AudioDeviceIOS::SetWaveOutVolume(uint16_t, uint16_t) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::WaveOutVolume(uint16_t&, uint16_t&) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MinSpeakerVolume(uint32_t& minVolume) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::SpeakerVolumeStepSize(uint16_t& stepSize) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

// Speaker mute is unavailable; the accessors must never be reached.
int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

// Device selection by index is accepted but ignored; selection by Windows
// device type is invalid on iOS.
int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) {
  LOG_F(LS_WARNING) << "Not implemented";
  return 0;
}

int32_t AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

// Warning/error state is not tracked in this implementation.
bool AudioDeviceIOS::PlayoutWarning() const {
  return false;
}

bool AudioDeviceIOS::PlayoutError() const {
  return false;
}

bool AudioDeviceIOS::RecordingWarning() const {
  return false;
}

bool AudioDeviceIOS::RecordingError() const {
  return false;
}

// Microphone initialization is a no-op; it always reports initialized.
int32_t AudioDeviceIOS::InitMicrophone() {
  return 0;
}

bool AudioDeviceIOS::MicrophoneIsInitialized() const {
  return true;
}

// Microphone mute is unavailable; the accessors must never be reached.
int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

// Microphone boost is unavailable. Note that the getter reports a benign
// "disabled" state rather than asserting.
int32_t AudioDeviceIOS::MicrophoneBoostIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetMicrophoneBoost(bool enable) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MicrophoneBoost(bool& enabled) const {
  enabled = false;
  return 0;
}

// Stereo recording/playout is not supported; attempts to enable it fail while
// the queries report mono.
int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetStereoRecording(bool enable) {
  LOG_F(LS_WARNING) << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const {
  enabled = false;
  return 0;
}

int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) {
  LOG_F(LS_WARNING) << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const {
  enabled = false;
  return 0;
}

// AGC cannot be enabled; SetAGC(true) is a programming error and always fails.
int32_t AudioDeviceIOS::SetAGC(bool enable) {
  if (enable) {
    RTC_NOTREACHED() << "Should never be called";
  }
  return -1;
}

bool AudioDeviceIOS::AGC() const {
  return false;
}

// Software microphone-volume control is unavailable; the setters/getters
// below must never be reached.
int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MicrophoneVolumeStepSize(uint16_t& stepSize) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

// Device names are not available on iOS.
int32_t AudioDeviceIOS::PlayoutDeviceName(uint16_t index,
                                          char name[kAdmMaxDeviceNameSize],
                                          char guid[kAdmMaxGuidSize]) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::RecordingDeviceName(uint16_t index,
                                            char name[kAdmMaxDeviceNameSize],
                                            char guid[kAdmMaxGuidSize]) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

// Recording-device selection mirrors the playout case above: index is
// accepted but ignored; Windows device types are invalid.
int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
  LOG_F(LS_WARNING) << "Not implemented";
  return 0;
}

int32_t AudioDeviceIOS::SetRecordingDevice(
    AudioDeviceModule::WindowsDeviceType) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

// Playout and recording are always reported as available.
int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) {
  available = true;
  return 0;
}

int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) {
  available = true;
  return 0;
}

// Playout-buffer configuration and CPU-load queries are unsupported.
int32_t AudioDeviceIOS::SetPlayoutBuffer(
    const AudioDeviceModule::BufferType type,
    uint16_t sizeMS) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::CPULoad(uint16_t&) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

}  // namespace webrtc

View File

@ -0,0 +1,788 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <algorithm>
#include <limits>
#include <list>
#include <numeric>
#include <string>
#include <vector>
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/arraysize.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/scoped_ref_ptr.h"
#include "webrtc/modules/audio_device/audio_device_impl.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
#include "webrtc/modules/audio_device/ios/audio_device_ios.h"
#include "webrtc/system_wrappers/interface/clock.h"
#include "webrtc/system_wrappers/interface/event_wrapper.h"
#include "webrtc/system_wrappers/interface/sleep.h"
#include "webrtc/test/testsupport/fileutils.h"
using std::cout;
using std::endl;
using ::testing::_;
using ::testing::AtLeast;
using ::testing::Gt;
using ::testing::Invoke;
using ::testing::NiceMock;
using ::testing::NotNull;
using ::testing::Return;
// #define ENABLE_DEBUG_PRINTF
#ifdef ENABLE_DEBUG_PRINTF
#define PRINTD(...) fprintf(stderr, __VA_ARGS__);
#else
#define PRINTD(...) ((void)0)
#endif
#define PRINT(...) fprintf(stderr, __VA_ARGS__);
namespace webrtc {
// Number of callbacks (input or output) the tests waits for before we set
// an event indicating that the test was OK.
static const int kNumCallbacks = 10;
// Max amount of time we wait for an event to be set while counting callbacks.
static const int kTestTimeOutInMilliseconds = 10 * 1000;
// Number of bits per PCM audio sample.
static const int kBitsPerSample = 16;
// Number of bytes per PCM audio sample.
static const int kBytesPerSample = kBitsPerSample / 8;
// Average number of audio callbacks per second assuming 10ms packet size.
static const int kNumCallbacksPerSecond = 100;
// Play out a test file during this time (unit is in seconds).
static const int kFilePlayTimeInSec = 15;
// Run the full-duplex test during this time (unit is in seconds).
// Note that first |kNumIgnoreFirstCallbacks| are ignored.
static const int kFullDuplexTimeInSec = 10;
// Wait for the callback sequence to stabilize by ignoring this amount of the
// initial callbacks (avoids initial FIFO access).
// Only used in the RunPlayoutAndRecordingInFullDuplex test.
static const int kNumIgnoreFirstCallbacks = 50;
// Sets the number of impulses per second in the latency test.
// TODO(henrika): fine tune this setting for iOS.
static const int kImpulseFrequencyInHz = 1;
// Length of round-trip latency measurements. Number of transmitted impulses
// is kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1.
// TODO(henrika): fine tune this setting for iOS.
static const int kMeasureLatencyTimeInSec = 5;
// Utilized in round-trip latency measurements to avoid capturing noise samples.
// TODO(henrika): fine tune this setting for iOS.
static const int kImpulseThreshold = 50;
// Prefix attached to printed test-result lines (see PrintResults()).
static const char kTag[] = "[..........] ";
// Bitmask flags selecting which audio direction(s) a test exercises.
enum TransportType {
  kPlayout = 0x1,
  kRecording = 0x2,
};
// Interface for processing the audio stream. Real implementations can e.g.
// run audio in loopback, read audio from a file or perform latency
// measurements.
class AudioStreamInterface {
 public:
  // Consumes |num_frames| recorded samples from |source|.
  virtual void Write(const void* source, int num_frames) = 0;
  // Produces |num_frames| samples to play out into |destination|.
  virtual void Read(void* destination, int num_frames) = 0;

 protected:
  virtual ~AudioStreamInterface() {}
};
// Reads audio samples from a PCM file where the file is stored in memory at
// construction.
class FileAudioStream : public AudioStreamInterface {
 public:
  // Loads the whole of |file_name| into memory. |num_callbacks| is the number
  // of 10ms buffers the test will consume; EXPECTs that the file is large
  // enough to cover them all. |sample_rate| is the sample rate in Hz.
  FileAudioStream(int num_callbacks,
                  const std::string& file_name,
                  int sample_rate)
      // Initialize members once in the init list (the original assigned
      // |file_size_in_bytes_| and |sample_rate_| twice).
      : file_size_in_bytes_(test::GetFileSize(file_name)),
        sample_rate_(sample_rate),
        file_pos_(0) {
    EXPECT_GE(file_size_in_callbacks(), num_callbacks)
        << "Size of test file is not large enough to last during the test.";
    const int num_16bit_samples = file_size_in_bytes_ / kBytesPerSample;
    file_.reset(new int16_t[num_16bit_samples]);
    FILE* audio_file = fopen(file_name.c_str(), "rb");
    EXPECT_NE(audio_file, nullptr);
    if (audio_file) {
      // Only read when fopen() succeeded; the original called fread() on a
      // null FILE* if the open failed, which is undefined behavior.
      const int num_samples_read = static_cast<int>(
          fread(file_.get(), sizeof(int16_t), num_16bit_samples, audio_file));
      EXPECT_EQ(num_samples_read, num_16bit_samples);
      fclose(audio_file);
    }
  }

  // AudioStreamInterface::Write() is not implemented.
  void Write(const void* source, int num_frames) override {}

  // Read samples from file stored in memory (at construction) and copy
  // |num_frames| (<=> 10ms) to the |destination| byte buffer.
  void Read(void* destination, int num_frames) override {
    memcpy(destination, static_cast<int16_t*>(&file_[file_pos_]),
           num_frames * sizeof(int16_t));
    file_pos_ += num_frames;
  }

  // Duration of the loaded file in whole seconds.
  int file_size_in_seconds() const {
    return (file_size_in_bytes_ / (kBytesPerSample * sample_rate_));
  }
  // Number of 10ms callbacks the loaded file can serve.
  int file_size_in_callbacks() const {
    return file_size_in_seconds() * kNumCallbacksPerSecond;
  }

 private:
  int file_size_in_bytes_;           // Total file size in bytes.
  int sample_rate_;                  // Sample rate in Hz.
  rtc::scoped_ptr<int16_t[]> file_;  // Complete file content as 16-bit PCM.
  int file_pos_;                     // Read position, in 16-bit samples.
};
// Simple first in first out (FIFO) class that wraps a list of 16-bit audio
// buffers of fixed size and allows Write and Read operations. The idea is to
// store recorded audio buffers (using Write) and then read (using Read) these
// stored buffers with as short delay as possible when the audio layer needs
// data to play out. The number of buffers in the FIFO will stabilize under
// normal conditions since there will be a balance between Write and Read calls.
// The container is a std::list container and access is protected with a lock
// since both sides (playout and recording) are driven by its own thread.
class FifoAudioStream : public AudioStreamInterface {
 public:
  explicit FifoAudioStream(int frames_per_buffer)
      : frames_per_buffer_(frames_per_buffer),
        bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
        fifo_(new AudioBufferList),
        largest_size_(0),
        total_written_elements_(0),
        write_count_(0) {
    EXPECT_NE(fifo_.get(), nullptr);
  }

  ~FifoAudioStream() { Flush(); }

  // Allocate new memory, copy |num_frames| samples from |source| into memory
  // and add pointer to the memory location to end of the list.
  // Increases the size of the FIFO by one element.
  void Write(const void* source, int num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    PRINTD("+");
    // Skip the initial callbacks while the callback sequence stabilizes.
    if (write_count_++ < kNumIgnoreFirstCallbacks) {
      return;
    }
    int16_t* memory = new int16_t[frames_per_buffer_];
    memcpy(static_cast<int16_t*>(&memory[0]), source, bytes_per_buffer_);
    rtc::CritScope lock(&lock_);
    fifo_->push_back(memory);
    const int size = fifo_->size();
    if (size > largest_size_) {
      largest_size_ = size;
      PRINTD("(%d)", largest_size_);
    }
    total_written_elements_ += size;
  }

  // Read pointer to data buffer from front of list, copy |num_frames| of stored
  // data into |destination| and delete the utilized memory allocation.
  // Decreases the size of the FIFO by one element.
  void Read(void* destination, int num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    PRINTD("-");
    rtc::CritScope lock(&lock_);
    if (fifo_->empty()) {
      memset(destination, 0, bytes_per_buffer_);
    } else {
      int16_t* memory = fifo_->front();
      fifo_->pop_front();
      memcpy(destination, static_cast<int16_t*>(&memory[0]), bytes_per_buffer_);
      // The buffer was allocated with new[]; plain |delete| here is
      // undefined behavior, so use delete[].
      delete[] memory;
    }
  }

  int size() const { return fifo_->size(); }

  int largest_size() const { return largest_size_; }

  // Average FIFO size over all counted Write() calls, rounded to the nearest
  // integer (the +0.5 performs the rounding before truncation).
  int average_size() const {
    return (total_written_elements_ == 0)
               ? 0
               : static_cast<int>(
                     0.5 + static_cast<float>(total_written_elements_) /
                               (write_count_ - kNumIgnoreFirstCallbacks));
  }

 private:
  // Deletes all buffers still in the FIFO; each was allocated with new[].
  void Flush() {
    for (auto it = fifo_->begin(); it != fifo_->end(); ++it) {
      delete[] *it;
    }
    fifo_->clear();
  }

  using AudioBufferList = std::list<int16_t*>;
  rtc::CriticalSection lock_;
  const int frames_per_buffer_;  // Samples per 10ms buffer.
  const int bytes_per_buffer_;   // Size of each buffer in bytes.
  rtc::scoped_ptr<AudioBufferList> fifo_;
  int largest_size_;             // Largest FIFO size observed so far.
  int total_written_elements_;   // Sum of FIFO sizes at each counted Write().
  int write_count_;              // Total number of Write() calls.
};
// Inserts periodic impulses and measures the latency between the time of
// transmission and time of receiving the same impulse.
// Usage requires a special hardware called Audio Loopback Dongle.
// See http://source.android.com/devices/audio/loopback.html for details.
class LatencyMeasuringAudioStream : public AudioStreamInterface {
 public:
  explicit LatencyMeasuringAudioStream(int frames_per_buffer)
      : clock_(Clock::GetRealTimeClock()),
        frames_per_buffer_(frames_per_buffer),
        bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
        play_count_(0),
        rec_count_(0),
        pulse_time_(0) {}

  // Insert periodic impulses in first two samples of |destination|.
  void Read(void* destination, int num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    if (play_count_ == 0) {
      PRINT("[");
    }
    play_count_++;
    memset(destination, 0, bytes_per_buffer_);
    const int callbacks_per_impulse =
        kNumCallbacksPerSecond / kImpulseFrequencyInHz;
    if (play_count_ % callbacks_per_impulse == 0) {
      if (pulse_time_ == 0) {
        pulse_time_ = clock_->TimeInMilliseconds();
      }
      PRINT(".");
      // Mark the start of the buffer with two maximal-amplitude samples.
      int16_t* samples = static_cast<int16_t*>(destination);
      samples[0] = std::numeric_limits<int16_t>::max();
      samples[1] = std::numeric_limits<int16_t>::max();
    }
  }

  // Detect received impulses in |source|, derive time between transmission and
  // detection and add the calculated delay to list of latencies.
  void Write(const void* source, int num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    rec_count_++;
    if (pulse_time_ == 0) {
      // Avoid detection of new impulse response until a new impulse has
      // been transmitted (sets |pulse_time_| to value larger than zero).
      return;
    }
    // Locate the first occurrence of the largest sample in the buffer.
    const int16_t* samples = static_cast<const int16_t*>(source);
    const int16_t* peak = std::max_element(samples, samples + num_frames);
    const int max = *peak;
    const int index_of_max = static_cast<int>(std::distance(samples, peak));
    if (max <= kImpulseThreshold) {
      PRINTD("-");
      return;
    }
    PRINTD("(%d,%d)", max, index_of_max);
    const int64_t now_time = clock_->TimeInMilliseconds();
    const int extra_delay =
        IndexToMilliseconds(static_cast<double>(index_of_max));
    PRINTD("[%d]", static_cast<int>(now_time - pulse_time_));
    PRINTD("[%d]", extra_delay);
    // Total latency is the difference between transmit time and detection
    // time plus the extra delay within the buffer in which we detected the
    // received impulse. It is transmitted at sample 0 but can be received
    // at sample N where N > 0. The term |extra_delay| accounts for N and it
    // is a value between 0 and 10ms.
    latencies_.push_back(now_time - pulse_time_ + extra_delay);
    pulse_time_ = 0;
  }

  int num_latency_values() const { return latencies_.size(); }

  int min_latency() const {
    return latencies_.empty()
               ? 0
               : *std::min_element(latencies_.begin(), latencies_.end());
  }

  int max_latency() const {
    return latencies_.empty()
               ? 0
               : *std::max_element(latencies_.begin(), latencies_.end());
  }

  // Mean of all stored latencies, rounded to the nearest integer.
  int average_latency() const {
    if (latencies_.empty())
      return 0;
    const double sum = std::accumulate(latencies_.begin(), latencies_.end(), 0);
    return 0.5 + sum / latencies_.size();
  }

  // Prints all measured latencies followed by min/max/average summary.
  void PrintResults() const {
    PRINT("] ");
    for (int latency : latencies_) {
      PRINT("%d ", latency);
    }
    PRINT("\n");
    PRINT("%s[min, max, avg]=[%d, %d, %d] ms\n", kTag, min_latency(),
          max_latency(), average_latency());
  }

  // Maps a sample index within a 10ms buffer to milliseconds (rounded).
  int IndexToMilliseconds(double index) const {
    return 10.0 * (index / frames_per_buffer_) + 0.5;
  }

 private:
  Clock* clock_;
  const int frames_per_buffer_;
  const int bytes_per_buffer_;
  int play_count_;
  int rec_count_;
  int64_t pulse_time_;
  std::vector<int> latencies_;
};
// Mocks the AudioTransport object and proxies actions for the two callbacks
// (RecordedDataIsAvailable and NeedMorePlayData) to different implementations
// of AudioStreamInterface.
class MockAudioTransport : public AudioTransport {
public:
explicit MockAudioTransport(int type)
: num_callbacks_(0),
type_(type),
play_count_(0),
rec_count_(0),
audio_stream_(nullptr) {}
virtual ~MockAudioTransport() {}
MOCK_METHOD10(RecordedDataIsAvailable,
int32_t(const void* audioSamples,
const uint32_t nSamples,
const uint8_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
const int32_t clockDrift,
const uint32_t currentMicLevel,
const bool keyPressed,
uint32_t& newMicLevel));
MOCK_METHOD8(NeedMorePlayData,
int32_t(const uint32_t nSamples,
const uint8_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
uint32_t& nSamplesOut,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms));
// Set default actions of the mock object. We are delegating to fake
// implementations (of AudioStreamInterface) here.
void HandleCallbacks(EventWrapper* test_is_done,
AudioStreamInterface* audio_stream,
int num_callbacks) {
test_is_done_ = test_is_done;
audio_stream_ = audio_stream;
num_callbacks_ = num_callbacks;
if (play_mode()) {
ON_CALL(*this, NeedMorePlayData(_, _, _, _, _, _, _, _))
.WillByDefault(
Invoke(this, &MockAudioTransport::RealNeedMorePlayData));
}
if (rec_mode()) {
ON_CALL(*this, RecordedDataIsAvailable(_, _, _, _, _, _, _, _, _, _))
.WillByDefault(
Invoke(this, &MockAudioTransport::RealRecordedDataIsAvailable));
}
}
int32_t RealRecordedDataIsAvailable(const void* audioSamples,
const uint32_t nSamples,
const uint8_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
const int32_t clockDrift,
const uint32_t currentMicLevel,
const bool keyPressed,
uint32_t& newMicLevel) {
EXPECT_TRUE(rec_mode()) << "No test is expecting these callbacks.";
rec_count_++;
// Process the recorded audio stream if an AudioStreamInterface
// implementation exists.
if (audio_stream_) {
audio_stream_->Write(audioSamples, nSamples);
}
if (ReceivedEnoughCallbacks()) {
test_is_done_->Set();
}
return 0;
}
int32_t RealNeedMorePlayData(const uint32_t nSamples,
const uint8_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
uint32_t& nSamplesOut,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) {
EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
play_count_++;
nSamplesOut = nSamples;
// Read (possibly processed) audio stream samples to be played out if an
// AudioStreamInterface implementation exists.
if (audio_stream_) {
audio_stream_->Read(audioSamples, nSamples);
}
if (ReceivedEnoughCallbacks()) {
test_is_done_->Set();
}
return 0;
}
bool ReceivedEnoughCallbacks() {
bool recording_done = false;
if (rec_mode())
recording_done = rec_count_ >= num_callbacks_;
else
recording_done = true;
bool playout_done = false;
if (play_mode())
playout_done = play_count_ >= num_callbacks_;
else
playout_done = true;
return recording_done && playout_done;
}
bool play_mode() const { return type_ & kPlayout; }
bool rec_mode() const { return type_ & kRecording; }
private:
EventWrapper* test_is_done_;
int num_callbacks_;
int type_;
int play_count_;
int rec_count_;
AudioStreamInterface* audio_stream_;
};
// AudioDeviceTest test fixture. Creates and initializes an audio device
// module in the constructor and terminates it in the destructor, so every
// test starts from an initialized (but not started) device.
class AudioDeviceTest : public ::testing::Test {
 protected:
  AudioDeviceTest() : test_is_done_(EventWrapper::Create()) {
    old_sev_ = rtc::LogMessage::GetLogToDebug();
    // Set suitable logging level here. Change to rtc::LS_INFO for more verbose
    // output. See webrtc/base/logging.h for complete list of options.
    rtc::LogMessage::LogToDebug(rtc::LS_INFO);
    // Add extra logging fields here (timestamps and thread id).
    // rtc::LogMessage::LogTimestamps();
    rtc::LogMessage::LogThreads();
    // Creates an audio device using a default audio layer.
    audio_device_ = CreateAudioDevice(AudioDeviceModule::kPlatformDefaultAudio);
    EXPECT_NE(audio_device_.get(), nullptr);
    EXPECT_EQ(0, audio_device_->Init());
    EXPECT_EQ(0,
              audio_device()->GetPlayoutAudioParameters(&playout_parameters_));
    EXPECT_EQ(0, audio_device()->GetRecordAudioParameters(&record_parameters_));
  }
  virtual ~AudioDeviceTest() {
    EXPECT_EQ(0, audio_device_->Terminate());
    // Restore the logging severity that was active before the fixture ran.
    rtc::LogMessage::LogToDebug(old_sev_);
  }

  // Convenience accessors for the audio parameters queried in the ctor.
  // TODO(henrika): don't use hardcoded values below.
  int playout_sample_rate() const { return playout_parameters_.sample_rate(); }
  int record_sample_rate() const { return record_parameters_.sample_rate(); }
  int playout_channels() const { return playout_parameters_.channels(); }
  int record_channels() const { return record_parameters_.channels(); }
  int playout_frames_per_10ms_buffer() const {
    return playout_parameters_.frames_per_10ms_buffer();
  }
  int record_frames_per_10ms_buffer() const {
    return record_parameters_.frames_per_10ms_buffer();
  }

  int total_delay_ms() const {
    // TODO(henrika): improve this part.
    return 100;
  }

  rtc::scoped_refptr<AudioDeviceModule> audio_device() const {
    return audio_device_;
  }

  // Downcast accessor; valid because the fixture creates the module via
  // AudioDeviceModuleImpl::Create().
  AudioDeviceModuleImpl* audio_device_impl() const {
    return static_cast<AudioDeviceModuleImpl*>(audio_device_.get());
  }

  AudioDeviceBuffer* audio_device_buffer() const {
    return audio_device_impl()->GetAudioDeviceBuffer();
  }

  // Creates a new audio device module for the given |audio_layer|.
  rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice(
      AudioDeviceModule::AudioLayer audio_layer) {
    rtc::scoped_refptr<AudioDeviceModule> module(
        AudioDeviceModuleImpl::Create(0, audio_layer));
    return module;
  }

  // Returns file name relative to the resource root given a sample rate.
  std::string GetFileName(int sample_rate) {
    EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100 ||
                sample_rate == 16000);
    char fname[64];
    snprintf(fname, sizeof(fname), "audio_device/audio_short%d",
             sample_rate / 1000);
    std::string file_name(webrtc::test::ResourcePath(fname, "pcm"));
    EXPECT_TRUE(test::FileExists(file_name));
#ifdef ENABLE_DEBUG_PRINTF
    PRINTD("file name: %s\n", file_name.c_str());
    const int bytes = test::GetFileSize(file_name);
    PRINTD("file size: %d [bytes]\n", bytes);
    PRINTD("file size: %d [samples]\n", bytes / kBytesPerSample);
    const int seconds = bytes / (sample_rate * kBytesPerSample);
    PRINTD("file size: %d [secs]\n", seconds);
    PRINTD("file size: %d [callbacks]\n", seconds * kNumCallbacksPerSecond);
#endif
    return file_name;
  }

  // Initializes and starts playout; EXPECTs each state transition.
  void StartPlayout() {
    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
    EXPECT_FALSE(audio_device()->Playing());
    EXPECT_EQ(0, audio_device()->InitPlayout());
    EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
    EXPECT_EQ(0, audio_device()->StartPlayout());
    EXPECT_TRUE(audio_device()->Playing());
  }

  // Stops playout; on this platform stopping also de-initializes playout.
  void StopPlayout() {
    EXPECT_EQ(0, audio_device()->StopPlayout());
    EXPECT_FALSE(audio_device()->Playing());
    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
  }

  // Initializes and starts recording; EXPECTs each state transition.
  void StartRecording() {
    EXPECT_FALSE(audio_device()->RecordingIsInitialized());
    EXPECT_FALSE(audio_device()->Recording());
    EXPECT_EQ(0, audio_device()->InitRecording());
    EXPECT_TRUE(audio_device()->RecordingIsInitialized());
    EXPECT_EQ(0, audio_device()->StartRecording());
    EXPECT_TRUE(audio_device()->Recording());
  }

  void StopRecording() {
    EXPECT_EQ(0, audio_device()->StopRecording());
    EXPECT_FALSE(audio_device()->Recording());
  }

  rtc::scoped_ptr<EventWrapper> test_is_done_;
  rtc::scoped_refptr<AudioDeviceModule> audio_device_;
  AudioParameters playout_parameters_;
  AudioParameters record_parameters_;
  rtc::LoggingSeverity old_sev_;  // Logging severity restored in the dtor.
};
// Smoke test: the fixture ctor/dtor perform creation, Init() and Terminate().
TEST_F(AudioDeviceTest, ConstructDestruct) {
  // Using the test fixture to create and destruct the audio device module.
}
// Verifies that Terminate() moves the module out of the initialized state.
TEST_F(AudioDeviceTest, InitTerminate) {
  // Initialization is part of the test fixture.
  EXPECT_TRUE(audio_device()->Initialized());
  // webrtc::SleepMs(5 * 1000);
  EXPECT_EQ(0, audio_device()->Terminate());
  EXPECT_FALSE(audio_device()->Initialized());
}
// Tests that playout can be initiated, started and stopped. No audio callback
// is registered in this test. Runs the cycle twice to verify restartability.
TEST_F(AudioDeviceTest, StartStopPlayout) {
  StartPlayout();
  StopPlayout();
  StartPlayout();
  StopPlayout();
}
// Tests that recording can be initiated, started and stopped. No audio callback
// is registered in this test. Runs the cycle twice to verify restartability.
TEST_F(AudioDeviceTest, StartStopRecording) {
  StartRecording();
  StopRecording();
  StartRecording();
  StopRecording();
}
// Verify that calling StopPlayout() will leave us in an uninitialized state
// which will require a new call to InitPlayout(). This test does not call
// StartPlayout() while being uninitialized since doing so will hit a DCHECK.
TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
  EXPECT_EQ(0, audio_device()->InitPlayout());
  EXPECT_EQ(0, audio_device()->StartPlayout());
  EXPECT_EQ(0, audio_device()->StopPlayout());
  EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
}
// Start playout and verify that the native audio layer starts asking for real
// audio samples to play out using the NeedMorePlayData callback.
TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
  MockAudioTransport mock(kPlayout);
  // No audio stream attached; we only count callbacks here.
  mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
                                     kBytesPerSample, playout_channels(),
                                     playout_sample_rate(), NotNull(), _, _, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  test_is_done_->Wait(kTestTimeOutInMilliseconds);
  StopPlayout();
}
// Start recording and verify that the native audio layer starts feeding real
// audio samples via the RecordedDataIsAvailable callback.
TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
  MockAudioTransport mock(kRecording);
  // No audio stream attached; we only count callbacks here.
  mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
  EXPECT_CALL(mock,
              RecordedDataIsAvailable(
                  NotNull(), record_frames_per_10ms_buffer(), kBytesPerSample,
                  record_channels(), record_sample_rate(),
                  _,  // TODO(henrika): fix delay
                  0, 0, false, _)).Times(AtLeast(kNumCallbacks));
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartRecording();
  test_is_done_->Wait(kTestTimeOutInMilliseconds);
  StopRecording();
}
// Start playout and recording (full-duplex audio) and verify that audio is
// active in both directions.
TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
  MockAudioTransport mock(kPlayout | kRecording);
  // No audio stream attached; we only count callbacks in both directions.
  mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
                                     kBytesPerSample, playout_channels(),
                                     playout_sample_rate(), NotNull(), _, _, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_CALL(mock,
              RecordedDataIsAvailable(
                  NotNull(), record_frames_per_10ms_buffer(), kBytesPerSample,
                  record_channels(), record_sample_rate(),
                  _,  // TODO(henrika): fix delay
                  0, 0, false, _)).Times(AtLeast(kNumCallbacks));
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  StartRecording();
  test_is_done_->Wait(kTestTimeOutInMilliseconds);
  StopRecording();
  StopPlayout();
}
// Start playout and read audio from an external PCM file when the audio layer
// asks for data to play out. Real audio is played out in this test but it does
// not contain any explicit verification that the audio quality is perfect.
TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
  // TODO(henrika): extend test when mono output is supported.
  EXPECT_EQ(1, playout_channels());
  NiceMock<MockAudioTransport> mock(kPlayout);
  const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
  // Select the resource file matching the device's playout sample rate.
  std::string file_name = GetFileName(playout_sample_rate());
  rtc::scoped_ptr<FileAudioStream> file_audio_stream(
      new FileAudioStream(num_callbacks, file_name, playout_sample_rate()));
  mock.HandleCallbacks(test_is_done_.get(), file_audio_stream.get(),
                       num_callbacks);
  // SetMaxPlayoutVolume();
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  test_is_done_->Wait(kTestTimeOutInMilliseconds);
  StopPlayout();
}
// Device enumeration is not supported on iOS; exactly one playout and one
// recording device are always reported.
TEST_F(AudioDeviceTest, Devices) {
  // Device enumeration is not supported. Verify fixed values only.
  EXPECT_EQ(1, audio_device()->PlayoutDevices());
  EXPECT_EQ(1, audio_device()->RecordingDevices());
}
// Start playout and recording and store recorded data in an intermediate FIFO
// buffer from which the playout side then reads its samples in the same order
// as they were stored. Under ideal circumstances, a callback sequence would
// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
// means 'packet played'. Under such conditions, the FIFO would only contain
// one packet on average. However, under more realistic conditions, the size
// of the FIFO will vary more due to an unbalance between the two sides.
// This test tries to verify that the device maintains a balanced callback-
// sequence by running in loopback for ten seconds while measuring the size
// (max and average) of the FIFO. The size of the FIFO is increased by the
// recording side and decreased by the playout side.
// TODO(henrika): tune the final test parameters after running tests on several
// different devices.
TEST_F(AudioDeviceTest, RunPlayoutAndRecordingInFullDuplex) {
  // Loopback requires matching formats in both directions.
  EXPECT_EQ(record_channels(), playout_channels());
  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
  NiceMock<MockAudioTransport> mock(kPlayout | kRecording);
  rtc::scoped_ptr<FifoAudioStream> fifo_audio_stream(
      new FifoAudioStream(playout_frames_per_10ms_buffer()));
  mock.HandleCallbacks(test_is_done_.get(), fifo_audio_stream.get(),
                       kFullDuplexTimeInSec * kNumCallbacksPerSecond);
  // SetMaxPlayoutVolume();
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartRecording();
  StartPlayout();
  test_is_done_->Wait(
      std::max(kTestTimeOutInMilliseconds, 1000 * kFullDuplexTimeInSec));
  StopPlayout();
  StopRecording();
  // Bounds chosen empirically; see comment above about tuning.
  EXPECT_LE(fifo_audio_stream->average_size(), 10);
  EXPECT_LE(fifo_audio_stream->largest_size(), 20);
}
// Measures loopback latency and reports the min, max and average values for
// a full duplex audio session.
// The latency is measured like so:
// - Insert impulses periodically on the output side.
// - Detect the impulses on the input side.
// - Measure the time difference between the transmit time and receive time.
// - Store time differences in a vector and calculate min, max and average.
// This test requires a special hardware called Audio Loopback Dongle.
// See http://source.android.com/devices/audio/loopback.html for details.
// Disabled by default since it needs the loopback dongle to pass.
TEST_F(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
  EXPECT_EQ(record_channels(), playout_channels());
  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
  NiceMock<MockAudioTransport> mock(kPlayout | kRecording);
  rtc::scoped_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
      new LatencyMeasuringAudioStream(playout_frames_per_10ms_buffer()));
  mock.HandleCallbacks(test_is_done_.get(), latency_audio_stream.get(),
                       kMeasureLatencyTimeInSec * kNumCallbacksPerSecond);
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  // SetMaxPlayoutVolume();
  // DisableBuiltInAECIfAvailable();
  StartRecording();
  StartPlayout();
  test_is_done_->Wait(
      std::max(kTestTimeOutInMilliseconds, 1000 * kMeasureLatencyTimeInSec));
  StopPlayout();
  StopRecording();
  // Verify that the correct number of transmitted impulses are detected.
  EXPECT_EQ(latency_audio_stream->num_latency_values(),
            kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1);
  latency_audio_stream->PrintResults();
}
} // namespace webrtc

View File

@ -65,7 +65,6 @@
'bwe_simulator',
'cng',
'desktop_capture',
'isac',
'isac_fix',
'media_file',
'neteq',
@ -361,11 +360,15 @@
['OS=="ios"', {
'sources': [
'video_coding/codecs/h264/h264_video_toolbox_nalu_unittest.cc',
'audio_device/ios/audio_device_unittest_ios.cc',
],
'mac_bundle_resources': [
'<(DEPTH)/resources/audio_coding/speech_mono_16kHz.pcm',
'<(DEPTH)/resources/audio_coding/testfile32kHz.pcm',
'<(DEPTH)/resources/audio_coding/teststereo32kHz.pcm',
'<(DEPTH)/resources/audio_device/audio_short16.pcm',
'<(DEPTH)/resources/audio_device/audio_short44.pcm',
'<(DEPTH)/resources/audio_device/audio_short48.pcm',
'<(DEPTH)/resources/audio_processing/agc/agc_no_circular_buffer.dat',
'<(DEPTH)/resources/audio_processing/agc/agc_pitch_gain.dat',
'<(DEPTH)/resources/audio_processing/agc/agc_pitch_lag.dat',

View File

@ -0,0 +1,55 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_IOS_H_
#define WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_IOS_H_
#if defined(WEBRTC_IOS)
#include <string>
// NOTE(review): this header uses BOOL and NSError* without importing
// Foundation; presumably the including .mm files import it first — confirm.
namespace webrtc {
namespace ios {
// Logs |error| and returns false when |success| is NO; returns true otherwise.
bool CheckAndLogError(BOOL success, NSError* error);
// Return thread ID as a string.
std::string GetThreadId();
// Return thread ID as string suitable for debug logging.
std::string GetThreadInfo();
// Returns [NSThread currentThread] description as string.
// Example: <NSThread: 0x170066d80>{number = 1, name = main}
std::string GetCurrentThreadDescription();
// Returns the current name of the operating system.
std::string GetSystemName();
// Returns the current version of the operating system.
std::string GetSystemVersion();
// Returns the version of the operating system as a floating point value.
float GetSystemVersionAsFloat();
// Returns the device type.
// Examples: "iPhone" and "iPod touch".
std::string GetDeviceType();
// Returns a more detailed device name.
// Examples: "iPhone 5s (GSM)" and "iPhone 6 Plus".
std::string GetDeviceName();
}  // namespace ios
}  // namespace webrtc
#endif  // defined(WEBRTC_IOS)
#endif  // WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_IOS_H_

View File

@ -0,0 +1,172 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#if defined(WEBRTC_IOS)
#import <Foundation/Foundation.h>
#import <sys/sysctl.h>
#import <UIKit/UIKit.h>
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/utility/interface/helpers_ios.h"
namespace webrtc {
namespace ios {
// TODO(henrika): move to shared location.
// See https://code.google.com/p/webrtc/issues/detail?id=4773 for details.
// Converts |stdString| to an NSString using UTF-8 encoding. Built from the
// raw bytes and an explicit length so embedded NUL characters survive.
NSString* NSStringFromStdString(const std::string& stdString) {
  NSData* utf8Data = [NSData dataWithBytes:stdString.data()
                                    length:stdString.length()];
  return [[NSString alloc] initWithData:utf8Data
                               encoding:NSUTF8StringEncoding];
}
// Converts |nsString| to a std::string holding its UTF-8 byte sequence.
std::string StdStringFromNSString(NSString* nsString) {
  NSData* utf8Data = [nsString dataUsingEncoding:NSUTF8StringEncoding];
  const char* bytes = reinterpret_cast<const char*>([utf8Data bytes]);
  return std::string(bytes, [utf8Data length]);
}
// Returns true when |success| is YES. On failure, logs the code, description
// and failure reason of |error| and returns false.
bool CheckAndLogError(BOOL success, NSError* error) {
  if (success) {
    return true;
  }
  NSString* msg =
      [NSString stringWithFormat:@"Error: %ld, %@, %@", (long)error.code,
                                 error.localizedDescription,
                                 error.localizedFailureReason];
  LOG(LS_ERROR) << StdStringFromNSString(msg);
  return false;
}
// TODO(henrika): see if it is possible to move to GetThreadName in
// platform_thread.h and base it on pthread methods instead.
// Returns the description of the current NSThread as a std::string.
std::string GetCurrentThreadDescription() {
  NSThread* currentThread = [NSThread currentThread];
  return StdStringFromNSString([currentThread description]);
}
// Returns the OS name reported by UIDevice (e.g. "iOS").
std::string GetSystemName() {
  return StdStringFromNSString([[UIDevice currentDevice] systemName]);
}
// Returns the OS version reported by UIDevice as a string.
std::string GetSystemVersion() {
  return StdStringFromNSString([[UIDevice currentDevice] systemVersion]);
}
// Returns the OS version as a float, e.g. 8.4.
// Note that -floatValue only parses the leading numeric portion of the
// version string, so e.g. "8.4.1" yields 8.4.
float GetSystemVersionAsFloat() {
  return [[UIDevice currentDevice] systemVersion].floatValue;
}
// Returns the device model type as reported by UIDevice, e.g. "iPhone".
std::string GetDeviceType() {
  return StdStringFromNSString([[UIDevice currentDevice] model]);
}
// Returns a human-readable name of the device model, e.g. "iPhone 6", by
// mapping the raw "hw.machine" sysctl identifier (e.g. "iPhone7,2").
// Logs a warning and returns the raw identifier for unrecognized models.
std::string GetDeviceName() {
  // First query with a null buffer to learn the required buffer size, then
  // query again to read the actual machine identifier.
  size_t size;
  sysctlbyname("hw.machine", NULL, &size, NULL, 0);
  rtc::scoped_ptr<char[]> machine(new char[size]);
  sysctlbyname("hw.machine", machine.get(), &size, NULL, 0);
  std::string raw_name(machine.get());
  if (raw_name == "iPhone1,1")
    return std::string("iPhone 1G");
  if (raw_name == "iPhone1,2")
    return std::string("iPhone 3G");
  if (raw_name == "iPhone2,1")
    return std::string("iPhone 3GS");
  if (raw_name == "iPhone3,1")
    return std::string("iPhone 4");
  if (raw_name == "iPhone3,3")
    return std::string("Verizon iPhone 4");
  if (raw_name == "iPhone4,1")
    return std::string("iPhone 4S");
  if (raw_name == "iPhone5,1")
    return std::string("iPhone 5 (GSM)");
  if (raw_name == "iPhone5,2")
    return std::string("iPhone 5 (GSM+CDMA)");
  if (raw_name == "iPhone5,3")
    return std::string("iPhone 5c (GSM)");
  if (raw_name == "iPhone5,4")
    return std::string("iPhone 5c (GSM+CDMA)");
  if (raw_name == "iPhone6,1")
    return std::string("iPhone 5s (GSM)");
  if (raw_name == "iPhone6,2")
    return std::string("iPhone 5s (GSM+CDMA)");
  if (raw_name == "iPhone7,1")
    return std::string("iPhone 6 Plus");
  if (raw_name == "iPhone7,2")
    return std::string("iPhone 6");
  if (raw_name == "iPod1,1")
    return std::string("iPod Touch 1G");
  if (raw_name == "iPod2,1")
    return std::string("iPod Touch 2G");
  if (raw_name == "iPod3,1")
    return std::string("iPod Touch 3G");
  if (raw_name == "iPod4,1")
    return std::string("iPod Touch 4G");
  if (raw_name == "iPod5,1")
    return std::string("iPod Touch 5G");
  if (raw_name == "iPad1,1")
    return std::string("iPad");
  if (raw_name == "iPad2,1")
    return std::string("iPad 2 (WiFi)");
  if (raw_name == "iPad2,2")
    return std::string("iPad 2 (GSM)");
  if (raw_name == "iPad2,3")
    return std::string("iPad 2 (CDMA)");
  if (raw_name == "iPad2,4")
    return std::string("iPad 2 (WiFi)");
  if (raw_name == "iPad2,5")
    return std::string("iPad Mini (WiFi)");
  if (raw_name == "iPad2,6")
    return std::string("iPad Mini (GSM)");
  if (raw_name == "iPad2,7")
    return std::string("iPad Mini (GSM+CDMA)");
  if (raw_name == "iPad3,1")
    return std::string("iPad 3 (WiFi)");
  if (raw_name == "iPad3,2")
    return std::string("iPad 3 (GSM+CDMA)");
  if (raw_name == "iPad3,3")
    return std::string("iPad 3 (GSM)");
  if (raw_name == "iPad3,4")
    return std::string("iPad 4 (WiFi)");
  if (raw_name == "iPad3,5")
    return std::string("iPad 4 (GSM)");
  if (raw_name == "iPad3,6")
    return std::string("iPad 4 (GSM+CDMA)");
  if (raw_name == "iPad4,1")
    return std::string("iPad Air (WiFi)");
  if (raw_name == "iPad4,2")
    return std::string("iPad Air (Cellular)");
  if (raw_name == "iPad4,4")
    return std::string("iPad mini 2G (WiFi)");
  if (raw_name == "iPad4,5")
    return std::string("iPad mini 2G (Cellular)");
  if (raw_name == "i386")
    return std::string("Simulator");
  if (raw_name == "x86_64")
    return std::string("Simulator");
  LOG(LS_WARNING) << "Failed to find device name";
  return raw_name;
}
} // namespace ios
} // namespace webrtc
#endif // defined(WEBRTC_IOS)

View File

@ -22,6 +22,7 @@
'interface/file_player.h',
'interface/file_recorder.h',
'interface/helpers_android.h',
'interface/helpers_ios.h',
'interface/jvm_android.h',
'interface/process_thread.h',
'source/audio_frame_operations.cc',
@ -32,6 +33,7 @@
'source/file_recorder_impl.cc',
'source/file_recorder_impl.h',
'source/helpers_android.cc',
'source/helpers_ios.mm',
'source/jvm_android.cc',
'source/process_thread_impl.cc',
'source/process_thread_impl.h',