Moving iOS Audio Device to sdk.
This change forks the existing iOS audio device module and audio device from modules/audio_device/ into sdk/objc/Framework. It also updates RTCPeerConnectionFactory to use the forked implementation. The unit tests are re-implemented as XCTests. (was: https://webrtc-review.googlesource.com/c/src/+/67300) Bug: webrtc:9120 Change-Id: I46c09900246f75ca5285aeb38f7b8b295784ffac Reviewed-on: https://webrtc-review.googlesource.com/76741 Reviewed-by: Kári Helgason <kthelgason@webrtc.org> Reviewed-by: Henrik Andreassson <henrika@webrtc.org> Reviewed-by: Anders Carlsson <andersc@webrtc.org> Commit-Queue: Peter Hanspers <peterhanspers@webrtc.org> Cr-Commit-Position: refs/heads/master@{#23238}
This commit is contained in:

committed by
Commit Bot

parent
ef00310a4a
commit
8d95e3b211
@ -94,13 +94,7 @@ if (rtc_include_internal_audio_device && is_ios) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
rtc_source_set("audio_device_api") {
|
rtc_source_set("audio_device_api") {
|
||||||
visibility = [
|
visibility = [ "*" ]
|
||||||
":audio_device",
|
|
||||||
":audio_device_buffer",
|
|
||||||
":audio_device_impl",
|
|
||||||
":audio_device_ios_objc",
|
|
||||||
":audio_device_generic",
|
|
||||||
]
|
|
||||||
sources = [
|
sources = [
|
||||||
"include/audio_device.h",
|
"include/audio_device.h",
|
||||||
"include/audio_device_defines.h",
|
"include/audio_device_defines.h",
|
||||||
|
104
sdk/BUILD.gn
104
sdk/BUILD.gn
@ -94,12 +94,85 @@ if (is_ios || is_mac) {
|
|||||||
|
|
||||||
if (!build_with_chromium) {
|
if (!build_with_chromium) {
|
||||||
if (is_ios) {
|
if (is_ios) {
|
||||||
|
rtc_static_library("native_api_audio_device_module") {
|
||||||
|
visibility = [ "*" ]
|
||||||
|
|
||||||
|
sources = [
|
||||||
|
"objc/Framework/Native/api/audio_device_module.h",
|
||||||
|
"objc/Framework/Native/api/audio_device_module.mm",
|
||||||
|
]
|
||||||
|
|
||||||
|
deps = [
|
||||||
|
":audio_device",
|
||||||
|
"../modules/audio_device:audio_device_api",
|
||||||
|
"../modules/audio_device:audio_device_generic",
|
||||||
|
"../rtc_base:checks",
|
||||||
|
"../rtc_base:rtc_base_approved",
|
||||||
|
"../system_wrappers",
|
||||||
|
"../system_wrappers:metrics_api",
|
||||||
|
]
|
||||||
|
|
||||||
|
if (is_clang) {
|
||||||
|
# Suppress warnings from the Chromium Clang plugin
|
||||||
|
# (bugs.webrtc.org/163).
|
||||||
|
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
rtc_source_set("audio_session_observer") {
|
||||||
|
visibility = [ ":*" ]
|
||||||
|
|
||||||
|
sources = [
|
||||||
|
"objc/Framework/Native/src/audio/audio_session_observer.h",
|
||||||
|
]
|
||||||
|
|
||||||
|
deps = [
|
||||||
|
"../rtc_base:rtc_base",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
rtc_static_library("audio_device") {
|
||||||
|
visibility = [ "*" ]
|
||||||
|
|
||||||
|
sources = [
|
||||||
|
"objc/Framework/Native/src/audio/audio_device_ios.h",
|
||||||
|
"objc/Framework/Native/src/audio/audio_device_ios.mm",
|
||||||
|
"objc/Framework/Native/src/audio/audio_device_module_ios.h",
|
||||||
|
"objc/Framework/Native/src/audio/audio_device_module_ios.mm",
|
||||||
|
"objc/Framework/Native/src/audio/voice_processing_audio_unit.h",
|
||||||
|
"objc/Framework/Native/src/audio/voice_processing_audio_unit.mm",
|
||||||
|
]
|
||||||
|
|
||||||
|
deps = [
|
||||||
|
":audio_objc",
|
||||||
|
":audio_session_observer",
|
||||||
|
":common_objc",
|
||||||
|
"../api:array_view",
|
||||||
|
"../modules/audio_device:audio_device_api",
|
||||||
|
"../modules/audio_device:audio_device_buffer",
|
||||||
|
"../modules/audio_device:audio_device_generic",
|
||||||
|
"../rtc_base:checks",
|
||||||
|
"../rtc_base:rtc_base",
|
||||||
|
"../rtc_base/system:fallthrough",
|
||||||
|
"../sdk:common_objc",
|
||||||
|
"../system_wrappers:metrics_api",
|
||||||
|
]
|
||||||
|
|
||||||
|
if (is_clang) {
|
||||||
|
# Suppress warnings from the Chromium Clang plugin
|
||||||
|
# (bugs.webrtc.org/163).
|
||||||
|
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
rtc_static_library("audio_objc") {
|
rtc_static_library("audio_objc") {
|
||||||
sources = [
|
sources = [
|
||||||
"objc/Framework/Classes/Audio/RTCAudioSession+Configuration.mm",
|
"objc/Framework/Classes/Audio/RTCAudioSession+Configuration.mm",
|
||||||
"objc/Framework/Classes/Audio/RTCAudioSession+Private.h",
|
"objc/Framework/Classes/Audio/RTCAudioSession+Private.h",
|
||||||
"objc/Framework/Classes/Audio/RTCAudioSession.mm",
|
"objc/Framework/Classes/Audio/RTCAudioSession.mm",
|
||||||
"objc/Framework/Classes/Audio/RTCAudioSessionConfiguration.m",
|
"objc/Framework/Classes/Audio/RTCAudioSessionConfiguration.m",
|
||||||
|
"objc/Framework/Classes/Audio/RTCNativeAudioSessionDelegateAdapter.h",
|
||||||
|
"objc/Framework/Classes/Audio/RTCNativeAudioSessionDelegateAdapter.mm",
|
||||||
"objc/Framework/Headers/WebRTC/RTCAudioSession.h",
|
"objc/Framework/Headers/WebRTC/RTCAudioSession.h",
|
||||||
"objc/Framework/Headers/WebRTC/RTCAudioSessionConfiguration.h",
|
"objc/Framework/Headers/WebRTC/RTCAudioSessionConfiguration.h",
|
||||||
]
|
]
|
||||||
@ -108,8 +181,10 @@ if (is_ios || is_mac) {
|
|||||||
public_configs = [ ":common_config_objc" ]
|
public_configs = [ ":common_config_objc" ]
|
||||||
|
|
||||||
deps = [
|
deps = [
|
||||||
|
":audio_session_observer",
|
||||||
":common_objc",
|
":common_objc",
|
||||||
"../rtc_base:checks",
|
"../rtc_base:checks",
|
||||||
|
"../rtc_base:rtc_base",
|
||||||
"../rtc_base:rtc_base_approved",
|
"../rtc_base:rtc_base_approved",
|
||||||
]
|
]
|
||||||
|
|
||||||
@ -689,6 +764,10 @@ if (is_ios || is_mac) {
|
|||||||
"../rtc_base:checks",
|
"../rtc_base:checks",
|
||||||
"../rtc_base:rtc_base",
|
"../rtc_base:rtc_base",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
if (is_ios) {
|
||||||
|
deps += [ ":native_api_audio_device_module" ]
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (rtc_include_tests) {
|
if (rtc_include_tests) {
|
||||||
@ -710,15 +789,28 @@ if (is_ios || is_mac) {
|
|||||||
"objc/Framework/UnitTests/frame_buffer_helpers.mm",
|
"objc/Framework/UnitTests/frame_buffer_helpers.mm",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
# TODO(peterhanspers): Reenable these tests on simulator.
|
||||||
|
# See bugs.webrtc.org/7812
|
||||||
|
if (!use_ios_simulator) {
|
||||||
|
sources += [
|
||||||
|
"objc/Framework/UnitTests/RTCAudioDeviceModule_xctest.mm",
|
||||||
|
"objc/Framework/UnitTests/RTCAudioDevice_xctest.mm",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
deps = [
|
deps = [
|
||||||
|
":audio_device",
|
||||||
|
":audio_objc",
|
||||||
":common_objc",
|
":common_objc",
|
||||||
":framework_objc",
|
":framework_objc",
|
||||||
":native_api",
|
":native_api",
|
||||||
|
":native_api_audio_device_module",
|
||||||
":native_video",
|
":native_video",
|
||||||
":videocapture_objc",
|
":videocapture_objc",
|
||||||
":videoframebuffer_objc",
|
":videoframebuffer_objc",
|
||||||
":videosource_objc",
|
":videosource_objc",
|
||||||
":videotoolbox_objc",
|
":videotoolbox_objc",
|
||||||
|
"../../system_wrappers:system_wrappers",
|
||||||
"../../system_wrappers:system_wrappers_default",
|
"../../system_wrappers:system_wrappers_default",
|
||||||
"../api/video:video_frame_i420",
|
"../api/video:video_frame_i420",
|
||||||
"../common_video:common_video",
|
"../common_video:common_video",
|
||||||
@ -750,8 +842,12 @@ if (is_ios || is_mac) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bundle_data("sdk_unittests_bundle_data") {
|
bundle_data("sdk_unittests_bundle_data") {
|
||||||
# Sample video taken from https://media.xiph.org/video/derf/
|
|
||||||
sources = [
|
sources = [
|
||||||
|
"objc/Framework/UnitTests/audio_short16.pcm",
|
||||||
|
"objc/Framework/UnitTests/audio_short44.pcm",
|
||||||
|
"objc/Framework/UnitTests/audio_short48.pcm",
|
||||||
|
|
||||||
|
# Sample video taken from https://media.xiph.org/video/derf/
|
||||||
"objc/Framework/UnitTests/foreman.mp4",
|
"objc/Framework/UnitTests/foreman.mp4",
|
||||||
]
|
]
|
||||||
outputs = [
|
outputs = [
|
||||||
@ -772,6 +868,12 @@ if (is_ios || is_mac) {
|
|||||||
":sdk_unittests_sources",
|
":sdk_unittests_sources",
|
||||||
]
|
]
|
||||||
ldflags = [ "-all_load" ]
|
ldflags = [ "-all_load" ]
|
||||||
|
|
||||||
|
if (is_clang) {
|
||||||
|
# Suppress warnings from the Chromium Clang plugin
|
||||||
|
# (bugs.webrtc.org/163).
|
||||||
|
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -0,0 +1,34 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2018 The WebRTC Project Authors. All rights reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#import "sdk/objc/Framework/Headers/WebRTC/RTCAudioSession.h"
|
||||||
|
|
||||||
|
NS_ASSUME_NONNULL_BEGIN
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
class AudioSessionObserver;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Adapter that forwards RTCAudioSessionDelegate calls to the appropriate
|
||||||
|
* methods on the AudioSessionObserver.
|
||||||
|
*/
|
||||||
|
@interface RTCNativeAudioSessionDelegateAdapter : NSObject <RTCAudioSessionDelegate>
|
||||||
|
|
||||||
|
- (instancetype)init NS_UNAVAILABLE;
|
||||||
|
|
||||||
|
/** |observer| is a raw pointer and should be kept alive
|
||||||
|
* for this object's lifetime.
|
||||||
|
*/
|
||||||
|
- (instancetype)initWithObserver:(webrtc::AudioSessionObserver *)observer
|
||||||
|
NS_DESIGNATED_INITIALIZER;
|
||||||
|
|
||||||
|
@end
|
||||||
|
|
||||||
|
NS_ASSUME_NONNULL_END
|
@ -0,0 +1,89 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2018 The WebRTC Project Authors. All rights reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#import "RTCNativeAudioSessionDelegateAdapter.h"
|
||||||
|
|
||||||
|
#include "sdk/objc/Framework/Native/src/audio/audio_session_observer.h"
|
||||||
|
|
||||||
|
#import "WebRTC/RTCLogging.h"
|
||||||
|
|
||||||
|
@implementation RTCNativeAudioSessionDelegateAdapter {
|
||||||
|
webrtc::AudioSessionObserver *_observer;
|
||||||
|
}
|
||||||
|
|
||||||
|
- (instancetype)initWithObserver:(webrtc::AudioSessionObserver *)observer {
|
||||||
|
RTC_DCHECK(observer);
|
||||||
|
if (self = [super init]) {
|
||||||
|
_observer = observer;
|
||||||
|
}
|
||||||
|
return self;
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma mark - RTCAudioSessionDelegate
|
||||||
|
|
||||||
|
- (void)audioSessionDidBeginInterruption:(RTCAudioSession *)session {
|
||||||
|
_observer->OnInterruptionBegin();
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)audioSessionDidEndInterruption:(RTCAudioSession *)session
|
||||||
|
shouldResumeSession:(BOOL)shouldResumeSession {
|
||||||
|
_observer->OnInterruptionEnd();
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)audioSessionDidChangeRoute:(RTCAudioSession *)session
|
||||||
|
reason:(AVAudioSessionRouteChangeReason)reason
|
||||||
|
previousRoute:(AVAudioSessionRouteDescription *)previousRoute {
|
||||||
|
switch (reason) {
|
||||||
|
case AVAudioSessionRouteChangeReasonUnknown:
|
||||||
|
case AVAudioSessionRouteChangeReasonNewDeviceAvailable:
|
||||||
|
case AVAudioSessionRouteChangeReasonOldDeviceUnavailable:
|
||||||
|
case AVAudioSessionRouteChangeReasonCategoryChange:
|
||||||
|
// It turns out that we see a category change (at least in iOS 9.2)
|
||||||
|
// when making a switch from a BT device to e.g. Speaker using the
|
||||||
|
// iOS Control Center and that we therefore must check if the sample
|
||||||
|
// rate has changed. And if so is the case, restart the audio unit.
|
||||||
|
case AVAudioSessionRouteChangeReasonOverride:
|
||||||
|
case AVAudioSessionRouteChangeReasonWakeFromSleep:
|
||||||
|
case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory:
|
||||||
|
_observer->OnValidRouteChange();
|
||||||
|
break;
|
||||||
|
case AVAudioSessionRouteChangeReasonRouteConfigurationChange:
|
||||||
|
// The set of input and output ports has not changed, but their
|
||||||
|
// configuration has, e.g., a port’s selected data source has
|
||||||
|
// changed. Ignore this type of route change since we are focusing
|
||||||
|
// on detecting headset changes.
|
||||||
|
RTCLog(@"Ignoring RouteConfigurationChange");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)audioSessionMediaServerTerminated:(RTCAudioSession *)session {
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)audioSessionMediaServerReset:(RTCAudioSession *)session {
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)audioSession:(RTCAudioSession *)session
|
||||||
|
didChangeCanPlayOrRecord:(BOOL)canPlayOrRecord {
|
||||||
|
_observer->OnCanPlayOrRecordChange(canPlayOrRecord);
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)audioSessionDidStartPlayOrRecord:(RTCAudioSession *)session {
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)audioSessionDidStopPlayOrRecord:(RTCAudioSession *)session {
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)audioSession:(RTCAudioSession *)audioSession
|
||||||
|
didChangeOutputVolume:(float)outputVolume {
|
||||||
|
_observer->OnChangedOutputVolume();
|
||||||
|
}
|
||||||
|
|
||||||
|
@end
|
@ -38,6 +38,10 @@
|
|||||||
#include "sdk/objc/Framework/Native/src/objc_video_encoder_factory.h"
|
#include "sdk/objc/Framework/Native/src/objc_video_encoder_factory.h"
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#if defined(WEBRTC_IOS)
|
||||||
|
#import "sdk/objc/Framework/Native/api/audio_device_module.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
// Adding the nogncheck to disable the including header check.
|
// Adding the nogncheck to disable the including header check.
|
||||||
// The no-media version PeerConnectionFactory doesn't depend on media related
|
// The no-media version PeerConnectionFactory doesn't depend on media related
|
||||||
// C++ target.
|
// C++ target.
|
||||||
@ -55,6 +59,14 @@
|
|||||||
|
|
||||||
@synthesize nativeFactory = _nativeFactory;
|
@synthesize nativeFactory = _nativeFactory;
|
||||||
|
|
||||||
|
- (rtc::scoped_refptr<webrtc::AudioDeviceModule>)audioDeviceModule {
|
||||||
|
#if defined(WEBRTC_IOS)
|
||||||
|
return webrtc::CreateAudioDeviceModule();
|
||||||
|
#else
|
||||||
|
return nullptr;
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
- (instancetype)init {
|
- (instancetype)init {
|
||||||
#ifdef HAVE_NO_MEDIA
|
#ifdef HAVE_NO_MEDIA
|
||||||
return [self initWithNoMedia];
|
return [self initWithNoMedia];
|
||||||
@ -65,7 +77,7 @@
|
|||||||
[[RTCVideoEncoderFactoryH264 alloc] init])
|
[[RTCVideoEncoderFactoryH264 alloc] init])
|
||||||
nativeVideoDecoderFactory:webrtc::ObjCToNativeVideoDecoderFactory(
|
nativeVideoDecoderFactory:webrtc::ObjCToNativeVideoDecoderFactory(
|
||||||
[[RTCVideoDecoderFactoryH264 alloc] init])
|
[[RTCVideoDecoderFactoryH264 alloc] init])
|
||||||
audioDeviceModule:nullptr
|
audioDeviceModule:[self audioDeviceModule]
|
||||||
audioProcessingModule:nullptr];
|
audioProcessingModule:nullptr];
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
@ -87,7 +99,7 @@
|
|||||||
nativeAudioDecoderFactory:webrtc::CreateBuiltinAudioDecoderFactory()
|
nativeAudioDecoderFactory:webrtc::CreateBuiltinAudioDecoderFactory()
|
||||||
nativeVideoEncoderFactory:std::move(native_encoder_factory)
|
nativeVideoEncoderFactory:std::move(native_encoder_factory)
|
||||||
nativeVideoDecoderFactory:std::move(native_decoder_factory)
|
nativeVideoDecoderFactory:std::move(native_decoder_factory)
|
||||||
audioDeviceModule:nullptr
|
audioDeviceModule:[self audioDeviceModule]
|
||||||
audioProcessingModule:nullptr];
|
audioProcessingModule:nullptr];
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
24
sdk/objc/Framework/Native/api/audio_device_module.h
Normal file
24
sdk/objc/Framework/Native/api/audio_device_module.h
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2018 The WebRTC project authors. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SDK_OBJC_FRAMEWORK_NATIVE_API_AUDIO_DEVICE_MODULE_H_
|
||||||
|
#define SDK_OBJC_FRAMEWORK_NATIVE_API_AUDIO_DEVICE_MODULE_H_
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
|
||||||
|
#include "modules/audio_device/include/audio_device.h"
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
|
||||||
|
rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule();
|
||||||
|
|
||||||
|
} // namespace webrtc
|
||||||
|
|
||||||
|
#endif // SDK_OBJC_FRAMEWORK_NATIVE_API_AUDIO_DEVICE_MODULE_H_
|
30
sdk/objc/Framework/Native/api/audio_device_module.mm
Normal file
30
sdk/objc/Framework/Native/api/audio_device_module.mm
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2018 The WebRTC project authors. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "audio_device_module.h"
|
||||||
|
|
||||||
|
#include "rtc_base/logging.h"
|
||||||
|
#include "rtc_base/refcountedobject.h"
|
||||||
|
|
||||||
|
#include "sdk/objc/Framework/Native/src/audio/audio_device_module_ios.h"
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
|
||||||
|
rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule() {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
#if defined(WEBRTC_IOS)
|
||||||
|
return new rtc::RefCountedObject<ios_adm::AudioDeviceModuleIOS>();
|
||||||
|
#else
|
||||||
|
RTC_LOG(LERROR)
|
||||||
|
<< "current platform is not supported => this module will self destruct!";
|
||||||
|
return nullptr;
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
}
|
293
sdk/objc/Framework/Native/src/audio/audio_device_ios.h
Normal file
293
sdk/objc/Framework/Native/src/audio/audio_device_ios.h
Normal file
@ -0,0 +1,293 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_
|
||||||
|
#define MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
|
||||||
|
#include "sdk/objc/Framework/Headers/WebRTC/RTCMacros.h"
|
||||||
|
#include "modules/audio_device/audio_device_generic.h"
|
||||||
|
#include "audio_session_observer.h"
|
||||||
|
#include "voice_processing_audio_unit.h"
|
||||||
|
#include "rtc_base/buffer.h"
|
||||||
|
#include "rtc_base/thread.h"
|
||||||
|
#include "rtc_base/thread_annotations.h"
|
||||||
|
#include "rtc_base/thread_checker.h"
|
||||||
|
|
||||||
|
RTC_FWD_DECL_OBJC_CLASS(RTCNativeAudioSessionDelegateAdapter);
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
|
||||||
|
class FineAudioBuffer;
|
||||||
|
|
||||||
|
namespace ios_adm {
|
||||||
|
|
||||||
|
// Implements full duplex 16-bit mono PCM audio support for iOS using a
|
||||||
|
// Voice-Processing (VP) I/O audio unit in Core Audio. The VP I/O audio unit
|
||||||
|
// supports audio echo cancellation. It also adds automatic gain control,
|
||||||
|
// adjustment of voice-processing quality and muting.
|
||||||
|
//
|
||||||
|
// An instance must be created and destroyed on one and the same thread.
|
||||||
|
// All supported public methods must also be called on the same thread.
|
||||||
|
// A thread checker will RTC_DCHECK if any supported method is called on an
|
||||||
|
// invalid thread.
|
||||||
|
//
|
||||||
|
// Recorded audio will be delivered on a real-time internal I/O thread in the
|
||||||
|
// audio unit. The audio unit will also ask for audio data to play out on this
|
||||||
|
// same thread.
|
||||||
|
class AudioDeviceIOS : public AudioDeviceGeneric,
|
||||||
|
public AudioSessionObserver,
|
||||||
|
public VoiceProcessingAudioUnitObserver,
|
||||||
|
public rtc::MessageHandler {
|
||||||
|
public:
|
||||||
|
AudioDeviceIOS();
|
||||||
|
~AudioDeviceIOS();
|
||||||
|
|
||||||
|
void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
|
||||||
|
|
||||||
|
InitStatus Init() override;
|
||||||
|
int32_t Terminate() override;
|
||||||
|
bool Initialized() const override;
|
||||||
|
|
||||||
|
int32_t InitPlayout() override;
|
||||||
|
bool PlayoutIsInitialized() const override;
|
||||||
|
|
||||||
|
int32_t InitRecording() override;
|
||||||
|
bool RecordingIsInitialized() const override;
|
||||||
|
|
||||||
|
int32_t StartPlayout() override;
|
||||||
|
int32_t StopPlayout() override;
|
||||||
|
bool Playing() const override { return playing_; }
|
||||||
|
|
||||||
|
int32_t StartRecording() override;
|
||||||
|
int32_t StopRecording() override;
|
||||||
|
bool Recording() const override { return recording_; }
|
||||||
|
|
||||||
|
// These methods returns hard-coded delay values and not dynamic delay
|
||||||
|
// estimates. The reason is that iOS supports a built-in AEC and the WebRTC
|
||||||
|
// AEC will always be disabled in the Libjingle layer to avoid running two
|
||||||
|
// AEC implementations at the same time. And, it saves resources to avoid
|
||||||
|
// updating these delay values continuously.
|
||||||
|
// TODO(henrika): it would be possible to mark these two methods as not
|
||||||
|
// implemented since they are only called for A/V-sync purposes today and
|
||||||
|
// A/V-sync is not supported on iOS. However, we avoid adding error messages
|
||||||
|
// the log by using these dummy implementations instead.
|
||||||
|
int32_t PlayoutDelay(uint16_t& delayMS) const override;
|
||||||
|
|
||||||
|
// Native audio parameters stored during construction.
|
||||||
|
// These methods are unique for the iOS implementation.
|
||||||
|
int GetPlayoutAudioParameters(AudioParameters* params) const override;
|
||||||
|
int GetRecordAudioParameters(AudioParameters* params) const override;
|
||||||
|
|
||||||
|
// These methods are currently not fully implemented on iOS:
|
||||||
|
|
||||||
|
// See audio_device_not_implemented.cc for trivial implementations.
|
||||||
|
int32_t ActiveAudioLayer(
|
||||||
|
AudioDeviceModule::AudioLayer& audioLayer) const override;
|
||||||
|
int32_t PlayoutIsAvailable(bool& available) override;
|
||||||
|
int32_t RecordingIsAvailable(bool& available) override;
|
||||||
|
int16_t PlayoutDevices() override;
|
||||||
|
int16_t RecordingDevices() override;
|
||||||
|
int32_t PlayoutDeviceName(uint16_t index,
|
||||||
|
char name[kAdmMaxDeviceNameSize],
|
||||||
|
char guid[kAdmMaxGuidSize]) override;
|
||||||
|
int32_t RecordingDeviceName(uint16_t index,
|
||||||
|
char name[kAdmMaxDeviceNameSize],
|
||||||
|
char guid[kAdmMaxGuidSize]) override;
|
||||||
|
int32_t SetPlayoutDevice(uint16_t index) override;
|
||||||
|
int32_t SetPlayoutDevice(
|
||||||
|
AudioDeviceModule::WindowsDeviceType device) override;
|
||||||
|
int32_t SetRecordingDevice(uint16_t index) override;
|
||||||
|
int32_t SetRecordingDevice(
|
||||||
|
AudioDeviceModule::WindowsDeviceType device) override;
|
||||||
|
int32_t InitSpeaker() override;
|
||||||
|
bool SpeakerIsInitialized() const override;
|
||||||
|
int32_t InitMicrophone() override;
|
||||||
|
bool MicrophoneIsInitialized() const override;
|
||||||
|
int32_t SpeakerVolumeIsAvailable(bool& available) override;
|
||||||
|
int32_t SetSpeakerVolume(uint32_t volume) override;
|
||||||
|
int32_t SpeakerVolume(uint32_t& volume) const override;
|
||||||
|
int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
|
||||||
|
int32_t MinSpeakerVolume(uint32_t& minVolume) const override;
|
||||||
|
int32_t MicrophoneVolumeIsAvailable(bool& available) override;
|
||||||
|
int32_t SetMicrophoneVolume(uint32_t volume) override;
|
||||||
|
int32_t MicrophoneVolume(uint32_t& volume) const override;
|
||||||
|
int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
|
||||||
|
int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;
|
||||||
|
int32_t MicrophoneMuteIsAvailable(bool& available) override;
|
||||||
|
int32_t SetMicrophoneMute(bool enable) override;
|
||||||
|
int32_t MicrophoneMute(bool& enabled) const override;
|
||||||
|
int32_t SpeakerMuteIsAvailable(bool& available) override;
|
||||||
|
int32_t SetSpeakerMute(bool enable) override;
|
||||||
|
int32_t SpeakerMute(bool& enabled) const override;
|
||||||
|
int32_t StereoPlayoutIsAvailable(bool& available) override;
|
||||||
|
int32_t SetStereoPlayout(bool enable) override;
|
||||||
|
int32_t StereoPlayout(bool& enabled) const override;
|
||||||
|
int32_t StereoRecordingIsAvailable(bool& available) override;
|
||||||
|
int32_t SetStereoRecording(bool enable) override;
|
||||||
|
int32_t StereoRecording(bool& enabled) const override;
|
||||||
|
|
||||||
|
// AudioSessionObserver methods. May be called from any thread.
|
||||||
|
void OnInterruptionBegin() override;
|
||||||
|
void OnInterruptionEnd() override;
|
||||||
|
void OnValidRouteChange() override;
|
||||||
|
void OnCanPlayOrRecordChange(bool can_play_or_record) override;
|
||||||
|
void OnChangedOutputVolume() override;
|
||||||
|
|
||||||
|
// VoiceProcessingAudioUnitObserver methods.
|
||||||
|
OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
|
||||||
|
const AudioTimeStamp* time_stamp,
|
||||||
|
UInt32 bus_number,
|
||||||
|
UInt32 num_frames,
|
||||||
|
AudioBufferList* io_data) override;
|
||||||
|
OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
|
||||||
|
const AudioTimeStamp* time_stamp,
|
||||||
|
UInt32 bus_number,
|
||||||
|
UInt32 num_frames,
|
||||||
|
AudioBufferList* io_data) override;
|
||||||
|
|
||||||
|
// Handles messages from posts.
|
||||||
|
void OnMessage(rtc::Message *msg) override;
|
||||||
|
|
||||||
|
bool IsInterrupted();
|
||||||
|
|
||||||
|
private:
|
||||||
|
// Called by the relevant AudioSessionObserver methods on |thread_|.
|
||||||
|
void HandleInterruptionBegin();
|
||||||
|
void HandleInterruptionEnd();
|
||||||
|
void HandleValidRouteChange();
|
||||||
|
void HandleCanPlayOrRecordChange(bool can_play_or_record);
|
||||||
|
void HandleSampleRateChange(float sample_rate);
|
||||||
|
void HandlePlayoutGlitchDetected();
|
||||||
|
void HandleOutputVolumeChange();
|
||||||
|
|
||||||
|
// Uses current |playout_parameters_| and |record_parameters_| to inform the
|
||||||
|
// audio device buffer (ADB) about our internal audio parameters.
|
||||||
|
void UpdateAudioDeviceBuffer();
|
||||||
|
|
||||||
|
// Since the preferred audio parameters are only hints to the OS, the actual
|
||||||
|
// values may be different once the AVAudioSession has been activated.
|
||||||
|
// This method asks for the current hardware parameters and takes actions
|
||||||
|
// if they should differ from what we have asked for initially. It also
|
||||||
|
// defines |playout_parameters_| and |record_parameters_|.
|
||||||
|
void SetupAudioBuffersForActiveAudioSession();
|
||||||
|
|
||||||
|
// Creates the audio unit.
|
||||||
|
bool CreateAudioUnit();
|
||||||
|
|
||||||
|
// Updates the audio unit state based on current state.
|
||||||
|
void UpdateAudioUnit(bool can_play_or_record);
|
||||||
|
|
||||||
|
// Configures the audio session for WebRTC.
|
||||||
|
bool ConfigureAudioSession();
|
||||||
|
// Unconfigures the audio session.
|
||||||
|
void UnconfigureAudioSession();
|
||||||
|
|
||||||
|
// Activates our audio session, creates and initializes the voice-processing
|
||||||
|
// audio unit and verifies that we got the preferred native audio parameters.
|
||||||
|
bool InitPlayOrRecord();
|
||||||
|
|
||||||
|
// Closes and deletes the voice-processing I/O unit.
|
||||||
|
void ShutdownPlayOrRecord();
|
||||||
|
|
||||||
|
// Ensures that methods are called from the same thread as this object is
|
||||||
|
// created on.
|
||||||
|
rtc::ThreadChecker thread_checker_;
|
||||||
|
|
||||||
|
// Native I/O audio thread checker.
|
||||||
|
rtc::ThreadChecker io_thread_checker_;
|
||||||
|
|
||||||
|
// Thread that this object is created on.
|
||||||
|
rtc::Thread* thread_;
|
||||||
|
|
||||||
|
// Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
|
||||||
|
// AudioDeviceModuleImpl class and called by AudioDeviceModule::Create().
|
||||||
|
// The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance
|
||||||
|
// and therefore outlives this object.
|
||||||
|
AudioDeviceBuffer* audio_device_buffer_;
|
||||||
|
|
||||||
|
// Contains audio parameters (sample rate, #channels, buffer size etc.) for
|
||||||
|
// the playout and recording sides. These structure is set in two steps:
|
||||||
|
// first, native sample rate and #channels are defined in Init(). Next, the
|
||||||
|
// audio session is activated and we verify that the preferred parameters
|
||||||
|
// were granted by the OS. At this stage it is also possible to add a third
|
||||||
|
// component to the parameters; the native I/O buffer duration.
|
||||||
|
// A RTC_CHECK will be hit if we for some reason fail to open an audio session
|
||||||
|
// using the specified parameters.
|
||||||
|
AudioParameters playout_parameters_;
|
||||||
|
AudioParameters record_parameters_;
|
||||||
|
|
||||||
|
// The AudioUnit used to play and record audio.
|
||||||
|
std::unique_ptr<VoiceProcessingAudioUnit> audio_unit_;
|
||||||
|
|
||||||
|
// FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
|
||||||
|
// in chunks of 10ms. It then allows for this data to be pulled in
|
||||||
|
// a finer or coarser granularity. I.e. interacting with this class instead
|
||||||
|
// of directly with the AudioDeviceBuffer one can ask for any number of
|
||||||
|
// audio data samples. Is also supports a similar scheme for the recording
|
||||||
|
// side.
|
||||||
|
// Example: native buffer size can be 128 audio frames at 16kHz sample rate.
|
||||||
|
// WebRTC will provide 480 audio frames per 10ms but iOS asks for 128
|
||||||
|
// in each callback (one every 8ms). This class can then ask for 128 and the
|
||||||
|
// FineAudioBuffer will ask WebRTC for new data only when needed and also
|
||||||
|
// cache non-utilized audio between callbacks. On the recording side, iOS
|
||||||
|
// can provide audio data frames of size 128 and these are accumulated until
|
||||||
|
// enough data to supply one 10ms call exists. This 10ms chunk is then sent
|
||||||
|
// to WebRTC and the remaining part is stored.
|
||||||
|
std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
|
||||||
|
|
||||||
|
// Temporary storage for recorded data. AudioUnitRender() renders into this
|
||||||
|
// array as soon as a frame of the desired buffer size has been recorded.
|
||||||
|
// On real iOS devices, the size will be fixed and set once. For iOS
|
||||||
|
// simulators, the size can vary from callback to callback and the size
|
||||||
|
// will be changed dynamically to account for this behavior.
|
||||||
|
rtc::BufferT<int16_t> record_audio_buffer_;
|
||||||
|
|
||||||
|
// Set to 1 when recording is active and 0 otherwise.
|
||||||
|
volatile int recording_;
|
||||||
|
|
||||||
|
// Set to 1 when playout is active and 0 otherwise.
|
||||||
|
volatile int playing_;
|
||||||
|
|
||||||
|
// Set to true after successful call to Init(), false otherwise.
|
||||||
|
bool initialized_ RTC_GUARDED_BY(thread_checker_);
|
||||||
|
|
||||||
|
// Set to true after successful call to InitRecording() or InitPlayout(),
|
||||||
|
// false otherwise.
|
||||||
|
bool audio_is_initialized_;
|
||||||
|
|
||||||
|
// Set to true if audio session is interrupted, false otherwise.
|
||||||
|
bool is_interrupted_;
|
||||||
|
|
||||||
|
// Audio interruption observer instance.
|
||||||
|
RTCNativeAudioSessionDelegateAdapter* audio_session_observer_
|
||||||
|
RTC_GUARDED_BY(thread_checker_);
|
||||||
|
|
||||||
|
// Set to true if we've activated the audio session.
|
||||||
|
bool has_configured_session_ RTC_GUARDED_BY(thread_checker_);
|
||||||
|
|
||||||
|
// Counts number of detected audio glitches on the playout side.
|
||||||
|
int64_t num_detected_playout_glitches_ RTC_GUARDED_BY(thread_checker_);
|
||||||
|
int64_t last_playout_time_ RTC_GUARDED_BY(io_thread_checker_);
|
||||||
|
|
||||||
|
// Counts number of playout callbacks per call.
|
||||||
|
// The value isupdated on the native I/O thread and later read on the
|
||||||
|
// creating thread (see thread_checker_) but at this stage no audio is
|
||||||
|
// active. Hence, it is a "thread safe" design and no lock is needed.
|
||||||
|
int64_t num_playout_callbacks_;
|
||||||
|
|
||||||
|
// Contains the time for when the last output volume change was detected.
|
||||||
|
int64_t last_output_volume_change_time_ RTC_GUARDED_BY(thread_checker_);
|
||||||
|
};
|
||||||
|
} // namespace ios_adm
|
||||||
|
} // namespace webrtc
|
||||||
|
|
||||||
|
#endif // MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_
|
1104
sdk/objc/Framework/Native/src/audio/audio_device_ios.mm
Normal file
1104
sdk/objc/Framework/Native/src/audio/audio_device_ios.mm
Normal file
File diff suppressed because it is too large
Load Diff
143
sdk/objc/Framework/Native/src/audio/audio_device_module_ios.h
Normal file
143
sdk/objc/Framework/Native/src/audio/audio_device_module_ios.h
Normal file
@ -0,0 +1,143 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SDK_IOS_NATIVE_API_AUDIO_DEVICE_MODULE_AUDIO_DEVICE_IOS_H_
|
||||||
|
#define SDK_IOS_NATIVE_API_AUDIO_DEVICE_MODULE_AUDIO_DEVICE_IOS_H_
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
|
||||||
|
#include "audio_device_ios.h"
|
||||||
|
|
||||||
|
#include "modules/audio_device/audio_device_buffer.h"
|
||||||
|
#include "modules/audio_device/include/audio_device.h"
|
||||||
|
#include "rtc_base/checks.h"
|
||||||
|
#include "rtc_base/criticalsection.h"
|
||||||
|
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
|
||||||
|
class AudioDeviceGeneric;
|
||||||
|
|
||||||
|
namespace ios_adm {
|
||||||
|
|
||||||
|
class AudioDeviceModuleIOS : public AudioDeviceModule {
|
||||||
|
|
||||||
|
public:
|
||||||
|
|
||||||
|
int32_t AttachAudioBuffer();
|
||||||
|
|
||||||
|
AudioDeviceModuleIOS();
|
||||||
|
~AudioDeviceModuleIOS() override;
|
||||||
|
|
||||||
|
// Retrieve the currently utilized audio layer
|
||||||
|
int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override;
|
||||||
|
|
||||||
|
// Full-duplex transportation of PCM audio
|
||||||
|
int32_t RegisterAudioCallback(AudioTransport* audioCallback) override;
|
||||||
|
|
||||||
|
// Main initializaton and termination
|
||||||
|
int32_t Init() override;
|
||||||
|
int32_t Terminate() override;
|
||||||
|
bool Initialized() const override;
|
||||||
|
|
||||||
|
// Device enumeration
|
||||||
|
int16_t PlayoutDevices() override;
|
||||||
|
int16_t RecordingDevices() override;
|
||||||
|
int32_t PlayoutDeviceName(uint16_t index,
|
||||||
|
char name[kAdmMaxDeviceNameSize],
|
||||||
|
char guid[kAdmMaxGuidSize]) override;
|
||||||
|
int32_t RecordingDeviceName(uint16_t index,
|
||||||
|
char name[kAdmMaxDeviceNameSize],
|
||||||
|
char guid[kAdmMaxGuidSize]) override;
|
||||||
|
|
||||||
|
// Device selection
|
||||||
|
int32_t SetPlayoutDevice(uint16_t index) override;
|
||||||
|
int32_t SetPlayoutDevice(WindowsDeviceType device) override;
|
||||||
|
int32_t SetRecordingDevice(uint16_t index) override;
|
||||||
|
int32_t SetRecordingDevice(WindowsDeviceType device) override;
|
||||||
|
|
||||||
|
// Audio transport initialization
|
||||||
|
int32_t PlayoutIsAvailable(bool* available) override;
|
||||||
|
int32_t InitPlayout() override;
|
||||||
|
bool PlayoutIsInitialized() const override;
|
||||||
|
int32_t RecordingIsAvailable(bool* available) override;
|
||||||
|
int32_t InitRecording() override;
|
||||||
|
bool RecordingIsInitialized() const override;
|
||||||
|
|
||||||
|
// Audio transport control
|
||||||
|
int32_t StartPlayout() override;
|
||||||
|
int32_t StopPlayout() override;
|
||||||
|
bool Playing() const override;
|
||||||
|
int32_t StartRecording() override;
|
||||||
|
int32_t StopRecording() override;
|
||||||
|
bool Recording() const override;
|
||||||
|
|
||||||
|
// Audio mixer initialization
|
||||||
|
int32_t InitSpeaker() override;
|
||||||
|
bool SpeakerIsInitialized() const override;
|
||||||
|
int32_t InitMicrophone() override;
|
||||||
|
bool MicrophoneIsInitialized() const override;
|
||||||
|
|
||||||
|
// Speaker volume controls
|
||||||
|
int32_t SpeakerVolumeIsAvailable(bool* available) override;
|
||||||
|
int32_t SetSpeakerVolume(uint32_t volume) override;
|
||||||
|
int32_t SpeakerVolume(uint32_t* volume) const override;
|
||||||
|
int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override;
|
||||||
|
int32_t MinSpeakerVolume(uint32_t* minVolume) const override;
|
||||||
|
|
||||||
|
// Microphone volume controls
|
||||||
|
int32_t MicrophoneVolumeIsAvailable(bool* available) override;
|
||||||
|
int32_t SetMicrophoneVolume(uint32_t volume) override;
|
||||||
|
int32_t MicrophoneVolume(uint32_t* volume) const override;
|
||||||
|
int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override;
|
||||||
|
int32_t MinMicrophoneVolume(uint32_t* minVolume) const override;
|
||||||
|
|
||||||
|
// Speaker mute control
|
||||||
|
int32_t SpeakerMuteIsAvailable(bool* available) override;
|
||||||
|
int32_t SetSpeakerMute(bool enable) override;
|
||||||
|
int32_t SpeakerMute(bool* enabled) const override;
|
||||||
|
|
||||||
|
// Microphone mute control
|
||||||
|
int32_t MicrophoneMuteIsAvailable(bool* available) override;
|
||||||
|
int32_t SetMicrophoneMute(bool enable) override;
|
||||||
|
int32_t MicrophoneMute(bool* enabled) const override;
|
||||||
|
|
||||||
|
// Stereo support
|
||||||
|
int32_t StereoPlayoutIsAvailable(bool* available) const override;
|
||||||
|
int32_t SetStereoPlayout(bool enable) override;
|
||||||
|
int32_t StereoPlayout(bool* enabled) const override;
|
||||||
|
int32_t StereoRecordingIsAvailable(bool* available) const override;
|
||||||
|
int32_t SetStereoRecording(bool enable) override;
|
||||||
|
int32_t StereoRecording(bool* enabled) const override;
|
||||||
|
|
||||||
|
// Delay information and control
|
||||||
|
int32_t PlayoutDelay(uint16_t* delayMS) const override;
|
||||||
|
|
||||||
|
bool BuiltInAECIsAvailable() const override;
|
||||||
|
int32_t EnableBuiltInAEC(bool enable) override;
|
||||||
|
bool BuiltInAGCIsAvailable() const override;
|
||||||
|
int32_t EnableBuiltInAGC(bool enable) override;
|
||||||
|
bool BuiltInNSIsAvailable() const override;
|
||||||
|
int32_t EnableBuiltInNS(bool enable) override;
|
||||||
|
|
||||||
|
#if defined(WEBRTC_IOS)
|
||||||
|
int GetPlayoutAudioParameters(AudioParameters* params) const override;
|
||||||
|
int GetRecordAudioParameters(AudioParameters* params) const override;
|
||||||
|
#endif // WEBRTC_IOS
|
||||||
|
private:
|
||||||
|
bool initialized_ = false;
|
||||||
|
std::unique_ptr<AudioDeviceIOS> audio_device_;
|
||||||
|
std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_;
|
||||||
|
|
||||||
|
};
|
||||||
|
} // namespace ios_adm
|
||||||
|
} // namespace webrtc
|
||||||
|
|
||||||
|
#endif // SDK_IOS_NATIVE_API_AUDIO_DEVICE_MODULE_AUDIO_DEVICE_IOS_H_
|
673
sdk/objc/Framework/Native/src/audio/audio_device_module_ios.mm
Normal file
673
sdk/objc/Framework/Native/src/audio/audio_device_module_ios.mm
Normal file
@ -0,0 +1,673 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "audio_device_module_ios.h"
|
||||||
|
|
||||||
|
#include "modules/audio_device/audio_device_config.h"
|
||||||
|
#include "modules/audio_device/audio_device_generic.h"
|
||||||
|
#include "rtc_base/checks.h"
|
||||||
|
#include "rtc_base/logging.h"
|
||||||
|
#include "rtc_base/refcount.h"
|
||||||
|
#include "rtc_base/refcountedobject.h"
|
||||||
|
#include "system_wrappers/include/metrics.h"
|
||||||
|
|
||||||
|
#if defined(WEBRTC_IOS)
|
||||||
|
#include "audio_device_ios.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define CHECKinitialized_() \
|
||||||
|
{ \
|
||||||
|
if (!initialized_) { \
|
||||||
|
return -1; \
|
||||||
|
}; \
|
||||||
|
}
|
||||||
|
|
||||||
|
#define CHECKinitialized__BOOL() \
|
||||||
|
{ \
|
||||||
|
if (!initialized_) { \
|
||||||
|
return false; \
|
||||||
|
}; \
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
namespace ios_adm {
|
||||||
|
|
||||||
|
AudioDeviceModuleIOS::AudioDeviceModuleIOS() {
|
||||||
|
RTC_LOG(INFO) << "current platform is IOS";
|
||||||
|
RTC_LOG(INFO) << "iPhone Audio APIs will be utilized.";
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::AttachAudioBuffer() {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
audio_device_->AttachAudioBuffer(audio_device_buffer_.get());
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
AudioDeviceModuleIOS::~AudioDeviceModuleIOS() {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::ActiveAudioLayer(AudioLayer* audioLayer) const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
AudioLayer activeAudio;
|
||||||
|
if (audio_device_->ActiveAudioLayer(activeAudio) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*audioLayer = activeAudio;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::Init() {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
if (initialized_)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
audio_device_buffer_.reset(new webrtc::AudioDeviceBuffer());
|
||||||
|
audio_device_.reset(new ios_adm::AudioDeviceIOS());
|
||||||
|
RTC_CHECK(audio_device_);
|
||||||
|
|
||||||
|
this->AttachAudioBuffer();
|
||||||
|
|
||||||
|
AudioDeviceGeneric::InitStatus status = audio_device_->Init();
|
||||||
|
RTC_HISTOGRAM_ENUMERATION(
|
||||||
|
"WebRTC.Audio.InitializationResult", static_cast<int>(status),
|
||||||
|
static_cast<int>(AudioDeviceGeneric::InitStatus::NUM_STATUSES));
|
||||||
|
if (status != AudioDeviceGeneric::InitStatus::OK) {
|
||||||
|
RTC_LOG(LS_ERROR) << "Audio device initialization failed.";
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
initialized_ = true;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::Terminate() {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
if (!initialized_)
|
||||||
|
return 0;
|
||||||
|
if (audio_device_->Terminate() == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
initialized_ = false;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool AudioDeviceModuleIOS::Initialized() const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__ << ": " << initialized_;
|
||||||
|
return initialized_;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::InitSpeaker() {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
return audio_device_->InitSpeaker();
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::InitMicrophone() {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
return audio_device_->InitMicrophone();
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::SpeakerVolumeIsAvailable(bool* available) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
bool isAvailable = false;
|
||||||
|
if (audio_device_->SpeakerVolumeIsAvailable(isAvailable) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*available = isAvailable;
|
||||||
|
RTC_LOG(INFO) << "output: " << isAvailable;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::SetSpeakerVolume(uint32_t volume) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__ << "(" << volume << ")";
|
||||||
|
CHECKinitialized_();
|
||||||
|
return audio_device_->SetSpeakerVolume(volume);
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::SpeakerVolume(uint32_t* volume) const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
uint32_t level = 0;
|
||||||
|
if (audio_device_->SpeakerVolume(level) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*volume = level;
|
||||||
|
RTC_LOG(INFO) << "output: " << *volume;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool AudioDeviceModuleIOS::SpeakerIsInitialized() const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized__BOOL();
|
||||||
|
bool isInitialized = audio_device_->SpeakerIsInitialized();
|
||||||
|
RTC_LOG(INFO) << "output: " << isInitialized;
|
||||||
|
return isInitialized;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool AudioDeviceModuleIOS::MicrophoneIsInitialized() const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized__BOOL();
|
||||||
|
bool isInitialized = audio_device_->MicrophoneIsInitialized();
|
||||||
|
RTC_LOG(INFO) << "output: " << isInitialized;
|
||||||
|
return isInitialized;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::MaxSpeakerVolume(uint32_t* maxVolume) const {
|
||||||
|
CHECKinitialized_();
|
||||||
|
uint32_t maxVol = 0;
|
||||||
|
if (audio_device_->MaxSpeakerVolume(maxVol) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*maxVolume = maxVol;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::MinSpeakerVolume(uint32_t* minVolume) const {
|
||||||
|
CHECKinitialized_();
|
||||||
|
uint32_t minVol = 0;
|
||||||
|
if (audio_device_->MinSpeakerVolume(minVol) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*minVolume = minVol;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::SpeakerMuteIsAvailable(bool* available) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
bool isAvailable = false;
|
||||||
|
if (audio_device_->SpeakerMuteIsAvailable(isAvailable) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*available = isAvailable;
|
||||||
|
RTC_LOG(INFO) << "output: " << isAvailable;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::SetSpeakerMute(bool enable) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
|
||||||
|
CHECKinitialized_();
|
||||||
|
return audio_device_->SetSpeakerMute(enable);
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::SpeakerMute(bool* enabled) const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
bool muted = false;
|
||||||
|
if (audio_device_->SpeakerMute(muted) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*enabled = muted;
|
||||||
|
RTC_LOG(INFO) << "output: " << muted;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::MicrophoneMuteIsAvailable(bool* available) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
bool isAvailable = false;
|
||||||
|
if (audio_device_->MicrophoneMuteIsAvailable(isAvailable) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*available = isAvailable;
|
||||||
|
RTC_LOG(INFO) << "output: " << isAvailable;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::SetMicrophoneMute(bool enable) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
|
||||||
|
CHECKinitialized_();
|
||||||
|
return (audio_device_->SetMicrophoneMute(enable));
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::MicrophoneMute(bool* enabled) const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
bool muted = false;
|
||||||
|
if (audio_device_->MicrophoneMute(muted) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*enabled = muted;
|
||||||
|
RTC_LOG(INFO) << "output: " << muted;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::MicrophoneVolumeIsAvailable(bool* available) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
bool isAvailable = false;
|
||||||
|
if (audio_device_->MicrophoneVolumeIsAvailable(isAvailable) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*available = isAvailable;
|
||||||
|
RTC_LOG(INFO) << "output: " << isAvailable;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::SetMicrophoneVolume(uint32_t volume) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__ << "(" << volume << ")";
|
||||||
|
CHECKinitialized_();
|
||||||
|
return (audio_device_->SetMicrophoneVolume(volume));
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::MicrophoneVolume(uint32_t* volume) const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
uint32_t level = 0;
|
||||||
|
if (audio_device_->MicrophoneVolume(level) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*volume = level;
|
||||||
|
RTC_LOG(INFO) << "output: " << *volume;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::StereoRecordingIsAvailable(
|
||||||
|
bool* available) const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
bool isAvailable = false;
|
||||||
|
if (audio_device_->StereoRecordingIsAvailable(isAvailable) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*available = isAvailable;
|
||||||
|
RTC_LOG(INFO) << "output: " << isAvailable;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::SetStereoRecording(bool enable) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
|
||||||
|
CHECKinitialized_();
|
||||||
|
if (audio_device_->RecordingIsInitialized()) {
|
||||||
|
RTC_LOG(WARNING) << "recording in stereo is not supported";
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
if (audio_device_->SetStereoRecording(enable) == -1) {
|
||||||
|
RTC_LOG(WARNING) << "failed to change stereo recording";
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
int8_t nChannels(1);
|
||||||
|
if (enable) {
|
||||||
|
nChannels = 2;
|
||||||
|
}
|
||||||
|
audio_device_buffer_.get()->SetRecordingChannels(nChannels);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::StereoRecording(bool* enabled) const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
bool stereo = false;
|
||||||
|
if (audio_device_->StereoRecording(stereo) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*enabled = stereo;
|
||||||
|
RTC_LOG(INFO) << "output: " << stereo;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::StereoPlayoutIsAvailable(bool* available) const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
bool isAvailable = false;
|
||||||
|
if (audio_device_->StereoPlayoutIsAvailable(isAvailable) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*available = isAvailable;
|
||||||
|
RTC_LOG(INFO) << "output: " << isAvailable;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::SetStereoPlayout(bool enable) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
|
||||||
|
CHECKinitialized_();
|
||||||
|
if (audio_device_->PlayoutIsInitialized()) {
|
||||||
|
RTC_LOG(LERROR)
|
||||||
|
<< "unable to set stereo mode while playing side is initialized";
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
if (audio_device_->SetStereoPlayout(enable)) {
|
||||||
|
RTC_LOG(WARNING) << "stereo playout is not supported";
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
int8_t nChannels(1);
|
||||||
|
if (enable) {
|
||||||
|
nChannels = 2;
|
||||||
|
}
|
||||||
|
audio_device_buffer_.get()->SetPlayoutChannels(nChannels);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::StereoPlayout(bool* enabled) const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
bool stereo = false;
|
||||||
|
if (audio_device_->StereoPlayout(stereo) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*enabled = stereo;
|
||||||
|
RTC_LOG(INFO) << "output: " << stereo;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::PlayoutIsAvailable(bool* available) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
bool isAvailable = false;
|
||||||
|
if (audio_device_->PlayoutIsAvailable(isAvailable) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*available = isAvailable;
|
||||||
|
RTC_LOG(INFO) << "output: " << isAvailable;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::RecordingIsAvailable(bool* available) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
bool isAvailable = false;
|
||||||
|
if (audio_device_->RecordingIsAvailable(isAvailable) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*available = isAvailable;
|
||||||
|
RTC_LOG(INFO) << "output: " << isAvailable;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::MaxMicrophoneVolume(uint32_t* maxVolume) const {
|
||||||
|
CHECKinitialized_();
|
||||||
|
uint32_t maxVol(0);
|
||||||
|
if (audio_device_->MaxMicrophoneVolume(maxVol) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*maxVolume = maxVol;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::MinMicrophoneVolume(uint32_t* minVolume) const {
|
||||||
|
CHECKinitialized_();
|
||||||
|
uint32_t minVol(0);
|
||||||
|
if (audio_device_->MinMicrophoneVolume(minVol) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*minVolume = minVol;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int16_t AudioDeviceModuleIOS::PlayoutDevices() {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
uint16_t nPlayoutDevices = audio_device_->PlayoutDevices();
|
||||||
|
RTC_LOG(INFO) << "output: " << nPlayoutDevices;
|
||||||
|
return (int16_t)(nPlayoutDevices);
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::SetPlayoutDevice(uint16_t index) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ")";
|
||||||
|
CHECKinitialized_();
|
||||||
|
return audio_device_->SetPlayoutDevice(index);
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::SetPlayoutDevice(WindowsDeviceType device) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
return audio_device_->SetPlayoutDevice(device);
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::PlayoutDeviceName(
|
||||||
|
uint16_t index,
|
||||||
|
char name[kAdmMaxDeviceNameSize],
|
||||||
|
char guid[kAdmMaxGuidSize]) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ", ...)";
|
||||||
|
CHECKinitialized_();
|
||||||
|
if (name == NULL) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
if (audio_device_->PlayoutDeviceName(index, name, guid) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
if (name != NULL) {
|
||||||
|
RTC_LOG(INFO) << "output: name = " << name;
|
||||||
|
}
|
||||||
|
if (guid != NULL) {
|
||||||
|
RTC_LOG(INFO) << "output: guid = " << guid;
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::RecordingDeviceName(
|
||||||
|
uint16_t index,
|
||||||
|
char name[kAdmMaxDeviceNameSize],
|
||||||
|
char guid[kAdmMaxGuidSize]) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ", ...)";
|
||||||
|
CHECKinitialized_();
|
||||||
|
if (name == NULL) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
if (audio_device_->RecordingDeviceName(index, name, guid) == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
if (name != NULL) {
|
||||||
|
RTC_LOG(INFO) << "output: name = " << name;
|
||||||
|
}
|
||||||
|
if (guid != NULL) {
|
||||||
|
RTC_LOG(INFO) << "output: guid = " << guid;
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int16_t AudioDeviceModuleIOS::RecordingDevices() {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
uint16_t nRecordingDevices = audio_device_->RecordingDevices();
|
||||||
|
RTC_LOG(INFO) << "output: " << nRecordingDevices;
|
||||||
|
return (int16_t)nRecordingDevices;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::SetRecordingDevice(uint16_t index) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ")";
|
||||||
|
CHECKinitialized_();
|
||||||
|
return audio_device_->SetRecordingDevice(index);
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::SetRecordingDevice(WindowsDeviceType device) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
return audio_device_->SetRecordingDevice(device);
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::InitPlayout() {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
if (PlayoutIsInitialized()) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
int32_t result = audio_device_->InitPlayout();
|
||||||
|
RTC_LOG(INFO) << "output: " << result;
|
||||||
|
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitPlayoutSuccess",
|
||||||
|
static_cast<int>(result == 0));
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::InitRecording() {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
if (RecordingIsInitialized()) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
int32_t result = audio_device_->InitRecording();
|
||||||
|
RTC_LOG(INFO) << "output: " << result;
|
||||||
|
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitRecordingSuccess",
|
||||||
|
static_cast<int>(result == 0));
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool AudioDeviceModuleIOS::PlayoutIsInitialized() const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized__BOOL();
|
||||||
|
return audio_device_->PlayoutIsInitialized();
|
||||||
|
}
|
||||||
|
|
||||||
|
bool AudioDeviceModuleIOS::RecordingIsInitialized() const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized__BOOL();
|
||||||
|
return audio_device_->RecordingIsInitialized();
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::StartPlayout() {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
if (Playing()) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
audio_device_buffer_.get()->StartPlayout();
|
||||||
|
int32_t result = audio_device_->StartPlayout();
|
||||||
|
RTC_LOG(INFO) << "output: " << result;
|
||||||
|
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartPlayoutSuccess",
|
||||||
|
static_cast<int>(result == 0));
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::StopPlayout() {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
int32_t result = audio_device_->StopPlayout();
|
||||||
|
audio_device_buffer_.get()->StopPlayout();
|
||||||
|
RTC_LOG(INFO) << "output: " << result;
|
||||||
|
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopPlayoutSuccess",
|
||||||
|
static_cast<int>(result == 0));
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool AudioDeviceModuleIOS::Playing() const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized__BOOL();
|
||||||
|
return audio_device_->Playing();
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::StartRecording() {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
if (Recording()) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
audio_device_buffer_.get()->StartRecording();
|
||||||
|
int32_t result = audio_device_->StartRecording();
|
||||||
|
RTC_LOG(INFO) << "output: " << result;
|
||||||
|
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartRecordingSuccess",
|
||||||
|
static_cast<int>(result == 0));
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::StopRecording() {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized_();
|
||||||
|
int32_t result = audio_device_->StopRecording();
|
||||||
|
audio_device_buffer_.get()->StopRecording();
|
||||||
|
RTC_LOG(INFO) << "output: " << result;
|
||||||
|
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopRecordingSuccess",
|
||||||
|
static_cast<int>(result == 0));
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool AudioDeviceModuleIOS::Recording() const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized__BOOL();
|
||||||
|
return audio_device_->Recording();
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::RegisterAudioCallback(
|
||||||
|
AudioTransport* audioCallback) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
return audio_device_buffer_.get()->RegisterAudioCallback(audioCallback);
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::PlayoutDelay(uint16_t* delayMS) const {
|
||||||
|
CHECKinitialized_();
|
||||||
|
uint16_t delay = 0;
|
||||||
|
if (audio_device_->PlayoutDelay(delay) == -1) {
|
||||||
|
RTC_LOG(LERROR) << "failed to retrieve the playout delay";
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
*delayMS = delay;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool AudioDeviceModuleIOS::BuiltInAECIsAvailable() const {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__;
|
||||||
|
CHECKinitialized__BOOL();
|
||||||
|
bool isAvailable = audio_device_->BuiltInAECIsAvailable();
|
||||||
|
RTC_LOG(INFO) << "output: " << isAvailable;
|
||||||
|
return isAvailable;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t AudioDeviceModuleIOS::EnableBuiltInAEC(bool enable) {
|
||||||
|
RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
|
||||||
|
CHECKinitialized_();
|
||||||
|
int32_t ok = audio_device_->EnableBuiltInAEC(enable);
|
||||||
|
RTC_LOG(INFO) << "output: " << ok;
|
||||||
|
return ok;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns whether the platform provides built-in automatic gain control.
bool AudioDeviceModuleIOS::BuiltInAGCIsAvailable() const {
  RTC_LOG(INFO) << __FUNCTION__;
  CHECKinitialized__BOOL();
  bool isAvailable = audio_device_->BuiltInAGCIsAvailable();
  RTC_LOG(INFO) << "output: " << isAvailable;
  return isAvailable;
}
|
||||||
|
|
||||||
|
// Enables or disables the platform's built-in automatic gain control.
// Returns the device-level result code (0 on success).
int32_t AudioDeviceModuleIOS::EnableBuiltInAGC(bool enable) {
  RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
  CHECKinitialized_();
  int32_t ok = audio_device_->EnableBuiltInAGC(enable);
  RTC_LOG(INFO) << "output: " << ok;
  return ok;
}
|
||||||
|
|
||||||
|
// Returns whether the platform provides a built-in noise suppressor.
bool AudioDeviceModuleIOS::BuiltInNSIsAvailable() const {
  RTC_LOG(INFO) << __FUNCTION__;
  CHECKinitialized__BOOL();
  bool isAvailable = audio_device_->BuiltInNSIsAvailable();
  RTC_LOG(INFO) << "output: " << isAvailable;
  return isAvailable;
}
|
||||||
|
|
||||||
|
// Enables or disables the platform's built-in noise suppressor.
// Returns the device-level result code (0 on success).
int32_t AudioDeviceModuleIOS::EnableBuiltInNS(bool enable) {
  RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
  CHECKinitialized_();
  int32_t ok = audio_device_->EnableBuiltInNS(enable);
  RTC_LOG(INFO) << "output: " << ok;
  return ok;
}
|
||||||
|
|
||||||
|
#if defined(WEBRTC_IOS)
|
||||||
|
// iOS-only: copies the native playout audio parameters (sample rate,
// channels, frames per buffer) into |params|. Returns the device result code.
int AudioDeviceModuleIOS::GetPlayoutAudioParameters(
    AudioParameters* params) const {
  RTC_LOG(INFO) << __FUNCTION__;
  int r = audio_device_->GetPlayoutAudioParameters(params);
  RTC_LOG(INFO) << "output: " << r;
  return r;
}
|
||||||
|
|
||||||
|
// iOS-only: copies the native recording audio parameters (sample rate,
// channels, frames per buffer) into |params|. Returns the device result code.
int AudioDeviceModuleIOS::GetRecordAudioParameters(
    AudioParameters* params) const {
  RTC_LOG(INFO) << __FUNCTION__;
  int r = audio_device_->GetRecordAudioParameters(params);
  RTC_LOG(INFO) << "output: " << r;
  return r;
}
|
||||||
|
#endif // WEBRTC_IOS
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
42
sdk/objc/Framework/Native/src/audio/audio_session_observer.h
Normal file
42
sdk/objc/Framework/Native/src/audio/audio_session_observer.h
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
/*
 * Copyright 2016 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

// NOTE: this file was forked from modules/audio_device/ios/. The include
// guard is updated to reflect the new path under sdk/objc/ so it cannot
// collide with the original file's guard during the migration.
#ifndef SDK_OBJC_FRAMEWORK_NATIVE_SRC_AUDIO_AUDIO_SESSION_OBSERVER_H_
#define SDK_OBJC_FRAMEWORK_NATIVE_SRC_AUDIO_AUDIO_SESSION_OBSERVER_H_

#include "rtc_base/asyncinvoker.h"
#include "rtc_base/thread.h"

namespace webrtc {

// Observer interface for listening to AVAudioSession events.
// Implemented by the iOS audio device; callbacks are driven by
// RTCAudioSession notifications.
class AudioSessionObserver {
 public:
  // Called when audio session interruption begins.
  virtual void OnInterruptionBegin() = 0;

  // Called when audio session interruption ends.
  virtual void OnInterruptionEnd() = 0;

  // Called when audio route changes.
  virtual void OnValidRouteChange() = 0;

  // Called when the ability to play or record changes.
  virtual void OnCanPlayOrRecordChange(bool can_play_or_record) = 0;

  // Called when the audio session output volume changes.
  virtual void OnChangedOutputVolume() = 0;

 protected:
  // Non-virtual-destructor-style interface: instances are never deleted
  // through this base pointer.
  virtual ~AudioSessionObserver() {}
};

}  // namespace webrtc

#endif  // SDK_OBJC_FRAMEWORK_NATIVE_SRC_AUDIO_AUDIO_SESSION_OBSERVER_H_
|
@ -0,0 +1,139 @@
|
|||||||
|
/*
 * Copyright 2016 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

// NOTE: this file was forked from modules/audio_device/ios/. The include
// guard is updated to reflect the new path under sdk/objc/ so it cannot
// collide with the original file's guard during the migration.
#ifndef SDK_OBJC_FRAMEWORK_NATIVE_SRC_AUDIO_VOICE_PROCESSING_AUDIO_UNIT_H_
#define SDK_OBJC_FRAMEWORK_NATIVE_SRC_AUDIO_VOICE_PROCESSING_AUDIO_UNIT_H_

#include <AudioUnit/AudioUnit.h>

namespace webrtc {
namespace ios_adm {

// Receives real-time audio callbacks from a VoiceProcessingAudioUnit.
class VoiceProcessingAudioUnitObserver {
 public:
  // Callback function called on a real-time priority I/O thread from the audio
  // unit. This method is used to signal that recorded audio is available.
  virtual OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
                                         const AudioTimeStamp* time_stamp,
                                         UInt32 bus_number,
                                         UInt32 num_frames,
                                         AudioBufferList* io_data) = 0;

  // Callback function called on a real-time priority I/O thread from the audio
  // unit. This method is used to provide audio samples to the audio unit.
  virtual OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* io_action_flags,
                                    const AudioTimeStamp* time_stamp,
                                    UInt32 bus_number,
                                    UInt32 num_frames,
                                    AudioBufferList* io_data) = 0;

 protected:
  // Observers are never deleted through this interface pointer.
  ~VoiceProcessingAudioUnitObserver() {}
};

// Convenience class to abstract away the management of a Voice Processing
// I/O Audio Unit. The Voice Processing I/O unit has the same characteristics
// as the Remote I/O unit (supports full duplex low-latency audio input and
// output) and adds AEC for two-way duplex communication. It also adds AGC,
// adjustment of voice-processing quality, and muting. Hence, ideal for
// VoIP applications.
class VoiceProcessingAudioUnit {
 public:
  explicit VoiceProcessingAudioUnit(VoiceProcessingAudioUnitObserver* observer);
  ~VoiceProcessingAudioUnit();

  // TODO(tkchin): enum for state and state checking.
  enum State : int32_t {
    // Init() should be called.
    kInitRequired,
    // Audio unit created but not initialized.
    kUninitialized,
    // Initialized but not started. Equivalent to stopped.
    kInitialized,
    // Initialized and started.
    kStarted,
  };

  // Number of bytes per audio sample for 16-bit signed integer representation.
  static const UInt32 kBytesPerSample;

  // Initializes this class by creating the underlying audio unit instance.
  // Creates a Voice-Processing I/O unit and configures it for full-duplex
  // audio. The selected stream format is selected to avoid internal resampling
  // and to match the 10ms callback rate for WebRTC as well as possible.
  // Does not initialize the audio unit.
  bool Init();

  // Returns the current lifecycle state of the audio unit.
  VoiceProcessingAudioUnit::State GetState() const;

  // Initializes the underlying audio unit with the given sample rate.
  bool Initialize(Float64 sample_rate);

  // Starts the underlying audio unit.
  bool Start();

  // Stops the underlying audio unit.
  bool Stop();

  // Uninitializes the underlying audio unit.
  bool Uninitialize();

  // Calls render on the underlying audio unit.
  OSStatus Render(AudioUnitRenderActionFlags* flags,
                  const AudioTimeStamp* time_stamp,
                  UInt32 output_bus_number,
                  UInt32 num_frames,
                  AudioBufferList* io_data);

 private:
  // The C API used to set callbacks requires static functions. When these are
  // called, they will invoke the relevant instance method by casting
  // in_ref_con to VoiceProcessingAudioUnit*.
  static OSStatus OnGetPlayoutData(void* in_ref_con,
                                   AudioUnitRenderActionFlags* flags,
                                   const AudioTimeStamp* time_stamp,
                                   UInt32 bus_number,
                                   UInt32 num_frames,
                                   AudioBufferList* io_data);
  static OSStatus OnDeliverRecordedData(void* in_ref_con,
                                        AudioUnitRenderActionFlags* flags,
                                        const AudioTimeStamp* time_stamp,
                                        UInt32 bus_number,
                                        UInt32 num_frames,
                                        AudioBufferList* io_data);

  // Notifies observer that samples are needed for playback.
  OSStatus NotifyGetPlayoutData(AudioUnitRenderActionFlags* flags,
                                const AudioTimeStamp* time_stamp,
                                UInt32 bus_number,
                                UInt32 num_frames,
                                AudioBufferList* io_data);
  // Notifies observer that recorded samples are available for render.
  OSStatus NotifyDeliverRecordedData(AudioUnitRenderActionFlags* flags,
                                     const AudioTimeStamp* time_stamp,
                                     UInt32 bus_number,
                                     UInt32 num_frames,
                                     AudioBufferList* io_data);

  // Returns the predetermined format with a specific sample rate. See
  // implementation file for details on format.
  AudioStreamBasicDescription GetFormat(Float64 sample_rate) const;

  // Deletes the underlying audio unit.
  void DisposeAudioUnit();

  VoiceProcessingAudioUnitObserver* observer_;
  AudioUnit vpio_unit_;
  VoiceProcessingAudioUnit::State state_;
};

}  // namespace ios_adm
}  // namespace webrtc

#endif  // SDK_OBJC_FRAMEWORK_NATIVE_SRC_AUDIO_VOICE_PROCESSING_AUDIO_UNIT_H_
|
@ -0,0 +1,470 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2016 The WebRTC Project Authors. All rights reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#import "voice_processing_audio_unit.h"
|
||||||
|
|
||||||
|
#include "rtc_base/checks.h"
|
||||||
|
#include "rtc_base/system/fallthrough.h"
|
||||||
|
#include "system_wrappers/include/metrics.h"
|
||||||
|
|
||||||
|
#import "WebRTC/RTCLogging.h"
|
||||||
|
#import "sdk/objc/Framework/Headers/WebRTC/RTCAudioSessionConfiguration.h"
|
||||||
|
|
||||||
|
#if !defined(NDEBUG)
// Debug-only helper that pretty-prints an AudioStreamBasicDescription.
// The four-character format ID is byte-swapped to big-endian and copied
// into a NUL-terminated buffer so it reads as text (e.g. "lpcm").
static void LogStreamDescription(AudioStreamBasicDescription description) {
  char formatIdString[5];
  UInt32 formatId = CFSwapInt32HostToBig(description.mFormatID);
  bcopy(&formatId, formatIdString, 4);
  formatIdString[4] = '\0';
  RTCLog(@"AudioStreamBasicDescription: {\n"
          "  mSampleRate: %.2f\n"
          "  formatIDString: %s\n"
          "  mFormatFlags: 0x%X\n"
          "  mBytesPerPacket: %u\n"
          "  mFramesPerPacket: %u\n"
          "  mBytesPerFrame: %u\n"
          "  mChannelsPerFrame: %u\n"
          "  mBitsPerChannel: %u\n"
          "  mReserved: %u\n}",
         description.mSampleRate, formatIdString,
         static_cast<unsigned int>(description.mFormatFlags),
         static_cast<unsigned int>(description.mBytesPerPacket),
         static_cast<unsigned int>(description.mFramesPerPacket),
         static_cast<unsigned int>(description.mBytesPerFrame),
         static_cast<unsigned int>(description.mChannelsPerFrame),
         static_cast<unsigned int>(description.mBitsPerChannel),
         static_cast<unsigned int>(description.mReserved));
}
#endif
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
namespace ios_adm {
|
||||||
|
|
||||||
|
// Calls to AudioUnitInitialize() can fail if called back-to-back on different
// ADM instances. A fall-back solution is to allow multiple sequential calls
// with as small delay between each. This factor sets the max number of allowed
// initialization attempts.
static const int kMaxNumberOfAudioUnitInitializeAttempts = 5;
// A VP I/O unit's bus 1 connects to input hardware (microphone).
static const AudioUnitElement kInputBus = 1;
// A VP I/O unit's bus 0 connects to output hardware (speaker).
static const AudioUnitElement kOutputBus = 0;
|
||||||
|
|
||||||
|
// Returns the automatic gain control (AGC) state on the processed microphone
// signal. Should be on by default for Voice Processing audio units.
// On success, |*enabled| holds the AGC state; on failure it is not updated.
static OSStatus GetAGCState(AudioUnit audio_unit, UInt32* enabled) {
  RTC_DCHECK(audio_unit);
  UInt32 size = sizeof(*enabled);
  OSStatus result = AudioUnitGetProperty(audio_unit,
                                         kAUVoiceIOProperty_VoiceProcessingEnableAGC,
                                         kAudioUnitScope_Global,
                                         kInputBus,
                                         enabled,
                                         &size);
  // Only log the AGC state when the property read succeeded; on failure
  // |*enabled| holds a stale value and logging it would be misleading.
  if (result == noErr) {
    RTCLog(@"VPIO unit AGC: %u", static_cast<unsigned int>(*enabled));
  }
  return result;
}
|
||||||
|
|
||||||
|
// Stores the (required) observer and starts in kInitRequired; the actual
// audio unit is not created until Init() is called.
VoiceProcessingAudioUnit::VoiceProcessingAudioUnit(
    VoiceProcessingAudioUnitObserver* observer)
    : observer_(observer), vpio_unit_(nullptr), state_(kInitRequired) {
  RTC_DCHECK(observer);
}

// Stops, uninitializes and disposes of the underlying audio unit if needed.
VoiceProcessingAudioUnit::~VoiceProcessingAudioUnit() {
  DisposeAudioUnit();
}

// 16-bit signed integer samples => 2 bytes per sample.
const UInt32 VoiceProcessingAudioUnit::kBytesPerSample = 2;
|
||||||
|
|
||||||
|
// Creates the Voice Processing I/O audio unit and wires up I/O enabling and
// the render/record callbacks. On any failure the partially-configured unit
// is disposed and false is returned. On success the state advances from
// kInitRequired to kUninitialized (Initialize() must still be called).
bool VoiceProcessingAudioUnit::Init() {
  RTC_DCHECK_EQ(state_, kInitRequired);

  // Create an audio component description to identify the Voice Processing
  // I/O audio unit.
  AudioComponentDescription vpio_unit_description;
  vpio_unit_description.componentType = kAudioUnitType_Output;
  vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
  vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple;
  vpio_unit_description.componentFlags = 0;
  vpio_unit_description.componentFlagsMask = 0;

  // Obtain an audio unit instance given the description.
  AudioComponent found_vpio_unit_ref =
      AudioComponentFindNext(nullptr, &vpio_unit_description);

  // Create a Voice Processing IO audio unit.
  OSStatus result = noErr;
  result = AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_);
  if (result != noErr) {
    vpio_unit_ = nullptr;
    RTCLogError(@"AudioComponentInstanceNew failed. Error=%ld.", (long)result);
    return false;
  }

  // Enable input on the input scope of the input element.
  UInt32 enable_input = 1;
  result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
                                kAudioUnitScope_Input, kInputBus, &enable_input,
                                sizeof(enable_input));
  if (result != noErr) {
    DisposeAudioUnit();
    RTCLogError(@"Failed to enable input on input scope of input element. "
                 "Error=%ld.",
                (long)result);
    return false;
  }

  // Enable output on the output scope of the output element.
  UInt32 enable_output = 1;
  result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
                                kAudioUnitScope_Output, kOutputBus,
                                &enable_output, sizeof(enable_output));
  if (result != noErr) {
    DisposeAudioUnit();
    RTCLogError(@"Failed to enable output on output scope of output element. "
                 "Error=%ld.",
                (long)result);
    return false;
  }

  // Specify the callback function that provides audio samples to the audio
  // unit.
  AURenderCallbackStruct render_callback;
  render_callback.inputProc = OnGetPlayoutData;
  render_callback.inputProcRefCon = this;
  result = AudioUnitSetProperty(
      vpio_unit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input,
      kOutputBus, &render_callback, sizeof(render_callback));
  if (result != noErr) {
    DisposeAudioUnit();
    RTCLogError(@"Failed to specify the render callback on the output bus. "
                 "Error=%ld.",
                (long)result);
    return false;
  }

  // Disable AU buffer allocation for the recorder, we allocate our own.
  // TODO(henrika): not sure that it actually saves resource to make this call.
  UInt32 flag = 0;
  result = AudioUnitSetProperty(
      vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer,
      kAudioUnitScope_Output, kInputBus, &flag, sizeof(flag));
  if (result != noErr) {
    DisposeAudioUnit();
    RTCLogError(@"Failed to disable buffer allocation on the input bus. "
                 "Error=%ld.",
                (long)result);
    return false;
  }

  // Specify the callback to be called by the I/O thread to us when input audio
  // is available. The recorded samples can then be obtained by calling the
  // AudioUnitRender() method.
  AURenderCallbackStruct input_callback;
  input_callback.inputProc = OnDeliverRecordedData;
  input_callback.inputProcRefCon = this;
  result = AudioUnitSetProperty(vpio_unit_,
                                kAudioOutputUnitProperty_SetInputCallback,
                                kAudioUnitScope_Global, kInputBus,
                                &input_callback, sizeof(input_callback));
  if (result != noErr) {
    DisposeAudioUnit();
    RTCLogError(@"Failed to specify the input callback on the input bus. "
                 "Error=%ld.",
                (long)result);
    return false;
  }

  state_ = kUninitialized;
  return true;
}
|
||||||
|
|
||||||
|
// Returns the current lifecycle state of the audio unit.
VoiceProcessingAudioUnit::State VoiceProcessingAudioUnit::GetState() const {
  return state_;
}
|
||||||
|
|
||||||
|
// Sets the stream format on both I/O buses and initializes the audio unit,
// retrying initialization with short sleeps to work around a known
// back-to-back AudioUnitInitialize() failure (webrtc:5166). Also verifies
// (and if needed explicitly enables) the built-in AGC, reporting the
// observed states via UMA histograms. On success the state advances to
// kInitialized.
bool VoiceProcessingAudioUnit::Initialize(Float64 sample_rate) {
  RTC_DCHECK_GE(state_, kUninitialized);
  RTCLog(@"Initializing audio unit with sample rate: %f", sample_rate);

  OSStatus result = noErr;
  AudioStreamBasicDescription format = GetFormat(sample_rate);
  UInt32 size = sizeof(format);
#if !defined(NDEBUG)
  LogStreamDescription(format);
#endif

  // Set the format on the output scope of the input element/bus.
  result =
      AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Output, kInputBus, &format, size);
  if (result != noErr) {
    RTCLogError(@"Failed to set format on output scope of input bus. "
                 "Error=%ld.",
                (long)result);
    return false;
  }

  // Set the format on the input scope of the output element/bus.
  result =
      AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input, kOutputBus, &format, size);
  if (result != noErr) {
    RTCLogError(@"Failed to set format on input scope of output bus. "
                 "Error=%ld.",
                (long)result);
    return false;
  }

  // Initialize the Voice Processing I/O unit instance.
  // Calls to AudioUnitInitialize() can fail if called back-to-back on
  // different ADM instances. The error message in this case is -66635 which is
  // undocumented. Tests have shown that calling AudioUnitInitialize a second
  // time, after a short sleep, avoids this issue.
  // See webrtc:5166 for details.
  int failed_initalize_attempts = 0;
  result = AudioUnitInitialize(vpio_unit_);
  while (result != noErr) {
    RTCLogError(@"Failed to initialize the Voice Processing I/O unit. "
                 "Error=%ld.",
                (long)result);
    ++failed_initalize_attempts;
    if (failed_initalize_attempts == kMaxNumberOfAudioUnitInitializeAttempts) {
      // Max number of initialization attempts exceeded, hence abort.
      RTCLogError(@"Too many initialization attempts.");
      return false;
    }
    RTCLog(@"Pause 100ms and try audio unit initialization again...");
    [NSThread sleepForTimeInterval:0.1f];
    result = AudioUnitInitialize(vpio_unit_);
  }
  if (result == noErr) {
    RTCLog(@"Voice Processing I/O unit is now initialized.");
  }

  // AGC should be enabled by default for Voice Processing I/O units but it is
  // checked below and enabled explicitly if needed. This scheme is used
  // to be absolutely sure that the AGC is enabled since we have seen cases
  // where only zeros are recorded and a disabled AGC could be one of the
  // reasons why it happens.
  int agc_was_enabled_by_default = 0;
  UInt32 agc_is_enabled = 0;
  result = GetAGCState(vpio_unit_, &agc_is_enabled);
  if (result != noErr) {
    RTCLogError(@"Failed to get AGC state (1st attempt). "
                 "Error=%ld.",
                (long)result);
    // Example of error code: kAudioUnitErr_NoConnection (-10876).
    // All error codes related to audio units are negative and are therefore
    // converted into a postive value to match the UMA APIs.
    RTC_HISTOGRAM_COUNTS_SPARSE_100000(
        "WebRTC.Audio.GetAGCStateErrorCode1", (-1) * result);
  } else if (agc_is_enabled) {
    // Remember that the AGC was enabled by default. Will be used in UMA.
    agc_was_enabled_by_default = 1;
  } else {
    // AGC was initially disabled => try to enable it explicitly.
    UInt32 enable_agc = 1;
    result =
        AudioUnitSetProperty(vpio_unit_,
                             kAUVoiceIOProperty_VoiceProcessingEnableAGC,
                             kAudioUnitScope_Global, kInputBus, &enable_agc,
                             sizeof(enable_agc));
    if (result != noErr) {
      RTCLogError(@"Failed to enable the built-in AGC. "
                   "Error=%ld.",
                  (long)result);
      RTC_HISTOGRAM_COUNTS_SPARSE_100000(
          "WebRTC.Audio.SetAGCStateErrorCode", (-1) * result);
    }
    result = GetAGCState(vpio_unit_, &agc_is_enabled);
    if (result != noErr) {
      RTCLogError(@"Failed to get AGC state (2nd attempt). "
                   "Error=%ld.",
                  (long)result);
      RTC_HISTOGRAM_COUNTS_SPARSE_100000(
          "WebRTC.Audio.GetAGCStateErrorCode2", (-1) * result);
    }
  }

  // Track if the built-in AGC was enabled by default (as it should) or not.
  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.BuiltInAGCWasEnabledByDefault",
                        agc_was_enabled_by_default);
  RTCLog(@"WebRTC.Audio.BuiltInAGCWasEnabledByDefault: %d",
         agc_was_enabled_by_default);
  // As a final step, add an UMA histogram for tracking the AGC state.
  // At this stage, the AGC should be enabled, and if it is not, more work is
  // needed to find out the root cause.
  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.BuiltInAGCIsEnabled", agc_is_enabled);
  RTCLog(@"WebRTC.Audio.BuiltInAGCIsEnabled: %u",
         static_cast<unsigned int>(agc_is_enabled));

  state_ = kInitialized;
  return true;
}
|
||||||
|
|
||||||
|
// Starts the audio unit's I/O. On success the state advances to kStarted.
bool VoiceProcessingAudioUnit::Start() {
  RTC_DCHECK_GE(state_, kUninitialized);
  RTCLog(@"Starting audio unit.");

  OSStatus result = AudioOutputUnitStart(vpio_unit_);
  if (result != noErr) {
    RTCLogError(@"Failed to start audio unit. Error=%ld", (long)result);
    return false;
  } else {
    RTCLog(@"Started audio unit");
  }
  state_ = kStarted;
  return true;
}
|
||||||
|
|
||||||
|
// Stops the audio unit's I/O. On success the state returns to kInitialized
// (stopped is equivalent to initialized-but-not-started).
bool VoiceProcessingAudioUnit::Stop() {
  RTC_DCHECK_GE(state_, kUninitialized);
  RTCLog(@"Stopping audio unit.");

  OSStatus result = AudioOutputUnitStop(vpio_unit_);
  if (result != noErr) {
    RTCLogError(@"Failed to stop audio unit. Error=%ld", (long)result);
    return false;
  } else {
    RTCLog(@"Stopped audio unit");
  }

  state_ = kInitialized;
  return true;
}
|
||||||
|
|
||||||
|
// Uninitializes the audio unit, returning the state to kUninitialized so
// Initialize() may be called again (e.g. after a sample-rate change).
bool VoiceProcessingAudioUnit::Uninitialize() {
  RTC_DCHECK_GE(state_, kUninitialized);
  RTCLog(@"Unintializing audio unit.");

  OSStatus result = AudioUnitUninitialize(vpio_unit_);
  if (result != noErr) {
    RTCLogError(@"Failed to uninitialize audio unit. Error=%ld", (long)result);
    return false;
  } else {
    RTCLog(@"Uninitialized audio unit.");
  }

  state_ = kUninitialized;
  return true;
}
|
||||||
|
|
||||||
|
// Pulls recorded audio out of the audio unit into |io_data|. Called from the
// real-time I/O thread in response to the input callback. Returns the
// AudioUnitRender() status code.
OSStatus VoiceProcessingAudioUnit::Render(AudioUnitRenderActionFlags* flags,
                                          const AudioTimeStamp* time_stamp,
                                          UInt32 output_bus_number,
                                          UInt32 num_frames,
                                          AudioBufferList* io_data) {
  RTC_DCHECK(vpio_unit_) << "Init() not called.";

  OSStatus result = AudioUnitRender(vpio_unit_, flags, time_stamp,
                                    output_bus_number, num_frames, io_data);
  if (result != noErr) {
    RTCLogError(@"Failed to render audio unit. Error=%ld", (long)result);
  }
  return result;
}
|
||||||
|
|
||||||
|
// Static C trampoline for the render callback: recovers the instance from
// |in_ref_con| (set in Init()) and forwards to NotifyGetPlayoutData().
// Runs on the real-time I/O thread.
OSStatus VoiceProcessingAudioUnit::OnGetPlayoutData(
    void* in_ref_con,
    AudioUnitRenderActionFlags* flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 num_frames,
    AudioBufferList* io_data) {
  VoiceProcessingAudioUnit* audio_unit =
      static_cast<VoiceProcessingAudioUnit*>(in_ref_con);
  return audio_unit->NotifyGetPlayoutData(flags, time_stamp, bus_number,
                                          num_frames, io_data);
}
|
||||||
|
|
||||||
|
// Static C trampoline for the input callback: recovers the instance from
// |in_ref_con| (set in Init()) and forwards to NotifyDeliverRecordedData().
// Runs on the real-time I/O thread.
OSStatus VoiceProcessingAudioUnit::OnDeliverRecordedData(
    void* in_ref_con,
    AudioUnitRenderActionFlags* flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 num_frames,
    AudioBufferList* io_data) {
  VoiceProcessingAudioUnit* audio_unit =
      static_cast<VoiceProcessingAudioUnit*>(in_ref_con);
  return audio_unit->NotifyDeliverRecordedData(flags, time_stamp, bus_number,
                                               num_frames, io_data);
}
|
||||||
|
|
||||||
|
// Forwards a playout-data request from the audio unit to the observer.
OSStatus VoiceProcessingAudioUnit::NotifyGetPlayoutData(
    AudioUnitRenderActionFlags* flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 num_frames,
    AudioBufferList* io_data) {
  return observer_->OnGetPlayoutData(flags, time_stamp, bus_number, num_frames,
                                     io_data);
}
|
||||||
|
|
||||||
|
// Forwards a recorded-data-available notification from the audio unit to the
// observer.
OSStatus VoiceProcessingAudioUnit::NotifyDeliverRecordedData(
    AudioUnitRenderActionFlags* flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 num_frames,
    AudioBufferList* io_data) {
  return observer_->OnDeliverRecordedData(flags, time_stamp, bus_number,
                                          num_frames, io_data);
}
|
||||||
|
|
||||||
|
// Builds the canonical mono 16-bit linear PCM stream description used for
// both I/O buses at the given sample rate.
AudioStreamBasicDescription VoiceProcessingAudioUnit::GetFormat(
    Float64 sample_rate) const {
  // Set the application formats for input and output:
  // - use same format in both directions
  // - avoid resampling in the I/O unit by using the hardware sample rate
  // - linear PCM => noncompressed audio data format with one frame per packet
  // - no need to specify interleaving since only mono is supported
  AudioStreamBasicDescription format;
  RTC_DCHECK_EQ(1, kRTCAudioSessionPreferredNumberOfChannels);
  format.mSampleRate = sample_rate;
  format.mFormatID = kAudioFormatLinearPCM;
  format.mFormatFlags =
      kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
  format.mBytesPerPacket = kBytesPerSample;
  format.mFramesPerPacket = 1;  // uncompressed.
  format.mBytesPerFrame = kBytesPerSample;
  format.mChannelsPerFrame = kRTCAudioSessionPreferredNumberOfChannels;
  format.mBitsPerChannel = 8 * kBytesPerSample;
  return format;
}
|
||||||
|
|
||||||
|
// Tears the audio unit all the way down from whatever state it is in:
// kStarted is first stopped, then (falling through) kInitialized is
// uninitialized, and finally the component instance is disposed.
// Safe to call when no unit exists (no-op).
void VoiceProcessingAudioUnit::DisposeAudioUnit() {
  if (vpio_unit_) {
    switch (state_) {
      case kStarted:
        Stop();
        // Fall through.
        RTC_FALLTHROUGH();
      case kInitialized:
        Uninitialize();
        break;
      case kUninitialized:
        RTC_FALLTHROUGH();
      case kInitRequired:
        break;
    }

    RTCLog(@"Disposing audio unit.");
    OSStatus result = AudioComponentInstanceDispose(vpio_unit_);
    if (result != noErr) {
      RTCLogError(@"AudioComponentInstanceDispose failed. Error=%ld.",
                  (long)result);
    }
    vpio_unit_ = nullptr;
  }
}
|
||||||
|
|
||||||
|
} // namespace ios_adm
|
||||||
|
} // namespace webrtc
|
592
sdk/objc/Framework/UnitTests/RTCAudioDeviceModule_xctest.mm
Normal file
592
sdk/objc/Framework/UnitTests/RTCAudioDeviceModule_xctest.mm
Normal file
@ -0,0 +1,592 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2018 The WebRTC project authors. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#import <XCTest/XCTest.h>
|
||||||
|
|
||||||
|
#if defined(WEBRTC_IOS)
|
||||||
|
#import "sdk/objc/Framework/Native/api/audio_device_module.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include "system_wrappers/include/event_wrapper.h"
|
||||||
|
|
||||||
|
#include "rtc_base/scoped_ref_ptr.h"
|
||||||
|
|
||||||
|
// Block type mirroring webrtc::AudioTransport::NeedMorePlayData, so each
// XCTest can supply its playout handling inline as an Objective-C block.
typedef int32_t(^NeedMorePlayDataBlock)(const size_t nSamples,
                                        const size_t nBytesPerSample,
                                        const size_t nChannels,
                                        const uint32_t samplesPerSec,
                                        void* audioSamples,
                                        size_t& nSamplesOut,
                                        int64_t* elapsed_time_ms,
                                        int64_t* ntp_time_ms);

// Block type mirroring webrtc::AudioTransport::RecordedDataIsAvailable, so
// each XCTest can supply its recording handling inline as a block.
typedef int32_t(^RecordedDataIsAvailableBlock)(const void* audioSamples,
                                               const size_t nSamples,
                                               const size_t nBytesPerSample,
                                               const size_t nChannels,
                                               const uint32_t samplesPerSec,
                                               const uint32_t totalDelayMS,
                                               const int32_t clockDrift,
                                               const uint32_t currentMicLevel,
                                               const bool keyPressed,
                                               uint32_t& newMicLevel);
|
||||||
|
|
||||||
|
|
||||||
|
// This class implements the AudioTransport API and forwards all methods to the appropriate blocks.
|
||||||
|
class MockAudioTransport : public webrtc::AudioTransport {
|
||||||
|
public:
|
||||||
|
MockAudioTransport() {}
|
||||||
|
~MockAudioTransport() {}
|
||||||
|
|
||||||
|
void expectNeedMorePlayData(NeedMorePlayDataBlock block) {
|
||||||
|
needMorePlayDataBlock = block;
|
||||||
|
}
|
||||||
|
|
||||||
|
void expectRecordedDataIsAvailable(RecordedDataIsAvailableBlock block) {
|
||||||
|
recordedDataIsAvailableBlock = block;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t NeedMorePlayData(const size_t nSamples,
|
||||||
|
const size_t nBytesPerSample,
|
||||||
|
const size_t nChannels,
|
||||||
|
const uint32_t samplesPerSec,
|
||||||
|
void* audioSamples,
|
||||||
|
size_t& nSamplesOut,
|
||||||
|
int64_t* elapsed_time_ms,
|
||||||
|
int64_t* ntp_time_ms) {
|
||||||
|
return needMorePlayDataBlock(nSamples,
|
||||||
|
nBytesPerSample,
|
||||||
|
nChannels,
|
||||||
|
samplesPerSec,
|
||||||
|
audioSamples,
|
||||||
|
nSamplesOut,
|
||||||
|
elapsed_time_ms,
|
||||||
|
ntp_time_ms);
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t RecordedDataIsAvailable(const void* audioSamples,
|
||||||
|
const size_t nSamples,
|
||||||
|
const size_t nBytesPerSample,
|
||||||
|
const size_t nChannels,
|
||||||
|
const uint32_t samplesPerSec,
|
||||||
|
const uint32_t totalDelayMS,
|
||||||
|
const int32_t clockDrift,
|
||||||
|
const uint32_t currentMicLevel,
|
||||||
|
const bool keyPressed,
|
||||||
|
uint32_t& newMicLevel) {
|
||||||
|
return recordedDataIsAvailableBlock(audioSamples,
|
||||||
|
nSamples,
|
||||||
|
nBytesPerSample,
|
||||||
|
nChannels,
|
||||||
|
samplesPerSec,
|
||||||
|
totalDelayMS,
|
||||||
|
clockDrift,
|
||||||
|
currentMicLevel,
|
||||||
|
keyPressed,
|
||||||
|
newMicLevel);
|
||||||
|
}
|
||||||
|
|
||||||
|
void PullRenderData(int bits_per_sample,
|
||||||
|
int sample_rate,
|
||||||
|
size_t number_of_channels,
|
||||||
|
size_t number_of_frames,
|
||||||
|
void* audio_data,
|
||||||
|
int64_t* elapsed_time_ms,
|
||||||
|
int64_t* ntp_time_ms) {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
NeedMorePlayDataBlock needMorePlayDataBlock;
|
||||||
|
RecordedDataIsAvailableBlock recordedDataIsAvailableBlock;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Number of callbacks (input or output) the tests waits for before we set
|
||||||
|
// an event indicating that the test was OK.
|
||||||
|
static const NSUInteger kNumCallbacks = 10;
|
||||||
|
// Max amount of time we wait for an event to be set while counting callbacks.
|
||||||
|
static const NSTimeInterval kTestTimeOutInSec = 20.0;
|
||||||
|
// Number of bits per PCM audio sample.
|
||||||
|
static const NSUInteger kBitsPerSample = 16;
|
||||||
|
// Number of bytes per PCM audio sample.
|
||||||
|
static const NSUInteger kBytesPerSample = kBitsPerSample / 8;
|
||||||
|
// Average number of audio callbacks per second assuming 10ms packet size.
|
||||||
|
static const NSUInteger kNumCallbacksPerSecond = 100;
|
||||||
|
// Play out a test file during this time (unit is in seconds).
|
||||||
|
static const NSUInteger kFilePlayTimeInSec = 15;
|
||||||
|
// Run the full-duplex test during this time (unit is in seconds).
|
||||||
|
// Note that first |kNumIgnoreFirstCallbacks| are ignored.
|
||||||
|
static const NSUInteger kFullDuplexTimeInSec = 10;
|
||||||
|
// Wait for the callback sequence to stabilize by ignoring this amount of the
|
||||||
|
// initial callbacks (avoids initial FIFO access).
|
||||||
|
// Only used in the RunPlayoutAndRecordingInFullDuplex test.
|
||||||
|
static const NSUInteger kNumIgnoreFirstCallbacks = 50;
|
||||||
|
|
||||||
|
@interface RTCAudioDeviceModuleTests : XCTestCase {
|
||||||
|
|
||||||
|
rtc::scoped_refptr<webrtc::AudioDeviceModule> audioDeviceModule;
|
||||||
|
webrtc::AudioParameters playoutParameters;
|
||||||
|
webrtc::AudioParameters recordParameters;
|
||||||
|
MockAudioTransport mock;
|
||||||
|
}
|
||||||
|
|
||||||
|
@end
|
||||||
|
|
||||||
|
@implementation RTCAudioDeviceModuleTests
|
||||||
|
|
||||||
|
- (void)setUp {
|
||||||
|
[super setUp];
|
||||||
|
audioDeviceModule = webrtc::CreateAudioDeviceModule();
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->Init());
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->GetPlayoutAudioParameters(&playoutParameters));
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->GetRecordAudioParameters(&recordParameters));
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)tearDown {
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->Terminate());
|
||||||
|
audioDeviceModule = nullptr;
|
||||||
|
[super tearDown];
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)startPlayout {
|
||||||
|
XCTAssertFalse(audioDeviceModule->Playing());
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->InitPlayout());
|
||||||
|
XCTAssertTrue(audioDeviceModule->PlayoutIsInitialized());
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->StartPlayout());
|
||||||
|
XCTAssertTrue(audioDeviceModule->Playing());
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)stopPlayout {
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->StopPlayout());
|
||||||
|
XCTAssertFalse(audioDeviceModule->Playing());
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)startRecording{
|
||||||
|
XCTAssertFalse(audioDeviceModule->Recording());
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->InitRecording());
|
||||||
|
XCTAssertTrue(audioDeviceModule->RecordingIsInitialized());
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->StartRecording());
|
||||||
|
XCTAssertTrue(audioDeviceModule->Recording());
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)stopRecording{
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->StopRecording());
|
||||||
|
XCTAssertFalse(audioDeviceModule->Recording());
|
||||||
|
}
|
||||||
|
|
||||||
|
- (NSURL*)fileURLForSampleRate:(int)sampleRate {
|
||||||
|
XCTAssertTrue(sampleRate == 48000 || sampleRate == 44100 || sampleRate == 16000);
|
||||||
|
NSString *filename = [NSString stringWithFormat:@"audio_short%d", sampleRate / 1000];
|
||||||
|
NSURL *url = [[NSBundle mainBundle] URLForResource:filename withExtension:@"pcm"];
|
||||||
|
XCTAssertNotNil(url);
|
||||||
|
|
||||||
|
return url;
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma mark - Tests
|
||||||
|
|
||||||
|
- (void)testConstructDestruct {
|
||||||
|
// Using the test fixture to create and destruct the audio device module.
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)testInitTerminate {
|
||||||
|
// Initialization is part of the test fixture.
|
||||||
|
XCTAssertTrue(audioDeviceModule->Initialized());
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->Terminate());
|
||||||
|
XCTAssertFalse(audioDeviceModule->Initialized());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that playout can be initiated, started and stopped. No audio callback
|
||||||
|
// is registered in this test.
|
||||||
|
// Failing when running on real iOS devices: bugs.webrtc.org/6889.
|
||||||
|
- (void)DISABLED_testStartStopPlayout {
|
||||||
|
[self startPlayout];
|
||||||
|
[self stopPlayout];
|
||||||
|
[self startPlayout];
|
||||||
|
[self stopPlayout];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that recording can be initiated, started and stopped. No audio callback
|
||||||
|
// is registered in this test.
|
||||||
|
// Can sometimes fail when running on real devices: bugs.webrtc.org/7888.
|
||||||
|
- (void)DISABLED_testStartStopRecording {
|
||||||
|
[self startRecording];
|
||||||
|
[self stopRecording];
|
||||||
|
[self startRecording];
|
||||||
|
[self stopRecording];
|
||||||
|
}
|
||||||
|
// Verify that calling StopPlayout() will leave us in an uninitialized state
|
||||||
|
// which will require a new call to InitPlayout(). This test does not call
|
||||||
|
// StartPlayout() while being uninitialized since doing so will hit a
|
||||||
|
// RTC_DCHECK.
|
||||||
|
- (void)testStopPlayoutRequiresInitToRestart {
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->InitPlayout());
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->StartPlayout());
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->StopPlayout());
|
||||||
|
XCTAssertFalse(audioDeviceModule->PlayoutIsInitialized());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that we can create two ADMs and start playing on the second ADM.
|
||||||
|
// Only the first active instance shall activate an audio session and the
|
||||||
|
// last active instance shall deactivate the audio session. The test does not
|
||||||
|
// explicitly verify correct audio session calls but instead focuses on
|
||||||
|
// ensuring that audio starts for both ADMs.
|
||||||
|
// Failing when running on real iOS devices: bugs.webrtc.org/6889.
|
||||||
|
- (void)DISABLED_testStartPlayoutOnTwoInstances {
|
||||||
|
// Create and initialize a second/extra ADM instance. The default ADM is
|
||||||
|
// created by the test harness.
|
||||||
|
rtc::scoped_refptr<webrtc::AudioDeviceModule> secondAudioDeviceModule =
|
||||||
|
webrtc::CreateAudioDeviceModule();
|
||||||
|
XCTAssertNotEqual(secondAudioDeviceModule.get(), nullptr);
|
||||||
|
XCTAssertEqual(0, secondAudioDeviceModule->Init());
|
||||||
|
|
||||||
|
// Start playout for the default ADM but don't wait here. Instead use the
|
||||||
|
// upcoming second stream for that. We set the same expectation on number
|
||||||
|
// of callbacks as for the second stream.
|
||||||
|
mock.expectNeedMorePlayData(^int32_t(const size_t nSamples,
|
||||||
|
const size_t nBytesPerSample,
|
||||||
|
const size_t nChannels,
|
||||||
|
const uint32_t samplesPerSec,
|
||||||
|
void *audioSamples,
|
||||||
|
size_t &nSamplesOut,
|
||||||
|
int64_t *elapsed_time_ms,
|
||||||
|
int64_t *ntp_time_ms) {
|
||||||
|
nSamplesOut = nSamples;
|
||||||
|
XCTAssertEqual(nSamples, playoutParameters.frames_per_10ms_buffer());
|
||||||
|
XCTAssertEqual(nBytesPerSample, kBytesPerSample);
|
||||||
|
XCTAssertEqual(nChannels, playoutParameters.channels());
|
||||||
|
XCTAssertEqual((int) samplesPerSec, playoutParameters.sample_rate());
|
||||||
|
XCTAssertNotEqual((void*)NULL, audioSamples);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
});
|
||||||
|
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
|
||||||
|
[self startPlayout];
|
||||||
|
|
||||||
|
// Initialize playout for the second ADM. If all is OK, the second ADM shall
|
||||||
|
// reuse the audio session activated when the first ADM started playing.
|
||||||
|
// This call will also ensure that we avoid a problem related to initializing
|
||||||
|
// two different audio unit instances back to back (see webrtc:5166 for
|
||||||
|
// details).
|
||||||
|
XCTAssertEqual(0, secondAudioDeviceModule->InitPlayout());
|
||||||
|
XCTAssertTrue(secondAudioDeviceModule->PlayoutIsInitialized());
|
||||||
|
|
||||||
|
// Start playout for the second ADM and verify that it starts as intended.
|
||||||
|
// Passing this test ensures that initialization of the second audio unit
|
||||||
|
// has been done successfully and that there is no conflict with the already
|
||||||
|
// playing first ADM.
|
||||||
|
XCTestExpectation *playoutExpectation = [self expectationWithDescription:@"NeedMorePlayoutData"];
|
||||||
|
playoutExpectation.expectedFulfillmentCount = kNumCallbacks;
|
||||||
|
|
||||||
|
MockAudioTransport mock2;
|
||||||
|
mock2.expectNeedMorePlayData(^int32_t(const size_t nSamples,
|
||||||
|
const size_t nBytesPerSample,
|
||||||
|
const size_t nChannels,
|
||||||
|
const uint32_t samplesPerSec,
|
||||||
|
void *audioSamples,
|
||||||
|
size_t &nSamplesOut,
|
||||||
|
int64_t *elapsed_time_ms,
|
||||||
|
int64_t *ntp_time_ms) {
|
||||||
|
nSamplesOut = nSamples;
|
||||||
|
XCTAssertEqual(nSamples, playoutParameters.frames_per_10ms_buffer());
|
||||||
|
XCTAssertEqual(nBytesPerSample, kBytesPerSample);
|
||||||
|
XCTAssertEqual(nChannels, playoutParameters.channels());
|
||||||
|
XCTAssertEqual((int) samplesPerSec, playoutParameters.sample_rate());
|
||||||
|
XCTAssertNotEqual((void*)NULL, audioSamples);
|
||||||
|
[playoutExpectation fulfill];
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
});
|
||||||
|
|
||||||
|
XCTAssertEqual(0, secondAudioDeviceModule->RegisterAudioCallback(&mock2));
|
||||||
|
XCTAssertEqual(0, secondAudioDeviceModule->StartPlayout());
|
||||||
|
XCTAssertTrue(secondAudioDeviceModule->Playing());
|
||||||
|
[self waitForExpectationsWithTimeout:kTestTimeOutInSec handler:nil];
|
||||||
|
XCTAssertEqual(0, secondAudioDeviceModule->StopPlayout());
|
||||||
|
XCTAssertFalse(secondAudioDeviceModule->Playing());
|
||||||
|
XCTAssertFalse(secondAudioDeviceModule->PlayoutIsInitialized());
|
||||||
|
|
||||||
|
XCTAssertEqual(0, secondAudioDeviceModule->Terminate());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start playout and verify that the native audio layer starts asking for real
|
||||||
|
// audio samples to play out using the NeedMorePlayData callback.
|
||||||
|
- (void)testStartPlayoutVerifyCallbacks {
|
||||||
|
|
||||||
|
XCTestExpectation *playoutExpectation = [self expectationWithDescription:@"NeedMorePlayoutData"];
|
||||||
|
playoutExpectation.expectedFulfillmentCount = kNumCallbacks;
|
||||||
|
|
||||||
|
mock.expectNeedMorePlayData(^int32_t(const size_t nSamples,
|
||||||
|
const size_t nBytesPerSample,
|
||||||
|
const size_t nChannels,
|
||||||
|
const uint32_t samplesPerSec,
|
||||||
|
void *audioSamples,
|
||||||
|
size_t &nSamplesOut,
|
||||||
|
int64_t *elapsed_time_ms,
|
||||||
|
int64_t *ntp_time_ms) {
|
||||||
|
nSamplesOut = nSamples;
|
||||||
|
XCTAssertEqual(nSamples, playoutParameters.frames_per_10ms_buffer());
|
||||||
|
XCTAssertEqual(nBytesPerSample, kBytesPerSample);
|
||||||
|
XCTAssertEqual(nChannels, playoutParameters.channels());
|
||||||
|
XCTAssertEqual((int) samplesPerSec, playoutParameters.sample_rate());
|
||||||
|
XCTAssertNotEqual((void*)NULL, audioSamples);
|
||||||
|
[playoutExpectation fulfill];
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
});
|
||||||
|
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
|
||||||
|
|
||||||
|
[self startPlayout];
|
||||||
|
[self waitForExpectationsWithTimeout:kTestTimeOutInSec handler:nil];
|
||||||
|
[self stopPlayout];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start recording and verify that the native audio layer starts feeding real
|
||||||
|
// audio samples via the RecordedDataIsAvailable callback.
|
||||||
|
- (void)testStartRecordingVerifyCallbacks {
|
||||||
|
XCTestExpectation *recordExpectation =
|
||||||
|
[self expectationWithDescription:@"RecordedDataIsAvailable"];
|
||||||
|
recordExpectation.expectedFulfillmentCount = kNumCallbacks;
|
||||||
|
|
||||||
|
mock.expectRecordedDataIsAvailable(^(const void* audioSamples,
|
||||||
|
const size_t nSamples,
|
||||||
|
const size_t nBytesPerSample,
|
||||||
|
const size_t nChannels,
|
||||||
|
const uint32_t samplesPerSec,
|
||||||
|
const uint32_t totalDelayMS,
|
||||||
|
const int32_t clockDrift,
|
||||||
|
const uint32_t currentMicLevel,
|
||||||
|
const bool keyPressed,
|
||||||
|
uint32_t& newMicLevel) {
|
||||||
|
XCTAssertNotEqual((void*)NULL, audioSamples);
|
||||||
|
XCTAssertEqual(nSamples, recordParameters.frames_per_10ms_buffer());
|
||||||
|
XCTAssertEqual(nBytesPerSample, kBytesPerSample);
|
||||||
|
XCTAssertEqual(nChannels, recordParameters.channels());
|
||||||
|
XCTAssertEqual((int) samplesPerSec, recordParameters.sample_rate());
|
||||||
|
XCTAssertEqual(0, clockDrift);
|
||||||
|
XCTAssertEqual(0u, currentMicLevel);
|
||||||
|
XCTAssertFalse(keyPressed);
|
||||||
|
[recordExpectation fulfill];
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
});
|
||||||
|
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
|
||||||
|
[self startRecording];
|
||||||
|
[self waitForExpectationsWithTimeout:kTestTimeOutInSec handler:nil];
|
||||||
|
[self stopRecording];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start playout and recording (full-duplex audio) and verify that audio is
|
||||||
|
// active in both directions.
|
||||||
|
- (void)testStartPlayoutAndRecordingVerifyCallbacks {
|
||||||
|
XCTestExpectation *playoutExpectation = [self expectationWithDescription:@"NeedMorePlayoutData"];
|
||||||
|
__block NSUInteger callbackCount = 0;
|
||||||
|
|
||||||
|
XCTestExpectation *recordExpectation =
|
||||||
|
[self expectationWithDescription:@"RecordedDataIsAvailable"];
|
||||||
|
recordExpectation.expectedFulfillmentCount = kNumCallbacks;
|
||||||
|
|
||||||
|
mock.expectNeedMorePlayData(^int32_t(const size_t nSamples,
|
||||||
|
const size_t nBytesPerSample,
|
||||||
|
const size_t nChannels,
|
||||||
|
const uint32_t samplesPerSec,
|
||||||
|
void *audioSamples,
|
||||||
|
size_t &nSamplesOut,
|
||||||
|
int64_t *elapsed_time_ms,
|
||||||
|
int64_t *ntp_time_ms) {
|
||||||
|
nSamplesOut = nSamples;
|
||||||
|
XCTAssertEqual(nSamples, playoutParameters.frames_per_10ms_buffer());
|
||||||
|
XCTAssertEqual(nBytesPerSample, kBytesPerSample);
|
||||||
|
XCTAssertEqual(nChannels, playoutParameters.channels());
|
||||||
|
XCTAssertEqual((int) samplesPerSec, playoutParameters.sample_rate());
|
||||||
|
XCTAssertNotEqual((void*)NULL, audioSamples);
|
||||||
|
if (callbackCount++ >= kNumCallbacks) {
|
||||||
|
[playoutExpectation fulfill];
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
});
|
||||||
|
|
||||||
|
mock.expectRecordedDataIsAvailable(^(const void* audioSamples,
|
||||||
|
const size_t nSamples,
|
||||||
|
const size_t nBytesPerSample,
|
||||||
|
const size_t nChannels,
|
||||||
|
const uint32_t samplesPerSec,
|
||||||
|
const uint32_t totalDelayMS,
|
||||||
|
const int32_t clockDrift,
|
||||||
|
const uint32_t currentMicLevel,
|
||||||
|
const bool keyPressed,
|
||||||
|
uint32_t& newMicLevel) {
|
||||||
|
XCTAssertNotEqual((void*)NULL, audioSamples);
|
||||||
|
XCTAssertEqual(nSamples, recordParameters.frames_per_10ms_buffer());
|
||||||
|
XCTAssertEqual(nBytesPerSample, kBytesPerSample);
|
||||||
|
XCTAssertEqual(nChannels, recordParameters.channels());
|
||||||
|
XCTAssertEqual((int) samplesPerSec, recordParameters.sample_rate());
|
||||||
|
XCTAssertEqual(0, clockDrift);
|
||||||
|
XCTAssertEqual(0u, currentMicLevel);
|
||||||
|
XCTAssertFalse(keyPressed);
|
||||||
|
[recordExpectation fulfill];
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
});
|
||||||
|
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
|
||||||
|
[self startPlayout];
|
||||||
|
[self startRecording];
|
||||||
|
[self waitForExpectationsWithTimeout:kTestTimeOutInSec handler:nil];
|
||||||
|
[self stopRecording];
|
||||||
|
[self stopPlayout];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start playout and read audio from an external PCM file when the audio layer
|
||||||
|
// asks for data to play out. Real audio is played out in this test but it does
|
||||||
|
// not contain any explicit verification that the audio quality is perfect.
|
||||||
|
- (void)testRunPlayoutWithFileAsSource {
|
||||||
|
XCTAssertEqual(1u, playoutParameters.channels());
|
||||||
|
|
||||||
|
// Using XCTestExpectation to count callbacks is very slow.
|
||||||
|
XCTestExpectation *playoutExpectation = [self expectationWithDescription:@"NeedMorePlayoutData"];
|
||||||
|
const int expectedCallbackCount = kFilePlayTimeInSec * kNumCallbacksPerSecond;
|
||||||
|
__block int callbackCount = 0;
|
||||||
|
|
||||||
|
NSURL *fileURL = [self fileURLForSampleRate:playoutParameters.sample_rate()];
|
||||||
|
NSInputStream *inputStream = [[NSInputStream alloc] initWithURL:fileURL];
|
||||||
|
|
||||||
|
mock.expectNeedMorePlayData(^int32_t(const size_t nSamples,
|
||||||
|
const size_t nBytesPerSample,
|
||||||
|
const size_t nChannels,
|
||||||
|
const uint32_t samplesPerSec,
|
||||||
|
void *audioSamples,
|
||||||
|
size_t &nSamplesOut,
|
||||||
|
int64_t *elapsed_time_ms,
|
||||||
|
int64_t *ntp_time_ms) {
|
||||||
|
[inputStream read:(uint8_t *)audioSamples maxLength:nSamples*nBytesPerSample*nChannels];
|
||||||
|
nSamplesOut = nSamples;
|
||||||
|
if (callbackCount++ == expectedCallbackCount) {
|
||||||
|
[playoutExpectation fulfill];
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
});
|
||||||
|
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
|
||||||
|
[self startPlayout];
|
||||||
|
NSTimeInterval waitTimeout = kFilePlayTimeInSec * 2.0;
|
||||||
|
[self waitForExpectationsWithTimeout:waitTimeout handler:nil];
|
||||||
|
[self stopPlayout];
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)testDevices {
|
||||||
|
// Device enumeration is not supported. Verify fixed values only.
|
||||||
|
XCTAssertEqual(1, audioDeviceModule->PlayoutDevices());
|
||||||
|
XCTAssertEqual(1, audioDeviceModule->RecordingDevices());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start playout and recording and store recorded data in an intermediate FIFO
|
||||||
|
// buffer from which the playout side then reads its samples in the same order
|
||||||
|
// as they were stored. Under ideal circumstances, a callback sequence would
|
||||||
|
// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
|
||||||
|
// means 'packet played'. Under such conditions, the FIFO would only contain
|
||||||
|
// one packet on average. However, under more realistic conditions, the size
|
||||||
|
// of the FIFO will vary more due to an unbalance between the two sides.
|
||||||
|
// This test tries to verify that the device maintains a balanced callback-
|
||||||
|
// sequence by running in loopback for ten seconds while measuring the size
|
||||||
|
// (max and average) of the FIFO. The size of the FIFO is increased by the
|
||||||
|
// recording side and decreased by the playout side.
|
||||||
|
// TODO(henrika): tune the final test parameters after running tests on several
|
||||||
|
// different devices.
|
||||||
|
- (void)testRunPlayoutAndRecordingInFullDuplex {
|
||||||
|
XCTAssertEqual(recordParameters.channels(), playoutParameters.channels());
|
||||||
|
XCTAssertEqual(recordParameters.sample_rate(), playoutParameters.sample_rate());
|
||||||
|
|
||||||
|
XCTestExpectation *playoutExpectation = [self expectationWithDescription:@"NeedMorePlayoutData"];
|
||||||
|
__block NSUInteger playoutCallbacks = 0;
|
||||||
|
NSUInteger expectedPlayoutCallbacks = kFullDuplexTimeInSec * kNumCallbacksPerSecond;
|
||||||
|
|
||||||
|
// FIFO queue and measurements
|
||||||
|
NSMutableArray *fifoBuffer = [NSMutableArray arrayWithCapacity:20];
|
||||||
|
__block NSUInteger fifoMaxSize = 0;
|
||||||
|
__block NSUInteger fifoTotalWrittenElements = 0;
|
||||||
|
__block NSUInteger fifoWriteCount = 0;
|
||||||
|
|
||||||
|
mock.expectRecordedDataIsAvailable(^(const void* audioSamples,
|
||||||
|
const size_t nSamples,
|
||||||
|
const size_t nBytesPerSample,
|
||||||
|
const size_t nChannels,
|
||||||
|
const uint32_t samplesPerSec,
|
||||||
|
const uint32_t totalDelayMS,
|
||||||
|
const int32_t clockDrift,
|
||||||
|
const uint32_t currentMicLevel,
|
||||||
|
const bool keyPressed,
|
||||||
|
uint32_t& newMicLevel) {
|
||||||
|
if (fifoWriteCount++ < kNumIgnoreFirstCallbacks) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
NSData *data = [NSData dataWithBytes:audioSamples length:nSamples*nBytesPerSample*nChannels];
|
||||||
|
@synchronized(fifoBuffer) {
|
||||||
|
[fifoBuffer addObject:data];
|
||||||
|
fifoMaxSize = MAX(fifoMaxSize, fifoBuffer.count);
|
||||||
|
fifoTotalWrittenElements += fifoBuffer.count;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
});
|
||||||
|
|
||||||
|
mock.expectNeedMorePlayData(^int32_t(const size_t nSamples,
|
||||||
|
const size_t nBytesPerSample,
|
||||||
|
const size_t nChannels,
|
||||||
|
const uint32_t samplesPerSec,
|
||||||
|
void *audioSamples,
|
||||||
|
size_t &nSamplesOut,
|
||||||
|
int64_t *elapsed_time_ms,
|
||||||
|
int64_t *ntp_time_ms) {
|
||||||
|
nSamplesOut = nSamples;
|
||||||
|
NSData *data;
|
||||||
|
@synchronized(fifoBuffer) {
|
||||||
|
data = fifoBuffer.firstObject;
|
||||||
|
if (data) {
|
||||||
|
[fifoBuffer removeObjectAtIndex:0];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (data) {
|
||||||
|
memcpy(audioSamples, (char*) data.bytes, data.length);
|
||||||
|
} else {
|
||||||
|
memset(audioSamples, 0, nSamples*nBytesPerSample*nChannels);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (playoutCallbacks++ == expectedPlayoutCallbacks) {
|
||||||
|
[playoutExpectation fulfill];
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
});
|
||||||
|
|
||||||
|
XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
|
||||||
|
[self startRecording];
|
||||||
|
[self startPlayout];
|
||||||
|
NSTimeInterval waitTimeout = kFullDuplexTimeInSec * 2.0;
|
||||||
|
[self waitForExpectationsWithTimeout:waitTimeout handler:nil];
|
||||||
|
|
||||||
|
size_t fifoAverageSize =
|
||||||
|
(fifoTotalWrittenElements == 0)
|
||||||
|
? 0.0
|
||||||
|
: 0.5 + (double)fifoTotalWrittenElements / (fifoWriteCount - kNumIgnoreFirstCallbacks);
|
||||||
|
|
||||||
|
[self stopPlayout];
|
||||||
|
[self stopRecording];
|
||||||
|
XCTAssertLessThan(fifoAverageSize, 10u);
|
||||||
|
XCTAssertLessThan(fifoMaxSize, 20u);
|
||||||
|
}
|
||||||
|
|
||||||
|
@end
|
112
sdk/objc/Framework/UnitTests/RTCAudioDevice_xctest.mm
Normal file
112
sdk/objc/Framework/UnitTests/RTCAudioDevice_xctest.mm
Normal file
@ -0,0 +1,112 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2018 The WebRTC project authors. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#import <XCTest/XCTest.h>
|
||||||
|
#import "sdk/objc/Framework/Native/src/audio/audio_device_ios.h"
|
||||||
|
#import "sdk/objc/Framework/Native/api/audio_device_module.h"
|
||||||
|
#import "sdk/objc/Framework/Classes/Audio/RTCAudioSession+Private.h"
|
||||||
|
|
||||||
|
@interface RTCAudioDeviceTests: XCTestCase {
|
||||||
|
rtc::scoped_refptr<webrtc::AudioDeviceModule> _audioDeviceModule;
|
||||||
|
std::unique_ptr<webrtc::ios_adm::AudioDeviceIOS> _audio_device;
|
||||||
|
}
|
||||||
|
|
||||||
|
@property(nonatomic) RTCAudioSession *audioSession;
|
||||||
|
|
||||||
|
@end
|
||||||
|
|
||||||
|
@implementation RTCAudioDeviceTests
|
||||||
|
|
||||||
|
@synthesize audioSession = _audioSession;
|
||||||
|
|
||||||
|
- (void)setUp {
|
||||||
|
[super setUp];
|
||||||
|
|
||||||
|
_audioDeviceModule = webrtc::CreateAudioDeviceModule();
|
||||||
|
_audio_device.reset(new webrtc::ios_adm::AudioDeviceIOS());
|
||||||
|
self.audioSession = [RTCAudioSession sharedInstance];
|
||||||
|
|
||||||
|
NSError *error = nil;
|
||||||
|
[self.audioSession lockForConfiguration];
|
||||||
|
[self.audioSession setCategory:AVAudioSessionCategoryPlayAndRecord
|
||||||
|
withOptions:0
|
||||||
|
error:&error];
|
||||||
|
XCTAssertNil(error);
|
||||||
|
|
||||||
|
[self.audioSession setMode:AVAudioSessionModeVoiceChat error:&error];
|
||||||
|
XCTAssertNil(error);
|
||||||
|
|
||||||
|
[self.audioSession setActive:YES error:&error];
|
||||||
|
XCTAssertNil(error);
|
||||||
|
|
||||||
|
[self.audioSession unlockForConfiguration];
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)tearDown {
|
||||||
|
_audio_device->Terminate();
|
||||||
|
_audio_device.reset(nullptr);
|
||||||
|
_audioDeviceModule = nullptr;
|
||||||
|
[self.audioSession notifyDidEndInterruptionWithShouldResumeSession:NO];
|
||||||
|
|
||||||
|
[super tearDown];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verifies that the AudioDeviceIOS is_interrupted_ flag is reset correctly
|
||||||
|
// after an iOS AVAudioSessionInterruptionTypeEnded notification event.
|
||||||
|
// AudioDeviceIOS listens to RTCAudioSession interrupted notifications by:
|
||||||
|
// - In AudioDeviceIOS.InitPlayOrRecord registers its audio_session_observer_
|
||||||
|
// callback with RTCAudioSession's delegate list.
|
||||||
|
// - When RTCAudioSession receives an iOS audio interrupted notification, it
|
||||||
|
// passes the notification to callbacks in its delegate list which sets
|
||||||
|
// AudioDeviceIOS's is_interrupted_ flag to true.
|
||||||
|
// - When AudioDeviceIOS.ShutdownPlayOrRecord is called, its
|
||||||
|
// audio_session_observer_ callback is removed from RTCAudioSessions's
|
||||||
|
// delegate list.
|
||||||
|
// So if RTCAudioSession receives an iOS end audio interruption notification,
|
||||||
|
// AudioDeviceIOS is not notified as its callback is not in RTCAudioSession's
|
||||||
|
// delegate list. This causes AudioDeviceIOS's is_interrupted_ flag to be in
|
||||||
|
// the wrong (true) state and the audio session will ignore audio changes.
|
||||||
|
// As RTCAudioSession keeps its own interrupted state, the fix is to initialize
|
||||||
|
// AudioDeviceIOS's is_interrupted_ flag to RTCAudioSession's isInterrupted
|
||||||
|
// flag in AudioDeviceIOS.InitPlayOrRecord.
|
||||||
|
- (void)testInterruptedAudioSession {
|
||||||
|
XCTAssertTrue(self.audioSession.isActive);
|
||||||
|
XCTAssertTrue([self.audioSession.category isEqual:AVAudioSessionCategoryPlayAndRecord] ||
|
||||||
|
[self.audioSession.category isEqual:AVAudioSessionCategoryPlayback]);
|
||||||
|
XCTAssertEqual(AVAudioSessionModeVoiceChat, self.audioSession.mode);
|
||||||
|
|
||||||
|
std::unique_ptr<webrtc::AudioDeviceBuffer> audio_buffer;
|
||||||
|
audio_buffer.reset(new webrtc::AudioDeviceBuffer());
|
||||||
|
_audio_device->AttachAudioBuffer(audio_buffer.get());
|
||||||
|
XCTAssertEqual(webrtc::AudioDeviceGeneric::InitStatus::OK, _audio_device->Init());
|
||||||
|
XCTAssertEqual(0, _audio_device->InitPlayout());
|
||||||
|
XCTAssertEqual(0, _audio_device->StartPlayout());
|
||||||
|
|
||||||
|
// Force interruption.
|
||||||
|
[self.audioSession notifyDidBeginInterruption];
|
||||||
|
|
||||||
|
// Wait for notification to propagate.
|
||||||
|
rtc::MessageQueueManager::ProcessAllMessageQueues();
|
||||||
|
XCTAssertTrue(_audio_device->IsInterrupted());
|
||||||
|
|
||||||
|
// Force it for testing.
|
||||||
|
_audio_device->StopPlayout();
|
||||||
|
|
||||||
|
[self.audioSession notifyDidEndInterruptionWithShouldResumeSession:YES];
|
||||||
|
// Wait for notification to propagate.
|
||||||
|
rtc::MessageQueueManager::ProcessAllMessageQueues();
|
||||||
|
XCTAssertTrue(_audio_device->IsInterrupted());
|
||||||
|
|
||||||
|
_audio_device->Init();
|
||||||
|
_audio_device->InitPlayout();
|
||||||
|
XCTAssertFalse(_audio_device->IsInterrupted());
|
||||||
|
}
|
||||||
|
|
||||||
|
@end
|
BIN
sdk/objc/Framework/UnitTests/audio_short16.pcm
Normal file
BIN
sdk/objc/Framework/UnitTests/audio_short16.pcm
Normal file
Binary file not shown.
BIN
sdk/objc/Framework/UnitTests/audio_short44.pcm
Normal file
BIN
sdk/objc/Framework/UnitTests/audio_short44.pcm
Normal file
Binary file not shown.
BIN
sdk/objc/Framework/UnitTests/audio_short48.pcm
Normal file
BIN
sdk/objc/Framework/UnitTests/audio_short48.pcm
Normal file
Binary file not shown.
Reference in New Issue
Block a user