diff --git a/examples/androidapp/src/org/appspot/apprtc/PeerConnectionClient.java b/examples/androidapp/src/org/appspot/apprtc/PeerConnectionClient.java
index a2b4c102c1..be6cae6b24 100644
--- a/examples/androidapp/src/org/appspot/apprtc/PeerConnectionClient.java
+++ b/examples/androidapp/src/org/appspot/apprtc/PeerConnectionClient.java
@@ -69,13 +69,7 @@ import org.webrtc.VideoRenderer;
import org.webrtc.VideoSink;
import org.webrtc.VideoSource;
import org.webrtc.VideoTrack;
-import org.webrtc.voiceengine.WebRtcAudioManager;
-import org.webrtc.voiceengine.WebRtcAudioRecord;
-import org.webrtc.voiceengine.WebRtcAudioRecord.AudioRecordStartErrorCode;
-import org.webrtc.voiceengine.WebRtcAudioRecord.WebRtcAudioRecordErrorCallback;
-import org.webrtc.voiceengine.WebRtcAudioTrack;
-import org.webrtc.voiceengine.WebRtcAudioTrack.AudioTrackStartErrorCode;
-import org.webrtc.voiceengine.WebRtcAudioUtils;
+import org.webrtc.audio.AudioDeviceModule;
/**
* Peer connection client implementation.
@@ -104,6 +98,8 @@ public class PeerConnectionClient {
"WebRTC-H264HighProfile/Enabled/";
private static final String DISABLE_WEBRTC_AGC_FIELDTRIAL =
"WebRTC-Audio-MinimizeResamplingOnMobile/Enabled/";
+ private static final String EXTERNAL_ANDROID_AUDIO_DEVICE_FIELDTRIAL =
+ "WebRTC-ExternalAndroidAudioDevice/Enabled/";
private static final String AUDIO_CODEC_PARAM_BITRATE = "maxaveragebitrate";
private static final String AUDIO_ECHO_CANCELLATION_CONSTRAINT = "googEchoCancellation";
private static final String AUDIO_AUTO_GAIN_CONTROL_CONSTRAINT = "googAutoGainControl";
@@ -402,6 +398,8 @@ public class PeerConnectionClient {
fieldTrials += DISABLE_WEBRTC_AGC_FIELDTRIAL;
Log.d(TAG, "Disable WebRTC AGC field trial.");
}
+ fieldTrials += EXTERNAL_ANDROID_AUDIO_DEVICE_FIELDTRIAL;
+ Log.d(TAG, "Enable WebRTC external Android audio device field trial.");
// Check preferred video codec.
preferredVideoCodec = VIDEO_CODEC_VP8;
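For reference, the accumulated fieldTrials string only takes effect when it is handed to WebRTC before the factory is created. A minimal sketch, assuming the PeerConnectionFactory.initializeFieldTrials(String) entry point available in this era of the SDK:

import org.webrtc.PeerConnectionFactory;

final class FieldTrialSetup {
  static void apply() {
    // Sketch only: field trials must be registered before the factory exists.
    // Trial strings are "<TrialName>/<GroupName>/" entries concatenated.
    String fieldTrials = "WebRTC-ExternalAndroidAudioDevice/Enabled/"
        + "WebRTC-Audio-MinimizeResamplingOnMobile/Enabled/";
    PeerConnectionFactory.initializeFieldTrials(fieldTrials);
  }
}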
@@ -450,38 +448,30 @@ public class PeerConnectionClient {
// Enable/disable OpenSL ES playback.
if (!peerConnectionParameters.useOpenSLES) {
Log.d(TAG, "Disable OpenSL ES audio even if device supports it");
- WebRtcAudioManager.setBlacklistDeviceForOpenSLESUsage(true /* enable */);
+ AudioDeviceModule.setBlacklistDeviceForOpenSLESUsage(true /* enable */);
} else {
Log.d(TAG, "Allow OpenSL ES audio if device supports it");
- WebRtcAudioManager.setBlacklistDeviceForOpenSLESUsage(false);
+ AudioDeviceModule.setBlacklistDeviceForOpenSLESUsage(false);
}
if (peerConnectionParameters.disableBuiltInAEC) {
Log.d(TAG, "Disable built-in AEC even if device supports it");
- WebRtcAudioUtils.setWebRtcBasedAcousticEchoCanceler(true);
+ AudioDeviceModule.setWebRtcBasedAcousticEchoCanceler(true);
} else {
Log.d(TAG, "Enable built-in AEC if device supports it");
- WebRtcAudioUtils.setWebRtcBasedAcousticEchoCanceler(false);
- }
-
- if (peerConnectionParameters.disableBuiltInAGC) {
- Log.d(TAG, "Disable built-in AGC even if device supports it");
- WebRtcAudioUtils.setWebRtcBasedAutomaticGainControl(true);
- } else {
- Log.d(TAG, "Enable built-in AGC if device supports it");
- WebRtcAudioUtils.setWebRtcBasedAutomaticGainControl(false);
+ AudioDeviceModule.setWebRtcBasedAcousticEchoCanceler(false);
}
if (peerConnectionParameters.disableBuiltInNS) {
Log.d(TAG, "Disable built-in NS even if device supports it");
- WebRtcAudioUtils.setWebRtcBasedNoiseSuppressor(true);
+ AudioDeviceModule.setWebRtcBasedNoiseSuppressor(true);
} else {
Log.d(TAG, "Enable built-in NS if device supports it");
- WebRtcAudioUtils.setWebRtcBasedNoiseSuppressor(false);
+ AudioDeviceModule.setWebRtcBasedNoiseSuppressor(false);
}
// Set audio record error callbacks.
- WebRtcAudioRecord.setErrorCallback(new WebRtcAudioRecordErrorCallback() {
+ AudioDeviceModule.setErrorCallback(new AudioDeviceModule.AudioRecordErrorCallback() {
@Override
public void onWebRtcAudioRecordInitError(String errorMessage) {
Log.e(TAG, "onWebRtcAudioRecordInitError: " + errorMessage);
@@ -490,7 +480,7 @@ public class PeerConnectionClient {
@Override
public void onWebRtcAudioRecordStartError(
- AudioRecordStartErrorCode errorCode, String errorMessage) {
+ AudioDeviceModule.AudioRecordStartErrorCode errorCode, String errorMessage) {
Log.e(TAG, "onWebRtcAudioRecordStartError: " + errorCode + ". " + errorMessage);
reportError(errorMessage);
}
@@ -518,7 +508,7 @@ public class PeerConnectionClient {
}
}
- WebRtcAudioTrack.setErrorCallback(new WebRtcAudioTrack.ErrorCallback() {
+ AudioDeviceModule.setErrorCallback(new AudioDeviceModule.AudioTrackErrorCallback() {
@Override
public void onWebRtcAudioTrackInitError(String errorMessage) {
Log.e(TAG, "onWebRtcAudioTrackInitError: " + errorMessage);
@@ -527,7 +517,7 @@ public class PeerConnectionClient {
@Override
public void onWebRtcAudioTrackStartError(
- AudioTrackStartErrorCode errorCode, String errorMessage) {
+ AudioDeviceModule.AudioTrackStartErrorCode errorCode, String errorMessage) {
Log.e(TAG, "onWebRtcAudioTrackStartError: " + errorCode + ". " + errorMessage);
reportError(errorMessage);
}
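The two setErrorCallback overloads above are distinguished only by their parameter type. A minimal sketch of wiring both callbacks through the new facade (the ErrorSink hook is hypothetical, not part of this patch):

import org.webrtc.audio.AudioDeviceModule;

final class AudioErrorWiring {
  interface ErrorSink { void report(String message); } // hypothetical app hook

  static void install(final ErrorSink sink) {
    AudioDeviceModule.setErrorCallback(new AudioDeviceModule.AudioRecordErrorCallback() {
      @Override
      public void onWebRtcAudioRecordInitError(String errorMessage) {
        sink.report(errorMessage);
      }
      @Override
      public void onWebRtcAudioRecordStartError(
          AudioDeviceModule.AudioRecordStartErrorCode errorCode, String errorMessage) {
        sink.report(errorCode + ": " + errorMessage);
      }
      @Override
      public void onWebRtcAudioRecordError(String errorMessage) {
        sink.report(errorMessage);
      }
    });
    AudioDeviceModule.setErrorCallback(new AudioDeviceModule.AudioTrackErrorCallback() {
      @Override
      public void onWebRtcAudioTrackInitError(String errorMessage) {
        sink.report(errorMessage);
      }
      @Override
      public void onWebRtcAudioTrackStartError(
          AudioDeviceModule.AudioTrackStartErrorCode errorCode, String errorMessage) {
        sink.report(errorCode + ": " + errorMessage);
      }
      @Override
      public void onWebRtcAudioTrackError(String errorMessage) {
        sink.report(errorMessage);
      }
    });
  }
}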
diff --git a/examples/androidapp/src/org/appspot/apprtc/SettingsActivity.java b/examples/androidapp/src/org/appspot/apprtc/SettingsActivity.java
index 73ec06e632..8e52e0c3c2 100644
--- a/examples/androidapp/src/org/appspot/apprtc/SettingsActivity.java
+++ b/examples/androidapp/src/org/appspot/apprtc/SettingsActivity.java
@@ -17,6 +17,7 @@ import android.os.Bundle;
import android.preference.ListPreference;
import android.preference.Preference;
import org.webrtc.Camera2Enumerator;
+import org.webrtc.audio.AudioDeviceModule;
import org.webrtc.voiceengine.WebRtcAudioUtils;
/**
@@ -172,8 +173,8 @@ public class SettingsActivity extends Activity implements OnSharedPreferenceChan
// Disable forcing WebRTC based AEC so it won't affect our value.
// Otherwise, if it was enabled, isAcousticEchoCancelerSupported would always return false.
- WebRtcAudioUtils.setWebRtcBasedAcousticEchoCanceler(false);
- if (!WebRtcAudioUtils.isAcousticEchoCancelerSupported()) {
+ AudioDeviceModule.setWebRtcBasedAcousticEchoCanceler(false);
+ if (!AudioDeviceModule.isAcousticEchoCancelerSupported()) {
Preference disableBuiltInAECPreference =
settingsFragment.findPreference(keyprefDisableBuiltInAEC);
@@ -181,17 +182,14 @@ public class SettingsActivity extends Activity implements OnSharedPreferenceChan
disableBuiltInAECPreference.setEnabled(false);
}
- WebRtcAudioUtils.setWebRtcBasedAutomaticGainControl(false);
- if (!WebRtcAudioUtils.isAutomaticGainControlSupported()) {
- Preference disableBuiltInAGCPreference =
- settingsFragment.findPreference(keyprefDisableBuiltInAGC);
+ Preference disableBuiltInAGCPreference =
+ settingsFragment.findPreference(keyprefDisableBuiltInAGC);
- disableBuiltInAGCPreference.setSummary(getString(R.string.pref_built_in_agc_not_available));
- disableBuiltInAGCPreference.setEnabled(false);
- }
+ disableBuiltInAGCPreference.setSummary(getString(R.string.pref_built_in_agc_not_available));
+ disableBuiltInAGCPreference.setEnabled(false);
- WebRtcAudioUtils.setWebRtcBasedNoiseSuppressor(false);
- if (!WebRtcAudioUtils.isNoiseSuppressorSupported()) {
+ AudioDeviceModule.setWebRtcBasedNoiseSuppressor(false);
+ if (!AudioDeviceModule.isNoiseSuppressorSupported()) {
Preference disableBuiltInNSPreference =
settingsFragment.findPreference(keyprefDisableBuiltInNS);
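The ordering above is subtle enough to warrant a standalone sketch: the forced software-AEC override must be cleared before querying hardware support, or the query is short-circuited to false (the same pattern applies to NS):

import org.webrtc.audio.AudioDeviceModule;

final class HwEffectProbe {
  // Returns whether the hardware AEC is usable. Clearing the WebRTC-based
  // override first is required; otherwise isAcousticEchoCancelerSupported()
  // always reports false.
  static boolean hwAecUsable() {
    AudioDeviceModule.setWebRtcBasedAcousticEchoCanceler(false);
    return AudioDeviceModule.isAcousticEchoCancelerSupported();
  }
}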
diff --git a/sdk/android/BUILD.gn b/sdk/android/BUILD.gn
index 0c25f0295b..526219f87e 100644
--- a/sdk/android/BUILD.gn
+++ b/sdk/android/BUILD.gn
@@ -136,6 +136,81 @@ rtc_static_library("audio_jni") {
]
}
+rtc_source_set("native_api_audio_device_module") {
+ visibility = [ "*" ]
+
+ sources = [
+ "native_api/audio_device_module/audio_device_android.cc",
+ "native_api/audio_device_module/audio_device_android.h",
+ ]
+
+ deps = [
+ ":audio_device_jni",
+ ":base_jni",
+ "../../modules/audio_device:audio_device",
+ "../../rtc_base:checks",
+ "../../rtc_base:rtc_base_approved",
+ "../../system_wrappers",
+ "../../system_wrappers:metrics_api",
+ ]
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+}
+
+rtc_source_set("audio_device_jni") {
+ visibility = [ "*" ]
+
+ sources = [
+ "src/jni/audio_device/audio_common.h",
+ "src/jni/audio_device/audio_device_template_android.h",
+ "src/jni/audio_device/audio_manager.cc",
+ "src/jni/audio_device/audio_manager.h",
+ "src/jni/audio_device/audio_record_jni.cc",
+ "src/jni/audio_device/audio_record_jni.h",
+ "src/jni/audio_device/audio_track_jni.cc",
+ "src/jni/audio_device/audio_track_jni.h",
+ "src/jni/audio_device/build_info.cc",
+ "src/jni/audio_device/build_info.h",
+ "src/jni/audio_device/opensles_common.cc",
+ "src/jni/audio_device/opensles_common.h",
+ "src/jni/audio_device/opensles_player.cc",
+ "src/jni/audio_device/opensles_player.h",
+ "src/jni/audio_device/opensles_recorder.cc",
+ "src/jni/audio_device/opensles_recorder.h",
+ ]
+ libs = [ "OpenSLES" ]
+ if (rtc_enable_android_aaudio) {
+ defines += [ "AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO" ]
+ sources += [
+ "src/jni/audio_device/aaudio_player.cc",
+ "src/jni/audio_device/aaudio_player.h",
+ "src/jni/audio_device/aaudio_recorder.cc",
+ "src/jni/audio_device/aaudio_recorder.h",
+ "src/jni/audio_device/aaudio_wrapper.cc",
+ "src/jni/audio_device/aaudio_wrapper.h",
+ ]
+ libs += [ "aaudio" ]
+ }
+ deps = [
+ ":base_jni",
+ "../../api:array_view",
+ "../../modules/audio_device:audio_device",
+ "../../modules/audio_device:audio_device_buffer",
+ "../../modules/utility:utility",
+ "../../rtc_base:checks",
+ "../../rtc_base:rtc_base",
+ "../../rtc_base:rtc_base_approved",
+ "../../system_wrappers",
+ "../../system_wrappers:metrics_api",
+ ]
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+}
+
rtc_static_library("null_audio_jni") {
sources = [
"src/jni/pc/null_audio.cc",
@@ -508,6 +583,7 @@ rtc_static_library("peerconnection_jni") {
":base_jni",
":generated_external_classes_jni",
":generated_peerconnection_jni",
+ ":native_api_audio_device_module",
":native_api_jni",
"../..:webrtc_common",
"../../api:libjingle_peerconnection_api",
@@ -701,6 +777,7 @@ rtc_android_library("libjingle_peerconnection_java") {
"api/org/webrtc/VideoTrack.java",
"api/org/webrtc/YuvConverter.java",
"api/org/webrtc/YuvHelper.java",
+ "api/org/webrtc/audio/AudioDeviceModule.java",
"src/java/org/webrtc/AndroidVideoTrackSourceObserver.java",
"src/java/org/webrtc/BaseBitrateAdjuster.java",
"src/java/org/webrtc/BitrateAdjuster.java",
@@ -732,6 +809,11 @@ rtc_android_library("libjingle_peerconnection_java") {
"src/java/org/webrtc/WrappedNativeI420Buffer.java",
"src/java/org/webrtc/WrappedNativeVideoEncoder.java",
"src/java/org/webrtc/WrappedNativeVideoDecoder.java",
+ "src/java/org/webrtc/audio/WebRtcAudioEffects.java",
+ "src/java/org/webrtc/audio/WebRtcAudioManager.java",
+ "src/java/org/webrtc/audio/WebRtcAudioRecord.java",
+ "src/java/org/webrtc/audio/WebRtcAudioTrack.java",
+ "src/java/org/webrtc/audio/WebRtcAudioUtils.java",
]
if (rtc_use_builtin_sw_codecs) {
java_files += [
@@ -749,6 +831,7 @@ rtc_android_library("libjingle_peerconnection_java") {
}
deps = [
+ ":native_api_audio_device_module",
"../../modules/audio_device:audio_device_java",
"../../rtc_base:base_java",
]
@@ -813,6 +896,7 @@ if (rtc_include_tests) {
# The native API is currently experimental and may change without notice.
group("native_api") {
deps = [
+ ":native_api_audio_device_module",
":native_api_base",
":native_api_codecs",
":native_api_jni",
diff --git a/sdk/android/api/org/webrtc/audio/AudioDeviceModule.java b/sdk/android/api/org/webrtc/audio/AudioDeviceModule.java
new file mode 100644
index 0000000000..c3ca5bc84c
--- /dev/null
+++ b/sdk/android/api/org/webrtc/audio/AudioDeviceModule.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.audio;
+
+import org.webrtc.audio.WebRtcAudioManager;
+import org.webrtc.audio.WebRtcAudioRecord;
+import org.webrtc.audio.WebRtcAudioTrack;
+import org.webrtc.audio.WebRtcAudioUtils;
+
+/**
+ * Public API for Java audio methods.
+ *
+ * <p>Note: This class is still under development and may change without notice.
+ */
+public class AudioDeviceModule {
+ public AudioDeviceModule() {}
+
+ /* AudioManager */
+ public static void setBlacklistDeviceForOpenSLESUsage(boolean enable) {
+ WebRtcAudioManager.setBlacklistDeviceForOpenSLESUsage(enable);
+ }
+
+ public static void setStereoInput(boolean enable) {
+ WebRtcAudioManager.setStereoInput(enable);
+ }
+
+ /* AudioRecord */
+ // Audio recording error handler functions.
+ public enum AudioRecordStartErrorCode {
+ AUDIO_RECORD_START_EXCEPTION,
+ AUDIO_RECORD_START_STATE_MISMATCH,
+ }
+
+ public static interface AudioRecordErrorCallback {
+ void onWebRtcAudioRecordInitError(String errorMessage);
+ void onWebRtcAudioRecordStartError(AudioRecordStartErrorCode errorCode, String errorMessage);
+ void onWebRtcAudioRecordError(String errorMessage);
+ }
+
+ public static void setErrorCallback(AudioRecordErrorCallback errorCallback) {
+ WebRtcAudioRecord.setErrorCallback(errorCallback);
+ }
+
+ /* AudioTrack */
+ // Audio playout/track error handler functions.
+ public enum AudioTrackStartErrorCode {
+ AUDIO_TRACK_START_EXCEPTION,
+ AUDIO_TRACK_START_STATE_MISMATCH,
+ }
+ public static interface AudioTrackErrorCallback {
+ void onWebRtcAudioTrackInitError(String errorMessage);
+ void onWebRtcAudioTrackStartError(AudioTrackStartErrorCode errorCode, String errorMessage);
+ void onWebRtcAudioTrackError(String errorMessage);
+ }
+
+ public static void setErrorCallback(AudioTrackErrorCallback errorCallback) {
+ WebRtcAudioTrack.setErrorCallback(errorCallback);
+ }
+
+ /* AudioUtils */
+ public static void setWebRtcBasedAcousticEchoCanceler(boolean enable) {
+ WebRtcAudioUtils.setWebRtcBasedAcousticEchoCanceler(enable);
+ }
+
+ public static void setWebRtcBasedNoiseSuppressor(boolean enable) {
+ WebRtcAudioUtils.setWebRtcBasedNoiseSuppressor(enable);
+ }
+
+ // Returns true if the device supports an audio effect (AEC or NS).
+ // Four conditions must be fulfilled if functions are to return true:
+ // 1) the platform must support the built-in (HW) effect,
+ // 2) explicit use (override) of a WebRTC based version must not be set,
+ // 3) the device must not be blacklisted for use of the effect, and
+ // 4) the UUID of the effect must be approved (some UUIDs can be excluded).
+ public static boolean isAcousticEchoCancelerSupported() {
+ return WebRtcAudioEffects.canUseAcousticEchoCanceler();
+ }
+ public static boolean isNoiseSuppressorSupported() {
+ return WebRtcAudioEffects.canUseNoiseSuppressor();
+ }
+
+ // Call this method if the default handling of querying the native sample
+ // rate shall be overridden. Can be useful on some devices where the
+ // available Android APIs are known to return invalid results.
+ public static void setDefaultSampleRateHz(int sampleRateHz) {
+ WebRtcAudioUtils.setDefaultSampleRateHz(sampleRateHz);
+ }
+}
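A short usage sketch for the facade above, showing a configuration an application might apply before creating its factory; the concrete values are illustrative, not recommendations:

import org.webrtc.audio.AudioDeviceModule;

final class AudioConfigExample {
  static void applyDefaults() {
    // Force the Java (AudioTrack/AudioRecord) path instead of OpenSL ES.
    AudioDeviceModule.setBlacklistDeviceForOpenSLESUsage(true);
    AudioDeviceModule.setStereoInput(false); // mono capture
    // Prefer the hardware effects when usable; fall back to the WebRTC
    // software versions when the platform effect is unavailable.
    AudioDeviceModule.setWebRtcBasedAcousticEchoCanceler(false);
    if (!AudioDeviceModule.isAcousticEchoCancelerSupported()) {
      AudioDeviceModule.setWebRtcBasedAcousticEchoCanceler(true);
    }
    AudioDeviceModule.setWebRtcBasedNoiseSuppressor(false);
    if (!AudioDeviceModule.isNoiseSuppressorSupported()) {
      AudioDeviceModule.setWebRtcBasedNoiseSuppressor(true);
    }
  }
}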
diff --git a/sdk/android/native_api/DEPS b/sdk/android/native_api/DEPS
new file mode 100644
index 0000000000..020e1cbf09
--- /dev/null
+++ b/sdk/android/native_api/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ "+modules/audio_device/include/audio_device.h",
+ "+system_wrappers/include",
+]
diff --git a/sdk/android/native_api/audio_device_module/audio_device_android.cc b/sdk/android/native_api/audio_device_module/audio_device_android.cc
new file mode 100644
index 0000000000..7d5a2171b4
--- /dev/null
+++ b/sdk/android/native_api/audio_device_module/audio_device_android.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/android/native_api/audio_device_module/audio_device_android.h"
+
+#include <memory>
+#include "rtc_base/logging.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/refcountedobject.h"
+#include "system_wrappers/include/metrics.h"
+
+#if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+#include "sdk/android/src/jni/audio_device/aaudio_player.h"
+#include "sdk/android/src/jni/audio_device/aaudio_recorder.h"
+#endif
+#include "sdk/android/src/jni/audio_device/audio_device_template_android.h"
+#include "sdk/android/src/jni/audio_device/audio_manager.h"
+#include "sdk/android/src/jni/audio_device/audio_record_jni.h"
+#include "sdk/android/src/jni/audio_device/audio_track_jni.h"
+#include "sdk/android/src/jni/audio_device/opensles_player.h"
+#include "sdk/android/src/jni/audio_device/opensles_recorder.h"
+
+namespace webrtc {
+
+rtc::scoped_refptr<AudioDeviceModule> CreateAndroidAudioDeviceModule() {
+ RTC_LOG(INFO) << __FUNCTION__;
+ // Create an Android audio manager.
+ android_adm::AudioManager audio_manager_android;
+ // Select best possible combination of audio layers.
+ if (audio_manager_android.IsAAudioSupported()) {
+#if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+ return new rtc::RefCountedObject<android_adm::AudioDeviceTemplateAndroid<
+ android_adm::AAudioRecorder, android_adm::AAudioPlayer>>(
+ AudioDeviceModule::kAndroidAAudioAudio);
+#endif
+ } else if (audio_manager_android.IsLowLatencyPlayoutSupported() &&
+ audio_manager_android.IsLowLatencyRecordSupported()) {
+ // Use OpenSL ES for both playout and recording.
+ return new rtc::RefCountedObject<android_adm::AudioDeviceTemplateAndroid<
+ android_adm::OpenSLESRecorder, android_adm::OpenSLESPlayer>>(
+ AudioDeviceModule::kAndroidOpenSLESAudio);
+ } else if (audio_manager_android.IsLowLatencyPlayoutSupported() &&
+ !audio_manager_android.IsLowLatencyRecordSupported()) {
+ // Use OpenSL ES for output on devices that only supports the
+ // low-latency output audio path.
+ // This combination provides low-latency output audio and at the same
+ // time support for HW AEC using the AudioRecord Java API.
+ return new rtc::RefCountedObject<android_adm::AudioDeviceTemplateAndroid<
+ android_adm::AudioRecordJni, android_adm::OpenSLESPlayer>>(
+ AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio);
+ } else {
+ // Use Java-based audio in both directions when low-latency output is
+ // not supported.
+ return new rtc::RefCountedObject<android_adm::AudioDeviceTemplateAndroid<
+ android_adm::AudioRecordJni, android_adm::AudioTrackJni>>(
+ AudioDeviceModule::kAndroidJavaAudio);
+ }
+ RTC_LOG(LS_ERROR) << "The requested audio layer is not supported";
+ return nullptr;
+}
+
+} // namespace webrtc
diff --git a/sdk/android/native_api/audio_device_module/audio_device_android.h b/sdk/android/native_api/audio_device_module/audio_device_android.h
new file mode 100644
index 0000000000..56567785a2
--- /dev/null
+++ b/sdk/android/native_api/audio_device_module/audio_device_android.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_ANDROID_NATIVE_API_AUDIO_DEVICE_MODULE_AUDIO_DEVICE_ANDROID_H_
+#define SDK_ANDROID_NATIVE_API_AUDIO_DEVICE_MODULE_AUDIO_DEVICE_ANDROID_H_
+
+#include "modules/audio_device/include/audio_device.h"
+
+namespace webrtc {
+
+rtc::scoped_refptr<AudioDeviceModule> CreateAndroidAudioDeviceModule();
+
+} // namespace webrtc
+
+#endif // SDK_ANDROID_NATIVE_API_AUDIO_DEVICE_MODULE_AUDIO_DEVICE_ANDROID_H_
diff --git a/sdk/android/src/java/org/webrtc/audio/BuildInfo.java b/sdk/android/src/java/org/webrtc/audio/BuildInfo.java
new file mode 100644
index 0000000000..03b8e0e407
--- /dev/null
+++ b/sdk/android/src/java/org/webrtc/audio/BuildInfo.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.audio;
+
+import android.os.Build;
+
+public final class BuildInfo {
+ public static String getDevice() {
+ return Build.DEVICE;
+ }
+
+ public static String getDeviceModel() {
+ return Build.MODEL;
+ }
+
+ public static String getProduct() {
+ return Build.PRODUCT;
+ }
+
+ public static String getBrand() {
+ return Build.BRAND;
+ }
+
+ public static String getDeviceManufacturer() {
+ return Build.MANUFACTURER;
+ }
+
+ public static String getAndroidBuildId() {
+ return Build.ID;
+ }
+
+ public static String getBuildType() {
+ return Build.TYPE;
+ }
+
+ public static String getBuildRelease() {
+ return Build.VERSION.RELEASE;
+ }
+
+ public static int getSdkVersion() {
+ return Build.VERSION.SDK_INT;
+ }
+}
diff --git a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioEffects.java b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioEffects.java
new file mode 100644
index 0000000000..8273b7e3a7
--- /dev/null
+++ b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioEffects.java
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.audio;
+
+import android.annotation.TargetApi;
+import android.media.audiofx.AcousticEchoCanceler;
+import android.media.audiofx.AudioEffect;
+import android.media.audiofx.AudioEffect.Descriptor;
+import android.media.audiofx.AutomaticGainControl;
+import android.media.audiofx.NoiseSuppressor;
+import android.os.Build;
+import java.util.List;
+import java.util.UUID;
+import org.webrtc.Logging;
+
+// This class wraps control of two platform audio effects:
+// AcousticEchoCanceler (AEC) and NoiseSuppressor (NS).
+// Calling enable() will activate all effects that are
+// supported by the device if the corresponding |shouldEnableXXX| member is set.
+class WebRtcAudioEffects {
+ private static final boolean DEBUG = false;
+
+ private static final String TAG = "WebRtcAudioEffects";
+
+ // UUIDs for Software Audio Effects that we want to avoid using.
+ // The implementor field will be set to "The Android Open Source Project".
+ private static final UUID AOSP_ACOUSTIC_ECHO_CANCELER =
+ UUID.fromString("bb392ec0-8d4d-11e0-a896-0002a5d5c51b");
+ private static final UUID AOSP_NOISE_SUPPRESSOR =
+ UUID.fromString("c06c8400-8e06-11e0-9cb6-0002a5d5c51b");
+
+ // Contains the available effect descriptors returned from the
+ // AudioEffect.getEffects() call. This result is cached to avoid doing the
+ // slow OS call multiple times.
+ private static Descriptor[] cachedEffects = null;
+
+ // Contains the audio effect objects. Created in enable() and destroyed
+ // in release().
+ private AcousticEchoCanceler aec = null;
+ private NoiseSuppressor ns = null;
+
+ // Affects the final state given to the setEnabled() method on each effect.
+ // The default state is set to "disabled" but each effect can also be enabled
+ // by calling setAEC() and setNS().
+ // To enable an effect, both the shouldEnableXXX member and the static
+ // canUseXXX() must be true.
+ private boolean shouldEnableAec = false;
+ private boolean shouldEnableNs = false;
+
+ // Checks if the device implements Acoustic Echo Cancellation (AEC).
+ // Returns true if the device implements AEC, false otherwise.
+ public static boolean isAcousticEchoCancelerSupported() {
+ // Note: we're using isAcousticEchoCancelerEffectAvailable() instead of
+ // AcousticEchoCanceler.isAvailable() to avoid the expensive getEffects()
+ // OS API call.
+ return isAcousticEchoCancelerEffectAvailable();
+ }
+
+ // Checks if the device implements Noise Suppression (NS).
+ // Returns true if the device implements NS, false otherwise.
+ public static boolean isNoiseSuppressorSupported() {
+ // Note: we're using isNoiseSuppressorEffectAvailable() instead of
+ // NoiseSuppressor.isAvailable() to avoid the expensive getEffects()
+ // OS API call.
+ return isNoiseSuppressorEffectAvailable();
+ }
+
+ // Returns true if the device is blacklisted for HW AEC usage.
+ public static boolean isAcousticEchoCancelerBlacklisted() {
+ List<String> blackListedModels = WebRtcAudioUtils.getBlackListedModelsForAecUsage();
+ boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
+ if (isBlacklisted) {
+ Logging.w(TAG, Build.MODEL + " is blacklisted for HW AEC usage!");
+ }
+ return isBlacklisted;
+ }
+
+ // Returns true if the device is blacklisted for HW NS usage.
+ public static boolean isNoiseSuppressorBlacklisted() {
+ List<String> blackListedModels = WebRtcAudioUtils.getBlackListedModelsForNsUsage();
+ boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
+ if (isBlacklisted) {
+ Logging.w(TAG, Build.MODEL + " is blacklisted for HW NS usage!");
+ }
+ return isBlacklisted;
+ }
+
+ // Returns true if the platform AEC should be excluded based on its UUID.
+ // AudioEffect.queryEffects() can throw IllegalStateException.
+ @TargetApi(18)
+ private static boolean isAcousticEchoCancelerExcludedByUUID() {
+ for (Descriptor d : getAvailableEffects()) {
+ if (d.type.equals(AudioEffect.EFFECT_TYPE_AEC)
+ && d.uuid.equals(AOSP_ACOUSTIC_ECHO_CANCELER)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Returns true if the platform NS should be excluded based on its UUID.
+ // AudioEffect.queryEffects() can throw IllegalStateException.
+ @TargetApi(18)
+ private static boolean isNoiseSuppressorExcludedByUUID() {
+ for (Descriptor d : getAvailableEffects()) {
+ if (d.type.equals(AudioEffect.EFFECT_TYPE_NS) && d.uuid.equals(AOSP_NOISE_SUPPRESSOR)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Returns true if the device supports Acoustic Echo Cancellation (AEC).
+ @TargetApi(18)
+ private static boolean isAcousticEchoCancelerEffectAvailable() {
+ return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_AEC);
+ }
+
+ // Returns true if the device supports Noise Suppression (NS).
+ @TargetApi(18)
+ private static boolean isNoiseSuppressorEffectAvailable() {
+ return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_NS);
+ }
+
+ // Returns true if all conditions for supporting the HW AEC are fulfilled.
+ // It will not be possible to enable the HW AEC if this method returns false.
+ public static boolean canUseAcousticEchoCanceler() {
+ boolean canUseAcousticEchoCanceler = isAcousticEchoCancelerSupported()
+ && !WebRtcAudioUtils.useWebRtcBasedAcousticEchoCanceler()
+ && !isAcousticEchoCancelerBlacklisted() && !isAcousticEchoCancelerExcludedByUUID();
+ Logging.d(TAG, "canUseAcousticEchoCanceler: " + canUseAcousticEchoCanceler);
+ return canUseAcousticEchoCanceler;
+ }
+
+ // Returns true if all conditions for supporting the HW NS are fulfilled.
+ // It will not be possible to enable the HW NS if this method returns false.
+ public static boolean canUseNoiseSuppressor() {
+ boolean canUseNoiseSuppressor = isNoiseSuppressorSupported()
+ && !WebRtcAudioUtils.useWebRtcBasedNoiseSuppressor() && !isNoiseSuppressorBlacklisted()
+ && !isNoiseSuppressorExcludedByUUID();
+ Logging.d(TAG, "canUseNoiseSuppressor: " + canUseNoiseSuppressor);
+ return canUseNoiseSuppressor;
+ }
+
+ public static WebRtcAudioEffects create() {
+ return new WebRtcAudioEffects();
+ }
+
+ private WebRtcAudioEffects() {
+ Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
+ }
+
+ // Call this method to enable or disable the platform AEC. It modifies
+ // |shouldEnableAec| which is used in enable() where the actual state
+ // of the AEC effect is modified. Returns true if HW AEC is supported and
+ // false otherwise.
+ public boolean setAEC(boolean enable) {
+ Logging.d(TAG, "setAEC(" + enable + ")");
+ if (!canUseAcousticEchoCanceler()) {
+ Logging.w(TAG, "Platform AEC is not supported");
+ shouldEnableAec = false;
+ return false;
+ }
+ if (aec != null && (enable != shouldEnableAec)) {
+ Logging.e(TAG, "Platform AEC state can't be modified while recording");
+ return false;
+ }
+ shouldEnableAec = enable;
+ return true;
+ }
+
+ // Call this method to enable or disable the platform NS. It modifies
+ // |shouldEnableNs| which is used in enable() where the actual state
+ // of the NS effect is modified. Returns true if HW NS is supported and
+ // false otherwise.
+ public boolean setNS(boolean enable) {
+ Logging.d(TAG, "setNS(" + enable + ")");
+ if (!canUseNoiseSuppressor()) {
+ Logging.w(TAG, "Platform NS is not supported");
+ shouldEnableNs = false;
+ return false;
+ }
+ if (ns != null && (enable != shouldEnableNs)) {
+ Logging.e(TAG, "Platform NS state can't be modified while recording");
+ return false;
+ }
+ shouldEnableNs = enable;
+ return true;
+ }
+
+ public void enable(int audioSession) {
+ Logging.d(TAG, "enable(audioSession=" + audioSession + ")");
+ assertTrue(aec == null);
+ assertTrue(ns == null);
+
+ if (DEBUG) {
+ // Add logging of supported effects but filter out "VoIP effects", i.e.,
+ // AEC and NS. Avoid calling AudioEffect.queryEffects() unless the
+ // DEBUG flag is set since we have seen crashes in this API.
+ for (Descriptor d : AudioEffect.queryEffects()) {
+ if (effectTypeIsVoIP(d.type)) {
+ Logging.d(TAG,
+ "name: " + d.name + ", "
+ + "mode: " + d.connectMode + ", "
+ + "implementor: " + d.implementor + ", "
+ + "UUID: " + d.uuid);
+ }
+ }
+ }
+
+ if (isAcousticEchoCancelerSupported()) {
+ // Create an AcousticEchoCanceler and attach it to the AudioRecord on
+ // the specified audio session.
+ aec = AcousticEchoCanceler.create(audioSession);
+ if (aec != null) {
+ boolean enabled = aec.getEnabled();
+ boolean enable = shouldEnableAec && canUseAcousticEchoCanceler();
+ if (aec.setEnabled(enable) != AudioEffect.SUCCESS) {
+ Logging.e(TAG, "Failed to set the AcousticEchoCanceler state");
+ }
+ Logging.d(TAG,
+ "AcousticEchoCanceler: was " + (enabled ? "enabled" : "disabled") + ", enable: "
+ + enable + ", is now: " + (aec.getEnabled() ? "enabled" : "disabled"));
+ } else {
+ Logging.e(TAG, "Failed to create the AcousticEchoCanceler instance");
+ }
+ }
+
+ if (isNoiseSuppressorSupported()) {
+ // Create an NoiseSuppressor and attach it to the AudioRecord on the
+ // specified audio session.
+ ns = NoiseSuppressor.create(audioSession);
+ if (ns != null) {
+ boolean enabled = ns.getEnabled();
+ boolean enable = shouldEnableNs && canUseNoiseSuppressor();
+ if (ns.setEnabled(enable) != AudioEffect.SUCCESS) {
+ Logging.e(TAG, "Failed to set the NoiseSuppressor state");
+ }
+ Logging.d(TAG,
+ "NoiseSuppressor: was " + (enabled ? "enabled" : "disabled") + ", enable: " + enable
+ + ", is now: " + (ns.getEnabled() ? "enabled" : "disabled"));
+ } else {
+ Logging.e(TAG, "Failed to create the NoiseSuppressor instance");
+ }
+ }
+ }
+
+ // Releases all native audio effect resources. It is a good practice to
+ // release the effect engine when not in use as control can be returned
+ // to other applications or the native resources released.
+ public void release() {
+ Logging.d(TAG, "release");
+ if (aec != null) {
+ aec.release();
+ aec = null;
+ }
+ if (ns != null) {
+ ns.release();
+ ns = null;
+ }
+ }
+
+ // Returns true for effect types in |type| that are of "VoIP" types:
+ // Acoustic Echo Canceler (AEC) or Noise Suppressor (NS). Note that an
+ // extra check for support is needed in each comparison since some devices
+ // include effects in the AudioEffect.Descriptor array that are actually
+ // not available on the device.
+ // As an example: the Samsung Galaxy S6 includes an AGC in the descriptor but
+ // AutomaticGainControl.isAvailable() returns false.
+ @TargetApi(18)
+ private boolean effectTypeIsVoIP(UUID type) {
+ if (!WebRtcAudioUtils.runningOnJellyBeanMR2OrHigher())
+ return false;
+
+ return (AudioEffect.EFFECT_TYPE_AEC.equals(type) && isAcousticEchoCancelerSupported())
+ || (AudioEffect.EFFECT_TYPE_NS.equals(type) && isNoiseSuppressorSupported());
+ }
+
+ // Helper method which throws an exception when an assertion has failed.
+ private static void assertTrue(boolean condition) {
+ if (!condition) {
+ throw new AssertionError("Expected condition to be true");
+ }
+ }
+
+ // Returns the cached copy of the audio effects array, if available, or
+ // queries the operating system for the list of effects.
+ private static Descriptor[] getAvailableEffects() {
+ if (cachedEffects != null) {
+ return cachedEffects;
+ }
+ // The caching is best effort only - if this method is called from several
+ // threads in parallel, they may end up doing the underlying OS call
+ // multiple times. It's normally only called on one thread so there's no
+ // real need to optimize for the multiple threads case.
+ cachedEffects = AudioEffect.queryEffects();
+ return cachedEffects;
+ }
+
+ // Returns true if an effect of the specified type is available. Functionally
+ // equivalent to (NoiseSuppressor|AutomaticGainControl|...).isAvailable(), but
+ // faster as it avoids the expensive OS call to enumerate effects.
+ private static boolean isEffectTypeAvailable(UUID effectType) {
+ Descriptor[] effects = getAvailableEffects();
+ if (effects == null) {
+ return false;
+ }
+ for (Descriptor d : effects) {
+ if (d.type.equals(effectType)) {
+ return true;
+ }
+ }
+ return false;
+ }
+}
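A sketch of the intended call sequence for WebRtcAudioEffects: the class is package-private, so such code would have to live in org.webrtc.audio, and the session id is the one from the AudioRecord the effects should attach to:

package org.webrtc.audio;

import android.media.AudioRecord;

final class EffectsFlowExample {
  static WebRtcAudioEffects attach(AudioRecord record, boolean wantAec, boolean wantNs) {
    WebRtcAudioEffects effects = WebRtcAudioEffects.create();
    // Record the desired states first; enable() applies them to the session.
    effects.setAEC(wantAec); // returns false if the HW AEC cannot be used
    effects.setNS(wantNs); // returns false if the HW NS cannot be used
    effects.enable(record.getAudioSessionId());
    return effects; // the caller must invoke release() once recording stops
  }
}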
diff --git a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioManager.java b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioManager.java
new file mode 100644
index 0000000000..b4288cb067
--- /dev/null
+++ b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioManager.java
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.audio;
+
+import android.annotation.TargetApi;
+import android.content.Context;
+import android.content.pm.PackageManager;
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.AudioRecord;
+import android.media.AudioTrack;
+import android.os.Build;
+import java.util.Timer;
+import java.util.TimerTask;
+import org.webrtc.ContextUtils;
+import org.webrtc.Logging;
+
+// WebRtcAudioManager handles tasks that use android.media.AudioManager.
+// At construction, storeAudioParameters() is called and it retrieves
+// fundamental audio parameters like native sample rate and number of channels.
+// The result is then provided to the caller by nativeCacheAudioParameters().
+// It is also possible to call init() to set up the audio environment for best
+// possible "VoIP performance". All settings done in init() are reverted by
+// dispose(). This class can also be used without calling init() if the user
+// prefers to set up the audio environment separately. However, it is
+// recommended to always use AudioManager.MODE_IN_COMMUNICATION.
+class WebRtcAudioManager {
+ private static final boolean DEBUG = false;
+
+ private static final String TAG = "WebRtcAudioManager";
+
+ // TODO(bugs.webrtc.org/8914): disabled by default until AAudio support has
+ // been completed. Goal is to always return false on Android O MR1 and higher.
+ private static final boolean blacklistDeviceForAAudioUsage = true;
+
+ // Use mono as default for both audio directions.
+ private static boolean useStereoOutput = false;
+ private static boolean useStereoInput = false;
+
+ private static boolean blacklistDeviceForOpenSLESUsage = false;
+ private static boolean blacklistDeviceForOpenSLESUsageIsOverridden = false;
+
+ // Call this method to override the default list of blacklisted devices
+ // specified in WebRtcAudioUtils.BLACKLISTED_OPEN_SL_ES_MODELS.
+ // Allows an app to take control over which devices to exclude from using
+ // the OpenSL ES audio output path
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setBlacklistDeviceForOpenSLESUsage(boolean enable) {
+ blacklistDeviceForOpenSLESUsageIsOverridden = true;
+ blacklistDeviceForOpenSLESUsage = enable;
+ }
+
+ // Call these methods to override the default mono audio modes for the specified direction(s)
+ // (input and/or output).
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setStereoOutput(boolean enable) {
+ Logging.w(TAG, "Overriding default output behavior: setStereoOutput(" + enable + ')');
+ useStereoOutput = enable;
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setStereoInput(boolean enable) {
+ Logging.w(TAG, "Overriding default input behavior: setStereoInput(" + enable + ')');
+ useStereoInput = enable;
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized boolean getStereoOutput() {
+ return useStereoOutput;
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized boolean getStereoInput() {
+ return useStereoInput;
+ }
+
+ // Default audio data format is PCM 16 bit per sample.
+ // Guaranteed to be supported by all devices.
+ private static final int BITS_PER_SAMPLE = 16;
+
+ private static final int DEFAULT_FRAME_PER_BUFFER = 256;
+
+ // Private utility class that periodically checks and logs the volume level
+ // of the audio stream that is currently controlled by the volume control.
+ // A timer triggers logs once every 30 seconds and the timer's associated
+ // thread is named "WebRtcVolumeLevelLoggerThread".
+ private static class VolumeLogger {
+ private static final String THREAD_NAME = "WebRtcVolumeLevelLoggerThread";
+ private static final int TIMER_PERIOD_IN_SECONDS = 30;
+
+ private final AudioManager audioManager;
+ private Timer timer;
+
+ public VolumeLogger(AudioManager audioManager) {
+ this.audioManager = audioManager;
+ }
+
+ public void start() {
+ timer = new Timer(THREAD_NAME);
+ timer.schedule(new LogVolumeTask(audioManager.getStreamMaxVolume(AudioManager.STREAM_RING),
+ audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL)),
+ 0, TIMER_PERIOD_IN_SECONDS * 1000);
+ }
+
+ private class LogVolumeTask extends TimerTask {
+ private final int maxRingVolume;
+ private final int maxVoiceCallVolume;
+
+ LogVolumeTask(int maxRingVolume, int maxVoiceCallVolume) {
+ this.maxRingVolume = maxRingVolume;
+ this.maxVoiceCallVolume = maxVoiceCallVolume;
+ }
+
+ @Override
+ public void run() {
+ final int mode = audioManager.getMode();
+ if (mode == AudioManager.MODE_RINGTONE) {
+ Logging.d(TAG,
+ "STREAM_RING stream volume: " + audioManager.getStreamVolume(AudioManager.STREAM_RING)
+ + " (max=" + maxRingVolume + ")");
+ } else if (mode == AudioManager.MODE_IN_COMMUNICATION) {
+ Logging.d(TAG,
+ "VOICE_CALL stream volume: "
+ + audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL)
+ + " (max=" + maxVoiceCallVolume + ")");
+ }
+ }
+ }
+
+ private void stop() {
+ if (timer != null) {
+ timer.cancel();
+ timer = null;
+ }
+ }
+ }
+
+ private final long nativeAudioManager;
+ private final AudioManager audioManager;
+
+ private boolean initialized = false;
+ private int nativeSampleRate;
+ private int nativeChannels;
+
+ private boolean hardwareAEC;
+ private boolean hardwareAGC;
+ private boolean hardwareNS;
+ private boolean lowLatencyOutput;
+ private boolean lowLatencyInput;
+ private boolean proAudio;
+ private boolean aAudio;
+ private int sampleRate;
+ private int outputChannels;
+ private int inputChannels;
+ private int outputBufferSize;
+ private int inputBufferSize;
+
+ private final VolumeLogger volumeLogger;
+
+ WebRtcAudioManager(long nativeAudioManager) {
+ Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
+ this.nativeAudioManager = nativeAudioManager;
+ audioManager =
+ (AudioManager) ContextUtils.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
+ if (DEBUG) {
+ WebRtcAudioUtils.logDeviceInfo(TAG);
+ }
+ volumeLogger = new VolumeLogger(audioManager);
+ storeAudioParameters();
+ nativeCacheAudioParameters(sampleRate, outputChannels, inputChannels, hardwareAEC, hardwareAGC,
+ hardwareNS, lowLatencyOutput, lowLatencyInput, proAudio, aAudio, outputBufferSize,
+ inputBufferSize, nativeAudioManager);
+ WebRtcAudioUtils.logAudioState(TAG);
+ }
+
+ private boolean init() {
+ Logging.d(TAG, "init" + WebRtcAudioUtils.getThreadInfo());
+ if (initialized) {
+ return true;
+ }
+ Logging.d(TAG, "audio mode is: " + WebRtcAudioUtils.modeToString(audioManager.getMode()));
+ initialized = true;
+ volumeLogger.start();
+ return true;
+ }
+
+ private void dispose() {
+ Logging.d(TAG, "dispose" + WebRtcAudioUtils.getThreadInfo());
+ if (!initialized) {
+ return;
+ }
+ volumeLogger.stop();
+ }
+
+ private boolean isCommunicationModeEnabled() {
+ return (audioManager.getMode() == AudioManager.MODE_IN_COMMUNICATION);
+ }
+
+ private boolean isDeviceBlacklistedForOpenSLESUsage() {
+ boolean blacklisted = blacklistDeviceForOpenSLESUsageIsOverridden
+ ? blacklistDeviceForOpenSLESUsage
+ : WebRtcAudioUtils.deviceIsBlacklistedForOpenSLESUsage();
+ if (blacklisted) {
+ Logging.d(TAG, Build.MODEL + " is blacklisted for OpenSL ES usage!");
+ }
+ return blacklisted;
+ }
+
+ private void storeAudioParameters() {
+ outputChannels = getStereoOutput() ? 2 : 1;
+ inputChannels = getStereoInput() ? 2 : 1;
+ sampleRate = getNativeOutputSampleRate();
+ hardwareAEC = isAcousticEchoCancelerSupported();
+ // TODO(henrika): use of hardware AGC is no longer supported. Currently
+ // hardcoded to false. To be removed.
+ hardwareAGC = false;
+ hardwareNS = isNoiseSuppressorSupported();
+ lowLatencyOutput = isLowLatencyOutputSupported();
+ lowLatencyInput = isLowLatencyInputSupported();
+ proAudio = isProAudioSupported();
+ aAudio = isAAudioSupported();
+ outputBufferSize = lowLatencyOutput ? getLowLatencyOutputFramesPerBuffer()
+ : getMinOutputFrameSize(sampleRate, outputChannels);
+ inputBufferSize = lowLatencyInput ? getLowLatencyInputFramesPerBuffer()
+ : getMinInputFrameSize(sampleRate, inputChannels);
+ }
+
+ // Gets the current earpiece state.
+ private boolean hasEarpiece() {
+ return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
+ PackageManager.FEATURE_TELEPHONY);
+ }
+
+ // Returns true if low-latency audio output is supported.
+ private boolean isLowLatencyOutputSupported() {
+ return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
+ PackageManager.FEATURE_AUDIO_LOW_LATENCY);
+ }
+
+ // Returns true if low-latency audio input is supported.
+ // TODO(henrika): this check is deliberately conservative; revisit it once
+ // OpenSL ES input performance has been evaluated and tested more.
+ public boolean isLowLatencyInputSupported() {
+ // TODO(henrika): investigate if some sort of device list is needed here
+ // as well. The NDK doc states that: "As of API level 21, lower latency
+ // audio input is supported on select devices. To take advantage of this
+ // feature, first confirm that lower latency output is available".
+ return WebRtcAudioUtils.runningOnLollipopOrHigher() && isLowLatencyOutputSupported();
+ }
+
+ // Returns true if the device has professional audio level of functionality
+ // and therefore supports the lowest possible round-trip latency.
+ @TargetApi(23)
+ private boolean isProAudioSupported() {
+ return WebRtcAudioUtils.runningOnMarshmallowOrHigher()
+ && ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
+ PackageManager.FEATURE_AUDIO_PRO);
+ }
+
+ // AAudio is supported on Android Oreo MR1 (API 27) and higher.
+ // TODO(bugs.webrtc.org/8914): currently disabled by default.
+ private boolean isAAudioSupported() {
+ if (blacklistDeviceForAAudioUsage) {
+ Logging.w(TAG, "AAudio support is currently disabled on all devices!");
+ }
+ return !blacklistDeviceForAAudioUsage && WebRtcAudioUtils.runningOnOreoMR1OrHigher();
+ }
+
+ // Returns the native output sample rate for this device's output stream.
+ private int getNativeOutputSampleRate() {
+ // Override this if we're running on an old emulator image which only
+ // supports 8 kHz and doesn't support PROPERTY_OUTPUT_SAMPLE_RATE.
+ if (WebRtcAudioUtils.runningOnEmulator()) {
+ Logging.d(TAG, "Running emulator, overriding sample rate to 8 kHz.");
+ return 8000;
+ }
+ // Default can be overridden by WebRtcAudioUtils.setDefaultSampleRateHz().
+ // If so, use that value and return here.
+ if (WebRtcAudioUtils.isDefaultSampleRateOverridden()) {
+ Logging.d(TAG,
+ "Default sample rate is overriden to " + WebRtcAudioUtils.getDefaultSampleRateHz()
+ + " Hz");
+ return WebRtcAudioUtils.getDefaultSampleRateHz();
+ }
+ // No overrides available. Deliver best possible estimate based on default
+ // Android AudioManager APIs.
+ final int sampleRateHz;
+ if (WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
+ sampleRateHz = getSampleRateOnJellyBeanMR1OrHigher();
+ } else {
+ sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz();
+ }
+ Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz");
+ return sampleRateHz;
+ }
+
+ @TargetApi(17)
+ private int getSampleRateOnJellyBeanMR1OrHigher() {
+ String sampleRateString = audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
+ return (sampleRateString == null) ? WebRtcAudioUtils.getDefaultSampleRateHz()
+ : Integer.parseInt(sampleRateString);
+ }
+
+ // Returns the native output buffer size for low-latency output streams.
+ @TargetApi(17)
+ private int getLowLatencyOutputFramesPerBuffer() {
+ assertTrue(isLowLatencyOutputSupported());
+ if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
+ return DEFAULT_FRAME_PER_BUFFER;
+ }
+ String framesPerBuffer =
+ audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
+ return framesPerBuffer == null ? DEFAULT_FRAME_PER_BUFFER : Integer.parseInt(framesPerBuffer);
+ }
+
+ // Returns true if the device supports an audio effect (AEC or NS).
+ // Four conditions must be fulfilled if functions are to return true:
+ // 1) the platform must support the built-in (HW) effect,
+ // 2) explicit use (override) of a WebRTC based version must not be set,
+ // 3) the device must not be blacklisted for use of the effect, and
+ // 4) the UUID of the effect must be approved (some UUIDs can be excluded).
+ private static boolean isAcousticEchoCancelerSupported() {
+ return WebRtcAudioEffects.canUseAcousticEchoCanceler();
+ }
+ private static boolean isNoiseSuppressorSupported() {
+ return WebRtcAudioEffects.canUseNoiseSuppressor();
+ }
+
+ // Returns the minimum output buffer size for Java based audio (AudioTrack).
+ // This size can also be used for OpenSL ES implementations on devices that
+ // lack support for low-latency output.
+ private static int getMinOutputFrameSize(int sampleRateInHz, int numChannels) {
+ final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
+ final int channelConfig =
+ (numChannels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO);
+ return AudioTrack.getMinBufferSize(
+ sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
+ / bytesPerFrame;
+ }
+
+ // Returns the native input buffer size for input streams.
+ private int getLowLatencyInputFramesPerBuffer() {
+ assertTrue(isLowLatencyInputSupported());
+ return getLowLatencyOutputFramesPerBuffer();
+ }
+
+ // Returns the minimum input buffer size for Java based audio (AudioRecord).
+ // This size can also be used for OpenSL ES implementations on devices that
+ // lack support for low-latency input.
+ private static int getMinInputFrameSize(int sampleRateInHz, int numChannels) {
+ final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
+ final int channelConfig =
+ (numChannels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
+ return AudioRecord.getMinBufferSize(
+ sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
+ / bytesPerFrame;
+ }
+
+ // Helper method which throws an exception when an assertion has failed.
+ private static void assertTrue(boolean condition) {
+ if (!condition) {
+ throw new AssertionError("Expected condition to be true");
+ }
+ }
+
+ private native void nativeCacheAudioParameters(int sampleRate, int outputChannels,
+ int inputChannels, boolean hardwareAEC, boolean hardwareAGC, boolean hardwareNS,
+ boolean lowLatencyOutput, boolean lowLatencyInput, boolean proAudio, boolean aAudio,
+ int outputBufferSize, int inputBufferSize, long nativeAudioManager);
+}
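To make the byte-to-frame arithmetic in getMinOutputFrameSize()/getMinInputFrameSize() concrete, a standalone sketch of the output-side conversion (PCM 16-bit, so two bytes per sample and channel); at 48000 Hz mono the result is several 10 ms callback buffers' worth of frames:

import android.media.AudioFormat;
import android.media.AudioTrack;

final class FrameSizeExample {
  // AudioTrack.getMinBufferSize() reports bytes; WebRTC wants frames, where
  // one frame holds one 16-bit sample per channel.
  static int minOutputFrames(int sampleRateHz, int numChannels) {
    final int bytesPerFrame = numChannels * (16 / 8);
    final int channelConfig =
        numChannels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO;
    final int minBytes = AudioTrack.getMinBufferSize(
        sampleRateHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT);
    return minBytes / bytesPerFrame;
  }
}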
diff --git a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java
new file mode 100644
index 0000000000..3d940049dc
--- /dev/null
+++ b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.audio;
+
+import android.annotation.TargetApi;
+import android.media.AudioFormat;
+import android.media.AudioRecord;
+import android.media.MediaRecorder.AudioSource;
+import android.os.Process;
+import java.lang.System;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.concurrent.TimeUnit;
+import org.webrtc.Logging;
+import org.webrtc.ThreadUtils;
+import org.webrtc.audio.AudioDeviceModule.AudioRecordErrorCallback;
+import org.webrtc.audio.AudioDeviceModule.AudioRecordStartErrorCode;
+
+class WebRtcAudioRecord {
+ private static final boolean DEBUG = false;
+
+ private static final String TAG = "WebRtcAudioRecord";
+
+ // Default audio data format is PCM 16 bit per sample.
+ // Guaranteed to be supported by all devices.
+ private static final int BITS_PER_SAMPLE = 16;
+
+ // Requested size of each recorded buffer provided to the client.
+ private static final int CALLBACK_BUFFER_SIZE_MS = 10;
+
+ // Average number of callbacks per second.
+ private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
+
+ // We ask for a native buffer size of BUFFER_SIZE_FACTOR * (minimum required
+ // buffer size). The extra space is allocated to guard against glitches under
+ // high load.
+ private static final int BUFFER_SIZE_FACTOR = 2;
+
+ // The AudioRecordJavaThread is allowed to wait for a successful call to
+ // join() but the wait times out after this amount of time.
+ private static final long AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS = 2000;
+
+ private static final int DEFAULT_AUDIO_SOURCE = getDefaultAudioSource();
+ private static int audioSource = DEFAULT_AUDIO_SOURCE;
+
+ private final long nativeAudioRecord;
+
+ private WebRtcAudioEffects effects = null;
+
+ private ByteBuffer byteBuffer;
+
+ private AudioRecord audioRecord = null;
+ private AudioRecordThread audioThread = null;
+
+ private static volatile boolean microphoneMute = false;
+ private byte[] emptyBytes;
+
+ private static AudioRecordErrorCallback errorCallback = null;
+
+ public static void setErrorCallback(AudioRecordErrorCallback errorCallback) {
+ Logging.d(TAG, "Set error callback");
+ WebRtcAudioRecord.errorCallback = errorCallback;
+ }
+
+ /**
+ * Contains audio sample information. Object is passed using {@link
+ * WebRtcAudioRecord.WebRtcAudioRecordSamplesReadyCallback}
+ */
+ public static class AudioSamples {
+ /** See {@link AudioRecord#getAudioFormat()} */
+ private final int audioFormat;
+ /** See {@link AudioRecord#getChannelCount()} */
+ private final int channelCount;
+ /** See {@link AudioRecord#getSampleRate()} */
+ private final int sampleRate;
+
+ private final byte[] data;
+
+ private AudioSamples(AudioRecord audioRecord, byte[] data) {
+ this.audioFormat = audioRecord.getAudioFormat();
+ this.channelCount = audioRecord.getChannelCount();
+ this.sampleRate = audioRecord.getSampleRate();
+ this.data = data;
+ }
+
+ public int getAudioFormat() {
+ return audioFormat;
+ }
+
+ public int getChannelCount() {
+ return channelCount;
+ }
+
+ public int getSampleRate() {
+ return sampleRate;
+ }
+
+ public byte[] getData() {
+ return data;
+ }
+ }
+
+ /** Called when new audio samples are ready. This should only be set for debug purposes */
+ public static interface WebRtcAudioRecordSamplesReadyCallback {
+ void onWebRtcAudioRecordSamplesReady(AudioSamples samples);
+ }
+
+ private static WebRtcAudioRecordSamplesReadyCallback audioSamplesReadyCallback = null;
+
+ public static void setOnAudioSamplesReady(WebRtcAudioRecordSamplesReadyCallback callback) {
+ audioSamplesReadyCallback = callback;
+ }
+
+ /**
+ * Audio thread which keeps calling AudioRecord.read() waiting for audio
+ * to be recorded. Feeds recorded data to the native counterpart as a
+ * periodic sequence of callbacks using DataIsRecorded().
+ * This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
+ */
+ private class AudioRecordThread extends Thread {
+ private volatile boolean keepAlive = true;
+
+ public AudioRecordThread(String name) {
+ super(name);
+ }
+
+ @Override
+ public void run() {
+ Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
+ Logging.d(TAG, "AudioRecordThread" + WebRtcAudioUtils.getThreadInfo());
+ assertTrue(audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING);
+
+ long lastTime = System.nanoTime();
+ while (keepAlive) {
+ int bytesRead = audioRecord.read(byteBuffer, byteBuffer.capacity());
+ if (bytesRead == byteBuffer.capacity()) {
+ if (microphoneMute) {
+ byteBuffer.clear();
+ byteBuffer.put(emptyBytes);
+ }
+ // It's possible we've been shut down during the read, and stopRecording() tried and
+ // failed to join this thread. To be a bit safer, try to avoid calling any native methods
+ // in case they've been unregistered after stopRecording() returned.
+ if (keepAlive) {
+ nativeDataIsRecorded(bytesRead, nativeAudioRecord);
+ }
+ if (audioSamplesReadyCallback != null) {
+ // Copy the entire byte buffer array. Assume that the start of the byteBuffer is
+ // at index 0.
+ byte[] data = Arrays.copyOf(byteBuffer.array(), byteBuffer.capacity());
+ audioSamplesReadyCallback.onWebRtcAudioRecordSamplesReady(
+ new AudioSamples(audioRecord, data));
+ }
+ } else {
+ String errorMessage = "AudioRecord.read failed: " + bytesRead;
+ Logging.e(TAG, errorMessage);
+ if (bytesRead == AudioRecord.ERROR_INVALID_OPERATION) {
+ keepAlive = false;
+ reportWebRtcAudioRecordError(errorMessage);
+ }
+ }
+ if (DEBUG) {
+ long nowTime = System.nanoTime();
+ long durationInMs = TimeUnit.NANOSECONDS.toMillis((nowTime - lastTime));
+ lastTime = nowTime;
+ Logging.d(TAG, "bytesRead[" + durationInMs + "] " + bytesRead);
+ }
+ }
+
+ try {
+ if (audioRecord != null) {
+ audioRecord.stop();
+ }
+ } catch (IllegalStateException e) {
+ Logging.e(TAG, "AudioRecord.stop failed: " + e.getMessage());
+ }
+ }
+
+ // Stops the inner thread loop and also calls AudioRecord.stop().
+ // Does not block the calling thread.
+ public void stopThread() {
+ Logging.d(TAG, "stopThread");
+ keepAlive = false;
+ }
+ }
+
+ WebRtcAudioRecord(long nativeAudioRecord) {
+ Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
+ this.nativeAudioRecord = nativeAudioRecord;
+ if (DEBUG) {
+ WebRtcAudioUtils.logDeviceInfo(TAG);
+ }
+ effects = WebRtcAudioEffects.create();
+ }
+
+ private boolean enableBuiltInAEC(boolean enable) {
+ Logging.d(TAG, "enableBuiltInAEC(" + enable + ')');
+ if (effects == null) {
+ Logging.e(TAG, "Built-in AEC is not supported on this platform");
+ return false;
+ }
+ return effects.setAEC(enable);
+ }
+
+ private boolean enableBuiltInNS(boolean enable) {
+ Logging.d(TAG, "enableBuiltInNS(" + enable + ')');
+ if (effects == null) {
+ Logging.e(TAG, "Built-in NS is not supported on this platform");
+ return false;
+ }
+ return effects.setNS(enable);
+ }
+
+ private int initRecording(int sampleRate, int channels) {
+ Logging.d(TAG, "initRecording(sampleRate=" + sampleRate + ", channels=" + channels + ")");
+ if (audioRecord != null) {
+ reportWebRtcAudioRecordInitError("InitRecording called twice without StopRecording.");
+ return -1;
+ }
+ final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
+ final int framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
+ byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
+ Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
+ emptyBytes = new byte[byteBuffer.capacity()];
+    // Rather than passing the ByteBuffer with every callback (requiring
+    // the potentially expensive GetDirectBufferAddress) we simply have the
+    // native class cache the address to the memory once.
+ nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord);
+
+ // Get the minimum buffer size required for the successful creation of
+ // an AudioRecord object, in byte units.
+ // Note that this size doesn't guarantee a smooth recording under load.
+ final int channelConfig = channelCountToConfiguration(channels);
+ int minBufferSize =
+ AudioRecord.getMinBufferSize(sampleRate, channelConfig, AudioFormat.ENCODING_PCM_16BIT);
+ if (minBufferSize == AudioRecord.ERROR || minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
+ reportWebRtcAudioRecordInitError("AudioRecord.getMinBufferSize failed: " + minBufferSize);
+ return -1;
+ }
+ Logging.d(TAG, "AudioRecord.getMinBufferSize: " + minBufferSize);
+
+ // Use a larger buffer size than the minimum required when creating the
+ // AudioRecord instance to ensure smooth recording under load. It has been
+ // verified that it does not increase the actual recording latency.
+ int bufferSizeInBytes = Math.max(BUFFER_SIZE_FACTOR * minBufferSize, byteBuffer.capacity());
+ Logging.d(TAG, "bufferSizeInBytes: " + bufferSizeInBytes);
+ try {
+ audioRecord = new AudioRecord(audioSource, sampleRate, channelConfig,
+ AudioFormat.ENCODING_PCM_16BIT, bufferSizeInBytes);
+ } catch (IllegalArgumentException e) {
+ reportWebRtcAudioRecordInitError("AudioRecord ctor error: " + e.getMessage());
+ releaseAudioResources();
+ return -1;
+ }
+ if (audioRecord == null || audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
+ reportWebRtcAudioRecordInitError("Failed to create a new AudioRecord instance");
+ releaseAudioResources();
+ return -1;
+ }
+ if (effects != null) {
+ effects.enable(audioRecord.getAudioSessionId());
+ }
+ logMainParameters();
+ logMainParametersExtended();
+ return framesPerBuffer;
+ }
+
+ private boolean startRecording() {
+ Logging.d(TAG, "startRecording");
+ assertTrue(audioRecord != null);
+ assertTrue(audioThread == null);
+ try {
+ audioRecord.startRecording();
+ } catch (IllegalStateException e) {
+ reportWebRtcAudioRecordStartError(AudioRecordStartErrorCode.AUDIO_RECORD_START_EXCEPTION,
+ "AudioRecord.startRecording failed: " + e.getMessage());
+ return false;
+ }
+ if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
+ reportWebRtcAudioRecordStartError(AudioRecordStartErrorCode.AUDIO_RECORD_START_STATE_MISMATCH,
+ "AudioRecord.startRecording failed - incorrect state :"
+ + audioRecord.getRecordingState());
+ return false;
+ }
+ audioThread = new AudioRecordThread("AudioRecordJavaThread");
+ audioThread.start();
+ return true;
+ }
+
+ private boolean stopRecording() {
+ Logging.d(TAG, "stopRecording");
+ assertTrue(audioThread != null);
+ audioThread.stopThread();
+ if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS)) {
+ Logging.e(TAG, "Join of AudioRecordJavaThread timed out");
+ WebRtcAudioUtils.logAudioState(TAG);
+ }
+ audioThread = null;
+ if (effects != null) {
+ effects.release();
+ }
+ releaseAudioResources();
+ return true;
+ }
+
+ private void logMainParameters() {
+ Logging.d(TAG,
+ "AudioRecord: "
+ + "session ID: " + audioRecord.getAudioSessionId() + ", "
+ + "channels: " + audioRecord.getChannelCount() + ", "
+ + "sample rate: " + audioRecord.getSampleRate());
+ }
+
+ @TargetApi(23)
+ private void logMainParametersExtended() {
+ if (WebRtcAudioUtils.runningOnMarshmallowOrHigher()) {
+ Logging.d(TAG,
+ "AudioRecord: "
+ // The frame count of the native AudioRecord buffer.
+ + "buffer size in frames: " + audioRecord.getBufferSizeInFrames());
+ }
+ }
+
+ // Helper method which throws an exception when an assertion has failed.
+ private static void assertTrue(boolean condition) {
+ if (!condition) {
+ throw new AssertionError("Expected condition to be true");
+ }
+ }
+
+ private int channelCountToConfiguration(int channels) {
+ return (channels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
+ }
+
+ private native void nativeCacheDirectBufferAddress(ByteBuffer byteBuffer, long nativeAudioRecord);
+
+ private native void nativeDataIsRecorded(int bytes, long nativeAudioRecord);
+
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setAudioSource(int source) {
+ Logging.w(TAG, "Audio source is changed from: " + audioSource + " to " + source);
+ audioSource = source;
+ }
+
+ private static int getDefaultAudioSource() {
+ return AudioSource.VOICE_COMMUNICATION;
+ }
+
+ // Sets all recorded samples to zero if |mute| is true, i.e., ensures that
+ // the microphone is muted.
+ public static void setMicrophoneMute(boolean mute) {
+ Logging.w(TAG, "setMicrophoneMute(" + mute + ")");
+ microphoneMute = mute;
+ }
+
+ // Releases the native AudioRecord resources.
+ private void releaseAudioResources() {
+ Logging.d(TAG, "releaseAudioResources");
+ if (audioRecord != null) {
+ audioRecord.release();
+ audioRecord = null;
+ }
+ }
+
+ private void reportWebRtcAudioRecordInitError(String errorMessage) {
+ Logging.e(TAG, "Init recording error: " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG);
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioRecordInitError(errorMessage);
+ }
+ }
+
+ private void reportWebRtcAudioRecordStartError(
+ AudioRecordStartErrorCode errorCode, String errorMessage) {
+ Logging.e(TAG, "Start recording error: " + errorCode + ". " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG);
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioRecordStartError(errorCode, errorMessage);
+ }
+ }
+
+ private void reportWebRtcAudioRecordError(String errorMessage) {
+ Logging.e(TAG, "Run-time recording error: " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG);
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioRecordError(errorMessage);
+ }
+ }
+}
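[Editor's note] Usage sketch for the samples-ready hook above; not part of the patch. The dump is headerless 16-bit PCM, so samples.getSampleRate() and samples.getChannelCount() supply the parameters needed to interpret it offline. The class name DebugAudioDumper and the output path are hypothetical, and the sketch assumes package-level access to WebRtcAudioRecord and a public AudioSamples nested class.

package org.webrtc.audio;

import java.io.FileOutputStream;
import java.io.IOException;
import org.webrtc.Logging;

// Hypothetical debug-only sink that writes raw recorded PCM to a file.
final class DebugAudioDumper {
  private static final String TAG = "DebugAudioDumper";

  static void install(String pcmPath) throws IOException {
    final FileOutputStream out = new FileOutputStream(pcmPath);
    WebRtcAudioRecord.setOnAudioSamplesReady(
        new WebRtcAudioRecord.WebRtcAudioRecordSamplesReadyCallback() {
          @Override
          public void onWebRtcAudioRecordSamplesReady(
              WebRtcAudioRecord.AudioSamples samples) {
            // Runs on the high-priority AudioRecordThread; keep this cheap.
            try {
              out.write(samples.getData());
            } catch (IOException e) {
              Logging.e(TAG, "PCM dump failed: " + e.getMessage());
            }
          }
        });
  }
}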
diff --git a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioTrack.java b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioTrack.java
new file mode 100644
index 0000000000..9c5755ef3d
--- /dev/null
+++ b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioTrack.java
@@ -0,0 +1,494 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.audio;
+
+import android.annotation.SuppressLint;
+import android.annotation.TargetApi;
+import android.content.Context;
+import android.media.AudioAttributes;
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.AudioTrack;
+import android.os.Process;
+import java.lang.Thread;
+import java.nio.ByteBuffer;
+import org.webrtc.ContextUtils;
+import org.webrtc.Logging;
+import org.webrtc.ThreadUtils;
+import org.webrtc.audio.AudioDeviceModule.AudioTrackErrorCallback;
+import org.webrtc.audio.AudioDeviceModule.AudioTrackStartErrorCode;
+
+class WebRtcAudioTrack {
+ private static final boolean DEBUG = false;
+
+ private static final String TAG = "WebRtcAudioTrack";
+
+ // Default audio data format is PCM 16 bit per sample.
+ // Guaranteed to be supported by all devices.
+ private static final int BITS_PER_SAMPLE = 16;
+
+  // Requested size of each played-out audio buffer provided to the client.
+ private static final int CALLBACK_BUFFER_SIZE_MS = 10;
+
+ // Average number of callbacks per second.
+ private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
+
+  // The AudioTrackThread is allowed to wait for a successful call to join()
+  // but the wait times out after this amount of time.
+ private static final long AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS = 2000;
+
+ // By default, WebRTC creates audio tracks with a usage attribute
+ // corresponding to voice communications, such as telephony or VoIP.
+ private static final int DEFAULT_USAGE = getDefaultUsageAttribute();
+ private static int usageAttribute = DEFAULT_USAGE;
+
+  // This method overrides the default usage attribute and allows the user
+  // to set it to something other than AudioAttributes.USAGE_VOICE_COMMUNICATION.
+  // NOTE: calling this method will most likely break existing VoIP tuning.
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setAudioTrackUsageAttribute(int usage) {
+ Logging.w(TAG, "Default usage attribute is changed from: " + DEFAULT_USAGE + " to " + usage);
+ usageAttribute = usage;
+ }
+
+ private static int getDefaultUsageAttribute() {
+ if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
+ return getDefaultUsageAttributeOnLollipopOrHigher();
+ } else {
+ // Not used on SDKs lower than L.
+ return 0;
+ }
+ }
+
+ @TargetApi(21)
+ private static int getDefaultUsageAttributeOnLollipopOrHigher() {
+ return AudioAttributes.USAGE_VOICE_COMMUNICATION;
+ }
+
+ private final long nativeAudioTrack;
+ private final AudioManager audioManager;
+ private final ThreadUtils.ThreadChecker threadChecker = new ThreadUtils.ThreadChecker();
+
+ private ByteBuffer byteBuffer;
+
+ private AudioTrack audioTrack = null;
+ private AudioTrackThread audioThread = null;
+
+ // Samples to be played are replaced by zeros if |speakerMute| is set to true.
+ // Can be used to ensure that the speaker is fully muted.
+ private static volatile boolean speakerMute = false;
+ private byte[] emptyBytes;
+
+ private static AudioTrackErrorCallback errorCallback = null;
+
+ public static void setErrorCallback(AudioTrackErrorCallback errorCallback) {
+ Logging.d(TAG, "Set extended error callback");
+ WebRtcAudioTrack.errorCallback = errorCallback;
+ }
+
+ /**
+ * Audio thread which keeps calling AudioTrack.write() to stream audio.
+ * Data is periodically acquired from the native WebRTC layer using the
+ * nativeGetPlayoutData callback function.
+ * This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
+ */
+ private class AudioTrackThread extends Thread {
+ private volatile boolean keepAlive = true;
+
+ public AudioTrackThread(String name) {
+ super(name);
+ }
+
+ @Override
+ public void run() {
+ Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
+ Logging.d(TAG, "AudioTrackThread" + WebRtcAudioUtils.getThreadInfo());
+ assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING);
+
+ // Fixed size in bytes of each 10ms block of audio data that we ask for
+ // using callbacks to the native WebRTC client.
+ final int sizeInBytes = byteBuffer.capacity();
+
+ while (keepAlive) {
+ // Get 10ms of PCM data from the native WebRTC client. Audio data is
+ // written into the common ByteBuffer using the address that was
+ // cached at construction.
+ nativeGetPlayoutData(sizeInBytes, nativeAudioTrack);
+ // Write data until all data has been written to the audio sink.
+ // Upon return, the buffer position will have been advanced to reflect
+ // the amount of data that was successfully written to the AudioTrack.
+ assertTrue(sizeInBytes <= byteBuffer.remaining());
+ if (speakerMute) {
+ byteBuffer.clear();
+ byteBuffer.put(emptyBytes);
+ byteBuffer.position(0);
+ }
+ int bytesWritten = 0;
+ if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
+ bytesWritten = writeOnLollipop(audioTrack, byteBuffer, sizeInBytes);
+ } else {
+ bytesWritten = writePreLollipop(audioTrack, byteBuffer, sizeInBytes);
+ }
+ if (bytesWritten != sizeInBytes) {
+ Logging.e(TAG, "AudioTrack.write played invalid number of bytes: " + bytesWritten);
+ // If a write() returns a negative value, an error has occurred.
+ // Stop playing and report an error in this case.
+ if (bytesWritten < 0) {
+ keepAlive = false;
+ reportWebRtcAudioTrackError("AudioTrack.write failed: " + bytesWritten);
+ }
+ }
+          // The byte buffer must be rewound since byteBuffer.position() is
+          // increased at each call to AudioTrack.write(). If we don't do this,
+          // the next call to AudioTrack.write() will fail.
+ byteBuffer.rewind();
+
+ // TODO(henrika): it is possible to create a delay estimate here by
+ // counting number of written frames and subtracting the result from
+ // audioTrack.getPlaybackHeadPosition().
+ }
+
+ // Stops playing the audio data. Since the instance was created in
+ // MODE_STREAM mode, audio will stop playing after the last buffer that
+ // was written has been played.
+ if (audioTrack != null) {
+ Logging.d(TAG, "Calling AudioTrack.stop...");
+ try {
+ audioTrack.stop();
+ Logging.d(TAG, "AudioTrack.stop is done.");
+ } catch (IllegalStateException e) {
+ Logging.e(TAG, "AudioTrack.stop failed: " + e.getMessage());
+ }
+ }
+ }
+
+ @TargetApi(21)
+ private int writeOnLollipop(AudioTrack audioTrack, ByteBuffer byteBuffer, int sizeInBytes) {
+ return audioTrack.write(byteBuffer, sizeInBytes, AudioTrack.WRITE_BLOCKING);
+ }
+
+ private int writePreLollipop(AudioTrack audioTrack, ByteBuffer byteBuffer, int sizeInBytes) {
+ return audioTrack.write(byteBuffer.array(), byteBuffer.arrayOffset(), sizeInBytes);
+ }
+
+ // Stops the inner thread loop which results in calling AudioTrack.stop().
+ // Does not block the calling thread.
+ public void stopThread() {
+ Logging.d(TAG, "stopThread");
+ keepAlive = false;
+ }
+ }
+
+ WebRtcAudioTrack(long nativeAudioTrack) {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
+ this.nativeAudioTrack = nativeAudioTrack;
+ audioManager =
+ (AudioManager) ContextUtils.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
+ if (DEBUG) {
+ WebRtcAudioUtils.logDeviceInfo(TAG);
+ }
+ }
+
+ private boolean initPlayout(int sampleRate, int channels) {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG, "initPlayout(sampleRate=" + sampleRate + ", channels=" + channels + ")");
+ final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
+ byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * (sampleRate / BUFFERS_PER_SECOND));
+ Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
+ emptyBytes = new byte[byteBuffer.capacity()];
+    // Rather than passing the ByteBuffer with every callback (requiring
+    // the potentially expensive GetDirectBufferAddress) we simply have the
+    // native class cache the address to the memory once.
+ nativeCacheDirectBufferAddress(byteBuffer, nativeAudioTrack);
+
+    // Get the minimum buffer size required for the successful creation of an
+    // AudioTrack object in MODE_STREAM mode.
+    // Note that this size doesn't guarantee a smooth playback under load.
+    // TODO(henrika): should we extend the buffer size to avoid glitches?
+ final int channelConfig = channelCountToConfiguration(channels);
+ final int minBufferSizeInBytes =
+ AudioTrack.getMinBufferSize(sampleRate, channelConfig, AudioFormat.ENCODING_PCM_16BIT);
+ Logging.d(TAG, "AudioTrack.getMinBufferSize: " + minBufferSizeInBytes);
+ // For the streaming mode, data must be written to the audio sink in
+ // chunks of size (given by byteBuffer.capacity()) less than or equal
+ // to the total buffer size |minBufferSizeInBytes|. But, we have seen
+ // reports of "getMinBufferSize(): error querying hardware". Hence, it
+ // can happen that |minBufferSizeInBytes| contains an invalid value.
+ if (minBufferSizeInBytes < byteBuffer.capacity()) {
+ reportWebRtcAudioTrackInitError("AudioTrack.getMinBufferSize returns an invalid value.");
+ return false;
+ }
+
+    // Ensure that the previous audio session was stopped correctly before
+    // trying to create a new AudioTrack.
+ if (audioTrack != null) {
+ reportWebRtcAudioTrackInitError("Conflict with existing AudioTrack.");
+ return false;
+ }
+ try {
+ // Create an AudioTrack object and initialize its associated audio buffer.
+ // The size of this buffer determines how long an AudioTrack can play
+ // before running out of data.
+ if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
+ // If we are on API level 21 or higher, it is possible to use a special AudioTrack
+ // constructor that uses AudioAttributes and AudioFormat as input. It allows us to
+ // supersede the notion of stream types for defining the behavior of audio playback,
+ // and to allow certain platforms or routing policies to use this information for more
+ // refined volume or routing decisions.
+ audioTrack =
+ createAudioTrackOnLollipopOrHigher(sampleRate, channelConfig, minBufferSizeInBytes);
+ } else {
+ // Use default constructor for API levels below 21.
+ audioTrack =
+ createAudioTrackOnLowerThanLollipop(sampleRate, channelConfig, minBufferSizeInBytes);
+ }
+ } catch (IllegalArgumentException e) {
+ reportWebRtcAudioTrackInitError(e.getMessage());
+ releaseAudioResources();
+ return false;
+ }
+
+ // It can happen that an AudioTrack is created but it was not successfully
+ // initialized upon creation. Seems to be the case e.g. when the maximum
+ // number of globally available audio tracks is exceeded.
+ if (audioTrack == null || audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
+ reportWebRtcAudioTrackInitError("Initialization of audio track failed.");
+ releaseAudioResources();
+ return false;
+ }
+ logMainParameters();
+ logMainParametersExtended();
+ return true;
+ }
+
+ private boolean startPlayout() {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG, "startPlayout");
+ assertTrue(audioTrack != null);
+ assertTrue(audioThread == null);
+
+ // Starts playing an audio track.
+ try {
+ audioTrack.play();
+ } catch (IllegalStateException e) {
+ reportWebRtcAudioTrackStartError(AudioTrackStartErrorCode.AUDIO_TRACK_START_EXCEPTION,
+ "AudioTrack.play failed: " + e.getMessage());
+ releaseAudioResources();
+ return false;
+ }
+ if (audioTrack.getPlayState() != AudioTrack.PLAYSTATE_PLAYING) {
+ reportWebRtcAudioTrackStartError(AudioTrackStartErrorCode.AUDIO_TRACK_START_STATE_MISMATCH,
+ "AudioTrack.play failed - incorrect state :" + audioTrack.getPlayState());
+ releaseAudioResources();
+ return false;
+ }
+
+    // Create and start a new high-priority thread which calls AudioTrack.write()
+    // and where we also call the native nativeGetPlayoutData() callback to
+    // request decoded audio from WebRTC.
+ audioThread = new AudioTrackThread("AudioTrackJavaThread");
+ audioThread.start();
+ return true;
+ }
+
+ private boolean stopPlayout() {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG, "stopPlayout");
+ assertTrue(audioThread != null);
+ logUnderrunCount();
+ audioThread.stopThread();
+
+ Logging.d(TAG, "Stopping the AudioTrackThread...");
+ audioThread.interrupt();
+ if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS)) {
+ Logging.e(TAG, "Join of AudioTrackThread timed out.");
+ WebRtcAudioUtils.logAudioState(TAG);
+ }
+ Logging.d(TAG, "AudioTrackThread has now been stopped.");
+ audioThread = null;
+ releaseAudioResources();
+ return true;
+ }
+
+ // Get max possible volume index for a phone call audio stream.
+ private int getStreamMaxVolume() {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG, "getStreamMaxVolume");
+ assertTrue(audioManager != null);
+ return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
+ }
+
+ // Set current volume level for a phone call audio stream.
+ private boolean setStreamVolume(int volume) {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG, "setStreamVolume(" + volume + ")");
+ assertTrue(audioManager != null);
+ if (isVolumeFixed()) {
+ Logging.e(TAG, "The device implements a fixed volume policy.");
+ return false;
+ }
+ audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0);
+ return true;
+ }
+
+ // TODO(bugs.webrtc.org/8580): Call requires API level 21 (current min is 16):
+ // `android.media.AudioManager#isVolumeFixed`: NewApi [warning]
+ @SuppressLint("NewApi")
+ private boolean isVolumeFixed() {
+ if (!WebRtcAudioUtils.runningOnLollipopOrHigher())
+ return false;
+ return audioManager.isVolumeFixed();
+ }
+
+ /** Get current volume level for a phone call audio stream. */
+ private int getStreamVolume() {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG, "getStreamVolume");
+ assertTrue(audioManager != null);
+ return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
+ }
+
+ private void logMainParameters() {
+ Logging.d(TAG,
+ "AudioTrack: "
+ + "session ID: " + audioTrack.getAudioSessionId() + ", "
+ + "channels: " + audioTrack.getChannelCount() + ", "
+ + "sample rate: " + audioTrack.getSampleRate()
+ + ", "
+ // Gain (>=1.0) expressed as linear multiplier on sample values.
+ + "max gain: " + AudioTrack.getMaxVolume());
+ }
+
+  // Creates an AudioTrack instance using AudioAttributes and AudioFormat as input.
+ // It allows certain platforms or routing policies to use this information for more
+ // refined volume or routing decisions.
+ @TargetApi(21)
+ private static AudioTrack createAudioTrackOnLollipopOrHigher(
+ int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
+ Logging.d(TAG, "createAudioTrackOnLollipopOrHigher");
+ // TODO(henrika): use setPerformanceMode(int) with PERFORMANCE_MODE_LOW_LATENCY to control
+  // performance when Android O is supported. Add some logging in the meantime.
+ final int nativeOutputSampleRate =
+ AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_VOICE_CALL);
+ Logging.d(TAG, "nativeOutputSampleRate: " + nativeOutputSampleRate);
+ if (sampleRateInHz != nativeOutputSampleRate) {
+ Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
+ }
+ if (usageAttribute != DEFAULT_USAGE) {
+ Logging.w(TAG, "A non default usage attribute is used: " + usageAttribute);
+ }
+ // Create an audio track where the audio usage is for VoIP and the content type is speech.
+ return new AudioTrack(new AudioAttributes.Builder()
+ .setUsage(usageAttribute)
+ .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
+ .build(),
+ new AudioFormat.Builder()
+ .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
+ .setSampleRate(sampleRateInHz)
+ .setChannelMask(channelConfig)
+ .build(),
+ bufferSizeInBytes, AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);
+ }
+
+ @SuppressWarnings("deprecation") // Deprecated in API level 25.
+ private static AudioTrack createAudioTrackOnLowerThanLollipop(
+ int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
+ return new AudioTrack(AudioManager.STREAM_VOICE_CALL, sampleRateInHz, channelConfig,
+ AudioFormat.ENCODING_PCM_16BIT, bufferSizeInBytes, AudioTrack.MODE_STREAM);
+ }
+
+ @TargetApi(24)
+ private void logMainParametersExtended() {
+ if (WebRtcAudioUtils.runningOnMarshmallowOrHigher()) {
+ Logging.d(TAG,
+ "AudioTrack: "
+ // The effective size of the AudioTrack buffer that the app writes to.
+ + "buffer size in frames: " + audioTrack.getBufferSizeInFrames());
+ }
+ if (WebRtcAudioUtils.runningOnNougatOrHigher()) {
+ Logging.d(TAG,
+ "AudioTrack: "
+ // Maximum size of the AudioTrack buffer in frames.
+ + "buffer capacity in frames: " + audioTrack.getBufferCapacityInFrames());
+ }
+ }
+
+ // Prints the number of underrun occurrences in the application-level write
+ // buffer since the AudioTrack was created. An underrun occurs if the app does
+ // not write audio data quickly enough, causing the buffer to underflow and a
+ // potential audio glitch.
+ // TODO(henrika): keep track of this value in the field and possibly add new
+ // UMA stat if needed.
+ @TargetApi(24)
+ private void logUnderrunCount() {
+ if (WebRtcAudioUtils.runningOnNougatOrHigher()) {
+ Logging.d(TAG, "underrun count: " + audioTrack.getUnderrunCount());
+ }
+ }
+
+ // Helper method which throws an exception when an assertion has failed.
+ private static void assertTrue(boolean condition) {
+ if (!condition) {
+ throw new AssertionError("Expected condition to be true");
+ }
+ }
+
+ private int channelCountToConfiguration(int channels) {
+ return (channels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO);
+ }
+
+  private native void nativeCacheDirectBufferAddress(ByteBuffer byteBuffer, long nativeAudioTrack);
+
+  private native void nativeGetPlayoutData(int bytes, long nativeAudioTrack);
+
+ // Sets all samples to be played out to zero if |mute| is true, i.e.,
+ // ensures that the speaker is muted.
+ public static void setSpeakerMute(boolean mute) {
+ Logging.w(TAG, "setSpeakerMute(" + mute + ")");
+ speakerMute = mute;
+ }
+
+ // Releases the native AudioTrack resources.
+ private void releaseAudioResources() {
+ Logging.d(TAG, "releaseAudioResources");
+ if (audioTrack != null) {
+ audioTrack.release();
+ audioTrack = null;
+ }
+ }
+
+ private void reportWebRtcAudioTrackInitError(String errorMessage) {
+ Logging.e(TAG, "Init playout error: " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG);
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioTrackInitError(errorMessage);
+ }
+ }
+
+ private void reportWebRtcAudioTrackStartError(
+ AudioTrackStartErrorCode errorCode, String errorMessage) {
+ Logging.e(TAG, "Start playout error: " + errorCode + ". " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG);
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioTrackStartError(errorCode, errorMessage);
+ }
+ }
+
+ private void reportWebRtcAudioTrackError(String errorMessage) {
+ Logging.e(TAG, "Run-time playback error: " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG);
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioTrackError(errorMessage);
+ }
+ }
+}
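[Editor's note] Sketch of wiring the playout error callback; not part of the patch. The three method names are taken from the reportWebRtcAudioTrack*Error() call sites above; the sketch assumes AudioDeviceModule.AudioTrackErrorCallback declares exactly these methods, and PlayoutErrorLogger is a hypothetical client class inside this package.

package org.webrtc.audio;

import org.webrtc.Logging;
import org.webrtc.audio.AudioDeviceModule.AudioTrackErrorCallback;
import org.webrtc.audio.AudioDeviceModule.AudioTrackStartErrorCode;

// Hypothetical client that forwards playout errors to the WebRTC log.
final class PlayoutErrorLogger {
  private static final String TAG = "PlayoutErrorLogger";

  static void install() {
    WebRtcAudioTrack.setErrorCallback(new AudioTrackErrorCallback() {
      @Override
      public void onWebRtcAudioTrackInitError(String errorMessage) {
        Logging.e(TAG, "init error: " + errorMessage);
      }

      @Override
      public void onWebRtcAudioTrackStartError(
          AudioTrackStartErrorCode errorCode, String errorMessage) {
        Logging.e(TAG, "start error: " + errorCode + ", " + errorMessage);
      }

      @Override
      public void onWebRtcAudioTrackError(String errorMessage) {
        Logging.e(TAG, "run-time error: " + errorMessage);
      }
    });
  }
}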
diff --git a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioUtils.java b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioUtils.java
new file mode 100644
index 0000000000..73fe32b19f
--- /dev/null
+++ b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioUtils.java
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.audio;
+
+import static android.media.AudioManager.MODE_IN_CALL;
+import static android.media.AudioManager.MODE_IN_COMMUNICATION;
+import static android.media.AudioManager.MODE_NORMAL;
+import static android.media.AudioManager.MODE_RINGTONE;
+
+import android.annotation.SuppressLint;
+import android.annotation.TargetApi;
+import android.content.Context;
+import android.content.pm.PackageManager;
+import android.media.AudioDeviceInfo;
+import android.media.AudioManager;
+import android.media.AudioRecordingConfiguration;
+import android.media.MediaRecorder.AudioSource;
+import android.os.Build;
+import android.os.Process;
+import java.lang.Thread;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import org.webrtc.ContextUtils;
+import org.webrtc.Logging;
+
+final class WebRtcAudioUtils {
+ private static final String TAG = "WebRtcAudioUtils";
+
+ // List of devices where we have seen issues (e.g. bad audio quality) using
+ // the low latency output mode in combination with OpenSL ES.
+ // The device name is given by Build.MODEL.
+ private static final String[] BLACKLISTED_OPEN_SL_ES_MODELS = new String[] {
+ // It is recommended to maintain a list of blacklisted models outside
+ // this package and instead call
+ // WebRtcAudioManager.setBlacklistDeviceForOpenSLESUsage(true)
+ // from the client for devices where OpenSL ES shall be disabled.
+ };
+
+  // List of devices where it has been verified that the built-in effect is
+  // bad and where it makes sense to avoid using it and rely on the native
+  // WebRTC version instead. The device name is given by Build.MODEL.
+ private static final String[] BLACKLISTED_AEC_MODELS = new String[] {
+ // It is recommended to maintain a list of blacklisted models outside
+ // this package and instead call setWebRtcBasedAcousticEchoCanceler(true)
+ // from the client for devices where the built-in AEC shall be disabled.
+ };
+ private static final String[] BLACKLISTED_NS_MODELS = new String[] {
+ // It is recommended to maintain a list of blacklisted models outside
+ // this package and instead call setWebRtcBasedNoiseSuppressor(true)
+ // from the client for devices where the built-in NS shall be disabled.
+ };
+
+ // Use 16kHz as the default sample rate. A higher sample rate might prevent
+ // us from supporting communication mode on some older (e.g. ICS) devices.
+ private static final int DEFAULT_SAMPLE_RATE_HZ = 16000;
+ private static int defaultSampleRateHz = DEFAULT_SAMPLE_RATE_HZ;
+ // Set to true if setDefaultSampleRateHz() has been called.
+ private static boolean isDefaultSampleRateOverridden = false;
+
+ // By default, utilize hardware based audio effects for AEC and NS when
+ // available.
+ private static boolean useWebRtcBasedAcousticEchoCanceler = false;
+ private static boolean useWebRtcBasedNoiseSuppressor = false;
+
+  // Call these methods if any hardware-based effect shall be replaced by a
+  // software-based version provided by the WebRTC stack.
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setWebRtcBasedAcousticEchoCanceler(boolean enable) {
+ useWebRtcBasedAcousticEchoCanceler = enable;
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setWebRtcBasedNoiseSuppressor(boolean enable) {
+ useWebRtcBasedNoiseSuppressor = enable;
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setWebRtcBasedAutomaticGainControl(boolean enable) {
+ // TODO(henrika): deprecated; remove when no longer used by any client.
+ Logging.w(TAG, "setWebRtcBasedAutomaticGainControl() is deprecated");
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized boolean useWebRtcBasedAcousticEchoCanceler() {
+ if (useWebRtcBasedAcousticEchoCanceler) {
+ Logging.w(TAG, "Overriding default behavior; now using WebRTC AEC!");
+ }
+ return useWebRtcBasedAcousticEchoCanceler;
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized boolean useWebRtcBasedNoiseSuppressor() {
+ if (useWebRtcBasedNoiseSuppressor) {
+ Logging.w(TAG, "Overriding default behavior; now using WebRTC NS!");
+ }
+ return useWebRtcBasedNoiseSuppressor;
+ }
+
+ // TODO(henrika): deprecated; remove when no longer used by any client.
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized boolean useWebRtcBasedAutomaticGainControl() {
+ // Always return true here to avoid trying to use any built-in AGC.
+ return true;
+ }
+
+ // Returns true if the device supports an audio effect (AEC or NS).
+  // Four conditions must be fulfilled for these functions to return true:
+ // 1) the platform must support the built-in (HW) effect,
+ // 2) explicit use (override) of a WebRTC based version must not be set,
+ // 3) the device must not be blacklisted for use of the effect, and
+ // 4) the UUID of the effect must be approved (some UUIDs can be excluded).
+ public static boolean isAcousticEchoCancelerSupported() {
+ return WebRtcAudioEffects.canUseAcousticEchoCanceler();
+ }
+ public static boolean isNoiseSuppressorSupported() {
+ return WebRtcAudioEffects.canUseNoiseSuppressor();
+ }
+ // TODO(henrika): deprecated; remove when no longer used by any client.
+ public static boolean isAutomaticGainControlSupported() {
+ // Always return false here to avoid trying to use any built-in AGC.
+ return false;
+ }
+
+ // Call this method if the default handling of querying the native sample
+ // rate shall be overridden. Can be useful on some devices where the
+ // available Android APIs are known to return invalid results.
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized void setDefaultSampleRateHz(int sampleRateHz) {
+ isDefaultSampleRateOverridden = true;
+ defaultSampleRateHz = sampleRateHz;
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized boolean isDefaultSampleRateOverridden() {
+ return isDefaultSampleRateOverridden;
+ }
+
+ // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+ @SuppressWarnings("NoSynchronizedMethodCheck")
+ public static synchronized int getDefaultSampleRateHz() {
+ return defaultSampleRateHz;
+ }
+
+  public static List<String> getBlackListedModelsForAecUsage() {
+ return Arrays.asList(WebRtcAudioUtils.BLACKLISTED_AEC_MODELS);
+ }
+
+  public static List<String> getBlackListedModelsForNsUsage() {
+ return Arrays.asList(WebRtcAudioUtils.BLACKLISTED_NS_MODELS);
+ }
+
+ public static boolean runningOnJellyBeanMR1OrHigher() {
+ // November 2012: Android 4.2. API Level 17.
+ return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1;
+ }
+
+ public static boolean runningOnJellyBeanMR2OrHigher() {
+ // July 24, 2013: Android 4.3. API Level 18.
+ return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2;
+ }
+
+ public static boolean runningOnLollipopOrHigher() {
+ // API Level 21.
+ return Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP;
+ }
+
+ public static boolean runningOnMarshmallowOrHigher() {
+ // API Level 23.
+ return Build.VERSION.SDK_INT >= Build.VERSION_CODES.M;
+ }
+
+ public static boolean runningOnNougatOrHigher() {
+ // API Level 24.
+ return Build.VERSION.SDK_INT >= Build.VERSION_CODES.N;
+ }
+
+ public static boolean runningOnOreoOrHigher() {
+ // API Level 26.
+ return Build.VERSION.SDK_INT >= Build.VERSION_CODES.O;
+ }
+
+ public static boolean runningOnOreoMR1OrHigher() {
+ // API Level 27.
+ return Build.VERSION.SDK_INT >= Build.VERSION_CODES.O_MR1;
+ }
+
+ // Helper method for building a string of thread information.
+ public static String getThreadInfo() {
+ return "@[name=" + Thread.currentThread().getName() + ", id=" + Thread.currentThread().getId()
+ + "]";
+ }
+
+ // Returns true if we're running on emulator.
+ public static boolean runningOnEmulator() {
+ return Build.HARDWARE.equals("goldfish") && Build.BRAND.startsWith("generic_");
+ }
+
+ // Returns true if the device is blacklisted for OpenSL ES usage.
+ public static boolean deviceIsBlacklistedForOpenSLESUsage() {
+    List<String> blackListedModels = Arrays.asList(BLACKLISTED_OPEN_SL_ES_MODELS);
+ return blackListedModels.contains(Build.MODEL);
+ }
+
+ // Information about the current build, taken from system properties.
+ static void logDeviceInfo(String tag) {
+ Logging.d(tag,
+ "Android SDK: " + Build.VERSION.SDK_INT + ", "
+ + "Release: " + Build.VERSION.RELEASE + ", "
+ + "Brand: " + Build.BRAND + ", "
+ + "Device: " + Build.DEVICE + ", "
+ + "Id: " + Build.ID + ", "
+ + "Hardware: " + Build.HARDWARE + ", "
+ + "Manufacturer: " + Build.MANUFACTURER + ", "
+ + "Model: " + Build.MODEL + ", "
+ + "Product: " + Build.PRODUCT);
+ }
+
+ // Logs information about the current audio state. The idea is to call this
+ // method when errors are detected to log under what conditions the error
+ // occurred. Hopefully it will provide clues to what might be the root cause.
+ static void logAudioState(String tag) {
+ logDeviceInfo(tag);
+ final Context context = ContextUtils.getApplicationContext();
+ final AudioManager audioManager =
+ (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
+ logAudioStateBasic(tag, audioManager);
+ logAudioStateVolume(tag, audioManager);
+ logAudioDeviceInfo(tag, audioManager);
+ }
+
+ // Reports basic audio statistics.
+ private static void logAudioStateBasic(String tag, AudioManager audioManager) {
+ Logging.d(tag,
+ "Audio State: "
+ + "audio mode: " + modeToString(audioManager.getMode()) + ", "
+ + "has mic: " + hasMicrophone() + ", "
+ + "mic muted: " + audioManager.isMicrophoneMute() + ", "
+ + "music active: " + audioManager.isMusicActive() + ", "
+ + "speakerphone: " + audioManager.isSpeakerphoneOn() + ", "
+ + "BT SCO: " + audioManager.isBluetoothScoOn());
+ }
+
+ // TODO(bugs.webrtc.org/8580): Call requires API level 21 (current min is 16):
+ // `android.media.AudioManager#isVolumeFixed`: NewApi [warning]
+ @SuppressLint("NewApi")
+ // Adds volume information for all possible stream types.
+ private static void logAudioStateVolume(String tag, AudioManager audioManager) {
+ final int[] streams = {AudioManager.STREAM_VOICE_CALL, AudioManager.STREAM_MUSIC,
+ AudioManager.STREAM_RING, AudioManager.STREAM_ALARM, AudioManager.STREAM_NOTIFICATION,
+ AudioManager.STREAM_SYSTEM};
+ Logging.d(tag, "Audio State: ");
+ boolean fixedVolume = false;
+ if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
+ fixedVolume = audioManager.isVolumeFixed();
+ // Some devices may not have volume controls and might use a fixed volume.
+ Logging.d(tag, " fixed volume=" + fixedVolume);
+ }
+ if (!fixedVolume) {
+ for (int stream : streams) {
+ StringBuilder info = new StringBuilder();
+ info.append(" " + streamTypeToString(stream) + ": ");
+ info.append("volume=").append(audioManager.getStreamVolume(stream));
+ info.append(", max=").append(audioManager.getStreamMaxVolume(stream));
+ logIsStreamMute(tag, audioManager, stream, info);
+ Logging.d(tag, info.toString());
+ }
+ }
+ }
+
+ @TargetApi(23)
+ private static void logIsStreamMute(
+ String tag, AudioManager audioManager, int stream, StringBuilder info) {
+ if (WebRtcAudioUtils.runningOnMarshmallowOrHigher()) {
+ info.append(", muted=").append(audioManager.isStreamMute(stream));
+ }
+ }
+
+ @TargetApi(23)
+ private static void logAudioDeviceInfo(String tag, AudioManager audioManager) {
+ if (!WebRtcAudioUtils.runningOnMarshmallowOrHigher()) {
+ return;
+ }
+ final AudioDeviceInfo[] devices = audioManager.getDevices(AudioManager.GET_DEVICES_ALL);
+ if (devices.length == 0) {
+ return;
+ }
+ Logging.d(tag, "Audio Devices: ");
+ for (AudioDeviceInfo device : devices) {
+ StringBuilder info = new StringBuilder();
+ info.append(" ").append(deviceTypeToString(device.getType()));
+ info.append(device.isSource() ? "(in): " : "(out): ");
+ // An empty array indicates that the device supports arbitrary channel counts.
+ if (device.getChannelCounts().length > 0) {
+ info.append("channels=").append(Arrays.toString(device.getChannelCounts()));
+ info.append(", ");
+ }
+ if (device.getEncodings().length > 0) {
+ // Examples: ENCODING_PCM_16BIT = 2, ENCODING_PCM_FLOAT = 4.
+ info.append("encodings=").append(Arrays.toString(device.getEncodings()));
+ info.append(", ");
+ }
+ if (device.getSampleRates().length > 0) {
+ info.append("sample rates=").append(Arrays.toString(device.getSampleRates()));
+ info.append(", ");
+ }
+ info.append("id=").append(device.getId());
+ Logging.d(tag, info.toString());
+ }
+ }
+
+ // Converts media.AudioManager modes into local string representation.
+ static String modeToString(int mode) {
+ switch (mode) {
+ case MODE_IN_CALL:
+ return "MODE_IN_CALL";
+ case MODE_IN_COMMUNICATION:
+ return "MODE_IN_COMMUNICATION";
+ case MODE_NORMAL:
+ return "MODE_NORMAL";
+ case MODE_RINGTONE:
+ return "MODE_RINGTONE";
+ default:
+ return "MODE_INVALID";
+ }
+ }
+
+ private static String streamTypeToString(int stream) {
+ switch (stream) {
+ case AudioManager.STREAM_VOICE_CALL:
+ return "STREAM_VOICE_CALL";
+ case AudioManager.STREAM_MUSIC:
+ return "STREAM_MUSIC";
+ case AudioManager.STREAM_RING:
+ return "STREAM_RING";
+ case AudioManager.STREAM_ALARM:
+ return "STREAM_ALARM";
+ case AudioManager.STREAM_NOTIFICATION:
+ return "STREAM_NOTIFICATION";
+ case AudioManager.STREAM_SYSTEM:
+ return "STREAM_SYSTEM";
+ default:
+ return "STREAM_INVALID";
+ }
+ }
+
+ // Converts AudioDeviceInfo types to local string representation.
+ private static String deviceTypeToString(int type) {
+ switch (type) {
+ case AudioDeviceInfo.TYPE_UNKNOWN:
+ return "TYPE_UNKNOWN";
+ case AudioDeviceInfo.TYPE_BUILTIN_EARPIECE:
+ return "TYPE_BUILTIN_EARPIECE";
+ case AudioDeviceInfo.TYPE_BUILTIN_SPEAKER:
+ return "TYPE_BUILTIN_SPEAKER";
+ case AudioDeviceInfo.TYPE_WIRED_HEADSET:
+ return "TYPE_WIRED_HEADSET";
+ case AudioDeviceInfo.TYPE_WIRED_HEADPHONES:
+ return "TYPE_WIRED_HEADPHONES";
+ case AudioDeviceInfo.TYPE_LINE_ANALOG:
+ return "TYPE_LINE_ANALOG";
+ case AudioDeviceInfo.TYPE_LINE_DIGITAL:
+ return "TYPE_LINE_DIGITAL";
+ case AudioDeviceInfo.TYPE_BLUETOOTH_SCO:
+ return "TYPE_BLUETOOTH_SCO";
+ case AudioDeviceInfo.TYPE_BLUETOOTH_A2DP:
+ return "TYPE_BLUETOOTH_A2DP";
+ case AudioDeviceInfo.TYPE_HDMI:
+ return "TYPE_HDMI";
+ case AudioDeviceInfo.TYPE_HDMI_ARC:
+ return "TYPE_HDMI_ARC";
+ case AudioDeviceInfo.TYPE_USB_DEVICE:
+ return "TYPE_USB_DEVICE";
+ case AudioDeviceInfo.TYPE_USB_ACCESSORY:
+ return "TYPE_USB_ACCESSORY";
+ case AudioDeviceInfo.TYPE_DOCK:
+ return "TYPE_DOCK";
+ case AudioDeviceInfo.TYPE_FM:
+ return "TYPE_FM";
+ case AudioDeviceInfo.TYPE_BUILTIN_MIC:
+ return "TYPE_BUILTIN_MIC";
+ case AudioDeviceInfo.TYPE_FM_TUNER:
+ return "TYPE_FM_TUNER";
+ case AudioDeviceInfo.TYPE_TV_TUNER:
+ return "TYPE_TV_TUNER";
+ case AudioDeviceInfo.TYPE_TELEPHONY:
+ return "TYPE_TELEPHONY";
+ case AudioDeviceInfo.TYPE_AUX_LINE:
+ return "TYPE_AUX_LINE";
+ case AudioDeviceInfo.TYPE_IP:
+ return "TYPE_IP";
+ case AudioDeviceInfo.TYPE_BUS:
+ return "TYPE_BUS";
+ case AudioDeviceInfo.TYPE_USB_HEADSET:
+ return "TYPE_USB_HEADSET";
+ default:
+ return "TYPE_UNKNOWN";
+ }
+ }
+
+ // Returns true if the device can record audio via a microphone.
+ private static boolean hasMicrophone() {
+ return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
+ PackageManager.FEATURE_MICROPHONE);
+ }
+}
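[Editor's note] Sketch showing how the overrides above combine into a device-specific workaround; not part of the patch. The model string is a placeholder, not a recommended blacklist entry, and AudioDeviceTuning is a hypothetical class inside this package; external clients are expected to go through whatever public wrappers the SDK exposes instead.

package org.webrtc.audio;

import android.os.Build;

// Hypothetical per-device tuning applied before the PeerConnectionFactory is built.
final class AudioDeviceTuning {
  static void applyWorkarounds() {
    if ("SomeProblematicModel".equals(Build.MODEL)) {
      // Prefer the software effects shipped with WebRTC over broken HW ones.
      WebRtcAudioUtils.setWebRtcBasedAcousticEchoCanceler(true);
      WebRtcAudioUtils.setWebRtcBasedNoiseSuppressor(true);
      // Pin a safe rate on devices that report a bogus native sample rate.
      WebRtcAudioUtils.setDefaultSampleRateHz(16000);
    }
  }
}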
diff --git a/sdk/android/src/jni/audio_device/DEPS b/sdk/android/src/jni/audio_device/DEPS
new file mode 100644
index 0000000000..4abbc4258d
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/DEPS
@@ -0,0 +1,6 @@
+include_rules = [
+ "+base/android/jni_android.h",
+ "+modules/audio_device",
+ # TODO(bugs.webrtc.org/8689): Remove this dependency and use jni generation instead.
+ "+modules/utility/include/helpers_android.h",
+]
diff --git a/sdk/android/src/jni/audio_device/aaudio_player.cc b/sdk/android/src/jni/audio_device/aaudio_player.cc
new file mode 100644
index 0000000000..0e6b7be4db
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/aaudio_player.cc
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/android/src/jni/audio_device/aaudio_player.h"
+
+#include "api/array_view.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "sdk/android/src/jni/audio_device/audio_manager.h"
+
+namespace webrtc {
+
+namespace android_adm {
+
+enum AudioDeviceMessageType : uint32_t {
+ kMessageOutputStreamDisconnected,
+};
+
+AAudioPlayer::AAudioPlayer(AudioManager* audio_manager)
+ : main_thread_(rtc::Thread::Current()),
+ aaudio_(audio_manager, AAUDIO_DIRECTION_OUTPUT, this) {
+ RTC_LOG(INFO) << "ctor";
+ thread_checker_aaudio_.DetachFromThread();
+}
+
+AAudioPlayer::~AAudioPlayer() {
+ RTC_LOG(INFO) << "dtor";
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ Terminate();
+ RTC_LOG(INFO) << "#detected underruns: " << underrun_count_;
+}
+
+int AAudioPlayer::Init() {
+ RTC_LOG(INFO) << "Init";
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ RTC_DCHECK_EQ(aaudio_.audio_parameters().channels(), 1u);
+ return 0;
+}
+
+int AAudioPlayer::Terminate() {
+ RTC_LOG(INFO) << "Terminate";
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ StopPlayout();
+ return 0;
+}
+
+int AAudioPlayer::InitPlayout() {
+ RTC_LOG(INFO) << "InitPlayout";
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!playing_);
+ if (!aaudio_.Init()) {
+ return -1;
+ }
+ initialized_ = true;
+ return 0;
+}
+
+bool AAudioPlayer::PlayoutIsInitialized() const {
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ return initialized_;
+}
+
+int AAudioPlayer::StartPlayout() {
+ RTC_LOG(INFO) << "StartPlayout";
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ RTC_DCHECK(!playing_);
+ if (!initialized_) {
+ RTC_DLOG(LS_WARNING)
+ << "Playout can not start since InitPlayout must succeed first";
+ return 0;
+ }
+ if (fine_audio_buffer_) {
+ fine_audio_buffer_->ResetPlayout();
+ }
+ if (!aaudio_.Start()) {
+ return -1;
+ }
+ underrun_count_ = aaudio_.xrun_count();
+ first_data_callback_ = true;
+ playing_ = true;
+ return 0;
+}
+
+int AAudioPlayer::StopPlayout() {
+ RTC_LOG(INFO) << "StopPlayout";
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ if (!initialized_ || !playing_) {
+ return 0;
+ }
+ if (!aaudio_.Stop()) {
+ RTC_LOG(LS_ERROR) << "StopPlayout failed";
+ return -1;
+ }
+ thread_checker_aaudio_.DetachFromThread();
+ initialized_ = false;
+ playing_ = false;
+ return 0;
+}
+
+bool AAudioPlayer::Playing() const {
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ return playing_;
+}
+
+void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ RTC_DLOG(INFO) << "AttachAudioBuffer";
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  audio_device_buffer_ = audioBuffer;
+  RTC_CHECK(audio_device_buffer_);
+  const AudioParameters audio_parameters = aaudio_.audio_parameters();
+  audio_device_buffer_->SetPlayoutSampleRate(audio_parameters.sample_rate());
+  audio_device_buffer_->SetPlayoutChannels(audio_parameters.channels());
+ // Create a modified audio buffer class which allows us to ask for any number
+ // of samples (and not only multiple of 10ms) to match the optimal buffer
+ // size per callback used by AAudio. Use an initial capacity of 50ms to ensure
+ // that the buffer can cache old data and at the same time be prepared for
+ // increased burst size in AAudio if buffer underruns are detected.
+ const size_t capacity = 5 * audio_parameters.GetBytesPer10msBuffer();
+ fine_audio_buffer_.reset(new FineAudioBuffer(
+ audio_device_buffer_, audio_parameters.sample_rate(), capacity));
+}
+
+int AAudioPlayer::SpeakerVolumeIsAvailable(bool* available) {
+ *available = false;
+ return 0;
+}
+
+void AAudioPlayer::OnErrorCallback(aaudio_result_t error) {
+ RTC_LOG(LS_ERROR) << "OnErrorCallback: " << AAudio_convertResultToText(error);
+ // TODO(henrika): investigate if we can use a thread checker here. Initial
+  // tests show that this callback can sometimes be called on a unique thread
+ // but according to the documentation it should be on the same thread as the
+ // data callback.
+ // RTC_DCHECK_RUN_ON(&thread_checker_aaudio_);
+ if (aaudio_.stream_state() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+ // The stream is disconnected and any attempt to use it will return
+ // AAUDIO_ERROR_DISCONNECTED.
+ RTC_LOG(WARNING) << "Output stream disconnected";
+ // AAudio documentation states: "You should not close or reopen the stream
+ // from the callback, use another thread instead". A message is therefore
+ // sent to the main thread to do the restart operation.
+ RTC_DCHECK(main_thread_);
+ main_thread_->Post(RTC_FROM_HERE, this, kMessageOutputStreamDisconnected);
+ }
+}
+
+aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,
+ int32_t num_frames) {
+ RTC_DCHECK_RUN_ON(&thread_checker_aaudio_);
+ // Log device id in first data callback to ensure that a valid device is
+ // utilized.
+ if (first_data_callback_) {
+ RTC_LOG(INFO) << "--- First output data callback: "
+ << "device id=" << aaudio_.device_id();
+ first_data_callback_ = false;
+ }
+
+ // Check if the underrun count has increased. If it has, increase the buffer
+ // size by adding the size of a burst. It will reduce the risk of underruns
+ // at the expense of an increased latency.
+ // TODO(henrika): enable possibility to disable and/or tune the algorithm.
+ const int32_t underrun_count = aaudio_.xrun_count();
+ if (underrun_count > underrun_count_) {
+ RTC_LOG(LS_ERROR) << "Underrun detected: " << underrun_count;
+ underrun_count_ = underrun_count;
+ aaudio_.IncreaseOutputBufferSize();
+ }
+
+ // Estimate latency between writing an audio frame to the output stream and
+ // the time that same frame is played out on the output audio device.
+ latency_millis_ = aaudio_.EstimateLatencyMillis();
+ // TODO(henrika): use for development only.
+ if (aaudio_.frames_written() % (1000 * aaudio_.frames_per_burst()) == 0) {
+ RTC_DLOG(INFO) << "output latency: " << latency_millis_
+ << ", num_frames: " << num_frames;
+ }
+
+ // Read audio data from the WebRTC source using the FineAudioBuffer object
+ // and write that data into |audio_data| to be played out by AAudio.
+ const size_t num_bytes =
+ sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
+ // Prime output with zeros during a short initial phase to avoid distortion.
+  // TODO(henrika): do more work to figure out if the initial forced silence
+ // period is really needed.
+ if (aaudio_.frames_written() < 50 * aaudio_.frames_per_burst()) {
+ memset(audio_data, 0, num_bytes);
+ } else {
+ fine_audio_buffer_->GetPlayoutData(
+        rtc::ArrayView<int8_t>(static_cast<int8_t*>(audio_data), num_bytes),
+        static_cast<int>(latency_millis_ + 0.5));
+ }
+
+ // TODO(henrika): possibly add trace here to be included in systrace.
+ // See https://developer.android.com/studio/profile/systrace-commandline.html.
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+void AAudioPlayer::OnMessage(rtc::Message* msg) {
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ switch (msg->message_id) {
+ case kMessageOutputStreamDisconnected:
+ HandleStreamDisconnected();
+ break;
+ }
+}
+
+void AAudioPlayer::HandleStreamDisconnected() {
+ RTC_DCHECK_RUN_ON(&main_thread_checker_);
+ RTC_DLOG(INFO) << "HandleStreamDisconnected";
+ if (!initialized_ || !playing_) {
+ return;
+ }
+  // Perform a restart by first closing the disconnected stream and then
+  // starting a new stream; this time using the new (preferred) output device.
+ audio_device_buffer_->NativeAudioPlayoutInterrupted();
+ StopPlayout();
+ InitPlayout();
+ StartPlayout();
+}
+
+} // namespace android_adm
+
+} // namespace webrtc
diff --git a/sdk/android/src/jni/audio_device/aaudio_player.h b/sdk/android/src/jni/audio_device/aaudio_player.h
new file mode 100644
index 0000000000..ce99797e85
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/aaudio_player.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AAUDIO_PLAYER_H_
+#define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AAUDIO_PLAYER_H_
+
+#include <aaudio/AAudio.h>
+#include <memory>
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/messagehandler.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/thread_checker.h"
+#include "sdk/android/src/jni/audio_device/aaudio_wrapper.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class FineAudioBuffer;
+
+namespace android_adm {
+
+class AudioManager;
+
+// Implements low-latency 16-bit mono PCM audio output support for Android
+// using the C based AAudio API.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will DCHECK if any method is called on an invalid thread. Audio buffers
+// are requested on a dedicated high-priority thread owned by AAudio.
+//
+// The existing design forces the user to call InitPlayout() after StopPlayout()
+// to be able to call StartPlayout() again. This is in line with how the Java-
+// based implementation works.
+//
+// An audio stream can be disconnected, e.g. when an audio device is removed.
+// This implementation will restart the audio stream using the new preferred
+// device if such an event happens.
+//
+// Also supports automatic buffer-size adjustment based on underrun detections
+// where the internal AAudio buffer can be increased when needed. It will
+// reduce the risk of underruns (~glitches) at the expense of an increased
+// latency.
+class AAudioPlayer final : public AAudioObserverInterface,
+ public rtc::MessageHandler {
+ public:
+ explicit AAudioPlayer(AudioManager* audio_manager);
+ ~AAudioPlayer();
+
+ int Init();
+ int Terminate();
+
+ int InitPlayout();
+ bool PlayoutIsInitialized() const;
+
+ int StartPlayout();
+ int StopPlayout();
+ bool Playing() const;
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ // Not implemented in AAudio.
+ int SpeakerVolumeIsAvailable(bool* available); // NOLINT
+ int SetSpeakerVolume(uint32_t volume) { return -1; }
+ int SpeakerVolume(uint32_t* volume) const { return -1; } // NOLINT
+ int MaxSpeakerVolume(uint32_t* maxVolume) const { return -1; } // NOLINT
+ int MinSpeakerVolume(uint32_t* minVolume) const { return -1; } // NOLINT
+
+ protected:
+ // AAudioObserverInterface implementation.
+
+ // For an output stream, this function should render and write |num_frames|
+ // of data in the streams current data format to the |audio_data| buffer.
+ // Called on a real-time thread owned by AAudio.
+ aaudio_data_callback_result_t OnDataCallback(void* audio_data,
+ int32_t num_frames) override;
+  // AAudio calls this function if any error occurs on a callback thread.
+ // Called on a real-time thread owned by AAudio.
+ void OnErrorCallback(aaudio_result_t error) override;
+
+ // rtc::MessageHandler used for restart messages from the error-callback
+ // thread to the main (creating) thread.
+ void OnMessage(rtc::Message* msg) override;
+
+ private:
+ // Closes the existing stream and starts a new stream.
+ void HandleStreamDisconnected();
+
+ // Ensures that methods are called from the same thread as this object is
+ // created on.
+ rtc::ThreadChecker main_thread_checker_;
+
+ // Stores thread ID in first call to AAudioPlayer::OnDataCallback from a
+ // real-time thread owned by AAudio. Detached during construction of this
+ // object.
+ rtc::ThreadChecker thread_checker_aaudio_;
+
+  // The thread on which this object is created.
+ rtc::Thread* main_thread_;
+
+ // Wraps all AAudio resources. Contains an output stream using the default
+ // output audio device. Can be accessed on both the main thread and the
+ // real-time thread owned by AAudio. See separate AAudio documentation about
+ // thread safety.
+ AAudioWrapper aaudio_;
+
+ // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
+ // in chunks of 10ms. It then allows for this data to be pulled in
+  // a finer or coarser granularity. That is, by interacting with this class
+  // instead of directly with the AudioDeviceBuffer, one can ask for any
+  // number of audio data samples.
+ // Example: native buffer size can be 192 audio frames at 48kHz sample rate.
+ // WebRTC will provide 480 audio frames per 10ms but AAudio asks for 192
+ // in each callback (once every 4th ms). This class can then ask for 192 and
+ // the FineAudioBuffer will ask WebRTC for new data approximately only every
+ // second callback and also cache non-utilized audio.
+ std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+ // Counts number of detected underrun events reported by AAudio.
+ int32_t underrun_count_ = 0;
+
+ // True only for the first data callback in each audio session.
+ bool first_data_callback_ = true;
+
+ // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+ // AudioDeviceModuleImpl class and set by AudioDeviceModule::Create().
+ AudioDeviceBuffer* audio_device_buffer_ RTC_GUARDED_BY(main_thread_checker_) =
+ nullptr;
+
+ bool initialized_ RTC_GUARDED_BY(main_thread_checker_) = false;
+ bool playing_ RTC_GUARDED_BY(main_thread_checker_) = false;
+
+ // Estimated latency between writing an audio frame to the output stream and
+ // the time that same frame is played out on the output audio device.
+ double latency_millis_ RTC_GUARDED_BY(thread_checker_aaudio_) = 0;
+};
+
+} // namespace android_adm
+
+} // namespace webrtc
+
+#endif // SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AAUDIO_PLAYER_H_
diff --git a/sdk/android/src/jni/audio_device/aaudio_recorder.cc b/sdk/android/src/jni/audio_device/aaudio_recorder.cc
new file mode 100644
index 0000000000..e940d3272c
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/aaudio_recorder.cc
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/android/src/jni/audio_device/aaudio_recorder.h"
+
+#include "api/array_view.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/timeutils.h"
+#include "sdk/android/src/jni/audio_device/audio_manager.h"
+#include "system_wrappers/include/sleep.h"
+
+namespace webrtc {
+
+namespace android_adm {
+
+enum AudioDeviceMessageType : uint32_t {
+ kMessageInputStreamDisconnected,
+};
+
+AAudioRecorder::AAudioRecorder(AudioManager* audio_manager)
+ : main_thread_(rtc::Thread::Current()),
+ aaudio_(audio_manager, AAUDIO_DIRECTION_INPUT, this) {
+ RTC_LOG(INFO) << "ctor";
+ thread_checker_aaudio_.DetachFromThread();
+}
+
+AAudioRecorder::~AAudioRecorder() {
+ RTC_LOG(INFO) << "dtor";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ Terminate();
+ RTC_LOG(INFO) << "detected owerflows: " << overflow_count_;
+}
+
+int AAudioRecorder::Init() {
+ RTC_LOG(INFO) << "Init";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK_EQ(aaudio_.audio_parameters().channels(), 1u);
+ return 0;
+}
+
+int AAudioRecorder::Terminate() {
+ RTC_LOG(INFO) << "Terminate";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ StopRecording();
+ return 0;
+}
+
+int AAudioRecorder::InitRecording() {
+ RTC_LOG(INFO) << "InitRecording";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!recording_);
+ if (!aaudio_.Init()) {
+ return -1;
+ }
+ initialized_ = true;
+ return 0;
+}
+
+int AAudioRecorder::StartRecording() {
+ RTC_LOG(INFO) << "StartRecording";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(initialized_);
+ RTC_DCHECK(!recording_);
+ if (fine_audio_buffer_) {
+ fine_audio_buffer_->ResetRecord();
+ }
+ if (!aaudio_.Start()) {
+ return -1;
+ }
+ overflow_count_ = aaudio_.xrun_count();
+ first_data_callback_ = true;
+ recording_ = true;
+ return 0;
+}
+
+int AAudioRecorder::StopRecording() {
+ RTC_LOG(INFO) << "StopRecording";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (!initialized_ || !recording_) {
+ return 0;
+ }
+ if (!aaudio_.Stop()) {
+ return -1;
+ }
+ thread_checker_aaudio_.DetachFromThread();
+ initialized_ = false;
+ recording_ = false;
+ return 0;
+}
+
+void AAudioRecorder::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ RTC_LOG(INFO) << "AttachAudioBuffer";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ audio_device_buffer_ = audioBuffer;
+ const AudioParameters audio_parameters = aaudio_.audio_parameters();
+ audio_device_buffer_->SetRecordingSampleRate(audio_parameters.sample_rate());
+ audio_device_buffer_->SetRecordingChannels(audio_parameters.channels());
+ RTC_CHECK(audio_device_buffer_);
+ // Create a modified audio buffer class which allows us to deliver any number
+ // of samples (and not only multiples of 10ms which WebRTC uses) to match the
+ // native AAudio buffer size.
+ const size_t capacity = 5 * audio_parameters.GetBytesPer10msBuffer();
+ fine_audio_buffer_.reset(new FineAudioBuffer(
+ audio_device_buffer_, audio_parameters.sample_rate(), capacity));
+}
+
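+// Capacity arithmetic for the FineAudioBuffer above (illustrative numbers,
+// not measured on a specific device): at 48 kHz mono 16-bit PCM, one 10 ms
+// chunk is 480 frames = 960 bytes, so |capacity| = 5 * 960 = 4800 bytes,
+// i.e. room for 50 ms of audio between the AAudio callbacks and WebRTC.
+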
+int AAudioRecorder::EnableBuiltInAEC(bool enable) {
+ RTC_LOG(INFO) << "EnableBuiltInAEC: " << enable;
+ RTC_LOG(LS_ERROR) << "Not implemented";
+ return -1;
+}
+
+int AAudioRecorder::EnableBuiltInAGC(bool enable) {
+ RTC_LOG(INFO) << "EnableBuiltInAGC: " << enable;
+ RTC_LOG(LS_ERROR) << "Not implemented";
+ return -1;
+}
+
+int AAudioRecorder::EnableBuiltInNS(bool enable) {
+ RTC_LOG(INFO) << "EnableBuiltInNS: " << enable;
+ RTC_LOG(LS_ERROR) << "Not implemented";
+ return -1;
+}
+
+void AAudioRecorder::OnErrorCallback(aaudio_result_t error) {
+ RTC_LOG(LS_ERROR) << "OnErrorCallback: " << AAudio_convertResultToText(error);
+ // RTC_DCHECK(thread_checker_aaudio_.CalledOnValidThread());
+ if (aaudio_.stream_state() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+ // The stream is disconnected and any attempt to use it will return
+ // AAUDIO_ERROR_DISCONNECTED.
+ RTC_LOG(WARNING) << "Input stream disconnected => restart is required";
+ // AAudio documentation states: "You should not close or reopen the stream
+ // from the callback, use another thread instead". A message is therefore
+ // sent to the main thread to do the restart operation.
+ RTC_DCHECK(main_thread_);
+ main_thread_->Post(RTC_FROM_HERE, this, kMessageInputStreamDisconnected);
+ }
+}
+
+// Read and process |num_frames| of data from the |audio_data| buffer.
+// TODO(henrika): possibly add trace here to be included in systrace.
+// See https://developer.android.com/studio/profile/systrace-commandline.html.
+aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
+ void* audio_data,
+ int32_t num_frames) {
+ // TODO(henrika): figure out why we sometimes hit this one.
+ // RTC_DCHECK(thread_checker_aaudio_.CalledOnValidThread());
+ // RTC_LOG(INFO) << "OnDataCallback: " << num_frames;
+ // Drain the input buffer at first callback to ensure that it does not
+ // contain any old data. Will also ensure that the lowest possible latency
+ // is obtained.
+ if (first_data_callback_) {
+ RTC_LOG(INFO) << "--- First input data callback: "
+ << "device id=" << aaudio_.device_id();
+ aaudio_.ClearInputStream(audio_data, num_frames);
+ first_data_callback_ = false;
+ }
+ // Check if the overflow counter has increased and if so log a warning.
+ // TODO(henrika): possibly add UMA stat or capacity extension.
+ const int32_t overflow_count = aaudio_.xrun_count();
+ if (overflow_count > overflow_count_) {
+ RTC_LOG(LS_ERROR) << "Overflow detected: " << overflow_count;
+ overflow_count_ = overflow_count;
+ }
+ // Estimated time between when an audio frame is recorded by the input
+ // device and when it can be read on the input stream.
+ latency_millis_ = aaudio_.EstimateLatencyMillis();
+ // TODO(henrika): use for development only.
+ if (aaudio_.frames_read() % (1000 * aaudio_.frames_per_burst()) == 0) {
+ RTC_DLOG(INFO) << "input latency: " << latency_millis_
+ << ", num_frames: " << num_frames;
+ }
+ // Copy recorded audio in |audio_data| to the WebRTC sink using the
+ // FineAudioBuffer object.
+ const size_t num_bytes =
+ sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
+ fine_audio_buffer_->DeliverRecordedData(
+ rtc::ArrayView<const int8_t>(static_cast<const int8_t*>(audio_data),
+ num_bytes),
+ static_cast<int>(latency_millis_ + 0.5));
+
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
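+// Example of the delivery arithmetic above (illustrative, assuming a mono
+// 16-bit stream at 48 kHz with a 192-frame burst): num_frames = 192 gives
+// num_bytes = 2 * 1 * 192 = 384 bytes per callback, and the FineAudioBuffer
+// asks WebRTC for a new 10 ms chunk (480 frames = 960 bytes) roughly every
+// 2-3 callbacks while caching the remainder.
+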
+void AAudioRecorder::OnMessage(rtc::Message* msg) {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ switch (msg->message_id) {
+ case kMessageInputStreamDisconnected:
+ HandleStreamDisconnected();
+ break;
+ default:
+ RTC_LOG(LS_ERROR) << "Invalid message id: " << msg->message_id;
+ break;
+ }
+}
+
+void AAudioRecorder::HandleStreamDisconnected() {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_LOG(INFO) << "HandleStreamDisconnected";
+ if (!initialized_ || !recording_) {
+ return;
+ }
+ // Perform a restart by first closing the disconnected stream and then start
+ // a new stream; this time using the new (preferred) audio input device.
+ // TODO(henrika): resolve issue where one restart attempt leads to a long
+ // sequence of new calls to OnErrorCallback().
+ // See b/73148976 for details.
+ audio_device_buffer_->NativeAudioRecordingInterrupted();
+ StopRecording();
+ InitRecording();
+ StartRecording();
+}
+
+} // namespace android_adm
+
+} // namespace webrtc
diff --git a/sdk/android/src/jni/audio_device/aaudio_recorder.h b/sdk/android/src/jni/audio_device/aaudio_recorder.h
new file mode 100644
index 0000000000..9924b75ff9
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/aaudio_recorder.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AAUDIO_RECORDER_H_
+#define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AAUDIO_RECORDER_H_
+
+#include <aaudio/AAudio.h>
+#include <memory>
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/messagehandler.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_checker.h"
+#include "sdk/android/src/jni/audio_device/aaudio_wrapper.h"
+
+namespace webrtc {
+
+class FineAudioBuffer;
+class AudioDeviceBuffer;
+
+namespace android_adm {
+
+class AudioManager;
+
+// Implements low-latency 16-bit mono PCM audio input support for Android
+// using the C based AAudio API.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread. Audio buffers
+// are delivered on a dedicated high-priority thread owned by AAudio.
+//
+// The existing design forces the user to call InitRecording() after
+// StopRecording() to be able to call StartRecording() again. This is in line
+// with how the Java-based implementation works.
+//
+// TODO(henrika): add comments about device changes and adaptive buffer
+// management.
+class AAudioRecorder : public AAudioObserverInterface,
+ public rtc::MessageHandler {
+ public:
+ explicit AAudioRecorder(AudioManager* audio_manager);
+ ~AAudioRecorder();
+
+ int Init();
+ int Terminate();
+
+ int InitRecording();
+ bool RecordingIsInitialized() const { return initialized_; }
+
+ int StartRecording();
+ int StopRecording();
+ bool Recording() const { return recording_; }
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ double latency_millis() const { return latency_millis_; }
+
+ // TODO(henrika): add support using AAudio APIs when available.
+ int EnableBuiltInAEC(bool enable);
+ int EnableBuiltInAGC(bool enable);
+ int EnableBuiltInNS(bool enable);
+
+ protected:
+ // AAudioObserverInterface implementation.
+
+ // For an input stream, this function should read |num_frames| of recorded
+ // data, in the stream's current data format, from the |audio_data| buffer.
+ // Called on a real-time thread owned by AAudio.
+ aaudio_data_callback_result_t OnDataCallback(void* audio_data,
+ int32_t num_frames) override;
+
+ // AAudio calls this function if any error occurs on a callback thread.
+ // Called on a real-time thread owned by AAudio.
+ void OnErrorCallback(aaudio_result_t error) override;
+
+ // rtc::MessageHandler used for restart messages.
+ void OnMessage(rtc::Message* msg) override;
+
+ private:
+ // Closes the existing stream and starts a new stream.
+ void HandleStreamDisconnected();
+
+ // Ensures that methods are called from the same thread as this object is
+ // created on.
+ rtc::ThreadChecker thread_checker_;
+
+ // Stores thread ID in first call to AAudioRecorder::OnDataCallback from a
+ // real-time thread owned by AAudio. Detached during construction of this
+ // object.
+ rtc::ThreadChecker thread_checker_aaudio_;
+
+ // The thread on which this object is created.
+ rtc::Thread* main_thread_;
+
+ // Wraps all AAudio resources. Contains an input stream using the default
+ // input audio device.
+ AAudioWrapper aaudio_;
+
+ // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+ // AudioDeviceModuleImpl class and set by AudioDeviceModule::Create().
+ AudioDeviceBuffer* audio_device_buffer_ = nullptr;
+
+ bool initialized_ = false;
+ bool recording_ = false;
+
+ // Consumes audio of native buffer size and feeds the WebRTC layer with 10ms
+ // chunks of audio.
+ std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+ // Counts number of detected overflow events reported by AAudio.
+ int32_t overflow_count_ = 0;
+
+ // Estimated time between when an audio frame is recorded by the input
+ // device and when it can be read on the input stream.
+ double latency_millis_ = 0;
+
+ // True only for the first data callback in each audio session.
+ bool first_data_callback_ = true;
+};
+
+} // namespace android_adm
+
+} // namespace webrtc
+
+#endif // SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AAUDIO_RECORDER_H_
diff --git a/sdk/android/src/jni/audio_device/aaudio_wrapper.cc b/sdk/android/src/jni/audio_device/aaudio_wrapper.cc
new file mode 100644
index 0000000000..d8694b7ae0
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/aaudio_wrapper.cc
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/android/src/jni/audio_device/aaudio_wrapper.h"
+
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/timeutils.h"
+#include "sdk/android/src/jni/audio_device/audio_manager.h"
+
+#define LOG_ON_ERROR(op) \
+ do { \
+ aaudio_result_t result = (op); \
+ if (result != AAUDIO_OK) { \
+ RTC_LOG(LS_ERROR) << #op << ": " << AAudio_convertResultToText(result); \
+ } \
+ } while (0)
+
+#define RETURN_ON_ERROR(op, ...) \
+ do { \
+ aaudio_result_t result = (op); \
+ if (result != AAUDIO_OK) { \
+ RTC_LOG(LS_ERROR) << #op << ": " << AAudio_convertResultToText(result); \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
+
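+// Usage sketch for the macros above: wrapping an AAudio call logs a readable
+// error string on failure, and RETURN_ON_ERROR additionally propagates a
+// caller-chosen return value. For example,
+//
+//   RETURN_ON_ERROR(AAudioStream_requestStart(stream_), false);
+//
+// returns false from the enclosing function if the start request fails.
+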
+namespace webrtc {
+
+namespace android_adm {
+
+namespace {
+
+const char* DirectionToString(aaudio_direction_t direction) {
+ switch (direction) {
+ case AAUDIO_DIRECTION_OUTPUT:
+ return "OUTPUT";
+ case AAUDIO_DIRECTION_INPUT:
+ return "INPUT";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+const char* SharingModeToString(aaudio_sharing_mode_t mode) {
+ switch (mode) {
+ case AAUDIO_SHARING_MODE_EXCLUSIVE:
+ return "EXCLUSIVE";
+ case AAUDIO_SHARING_MODE_SHARED:
+ return "SHARED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+const char* PerformanceModeToString(aaudio_performance_mode_t mode) {
+ switch (mode) {
+ case AAUDIO_PERFORMANCE_MODE_NONE:
+ return "NONE";
+ case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
+ return "POWER_SAVING";
+ case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
+ return "LOW_LATENCY";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+const char* FormatToString(int32_t id) {
+ switch (id) {
+ case AAUDIO_FORMAT_INVALID:
+ return "INVALID";
+ case AAUDIO_FORMAT_UNSPECIFIED:
+ return "UNSPECIFIED";
+ case AAUDIO_FORMAT_PCM_I16:
+ return "PCM_I16";
+ case AAUDIO_FORMAT_PCM_FLOAT:
+ return "FLOAT";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+void ErrorCallback(AAudioStream* stream,
+ void* user_data,
+ aaudio_result_t error) {
+ RTC_DCHECK(user_data);
+ AAudioWrapper* aaudio_wrapper = reinterpret_cast<AAudioWrapper*>(user_data);
+ RTC_LOG(WARNING) << "ErrorCallback: "
+ << DirectionToString(aaudio_wrapper->direction());
+ RTC_DCHECK(aaudio_wrapper->observer());
+ aaudio_wrapper->observer()->OnErrorCallback(error);
+}
+
+aaudio_data_callback_result_t DataCallback(AAudioStream* stream,
+ void* user_data,
+ void* audio_data,
+ int32_t num_frames) {
+ RTC_DCHECK(user_data);
+ RTC_DCHECK(audio_data);
+ AAudioWrapper* aaudio_wrapper = reinterpret_cast<AAudioWrapper*>(user_data);
+ RTC_DCHECK(aaudio_wrapper->observer());
+ return aaudio_wrapper->observer()->OnDataCallback(audio_data, num_frames);
+}
+
+// Wraps the stream builder object to ensure that it is released properly when
+// the stream builder goes out of scope.
+class ScopedStreamBuilder {
+ public:
+ ScopedStreamBuilder() {
+ LOG_ON_ERROR(AAudio_createStreamBuilder(&builder_));
+ RTC_DCHECK(builder_);
+ }
+ ~ScopedStreamBuilder() {
+ if (builder_) {
+ LOG_ON_ERROR(AAudioStreamBuilder_delete(builder_));
+ }
+ }
+
+ AAudioStreamBuilder* get() const { return builder_; }
+
+ private:
+ AAudioStreamBuilder* builder_ = nullptr;
+};
+
+} // namespace
+
+AAudioWrapper::AAudioWrapper(AudioManager* audio_manager,
+ aaudio_direction_t direction,
+ AAudioObserverInterface* observer)
+ : direction_(direction), observer_(observer) {
+ RTC_LOG(INFO) << "ctor";
+ RTC_DCHECK(observer_);
+ if (direction_ == AAUDIO_DIRECTION_OUTPUT) {
+ audio_parameters_ = audio_manager->GetPlayoutAudioParameters();
+ } else {
+ audio_parameters_ = audio_manager->GetRecordAudioParameters();
+ }
+ aaudio_thread_checker_.DetachFromThread();
+ RTC_LOG(INFO) << audio_parameters_.ToString();
+}
+
+AAudioWrapper::~AAudioWrapper() {
+ RTC_LOG(INFO) << "dtor";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!stream_);
+}
+
+bool AAudioWrapper::Init() {
+ RTC_LOG(INFO) << "Init";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ // Creates a stream builder which can be used to open an audio stream.
+ ScopedStreamBuilder builder;
+ // Configures the stream builder using audio parameters given at construction.
+ SetStreamConfiguration(builder.get());
+ // Opens a stream based on options in the stream builder.
+ if (!OpenStream(builder.get())) {
+ return false;
+ }
+ // Ensures that the opened stream could activate the requested settings.
+ if (!VerifyStreamConfiguration()) {
+ return false;
+ }
+ // Optimizes the buffer scheme for lowest possible latency and creates
+ // additional buffer logic to match the 10ms buffer size used in WebRTC.
+ if (!OptimizeBuffers()) {
+ return false;
+ }
+ LogStreamState();
+ return true;
+}
+
+bool AAudioWrapper::Start() {
+ RTC_LOG(INFO) << "Start";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ // TODO(henrika): this state check might not be needed.
+ aaudio_stream_state_t current_state = AAudioStream_getState(stream_);
+ if (current_state != AAUDIO_STREAM_STATE_OPEN) {
+ RTC_LOG(LS_ERROR) << "Invalid state: "
+ << AAudio_convertStreamStateToText(current_state);
+ return false;
+ }
+ // Asynchronous request for the stream to start.
+ RETURN_ON_ERROR(AAudioStream_requestStart(stream_), false);
+ LogStreamState();
+ return true;
+}
+
+bool AAudioWrapper::Stop() {
+ RTC_LOG(INFO) << "Stop: " << DirectionToString(direction());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ // Asynchronous request for the stream to stop.
+ RETURN_ON_ERROR(AAudioStream_requestStop(stream_), false);
+ CloseStream();
+ aaudio_thread_checker_.DetachFromThread();
+ return true;
+}
+
+double AAudioWrapper::EstimateLatencyMillis() const {
+ RTC_DCHECK(stream_);
+ double latency_millis = 0.0;
+ if (direction() == AAUDIO_DIRECTION_INPUT) {
+ // For input streams, the best guess we can do is to use the current burst
+ // size as a delay estimate.
+ latency_millis = static_cast<double>(frames_per_burst()) / sample_rate() *
+ rtc::kNumMillisecsPerSec;
+ } else {
+ int64_t existing_frame_index;
+ int64_t existing_frame_presentation_time;
+ // Get the time at which a particular frame was presented to audio hardware.
+ aaudio_result_t result = AAudioStream_getTimestamp(
+ stream_, CLOCK_MONOTONIC, &existing_frame_index,
+ &existing_frame_presentation_time);
+ // Results are only valid when the stream is in AAUDIO_STREAM_STATE_STARTED.
+ if (result == AAUDIO_OK) {
+ // Get write index for next audio frame.
+ int64_t next_frame_index = frames_written();
+ // Number of frames between next frame and the existing frame.
+ int64_t frame_index_delta = next_frame_index - existing_frame_index;
+ // Assume the next frame will be written now.
+ int64_t next_frame_write_time = rtc::TimeNanos();
+ // Calculate time when next frame will be presented to the hardware taking
+ // sample rate into account.
+ int64_t frame_time_delta =
+ (frame_index_delta * rtc::kNumNanosecsPerSec) / sample_rate();
+ int64_t next_frame_presentation_time =
+ existing_frame_presentation_time + frame_time_delta;
+ // Derive a latency estimate given results above.
+ latency_millis = static_cast<double>(next_frame_presentation_time -
+ next_frame_write_time) /
+ rtc::kNumNanosecsPerMillisec;
+ }
+ }
+ return latency_millis;
+}
+
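+// Worked example for the estimate above (illustrative numbers): an input
+// stream with frames_per_burst() = 192 at sample_rate() = 48000 yields
+// 192 / 48000 * 1000 = 4 ms. For an output stream, if the next frame to be
+// written has a computed presentation time 20 ms ahead of "now", the
+// estimated output latency is 20 ms.
+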
+// Returns true if the output buffer size was increased by one burst, or false
+// if the new size would exceed the maximum buffer capacity.
+bool AAudioWrapper::IncreaseOutputBufferSize() {
+ RTC_LOG(INFO) << "IncreaseBufferSize";
+ RTC_DCHECK(stream_);
+ RTC_DCHECK(aaudio_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_OUTPUT);
+ aaudio_result_t buffer_size = AAudioStream_getBufferSizeInFrames(stream_);
+ // Try to increase the buffer size by one burst to reduce the risk of underruns.
+ buffer_size += frames_per_burst();
+ // Verify that the new buffer size is not larger than max capacity.
+ // TODO(henrika): keep track of case when we reach the capacity limit.
+ const int32_t max_buffer_size = buffer_capacity_in_frames();
+ if (buffer_size > max_buffer_size) {
+ RTC_LOG(LS_ERROR) << "Required buffer size (" << buffer_size
+ << ") is higher than max: " << max_buffer_size;
+ return false;
+ }
+ RTC_LOG(INFO) << "Updating buffer size to: " << buffer_size
+ << " (max=" << max_buffer_size << ")";
+ buffer_size = AAudioStream_setBufferSizeInFrames(stream_, buffer_size);
+ if (buffer_size < 0) {
+ RTC_LOG(LS_ERROR) << "Failed to change buffer size: "
+ << AAudio_convertResultToText(buffer_size);
+ return false;
+ }
+ RTC_LOG(INFO) << "Buffer size changed to: " << buffer_size;
+ return true;
+}
+
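+// Example growth sequence for the method above (illustrative numbers): with
+// frames_per_burst() = 192 and buffer_capacity_in_frames() = 1920, repeated
+// underruns grow the buffer 192 -> 384 -> 576 -> ... up to 1920 frames, after
+// which the method logs an error and returns false.
+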
+void AAudioWrapper::ClearInputStream(void* audio_data, int32_t num_frames) {
+ RTC_LOG(INFO) << "ClearInputStream";
+ RTC_DCHECK(stream_);
+ RTC_DCHECK(aaudio_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_INPUT);
+ aaudio_result_t cleared_frames = 0;
+ do {
+ cleared_frames = AAudioStream_read(stream_, audio_data, num_frames, 0);
+ } while (cleared_frames > 0);
+}
+
+AAudioObserverInterface* AAudioWrapper::observer() const {
+ return observer_;
+}
+
+AudioParameters AAudioWrapper::audio_parameters() const {
+ return audio_parameters_;
+}
+
+int32_t AAudioWrapper::samples_per_frame() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getSamplesPerFrame(stream_);
+}
+
+int32_t AAudioWrapper::buffer_size_in_frames() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getBufferSizeInFrames(stream_);
+}
+
+int32_t AAudioWrapper::buffer_capacity_in_frames() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getBufferCapacityInFrames(stream_);
+}
+
+int32_t AAudioWrapper::device_id() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getDeviceId(stream_);
+}
+
+int32_t AAudioWrapper::xrun_count() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getXRunCount(stream_);
+}
+
+int32_t AAudioWrapper::format() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getFormat(stream_);
+}
+
+int32_t AAudioWrapper::sample_rate() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getSampleRate(stream_);
+}
+
+int32_t AAudioWrapper::channel_count() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getChannelCount(stream_);
+}
+
+int32_t AAudioWrapper::frames_per_callback() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getFramesPerDataCallback(stream_);
+}
+
+aaudio_sharing_mode_t AAudioWrapper::sharing_mode() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getSharingMode(stream_);
+}
+
+aaudio_performance_mode_t AAudioWrapper::performance_mode() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getPerformanceMode(stream_);
+}
+
+aaudio_stream_state_t AAudioWrapper::stream_state() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getState(stream_);
+}
+
+int64_t AAudioWrapper::frames_written() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getFramesWritten(stream_);
+}
+
+int64_t AAudioWrapper::frames_read() const {
+ RTC_DCHECK(stream_);
+ return AAudioStream_getFramesRead(stream_);
+}
+
+void AAudioWrapper::SetStreamConfiguration(AAudioStreamBuilder* builder) {
+ RTC_LOG(INFO) << "SetStreamConfiguration";
+ RTC_DCHECK(builder);
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ // Request usage of default primary output/input device.
+ // TODO(henrika): verify that default device follows Java APIs.
+ // https://developer.android.com/reference/android/media/AudioDeviceInfo.html.
+ AAudioStreamBuilder_setDeviceId(builder, AAUDIO_UNSPECIFIED);
+ // Use preferred sample rate given by the audio parameters.
+ AAudioStreamBuilder_setSampleRate(builder, audio_parameters().sample_rate());
+ // Use preferred channel configuration given by the audio parameters.
+ AAudioStreamBuilder_setChannelCount(builder, audio_parameters().channels());
+ // Always use 16-bit PCM audio sample format.
+ AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_I16);
+ // TODO(henrika): investigate effect of using AAUDIO_SHARING_MODE_EXCLUSIVE.
+ // Exclusive mode could give the lowest possible latency, but for now ask for
+ // shared mode, which is what the stream verification below expects.
+ AAudioStreamBuilder_setSharingMode(builder, AAUDIO_SHARING_MODE_SHARED);
+ // Use the direction that was given at construction.
+ AAudioStreamBuilder_setDirection(builder, direction_);
+ // TODO(henrika): investigate performance using different performance modes.
+ AAudioStreamBuilder_setPerformanceMode(builder,
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+ // Given that WebRTC applications require low latency, our audio stream uses
+ // an asynchronous callback function to transfer data to and from the
+ // application. AAudio executes the callback in a higher-priority thread that
+ // has better performance.
+ AAudioStreamBuilder_setDataCallback(builder, DataCallback, this);
+ // Request that AAudio calls this function if any error occurs on a callback
+ // thread.
+ AAudioStreamBuilder_setErrorCallback(builder, ErrorCallback, this);
+}
+
+bool AAudioWrapper::OpenStream(AAudioStreamBuilder* builder) {
+ RTC_LOG(INFO) << "OpenStream";
+ RTC_DCHECK(builder);
+ AAudioStream* stream = nullptr;
+ RETURN_ON_ERROR(AAudioStreamBuilder_openStream(builder, &stream), false);
+ stream_ = stream;
+ LogStreamConfiguration();
+ return true;
+}
+
+void AAudioWrapper::CloseStream() {
+ RTC_LOG(INFO) << "CloseStream";
+ RTC_DCHECK(stream_);
+ LOG_ON_ERROR(AAudioStream_close(stream_));
+ stream_ = nullptr;
+}
+
+void AAudioWrapper::LogStreamConfiguration() {
+ RTC_DCHECK(stream_);
+ char ss_buf[1024];
+ rtc::SimpleStringBuilder ss(ss_buf);
+ ss << "Stream Configuration: ";
+ ss << "sample rate=" << sample_rate() << ", channels=" << channel_count();
+ ss << ", samples per frame=" << samples_per_frame();
+ ss << ", format=" << FormatToString(format());
+ ss << ", sharing mode=" << SharingModeToString(sharing_mode());
+ ss << ", performance mode=" << PerformanceModeToString(performance_mode());
+ ss << ", direction=" << DirectionToString(direction());
+ ss << ", device id=" << AAudioStream_getDeviceId(stream_);
+ ss << ", frames per callback=" << frames_per_callback();
+ RTC_LOG(INFO) << ss.str();
+}
+
+void AAudioWrapper::LogStreamState() {
+ RTC_LOG(INFO) << "AAudio stream state: "
+ << AAudio_convertStreamStateToText(stream_state());
+}
+
+bool AAudioWrapper::VerifyStreamConfiguration() {
+ RTC_LOG(INFO) << "VerifyStreamConfiguration";
+ RTC_DCHECK(stream_);
+ // TODO(henrika): should we verify device ID as well?
+ if (AAudioStream_getSampleRate(stream_) != audio_parameters().sample_rate()) {
+ RTC_LOG(LS_ERROR) << "Stream unable to use requested sample rate";
+ return false;
+ }
+ if (AAudioStream_getChannelCount(stream_) !=
+ static_cast(audio_parameters().channels())) {
+ RTC_LOG(LS_ERROR) << "Stream unable to use requested channel count";
+ return false;
+ }
+ if (AAudioStream_getFormat(stream_) != AAUDIO_FORMAT_PCM_I16) {
+ RTC_LOG(LS_ERROR) << "Stream unable to use requested format";
+ return false;
+ }
+ if (AAudioStream_getSharingMode(stream_) != AAUDIO_SHARING_MODE_SHARED) {
+ RTC_LOG(LS_ERROR) << "Stream unable to use requested sharing mode";
+ return false;
+ }
+ if (AAudioStream_getPerformanceMode(stream_) !=
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY) {
+ RTC_LOG(LS_ERROR) << "Stream unable to use requested performance mode";
+ return false;
+ }
+ if (AAudioStream_getDirection(stream_) != direction()) {
+ RTC_LOG(LS_ERROR) << "Stream direction could not be set";
+ return false;
+ }
+ if (AAudioStream_getSamplesPerFrame(stream_) !=
+ static_cast(audio_parameters().channels())) {
+ RTC_LOG(LS_ERROR) << "Invalid number of samples per frame";
+ return false;
+ }
+ return true;
+}
+
+bool AAudioWrapper::OptimizeBuffers() {
+ RTC_LOG(INFO) << "OptimizeBuffers";
+ RTC_DCHECK(stream_);
+ // Maximum number of frames that can be filled without blocking.
+ RTC_LOG(INFO) << "max buffer capacity in frames: "
+ << buffer_capacity_in_frames();
+ // Query the number of frames that the application should read or write at
+ // one time for optimal performance.
+ int32_t frames_per_burst = AAudioStream_getFramesPerBurst(stream_);
+ RTC_LOG(INFO) << "frames per burst for optimal performance: "
+ << frames_per_burst;
+ frames_per_burst_ = frames_per_burst;
+ if (direction() == AAUDIO_DIRECTION_INPUT) {
+ // There is no point in calling setBufferSizeInFrames() for input streams
+ // since it has no effect on the performance (latency in this case).
+ return true;
+ }
+ // Set the buffer size equal to the burst size to guarantee the lowest possible latency.
+ // This size might change for output streams if underruns are detected and
+ // automatic buffer adjustment is enabled.
+ AAudioStream_setBufferSizeInFrames(stream_, frames_per_burst);
+ int32_t buffer_size = AAudioStream_getBufferSizeInFrames(stream_);
+ if (buffer_size != frames_per_burst) {
+ RTC_LOG(LS_ERROR) << "Failed to use optimal buffer burst size";
+ return false;
+ }
+ RTC_LOG(INFO) << "buffer burst size in frames: " << buffer_size;
+ return true;
+}
+
+} // namespace android_adm
+
+} // namespace webrtc
diff --git a/sdk/android/src/jni/audio_device/aaudio_wrapper.h b/sdk/android/src/jni/audio_device/aaudio_wrapper.h
new file mode 100644
index 0000000000..8cd999bad9
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/aaudio_wrapper.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AAUDIO_WRAPPER_H_
+#define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AAUDIO_WRAPPER_H_
+
+#include <aaudio/AAudio.h>
+
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/thread_checker.h"
+
+namespace webrtc {
+
+namespace android_adm {
+
+class AudioManager;
+
+// AAudio callback interface for audio transport to/from the AAudio stream.
+// The interface also contains an error callback method for notifications of
+// e.g. device changes.
+class AAudioObserverInterface {
+ public:
+ // Audio data will be passed into or out of this function depending on the
+ // direction of the audio stream. This callback function will be called on a
+ // real-time thread owned by AAudio.
+ virtual aaudio_data_callback_result_t OnDataCallback(void* audio_data,
+ int32_t num_frames) = 0;
+ // AAudio will call this function if any error occurs on a callback thread.
+ // In response, this function could signal or launch another thread to reopen
+ // a stream on another device. Do not reopen the stream in this callback.
+ virtual void OnErrorCallback(aaudio_result_t error) = 0;
+
+ protected:
+ virtual ~AAudioObserverInterface() {}
+};
+
+// Utility class which wraps the C-based AAudio API into a more handy C++ class
+// where the underlying resources (AAudioStreamBuilder and AAudioStream) are
+// encapsulated. User must set the direction (in or out) at construction since
+// it defines the stream type and the direction of the data flow in the
+// AAudioObserverInterface.
+//
+// AAudio is a new Android C API introduced in the Android O (26) release.
+// It is designed for high-performance audio applications that require low
+// latency. Applications communicate with AAudio by reading and writing data
+// to streams.
+//
+// Each stream is attached to a single audio device, where each audio device
+// has a unique ID. The ID can be used to bind an audio stream to a specific
+// audio device but this implementation lets AAudio choose the default primary
+// device instead (device selection takes place in Java). A stream can only
+// move data in one direction. When a stream is opened, Android checks to
+// ensure that the audio device and stream direction agree.
+class AAudioWrapper {
+ public:
+ AAudioWrapper(AudioManager* audio_manager,
+ aaudio_direction_t direction,
+ AAudioObserverInterface* observer);
+ ~AAudioWrapper();
+
+ bool Init();
+ bool Start();
+ bool Stop();
+
+ // For output streams: estimates latency between writing an audio frame to
+ // the output stream and the time that same frame is played out on the output
+ // audio device.
+ // For input streams: estimates latency between reading an audio frame from
+ // the input stream and the time that same frame was recorded on the input
+ // audio device.
+ double EstimateLatencyMillis() const;
+
+ // Increases the internal buffer size for output streams by one burst size to
+ // reduce the risk of underruns. Can be used while a stream is active.
+ bool IncreaseOutputBufferSize();
+
+ // Drains the recording stream of any existing data by reading from it until
+ // it's empty. Can be used to clear out old data before starting a new audio
+ // session.
+ void ClearInputStream(void* audio_data, int32_t num_frames);
+
+ AAudioObserverInterface* observer() const;
+ AudioParameters audio_parameters() const;
+ int32_t samples_per_frame() const;
+ int32_t buffer_size_in_frames() const;
+ int32_t buffer_capacity_in_frames() const;
+ int32_t device_id() const;
+ int32_t xrun_count() const;
+ int32_t format() const;
+ int32_t sample_rate() const;
+ int32_t channel_count() const;
+ int32_t frames_per_callback() const;
+ aaudio_sharing_mode_t sharing_mode() const;
+ aaudio_performance_mode_t performance_mode() const;
+ aaudio_stream_state_t stream_state() const;
+ int64_t frames_written() const;
+ int64_t frames_read() const;
+ aaudio_direction_t direction() const { return direction_; }
+ AAudioStream* stream() const { return stream_; }
+ int32_t frames_per_burst() const { return frames_per_burst_; }
+
+ private:
+ void SetStreamConfiguration(AAudioStreamBuilder* builder);
+ bool OpenStream(AAudioStreamBuilder* builder);
+ void CloseStream();
+ void LogStreamConfiguration();
+ void LogStreamState();
+ bool VerifyStreamConfiguration();
+ bool OptimizeBuffers();
+
+ rtc::ThreadChecker thread_checker_;
+ rtc::ThreadChecker aaudio_thread_checker_;
+ AudioParameters audio_parameters_;
+ const aaudio_direction_t direction_;
+ AAudioObserverInterface* observer_ = nullptr;
+ AAudioStream* stream_ = nullptr;
+ int32_t frames_per_burst_ = 0;
+};
+
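+// Typical call sequence (as used by AAudioRecorder and AAudioPlayer in this
+// change): construct with a direction and observer, then Init() -> Start()
+// -> (data/error callbacks) -> Stop(). A sketch:
+//
+//   AAudioWrapper aaudio(audio_manager, AAUDIO_DIRECTION_INPUT, observer);
+//   if (aaudio.Init() && aaudio.Start()) {
+//     // OnDataCallback() now fires on the AAudio real-time thread.
+//   }
+//   aaudio.Stop();
+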
+} // namespace android_adm
+
+} // namespace webrtc
+
+#endif // SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AAUDIO_WRAPPER_H_
diff --git a/sdk/android/src/jni/audio_device/audio_common.h b/sdk/android/src/jni/audio_device/audio_common.h
new file mode 100644
index 0000000000..868af590bd
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/audio_common.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_COMMON_H_
+#define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_COMMON_H_
+
+namespace webrtc {
+
+namespace android_adm {
+
+const int kDefaultSampleRate = 44100;
+// Delay estimates for the two different supported modes. These values are based
+// on real-time round-trip delay estimates on a large set of devices and they
+// are lower bounds since the filter length is 128 ms, so the AEC works for
+// delays in the range [50, ~170] ms and [150, ~270] ms. Note that, in most
+// cases, the lowest delay estimate will not be utilized since devices that
+// support low-latency output audio often supports HW AEC as well.
+const int kLowLatencyModeDelayEstimateInMilliseconds = 50;
+const int kHighLatencyModeDelayEstimateInMilliseconds = 150;
+
+} // namespace android_adm
+
+} // namespace webrtc
+
+#endif // SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_COMMON_H_
diff --git a/sdk/android/src/jni/audio_device/audio_device_template_android.h b/sdk/android/src/jni/audio_device/audio_device_template_android.h
new file mode 100644
index 0000000000..9e7c42e4a8
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/audio_device_template_android.h
@@ -0,0 +1,670 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_DEVICE_TEMPLATE_ANDROID_H_
+#define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_DEVICE_TEMPLATE_ANDROID_H_
+
+#include <memory>
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/thread_checker.h"
+#include "sdk/android/src/jni/audio_device/audio_manager.h"
+#include "system_wrappers/include/metrics.h"
+
+#define CHECKinitialized_() \
+ { \
+ if (!initialized_) { \
+ return -1; \
+ } \
+ }
+
+#define CHECKinitialized__BOOL() \
+ { \
+ if (!initialized_) { \
+ return false; \
+ } \
+ }
+
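+// These guard macros assume the enclosing method returns int32_t (first
+// variant) or bool (second variant); e.g. CHECKinitialized_() makes
+// InitPlayout() below return -1 whenever Init() has not succeeded yet.
+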
+namespace webrtc {
+
+namespace android_adm {
+
+// InputType/OutputType can be any class that implements the capturing/rendering
+// part of the AudioDeviceGeneric API.
+// Construction and destruction must be done on one and the same thread. Each
+// internal implementation of InputType and OutputType will RTC_DCHECK if that
+// is not the case. All implemented methods must also be called on the same
+// thread. See comments in each InputType/OutputType class for more info.
+// It is possible to call the two static methods (SetAndroidAudioDeviceObjects
+// and ClearAndroidAudioDeviceObjects) from a different thread but both will
+// RTC_CHECK that the calling thread is attached to a Java VM.
+
+template <class InputType, class OutputType>
+class AudioDeviceTemplateAndroid : public AudioDeviceModule {
+ public:
+ // For use with UMA logging. Must be kept in sync with histograms.xml in
+ // Chrome, located at
+ // https://cs.chromium.org/chromium/src/tools/metrics/histograms/histograms.xml
+ enum class InitStatus {
+ OK = 0,
+ PLAYOUT_ERROR = 1,
+ RECORDING_ERROR = 2,
+ OTHER_ERROR = 3,
+ NUM_STATUSES = 4
+ };
+
+ explicit AudioDeviceTemplateAndroid(AudioDeviceModule::AudioLayer audio_layer)
+ : audio_layer_(audio_layer), initialized_(false) {
+ RTC_LOG(INFO) << __FUNCTION__;
+ thread_checker_.DetachFromThread();
+ }
+
+ virtual ~AudioDeviceTemplateAndroid() { RTC_LOG(INFO) << __FUNCTION__; }
+
+ int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer* audioLayer) const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ *audioLayer = audio_layer_;
+ return 0;
+ }
+
+ int32_t RegisterAudioCallback(AudioTransport* audioCallback) override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ return audio_device_buffer_->RegisterAudioCallback(audioCallback);
+ }
+
+ int32_t Init() override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ audio_manager_ = rtc::MakeUnique<AudioManager>();
+ output_ = rtc::MakeUnique<OutputType>(audio_manager_.get());
+ input_ = rtc::MakeUnique<InputType>(audio_manager_.get());
+ audio_manager_->SetActiveAudioLayer(audio_layer_);
+ audio_device_buffer_ = rtc::MakeUnique<AudioDeviceBuffer>();
+ AttachAudioBuffer();
+ if (initialized_) {
+ return 0;
+ }
+ InitStatus status;
+ if (!audio_manager_->Init()) {
+ status = InitStatus::OTHER_ERROR;
+ } else if (output_->Init() != 0) {
+ audio_manager_->Close();
+ status = InitStatus::PLAYOUT_ERROR;
+ } else if (input_->Init() != 0) {
+ output_->Terminate();
+ audio_manager_->Close();
+ status = InitStatus::RECORDING_ERROR;
+ } else {
+ initialized_ = true;
+ status = InitStatus::OK;
+ }
+ RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.InitializationResult",
+ static_cast<int>(status),
+ static_cast<int>(InitStatus::NUM_STATUSES));
+ if (status != InitStatus::OK) {
+ RTC_LOG(LS_ERROR) << "Audio device initialization failed.";
+ return -1;
+ }
+ return 0;
+ }
+
+ int32_t Terminate() override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ if (!initialized_)
+ return 0;
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ int32_t err = input_->Terminate();
+ err |= output_->Terminate();
+ err |= !audio_manager_->Close();
+ initialized_ = false;
+ RTC_DCHECK_EQ(err, 0);
+ return err;
+ }
+
+ bool Initialized() const override {
+ RTC_LOG(INFO) << __FUNCTION__ << ":" << initialized_;
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); // not done in _impl
+ return initialized_;
+ }
+
+ int16_t PlayoutDevices() override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ RTC_LOG(INFO) << "output: " << 1;
+ return 1;
+ }
+
+ int16_t RecordingDevices() override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ RTC_LOG(INFO) << "output: " << 1;
+ return 1;
+ }
+
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override {
+ FATAL() << "Should never be called";
+ return -1;
+ }
+
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override {
+ FATAL() << "Should never be called";
+ return -1;
+ }
+
+ int32_t SetPlayoutDevice(uint16_t index) override {
+ // OK to use but it has no effect currently since device selection is
+ // done using Android APIs instead.
+ RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ")";
+ return 0;
+ }
+
+ int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) override {
+ FATAL() << "Should never be called";
+ return -1;
+ }
+
+ int32_t SetRecordingDevice(uint16_t index) override {
+ // OK to use but it has no effect currently since device selection is
+ // done using Android APIs instead.
+ RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ")";
+ return 0;
+ }
+
+ int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) override {
+ FATAL() << "Should never be called";
+ return -1;
+ }
+
+ int32_t PlayoutIsAvailable(bool* available) override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ *available = true;
+ RTC_LOG(INFO) << "output: " << *available;
+ return 0;
+ }
+
+ int32_t InitPlayout() override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ if (PlayoutIsInitialized()) {
+ return 0;
+ }
+ int32_t result = output_->InitPlayout();
+ RTC_LOG(INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitPlayoutSuccess",
+ static_cast<int>(result == 0));
+ return result;
+ }
+
+ bool PlayoutIsInitialized() const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ return output_->PlayoutIsInitialized();
+ }
+
+ int32_t RecordingIsAvailable(bool* available) override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ *available = true;
+ RTC_LOG(INFO) << "output: " << *available;
+ return 0;
+ }
+
+ int32_t InitRecording() override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ if (RecordingIsInitialized()) {
+ return 0;
+ }
+ int32_t result = input_->InitRecording();
+ RTC_LOG(INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitRecordingSuccess",
+ static_cast<int>(result == 0));
+ return result;
+ }
+
+ bool RecordingIsInitialized() const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ return input_->RecordingIsInitialized();
+ }
+
+ int32_t StartPlayout() override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ if (Playing()) {
+ return 0;
+ }
+ audio_device_buffer_->StartPlayout();
+ if (!audio_manager_->IsCommunicationModeEnabled()) {
+ RTC_LOG(WARNING)
+ << "The application should use MODE_IN_COMMUNICATION audio mode!";
+ }
+ int32_t result = output_->StartPlayout();
+ RTC_LOG(INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartPlayoutSuccess",
+ static_cast<int>(result == 0));
+ return result;
+ }
+
+ int32_t StopPlayout() override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ // Avoid using the audio manager (JNI/Java cost) if playout was inactive.
+ if (!Playing())
+ return 0;
+ audio_device_buffer_->StopPlayout();
+ int32_t result = output_->StopPlayout();
+ RTC_LOG(INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopPlayoutSuccess",
+ static_cast<int>(result == 0));
+ return result;
+ }
+
+ bool Playing() const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ return output_->Playing();
+ }
+
+ int32_t StartRecording() override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ if (Recording()) {
+ return 0;
+ }
+ if (!audio_manager_->IsCommunicationModeEnabled()) {
+ RTC_LOG(WARNING)
+ << "The application should use MODE_IN_COMMUNICATION audio mode!";
+ }
+ audio_device_buffer_->StartRecording();
+ int32_t result = input_->StartRecording();
+ RTC_LOG(INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartRecordingSuccess",
+ static_cast<int>(result == 0));
+ return result;
+ }
+
+ int32_t StopRecording() override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ // Avoid using the audio manager (JNI/Java cost) if recording was inactive.
+ if (!Recording())
+ return 0;
+ audio_device_buffer_->StopRecording();
+ int32_t result = input_->StopRecording();
+ RTC_LOG(INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopRecordingSuccess",
+ static_cast<int>(result == 0));
+ return result;
+ }
+
+ bool Recording() const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ return input_->Recording();
+ }
+
+ int32_t InitSpeaker() override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ return 0;
+ }
+
+ bool SpeakerIsInitialized() const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ RTC_LOG(INFO) << "output: " << true;
+ return true;
+ }
+
+ int32_t InitMicrophone() override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ return 0;
+ }
+
+ bool MicrophoneIsInitialized() const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ RTC_LOG(INFO) << "output: " << true;
+ return true;
+ }
+
+ int32_t SpeakerVolumeIsAvailable(bool* available) override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ if (output_->SpeakerVolumeIsAvailable(available) == -1) {
+ return -1;
+ }
+ RTC_LOG(INFO) << "output: " << *available;
+ return 0;
+ }
+
+ int32_t SetSpeakerVolume(uint32_t volume) override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ return output_->SetSpeakerVolume(volume);
+ }
+
+ int32_t SpeakerVolume(uint32_t* volume) const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ if (output_->SpeakerVolume(volume) == -1) {
+ return -1;
+ }
+ RTC_LOG(INFO) << "output: " << *volume;
+ return 0;
+ }
+
+ int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ if (output_->MaxSpeakerVolume(maxVolume) == -1) {
+ return -1;
+ }
+ return 0;
+ }
+
+ int32_t MinSpeakerVolume(uint32_t* minVolume) const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ if (output_->MinSpeakerVolume(minVolume) == -1) {
+ return -1;
+ }
+ return 0;
+ }
+
+ int32_t MicrophoneVolumeIsAvailable(bool* available) override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ *available = false;
+ RTC_LOG(INFO) << "output: " << *available;
+ return -1;
+ }
+
+ int32_t SetMicrophoneVolume(uint32_t volume) override {
+ RTC_LOG(INFO) << __FUNCTION__ << "(" << volume << ")";
+ CHECKinitialized_();
+ FATAL() << "Should never be called";
+ return -1;
+ }
+
+ int32_t MicrophoneVolume(uint32_t* volume) const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ FATAL() << "Should never be called";
+ return -1;
+ }
+
+ int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ FATAL() << "Should never be called";
+ return -1;
+ }
+
+ int32_t MinMicrophoneVolume(uint32_t* minVolume) const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ FATAL() << "Should never be called";
+ return -1;
+ }
+
+ int32_t SpeakerMuteIsAvailable(bool* available) override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ FATAL() << "Should never be called";
+ return -1;
+ }
+
+ int32_t SetSpeakerMute(bool enable) override {
+ RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ FATAL() << "Should never be called";
+ return -1;
+ }
+
+ int32_t SpeakerMute(bool* enabled) const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ FATAL() << "Should never be called";
+ return -1;
+ }
+
+ int32_t MicrophoneMuteIsAvailable(bool* available) override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ FATAL() << "Not implemented";
+ return -1;
+ }
+
+ int32_t SetMicrophoneMute(bool enable) override {
+ RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ FATAL() << "Not implemented";
+ return -1;
+ }
+
+ int32_t MicrophoneMute(bool* enabled) const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ FATAL() << "Not implemented";
+ return -1;
+ }
+
+ // Returns true if the audio manager has been configured to support stereo
+ // and false otherwise. Default is mono.
+ int32_t StereoPlayoutIsAvailable(bool* available) const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ *available = audio_manager_->IsStereoPlayoutSupported();
+ RTC_LOG(INFO) << "output: " << *available;
+ return 0;
+ }
+
+ int32_t SetStereoPlayout(bool enable) override {
+ RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ if (PlayoutIsInitialized()) {
+ RTC_LOG(WARNING) << "recording in stereo is not supported";
+ return -1;
+ }
+ bool available = audio_manager_->IsStereoPlayoutSupported();
+ // Android does not support switching between mono and stereo on the fly.
+ // Instead, the native audio layer is configured via the audio manager to
+ // support either mono or stereo. Calling this method is only allowed if
+ // the requested state matches the already configured state.
+ if (enable != available) {
+ RTC_LOG(WARNING) << "failed to change stereo recording";
+ return -1;
+ }
+ int8_t nChannels(1);
+ if (enable) {
+ nChannels = 2;
+ }
+ audio_device_buffer_->SetPlayoutChannels(nChannels);
+ return 0;
+ }
+
+ int32_t StereoPlayout(bool* enabled) const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ *enabled = audio_manager_->IsStereoPlayoutSupported();
+ RTC_LOG(INFO) << "output: " << *enabled;
+ return 0;
+ }
+
+ int32_t StereoRecordingIsAvailable(bool* available) const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ *available = audio_manager_->IsStereoRecordSupported();
+ RTC_LOG(INFO) << "output: " << *available;
+ return 0;
+ }
+
+ int32_t SetStereoRecording(bool enable) override {
+ RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ if (RecordingIsInitialized()) {
+ RTC_LOG(WARNING) << "recording in stereo is not supported";
+ return -1;
+ }
+ bool available = audio_manager_->IsStereoRecordSupported();
+ // Android does not support switching between mono and stereo on the fly.
+ // Instead, the native audio layer is configured via the audio manager to
+ // support either mono or stereo. Calling this method is only allowed if
+ // the requested state matches the already configured state.
+ if (enable != available) {
+ RTC_LOG(WARNING) << "failed to change stereo recording";
+ return -1;
+ }
+ int8_t nChannels(1);
+ if (enable) {
+ nChannels = 2;
+ }
+ audio_device_buffer_->SetRecordingChannels(nChannels);
+ return 0;
+ }
+
+ int32_t StereoRecording(bool* enabled) const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ *enabled = audio_manager_->IsStereoRecordSupported();
+ RTC_LOG(INFO) << "output: " << *enabled;
+ return 0;
+ }
+
+ int32_t PlayoutDelay(uint16_t* delay_ms) const override {
+ CHECKinitialized_();
+ // Best guess we can do is to use half of the estimated total delay.
+ *delay_ms = audio_manager_->GetDelayEstimateInMilliseconds() / 2;
+ RTC_DCHECK_GT(*delay_ms, 0);
+ return 0;
+ }
+
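+ // Example for PlayoutDelay() above (illustrative, assuming
+ // GetDelayEstimateInMilliseconds() returns
+ // kLowLatencyModeDelayEstimateInMilliseconds from audio_common.h): a total
+ // estimate of 50 ms is reported as 50 / 2 = 25 ms.
+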
+ // Returns true if the device both supports built in AEC and the device
+ // is not blacklisted.
+ // Currently, if OpenSL ES is used in both directions, this method will still
+ // report the correct value and it has the correct effect. As an example:
+ // a device supports built in AEC and this method returns true. Libjingle
+ // will then disable the WebRTC based AEC and that will work for all devices
+ // (mainly Nexus) even when OpenSL ES is used for input since our current
+ // implementation will enable built-in AEC by default also for OpenSL ES.
+ // The only "bad" thing that happens today is that when Libjingle calls
+ // OpenSLESRecorder::EnableBuiltInAEC() it will not have any real effect and
+ // a "Not Implemented" log will be filed. This non-perfect state will remain
+ // until I have added full support for audio effects based on OpenSL ES APIs.
+ bool BuiltInAECIsAvailable() const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ bool isAvailable = audio_manager_->IsAcousticEchoCancelerSupported();
+ RTC_LOG(INFO) << "output: " << isAvailable;
+ return isAvailable;
+ }
+
+ // Returns true if the device both supports built in AGC and the device
+ // is not blacklisted.
+ // TODO(henrika): add implementation for OpenSL ES based audio as well.
+ // In addition, see comments for BuiltInAECIsAvailable().
+ bool BuiltInAGCIsAvailable() const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ bool isAvailable = audio_manager_->IsAutomaticGainControlSupported();
+ RTC_LOG(INFO) << "output: " << isAvailable;
+ return isAvailable;
+ }
+
+ // Returns true if the device both supports built in NS and the device
+ // is not blacklisted.
+ // TODO(henrika): add implementation for OpenSL ES based audio as well.
+ // In addition, see comments for BuiltInAECIsAvailable().
+ bool BuiltInNSIsAvailable() const override {
+ RTC_LOG(INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ bool isAvailable = audio_manager_->IsNoiseSuppressorSupported();
+ RTC_LOG(INFO) << "output: " << isAvailable;
+ return isAvailable;
+ }
+
+ // TODO(henrika): add implementation for OpenSL ES based audio as well.
+ int32_t EnableBuiltInAEC(bool enable) override {
+ RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ RTC_CHECK(BuiltInAECIsAvailable()) << "HW AEC is not available";
+ int32_t result = input_->EnableBuiltInAEC(enable);
+ RTC_LOG(INFO) << "output: " << result;
+ return result;
+ }
+
+ // TODO(henrika): add implementation for OpenSL ES based audio as well.
+ int32_t EnableBuiltInAGC(bool enable) override {
+ RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ RTC_CHECK(BuiltInAGCIsAvailable()) << "HW AGC is not available";
+ int32_t result = input_->EnableBuiltInAGC(enable);
+ RTC_LOG(INFO) << "output: " << result;
+ return result;
+ }
+
+ // TODO(henrika): add implementation for OpenSL ES based audio as well.
+ int32_t EnableBuiltInNS(bool enable) override {
+ RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ RTC_CHECK(BuiltInNSIsAvailable()) << "HW NS is not available";
+ int32_t result = input_->EnableBuiltInNS(enable);
+ RTC_LOG(INFO) << "output: " << result;
+ return result;
+ }
+
+ AudioDeviceModule::AudioLayer PlatformAudioLayer() const {
+ RTC_LOG(INFO) << __FUNCTION__;
+ return audio_layer_;
+ }
+
+ int32_t AttachAudioBuffer() {
+ RTC_LOG(INFO) << __FUNCTION__;
+ output_->AttachAudioBuffer(audio_device_buffer_.get());
+ input_->AttachAudioBuffer(audio_device_buffer_.get());
+ return 0;
+ }
+
+ AudioDeviceBuffer* GetAudioDeviceBuffer() {
+ return audio_device_buffer_.get();
+ }
+
+ private:
+ rtc::ThreadChecker thread_checker_;
+
+ AudioDeviceModule::AudioLayer audio_layer_;
+
+  std::unique_ptr<AudioManager> audio_manager_;
+  std::unique_ptr<OutputType> output_;
+  std::unique_ptr<InputType> input_;
+  std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_;
+
+ bool initialized_;
+};
+
+} // namespace android_adm
+
+} // namespace webrtc
+
+#endif // SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_DEVICE_TEMPLATE_ANDROID_H_
diff --git a/sdk/android/src/jni/audio_device/audio_manager.cc b/sdk/android/src/jni/audio_device/audio_manager.cc
new file mode 100644
index 0000000000..f707b3bf20
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/audio_manager.cc
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/android/src/jni/audio_device/audio_manager.h"
+
+#include <utility>
+
+#include "modules/utility/include/helpers_android.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "sdk/android/src/jni/audio_device/audio_common.h"
+
+namespace webrtc {
+
+namespace android_adm {
+
+// AudioManager::JavaAudioManager implementation
+AudioManager::JavaAudioManager::JavaAudioManager(
+ NativeRegistration* native_reg,
+    std::unique_ptr<GlobalRef> audio_manager)
+ : audio_manager_(std::move(audio_manager)),
+ init_(native_reg->GetMethodId("init", "()Z")),
+ dispose_(native_reg->GetMethodId("dispose", "()V")),
+ is_communication_mode_enabled_(
+ native_reg->GetMethodId("isCommunicationModeEnabled", "()Z")),
+ is_device_blacklisted_for_open_sles_usage_(
+ native_reg->GetMethodId("isDeviceBlacklistedForOpenSLESUsage",
+ "()Z")) {
+ RTC_LOG(INFO) << "JavaAudioManager::ctor";
+}
+
+AudioManager::JavaAudioManager::~JavaAudioManager() {
+ RTC_LOG(INFO) << "JavaAudioManager::~dtor";
+}
+
+bool AudioManager::JavaAudioManager::Init() {
+ return audio_manager_->CallBooleanMethod(init_);
+}
+
+void AudioManager::JavaAudioManager::Close() {
+ audio_manager_->CallVoidMethod(dispose_);
+}
+
+bool AudioManager::JavaAudioManager::IsCommunicationModeEnabled() {
+ return audio_manager_->CallBooleanMethod(is_communication_mode_enabled_);
+}
+
+bool AudioManager::JavaAudioManager::IsDeviceBlacklistedForOpenSLESUsage() {
+ return audio_manager_->CallBooleanMethod(
+ is_device_blacklisted_for_open_sles_usage_);
+}
+
+// AudioManager implementation
+AudioManager::AudioManager()
+ : j_environment_(JVM::GetInstance()->environment()),
+ audio_layer_(AudioDeviceModule::kPlatformDefaultAudio),
+ initialized_(false),
+ hardware_aec_(false),
+ hardware_agc_(false),
+ hardware_ns_(false),
+ low_latency_playout_(false),
+ low_latency_record_(false),
+ delay_estimate_in_milliseconds_(0) {
+ RTC_LOG(INFO) << "ctor";
+ RTC_CHECK(j_environment_);
+ JNINativeMethod native_methods[] = {
+ {"nativeCacheAudioParameters", "(IIIZZZZZZZIIJ)V",
+       reinterpret_cast<void*>(&AudioManager::CacheAudioParameters)}};
+ j_native_registration_ = j_environment_->RegisterNatives(
+ "org/webrtc/voiceengine/WebRtcAudioManager", native_methods,
+ arraysize(native_methods));
+  j_audio_manager_.reset(
+      new JavaAudioManager(j_native_registration_.get(),
+                           j_native_registration_->NewObject(
+                               "<init>", "(J)V", PointerTojlong(this))));
+}
+
+AudioManager::~AudioManager() {
+ RTC_LOG(INFO) << "dtor";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ Close();
+}
+
+void AudioManager::SetActiveAudioLayer(
+ AudioDeviceModule::AudioLayer audio_layer) {
+ RTC_LOG(INFO) << "SetActiveAudioLayer: " << audio_layer;
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!initialized_);
+ // Store the currently utilized audio layer.
+ audio_layer_ = audio_layer;
+ // The delay estimate can take one of two fixed values depending on if the
+ // device supports low-latency output or not. However, it is also possible
+ // that the user explicitly selects the high-latency audio path, hence we use
+ // the selected |audio_layer| here to set the delay estimate.
+ delay_estimate_in_milliseconds_ =
+ (audio_layer == AudioDeviceModule::kAndroidJavaAudio)
+ ? kHighLatencyModeDelayEstimateInMilliseconds
+ : kLowLatencyModeDelayEstimateInMilliseconds;
+ RTC_LOG(INFO) << "delay_estimate_in_milliseconds: "
+ << delay_estimate_in_milliseconds_;
+}
+
+SLObjectItf AudioManager::GetOpenSLEngine() {
+ RTC_LOG(INFO) << "GetOpenSLEngine";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ // Only allow usage of OpenSL ES if such an audio layer has been specified.
+ if (audio_layer_ != AudioDeviceModule::kAndroidOpenSLESAudio &&
+ audio_layer_ !=
+ AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio) {
+ RTC_LOG(INFO)
+ << "Unable to create OpenSL engine for the current audio layer: "
+ << audio_layer_;
+ return nullptr;
+ }
+  // OpenSL ES for Android only supports a single engine per application.
+  // If one has already been created, return the existing object instead of
+  // creating a new one.
+ if (engine_object_.Get() != nullptr) {
+ RTC_LOG(WARNING) << "The OpenSL ES engine object has already been created";
+ return engine_object_.Get();
+ }
+ // Create the engine object in thread safe mode.
+ const SLEngineOption option[] = {
+      {SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE)}};
+ SLresult result =
+ slCreateEngine(engine_object_.Receive(), 1, option, 0, NULL, NULL);
+ if (result != SL_RESULT_SUCCESS) {
+ RTC_LOG(LS_ERROR) << "slCreateEngine() failed: "
+ << GetSLErrorString(result);
+ engine_object_.Reset();
+ return nullptr;
+ }
+ // Realize the SL Engine in synchronous mode.
+ result = engine_object_->Realize(engine_object_.Get(), SL_BOOLEAN_FALSE);
+ if (result != SL_RESULT_SUCCESS) {
+ RTC_LOG(LS_ERROR) << "Realize() failed: " << GetSLErrorString(result);
+ engine_object_.Reset();
+ return nullptr;
+ }
+ // Finally return the SLObjectItf interface of the engine object.
+ return engine_object_.Get();
+}
+
+bool AudioManager::Init() {
+ RTC_LOG(INFO) << "Init";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK_NE(audio_layer_, AudioDeviceModule::kPlatformDefaultAudio);
+ if (!j_audio_manager_->Init()) {
+ RTC_LOG(LS_ERROR) << "Init() failed";
+ return false;
+ }
+ initialized_ = true;
+ return true;
+}
+
+bool AudioManager::Close() {
+ RTC_LOG(INFO) << "Close";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (!initialized_)
+ return true;
+ j_audio_manager_->Close();
+ initialized_ = false;
+ return true;
+}
+
+bool AudioManager::IsCommunicationModeEnabled() const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return j_audio_manager_->IsCommunicationModeEnabled();
+}
+
+bool AudioManager::IsAcousticEchoCancelerSupported() const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return hardware_aec_;
+}
+
+bool AudioManager::IsAutomaticGainControlSupported() const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return hardware_agc_;
+}
+
+bool AudioManager::IsNoiseSuppressorSupported() const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return hardware_ns_;
+}
+
+bool AudioManager::IsLowLatencyPlayoutSupported() const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ // Some devices are blacklisted for usage of OpenSL ES even if they report
+ // that low-latency playout is supported. See b/21485703 for details.
+ return j_audio_manager_->IsDeviceBlacklistedForOpenSLESUsage()
+ ? false
+ : low_latency_playout_;
+}
+
+bool AudioManager::IsLowLatencyRecordSupported() const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return low_latency_record_;
+}
+
+bool AudioManager::IsProAudioSupported() const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  // TODO(henrika): return the state independently of whether OpenSL ES is
+ // blacklisted or not for now. We could use the same approach as in
+ // IsLowLatencyPlayoutSupported() but I can't see the need for it yet.
+ return pro_audio_;
+}
+
+// TODO(henrika): improve comments...
+bool AudioManager::IsAAudioSupported() const {
+#if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+ return a_audio_;
+#else
+ return false;
+#endif
+}
+
+bool AudioManager::IsStereoPlayoutSupported() const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return (playout_parameters_.channels() == 2);
+}
+
+bool AudioManager::IsStereoRecordSupported() const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return (record_parameters_.channels() == 2);
+}
+
+int AudioManager::GetDelayEstimateInMilliseconds() const {
+ return delay_estimate_in_milliseconds_;
+}
+
+void JNICALL AudioManager::CacheAudioParameters(JNIEnv* env,
+ jobject obj,
+ jint sample_rate,
+ jint output_channels,
+ jint input_channels,
+ jboolean hardware_aec,
+ jboolean hardware_agc,
+ jboolean hardware_ns,
+ jboolean low_latency_output,
+ jboolean low_latency_input,
+ jboolean pro_audio,
+ jboolean a_audio,
+ jint output_buffer_size,
+ jint input_buffer_size,
+ jlong native_audio_manager) {
+ AudioManager* this_object =
+      reinterpret_cast<AudioManager*>(native_audio_manager);
+ this_object->OnCacheAudioParameters(
+ env, sample_rate, output_channels, input_channels, hardware_aec,
+ hardware_agc, hardware_ns, low_latency_output, low_latency_input,
+ pro_audio, a_audio, output_buffer_size, input_buffer_size);
+}
+
+void AudioManager::OnCacheAudioParameters(JNIEnv* env,
+ jint sample_rate,
+ jint output_channels,
+ jint input_channels,
+ jboolean hardware_aec,
+ jboolean hardware_agc,
+ jboolean hardware_ns,
+ jboolean low_latency_output,
+ jboolean low_latency_input,
+ jboolean pro_audio,
+ jboolean a_audio,
+ jint output_buffer_size,
+ jint input_buffer_size) {
+ RTC_LOG(INFO)
+ << "OnCacheAudioParameters: "
+ << "hardware_aec: " << static_cast(hardware_aec)
+ << ", hardware_agc: " << static_cast(hardware_agc)
+ << ", hardware_ns: " << static_cast(hardware_ns)
+ << ", low_latency_output: " << static_cast(low_latency_output)
+ << ", low_latency_input: " << static_cast(low_latency_input)
+ << ", pro_audio: " << static_cast(pro_audio)
+ << ", a_audio: " << static_cast(a_audio)
+ << ", sample_rate: " << static_cast(sample_rate)
+ << ", output_channels: " << static_cast(output_channels)
+ << ", input_channels: " << static_cast(input_channels)
+ << ", output_buffer_size: " << static_cast(output_buffer_size)
+ << ", input_buffer_size: " << static_cast(input_buffer_size);
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ hardware_aec_ = hardware_aec;
+ hardware_agc_ = hardware_agc;
+ hardware_ns_ = hardware_ns;
+ low_latency_playout_ = low_latency_output;
+ low_latency_record_ = low_latency_input;
+ pro_audio_ = pro_audio;
+ a_audio_ = a_audio;
+  playout_parameters_.reset(sample_rate, static_cast<size_t>(output_channels),
+                            static_cast<size_t>(output_buffer_size));
+  record_parameters_.reset(sample_rate, static_cast<size_t>(input_channels),
+                           static_cast<size_t>(input_buffer_size));
+}
+
+const AudioParameters& AudioManager::GetPlayoutAudioParameters() {
+ RTC_CHECK(playout_parameters_.is_valid());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return playout_parameters_;
+}
+
+const AudioParameters& AudioManager::GetRecordAudioParameters() {
+ RTC_CHECK(record_parameters_.is_valid());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return record_parameters_;
+}
+
+} // namespace android_adm
+
+} // namespace webrtc
diff --git a/sdk/android/src/jni/audio_device/audio_manager.h b/sdk/android/src/jni/audio_device/audio_manager.h
new file mode 100644
index 0000000000..0214989b10
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/audio_manager.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_MANAGER_H_
+#define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_MANAGER_H_
+
+#include <SLES/OpenSLES.h>
+#include <jni.h>
+#include <memory>
+
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/utility/include/helpers_android.h"
+#include "modules/utility/include/jvm_android.h"
+#include "rtc_base/thread_checker.h"
+#include "sdk/android/src/jni/audio_device/audio_common.h"
+#include "sdk/android/src/jni/audio_device/opensles_common.h"
+
+namespace webrtc {
+
+namespace android_adm {
+
+// Implements support for functions in the WebRTC audio stack for Android that
+// rely on the AudioManager in android.media. It also populates an
+// AudioParameters structure with native audio parameters detected at
+// construction. This class does not make any audio-related modifications
+// unless Init() is called. Caching audio parameters makes no changes; it
+// only reads data from the Java side.
+class AudioManager {
+ public:
+ // Wraps the Java specific parts of the AudioManager into one helper class.
+ // Stores method IDs for all supported methods at construction and then
+ // allows calls like JavaAudioManager::Close() while hiding the Java/JNI
+ // parts that are associated with this call.
+ class JavaAudioManager {
+ public:
+    JavaAudioManager(NativeRegistration* native_registration,
+                     std::unique_ptr<GlobalRef> audio_manager);
+ ~JavaAudioManager();
+
+ bool Init();
+ void Close();
+ bool IsCommunicationModeEnabled();
+ bool IsDeviceBlacklistedForOpenSLESUsage();
+
+ private:
+    std::unique_ptr<GlobalRef> audio_manager_;
+ jmethodID init_;
+ jmethodID dispose_;
+ jmethodID is_communication_mode_enabled_;
+ jmethodID is_device_blacklisted_for_open_sles_usage_;
+ };
+
+ AudioManager();
+ ~AudioManager();
+
+ // Sets the currently active audio layer combination. Must be called before
+ // Init().
+ void SetActiveAudioLayer(AudioDeviceModule::AudioLayer audio_layer);
+
+  // Creates and realizes the main (global) OpenSL ES engine object and
+  // returns a reference to it. The engine object is only created at the first
+  // call since OpenSL ES for Android only supports a single engine per
+  // application. Subsequent calls return the already created engine. The SL
+  // engine object is destroyed when the AudioManager object is deleted, which
+  // means that the engine object will be the first OpenSL ES object to be
+  // created and the last object to be destroyed.
+ // Note that NULL will be returned unless the audio layer is specified as
+ // AudioDeviceModule::kAndroidOpenSLESAudio or
+ // AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio.
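+  // Usage sketch (standard OpenSL ES calls, shown for illustration only):
+  //   SLObjectItf engine_object = audio_manager->GetOpenSLEngine();
+  //   SLEngineItf engine;
+  //   (*engine_object)->GetInterface(engine_object, SL_IID_ENGINE, &engine);
+  //   // |engine| can now be used to create recorder and player objects.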
+ SLObjectItf GetOpenSLEngine();
+
+ // Initializes the audio manager and stores the current audio mode.
+ bool Init();
+  // Reverts any settings done by Init().
+ bool Close();
+
+ // Returns true if current audio mode is AudioManager.MODE_IN_COMMUNICATION.
+ bool IsCommunicationModeEnabled() const;
+
+ // Native audio parameters stored during construction.
+ const AudioParameters& GetPlayoutAudioParameters();
+ const AudioParameters& GetRecordAudioParameters();
+
+ // Returns true if the device supports built-in audio effects for AEC, AGC
+ // and NS. Some devices can also be blacklisted for use in combination with
+ // platform effects and these devices will return false.
+ // Can currently only be used in combination with a Java based audio backend
+  // for the recording side (i.e. using the android.media.AudioRecord API).
+ bool IsAcousticEchoCancelerSupported() const;
+ bool IsAutomaticGainControlSupported() const;
+ bool IsNoiseSuppressorSupported() const;
+
+ // Returns true if the device supports the low-latency audio paths in
+ // combination with OpenSL ES.
+ bool IsLowLatencyPlayoutSupported() const;
+ bool IsLowLatencyRecordSupported() const;
+
+ // Returns true if the device supports (and has been configured for) stereo.
+ // Call the Java API WebRtcAudioManager.setStereoOutput/Input() with true as
+  // parameter to enable stereo. Default is mono in both directions and the
+ // setting is set once and for all when the audio manager object is created.
+ // TODO(henrika): stereo is not supported in combination with OpenSL ES.
+ bool IsStereoPlayoutSupported() const;
+ bool IsStereoRecordSupported() const;
+
+ // Returns true if the device supports pro-audio features in combination with
+ // OpenSL ES.
+ bool IsProAudioSupported() const;
+
+ // Returns true if the device supports AAudio.
+ bool IsAAudioSupported() const;
+
+ // Returns the estimated total delay of this device. Unit is in milliseconds.
+  // The value is set once at construction and never changes after that.
+ // Possible values are webrtc::kLowLatencyModeDelayEstimateInMilliseconds and
+ // webrtc::kHighLatencyModeDelayEstimateInMilliseconds.
+ int GetDelayEstimateInMilliseconds() const;
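+  // Example (sketch): the ADM layer reports half of this value as the
+  // playout delay:
+  //   *delay_ms = audio_manager_->GetDelayEstimateInMilliseconds() / 2;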
+
+ private:
+ // Called from Java side so we can cache the native audio parameters.
+ // This method will be called by the WebRtcAudioManager constructor, i.e.
+ // on the same thread that this object is created on.
+ static void JNICALL CacheAudioParameters(JNIEnv* env,
+ jobject obj,
+ jint sample_rate,
+ jint output_channels,
+ jint input_channels,
+ jboolean hardware_aec,
+ jboolean hardware_agc,
+ jboolean hardware_ns,
+ jboolean low_latency_output,
+ jboolean low_latency_input,
+ jboolean pro_audio,
+ jboolean a_audio,
+ jint output_buffer_size,
+ jint input_buffer_size,
+ jlong native_audio_manager);
+ void OnCacheAudioParameters(JNIEnv* env,
+ jint sample_rate,
+ jint output_channels,
+ jint input_channels,
+ jboolean hardware_aec,
+ jboolean hardware_agc,
+ jboolean hardware_ns,
+ jboolean low_latency_output,
+ jboolean low_latency_input,
+ jboolean pro_audio,
+ jboolean a_audio,
+ jint output_buffer_size,
+ jint input_buffer_size);
+
+ // Stores thread ID in the constructor.
+ // We can then use ThreadChecker::CalledOnValidThread() to ensure that
+ // other methods are called from the same thread.
+ rtc::ThreadChecker thread_checker_;
+
+ // Calls AttachCurrentThread() if this thread is not attached at construction.
+ // Also ensures that DetachCurrentThread() is called at destruction.
+ AttachCurrentThreadIfNeeded attach_thread_if_needed_;
+
+ // Wraps the JNI interface pointer and methods associated with it.
+  std::unique_ptr<JNIEnvironment> j_environment_;
+
+ // Contains factory method for creating the Java object.
+  std::unique_ptr<NativeRegistration> j_native_registration_;
+
+ // Wraps the Java specific parts of the AudioManager.
+  std::unique_ptr<JavaAudioManager> j_audio_manager_;
+
+ // Contains the selected audio layer specified by the AudioLayer enumerator
+ // in the AudioDeviceModule class.
+ AudioDeviceModule::AudioLayer audio_layer_;
+
+ // This object is the global entry point of the OpenSL ES API.
+  // After creating the engine object, the application can obtain this
+  // object's SLEngineItf interface. This interface contains creation methods
+  // for all the other object types in the API. None of these interfaces are
+  // realized by this class. It only provides access to the global engine
+  // object.
+ ScopedSLObjectItf engine_object_;
+
+ // Set to true by Init() and false by Close().
+ bool initialized_;
+
+ // True if device supports hardware (or built-in) AEC.
+ bool hardware_aec_;
+ // True if device supports hardware (or built-in) AGC.
+ bool hardware_agc_;
+ // True if device supports hardware (or built-in) NS.
+ bool hardware_ns_;
+
+ // True if device supports the low-latency OpenSL ES audio path for output.
+ bool low_latency_playout_;
+
+ // True if device supports the low-latency OpenSL ES audio path for input.
+ bool low_latency_record_;
+
+ // True if device supports the low-latency OpenSL ES pro-audio path.
+ bool pro_audio_;
+
+ // True if device supports the low-latency AAudio audio path.
+ bool a_audio_;
+
+ // The delay estimate can take one of two fixed values depending on if the
+ // device supports low-latency output or not.
+ int delay_estimate_in_milliseconds_;
+
+ // Contains native parameters (e.g. sample rate, channel configuration).
+ // Set at construction in OnCacheAudioParameters() which is called from
+ // Java on the same thread as this object is created on.
+ AudioParameters playout_parameters_;
+ AudioParameters record_parameters_;
+};
+
+} // namespace android_adm
+
+} // namespace webrtc
+
+#endif // SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_MANAGER_H_
diff --git a/sdk/android/src/jni/audio_device/audio_record_jni.cc b/sdk/android/src/jni/audio_device/audio_record_jni.cc
new file mode 100644
index 0000000000..d688ac433b
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/audio_record_jni.cc
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/android/src/jni/audio_device/audio_record_jni.h"
+
+#include <string>
+#include <utility>
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/format_macros.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/timeutils.h"
+#include "sdk/android/src/jni/audio_device/audio_common.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace android_adm {
+
+namespace {
+// Scoped class which logs its lifetime as a UMA statistic. It generates
+// a histogram which measures the time it takes for a method/scope to execute.
+class ScopedHistogramTimer {
+ public:
+ explicit ScopedHistogramTimer(const std::string& name)
+ : histogram_name_(name), start_time_ms_(rtc::TimeMillis()) {}
+ ~ScopedHistogramTimer() {
+ const int64_t life_time_ms = rtc::TimeSince(start_time_ms_);
+ RTC_HISTOGRAM_COUNTS_1000(histogram_name_, life_time_ms);
+ RTC_LOG(INFO) << histogram_name_ << ": " << life_time_ms;
+ }
+
+ private:
+ const std::string histogram_name_;
+ int64_t start_time_ms_;
+};
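+
+// Usage (see InitRecording() below):
+//   ScopedHistogramTimer timer("WebRTC.Audio.InitRecordingDurationMs");
+//   ...  // the histogram sample is recorded when |timer| goes out of scope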
+} // namespace
+
+// AudioRecordJni::JavaAudioRecord implementation.
+AudioRecordJni::JavaAudioRecord::JavaAudioRecord(
+ NativeRegistration* native_reg,
+    std::unique_ptr<GlobalRef> audio_record)
+ : audio_record_(std::move(audio_record)),
+ init_recording_(native_reg->GetMethodId("initRecording", "(II)I")),
+ start_recording_(native_reg->GetMethodId("startRecording", "()Z")),
+ stop_recording_(native_reg->GetMethodId("stopRecording", "()Z")),
+ enable_built_in_aec_(native_reg->GetMethodId("enableBuiltInAEC", "(Z)Z")),
+ enable_built_in_ns_(native_reg->GetMethodId("enableBuiltInNS", "(Z)Z")) {}
+
+AudioRecordJni::JavaAudioRecord::~JavaAudioRecord() {}
+
+int AudioRecordJni::JavaAudioRecord::InitRecording(int sample_rate,
+ size_t channels) {
+  return audio_record_->CallIntMethod(init_recording_,
+                                      static_cast<jint>(sample_rate),
+                                      static_cast<jint>(channels));
+}
+
+bool AudioRecordJni::JavaAudioRecord::StartRecording() {
+ return audio_record_->CallBooleanMethod(start_recording_);
+}
+
+bool AudioRecordJni::JavaAudioRecord::StopRecording() {
+ return audio_record_->CallBooleanMethod(stop_recording_);
+}
+
+bool AudioRecordJni::JavaAudioRecord::EnableBuiltInAEC(bool enable) {
+  return audio_record_->CallBooleanMethod(enable_built_in_aec_,
+                                          static_cast<jboolean>(enable));
+}
+
+bool AudioRecordJni::JavaAudioRecord::EnableBuiltInNS(bool enable) {
+  return audio_record_->CallBooleanMethod(enable_built_in_ns_,
+                                          static_cast<jboolean>(enable));
+}
+
+// AudioRecordJni implementation.
+AudioRecordJni::AudioRecordJni(AudioManager* audio_manager)
+ : j_environment_(JVM::GetInstance()->environment()),
+ audio_manager_(audio_manager),
+ audio_parameters_(audio_manager->GetRecordAudioParameters()),
+ total_delay_in_milliseconds_(0),
+ direct_buffer_address_(nullptr),
+ direct_buffer_capacity_in_bytes_(0),
+ frames_per_buffer_(0),
+ initialized_(false),
+ recording_(false),
+ audio_device_buffer_(nullptr) {
+ RTC_LOG(INFO) << "ctor";
+ RTC_DCHECK(audio_parameters_.is_valid());
+ RTC_CHECK(j_environment_);
+ JNINativeMethod native_methods[] = {
+ {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
+       reinterpret_cast<void*>(&AudioRecordJni::CacheDirectBufferAddress)},
+      {"nativeDataIsRecorded", "(IJ)V",
+       reinterpret_cast<void*>(&AudioRecordJni::DataIsRecorded)}};
+ j_native_registration_ = j_environment_->RegisterNatives(
+ "org/webrtc/voiceengine/WebRtcAudioRecord", native_methods,
+ arraysize(native_methods));
+  j_audio_record_.reset(
+      new JavaAudioRecord(j_native_registration_.get(),
+                          j_native_registration_->NewObject(
+                              "<init>", "(J)V", PointerTojlong(this))));
+ // Detach from this thread since we want to use the checker to verify calls
+ // from the Java based audio thread.
+ thread_checker_java_.DetachFromThread();
+}
+
+AudioRecordJni::~AudioRecordJni() {
+ RTC_LOG(INFO) << "dtor";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ Terminate();
+}
+
+int32_t AudioRecordJni::Init() {
+ RTC_LOG(INFO) << "Init";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return 0;
+}
+
+int32_t AudioRecordJni::Terminate() {
+ RTC_LOG(INFO) << "Terminate";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ StopRecording();
+ return 0;
+}
+
+int32_t AudioRecordJni::InitRecording() {
+ RTC_LOG(INFO) << "InitRecording";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!recording_);
+ ScopedHistogramTimer timer("WebRTC.Audio.InitRecordingDurationMs");
+ int frames_per_buffer = j_audio_record_->InitRecording(
+ audio_parameters_.sample_rate(), audio_parameters_.channels());
+ if (frames_per_buffer < 0) {
+ direct_buffer_address_ = nullptr;
+ RTC_LOG(LS_ERROR) << "InitRecording failed";
+ return -1;
+ }
+  frames_per_buffer_ = static_cast<size_t>(frames_per_buffer);
+ RTC_LOG(INFO) << "frames_per_buffer: " << frames_per_buffer_;
+ const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t);
+ RTC_CHECK_EQ(direct_buffer_capacity_in_bytes_,
+ frames_per_buffer_ * bytes_per_frame);
+ RTC_CHECK_EQ(frames_per_buffer_, audio_parameters_.frames_per_10ms_buffer());
+ initialized_ = true;
+ return 0;
+}
+
+int32_t AudioRecordJni::StartRecording() {
+ RTC_LOG(INFO) << "StartRecording";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!recording_);
+ if (!initialized_) {
+ RTC_DLOG(LS_WARNING)
+ << "Recording can not start since InitRecording must succeed first";
+ return 0;
+ }
+ ScopedHistogramTimer timer("WebRTC.Audio.StartRecordingDurationMs");
+ if (!j_audio_record_->StartRecording()) {
+ RTC_LOG(LS_ERROR) << "StartRecording failed";
+ return -1;
+ }
+ recording_ = true;
+ return 0;
+}
+
+int32_t AudioRecordJni::StopRecording() {
+ RTC_LOG(INFO) << "StopRecording";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (!initialized_ || !recording_) {
+ return 0;
+ }
+ if (!j_audio_record_->StopRecording()) {
+ RTC_LOG(LS_ERROR) << "StopRecording failed";
+ return -1;
+ }
+ // If we don't detach here, we will hit a RTC_DCHECK in OnDataIsRecorded()
+ // next time StartRecording() is called since it will create a new Java
+ // thread.
+ thread_checker_java_.DetachFromThread();
+ initialized_ = false;
+ recording_ = false;
+ direct_buffer_address_ = nullptr;
+ return 0;
+}
+
+void AudioRecordJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ RTC_LOG(INFO) << "AttachAudioBuffer";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ audio_device_buffer_ = audioBuffer;
+ const int sample_rate_hz = audio_parameters_.sample_rate();
+ RTC_LOG(INFO) << "SetRecordingSampleRate(" << sample_rate_hz << ")";
+ audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz);
+ const size_t channels = audio_parameters_.channels();
+ RTC_LOG(INFO) << "SetRecordingChannels(" << channels << ")";
+ audio_device_buffer_->SetRecordingChannels(channels);
+ total_delay_in_milliseconds_ =
+ audio_manager_->GetDelayEstimateInMilliseconds();
+ RTC_DCHECK_GT(total_delay_in_milliseconds_, 0);
+ RTC_LOG(INFO) << "total_delay_in_milliseconds: "
+ << total_delay_in_milliseconds_;
+}
+
+int32_t AudioRecordJni::EnableBuiltInAEC(bool enable) {
+ RTC_LOG(INFO) << "EnableBuiltInAEC(" << enable << ")";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return j_audio_record_->EnableBuiltInAEC(enable) ? 0 : -1;
+}
+
+int32_t AudioRecordJni::EnableBuiltInAGC(bool enable) {
+ // TODO(henrika): possibly remove when no longer used by any client.
+ FATAL() << "Should never be called";
+ return -1;
+}
+
+int32_t AudioRecordJni::EnableBuiltInNS(bool enable) {
+ RTC_LOG(INFO) << "EnableBuiltInNS(" << enable << ")";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return j_audio_record_->EnableBuiltInNS(enable) ? 0 : -1;
+}
+
+void JNICALL AudioRecordJni::CacheDirectBufferAddress(JNIEnv* env,
+ jobject obj,
+ jobject byte_buffer,
+ jlong nativeAudioRecord) {
+ AudioRecordJni* this_object =
+      reinterpret_cast<AudioRecordJni*>(nativeAudioRecord);
+ this_object->OnCacheDirectBufferAddress(env, byte_buffer);
+}
+
+void AudioRecordJni::OnCacheDirectBufferAddress(JNIEnv* env,
+ jobject byte_buffer) {
+ RTC_LOG(INFO) << "OnCacheDirectBufferAddress";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!direct_buffer_address_);
+ direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
+ jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
+ RTC_LOG(INFO) << "direct buffer capacity: " << capacity;
+  direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
+}
+
+void JNICALL AudioRecordJni::DataIsRecorded(JNIEnv* env,
+ jobject obj,
+ jint length,
+ jlong nativeAudioRecord) {
+ AudioRecordJni* this_object =
+      reinterpret_cast<AudioRecordJni*>(nativeAudioRecord);
+ this_object->OnDataIsRecorded(length);
+}
+
+// This method is called on a high-priority thread from Java. The name of
+// the thread is 'AudioRecordThread'.
+void AudioRecordJni::OnDataIsRecorded(int length) {
+ RTC_DCHECK(thread_checker_java_.CalledOnValidThread());
+ if (!audio_device_buffer_) {
+ RTC_LOG(LS_ERROR) << "AttachAudioBuffer has not been called";
+ return;
+ }
+ audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
+ frames_per_buffer_);
+  // We provide one (combined) fixed delay estimate for the APM and use the
+  // |playDelayMs| parameter only. Components like the AEC only see the sum
+  // of |playDelayMs| and |recDelayMs|, hence the distribution does not matter.
+ audio_device_buffer_->SetVQEData(total_delay_in_milliseconds_, 0);
+ if (audio_device_buffer_->DeliverRecordedData() == -1) {
+ RTC_LOG(INFO) << "AudioDeviceBuffer::DeliverRecordedData failed";
+ }
+}
+
+} // namespace android_adm
+
+} // namespace webrtc
diff --git a/sdk/android/src/jni/audio_device/audio_record_jni.h b/sdk/android/src/jni/audio_device/audio_record_jni.h
new file mode 100644
index 0000000000..9abe814571
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/audio_record_jni.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_RECORD_JNI_H_
+#define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_RECORD_JNI_H_
+
+#include <jni.h>
+#include <memory>
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/utility/include/helpers_android.h"
+#include "modules/utility/include/jvm_android.h"
+#include "rtc_base/thread_checker.h"
+#include "sdk/android/src/jni/audio_device/audio_manager.h"
+
+namespace webrtc {
+
+namespace android_adm {
+
+// Implements 16-bit mono PCM audio input support for Android using the Java
+// AudioRecord interface. Most of the work is done by its Java counterpart in
+// WebRtcAudioRecord.java. This class is created and lives on a thread in
+// C++-land, but recorded audio buffers are delivered on a high-priority
+// thread managed by the Java class.
+//
+// The Java class makes use of AudioEffect features (mainly AEC) which are
+// first available in Jelly Bean. If it is instantiated on devices running
+// earlier SDKs, the AEC provided by the APM in WebRTC must be used and
+// enabled separately instead.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread.
+//
+// This class uses AttachCurrentThreadIfNeeded to attach to a Java VM if needed
+// and detach when the object goes out of scope. Additional thread checking
+// guarantees that no other (possibly non attached) thread is used.
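+//
+// A typical call sequence from the ADM layer looks roughly as follows
+// (an illustrative sketch only; |audio_manager| is an AudioManager*):
+//   AudioRecordJni recorder(audio_manager);
+//   recorder.Init();
+//   recorder.AttachAudioBuffer(audio_device_buffer);
+//   recorder.InitRecording();
+//   recorder.StartRecording();
+//   ...  // audio buffers are delivered via OnDataIsRecorded() callbacks
+//   recorder.StopRecording();
+//   recorder.Terminate();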
+class AudioRecordJni {
+ public:
+ // Wraps the Java specific parts of the AudioRecordJni into one helper class.
+ class JavaAudioRecord {
+ public:
+    JavaAudioRecord(NativeRegistration* native_registration,
+                    std::unique_ptr<GlobalRef> audio_record);
+ ~JavaAudioRecord();
+
+ int InitRecording(int sample_rate, size_t channels);
+ bool StartRecording();
+ bool StopRecording();
+ bool EnableBuiltInAEC(bool enable);
+ bool EnableBuiltInNS(bool enable);
+
+ private:
+    std::unique_ptr<GlobalRef> audio_record_;
+ jmethodID init_recording_;
+ jmethodID start_recording_;
+ jmethodID stop_recording_;
+ jmethodID enable_built_in_aec_;
+ jmethodID enable_built_in_ns_;
+ };
+
+ explicit AudioRecordJni(AudioManager* audio_manager);
+ ~AudioRecordJni();
+
+ int32_t Init();
+ int32_t Terminate();
+
+ int32_t InitRecording();
+ bool RecordingIsInitialized() const { return initialized_; }
+
+ int32_t StartRecording();
+ int32_t StopRecording();
+ bool Recording() const { return recording_; }
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ int32_t EnableBuiltInAEC(bool enable);
+ int32_t EnableBuiltInAGC(bool enable);
+ int32_t EnableBuiltInNS(bool enable);
+
+ private:
+  // Called from Java side so we can cache the address of the Java-managed
+ // |byte_buffer| in |direct_buffer_address_|. The size of the buffer
+ // is also stored in |direct_buffer_capacity_in_bytes_|.
+ // This method will be called by the WebRtcAudioRecord constructor, i.e.,
+ // on the same thread that this object is created on.
+ static void JNICALL CacheDirectBufferAddress(JNIEnv* env,
+ jobject obj,
+ jobject byte_buffer,
+ jlong nativeAudioRecord);
+ void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);
+
+ // Called periodically by the Java based WebRtcAudioRecord object when
+ // recording has started. Each call indicates that there are |length| new
+ // bytes recorded in the memory area |direct_buffer_address_| and it is
+ // now time to send these to the consumer.
+ // This method is called on a high-priority thread from Java. The name of
+ // the thread is 'AudioRecordThread'.
+ static void JNICALL DataIsRecorded(JNIEnv* env,
+ jobject obj,
+ jint length,
+ jlong nativeAudioRecord);
+ void OnDataIsRecorded(int length);
+
+ // Stores thread ID in constructor.
+ rtc::ThreadChecker thread_checker_;
+
+ // Stores thread ID in first call to OnDataIsRecorded() from high-priority
+ // thread in Java. Detached during construction of this object.
+ rtc::ThreadChecker thread_checker_java_;
+
+ // Calls AttachCurrentThread() if this thread is not attached at construction.
+ // Also ensures that DetachCurrentThread() is called at destruction.
+ AttachCurrentThreadIfNeeded attach_thread_if_needed_;
+
+ // Wraps the JNI interface pointer and methods associated with it.
+  std::unique_ptr<JNIEnvironment> j_environment_;
+
+ // Contains factory method for creating the Java object.
+  std::unique_ptr<NativeRegistration> j_native_registration_;
+
+ // Wraps the Java specific parts of the AudioRecordJni class.
+  std::unique_ptr<JavaAudioRecord> j_audio_record_;
+
+  // Raw pointer to the audio manager.
+ const AudioManager* audio_manager_;
+
+ // Contains audio parameters provided to this class at construction by the
+ // AudioManager.
+ const AudioParameters audio_parameters_;
+
+ // Delay estimate of the total round-trip delay (input + output).
+ // Fixed value set once in AttachAudioBuffer() and it can take one out of two
+ // possible values. See audio_common.h for details.
+ int total_delay_in_milliseconds_;
+
+ // Cached copy of address to direct audio buffer owned by |j_audio_record_|.
+ void* direct_buffer_address_;
+
+ // Number of bytes in the direct audio buffer owned by |j_audio_record_|.
+ size_t direct_buffer_capacity_in_bytes_;
+
+  // Number of audio frames per audio buffer. Each audio frame corresponds to
+ // one sample of PCM mono data at 16 bits per sample. Hence, each audio
+ // frame contains 2 bytes (given that the Java layer only supports mono).
+ // Example: 480 for 48000 Hz or 441 for 44100 Hz.
+ size_t frames_per_buffer_;
+
+ bool initialized_;
+
+ bool recording_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class, which is created by
+  // AudioDeviceModule::Create().
+ AudioDeviceBuffer* audio_device_buffer_;
+};
+
+} // namespace android_adm
+
+} // namespace webrtc
+
+#endif // SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_RECORD_JNI_H_
diff --git a/sdk/android/src/jni/audio_device/audio_track_jni.cc b/sdk/android/src/jni/audio_device/audio_track_jni.cc
new file mode 100644
index 0000000000..422efe1389
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/audio_track_jni.cc
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/android/src/jni/audio_device/audio_track_jni.h"
+#include "sdk/android/src/jni/audio_device/audio_manager.h"
+
+#include <utility>
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/format_macros.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+
+namespace webrtc {
+
+namespace android_adm {
+
+// AudioTrackJni::JavaAudioTrack implementation.
+AudioTrackJni::JavaAudioTrack::JavaAudioTrack(
+ NativeRegistration* native_reg,
+    std::unique_ptr<GlobalRef> audio_track)
+ : audio_track_(std::move(audio_track)),
+ init_playout_(native_reg->GetMethodId("initPlayout", "(II)Z")),
+ start_playout_(native_reg->GetMethodId("startPlayout", "()Z")),
+ stop_playout_(native_reg->GetMethodId("stopPlayout", "()Z")),
+ set_stream_volume_(native_reg->GetMethodId("setStreamVolume", "(I)Z")),
+ get_stream_max_volume_(
+ native_reg->GetMethodId("getStreamMaxVolume", "()I")),
+ get_stream_volume_(native_reg->GetMethodId("getStreamVolume", "()I")) {}
+
+AudioTrackJni::JavaAudioTrack::~JavaAudioTrack() {}
+
+bool AudioTrackJni::JavaAudioTrack::InitPlayout(int sample_rate, int channels) {
+ return audio_track_->CallBooleanMethod(init_playout_, sample_rate, channels);
+}
+
+bool AudioTrackJni::JavaAudioTrack::StartPlayout() {
+ return audio_track_->CallBooleanMethod(start_playout_);
+}
+
+bool AudioTrackJni::JavaAudioTrack::StopPlayout() {
+ return audio_track_->CallBooleanMethod(stop_playout_);
+}
+
+bool AudioTrackJni::JavaAudioTrack::SetStreamVolume(int volume) {
+ return audio_track_->CallBooleanMethod(set_stream_volume_, volume);
+}
+
+int AudioTrackJni::JavaAudioTrack::GetStreamMaxVolume() {
+ return audio_track_->CallIntMethod(get_stream_max_volume_);
+}
+
+int AudioTrackJni::JavaAudioTrack::GetStreamVolume() {
+ return audio_track_->CallIntMethod(get_stream_volume_);
+}
+
+// TODO(henrika): possibly extend usage of AudioManager and add it as member.
+AudioTrackJni::AudioTrackJni(AudioManager* audio_manager)
+ : j_environment_(JVM::GetInstance()->environment()),
+ audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
+ direct_buffer_address_(nullptr),
+ direct_buffer_capacity_in_bytes_(0),
+ frames_per_buffer_(0),
+ initialized_(false),
+ playing_(false),
+ audio_device_buffer_(nullptr) {
+ RTC_LOG(INFO) << "ctor";
+ RTC_DCHECK(audio_parameters_.is_valid());
+ RTC_CHECK(j_environment_);
+ JNINativeMethod native_methods[] = {
+ {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
+       reinterpret_cast<void*>(&AudioTrackJni::CacheDirectBufferAddress)},
+      {"nativeGetPlayoutData", "(IJ)V",
+       reinterpret_cast<void*>(&AudioTrackJni::GetPlayoutData)}};
+ j_native_registration_ = j_environment_->RegisterNatives(
+ "org/webrtc/voiceengine/WebRtcAudioTrack", native_methods,
+ arraysize(native_methods));
+  j_audio_track_.reset(
+      new JavaAudioTrack(j_native_registration_.get(),
+                         j_native_registration_->NewObject(
+                             "<init>", "(J)V", PointerTojlong(this))));
+ // Detach from this thread since we want to use the checker to verify calls
+ // from the Java based audio thread.
+ thread_checker_java_.DetachFromThread();
+}
+
+AudioTrackJni::~AudioTrackJni() {
+ RTC_LOG(INFO) << "dtor";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ Terminate();
+}
+
+int32_t AudioTrackJni::Init() {
+ RTC_LOG(INFO) << "Init";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return 0;
+}
+
+int32_t AudioTrackJni::Terminate() {
+ RTC_LOG(INFO) << "Terminate";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ StopPlayout();
+ return 0;
+}
+
+int32_t AudioTrackJni::InitPlayout() {
+ RTC_LOG(INFO) << "InitPlayout";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!playing_);
+ if (!j_audio_track_->InitPlayout(audio_parameters_.sample_rate(),
+ audio_parameters_.channels())) {
+ RTC_LOG(LS_ERROR) << "InitPlayout failed";
+ return -1;
+ }
+ initialized_ = true;
+ return 0;
+}
+
+int32_t AudioTrackJni::StartPlayout() {
+ RTC_LOG(INFO) << "StartPlayout";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!playing_);
+ if (!initialized_) {
+ RTC_DLOG(LS_WARNING)
+ << "Playout can not start since InitPlayout must succeed first";
+ return 0;
+ }
+ if (!j_audio_track_->StartPlayout()) {
+ RTC_LOG(LS_ERROR) << "StartPlayout failed";
+ return -1;
+ }
+ playing_ = true;
+ return 0;
+}
+
+int32_t AudioTrackJni::StopPlayout() {
+ RTC_LOG(INFO) << "StopPlayout";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (!initialized_ || !playing_) {
+ return 0;
+ }
+ if (!j_audio_track_->StopPlayout()) {
+ RTC_LOG(LS_ERROR) << "StopPlayout failed";
+ return -1;
+ }
+  // If we don't detach here, we will hit a RTC_DCHECK in OnGetPlayoutData()
+  // next time StartPlayout() is called since it will create a new Java
+  // thread.
+ thread_checker_java_.DetachFromThread();
+ initialized_ = false;
+ playing_ = false;
+ direct_buffer_address_ = nullptr;
+ return 0;
+}
+
+int AudioTrackJni::SpeakerVolumeIsAvailable(bool* available) {
+ *available = true;
+ return 0;
+}
+
+int AudioTrackJni::SetSpeakerVolume(uint32_t volume) {
+ RTC_LOG(INFO) << "SetSpeakerVolume(" << volume << ")";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return j_audio_track_->SetStreamVolume(volume) ? 0 : -1;
+}
+
+int AudioTrackJni::MaxSpeakerVolume(uint32_t* max_volume) const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ *max_volume = j_audio_track_->GetStreamMaxVolume();
+ return 0;
+}
+
+int AudioTrackJni::MinSpeakerVolume(uint32_t* min_volume) const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ *min_volume = 0;
+ return 0;
+}
+
+int AudioTrackJni::SpeakerVolume(uint32_t* volume) const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ *volume = j_audio_track_->GetStreamVolume();
+ RTC_LOG(INFO) << "SpeakerVolume: " << volume;
+ return 0;
+}
+
+// TODO(henrika): possibly add stereo support.
+void AudioTrackJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ RTC_LOG(INFO) << "AttachAudioBuffer";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ audio_device_buffer_ = audioBuffer;
+ const int sample_rate_hz = audio_parameters_.sample_rate();
+ RTC_LOG(INFO) << "SetPlayoutSampleRate(" << sample_rate_hz << ")";
+ audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz);
+ const size_t channels = audio_parameters_.channels();
+ RTC_LOG(INFO) << "SetPlayoutChannels(" << channels << ")";
+ audio_device_buffer_->SetPlayoutChannels(channels);
+}
+
+void JNICALL AudioTrackJni::CacheDirectBufferAddress(JNIEnv* env,
+ jobject obj,
+ jobject byte_buffer,
+ jlong nativeAudioTrack) {
+ AudioTrackJni* this_object =
+      reinterpret_cast<AudioTrackJni*>(nativeAudioTrack);
+ this_object->OnCacheDirectBufferAddress(env, byte_buffer);
+}
+
+void AudioTrackJni::OnCacheDirectBufferAddress(JNIEnv* env,
+ jobject byte_buffer) {
+ RTC_LOG(INFO) << "OnCacheDirectBufferAddress";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!direct_buffer_address_);
+ direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
+ jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
+ RTC_LOG(INFO) << "direct buffer capacity: " << capacity;
+  direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
+ const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t);
+ frames_per_buffer_ = direct_buffer_capacity_in_bytes_ / bytes_per_frame;
+ RTC_LOG(INFO) << "frames_per_buffer: " << frames_per_buffer_;
+}
+
+void JNICALL AudioTrackJni::GetPlayoutData(JNIEnv* env,
+ jobject obj,
+ jint length,
+ jlong nativeAudioTrack) {
+ AudioTrackJni* this_object =
+      reinterpret_cast<AudioTrackJni*>(nativeAudioTrack);
+  this_object->OnGetPlayoutData(static_cast<size_t>(length));
+}
+
+// This method is called on a high-priority thread from Java. The name of
+// the thread is 'AudioTrackThread'.
+void AudioTrackJni::OnGetPlayoutData(size_t length) {
+ RTC_DCHECK(thread_checker_java_.CalledOnValidThread());
+ const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t);
+ RTC_DCHECK_EQ(frames_per_buffer_, length / bytes_per_frame);
+ if (!audio_device_buffer_) {
+ RTC_LOG(LS_ERROR) << "AttachAudioBuffer has not been called";
+ return;
+ }
+ // Pull decoded data (in 16-bit PCM format) from jitter buffer.
+ int samples = audio_device_buffer_->RequestPlayoutData(frames_per_buffer_);
+ if (samples <= 0) {
+ RTC_LOG(LS_ERROR) << "AudioDeviceBuffer::RequestPlayoutData failed";
+ return;
+ }
+ RTC_DCHECK_EQ(samples, frames_per_buffer_);
+ // Copy decoded data into common byte buffer to ensure that it can be
+ // written to the Java based audio track.
+ samples = audio_device_buffer_->GetPlayoutData(direct_buffer_address_);
+ RTC_DCHECK_EQ(length, bytes_per_frame * samples);
+}
+
+} // namespace android_adm
+
+} // namespace webrtc
diff --git a/sdk/android/src/jni/audio_device/audio_track_jni.h b/sdk/android/src/jni/audio_device/audio_track_jni.h
new file mode 100644
index 0000000000..14ef2e030a
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/audio_track_jni.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_TRACK_JNI_H_
+#define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_TRACK_JNI_H_
+
+#include <jni.h>
+#include <memory>
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/utility/include/helpers_android.h"
+#include "modules/utility/include/jvm_android.h"
+#include "rtc_base/thread_checker.h"
+#include "sdk/android/src/jni/audio_device/audio_common.h"
+#include "sdk/android/src/jni/audio_device/audio_manager.h"
+
+namespace webrtc {
+
+namespace android_adm {
+
+// Implements 16-bit mono PCM audio output support for Android using the Java
+// AudioTrack interface. Most of the work is done by its Java counterpart in
+// WebRtcAudioTrack.java. This class is created and lives on a thread in
+// C++-land, but decoded audio buffers are requested on a high-priority
+// thread managed by the Java class.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread.
+//
+// This class uses AttachCurrentThreadIfNeeded to attach to a Java VM if needed
+// and detach when the object goes out of scope. Additional thread checking
+// guarantees that no other (possibly non attached) thread is used.
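+//
+// A typical call sequence from the ADM layer looks roughly as follows
+// (an illustrative sketch only; |audio_manager| is an AudioManager*):
+//   AudioTrackJni player(audio_manager);
+//   player.Init();
+//   player.AttachAudioBuffer(audio_device_buffer);
+//   player.InitPlayout();
+//   player.StartPlayout();
+//   ...  // decoded audio is pulled via OnGetPlayoutData() callbacks
+//   player.StopPlayout();
+//   player.Terminate();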
+class AudioTrackJni {
+ public:
+ // Wraps the Java specific parts of the AudioTrackJni into one helper class.
+ class JavaAudioTrack {
+ public:
+    JavaAudioTrack(NativeRegistration* native_registration,
+                   std::unique_ptr<GlobalRef> audio_track);
+ ~JavaAudioTrack();
+
+ bool InitPlayout(int sample_rate, int channels);
+ bool StartPlayout();
+ bool StopPlayout();
+ bool SetStreamVolume(int volume);
+ int GetStreamMaxVolume();
+ int GetStreamVolume();
+
+ private:
+    std::unique_ptr<GlobalRef> audio_track_;
+ jmethodID init_playout_;
+ jmethodID start_playout_;
+ jmethodID stop_playout_;
+ jmethodID set_stream_volume_;
+ jmethodID get_stream_max_volume_;
+ jmethodID get_stream_volume_;
+ };
+
+ explicit AudioTrackJni(AudioManager* audio_manager);
+ ~AudioTrackJni();
+
+ int32_t Init();
+ int32_t Terminate();
+
+ int32_t InitPlayout();
+ bool PlayoutIsInitialized() const { return initialized_; }
+
+ int32_t StartPlayout();
+ int32_t StopPlayout();
+ bool Playing() const { return playing_; }
+
+ int SpeakerVolumeIsAvailable(bool* available);
+ int SetSpeakerVolume(uint32_t volume);
+ int SpeakerVolume(uint32_t* volume) const;
+ int MaxSpeakerVolume(uint32_t* max_volume) const;
+ int MinSpeakerVolume(uint32_t* min_volume) const;
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ private:
+  // Called from Java side so we can cache the address of the Java-managed
+ // |byte_buffer| in |direct_buffer_address_|. The size of the buffer
+ // is also stored in |direct_buffer_capacity_in_bytes_|.
+ // Called on the same thread as the creating thread.
+ static void JNICALL CacheDirectBufferAddress(JNIEnv* env,
+ jobject obj,
+ jobject byte_buffer,
+ jlong nativeAudioTrack);
+ void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);
+
+ // Called periodically by the Java based WebRtcAudioTrack object when
+ // playout has started. Each call indicates that |length| new bytes should
+ // be written to the memory area |direct_buffer_address_| for playout.
+ // This method is called on a high-priority thread from Java. The name of
+ // the thread is 'AudioTrackThread'.
+ static void JNICALL GetPlayoutData(JNIEnv* env,
+ jobject obj,
+ jint length,
+ jlong nativeAudioTrack);
+ void OnGetPlayoutData(size_t length);
+
+ // Stores thread ID in constructor.
+ rtc::ThreadChecker thread_checker_;
+
+ // Stores thread ID in first call to OnGetPlayoutData() from high-priority
+ // thread in Java. Detached during construction of this object.
+ rtc::ThreadChecker thread_checker_java_;
+
+ // Calls AttachCurrentThread() if this thread is not attached at construction.
+ // Also ensures that DetachCurrentThread() is called at destruction.
+ AttachCurrentThreadIfNeeded attach_thread_if_needed_;
+
+ // Wraps the JNI interface pointer and methods associated with it.
+  std::unique_ptr<JNIEnvironment> j_environment_;
+
+ // Contains factory method for creating the Java object.
+  std::unique_ptr<NativeRegistration> j_native_registration_;
+
+ // Wraps the Java specific parts of the AudioTrackJni class.
+  std::unique_ptr<JavaAudioTrack> j_audio_track_;
+
+ // Contains audio parameters provided to this class at construction by the
+ // AudioManager.
+ const AudioParameters audio_parameters_;
+
+ // Cached copy of address to direct audio buffer owned by |j_audio_track_|.
+ void* direct_buffer_address_;
+
+ // Number of bytes in the direct audio buffer owned by |j_audio_track_|.
+ size_t direct_buffer_capacity_in_bytes_;
+
+ // Number of audio frames per audio buffer. Each audio frame corresponds to
+ // one sample of PCM mono data at 16 bits per sample. Hence, each audio
+ // frame contains 2 bytes (given that the Java layer only supports mono).
+ // Example: 480 for 48000 Hz or 441 for 44100 Hz.
+ size_t frames_per_buffer_;
+
+ bool initialized_;
+
+ bool playing_;
+
+ // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+ // AudioDeviceModuleImpl class and called by AudioDeviceModule::Create().
+ // The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance
+ // and therefore outlives this object.
+ AudioDeviceBuffer* audio_device_buffer_;
+};
+
+} // namespace android_adm
+
+} // namespace webrtc
+
+#endif // SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_TRACK_JNI_H_
diff --git a/sdk/android/src/jni/audio_device/build_info.cc b/sdk/android/src/jni/audio_device/build_info.cc
new file mode 100644
index 0000000000..673892ac01
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/build_info.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/android/src/jni/audio_device/build_info.h"
+
+#include "modules/utility/include/helpers_android.h"
+
+namespace webrtc {
+
+namespace android_adm {
+
+BuildInfo::BuildInfo()
+ : j_environment_(JVM::GetInstance()->environment()),
+ j_build_info_(
+ JVM::GetInstance()->GetClass("org/webrtc/voiceengine/BuildInfo")) {}
+
+std::string BuildInfo::GetStringFromJava(const char* name) {
+ jmethodID id = j_build_info_.GetStaticMethodId(name, "()Ljava/lang/String;");
+ jstring j_string =
+      static_cast<jstring>(j_build_info_.CallStaticObjectMethod(id));
+ return j_environment_->JavaToStdString(j_string);
+}
+
+std::string BuildInfo::GetDeviceModel() {
+ return GetStringFromJava("getDeviceModel");
+}
+
+std::string BuildInfo::GetBrand() {
+ return GetStringFromJava("getBrand");
+}
+
+std::string BuildInfo::GetDeviceManufacturer() {
+ return GetStringFromJava("getDeviceManufacturer");
+}
+
+std::string BuildInfo::GetAndroidBuildId() {
+ return GetStringFromJava("getAndroidBuildId");
+}
+
+std::string BuildInfo::GetBuildType() {
+ return GetStringFromJava("getBuildType");
+}
+
+std::string BuildInfo::GetBuildRelease() {
+ return GetStringFromJava("getBuildRelease");
+}
+
+SdkCode BuildInfo::GetSdkVersion() {
+ jmethodID id = j_build_info_.GetStaticMethodId("getSdkVersion", "()I");
+ jint j_version = j_build_info_.CallStaticIntMethod(id);
+  return static_cast<SdkCode>(j_version);
+}
+
+} // namespace android_adm
+
+} // namespace webrtc
diff --git a/sdk/android/src/jni/audio_device/build_info.h b/sdk/android/src/jni/audio_device/build_info.h
new file mode 100644
index 0000000000..93f4db9286
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/build_info.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_BUILD_INFO_H_
+#define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_BUILD_INFO_H_
+
+#include <jni.h>
+#include <memory>
+#include <string>
+
+#include "modules/utility/include/jvm_android.h"
+
+namespace webrtc {
+
+namespace android_adm {
+
+// This enumeration maps to the values returned by BuildInfo::GetSdkVersion(),
+// indicating the Android release associated with a given SDK version.
+// See https://developer.android.com/guide/topics/manifest/uses-sdk-element.html
+// for details.
+enum SdkCode {
+ SDK_CODE_JELLY_BEAN = 16, // Android 4.1
+ SDK_CODE_JELLY_BEAN_MR1 = 17, // Android 4.2
+ SDK_CODE_JELLY_BEAN_MR2 = 18, // Android 4.3
+ SDK_CODE_KITKAT = 19, // Android 4.4
+ SDK_CODE_WATCH = 20, // Android 4.4W
+ SDK_CODE_LOLLIPOP = 21, // Android 5.0
+ SDK_CODE_LOLLIPOP_MR1 = 22, // Android 5.1
+ SDK_CODE_MARSHMALLOW = 23, // Android 6.0
+ SDK_CODE_N = 24, // Android 7.0
+};
+
+// Utility class used to query the Java class (org/webrtc/voiceengine/BuildInfo)
+// for device and Android build information.
+// The calling thread is attached to the JVM at construction if needed and a
+// valid Java environment object is also created.
+// All Get methods must be called on the creating thread. If not, the code will
+// hit RTC_DCHECKs when calling JNIEnvironment::JavaToStdString().
+class BuildInfo {
+ public:
+ BuildInfo();
+ ~BuildInfo() {}
+
+ // End-user-visible name for the end product (e.g. "Nexus 6").
+ std::string GetDeviceModel();
+ // Consumer-visible brand (e.g. "google").
+ std::string GetBrand();
+ // Manufacturer of the product/hardware (e.g. "motorola").
+ std::string GetDeviceManufacturer();
+ // Android build ID (e.g. LMY47D).
+ std::string GetAndroidBuildId();
+ // The type of build (e.g. "user" or "eng").
+ std::string GetBuildType();
+ // The user-visible version string (e.g. "5.1").
+ std::string GetBuildRelease();
+ // The user-visible SDK version of the framework (e.g. 21). See SdkCode enum
+ // for translation.
+ SdkCode GetSdkVersion();
+
+ private:
+ // Helper method which calls a static getter method with |name| and returns
+ // a string from Java.
+ std::string GetStringFromJava(const char* name);
+
+ // Ensures that this class can access a valid JNI interface pointer even
+ // if the creating thread was not attached to the JVM.
+ AttachCurrentThreadIfNeeded attach_thread_if_needed_;
+
+ // Provides access to the JNIEnv interface pointer and the JavaToStdString()
+ // method which is used to translate Java strings to std strings.
+ std::unique_ptr<JNIEnvironment> j_environment_;
+
+ // Holds the jclass object and provides access to CallStaticObjectMethod().
+ // Used by GetStringFromJava() during construction only.
+ JavaClass j_build_info_;
+};
+
+} // namespace android_adm
+
+} // namespace webrtc
+
+#endif // SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_BUILD_INFO_H_
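A typical consumer of GetSdkVersion() simply compares against the enum; a minimal sketch (the function name is hypothetical):

```cpp
#include "sdk/android/src/jni/audio_device/build_info.h"

// Hypothetical check: can a Lollipop-or-later API be used on this device?
bool RunningOnLollipopOrHigher(webrtc::android_adm::BuildInfo* build_info) {
  return build_info->GetSdkVersion() >=
         webrtc::android_adm::SDK_CODE_LOLLIPOP;
}
```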
diff --git a/sdk/android/src/jni/audio_device/opensles_common.cc b/sdk/android/src/jni/audio_device/opensles_common.cc
new file mode 100644
index 0000000000..72ba56ff65
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/opensles_common.cc
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/android/src/jni/audio_device/opensles_common.h"
+
+#include <SLES/OpenSLES.h>
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace android_adm {
+
+// Returns a string representation given an integer SL_RESULT_XXX code.
+// The mapping can be found in <SLES/OpenSLES.h>.
+const char* GetSLErrorString(size_t code) {
+ static const char* sl_error_strings[] = {
+ "SL_RESULT_SUCCESS", // 0
+ "SL_RESULT_PRECONDITIONS_VIOLATED", // 1
+ "SL_RESULT_PARAMETER_INVALID", // 2
+ "SL_RESULT_MEMORY_FAILURE", // 3
+ "SL_RESULT_RESOURCE_ERROR", // 4
+ "SL_RESULT_RESOURCE_LOST", // 5
+ "SL_RESULT_IO_ERROR", // 6
+ "SL_RESULT_BUFFER_INSUFFICIENT", // 7
+ "SL_RESULT_CONTENT_CORRUPTED", // 8
+ "SL_RESULT_CONTENT_UNSUPPORTED", // 9
+ "SL_RESULT_CONTENT_NOT_FOUND", // 10
+ "SL_RESULT_PERMISSION_DENIED", // 11
+ "SL_RESULT_FEATURE_UNSUPPORTED", // 12
+ "SL_RESULT_INTERNAL_ERROR", // 13
+ "SL_RESULT_UNKNOWN_ERROR", // 14
+ "SL_RESULT_OPERATION_ABORTED", // 15
+ "SL_RESULT_CONTROL_LOST", // 16
+ };
+
+ if (code >= arraysize(sl_error_strings)) {
+ return "SL_RESULT_UNKNOWN_ERROR";
+ }
+ return sl_error_strings[code];
+}
+
+SLDataFormat_PCM CreatePCMConfiguration(size_t channels,
+ int sample_rate,
+ size_t bits_per_sample) {
+ RTC_CHECK_EQ(bits_per_sample, SL_PCMSAMPLEFORMAT_FIXED_16);
+ SLDataFormat_PCM format;
+ format.formatType = SL_DATAFORMAT_PCM;
+ format.numChannels = static_cast<SLuint32>(channels);
+ // Note that the sample rate is expressed in milliHertz, not Hertz.
+ switch (sample_rate) {
+ case 8000:
+ format.samplesPerSec = SL_SAMPLINGRATE_8;
+ break;
+ case 16000:
+ format.samplesPerSec = SL_SAMPLINGRATE_16;
+ break;
+ case 22050:
+ format.samplesPerSec = SL_SAMPLINGRATE_22_05;
+ break;
+ case 32000:
+ format.samplesPerSec = SL_SAMPLINGRATE_32;
+ break;
+ case 44100:
+ format.samplesPerSec = SL_SAMPLINGRATE_44_1;
+ break;
+ case 48000:
+ format.samplesPerSec = SL_SAMPLINGRATE_48;
+ break;
+ case 64000:
+ format.samplesPerSec = SL_SAMPLINGRATE_64;
+ break;
+ case 88200:
+ format.samplesPerSec = SL_SAMPLINGRATE_88_2;
+ break;
+ case 96000:
+ format.samplesPerSec = SL_SAMPLINGRATE_96;
+ break;
+ default:
+ RTC_CHECK(false) << "Unsupported sample rate: " << sample_rate;
+ break;
+ }
+ format.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
+ format.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
+ format.endianness = SL_BYTEORDER_LITTLEENDIAN;
+ if (format.numChannels == 1) {
+ format.channelMask = SL_SPEAKER_FRONT_CENTER;
+ } else if (format.numChannels == 2) {
+ format.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
+ } else {
+ RTC_CHECK(false) << "Unsupported number of channels: "
+ << format.numChannels;
+ }
+ return format;
+}
+
+} // namespace android_adm
+
+} // namespace webrtc
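CreatePCMConfiguration() only accepts the sample rates enumerated in the switch above. A short sketch of how a caller might build and sanity-check a mono 48 kHz configuration (function name hypothetical; note the milliHertz unit):

```cpp
#include <SLES/OpenSLES.h>

#include "rtc_base/checks.h"
#include "sdk/android/src/jni/audio_device/opensles_common.h"

void PcmConfigurationExample() {
  // 48 kHz, mono, 16-bit: a common native configuration on modern devices.
  SLDataFormat_PCM format = webrtc::android_adm::CreatePCMConfiguration(
      1 /* channels */, 48000 /* sample_rate */, 16 /* bits_per_sample */);
  // samplesPerSec is expressed in milliHertz: SL_SAMPLINGRATE_48 == 48000000.
  RTC_CHECK_EQ(format.samplesPerSec, SL_SAMPLINGRATE_48);
  RTC_CHECK_EQ(format.channelMask, SL_SPEAKER_FRONT_CENTER);
}
```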
diff --git a/sdk/android/src/jni/audio_device/opensles_common.h b/sdk/android/src/jni/audio_device/opensles_common.h
new file mode 100644
index 0000000000..bba8ce780a
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/opensles_common.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_OPENSLES_COMMON_H_
+#define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_OPENSLES_COMMON_H_
+
+#include <SLES/OpenSLES.h>
+#include <stddef.h>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace android_adm {
+
+// Returns a string representation given an integer SL_RESULT_XXX code.
+// The mapping can be found in <SLES/OpenSLES.h>.
+const char* GetSLErrorString(size_t code);
+
+// Configures an SL_DATAFORMAT_PCM structure based on native audio parameters.
+SLDataFormat_PCM CreatePCMConfiguration(size_t channels,
+ int sample_rate,
+ size_t bits_per_sample);
+
+// Helper class for using SLObjectItf interfaces.
+template <typename SLType, typename SLDerefType>
+class ScopedSLObject {
+ public:
+ ScopedSLObject() : obj_(nullptr) {}
+
+ ~ScopedSLObject() { Reset(); }
+
+ SLType* Receive() {
+ RTC_DCHECK(!obj_);
+ return &obj_;
+ }
+
+ SLDerefType operator->() { return *obj_; }
+
+ SLType Get() const { return obj_; }
+
+ void Reset() {
+ if (obj_) {
+ (*obj_)->Destroy(obj_);
+ obj_ = nullptr;
+ }
+ }
+
+ private:
+ SLType obj_;
+};
+
+typedef ScopedSLObject<SLObjectItf, const SLObjectItf_*> ScopedSLObjectItf;
+
+} // namespace android_adm
+
+} // namespace webrtc
+
+#endif // SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_OPENSLES_COMMON_H_
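ScopedSLObject ties the Destroy() call of an SLObjectItf to C++ scope. A minimal sketch of the intended pattern, using the standard slCreateEngine() entry point (function name hypothetical):

```cpp
#include <SLES/OpenSLES.h>

#include "sdk/android/src/jni/audio_device/opensles_common.h"

bool CreateAndRealizeEngine() {
  webrtc::android_adm::ScopedSLObjectItf engine;
  // Receive() exposes the address of the wrapped SLObjectItf; Destroy() runs
  // automatically when |engine| goes out of scope or Reset() is called.
  if (slCreateEngine(engine.Receive(), 0, nullptr, 0, nullptr, nullptr) !=
      SL_RESULT_SUCCESS) {
    return false;
  }
  return engine->Realize(engine.Get(), SL_BOOLEAN_FALSE) == SL_RESULT_SUCCESS;
}
```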
diff --git a/sdk/android/src/jni/audio_device/opensles_player.cc b/sdk/android/src/jni/audio_device/opensles_player.cc
new file mode 100644
index 0000000000..5e9dfe090e
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/opensles_player.cc
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/android/src/jni/audio_device/opensles_player.h"
+
+#include <android/log.h>
+
+#include "api/array_view.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/format_macros.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/timeutils.h"
+#include "sdk/android/src/jni/audio_device/audio_common.h"
+#include "sdk/android/src/jni/audio_device/audio_manager.h"
+
+#define TAG "OpenSLESPlayer"
+#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
+#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
+#define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
+#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
+
+#define RETURN_ON_ERROR(op, ...) \
+ do { \
+ SLresult err = (op); \
+ if (err != SL_RESULT_SUCCESS) { \
+ ALOGE("%s failed: %s", #op, GetSLErrorString(err)); \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
+
+namespace webrtc {
+
+namespace android_adm {
+
+OpenSLESPlayer::OpenSLESPlayer(AudioManager* audio_manager)
+ : audio_manager_(audio_manager),
+ audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
+ audio_device_buffer_(nullptr),
+ initialized_(false),
+ playing_(false),
+ buffer_index_(0),
+ engine_(nullptr),
+ player_(nullptr),
+ simple_buffer_queue_(nullptr),
+ volume_(nullptr),
+ last_play_time_(0) {
+ ALOGD("ctor[tid=%d]", rtc::CurrentThreadId());
+ // Use native audio output parameters provided by the audio manager and
+ // define the PCM format structure.
+ pcm_format_ = CreatePCMConfiguration(audio_parameters_.channels(),
+ audio_parameters_.sample_rate(),
+ audio_parameters_.bits_per_sample());
+ // Detach from this thread since we want to use the checker to verify calls
+ // from the internal audio thread.
+ thread_checker_opensles_.DetachFromThread();
+}
+
+OpenSLESPlayer::~OpenSLESPlayer() {
+ ALOGD("dtor[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ Terminate();
+ DestroyAudioPlayer();
+ DestroyMix();
+ engine_ = nullptr;
+ RTC_DCHECK(!engine_);
+ RTC_DCHECK(!output_mix_.Get());
+ RTC_DCHECK(!player_);
+ RTC_DCHECK(!simple_buffer_queue_);
+ RTC_DCHECK(!volume_);
+}
+
+int OpenSLESPlayer::Init() {
+ ALOGD("Init[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (audio_parameters_.channels() == 2) {
+ // TODO(henrika): FineAudioBuffer needs more work to support stereo.
+ ALOGE("OpenSLESPlayer does not support stereo");
+ return -1;
+ }
+ return 0;
+}
+
+int OpenSLESPlayer::Terminate() {
+ ALOGD("Terminate[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ StopPlayout();
+ return 0;
+}
+
+int OpenSLESPlayer::InitPlayout() {
+ ALOGD("InitPlayout[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!playing_);
+ if (!ObtainEngineInterface()) {
+ ALOGE("Failed to obtain SL Engine interface");
+ return -1;
+ }
+ CreateMix();
+ initialized_ = true;
+ buffer_index_ = 0;
+ return 0;
+}
+
+int OpenSLESPlayer::StartPlayout() {
+ ALOGD("StartPlayout[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(initialized_);
+ RTC_DCHECK(!playing_);
+ if (fine_audio_buffer_) {
+ fine_audio_buffer_->ResetPlayout();
+ }
+ // The number of lower latency audio players is limited, hence we create the
+ // audio player in Start() and destroy it in Stop().
+ CreateAudioPlayer();
+ // Fill up audio buffers to avoid initial glitch and to ensure that playback
+ // starts when mode is later changed to SL_PLAYSTATE_PLAYING.
+ // TODO(henrika): we can save some delay by only making one call to
+ // EnqueuePlayoutData. Most likely not worth the risk of adding a glitch.
+ last_play_time_ = rtc::Time();
+ for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
+ EnqueuePlayoutData(true);
+ }
+ // Start streaming data by setting the play state to SL_PLAYSTATE_PLAYING.
+ // For a player object, when the object is in the SL_PLAYSTATE_PLAYING
+ // state, adding buffers will implicitly start playback.
+ RETURN_ON_ERROR((*player_)->SetPlayState(player_, SL_PLAYSTATE_PLAYING), -1);
+ playing_ = (GetPlayState() == SL_PLAYSTATE_PLAYING);
+ RTC_DCHECK(playing_);
+ return 0;
+}
+
+int OpenSLESPlayer::StopPlayout() {
+ ALOGD("StopPlayout[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (!initialized_ || !playing_) {
+ return 0;
+ }
+ // Stop playing by setting the play state to SL_PLAYSTATE_STOPPED.
+ RETURN_ON_ERROR((*player_)->SetPlayState(player_, SL_PLAYSTATE_STOPPED), -1);
+ // Clear the buffer queue to flush out any remaining data.
+ RETURN_ON_ERROR((*simple_buffer_queue_)->Clear(simple_buffer_queue_), -1);
+#if RTC_DCHECK_IS_ON
+ // Verify that the buffer queue is in fact cleared as it should.
+ SLAndroidSimpleBufferQueueState buffer_queue_state;
+ (*simple_buffer_queue_)->GetState(simple_buffer_queue_, &buffer_queue_state);
+ RTC_DCHECK_EQ(0, buffer_queue_state.count);
+ RTC_DCHECK_EQ(0, buffer_queue_state.index);
+#endif
+ // The number of lower latency audio players is limited, hence we create the
+ // audio player in Start() and destroy it in Stop().
+ DestroyAudioPlayer();
+ thread_checker_opensles_.DetachFromThread();
+ initialized_ = false;
+ playing_ = false;
+ return 0;
+}
+
+int OpenSLESPlayer::SpeakerVolumeIsAvailable(bool* available) {
+ *available = false;
+ return 0;
+}
+
+int OpenSLESPlayer::SetSpeakerVolume(uint32_t volume) {
+ return -1;
+}
+
+int OpenSLESPlayer::SpeakerVolume(uint32_t* volume) const {
+ return -1;
+}
+
+int OpenSLESPlayer::MaxSpeakerVolume(uint32_t* maxVolume) const {
+ return -1;
+}
+
+int OpenSLESPlayer::MinSpeakerVolume(uint32_t* minVolume) const {
+ return -1;
+}
+
+void OpenSLESPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ ALOGD("AttachAudioBuffer");
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ audio_device_buffer_ = audioBuffer;
+ const int sample_rate_hz = audio_parameters_.sample_rate();
+ ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz);
+ audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz);
+ const size_t channels = audio_parameters_.channels();
+ ALOGD("SetPlayoutChannels(%" PRIuS ")", channels);
+ audio_device_buffer_->SetPlayoutChannels(channels);
+ RTC_CHECK(audio_device_buffer_);
+ AllocateDataBuffers();
+}
+
+void OpenSLESPlayer::AllocateDataBuffers() {
+ ALOGD("AllocateDataBuffers");
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!simple_buffer_queue_);
+ RTC_CHECK(audio_device_buffer_);
+ // Create a modified audio buffer class which allows us to ask for any number
+ // of samples (and not only multiple of 10ms) to match the native OpenSL ES
+ // buffer size. The native buffer size corresponds to the
+ // PROPERTY_OUTPUT_FRAMES_PER_BUFFER property which is the number of audio
+ // frames that the HAL (Hardware Abstraction Layer) buffer can hold. It is
+ // recommended to construct audio buffers so that they contain an exact
+ // multiple of this number. If so, callbacks will occur at regular intervals,
+ // which reduces jitter.
+ const size_t buffer_size_in_bytes = audio_parameters_.GetBytesPerBuffer();
+ ALOGD("native buffer size: %" PRIuS, buffer_size_in_bytes);
+ ALOGD("native buffer size in ms: %.2f",
+ audio_parameters_.GetBufferSizeInMilliseconds());
+ fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_,
+ audio_parameters_.sample_rate(),
+ 2 * buffer_size_in_bytes));
+ // Allocate memory for audio buffers.
+ for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
+ audio_buffers_[i].reset(new SLint8[buffer_size_in_bytes]);
+ }
+}
+
+bool OpenSLESPlayer::ObtainEngineInterface() {
+ ALOGD("ObtainEngineInterface");
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (engine_)
+ return true;
+ // Get access to (or create if not already existing) the global OpenSL Engine
+ // object.
+ SLObjectItf engine_object = audio_manager_->GetOpenSLEngine();
+ if (engine_object == nullptr) {
+ ALOGE("Failed to access the global OpenSL engine");
+ return false;
+ }
+ // Get the SL Engine Interface which is implicit.
+ RETURN_ON_ERROR(
+ (*engine_object)->GetInterface(engine_object, SL_IID_ENGINE, &engine_),
+ false);
+ return true;
+}
+
+bool OpenSLESPlayer::CreateMix() {
+ ALOGD("CreateMix");
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(engine_);
+ if (output_mix_.Get())
+ return true;
+
+ // Create the output mix on the engine object. No interfaces will be used.
+ RETURN_ON_ERROR((*engine_)->CreateOutputMix(engine_, output_mix_.Receive(), 0,
+ nullptr, nullptr),
+ false);
+ RETURN_ON_ERROR(output_mix_->Realize(output_mix_.Get(), SL_BOOLEAN_FALSE),
+ false);
+ return true;
+}
+
+void OpenSLESPlayer::DestroyMix() {
+ ALOGD("DestroyMix");
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (!output_mix_.Get())
+ return;
+ output_mix_.Reset();
+}
+
+bool OpenSLESPlayer::CreateAudioPlayer() {
+ ALOGD("CreateAudioPlayer");
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(output_mix_.Get());
+ if (player_object_.Get())
+ return true;
+ RTC_DCHECK(!player_);
+ RTC_DCHECK(!simple_buffer_queue_);
+ RTC_DCHECK(!volume_);
+
+ // Source: the Android Simple Buffer Queue locator acts as the data source.
+ SLDataLocator_AndroidSimpleBufferQueue simple_buffer_queue = {
+ SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
+ static_cast(kNumOfOpenSLESBuffers)};
+ SLDataSource audio_source = {&simple_buffer_queue, &pcm_format_};
+
+ // Sink: the OutputMix locator acts as the data sink.
+ SLDataLocator_OutputMix locator_output_mix = {SL_DATALOCATOR_OUTPUTMIX,
+ output_mix_.Get()};
+ SLDataSink audio_sink = {&locator_output_mix, nullptr};
+
+ // Define interfaces that we intend to use and realize.
+ const SLInterfaceID interface_ids[] = {SL_IID_ANDROIDCONFIGURATION,
+ SL_IID_BUFFERQUEUE, SL_IID_VOLUME};
+ const SLboolean interface_required[] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE,
+ SL_BOOLEAN_TRUE};
+
+ // Create the audio player on the engine interface.
+ RETURN_ON_ERROR(
+ (*engine_)->CreateAudioPlayer(
+ engine_, player_object_.Receive(), &audio_source, &audio_sink,
+ arraysize(interface_ids), interface_ids, interface_required),
+ false);
+
+ // Use the Android configuration interface to set platform-specific
+ // parameters. Should be done before player is realized.
+ SLAndroidConfigurationItf player_config;
+ RETURN_ON_ERROR(
+ player_object_->GetInterface(player_object_.Get(),
+ SL_IID_ANDROIDCONFIGURATION, &player_config),
+ false);
+ // Set audio player configuration to SL_ANDROID_STREAM_VOICE which
+ // corresponds to android.media.AudioManager.STREAM_VOICE_CALL.
+ SLint32 stream_type = SL_ANDROID_STREAM_VOICE;
+ RETURN_ON_ERROR(
+ (*player_config)
+ ->SetConfiguration(player_config, SL_ANDROID_KEY_STREAM_TYPE,
+ &stream_type, sizeof(SLint32)),
+ false);
+
+ // Realize the audio player object after configuration has been set.
+ RETURN_ON_ERROR(
+ player_object_->Realize(player_object_.Get(), SL_BOOLEAN_FALSE), false);
+
+ // Get the SLPlayItf interface on the audio player.
+ RETURN_ON_ERROR(
+ player_object_->GetInterface(player_object_.Get(), SL_IID_PLAY, &player_),
+ false);
+
+ // Get the SLAndroidSimpleBufferQueueItf interface on the audio player.
+ RETURN_ON_ERROR(
+ player_object_->GetInterface(player_object_.Get(), SL_IID_BUFFERQUEUE,
+ &simple_buffer_queue_),
+ false);
+
+ // Register callback method for the Android Simple Buffer Queue interface.
+ // This method will be called when the native audio layer needs audio data.
+ RETURN_ON_ERROR((*simple_buffer_queue_)
+ ->RegisterCallback(simple_buffer_queue_,
+ SimpleBufferQueueCallback, this),
+ false);
+
+ // Get the SLVolumeItf interface on the audio player.
+ RETURN_ON_ERROR(player_object_->GetInterface(player_object_.Get(),
+ SL_IID_VOLUME, &volume_),
+ false);
+
+ // TODO(henrika): might not be required to set volume to max here since it
+ // seems to be default on most devices. Might be required for unit tests.
+ // RETURN_ON_ERROR((*volume_)->SetVolumeLevel(volume_, 0), false);
+
+ return true;
+}
+
+void OpenSLESPlayer::DestroyAudioPlayer() {
+ ALOGD("DestroyAudioPlayer");
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (!player_object_.Get())
+ return;
+ (*simple_buffer_queue_)
+ ->RegisterCallback(simple_buffer_queue_, nullptr, nullptr);
+ player_object_.Reset();
+ player_ = nullptr;
+ simple_buffer_queue_ = nullptr;
+ volume_ = nullptr;
+}
+
+// static
+void OpenSLESPlayer::SimpleBufferQueueCallback(
+ SLAndroidSimpleBufferQueueItf caller,
+ void* context) {
+ OpenSLESPlayer* stream = reinterpret_cast<OpenSLESPlayer*>(context);
+ stream->FillBufferQueue();
+}
+
+void OpenSLESPlayer::FillBufferQueue() {
+ RTC_DCHECK(thread_checker_opensles_.CalledOnValidThread());
+ SLuint32 state = GetPlayState();
+ if (state != SL_PLAYSTATE_PLAYING) {
+ ALOGW("Buffer callback in non-playing state!");
+ return;
+ }
+ EnqueuePlayoutData(false);
+}
+
+void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
+ // Check delta time between two successive callbacks and provide a warning
+ // if it becomes very large.
+ // TODO(henrika): using 150ms as upper limit but this value is rather random.
+ const uint32_t current_time = rtc::Time();
+ const uint32_t diff = current_time - last_play_time_;
+ if (diff > 150) {
+ ALOGW("Bad OpenSL ES playout timing, dT=%u [ms]", diff);
+ }
+ last_play_time_ = current_time;
+ SLint8* audio_ptr = audio_buffers_[buffer_index_].get();
+ if (silence) {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ // Avoid acquiring real audio data from WebRTC and fill the buffer with
+ // zeros instead. Used to prime the buffer with silence and to avoid asking
+ // for audio data from two different threads.
+ memset(audio_ptr, 0, audio_parameters_.GetBytesPerBuffer());
+ } else {
+ RTC_DCHECK(thread_checker_opensles_.CalledOnValidThread());
+ // Read audio data from the WebRTC source using the FineAudioBuffer object
+ // to adjust for differences in buffer size between WebRTC (10ms) and native
+ // OpenSL ES. Use hardcoded delay estimate since OpenSL ES does not support
+ // delay estimation.
+ fine_audio_buffer_->GetPlayoutData(
+ rtc::ArrayView<int8_t>(audio_ptr,
+ audio_parameters_.GetBytesPerBuffer()),
+ 25);
+ }
+ // Enqueue the decoded audio buffer for playback.
+ SLresult err = (*simple_buffer_queue_)
+ ->Enqueue(simple_buffer_queue_, audio_ptr,
+ audio_parameters_.GetBytesPerBuffer());
+ if (SL_RESULT_SUCCESS != err) {
+ ALOGE("Enqueue failed: %d", err);
+ }
+ buffer_index_ = (buffer_index_ + 1) % kNumOfOpenSLESBuffers;
+}
+
+SLuint32 OpenSLESPlayer::GetPlayState() const {
+ RTC_DCHECK(player_);
+ SLuint32 state;
+ SLresult err = (*player_)->GetPlayState(player_, &state);
+ if (SL_RESULT_SUCCESS != err) {
+ ALOGE("GetPlayState failed: %d", err);
+ }
+ return state;
+}
+
+} // namespace android_adm
+
+} // namespace webrtc
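The DCHECKs above imply a strict call sequence: Init() once, InitPlayout() before every StartPlayout(), and StopPlayout() to tear the player down again. A hedged driver sketch (PlayoutSession is hypothetical; the AudioManager and AudioDeviceBuffer must outlive the player):

```cpp
#include "sdk/android/src/jni/audio_device/opensles_player.h"

// Hypothetical driver showing the required state transitions.
int PlayoutSession(webrtc::android_adm::AudioManager* audio_manager,
                   webrtc::AudioDeviceBuffer* audio_device_buffer) {
  webrtc::android_adm::OpenSLESPlayer player(audio_manager);
  player.AttachAudioBuffer(audio_device_buffer);
  if (player.Init() != 0 || player.InitPlayout() != 0) {
    return -1;
  }
  if (player.StartPlayout() != 0) {
    return -1;
  }
  // ... audio is rendered on the internal OpenSL ES thread ...
  // InitPlayout() must be called again before the next StartPlayout().
  return player.StopPlayout();
}
```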
diff --git a/sdk/android/src/jni/audio_device/opensles_player.h b/sdk/android/src/jni/audio_device/opensles_player.h
new file mode 100644
index 0000000000..e0771b53e1
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/opensles_player.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_OPENSLES_PLAYER_H_
+#define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_OPENSLES_PLAYER_H_
+
+#include <SLES/OpenSLES.h>
+#include <SLES/OpenSLES_Android.h>
+#include <SLES/OpenSLES_AndroidConfiguration.h>
+
+#include <memory>
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/utility/include/helpers_android.h"
+#include "rtc_base/thread_checker.h"
+#include "sdk/android/src/jni/audio_device/audio_common.h"
+#include "sdk/android/src/jni/audio_device/audio_manager.h"
+#include "sdk/android/src/jni/audio_device/opensles_common.h"
+
+namespace webrtc {
+
+class FineAudioBuffer;
+
+namespace android_adm {
+
+// Implements 16-bit mono PCM audio output support for Android using the
+// C based OpenSL ES API. No calls from C/C++ to Java using JNI are made.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread. Decoded audio
+// buffers are requested on a dedicated internal thread managed by the OpenSL
+// ES layer.
+//
+// The existing design forces the user to call InitPlayout() after
+// StopPlayout() to be able to call StartPlayout() again. This is in line with
+// how the Java-based implementation works.
+//
+// OpenSL ES is a native C API which has no Dalvik-related overhead such as
+// garbage-collection pauses, and it supports reduced audio output latency.
+// If the device doesn't claim this feature but supports API level 9 (Android
+// platform version 2.3) or later, then we can still use the OpenSL ES APIs but
+// the output latency may be higher.
+class OpenSLESPlayer {
+ public:
+ // Beginning with API level 17 (Android 4.2), a buffer count of 2 or more is
+ // required for lower latency. Beginning with API level 18 (Android 4.3), a
+ // buffer count of 1 is sufficient for lower latency. In addition, the buffer
+ // size and sample rate must be compatible with the device's native output
+ // configuration provided via the audio manager at construction.
+ // TODO(henrika): perhaps set this value dynamically based on OS version.
+ static const int kNumOfOpenSLESBuffers = 2;
+
+ explicit OpenSLESPlayer(AudioManager* audio_manager);
+ ~OpenSLESPlayer();
+
+ int Init();
+ int Terminate();
+
+ int InitPlayout();
+ bool PlayoutIsInitialized() const { return initialized_; }
+
+ int StartPlayout();
+ int StopPlayout();
+ bool Playing() const { return playing_; }
+
+ int SpeakerVolumeIsAvailable(bool* available);
+ int SetSpeakerVolume(uint32_t volume);
+ int SpeakerVolume(uint32_t* volume) const;
+ int MaxSpeakerVolume(uint32_t* maxVolume) const;
+ int MinSpeakerVolume(uint32_t* minVolume) const;
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ private:
+ // These callback methods are called when data is required for playout.
+ // They are both called from an internal "OpenSL ES thread" which is not
+ // attached to the Dalvik VM.
+ static void SimpleBufferQueueCallback(SLAndroidSimpleBufferQueueItf caller,
+ void* context);
+ void FillBufferQueue();
+ // Reads audio data in PCM format using the AudioDeviceBuffer.
+ // Can be called both on the main thread (during Start()) and from the
+ // internal audio thread while output streaming is active.
+ // If the |silence| flag is set, the audio is filled with zeros instead of
+ // asking the WebRTC layer for real audio data. This procedure is also known
+ // as audio priming.
+ void EnqueuePlayoutData(bool silence);
+
+ // Allocate memory for audio buffers which will be used to render audio
+ // via the SLAndroidSimpleBufferQueueItf interface.
+ void AllocateDataBuffers();
+
+ // Obtains the SL Engine Interface from the existing global Engine object.
+ // The interface exposes creation methods of all the OpenSL ES object types.
+ // This method defines the |engine_| member variable.
+ bool ObtainEngineInterface();
+
+ // Creates/destroys the output mix object.
+ bool CreateMix();
+ void DestroyMix();
+
+ // Creates/destroys the audio player and the simple-buffer object.
+ // Also creates the volume object.
+ bool CreateAudioPlayer();
+ void DestroyAudioPlayer();
+
+ SLuint32 GetPlayState() const;
+
+ // Ensures that methods are called from the same thread as this object is
+ // created on.
+ rtc::ThreadChecker thread_checker_;
+
+ // Stores thread ID in first call to SimpleBufferQueueCallback() from internal
+ // non-application thread which is not attached to the Dalvik JVM.
+ // Detached during construction of this object.
+ rtc::ThreadChecker thread_checker_opensles_;
+
+ // Raw pointer to the audio manager injected at construction. Used to cache
+ // audio parameters and to access the global SL engine object needed by the
+ // ObtainEngineInterface() method. The audio manager outlives any instance of
+ // this class.
+ AudioManager* audio_manager_;
+
+ // Contains audio parameters provided to this class at construction by the
+ // AudioManager.
+ const AudioParameters audio_parameters_;
+
+ // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+ // AudioDeviceModuleImpl class and called by AudioDeviceModule::Create().
+ AudioDeviceBuffer* audio_device_buffer_;
+
+ bool initialized_;
+ bool playing_;
+
+ // PCM-type format definition.
+ // TODO(henrika): add support for SLAndroidDataFormat_PCM_EX (android-21) if
+ // 32-bit float representation is needed.
+ SLDataFormat_PCM pcm_format_;
+
+ // Queue of audio buffers to be used by the player object for rendering
+ // audio. They will be used in a round-robin way and the size of each buffer
+ // is given by AudioParameters::GetBytesPerBuffer().
+ std::unique_ptr<SLint8[]> audio_buffers_[kNumOfOpenSLESBuffers];
+
+ // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
+ // in chunks of 10ms. It then allows for this data to be pulled in
+ // a finer or coarser granularity. I.e. interacting with this class instead
+ // of directly with the AudioDeviceBuffer one can ask for any number of
+ // audio data samples.
+ // Example: native buffer size can be 192 audio frames at 48kHz sample rate.
+ // WebRTC will provide 480 audio frames per 10ms but OpenSL ES asks for 192
+ // in each callback (one every 4th ms). This class can then ask for 192 and
+ // the FineAudioBuffer will ask WebRTC for new data approximately only every
+ // second callback and also cache non-utilized audio.
+ std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+ // Keeps track of active audio buffer 'n' in the audio_buffers_[n] queue.
+ // Example (kNumOfOpenSLESBuffers = 2): counts 0, 1, 0, 1, ...
+ int buffer_index_;
+
+ // This interface exposes creation methods for all the OpenSL ES object types.
+ // It is the OpenSL ES API entry point.
+ SLEngineItf engine_;
+
+ // Output mix object to be used by the player object.
+ ScopedSLObjectItf output_mix_;
+
+ // The audio player media object plays out audio to the speakers. It also
+ // supports volume control.
+ ScopedSLObjectItf player_object_;
+
+ // This interface is supported on the audio player and it controls the state
+ // of the audio player.
+ SLPlayItf player_;
+
+ // The Android Simple Buffer Queue interface is supported on the audio player
+ // and it provides methods to send audio data from the source to the audio
+ // player for rendering.
+ SLAndroidSimpleBufferQueueItf simple_buffer_queue_;
+
+ // This interface exposes controls for manipulating the object’s audio volume
+ // properties. This interface is supported on the Audio Player object.
+ SLVolumeItf volume_;
+
+ // Last time the OpenSL ES layer asked for audio data to play out.
+ uint32_t last_play_time_;
+};
+
+} // namespace android_adm
+
+} // namespace webrtc
+
+#endif // SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_OPENSLES_PLAYER_H_
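The FineAudioBuffer interplay described above is easier to see with the numbers from the comment: 192-frame native buffers at 48 kHz against 480-frame 10 ms WebRTC chunks. A standalone sketch of the cadence:

```cpp
#include <cstdio>

int main() {
  const int sample_rate_hz = 48000;
  const int native_frames_per_buffer = 192;  // Example from the comment above.
  const int webrtc_frames_per_10ms = sample_rate_hz / 100;  // 480 frames.
  // Each OpenSL ES callback drains one native buffer: one every 4 ms here.
  const double callback_interval_ms =
      1000.0 * native_frames_per_buffer / sample_rate_hz;
  // One 10 ms WebRTC chunk covers 2.5 native callbacks on average, so the
  // FineAudioBuffer asks WebRTC for new data only on roughly every second
  // callback and caches the remainder.
  const double callbacks_per_chunk =
      static_cast<double>(webrtc_frames_per_10ms) / native_frames_per_buffer;
  std::printf("callback every %.1f ms, %.1f callbacks per 10 ms chunk\n",
              callback_interval_ms, callbacks_per_chunk);
  return 0;
}
```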
diff --git a/sdk/android/src/jni/audio_device/opensles_recorder.cc b/sdk/android/src/jni/audio_device/opensles_recorder.cc
new file mode 100644
index 0000000000..270d18d7aa
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/opensles_recorder.cc
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/android/src/jni/audio_device/opensles_recorder.h"
+
+#include <android/log.h>
+
+#include "api/array_view.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/format_macros.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/timeutils.h"
+#include "sdk/android/src/jni/audio_device/audio_common.h"
+#include "sdk/android/src/jni/audio_device/audio_manager.h"
+
+#define TAG "OpenSLESRecorder"
+#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
+#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
+#define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
+#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
+
+#define LOG_ON_ERROR(op) \
+ [](SLresult err) { \
+ if (err != SL_RESULT_SUCCESS) { \
+ ALOGE("%s:%d %s failed: %s", __FILE__, __LINE__, #op, \
+ GetSLErrorString(err)); \
+ return true; \
+ } \
+ return false; \
+ }(op)
+
+namespace webrtc {
+
+namespace android_adm {
+
+OpenSLESRecorder::OpenSLESRecorder(AudioManager* audio_manager)
+ : audio_manager_(audio_manager),
+ audio_parameters_(audio_manager->GetRecordAudioParameters()),
+ audio_device_buffer_(nullptr),
+ initialized_(false),
+ recording_(false),
+ engine_(nullptr),
+ recorder_(nullptr),
+ simple_buffer_queue_(nullptr),
+ buffer_index_(0),
+ last_rec_time_(0) {
+ ALOGD("ctor[tid=%d]", rtc::CurrentThreadId());
+ // Detach from this thread since we want to use the checker to verify calls
+ // from the internal audio thread.
+ thread_checker_opensles_.DetachFromThread();
+ // Use native audio output parameters provided by the audio manager and
+ // define the PCM format structure.
+ pcm_format_ = CreatePCMConfiguration(audio_parameters_.channels(),
+ audio_parameters_.sample_rate(),
+ audio_parameters_.bits_per_sample());
+}
+
+OpenSLESRecorder::~OpenSLESRecorder() {
+ ALOGD("dtor[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ Terminate();
+ DestroyAudioRecorder();
+ engine_ = nullptr;
+ RTC_DCHECK(!engine_);
+ RTC_DCHECK(!recorder_);
+ RTC_DCHECK(!simple_buffer_queue_);
+}
+
+int OpenSLESRecorder::Init() {
+ ALOGD("Init[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (audio_parameters_.channels() == 2) {
+ // TODO(henrika): FineAudioBuffer needs more work to support stereo.
+ ALOGE("OpenSLESRecorder does not support stereo");
+ return -1;
+ }
+ return 0;
+}
+
+int OpenSLESRecorder::Terminate() {
+ ALOGD("Terminate[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ StopRecording();
+ return 0;
+}
+
+int OpenSLESRecorder::InitRecording() {
+ ALOGD("InitRecording[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!recording_);
+ if (!ObtainEngineInterface()) {
+ ALOGE("Failed to obtain SL Engine interface");
+ return -1;
+ }
+ CreateAudioRecorder();
+ initialized_ = true;
+ buffer_index_ = 0;
+ return 0;
+}
+
+int OpenSLESRecorder::StartRecording() {
+ ALOGD("StartRecording[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(initialized_);
+ RTC_DCHECK(!recording_);
+ if (fine_audio_buffer_) {
+ fine_audio_buffer_->ResetRecord();
+ }
+ // Add buffers to the queue before changing state to SL_RECORDSTATE_RECORDING
+ // to ensure that recording starts as soon as the state is modified. On some
+ // devices, SLAndroidSimpleBufferQueue::Clear() used in Stop() does not flush
+ // the buffers as intended and we therefore check the number of buffers
+ // already queued first. Enqueue() can return SL_RESULT_BUFFER_INSUFFICIENT
+ // otherwise.
+ int num_buffers_in_queue = GetBufferCount();
+ for (int i = 0; i < kNumOfOpenSLESBuffers - num_buffers_in_queue; ++i) {
+ if (!EnqueueAudioBuffer()) {
+ recording_ = false;
+ return -1;
+ }
+ }
+ num_buffers_in_queue = GetBufferCount();
+ RTC_DCHECK_EQ(num_buffers_in_queue, kNumOfOpenSLESBuffers);
+ LogBufferState();
+ // Start audio recording by changing the state to SL_RECORDSTATE_RECORDING.
+ // Given that buffers are already enqueued, recording should start at once.
+ // The macro returns -1 if recording fails to start.
+ last_rec_time_ = rtc::Time();
+ if (LOG_ON_ERROR(
+ (*recorder_)->SetRecordState(recorder_, SL_RECORDSTATE_RECORDING))) {
+ return -1;
+ }
+ recording_ = (GetRecordState() == SL_RECORDSTATE_RECORDING);
+ RTC_DCHECK(recording_);
+ return 0;
+}
+
+int OpenSLESRecorder::StopRecording() {
+ ALOGD("StopRecording[tid=%d]", rtc::CurrentThreadId());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (!initialized_ || !recording_) {
+ return 0;
+ }
+ // Stop recording by setting the record state to SL_RECORDSTATE_STOPPED.
+ if (LOG_ON_ERROR(
+ (*recorder_)->SetRecordState(recorder_, SL_RECORDSTATE_STOPPED))) {
+ return -1;
+ }
+ // Clear the buffer queue to get rid of old data when resuming recording.
+ if (LOG_ON_ERROR((*simple_buffer_queue_)->Clear(simple_buffer_queue_))) {
+ return -1;
+ }
+ thread_checker_opensles_.DetachFromThread();
+ initialized_ = false;
+ recording_ = false;
+ return 0;
+}
+
+void OpenSLESRecorder::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) {
+ ALOGD("AttachAudioBuffer");
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_CHECK(audio_buffer);
+ audio_device_buffer_ = audio_buffer;
+ // Ensure that the audio device buffer is informed about the native sample
+ // rate used on the recording side.
+ const int sample_rate_hz = audio_parameters_.sample_rate();
+ ALOGD("SetRecordingSampleRate(%d)", sample_rate_hz);
+ audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz);
+ // Ensure that the audio device buffer is informed about the number of
+ // channels preferred by the OS on the recording side.
+ const size_t channels = audio_parameters_.channels();
+ ALOGD("SetRecordingChannels(%" PRIuS ")", channels);
+ audio_device_buffer_->SetRecordingChannels(channels);
+ // Allocate memory for internal data buffers given existing audio parameters.
+ AllocateDataBuffers();
+}
+
+int OpenSLESRecorder::EnableBuiltInAEC(bool enable) {
+ ALOGD("EnableBuiltInAEC(%d)", enable);
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ ALOGE("Not implemented");
+ return 0;
+}
+
+int OpenSLESRecorder::EnableBuiltInAGC(bool enable) {
+ ALOGD("EnableBuiltInAGC(%d)", enable);
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ ALOGE("Not implemented");
+ return 0;
+}
+
+int OpenSLESRecorder::EnableBuiltInNS(bool enable) {
+ ALOGD("EnableBuiltInNS(%d)", enable);
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ ALOGE("Not implemented");
+ return 0;
+}
+
+bool OpenSLESRecorder::ObtainEngineInterface() {
+ ALOGD("ObtainEngineInterface");
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (engine_)
+ return true;
+ // Get access to (or create if not already existing) the global OpenSL Engine
+ // object.
+ SLObjectItf engine_object = audio_manager_->GetOpenSLEngine();
+ if (engine_object == nullptr) {
+ ALOGE("Failed to access the global OpenSL engine");
+ return false;
+ }
+ // Get the SL Engine Interface which is implicit.
+ if (LOG_ON_ERROR(
+ (*engine_object)
+ ->GetInterface(engine_object, SL_IID_ENGINE, &engine_))) {
+ return false;
+ }
+ return true;
+}
+
+bool OpenSLESRecorder::CreateAudioRecorder() {
+ ALOGD("CreateAudioRecorder");
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (recorder_object_.Get())
+ return true;
+ RTC_DCHECK(!recorder_);
+ RTC_DCHECK(!simple_buffer_queue_);
+
+ // Audio source configuration.
+ SLDataLocator_IODevice mic_locator = {SL_DATALOCATOR_IODEVICE,
+ SL_IODEVICE_AUDIOINPUT,
+ SL_DEFAULTDEVICEID_AUDIOINPUT, NULL};
+ SLDataSource audio_source = {&mic_locator, NULL};
+
+ // Audio sink configuration.
+ SLDataLocator_AndroidSimpleBufferQueue buffer_queue = {
+ SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
+ static_cast(kNumOfOpenSLESBuffers)};
+ SLDataSink audio_sink = {&buffer_queue, &pcm_format_};
+
+ // Create the audio recorder object (requires the RECORD_AUDIO permission).
+ // Do not realize the recorder yet. Set the configuration first.
+ const SLInterfaceID interface_id[] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+ SL_IID_ANDROIDCONFIGURATION};
+ const SLboolean interface_required[] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
+ if (LOG_ON_ERROR((*engine_)->CreateAudioRecorder(
+ engine_, recorder_object_.Receive(), &audio_source, &audio_sink,
+ arraysize(interface_id), interface_id, interface_required))) {
+ return false;
+ }
+
+ // Configure the audio recorder (before it is realized).
+ SLAndroidConfigurationItf recorder_config;
+ if (LOG_ON_ERROR((recorder_object_->GetInterface(recorder_object_.Get(),
+ SL_IID_ANDROIDCONFIGURATION,
+ &recorder_config)))) {
+ return false;
+ }
+
+ // Use the default microphone preset tuned for audio communication.
+ // Note that SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION leads to a fast
+ // track but also excludes usage of required effects like AEC, AGC and NS;
+ // SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION keeps them available.
+ SLint32 stream_type = SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION;
+ if (LOG_ON_ERROR(((*recorder_config)
+ ->SetConfiguration(recorder_config,
+ SL_ANDROID_KEY_RECORDING_PRESET,
+ &stream_type, sizeof(SLint32))))) {
+ return false;
+ }
+
+ // The audio recorder can now be realized (in synchronous mode).
+ if (LOG_ON_ERROR((recorder_object_->Realize(recorder_object_.Get(),
+ SL_BOOLEAN_FALSE)))) {
+ return false;
+ }
+
+ // Get the implicit recorder interface (SL_IID_RECORD).
+ if (LOG_ON_ERROR((recorder_object_->GetInterface(
+ recorder_object_.Get(), SL_IID_RECORD, &recorder_)))) {
+ return false;
+ }
+
+ // Get the simple buffer queue interface (SL_IID_ANDROIDSIMPLEBUFFERQUEUE).
+ // It was explicitly requested.
+ if (LOG_ON_ERROR((recorder_object_->GetInterface(
+ recorder_object_.Get(), SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+ &simple_buffer_queue_)))) {
+ return false;
+ }
+
+ // Register the input callback for the simple buffer queue.
+ // This callback will be called when receiving new data from the device.
+ if (LOG_ON_ERROR(((*simple_buffer_queue_)
+ ->RegisterCallback(simple_buffer_queue_,
+ SimpleBufferQueueCallback, this)))) {
+ return false;
+ }
+ return true;
+}
+
+void OpenSLESRecorder::DestroyAudioRecorder() {
+ ALOGD("DestroyAudioRecorder");
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (!recorder_object_.Get())
+ return;
+ (*simple_buffer_queue_)
+ ->RegisterCallback(simple_buffer_queue_, nullptr, nullptr);
+ recorder_object_.Reset();
+ recorder_ = nullptr;
+ simple_buffer_queue_ = nullptr;
+}
+
+void OpenSLESRecorder::SimpleBufferQueueCallback(
+ SLAndroidSimpleBufferQueueItf buffer_queue,
+ void* context) {
+ OpenSLESRecorder* stream = static_cast<OpenSLESRecorder*>(context);
+ stream->ReadBufferQueue();
+}
+
+void OpenSLESRecorder::AllocateDataBuffers() {
+ ALOGD("AllocateDataBuffers");
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!simple_buffer_queue_);
+ RTC_CHECK(audio_device_buffer_);
+ // Create a modified audio buffer class which allows us to deliver any number
+ // of samples (and not only multiple of 10ms) to match the native audio unit
+ // buffer size.
+ ALOGD("frames per native buffer: %" PRIuS,
+ audio_parameters_.frames_per_buffer());
+ ALOGD("frames per 10ms buffer: %" PRIuS,
+ audio_parameters_.frames_per_10ms_buffer());
+ ALOGD("bytes per native buffer: %" PRIuS,
+ audio_parameters_.GetBytesPerBuffer());
+ ALOGD("native sample rate: %d", audio_parameters_.sample_rate());
+ RTC_DCHECK(audio_device_buffer_);
+ fine_audio_buffer_.reset(
+ new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(),
+ 2 * audio_parameters_.GetBytesPerBuffer()));
+ // Allocate queue of audio buffers that stores recorded audio samples.
+ const int data_size_bytes = audio_parameters_.GetBytesPerBuffer();
+ audio_buffers_.reset(new std::unique_ptr<SLint8[]>[kNumOfOpenSLESBuffers]);
+ for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
+ audio_buffers_[i].reset(new SLint8[data_size_bytes]);
+ }
+}
+
+void OpenSLESRecorder::ReadBufferQueue() {
+ RTC_DCHECK(thread_checker_opensles_.CalledOnValidThread());
+ SLuint32 state = GetRecordState();
+ if (state != SL_RECORDSTATE_RECORDING) {
+ ALOGW("Buffer callback in non-recording state!");
+ return;
+ }
+ // Check delta time between two successive callbacks and provide a warning
+ // if it becomes very large.
+ // TODO(henrika): using 150ms as upper limit but this value is rather random.
+ const uint32_t current_time = rtc::Time();
+ const uint32_t diff = current_time - last_rec_time_;
+ if (diff > 150) {
+ ALOGW("Bad OpenSL ES record timing, dT=%u [ms]", diff);
+ }
+ last_rec_time_ = current_time;
+ // Send recorded audio data to the WebRTC sink.
+ // TODO(henrika): fix delay estimates. It is OK to use fixed values for now
+ // since there is no support to turn off built-in EC in combination with
+ // OpenSL ES anyhow. Hence, as is, the WebRTC based AEC (which would use
+ // these estimates) will never be active.
+ const size_t size_in_bytes =
+ static_cast<size_t>(audio_parameters_.GetBytesPerBuffer());
+ const int8_t* data =
+ static_cast<const int8_t*>(audio_buffers_[buffer_index_].get());
+ fine_audio_buffer_->DeliverRecordedData(
+ rtc::ArrayView<const int8_t>(data, size_in_bytes), 25);
+ // Enqueue the utilized audio buffer and use it for recording again.
+ EnqueueAudioBuffer();
+}
+
+bool OpenSLESRecorder::EnqueueAudioBuffer() {
+ SLresult err =
+ (*simple_buffer_queue_)
+ ->Enqueue(simple_buffer_queue_, audio_buffers_[buffer_index_].get(),
+ audio_parameters_.GetBytesPerBuffer());
+ if (SL_RESULT_SUCCESS != err) {
+ ALOGE("Enqueue failed: %s", GetSLErrorString(err));
+ return false;
+ }
+ buffer_index_ = (buffer_index_ + 1) % kNumOfOpenSLESBuffers;
+ return true;
+}
+
+SLuint32 OpenSLESRecorder::GetRecordState() const {
+ RTC_DCHECK(recorder_);
+ SLuint32 state;
+ SLresult err = (*recorder_)->GetRecordState(recorder_, &state);
+ if (SL_RESULT_SUCCESS != err) {
+ ALOGE("GetRecordState failed: %s", GetSLErrorString(err));
+ }
+ return state;
+}
+
+SLAndroidSimpleBufferQueueState OpenSLESRecorder::GetBufferQueueState() const {
+ RTC_DCHECK(simple_buffer_queue_);
+ // state.count: Number of buffers currently in the queue.
+ // state.index: Index of the currently filling buffer. This is a linear index
+ // that keeps a cumulative count of the number of buffers recorded.
+ SLAndroidSimpleBufferQueueState state;
+ SLresult err =
+ (*simple_buffer_queue_)->GetState(simple_buffer_queue_, &state);
+ if (SL_RESULT_SUCCESS != err) {
+ ALOGE("GetState failed: %s", GetSLErrorString(err));
+ }
+ return state;
+}
+
+void OpenSLESRecorder::LogBufferState() const {
+ SLAndroidSimpleBufferQueueState state = GetBufferQueueState();
+ ALOGD("state.count:%d state.index:%d", state.count, state.index);
+}
+
+SLuint32 OpenSLESRecorder::GetBufferCount() {
+ SLAndroidSimpleBufferQueueState state = GetBufferQueueState();
+ return state.count;
+}
+
+} // namespace android_adm
+
+} // namespace webrtc
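StartRecording() above tops the queue up to exactly kNumOfOpenSLESBuffers entries before flipping the record state, since Clear() may leave stale buffers behind on some devices. A miniature model of that top-up logic (illustrative only; the helpers stand in for the real buffer-queue calls):

```cpp
#include <cassert>

constexpr int kNumOfOpenSLESBuffers = 2;

// Stand-in for the priming loop: enqueue only the difference between the
// target count and whatever Clear() failed to flush on the previous stop.
int TopUpQueue(int buffers_in_queue) {
  for (int i = 0; i < kNumOfOpenSLESBuffers - buffers_in_queue; ++i) {
    ++buffers_in_queue;  // Stands in for a successful EnqueueAudioBuffer().
  }
  return buffers_in_queue;
}

int main() {
  assert(TopUpQueue(0) == kNumOfOpenSLESBuffers);  // Normal start.
  assert(TopUpQueue(1) == kNumOfOpenSLESBuffers);  // Stale buffer left behind.
  return 0;
}
```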
diff --git a/sdk/android/src/jni/audio_device/opensles_recorder.h b/sdk/android/src/jni/audio_device/opensles_recorder.h
new file mode 100644
index 0000000000..5a6712ccaa
--- /dev/null
+++ b/sdk/android/src/jni/audio_device/opensles_recorder.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_OPENSLES_RECORDER_H_
+#define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_OPENSLES_RECORDER_H_
+
+#include <SLES/OpenSLES.h>
+#include <SLES/OpenSLES_Android.h>
+#include <SLES/OpenSLES_AndroidConfiguration.h>
+
+#include <memory>
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/utility/include/helpers_android.h"
+#include "rtc_base/thread_checker.h"
+#include "sdk/android/src/jni/audio_device/audio_common.h"
+#include "sdk/android/src/jni/audio_device/audio_manager.h"
+#include "sdk/android/src/jni/audio_device/opensles_common.h"
+
+namespace webrtc {
+
+class FineAudioBuffer;
+
+namespace android_adm {
+
+// Implements 16-bit mono PCM audio input support for Android using the
+// C based OpenSL ES API. No calls from C/C++ to Java using JNI are made.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread. Recorded audio
+// buffers are provided on a dedicated internal thread managed by the OpenSL
+// ES layer.
+//
+// The existing design forces the user to call InitRecording() after
+// StopRecording() to be able to call StartRecording() again. This is in line
+// with how the Java-based implementation works.
+//
+// As of API level 21, lower latency audio input is supported on select devices.
+// To take advantage of this feature, first confirm that lower latency output is
+// available. The capability for lower latency output is a prerequisite for the
+// lower latency input feature. Then, create an AudioRecorder with the same
+// sample rate and buffer size as would be used for output. OpenSL ES interfaces
+// for input effects preclude the lower latency path.
+// See https://developer.android.com/ndk/guides/audio/opensl-prog-notes.html
+// for more details.
+class OpenSLESRecorder {
+ public:
+ // Beginning with API level 17 (Android 4.2), a buffer count of 2 or more is
+ // required for lower latency. Beginning with API level 18 (Android 4.3), a
+ // buffer count of 1 is sufficient for lower latency. In addition, the buffer
+ // size and sample rate must be compatible with the device's native input
+ // configuration provided via the audio manager at construction.
+ // TODO(henrika): perhaps set this value dynamically based on OS version.
+ static const int kNumOfOpenSLESBuffers = 2;
+
+ explicit OpenSLESRecorder(AudioManager* audio_manager);
+ ~OpenSLESRecorder();
+
+ int Init();
+ int Terminate();
+
+ int InitRecording();
+ bool RecordingIsInitialized() const { return initialized_; }
+
+ int StartRecording();
+ int StopRecording();
+ bool Recording() const { return recording_; }
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer);
+
+ // TODO(henrika): add support using OpenSL ES APIs when available.
+ int EnableBuiltInAEC(bool enable);
+ int EnableBuiltInAGC(bool enable);
+ int EnableBuiltInNS(bool enable);
+
+ private:
+ // Obtains the SL Engine Interface from the existing global Engine object.
+ // The interface exposes creation methods of all the OpenSL ES object types.
+ // This method defines the |engine_| member variable.
+ bool ObtainEngineInterface();
+
+ // Creates/destroys the audio recorder and the simple-buffer queue object.
+ bool CreateAudioRecorder();
+ void DestroyAudioRecorder();
+
+ // Allocate memory for audio buffers which will be used to capture audio
+ // via the SLAndroidSimpleBufferQueueItf interface.
+ void AllocateDataBuffers();
+
+ // These callback methods are called when data has been written to the input
+ // buffer queue. They are both called from an internal "OpenSL ES thread"
+ // which is not attached to the Dalvik VM.
+ static void SimpleBufferQueueCallback(SLAndroidSimpleBufferQueueItf caller,
+ void* context);
+ void ReadBufferQueue();
+
+ // Wraps calls to SLAndroidSimpleBufferQueueItf::Enqueue(). It can be
+ // called both on the main thread (but before recording has started) and from
+ // the internal audio thread while input streaming is active. It uses
+ // |simple_buffer_queue_| but no lock is needed since the initial calls from
+ // the main thread and the native callback thread are mutually exclusive.
+ bool EnqueueAudioBuffer();
+
+ // Returns the current recorder state.
+ SLuint32 GetRecordState() const;
+
+ // Returns the current buffer queue state.
+ SLAndroidSimpleBufferQueueState GetBufferQueueState() const;
+
+ // Number of buffers currently in the queue.
+ SLuint32 GetBufferCount();
+
+ // Prints a log message of the current queue state. Can be used for debugging
+ // purposes.
+ void LogBufferState() const;
+
+ // Ensures that methods are called from the same thread as this object is
+ // created on.
+ rtc::ThreadChecker thread_checker_;
+
+ // Stores thread ID in first call to SimpleBufferQueueCallback() from internal
+ // non-application thread which is not attached to the Dalvik JVM.
+ // Detached during construction of this object.
+ rtc::ThreadChecker thread_checker_opensles_;
+
+ // Raw pointer to the audio manager injected at construction. Used to cache
+ // audio parameters and to access the global SL engine object needed by the
+ // ObtainEngineInterface() method. The audio manager outlives any instance of
+ // this class.
+ AudioManager* const audio_manager_;
+
+ // Contains audio parameters provided to this class at construction by the
+ // AudioManager.
+ const AudioParameters audio_parameters_;
+
+ // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+ // AudioDeviceModuleImpl class and called by AudioDeviceModule::Create().
+ AudioDeviceBuffer* audio_device_buffer_;
+
+ // PCM-type format definition.
+ // TODO(henrika): add support for SLAndroidDataFormat_PCM_EX (android-21) if
+ // 32-bit float representation is needed.
+ SLDataFormat_PCM pcm_format_;
+
+ bool initialized_;
+ bool recording_;
+
+ // This interface exposes creation methods for all the OpenSL ES object types.
+ // It is the OpenSL ES API entry point.
+ SLEngineItf engine_;
+
+ // The audio recorder media object records audio to the destination specified
+ // by the data sink capturing it from the input specified by the data source.
+ ScopedSLObjectItf recorder_object_;
+
+ // This interface is supported on the audio recorder object and it controls
+ // the state of the audio recorder.
+ SLRecordItf recorder_;
+
+ // The Android Simple Buffer Queue interface is supported on the audio
+ // recorder. For recording, an app should enqueue empty buffers. When a
+ // registered callback sends notification that the system has finished writing
+ // data to the buffer, the app can read the buffer.
+ SLAndroidSimpleBufferQueueItf simple_buffer_queue_;
+
+ // Consumes audio of native buffer size and feeds the WebRTC layer with 10ms
+ // chunks of audio.
+ std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+ // Queue of audio buffers to be used by the recorder object for capturing
+ // audio. They will be used in a round-robin way and the size of each buffer
+ // is given by AudioParameters::GetBytesPerBuffer(), i.e., it corresponds to
+ // the native OpenSL ES buffer size.
+ std::unique_ptr<std::unique_ptr<SLint8[]>[]> audio_buffers_;
+
+ // Keeps track of active audio buffer 'n' in the audio_buffers_[n] queue.
+ // Example (kNumOfOpenSLESBuffers = 2): counts 0, 1, 0, 1, ...
+ int buffer_index_;
+
+ // Last time the OpenSL ES layer delivered recorded audio data.
+ uint32_t last_rec_time_;
+};
+
+} // namespace android_adm
+
+} // namespace webrtc
+
+#endif // SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_OPENSLES_RECORDER_H_
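The TODO above (in both the player and recorder headers) suggests choosing the buffer count from the OS version. A hedged sketch of what that could look like, reusing the SdkCode enum from build_info.h (SuggestedBufferCount is hypothetical):

```cpp
#include "sdk/android/src/jni/audio_device/build_info.h"

// Hypothetical policy for the TODO above: one buffer suffices for lower
// latency on API >= 18 (Android 4.3); two are required on API 17 (4.2).
int SuggestedBufferCount(webrtc::android_adm::SdkCode sdk) {
  return sdk >= webrtc::android_adm::SDK_CODE_JELLY_BEAN_MR2 ? 1 : 2;
}
```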
diff --git a/sdk/android/src/jni/pc/peerconnectionfactory.cc b/sdk/android/src/jni/pc/peerconnectionfactory.cc
index b251a17bfb..cf6459de5c 100644
--- a/sdk/android/src/jni/pc/peerconnectionfactory.cc
+++ b/sdk/android/src/jni/pc/peerconnectionfactory.cc
@@ -26,6 +26,7 @@
#include "rtc_base/stringutils.h"
#include "rtc_base/thread.h"
#include "sdk/android/generated_peerconnection_jni/jni/PeerConnectionFactory_jni.h"
+#include "sdk/android/native_api/audio_device_module/audio_device_android.h"
#include "sdk/android/native_api/jni/java_types.h"
#include "sdk/android/src/jni/jni_helpers.h"
#include "sdk/android/src/jni/pc/androidnetworkmonitor.h"
@@ -77,6 +78,9 @@ static char* field_trials_init_string = nullptr;
static bool factory_static_initialized = false;
static bool video_hw_acceleration_enabled = true;
+static const char* kExternalAndroidAudioDeviceFieldTrialName =
+ "WebRTC-ExternalAndroidAudioDevice";
+
void PeerConnectionFactoryNetworkThreadReady() {
RTC_LOG(LS_INFO) << "Network thread JavaCallback";
JNIEnv* env = AttachCurrentThreadIfNeeded();
@@ -211,7 +215,10 @@ jlong CreatePeerConnectionFactoryForJava(
rtc::NetworkMonitorFactory::SetFactory(network_monitor_factory);
}
- AudioDeviceModule* adm = nullptr;
+ rtc::scoped_refptr<AudioDeviceModule> adm =
+ field_trial::IsEnabled(kExternalAndroidAudioDeviceFieldTrialName)
+ ? CreateAndroidAudioDeviceModule()
+ : nullptr;
rtc::scoped_refptr<AudioMixer> audio_mixer = nullptr;
std::unique_ptr<CallFactoryInterface> call_factory(CreateCallFactory());
std::unique_ptr<RtcEventLogFactoryInterface> rtc_event_log_factory(
@@ -259,9 +266,8 @@ jlong CreatePeerConnectionFactoryForJava(
CreateVideoDecoderFactory(jni, jdecoder_factory));
}
- rtc::scoped_refptr<AudioDeviceModule> adm_scoped = nullptr;
media_engine.reset(CreateMediaEngine(
- adm_scoped, audio_encoder_factory, audio_decoder_factory,
+ adm, audio_encoder_factory, audio_decoder_factory,
std::move(video_encoder_factory), std::move(video_decoder_factory),
audio_mixer, audio_processor));
}
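The lookup above keys off the same string that the Java demo appends with the "/Enabled/" suffix. A minimal sketch of the round trip, assuming WebRTC's system_wrappers field-trial API (function name hypothetical):

```cpp
#include "system_wrappers/include/field_trial.h"
#include "system_wrappers/include/field_trial_default.h"

void FieldTrialRoundTripExample() {
  // What ends up installed when the app appends
  // "WebRTC-ExternalAndroidAudioDevice/Enabled/" to its field-trial string.
  // Note: the string must outlive all field-trial lookups.
  webrtc::field_trial::InitFieldTrialsFromString(
      "WebRTC-ExternalAndroidAudioDevice/Enabled/");
  // IsEnabled() reports true when the trial's group name starts with
  // "Enabled", so the external Android ADM path is taken above.
  const bool use_external_adm =
      webrtc::field_trial::IsEnabled("WebRTC-ExternalAndroidAudioDevice");
  (void)use_external_adm;  // true given the string installed above.
}
```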