Move android audio device code into sdk/android

This CL adds a stand-alone Android AudioDeviceModule in the
sdk/android folder. It's forked from modules/audio_device/android/
and then simplified for the Android case. The stand-alone Android
ADM is available in the native_api and, under a field trial, in the
Java API.

Bug: webrtc:7452
Change-Id: If6e558026bd0ccb52f56d78ac833339a5789d300
Reviewed-on: https://webrtc-review.googlesource.com/60541
Commit-Queue: Magnus Jedvert <magjed@webrtc.org>
Reviewed-by: Magnus Jedvert <magjed@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#22517}
Paulina Hensman
2018-03-20 15:27:52 +01:00
committed by Commit Bot
parent 4d22a6d8db
commit 89dd7bf924
37 changed files with 7491 additions and 39 deletions
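The CL does not spell out how the Java-side field trial is switched on, so the following is only a hedged sketch: the trial string "WebRTC-NewAudioDeviceModule/Enabled/" is a placeholder (the actual name is not given here), and the only real API assumed is PeerConnectionFactory's field-trial plumbing.

import android.content.Context;
import org.webrtc.PeerConnectionFactory;

public class AdmFieldTrialSketch {
  // Hypothetical opt-in: the actual trial string is not named in this CL.
  public static void optIntoStandaloneAdm(Context appContext) {
    PeerConnectionFactory.initialize(
        PeerConnectionFactory.InitializationOptions.builder(appContext)
            .setFieldTrials("WebRTC-NewAudioDeviceModule/Enabled/")
            .createInitializationOptions());
  }
}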

org/webrtc/audio/BuildInfo.java

@@ -0,0 +1,51 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
package org.webrtc.audio;
import android.os.Build;
public final class BuildInfo {
public static String getDevice() {
return Build.DEVICE;
}
public static String getDeviceModel() {
return Build.MODEL;
}
public static String getProduct() {
return Build.PRODUCT;
}
public static String getBrand() {
return Build.BRAND;
}
public static String getDeviceManufacturer() {
return Build.MANUFACTURER;
}
public static String getAndroidBuildId() {
return Build.ID;
}
public static String getBuildType() {
return Build.TYPE;
}
public static String getBuildRelease() {
return Build.VERSION.RELEASE;
}
public static int getSdkVersion() {
return Build.VERSION.SDK_INT;
}
}
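As a hedged illustration of how these getters are meant to be consumed, here is a small hypothetical helper (not part of the CL) that folds them into a single device-description log line.

package org.webrtc.audio;

import org.webrtc.Logging;

final class BuildInfoLogSketch {
  // Hypothetical helper: one readable log line from the BuildInfo getters.
  static void logDevice(String tag) {
    Logging.d(tag,
        BuildInfo.getBrand() + " " + BuildInfo.getDeviceModel()
            + " (" + BuildInfo.getDevice() + "), Android "
            + BuildInfo.getBuildRelease() + " (SDK " + BuildInfo.getSdkVersion()
            + "), build " + BuildInfo.getAndroidBuildId());
  }
}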

org/webrtc/audio/WebRtcAudioEffects.java

@@ -0,0 +1,323 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
package org.webrtc.audio;
import android.annotation.TargetApi;
import android.media.audiofx.AcousticEchoCanceler;
import android.media.audiofx.AudioEffect;
import android.media.audiofx.AudioEffect.Descriptor;
import android.media.audiofx.AutomaticGainControl;
import android.media.audiofx.NoiseSuppressor;
import android.os.Build;
import java.util.List;
import java.util.UUID;
import org.webrtc.Logging;
// This class wraps control of two different platform effects. Supported
// effects are: AcousticEchoCanceler (AEC) and NoiseSuppressor (NS).
// Calling enable() will activate all effects that are
// supported by the device if the corresponding |shouldEnableXXX| member is set.
class WebRtcAudioEffects {
private static final boolean DEBUG = false;
private static final String TAG = "WebRtcAudioEffects";
// UUIDs for Software Audio Effects that we want to avoid using.
// The implementor field will be set to "The Android Open Source Project".
private static final UUID AOSP_ACOUSTIC_ECHO_CANCELER =
UUID.fromString("bb392ec0-8d4d-11e0-a896-0002a5d5c51b");
private static final UUID AOSP_NOISE_SUPPRESSOR =
UUID.fromString("c06c8400-8e06-11e0-9cb6-0002a5d5c51b");
// Contains the available effect descriptors returned from the
// AudioEffect.getEffects() call. This result is cached to avoid doing the
// slow OS call multiple times.
private static Descriptor[] cachedEffects = null;
// Contains the audio effect objects. Created in enable() and destroyed
// in release().
private AcousticEchoCanceler aec = null;
private NoiseSuppressor ns = null;
// Affects the final state given to the setEnabled() method on each effect.
// The default state is set to "disabled" but each effect can also be enabled
// by calling setAEC() and setNS().
// To enable an effect, both the shouldEnableXXX member and the static
// canUseXXX() must be true.
private boolean shouldEnableAec = false;
private boolean shouldEnableNs = false;
// Checks if the device implements Acoustic Echo Cancellation (AEC).
// Returns true if the device implements AEC, false otherwise.
public static boolean isAcousticEchoCancelerSupported() {
// Note: we're using isAcousticEchoCancelerEffectAvailable() instead of
// AcousticEchoCanceler.isAvailable() to avoid the expensive getEffects()
// OS API call.
return isAcousticEchoCancelerEffectAvailable();
}
// Checks if the device implements Noise Suppression (NS).
// Returns true if the device implements NS, false otherwise.
public static boolean isNoiseSuppressorSupported() {
// Note: we're using isNoiseSuppressorEffectAvailable() instead of
// NoiseSuppressor.isAvailable() to avoid the expensive getEffects()
// OS API call.
return isNoiseSuppressorEffectAvailable();
}
// Returns true if the device is blacklisted for HW AEC usage.
public static boolean isAcousticEchoCancelerBlacklisted() {
List<String> blackListedModels = WebRtcAudioUtils.getBlackListedModelsForAecUsage();
boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
if (isBlacklisted) {
Logging.w(TAG, Build.MODEL + " is blacklisted for HW AEC usage!");
}
return isBlacklisted;
}
// Returns true if the device is blacklisted for HW NS usage.
public static boolean isNoiseSuppressorBlacklisted() {
List<String> blackListedModels = WebRtcAudioUtils.getBlackListedModelsForNsUsage();
boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
if (isBlacklisted) {
Logging.w(TAG, Build.MODEL + " is blacklisted for HW NS usage!");
}
return isBlacklisted;
}
// Returns true if the platform AEC should be excluded based on its UUID.
// AudioEffect.queryEffects() can throw IllegalStateException.
@TargetApi(18)
private static boolean isAcousticEchoCancelerExcludedByUUID() {
for (Descriptor d : getAvailableEffects()) {
if (d.type.equals(AudioEffect.EFFECT_TYPE_AEC)
&& d.uuid.equals(AOSP_ACOUSTIC_ECHO_CANCELER)) {
return true;
}
}
return false;
}
// Returns true if the platform NS should be excluded based on its UUID.
// AudioEffect.queryEffects() can throw IllegalStateException.
@TargetApi(18)
private static boolean isNoiseSuppressorExcludedByUUID() {
for (Descriptor d : getAvailableEffects()) {
if (d.type.equals(AudioEffect.EFFECT_TYPE_NS) && d.uuid.equals(AOSP_NOISE_SUPPRESSOR)) {
return true;
}
}
return false;
}
// Returns true if the device supports Acoustic Echo Cancellation (AEC).
@TargetApi(18)
private static boolean isAcousticEchoCancelerEffectAvailable() {
return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_AEC);
}
// Returns true if the device supports Noise Suppression (NS).
@TargetApi(18)
private static boolean isNoiseSuppressorEffectAvailable() {
return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_NS);
}
// Returns true if all conditions for supporting the HW AEC are fulfilled.
// It will not be possible to enable the HW AEC if this method returns false.
public static boolean canUseAcousticEchoCanceler() {
boolean canUseAcousticEchoCanceler = isAcousticEchoCancelerSupported()
&& !WebRtcAudioUtils.useWebRtcBasedAcousticEchoCanceler()
&& !isAcousticEchoCancelerBlacklisted() && !isAcousticEchoCancelerExcludedByUUID();
Logging.d(TAG, "canUseAcousticEchoCanceler: " + canUseAcousticEchoCanceler);
return canUseAcousticEchoCanceler;
}
// Returns true if all conditions for supporting the HW NS are fulfilled.
// It will not be possible to enable the HW NS if this method returns false.
public static boolean canUseNoiseSuppressor() {
boolean canUseNoiseSuppressor = isNoiseSuppressorSupported()
&& !WebRtcAudioUtils.useWebRtcBasedNoiseSuppressor() && !isNoiseSuppressorBlacklisted()
&& !isNoiseSuppressorExcludedByUUID();
Logging.d(TAG, "canUseNoiseSuppressor: " + canUseNoiseSuppressor);
return canUseNoiseSuppressor;
}
public static WebRtcAudioEffects create() {
return new WebRtcAudioEffects();
}
private WebRtcAudioEffects() {
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
}
// Call this method to enable or disable the platform AEC. It modifies
// |shouldEnableAec| which is used in enable() where the actual state
// of the AEC effect is modified. Returns true if HW AEC is supported and
// false otherwise.
public boolean setAEC(boolean enable) {
Logging.d(TAG, "setAEC(" + enable + ")");
if (!canUseAcousticEchoCanceler()) {
Logging.w(TAG, "Platform AEC is not supported");
shouldEnableAec = false;
return false;
}
if (aec != null && (enable != shouldEnableAec)) {
Logging.e(TAG, "Platform AEC state can't be modified while recording");
return false;
}
shouldEnableAec = enable;
return true;
}
// Call this method to enable or disable the platform NS. It modifies
// |shouldEnableNs| which is used in enable() where the actual state
// of the NS effect is modified. Returns true if HW NS is supported and
// false otherwise.
public boolean setNS(boolean enable) {
Logging.d(TAG, "setNS(" + enable + ")");
if (!canUseNoiseSuppressor()) {
Logging.w(TAG, "Platform NS is not supported");
shouldEnableNs = false;
return false;
}
if (ns != null && (enable != shouldEnableNs)) {
Logging.e(TAG, "Platform NS state can't be modified while recording");
return false;
}
shouldEnableNs = enable;
return true;
}
public void enable(int audioSession) {
Logging.d(TAG, "enable(audioSession=" + audioSession + ")");
assertTrue(aec == null);
assertTrue(ns == null);
if (DEBUG) {
// Add logging of supported effects but filter out "VoIP effects", i.e.,
// AEC and NS. Avoid calling AudioEffect.queryEffects() unless the
// DEBUG flag is set since we have seen crashes in this API.
for (Descriptor d : AudioEffect.queryEffects()) {
if (effectTypeIsVoIP(d.type)) {
Logging.d(TAG,
"name: " + d.name + ", "
+ "mode: " + d.connectMode + ", "
+ "implementor: " + d.implementor + ", "
+ "UUID: " + d.uuid);
}
}
}
if (isAcousticEchoCancelerSupported()) {
// Create an AcousticEchoCanceler and attach it to the AudioRecord on
// the specified audio session.
aec = AcousticEchoCanceler.create(audioSession);
if (aec != null) {
boolean enabled = aec.getEnabled();
boolean enable = shouldEnableAec && canUseAcousticEchoCanceler();
if (aec.setEnabled(enable) != AudioEffect.SUCCESS) {
Logging.e(TAG, "Failed to set the AcousticEchoCanceler state");
}
Logging.d(TAG,
"AcousticEchoCanceler: was " + (enabled ? "enabled" : "disabled") + ", enable: "
+ enable + ", is now: " + (aec.getEnabled() ? "enabled" : "disabled"));
} else {
Logging.e(TAG, "Failed to create the AcousticEchoCanceler instance");
}
}
if (isNoiseSuppressorSupported()) {
// Create a NoiseSuppressor and attach it to the AudioRecord on the
// specified audio session.
ns = NoiseSuppressor.create(audioSession);
if (ns != null) {
boolean enabled = ns.getEnabled();
boolean enable = shouldEnableNs && canUseNoiseSuppressor();
if (ns.setEnabled(enable) != AudioEffect.SUCCESS) {
Logging.e(TAG, "Failed to set the NoiseSuppressor state");
}
Logging.d(TAG,
"NoiseSuppressor: was " + (enabled ? "enabled" : "disabled") + ", enable: " + enable
+ ", is now: " + (ns.getEnabled() ? "enabled" : "disabled"));
} else {
Logging.e(TAG, "Failed to create the NoiseSuppressor instance");
}
}
}
// Releases all native audio effect resources. It is a good practice to
// release the effect engine when not in use as control can be returned
// to other applications or the native resources released.
public void release() {
Logging.d(TAG, "release");
if (aec != null) {
aec.release();
aec = null;
}
if (ns != null) {
ns.release();
ns = null;
}
}
// Returns true for effect types in |type| that are of "VoIP" types:
// Acoustic Echo Canceler (AEC) or Noise Suppressor (NS). Note that an extra
// check for support is needed in each comparison since some devices include
// effects in the AudioEffect.Descriptor array that are actually not available
// on the device. As an example: Samsung Galaxy S6 includes an AGC in the
// descriptor but AutomaticGainControl.isAvailable() returns false.
@TargetApi(18)
private boolean effectTypeIsVoIP(UUID type) {
if (!WebRtcAudioUtils.runningOnJellyBeanMR2OrHigher())
return false;
return (AudioEffect.EFFECT_TYPE_AEC.equals(type) && isAcousticEchoCancelerSupported())
|| (AudioEffect.EFFECT_TYPE_NS.equals(type) && isNoiseSuppressorSupported());
}
// Helper method which throws an exception when an assertion has failed.
private static void assertTrue(boolean condition) {
if (!condition) {
throw new AssertionError("Expected condition to be true");
}
}
// Returns the cached copy of the audio effects array, if available, or
// queries the operating system for the list of effects.
private static Descriptor[] getAvailableEffects() {
if (cachedEffects != null) {
return cachedEffects;
}
// The caching is best effort only - if this method is called from several
// threads in parallel, they may end up doing the underlying OS call
// multiple times. It's normally only called on one thread so there's no
// real need to optimize for the multiple threads case.
cachedEffects = AudioEffect.queryEffects();
return cachedEffects;
}
// Returns true if an effect of the specified type is available. Functionally
// equivalent to (NoiseSuppressor|AutomaticGainControl|...).isAvailable(), but
// faster as it avoids the expensive OS call to enumerate effects.
private static boolean isEffectTypeAvailable(UUID effectType) {
Descriptor[] effects = getAvailableEffects();
if (effects == null) {
return false;
}
for (Descriptor d : effects) {
if (d.type.equals(effectType)) {
return true;
}
}
return false;
}
}
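To make the setAEC()/setNS()/enable()/release() lifecycle described in the class comment concrete, here is a minimal usage sketch. It is hypothetical (WebRtcAudioRecord drives this sequence internally) and assumes the caller owns an AudioRecord with a valid audio session ID.

package org.webrtc.audio;

import android.media.AudioRecord;

final class WebRtcAudioEffectsUsageSketch {
  // Hypothetical call order; mirrors what WebRtcAudioRecord.initRecording() does.
  static WebRtcAudioEffects attachEffects(AudioRecord audioRecord) {
    WebRtcAudioEffects effects = WebRtcAudioEffects.create();
    effects.setAEC(true); // honored only if canUseAcousticEchoCanceler() is true
    effects.setNS(true);  // honored only if canUseNoiseSuppressor() is true
    effects.enable(audioRecord.getAudioSessionId());
    return effects; // caller must call effects.release() when recording stops
  }
}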

org/webrtc/audio/WebRtcAudioManager.java

@@ -0,0 +1,383 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
package org.webrtc.audio;
import android.annotation.TargetApi;
import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioTrack;
import android.os.Build;
import java.util.Timer;
import java.util.TimerTask;
import org.webrtc.ContextUtils;
import org.webrtc.Logging;
// WebRtcAudioManager handles tasks that use android.media.AudioManager.
// At construction, storeAudioParameters() is called and it retrieves
// fundamental audio parameters like native sample rate and number of channels.
// The result is then provided to the caller by nativeCacheAudioParameters().
// It is also possible to call init() to set up the audio environment for best
// possible "VoIP performance". All settings done in init() are reverted by
// dispose(). This class can also be used without calling init() if the user
// prefers to set up the audio environment separately. However, it is
// recommended to always use AudioManager.MODE_IN_COMMUNICATION.
class WebRtcAudioManager {
private static final boolean DEBUG = false;
private static final String TAG = "WebRtcAudioManager";
// TODO(bugs.webrtc.org/8914): disabled by default until AAudio support has
// been completed. Goal is to always return false on Android O MR1 and higher.
private static final boolean blacklistDeviceForAAudioUsage = true;
// Use mono as default for both audio directions.
private static boolean useStereoOutput = false;
private static boolean useStereoInput = false;
private static boolean blacklistDeviceForOpenSLESUsage = false;
private static boolean blacklistDeviceForOpenSLESUsageIsOverridden = false;
// Call this method to override the default list of blacklisted devices
// specified in WebRtcAudioUtils.BLACKLISTED_OPEN_SL_ES_MODELS.
// Allows an app to take control over which devices to exclude from using
// the OpenSL ES audio output path
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized void setBlacklistDeviceForOpenSLESUsage(boolean enable) {
blacklistDeviceForOpenSLESUsageIsOverridden = true;
blacklistDeviceForOpenSLESUsage = enable;
}
// Call these methods to override the default mono audio modes for the specified direction(s)
// (input and/or output).
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized void setStereoOutput(boolean enable) {
Logging.w(TAG, "Overriding default output behavior: setStereoOutput(" + enable + ')');
useStereoOutput = enable;
}
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized void setStereoInput(boolean enable) {
Logging.w(TAG, "Overriding default input behavior: setStereoInput(" + enable + ')');
useStereoInput = enable;
}
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized boolean getStereoOutput() {
return useStereoOutput;
}
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized boolean getStereoInput() {
return useStereoInput;
}
// Default audio data format is PCM 16 bit per sample.
// Guaranteed to be supported by all devices.
private static final int BITS_PER_SAMPLE = 16;
private static final int DEFAULT_FRAME_PER_BUFFER = 256;
// Private utility class that periodically checks and logs the volume level
// of the audio stream that is currently controlled by the volume control.
// A timer triggers logs once every 30 seconds and the timer's associated
// thread is named "WebRtcVolumeLevelLoggerThread".
private static class VolumeLogger {
private static final String THREAD_NAME = "WebRtcVolumeLevelLoggerThread";
private static final int TIMER_PERIOD_IN_SECONDS = 30;
private final AudioManager audioManager;
private Timer timer;
public VolumeLogger(AudioManager audioManager) {
this.audioManager = audioManager;
}
public void start() {
timer = new Timer(THREAD_NAME);
timer.schedule(new LogVolumeTask(audioManager.getStreamMaxVolume(AudioManager.STREAM_RING),
audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL)),
0, TIMER_PERIOD_IN_SECONDS * 1000);
}
private class LogVolumeTask extends TimerTask {
private final int maxRingVolume;
private final int maxVoiceCallVolume;
LogVolumeTask(int maxRingVolume, int maxVoiceCallVolume) {
this.maxRingVolume = maxRingVolume;
this.maxVoiceCallVolume = maxVoiceCallVolume;
}
@Override
public void run() {
final int mode = audioManager.getMode();
if (mode == AudioManager.MODE_RINGTONE) {
Logging.d(TAG,
"STREAM_RING stream volume: " + audioManager.getStreamVolume(AudioManager.STREAM_RING)
+ " (max=" + maxRingVolume + ")");
} else if (mode == AudioManager.MODE_IN_COMMUNICATION) {
Logging.d(TAG,
"VOICE_CALL stream volume: "
+ audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL)
+ " (max=" + maxVoiceCallVolume + ")");
}
}
}
private void stop() {
if (timer != null) {
timer.cancel();
timer = null;
}
}
}
private final long nativeAudioManager;
private final AudioManager audioManager;
private boolean initialized = false;
private int nativeSampleRate;
private int nativeChannels;
private boolean hardwareAEC;
private boolean hardwareAGC;
private boolean hardwareNS;
private boolean lowLatencyOutput;
private boolean lowLatencyInput;
private boolean proAudio;
private boolean aAudio;
private int sampleRate;
private int outputChannels;
private int inputChannels;
private int outputBufferSize;
private int inputBufferSize;
private final VolumeLogger volumeLogger;
WebRtcAudioManager(long nativeAudioManager) {
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
this.nativeAudioManager = nativeAudioManager;
audioManager =
(AudioManager) ContextUtils.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
if (DEBUG) {
WebRtcAudioUtils.logDeviceInfo(TAG);
}
volumeLogger = new VolumeLogger(audioManager);
storeAudioParameters();
nativeCacheAudioParameters(sampleRate, outputChannels, inputChannels, hardwareAEC, hardwareAGC,
hardwareNS, lowLatencyOutput, lowLatencyInput, proAudio, aAudio, outputBufferSize,
inputBufferSize, nativeAudioManager);
WebRtcAudioUtils.logAudioState(TAG);
}
private boolean init() {
Logging.d(TAG, "init" + WebRtcAudioUtils.getThreadInfo());
if (initialized) {
return true;
}
Logging.d(TAG, "audio mode is: " + WebRtcAudioUtils.modeToString(audioManager.getMode()));
initialized = true;
volumeLogger.start();
return true;
}
private void dispose() {
Logging.d(TAG, "dispose" + WebRtcAudioUtils.getThreadInfo());
if (!initialized) {
return;
}
volumeLogger.stop();
}
private boolean isCommunicationModeEnabled() {
return (audioManager.getMode() == AudioManager.MODE_IN_COMMUNICATION);
}
private boolean isDeviceBlacklistedForOpenSLESUsage() {
boolean blacklisted = blacklistDeviceForOpenSLESUsageIsOverridden
? blacklistDeviceForOpenSLESUsage
: WebRtcAudioUtils.deviceIsBlacklistedForOpenSLESUsage();
if (blacklisted) {
Logging.d(TAG, Build.MODEL + " is blacklisted for OpenSL ES usage!");
}
return blacklisted;
}
private void storeAudioParameters() {
outputChannels = getStereoOutput() ? 2 : 1;
inputChannels = getStereoInput() ? 2 : 1;
sampleRate = getNativeOutputSampleRate();
hardwareAEC = isAcousticEchoCancelerSupported();
// TODO(henrika): use of hardware AGC is no longer supported. Currently
// hardcoded to false. To be removed.
hardwareAGC = false;
hardwareNS = isNoiseSuppressorSupported();
lowLatencyOutput = isLowLatencyOutputSupported();
lowLatencyInput = isLowLatencyInputSupported();
proAudio = isProAudioSupported();
aAudio = isAAudioSupported();
outputBufferSize = lowLatencyOutput ? getLowLatencyOutputFramesPerBuffer()
: getMinOutputFrameSize(sampleRate, outputChannels);
inputBufferSize = lowLatencyInput ? getLowLatencyInputFramesPerBuffer()
: getMinInputFrameSize(sampleRate, inputChannels);
}
// Returns true if the device has an earpiece (checked via FEATURE_TELEPHONY).
private boolean hasEarpiece() {
return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
PackageManager.FEATURE_TELEPHONY);
}
// Returns true if low-latency audio output is supported.
private boolean isLowLatencyOutputSupported() {
return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
PackageManager.FEATURE_AUDIO_LOW_LATENCY);
}
// Returns true if low-latency audio input is supported.
// TODO(henrika): remove the hardcoded false return value when OpenSL ES
// input performance has been evaluated and tested more.
public boolean isLowLatencyInputSupported() {
// TODO(henrika): investigate if some sort of device list is needed here
// as well. The NDK doc states that: "As of API level 21, lower latency
// audio input is supported on select devices. To take advantage of this
// feature, first confirm that lower latency output is available".
return WebRtcAudioUtils.runningOnLollipopOrHigher() && isLowLatencyOutputSupported();
}
// Returns true if the device has professional audio level of functionality
// and therefore supports the lowest possible round-trip latency.
@TargetApi(23)
private boolean isProAudioSupported() {
return WebRtcAudioUtils.runningOnMarshmallowOrHigher()
&& ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
PackageManager.FEATURE_AUDIO_PRO);
}
// AAudio is supported on Android Oreo MR1 (API 27) and higher.
// TODO(bugs.webrtc.org/8914): currently disabled by default.
private boolean isAAudioSupported() {
if (blacklistDeviceForAAudioUsage) {
Logging.w(TAG, "AAudio support is currently disabled on all devices!");
}
return !blacklistDeviceForAAudioUsage && WebRtcAudioUtils.runningOnOreoMR1OrHigher();
}
// Returns the native output sample rate for this device's output stream.
private int getNativeOutputSampleRate() {
// Override this if we're running on an old emulator image which only
// supports 8 kHz and doesn't support PROPERTY_OUTPUT_SAMPLE_RATE.
if (WebRtcAudioUtils.runningOnEmulator()) {
Logging.d(TAG, "Running emulator, overriding sample rate to 8 kHz.");
return 8000;
}
// Default can be overridden by WebRtcAudioUtils.setDefaultSampleRateHz().
// If so, use that value and return here.
if (WebRtcAudioUtils.isDefaultSampleRateOverridden()) {
Logging.d(TAG,
"Default sample rate is overriden to " + WebRtcAudioUtils.getDefaultSampleRateHz()
+ " Hz");
return WebRtcAudioUtils.getDefaultSampleRateHz();
}
// No overrides available. Deliver best possible estimate based on default
// Android AudioManager APIs.
final int sampleRateHz;
if (WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
sampleRateHz = getSampleRateOnJellyBeanMR10OrHigher();
} else {
sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz();
}
Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz");
return sampleRateHz;
}
@TargetApi(17)
private int getSampleRateOnJellyBeanMR10OrHigher() {
String sampleRateString = audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
return (sampleRateString == null) ? WebRtcAudioUtils.getDefaultSampleRateHz()
: Integer.parseInt(sampleRateString);
}
// Returns the native output buffer size for low-latency output streams.
@TargetApi(17)
private int getLowLatencyOutputFramesPerBuffer() {
assertTrue(isLowLatencyOutputSupported());
if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
return DEFAULT_FRAME_PER_BUFFER;
}
String framesPerBuffer =
audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
return framesPerBuffer == null ? DEFAULT_FRAME_PER_BUFFER : Integer.parseInt(framesPerBuffer);
}
// Returns true if the device supports an audio effect (AEC or NS).
// Four conditions must be fulfilled if these functions are to return true:
// 1) the platform must support the built-in (HW) effect,
// 2) explicit use (override) of a WebRTC based version must not be set,
// 3) the device must not be blacklisted for use of the effect, and
// 4) the UUID of the effect must be approved (some UUIDs can be excluded).
private static boolean isAcousticEchoCancelerSupported() {
return WebRtcAudioEffects.canUseAcousticEchoCanceler();
}
private static boolean isNoiseSuppressorSupported() {
return WebRtcAudioEffects.canUseNoiseSuppressor();
}
// Returns the minimum output buffer size for Java based audio (AudioTrack).
// This size can also be used for OpenSL ES implementations on devices that
// lack support for low-latency output.
private static int getMinOutputFrameSize(int sampleRateInHz, int numChannels) {
final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
final int channelConfig =
(numChannels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO);
return AudioTrack.getMinBufferSize(
sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
/ bytesPerFrame;
}
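// Worked example (illustrative numbers, not from the CL): for 48000 Hz stereo
// PCM16, bytesPerFrame = 2 * (16 / 8) = 4 bytes. If getMinBufferSize() were to
// report 15376 bytes, this method would return 15376 / 4 = 3844 frames, i.e.
// roughly 80 ms of audio at 48 kHz.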
// Returns the native input buffer size for low-latency input streams.
private int getLowLatencyInputFramesPerBuffer() {
assertTrue(isLowLatencyInputSupported());
return getLowLatencyOutputFramesPerBuffer();
}
// Returns the minimum input buffer size for Java based audio (AudioRecord).
// This size can also be used for OpenSL ES implementations on devices that
// lack support for low-latency input.
private static int getMinInputFrameSize(int sampleRateInHz, int numChannels) {
final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
final int channelConfig =
(numChannels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
return AudioRecord.getMinBufferSize(
sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
/ bytesPerFrame;
}
// Helper method which throws an exception when an assertion has failed.
private static void assertTrue(boolean condition) {
if (!condition) {
throw new AssertionError("Expected condition to be true");
}
}
private native void nativeCacheAudioParameters(int sampleRate, int outputChannels,
int inputChannels, boolean hardwareAEC, boolean hardwareAGC, boolean hardwareNS,
boolean lowLatencyOutput, boolean lowLatencyInput, boolean proAudio, boolean aAudio,
int outputBufferSize, int inputBufferSize, long nativeAudioManager);
}
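A brief, hypothetical sketch of the static configuration hooks above: all three overrides must run before WebRtcAudioManager is constructed, because its constructor reads them in storeAudioParameters().

package org.webrtc.audio;

final class WebRtcAudioManagerConfigSketch {
  // Hypothetical app-level configuration, not part of the CL.
  static void configureBeforeFactoryCreation() {
    WebRtcAudioManager.setStereoOutput(true);  // default is mono playout
    WebRtcAudioManager.setStereoInput(false);  // keep mono capture
    // Force the Java AudioTrack/AudioRecord path instead of OpenSL ES output.
    WebRtcAudioManager.setBlacklistDeviceForOpenSLESUsage(true);
  }
}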

org/webrtc/audio/WebRtcAudioRecord.java

@@ -0,0 +1,397 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
package org.webrtc.audio;
import android.annotation.TargetApi;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder.AudioSource;
import android.os.Process;
import java.lang.System;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;
import org.webrtc.Logging;
import org.webrtc.ThreadUtils;
import org.webrtc.audio.AudioDeviceModule.AudioRecordErrorCallback;
import org.webrtc.audio.AudioDeviceModule.AudioRecordStartErrorCode;
class WebRtcAudioRecord {
private static final boolean DEBUG = false;
private static final String TAG = "WebRtcAudioRecord";
// Default audio data format is PCM 16 bit per sample.
// Guaranteed to be supported by all devices.
private static final int BITS_PER_SAMPLE = 16;
// Requested size of each recorded buffer provided to the client.
private static final int CALLBACK_BUFFER_SIZE_MS = 10;
// Average number of callbacks per second.
private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
// We ask for a native buffer size of BUFFER_SIZE_FACTOR * (minimum required
// buffer size). The extra space is allocated to guard against glitches under
// high load.
private static final int BUFFER_SIZE_FACTOR = 2;
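// Illustrative sizing (numbers assumed, not from the CL): at 48000 Hz mono,
// initRecording() below allocates 48000 / BUFFERS_PER_SECOND = 480 frames,
// i.e. 480 * 2 = 960 bytes per 10 ms callback, and requests an AudioRecord
// buffer of at least BUFFER_SIZE_FACTOR * getMinBufferSize() bytes.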
// The AudioRecordJavaThread is allowed to wait for a successful call to join()
// but the wait times out after this amount of time.
private static final long AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS = 2000;
private static final int DEFAULT_AUDIO_SOURCE = getDefaultAudioSource();
private static int audioSource = DEFAULT_AUDIO_SOURCE;
private final long nativeAudioRecord;
private WebRtcAudioEffects effects = null;
private ByteBuffer byteBuffer;
private AudioRecord audioRecord = null;
private AudioRecordThread audioThread = null;
private static volatile boolean microphoneMute = false;
private byte[] emptyBytes;
private static AudioRecordErrorCallback errorCallback = null;
public static void setErrorCallback(AudioRecordErrorCallback errorCallback) {
Logging.d(TAG, "Set error callback");
WebRtcAudioRecord.errorCallback = errorCallback;
}
/**
* Contains audio sample information. Object is passed using {@link
* WebRtcAudioRecord.WebRtcAudioRecordSamplesReadyCallback}
*/
public static class AudioSamples {
/** See {@link AudioRecord#getAudioFormat()} */
private final int audioFormat;
/** See {@link AudioRecord#getChannelCount()} */
private final int channelCount;
/** See {@link AudioRecord#getSampleRate()} */
private final int sampleRate;
private final byte[] data;
private AudioSamples(AudioRecord audioRecord, byte[] data) {
this.audioFormat = audioRecord.getAudioFormat();
this.channelCount = audioRecord.getChannelCount();
this.sampleRate = audioRecord.getSampleRate();
this.data = data;
}
public int getAudioFormat() {
return audioFormat;
}
public int getChannelCount() {
return channelCount;
}
public int getSampleRate() {
return sampleRate;
}
public byte[] getData() {
return data;
}
}
/** Called when new audio samples are ready. This should only be set for debug purposes */
public static interface WebRtcAudioRecordSamplesReadyCallback {
void onWebRtcAudioRecordSamplesReady(AudioSamples samples);
}
private static WebRtcAudioRecordSamplesReadyCallback audioSamplesReadyCallback = null;
public static void setOnAudioSamplesReady(WebRtcAudioRecordSamplesReadyCallback callback) {
audioSamplesReadyCallback = callback;
}
/**
* Audio thread which keeps calling AudioRecord.read() waiting for audio
* to be recorded. Feeds recorded data to the native counterpart as a
* periodic sequence of callbacks using nativeDataIsRecorded().
* This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
*/
private class AudioRecordThread extends Thread {
private volatile boolean keepAlive = true;
public AudioRecordThread(String name) {
super(name);
}
@Override
public void run() {
Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
Logging.d(TAG, "AudioRecordThread" + WebRtcAudioUtils.getThreadInfo());
assertTrue(audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING);
long lastTime = System.nanoTime();
while (keepAlive) {
int bytesRead = audioRecord.read(byteBuffer, byteBuffer.capacity());
if (bytesRead == byteBuffer.capacity()) {
if (microphoneMute) {
byteBuffer.clear();
byteBuffer.put(emptyBytes);
}
// It's possible we've been shut down during the read, and stopRecording() tried and
// failed to join this thread. To be a bit safer, try to avoid calling any native methods
// in case they've been unregistered after stopRecording() returned.
if (keepAlive) {
nativeDataIsRecorded(bytesRead, nativeAudioRecord);
}
if (audioSamplesReadyCallback != null) {
// Copy the entire byte buffer array. Assume that the start of the byteBuffer is
// at index 0.
byte[] data = Arrays.copyOf(byteBuffer.array(), byteBuffer.capacity());
audioSamplesReadyCallback.onWebRtcAudioRecordSamplesReady(
new AudioSamples(audioRecord, data));
}
} else {
String errorMessage = "AudioRecord.read failed: " + bytesRead;
Logging.e(TAG, errorMessage);
if (bytesRead == AudioRecord.ERROR_INVALID_OPERATION) {
keepAlive = false;
reportWebRtcAudioRecordError(errorMessage);
}
}
if (DEBUG) {
long nowTime = System.nanoTime();
long durationInMs = TimeUnit.NANOSECONDS.toMillis((nowTime - lastTime));
lastTime = nowTime;
Logging.d(TAG, "bytesRead[" + durationInMs + "] " + bytesRead);
}
}
try {
if (audioRecord != null) {
audioRecord.stop();
}
} catch (IllegalStateException e) {
Logging.e(TAG, "AudioRecord.stop failed: " + e.getMessage());
}
}
// Stops the inner thread loop, which results in calling AudioRecord.stop().
// Does not block the calling thread.
public void stopThread() {
Logging.d(TAG, "stopThread");
keepAlive = false;
}
}
WebRtcAudioRecord(long nativeAudioRecord) {
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
this.nativeAudioRecord = nativeAudioRecord;
if (DEBUG) {
WebRtcAudioUtils.logDeviceInfo(TAG);
}
effects = WebRtcAudioEffects.create();
}
private boolean enableBuiltInAEC(boolean enable) {
Logging.d(TAG, "enableBuiltInAEC(" + enable + ')');
if (effects == null) {
Logging.e(TAG, "Built-in AEC is not supported on this platform");
return false;
}
return effects.setAEC(enable);
}
private boolean enableBuiltInNS(boolean enable) {
Logging.d(TAG, "enableBuiltInNS(" + enable + ')');
if (effects == null) {
Logging.e(TAG, "Built-in NS is not supported on this platform");
return false;
}
return effects.setNS(enable);
}
private int initRecording(int sampleRate, int channels) {
Logging.d(TAG, "initRecording(sampleRate=" + sampleRate + ", channels=" + channels + ")");
if (audioRecord != null) {
reportWebRtcAudioRecordInitError("InitRecording called twice without StopRecording.");
return -1;
}
final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
final int framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
emptyBytes = new byte[byteBuffer.capacity()];
// Rather than passing the ByteBuffer with every callback (requiring
// the potentially expensive GetDirectBufferAddress) we simply have the
// native class cache the address of the memory once.
nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord);
// Get the minimum buffer size required for the successful creation of
// an AudioRecord object, in byte units.
// Note that this size doesn't guarantee a smooth recording under load.
final int channelConfig = channelCountToConfiguration(channels);
int minBufferSize =
AudioRecord.getMinBufferSize(sampleRate, channelConfig, AudioFormat.ENCODING_PCM_16BIT);
if (minBufferSize == AudioRecord.ERROR || minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
reportWebRtcAudioRecordInitError("AudioRecord.getMinBufferSize failed: " + minBufferSize);
return -1;
}
Logging.d(TAG, "AudioRecord.getMinBufferSize: " + minBufferSize);
// Use a larger buffer size than the minimum required when creating the
// AudioRecord instance to ensure smooth recording under load. It has been
// verified that it does not increase the actual recording latency.
int bufferSizeInBytes = Math.max(BUFFER_SIZE_FACTOR * minBufferSize, byteBuffer.capacity());
Logging.d(TAG, "bufferSizeInBytes: " + bufferSizeInBytes);
try {
audioRecord = new AudioRecord(audioSource, sampleRate, channelConfig,
AudioFormat.ENCODING_PCM_16BIT, bufferSizeInBytes);
} catch (IllegalArgumentException e) {
reportWebRtcAudioRecordInitError("AudioRecord ctor error: " + e.getMessage());
releaseAudioResources();
return -1;
}
if (audioRecord == null || audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
reportWebRtcAudioRecordInitError("Failed to create a new AudioRecord instance");
releaseAudioResources();
return -1;
}
if (effects != null) {
effects.enable(audioRecord.getAudioSessionId());
}
logMainParameters();
logMainParametersExtended();
return framesPerBuffer;
}
private boolean startRecording() {
Logging.d(TAG, "startRecording");
assertTrue(audioRecord != null);
assertTrue(audioThread == null);
try {
audioRecord.startRecording();
} catch (IllegalStateException e) {
reportWebRtcAudioRecordStartError(AudioRecordStartErrorCode.AUDIO_RECORD_START_EXCEPTION,
"AudioRecord.startRecording failed: " + e.getMessage());
return false;
}
if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
reportWebRtcAudioRecordStartError(AudioRecordStartErrorCode.AUDIO_RECORD_START_STATE_MISMATCH,
"AudioRecord.startRecording failed - incorrect state :"
+ audioRecord.getRecordingState());
return false;
}
audioThread = new AudioRecordThread("AudioRecordJavaThread");
audioThread.start();
return true;
}
private boolean stopRecording() {
Logging.d(TAG, "stopRecording");
assertTrue(audioThread != null);
audioThread.stopThread();
if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS)) {
Logging.e(TAG, "Join of AudioRecordJavaThread timed out");
WebRtcAudioUtils.logAudioState(TAG);
}
audioThread = null;
if (effects != null) {
effects.release();
}
releaseAudioResources();
return true;
}
private void logMainParameters() {
Logging.d(TAG,
"AudioRecord: "
+ "session ID: " + audioRecord.getAudioSessionId() + ", "
+ "channels: " + audioRecord.getChannelCount() + ", "
+ "sample rate: " + audioRecord.getSampleRate());
}
@TargetApi(23)
private void logMainParametersExtended() {
if (WebRtcAudioUtils.runningOnMarshmallowOrHigher()) {
Logging.d(TAG,
"AudioRecord: "
// The frame count of the native AudioRecord buffer.
+ "buffer size in frames: " + audioRecord.getBufferSizeInFrames());
}
}
// Helper method which throws an exception when an assertion has failed.
private static void assertTrue(boolean condition) {
if (!condition) {
throw new AssertionError("Expected condition to be true");
}
}
private int channelCountToConfiguration(int channels) {
return (channels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
}
private native void nativeCacheDirectBufferAddress(ByteBuffer byteBuffer, long nativeAudioRecord);
private native void nativeDataIsRecorded(int bytes, long nativeAudioRecord);
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized void setAudioSource(int source) {
Logging.w(TAG, "Audio source is changed from: " + audioSource + " to " + source);
audioSource = source;
}
private static int getDefaultAudioSource() {
return AudioSource.VOICE_COMMUNICATION;
}
// Sets all recorded samples to zero if |mute| is true, i.e., ensures that
// the microphone is muted.
public static void setMicrophoneMute(boolean mute) {
Logging.w(TAG, "setMicrophoneMute(" + mute + ")");
microphoneMute = mute;
}
// Releases the native AudioRecord resources.
private void releaseAudioResources() {
Logging.d(TAG, "releaseAudioResources");
if (audioRecord != null) {
audioRecord.release();
audioRecord = null;
}
}
private void reportWebRtcAudioRecordInitError(String errorMessage) {
Logging.e(TAG, "Init recording error: " + errorMessage);
WebRtcAudioUtils.logAudioState(TAG);
if (errorCallback != null) {
errorCallback.onWebRtcAudioRecordInitError(errorMessage);
}
}
private void reportWebRtcAudioRecordStartError(
AudioRecordStartErrorCode errorCode, String errorMessage) {
Logging.e(TAG, "Start recording error: " + errorCode + ". " + errorMessage);
WebRtcAudioUtils.logAudioState(TAG);
if (errorCallback != null) {
errorCallback.onWebRtcAudioRecordStartError(errorCode, errorMessage);
}
}
private void reportWebRtcAudioRecordError(String errorMessage) {
Logging.e(TAG, "Run-time recording error: " + errorMessage);
WebRtcAudioUtils.logAudioState(TAG);
if (errorCallback != null) {
errorCallback.onWebRtcAudioRecordError(errorMessage);
}
}
}
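The samples-ready callback above is documented as debug-only; the sketch below is a hypothetical example (not part of the CL) of wiring it up together with the static microphone-mute switch.

package org.webrtc.audio;

import org.webrtc.Logging;

final class WebRtcAudioRecordDebugSketch {
  static void installDebugTap() {
    // Tap the recorded PCM for debugging; runs on the AudioRecordThread.
    WebRtcAudioRecord.setOnAudioSamplesReady(
        new WebRtcAudioRecord.WebRtcAudioRecordSamplesReadyCallback() {
          @Override
          public void onWebRtcAudioRecordSamplesReady(WebRtcAudioRecord.AudioSamples samples) {
            Logging.d("RecordTap",
                "rate=" + samples.getSampleRate() + ", channels=" + samples.getChannelCount()
                    + ", bytes=" + samples.getData().length);
          }
        });
    // Replace all recorded samples with zeros, e.g. for a mute button.
    WebRtcAudioRecord.setMicrophoneMute(true);
  }
}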

org/webrtc/audio/WebRtcAudioTrack.java

@@ -0,0 +1,494 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
package org.webrtc.audio;
import android.annotation.SuppressLint;
import android.annotation.TargetApi;
import android.content.Context;
import android.media.AudioAttributes;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;
import android.os.Process;
import java.lang.Thread;
import java.nio.ByteBuffer;
import org.webrtc.ContextUtils;
import org.webrtc.Logging;
import org.webrtc.ThreadUtils;
import org.webrtc.audio.AudioDeviceModule.AudioTrackErrorCallback;
import org.webrtc.audio.AudioDeviceModule.AudioTrackStartErrorCode;
class WebRtcAudioTrack {
private static final boolean DEBUG = false;
private static final String TAG = "WebRtcAudioTrack";
// Default audio data format is PCM 16 bit per sample.
// Guaranteed to be supported by all devices.
private static final int BITS_PER_SAMPLE = 16;
// Requested size of each recorded buffer provided to the client.
private static final int CALLBACK_BUFFER_SIZE_MS = 10;
// Average number of callbacks per second.
private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
// The AudioTrackThread is allowed to wait for a successful call to join()
// but the wait times out after this amount of time.
private static final long AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS = 2000;
// By default, WebRTC creates audio tracks with a usage attribute
// corresponding to voice communications, such as telephony or VoIP.
private static final int DEFAULT_USAGE = getDefaultUsageAttribute();
private static int usageAttribute = DEFAULT_USAGE;
// This method overrides the default usage attribute and allows the user
// to set it to something other than AudioAttributes.USAGE_VOICE_COMMUNICATION.
// NOTE: calling this method will most likely break existing VoIP tuning.
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized void setAudioTrackUsageAttribute(int usage) {
Logging.w(TAG, "Default usage attribute is changed from: " + DEFAULT_USAGE + " to " + usage);
usageAttribute = usage;
}
private static int getDefaultUsageAttribute() {
if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
return getDefaultUsageAttributeOnLollipopOrHigher();
} else {
// Not used on SDKs lower than L.
return 0;
}
}
@TargetApi(21)
private static int getDefaultUsageAttributeOnLollipopOrHigher() {
return AudioAttributes.USAGE_VOICE_COMMUNICATION;
}
private final long nativeAudioTrack;
private final AudioManager audioManager;
private final ThreadUtils.ThreadChecker threadChecker = new ThreadUtils.ThreadChecker();
private ByteBuffer byteBuffer;
private AudioTrack audioTrack = null;
private AudioTrackThread audioThread = null;
// Samples to be played are replaced by zeros if |speakerMute| is set to true.
// Can be used to ensure that the speaker is fully muted.
private static volatile boolean speakerMute = false;
private byte[] emptyBytes;
private static AudioTrackErrorCallback errorCallback = null;
public static void setErrorCallback(AudioTrackErrorCallback errorCallback) {
Logging.d(TAG, "Set extended error callback");
WebRtcAudioTrack.errorCallback = errorCallback;
}
/**
* Audio thread which keeps calling AudioTrack.write() to stream audio.
* Data is periodically acquired from the native WebRTC layer using the
* nativeGetPlayoutData callback function.
* This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
*/
private class AudioTrackThread extends Thread {
private volatile boolean keepAlive = true;
public AudioTrackThread(String name) {
super(name);
}
@Override
public void run() {
Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
Logging.d(TAG, "AudioTrackThread" + WebRtcAudioUtils.getThreadInfo());
assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING);
// Fixed size in bytes of each 10ms block of audio data that we ask for
// using callbacks to the native WebRTC client.
final int sizeInBytes = byteBuffer.capacity();
while (keepAlive) {
// Get 10ms of PCM data from the native WebRTC client. Audio data is
// written into the common ByteBuffer using the address that was
// cached at construction.
nativeGetPlayoutData(sizeInBytes, nativeAudioTrack);
// Write data until all data has been written to the audio sink.
// Upon return, the buffer position will have been advanced to reflect
// the amount of data that was successfully written to the AudioTrack.
assertTrue(sizeInBytes <= byteBuffer.remaining());
if (speakerMute) {
byteBuffer.clear();
byteBuffer.put(emptyBytes);
byteBuffer.position(0);
}
int bytesWritten = 0;
if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
bytesWritten = writeOnLollipop(audioTrack, byteBuffer, sizeInBytes);
} else {
bytesWritten = writePreLollipop(audioTrack, byteBuffer, sizeInBytes);
}
if (bytesWritten != sizeInBytes) {
Logging.e(TAG, "AudioTrack.write played invalid number of bytes: " + bytesWritten);
// If a write() returns a negative value, an error has occurred.
// Stop playing and report an error in this case.
if (bytesWritten < 0) {
keepAlive = false;
reportWebRtcAudioTrackError("AudioTrack.write failed: " + bytesWritten);
}
}
// The byte buffer must be rewound since byteBuffer.position() is
// increased at each call to AudioTrack.write(). If we don't do this,
// the next call to AudioTrack.write() will fail.
byteBuffer.rewind();
// TODO(henrika): it is possible to create a delay estimate here by
// counting number of written frames and subtracting the result from
// audioTrack.getPlaybackHeadPosition().
}
// Stops playing the audio data. Since the instance was created in
// MODE_STREAM mode, audio will stop playing after the last buffer that
// was written has been played.
if (audioTrack != null) {
Logging.d(TAG, "Calling AudioTrack.stop...");
try {
audioTrack.stop();
Logging.d(TAG, "AudioTrack.stop is done.");
} catch (IllegalStateException e) {
Logging.e(TAG, "AudioTrack.stop failed: " + e.getMessage());
}
}
}
@TargetApi(21)
private int writeOnLollipop(AudioTrack audioTrack, ByteBuffer byteBuffer, int sizeInBytes) {
return audioTrack.write(byteBuffer, sizeInBytes, AudioTrack.WRITE_BLOCKING);
}
private int writePreLollipop(AudioTrack audioTrack, ByteBuffer byteBuffer, int sizeInBytes) {
return audioTrack.write(byteBuffer.array(), byteBuffer.arrayOffset(), sizeInBytes);
}
// Stops the inner thread loop which results in calling AudioTrack.stop().
// Does not block the calling thread.
public void stopThread() {
Logging.d(TAG, "stopThread");
keepAlive = false;
}
}
WebRtcAudioTrack(long nativeAudioTrack) {
threadChecker.checkIsOnValidThread();
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
this.nativeAudioTrack = nativeAudioTrack;
audioManager =
(AudioManager) ContextUtils.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
if (DEBUG) {
WebRtcAudioUtils.logDeviceInfo(TAG);
}
}
private boolean initPlayout(int sampleRate, int channels) {
threadChecker.checkIsOnValidThread();
Logging.d(TAG, "initPlayout(sampleRate=" + sampleRate + ", channels=" + channels + ")");
final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * (sampleRate / BUFFERS_PER_SECOND));
Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
emptyBytes = new byte[byteBuffer.capacity()];
// Rather than passing the ByteBuffer with every callback (requiring
// the potentially expensive GetDirectBufferAddress) we simply have the
// native class cache the address of the memory once.
nativeCacheDirectBufferAddress(byteBuffer, nativeAudioTrack);
// Get the minimum buffer size required for the successful creation of an
// AudioTrack object in MODE_STREAM mode.
// Note that this size doesn't guarantee a smooth playback under load.
// TODO(henrika): should we extend the buffer size to avoid glitches?
final int channelConfig = channelCountToConfiguration(channels);
final int minBufferSizeInBytes =
AudioTrack.getMinBufferSize(sampleRate, channelConfig, AudioFormat.ENCODING_PCM_16BIT);
Logging.d(TAG, "AudioTrack.getMinBufferSize: " + minBufferSizeInBytes);
// For the streaming mode, data must be written to the audio sink in
// chunks of size (given by byteBuffer.capacity()) less than or equal
// to the total buffer size |minBufferSizeInBytes|. But, we have seen
// reports of "getMinBufferSize(): error querying hardware". Hence, it
// can happen that |minBufferSizeInBytes| contains an invalid value.
if (minBufferSizeInBytes < byteBuffer.capacity()) {
reportWebRtcAudioTrackInitError("AudioTrack.getMinBufferSize returns an invalid value.");
return false;
}
// Ensure that the previous audio session was stopped correctly before trying
// to create a new AudioTrack.
if (audioTrack != null) {
reportWebRtcAudioTrackInitError("Conflict with existing AudioTrack.");
return false;
}
try {
// Create an AudioTrack object and initialize its associated audio buffer.
// The size of this buffer determines how long an AudioTrack can play
// before running out of data.
if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
// If we are on API level 21 or higher, it is possible to use a special AudioTrack
// constructor that uses AudioAttributes and AudioFormat as input. It allows us to
// supersede the notion of stream types for defining the behavior of audio playback,
// and to allow certain platforms or routing policies to use this information for more
// refined volume or routing decisions.
audioTrack =
createAudioTrackOnLollipopOrHigher(sampleRate, channelConfig, minBufferSizeInBytes);
} else {
// Use default constructor for API levels below 21.
audioTrack =
createAudioTrackOnLowerThanLollipop(sampleRate, channelConfig, minBufferSizeInBytes);
}
} catch (IllegalArgumentException e) {
reportWebRtcAudioTrackInitError(e.getMessage());
releaseAudioResources();
return false;
}
// It can happen that an AudioTrack is created but it was not successfully
// initialized upon creation. Seems to be the case e.g. when the maximum
// number of globally available audio tracks is exceeded.
if (audioTrack == null || audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
reportWebRtcAudioTrackInitError("Initialization of audio track failed.");
releaseAudioResources();
return false;
}
logMainParameters();
logMainParametersExtended();
return true;
}
private boolean startPlayout() {
threadChecker.checkIsOnValidThread();
Logging.d(TAG, "startPlayout");
assertTrue(audioTrack != null);
assertTrue(audioThread == null);
// Starts playing an audio track.
try {
audioTrack.play();
} catch (IllegalStateException e) {
reportWebRtcAudioTrackStartError(AudioTrackStartErrorCode.AUDIO_TRACK_START_EXCEPTION,
"AudioTrack.play failed: " + e.getMessage());
releaseAudioResources();
return false;
}
if (audioTrack.getPlayState() != AudioTrack.PLAYSTATE_PLAYING) {
reportWebRtcAudioTrackStartError(AudioTrackStartErrorCode.AUDIO_TRACK_START_STATE_MISMATCH,
"AudioTrack.play failed - incorrect state :" + audioTrack.getPlayState());
releaseAudioResources();
return false;
}
// Create and start new high-priority thread which calls AudioTrack.write()
// and where we also call the native nativeGetPlayoutData() callback to
// request decoded audio from WebRTC.
audioThread = new AudioTrackThread("AudioTrackJavaThread");
audioThread.start();
return true;
}
private boolean stopPlayout() {
threadChecker.checkIsOnValidThread();
Logging.d(TAG, "stopPlayout");
assertTrue(audioThread != null);
logUnderrunCount();
audioThread.stopThread();
Logging.d(TAG, "Stopping the AudioTrackThread...");
audioThread.interrupt();
if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS)) {
Logging.e(TAG, "Join of AudioTrackThread timed out.");
WebRtcAudioUtils.logAudioState(TAG);
}
Logging.d(TAG, "AudioTrackThread has now been stopped.");
audioThread = null;
releaseAudioResources();
return true;
}
// Get max possible volume index for a phone call audio stream.
private int getStreamMaxVolume() {
threadChecker.checkIsOnValidThread();
Logging.d(TAG, "getStreamMaxVolume");
assertTrue(audioManager != null);
return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
}
// Set current volume level for a phone call audio stream.
private boolean setStreamVolume(int volume) {
threadChecker.checkIsOnValidThread();
Logging.d(TAG, "setStreamVolume(" + volume + ")");
assertTrue(audioManager != null);
if (isVolumeFixed()) {
Logging.e(TAG, "The device implements a fixed volume policy.");
return false;
}
audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0);
return true;
}
// TODO(bugs.webrtc.org/8580): Call requires API level 21 (current min is 16):
// `android.media.AudioManager#isVolumeFixed`: NewApi [warning]
@SuppressLint("NewApi")
private boolean isVolumeFixed() {
if (!WebRtcAudioUtils.runningOnLollipopOrHigher())
return false;
return audioManager.isVolumeFixed();
}
/** Get current volume level for a phone call audio stream. */
private int getStreamVolume() {
threadChecker.checkIsOnValidThread();
Logging.d(TAG, "getStreamVolume");
assertTrue(audioManager != null);
return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
}
private void logMainParameters() {
Logging.d(TAG,
"AudioTrack: "
+ "session ID: " + audioTrack.getAudioSessionId() + ", "
+ "channels: " + audioTrack.getChannelCount() + ", "
+ "sample rate: " + audioTrack.getSampleRate()
+ ", "
// Gain (>=1.0) expressed as linear multiplier on sample values.
+ "max gain: " + AudioTrack.getMaxVolume());
}
// Creates an AudioTrack instance using AudioAttributes and AudioFormat as input.
// It allows certain platforms or routing policies to use this information for more
// refined volume or routing decisions.
@TargetApi(21)
private static AudioTrack createAudioTrackOnLollipopOrHigher(
int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
Logging.d(TAG, "createAudioTrackOnLollipopOrHigher");
// TODO(henrika): use setPerformanceMode(int) with PERFORMANCE_MODE_LOW_LATENCY to control
// performance when Android O is supported. Add some logging in the mean time.
final int nativeOutputSampleRate =
AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_VOICE_CALL);
Logging.d(TAG, "nativeOutputSampleRate: " + nativeOutputSampleRate);
if (sampleRateInHz != nativeOutputSampleRate) {
Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
}
if (usageAttribute != DEFAULT_USAGE) {
Logging.w(TAG, "A non default usage attribute is used: " + usageAttribute);
}
// Create an audio track where the audio usage is for VoIP and the content type is speech.
return new AudioTrack(new AudioAttributes.Builder()
.setUsage(usageAttribute)
.setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
.build(),
new AudioFormat.Builder()
.setEncoding(AudioFormat.ENCODING_PCM_16BIT)
.setSampleRate(sampleRateInHz)
.setChannelMask(channelConfig)
.build(),
bufferSizeInBytes, AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);
}
@SuppressWarnings("deprecation") // Deprecated in API level 25.
private static AudioTrack createAudioTrackOnLowerThanLollipop(
int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
return new AudioTrack(AudioManager.STREAM_VOICE_CALL, sampleRateInHz, channelConfig,
AudioFormat.ENCODING_PCM_16BIT, bufferSizeInBytes, AudioTrack.MODE_STREAM);
}
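// Sketch of the expected call-site selection between the two factory methods
// above (the enclosing init/start logic is assumed and not shown here):
//
//   audioTrack = WebRtcAudioUtils.runningOnLollipopOrHigher()
//       ? createAudioTrackOnLollipopOrHigher(sampleRate, channelConfig, bufferSizeInBytes)
//       : createAudioTrackOnLowerThanLollipop(sampleRate, channelConfig, bufferSizeInBytes);
//
// Only the API 21+ path can attach AudioAttributes; the pre-Lollipop path
// falls back to the deprecated stream-type constructor.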
@TargetApi(24)
private void logMainParametersExtended() {
if (WebRtcAudioUtils.runningOnMarshmallowOrHigher()) {
Logging.d(TAG,
"AudioTrack: "
// The effective size of the AudioTrack buffer that the app writes to.
+ "buffer size in frames: " + audioTrack.getBufferSizeInFrames());
}
if (WebRtcAudioUtils.runningOnNougatOrHigher()) {
Logging.d(TAG,
"AudioTrack: "
// Maximum size of the AudioTrack buffer in frames.
+ "buffer capacity in frames: " + audioTrack.getBufferCapacityInFrames());
}
}
// Prints the number of underrun occurrences in the application-level write
// buffer since the AudioTrack was created. An underrun occurs if the app does
// not write audio data quickly enough, causing the buffer to underflow and a
// potential audio glitch.
// TODO(henrika): keep track of this value in the field and possibly add new
// UMA stat if needed.
@TargetApi(24)
private void logUnderrunCount() {
if (WebRtcAudioUtils.runningOnNougatOrHigher()) {
Logging.d(TAG, "underrun count: " + audioTrack.getUnderrunCount());
}
}
// Helper method which throws an exception when an assertion has failed.
private static void assertTrue(boolean condition) {
if (!condition) {
throw new AssertionError("Expected condition to be true");
}
}
private int channelCountToConfiguration(int channels) {
return (channels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO);
}
private native void nativeCacheDirectBufferAddress(ByteBuffer byteBuffer, long nativeAudioTrack);
private native void nativeGetPlayoutData(int bytes, long nativeAudioTrack);
// Sets all samples to be played out to zero if |mute| is true, i.e.,
// ensures that the speaker is muted.
public static void setSpeakerMute(boolean mute) {
Logging.w(TAG, "setSpeakerMute(" + mute + ")");
speakerMute = mute;
}
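// Minimal usage sketch (assumes the enclosing class is reachable as
// WebRtcAudioTrack from application code): temporarily silence all playout
// without touching stream volume or audio routing.
//
//   WebRtcAudioTrack.setSpeakerMute(true);   // samples are zeroed before write
//   ...
//   WebRtcAudioTrack.setSpeakerMute(false);  // resume normal playout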
// Releases the native AudioTrack resources.
private void releaseAudioResources() {
Logging.d(TAG, "releaseAudioResources");
if (audioTrack != null) {
audioTrack.release();
audioTrack = null;
}
}
private void reportWebRtcAudioTrackInitError(String errorMessage) {
Logging.e(TAG, "Init playout error: " + errorMessage);
WebRtcAudioUtils.logAudioState(TAG);
if (errorCallback != null) {
errorCallback.onWebRtcAudioTrackInitError(errorMessage);
}
}
private void reportWebRtcAudioTrackStartError(
AudioTrackStartErrorCode errorCode, String errorMessage) {
Logging.e(TAG, "Start playout error: " + errorCode + ". " + errorMessage);
WebRtcAudioUtils.logAudioState(TAG);
if (errorCallback != null) {
errorCallback.onWebRtcAudioTrackStartError(errorCode, errorMessage);
}
}
private void reportWebRtcAudioTrackError(String errorMessage) {
Logging.e(TAG, "Run-time playback error: " + errorMessage);
WebRtcAudioUtils.logAudioState(TAG);
if (errorCallback != null) {
errorCallback.onWebRtcAudioTrackError(errorMessage);
}
}
}

View File

@ -0,0 +1,422 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
package org.webrtc.audio;
import static android.media.AudioManager.MODE_IN_CALL;
import static android.media.AudioManager.MODE_IN_COMMUNICATION;
import static android.media.AudioManager.MODE_NORMAL;
import static android.media.AudioManager.MODE_RINGTONE;
import android.annotation.SuppressLint;
import android.annotation.TargetApi;
import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioDeviceInfo;
import android.media.AudioManager;
import android.media.AudioRecordingConfiguration;
import android.media.MediaRecorder.AudioSource;
import android.os.Build;
import android.os.Process;
import java.lang.Thread;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import org.webrtc.ContextUtils;
import org.webrtc.Logging;
final class WebRtcAudioUtils {
private static final String TAG = "WebRtcAudioUtils";
// List of devices where we have seen issues (e.g. bad audio quality) using
// the low latency output mode in combination with OpenSL ES.
// The device name is given by Build.MODEL.
private static final String[] BLACKLISTED_OPEN_SL_ES_MODELS = new String[] {
// It is recommended to maintain a list of blacklisted models outside
// this package and instead call
// WebRtcAudioManager.setBlacklistDeviceForOpenSLESUsage(true)
// from the client for devices where OpenSL ES shall be disabled.
};
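// Illustrative client-side pattern (the model names are hypothetical): keep
// the blacklist in application code and only signal the decision to WebRTC:
//
//   private static final List<String> BAD_OPEN_SL_ES_MODELS =
//       Arrays.asList("SomeModelA", "SomeModelB");
//   if (BAD_OPEN_SL_ES_MODELS.contains(Build.MODEL)) {
//     WebRtcAudioManager.setBlacklistDeviceForOpenSLESUsage(true);
//   }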
// List of devices where it has been verified that the built-in AEC is
// bad and where it makes sense to avoid using it and instead rely on the
// WebRTC software version. The device name is given by Build.MODEL.
private static final String[] BLACKLISTED_AEC_MODELS = new String[] {
// It is recommended to maintain a list of blacklisted models outside
// this package and instead call setWebRtcBasedAcousticEchoCanceler(true)
// from the client for devices where the built-in AEC shall be disabled.
};
private static final String[] BLACKLISTED_NS_MODELS = new String[] {
// It is recommended to maintain a list of blacklisted models outside
// this package and instead call setWebRtcBasedNoiseSuppressor(true)
// from the client for devices where the built-in NS shall be disabled.
};
// Use 16kHz as the default sample rate. A higher sample rate might prevent
// us from supporting communication mode on some older (e.g. ICS) devices.
private static final int DEFAULT_SAMPLE_RATE_HZ = 16000;
private static int defaultSampleRateHz = DEFAULT_SAMPLE_RATE_HZ;
// Set to true if setDefaultSampleRateHz() has been called.
private static boolean isDefaultSampleRateOverridden = false;
// By default, utilize hardware based audio effects for AEC and NS when
// available.
private static boolean useWebRtcBasedAcousticEchoCanceler = false;
private static boolean useWebRtcBasedNoiseSuppressor = false;
// Call these methods if any hardware-based effect shall be replaced by a
// software-based version provided by the WebRTC stack.
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized void setWebRtcBasedAcousticEchoCanceler(boolean enable) {
useWebRtcBasedAcousticEchoCanceler = enable;
}
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized void setWebRtcBasedNoiseSuppressor(boolean enable) {
useWebRtcBasedNoiseSuppressor = enable;
}
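// Illustrative usage (assuming the setters are reachable from client code and
// are called before the audio device module is created): force the WebRTC
// software AEC/NS on a device where the hardware effects misbehave.
//
//   WebRtcAudioUtils.setWebRtcBasedAcousticEchoCanceler(true);
//   WebRtcAudioUtils.setWebRtcBasedNoiseSuppressor(true);
//
// The setters only record a preference; the effect selection happens when
// the recording path later queries useWebRtcBased*().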
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized void setWebRtcBasedAutomaticGainControl(boolean enable) {
// TODO(henrika): deprecated; remove when no longer used by any client.
Logging.w(TAG, "setWebRtcBasedAutomaticGainControl() is deprecated");
}
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized boolean useWebRtcBasedAcousticEchoCanceler() {
if (useWebRtcBasedAcousticEchoCanceler) {
Logging.w(TAG, "Overriding default behavior; now using WebRTC AEC!");
}
return useWebRtcBasedAcousticEchoCanceler;
}
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized boolean useWebRtcBasedNoiseSuppressor() {
if (useWebRtcBasedNoiseSuppressor) {
Logging.w(TAG, "Overriding default behavior; now using WebRTC NS!");
}
return useWebRtcBasedNoiseSuppressor;
}
// TODO(henrika): deprecated; remove when no longer used by any client.
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized boolean useWebRtcBasedAutomaticGainControl() {
// Always return true here to avoid trying to use any built-in AGC.
return true;
}
// Returns true if the device supports an audio effect (AEC or NS).
// Four conditions must be fulfilled for these functions to return true:
// 1) the platform must support the built-in (HW) effect,
// 2) explicit use (override) of a WebRTC based version must not be set,
// 3) the device must not be blacklisted for use of the effect, and
// 4) the UUID of the effect must be approved (some UUIDs can be excluded).
public static boolean isAcousticEchoCancelerSupported() {
return WebRtcAudioEffects.canUseAcousticEchoCanceler();
}
public static boolean isNoiseSuppressorSupported() {
return WebRtcAudioEffects.canUseNoiseSuppressor();
}
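// Illustrative check from calling code: the four conditions listed above are
// already folded into the return values, so a simple query is enough to
// decide whether to fall back to the WebRTC software effects.
//
//   boolean hardwareAec = WebRtcAudioUtils.isAcousticEchoCancelerSupported();
//   boolean hardwareNs = WebRtcAudioUtils.isNoiseSuppressorSupported();
//   Logging.d(TAG, "hardware AEC: " + hardwareAec + ", hardware NS: " + hardwareNs);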
// TODO(henrika): deprecated; remove when no longer used by any client.
public static boolean isAutomaticGainControlSupported() {
// Always return false here to avoid trying to use any built-in AGC.
return false;
}
// Call this method if the default handling of querying the native sample
// rate shall be overridden. Can be useful on some devices where the
// available Android APIs are known to return invalid results.
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized void setDefaultSampleRateHz(int sampleRateHz) {
isDefaultSampleRateOverridden = true;
defaultSampleRateHz = sampleRateHz;
}
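// Illustrative override (client code, before audio is initialized): pin the
// sample rate to 48 kHz on a device whose reported native rate is known to
// be unreliable.
//
//   WebRtcAudioUtils.setDefaultSampleRateHz(48000);
//
// isDefaultSampleRateOverridden() below reports whether this setter has been
// called, so callers can distinguish an explicit override from the built-in
// 16 kHz default.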
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized boolean isDefaultSampleRateOverridden() {
return isDefaultSampleRateOverridden;
}
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized int getDefaultSampleRateHz() {
return defaultSampleRateHz;
}
public static List<String> getBlackListedModelsForAecUsage() {
return Arrays.asList(WebRtcAudioUtils.BLACKLISTED_AEC_MODELS);
}
public static List<String> getBlackListedModelsForNsUsage() {
return Arrays.asList(WebRtcAudioUtils.BLACKLISTED_NS_MODELS);
}
public static boolean runningOnJellyBeanMR1OrHigher() {
// November 2012: Android 4.2. API Level 17.
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1;
}
public static boolean runningOnJellyBeanMR2OrHigher() {
// July 24, 2013: Android 4.3. API Level 18.
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2;
}
public static boolean runningOnLollipopOrHigher() {
// API Level 21.
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP;
}
public static boolean runningOnMarshmallowOrHigher() {
// API Level 23.
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.M;
}
public static boolean runningOnNougatOrHigher() {
// API Level 24.
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.N;
}
public static boolean runningOnOreoOrHigher() {
// API Level 26.
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.O;
}
public static boolean runningOnOreoMR1OrHigher() {
// API Level 27.
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.O_MR1;
}
// Helper method for building a string of thread information.
public static String getThreadInfo() {
return "@[name=" + Thread.currentThread().getName() + ", id=" + Thread.currentThread().getId()
+ "]";
}
// Returns true if we're running on an emulator.
public static boolean runningOnEmulator() {
return Build.HARDWARE.equals("goldfish") && Build.BRAND.startsWith("generic_");
}
// Returns true if the device is blacklisted for OpenSL ES usage.
public static boolean deviceIsBlacklistedForOpenSLESUsage() {
List<String> blackListedModels = Arrays.asList(BLACKLISTED_OPEN_SL_ES_MODELS);
return blackListedModels.contains(Build.MODEL);
}
// Logs information about the current build, taken from system properties.
static void logDeviceInfo(String tag) {
Logging.d(tag,
"Android SDK: " + Build.VERSION.SDK_INT + ", "
+ "Release: " + Build.VERSION.RELEASE + ", "
+ "Brand: " + Build.BRAND + ", "
+ "Device: " + Build.DEVICE + ", "
+ "Id: " + Build.ID + ", "
+ "Hardware: " + Build.HARDWARE + ", "
+ "Manufacturer: " + Build.MANUFACTURER + ", "
+ "Model: " + Build.MODEL + ", "
+ "Product: " + Build.PRODUCT);
}
// Logs information about the current audio state. The idea is to call this
// method when errors are detected to log under what conditions the error
// occurred. Hopefully it will provide clues to what might be the root cause.
static void logAudioState(String tag) {
logDeviceInfo(tag);
final Context context = ContextUtils.getApplicationContext();
final AudioManager audioManager =
(AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
logAudioStateBasic(tag, audioManager);
logAudioStateVolume(tag, audioManager);
logAudioDeviceInfo(tag, audioManager);
}
// Reports basic audio statistics.
private static void logAudioStateBasic(String tag, AudioManager audioManager) {
Logging.d(tag,
"Audio State: "
+ "audio mode: " + modeToString(audioManager.getMode()) + ", "
+ "has mic: " + hasMicrophone() + ", "
+ "mic muted: " + audioManager.isMicrophoneMute() + ", "
+ "music active: " + audioManager.isMusicActive() + ", "
+ "speakerphone: " + audioManager.isSpeakerphoneOn() + ", "
+ "BT SCO: " + audioManager.isBluetoothScoOn());
}
// TODO(bugs.webrtc.org/8580): Call requires API level 21 (current min is 16):
// `android.media.AudioManager#isVolumeFixed`: NewApi [warning]
@SuppressLint("NewApi")
// Adds volume information for all possible stream types.
private static void logAudioStateVolume(String tag, AudioManager audioManager) {
final int[] streams = {AudioManager.STREAM_VOICE_CALL, AudioManager.STREAM_MUSIC,
AudioManager.STREAM_RING, AudioManager.STREAM_ALARM, AudioManager.STREAM_NOTIFICATION,
AudioManager.STREAM_SYSTEM};
Logging.d(tag, "Audio State: ");
boolean fixedVolume = false;
if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
fixedVolume = audioManager.isVolumeFixed();
// Some devices may not have volume controls and might use a fixed volume.
Logging.d(tag, " fixed volume=" + fixedVolume);
}
if (!fixedVolume) {
for (int stream : streams) {
StringBuilder info = new StringBuilder();
info.append(" " + streamTypeToString(stream) + ": ");
info.append("volume=").append(audioManager.getStreamVolume(stream));
info.append(", max=").append(audioManager.getStreamMaxVolume(stream));
logIsStreamMute(tag, audioManager, stream, info);
Logging.d(tag, info.toString());
}
}
}
@TargetApi(23)
private static void logIsStreamMute(
String tag, AudioManager audioManager, int stream, StringBuilder info) {
if (WebRtcAudioUtils.runningOnMarshmallowOrHigher()) {
info.append(", muted=").append(audioManager.isStreamMute(stream));
}
}
@TargetApi(23)
private static void logAudioDeviceInfo(String tag, AudioManager audioManager) {
if (!WebRtcAudioUtils.runningOnMarshmallowOrHigher()) {
return;
}
final AudioDeviceInfo[] devices = audioManager.getDevices(AudioManager.GET_DEVICES_ALL);
if (devices.length == 0) {
return;
}
Logging.d(tag, "Audio Devices: ");
for (AudioDeviceInfo device : devices) {
StringBuilder info = new StringBuilder();
info.append(" ").append(deviceTypeToString(device.getType()));
info.append(device.isSource() ? "(in): " : "(out): ");
// An empty array indicates that the device supports arbitrary channel counts.
if (device.getChannelCounts().length > 0) {
info.append("channels=").append(Arrays.toString(device.getChannelCounts()));
info.append(", ");
}
if (device.getEncodings().length > 0) {
// Examples: ENCODING_PCM_16BIT = 2, ENCODING_PCM_FLOAT = 4.
info.append("encodings=").append(Arrays.toString(device.getEncodings()));
info.append(", ");
}
if (device.getSampleRates().length > 0) {
info.append("sample rates=").append(Arrays.toString(device.getSampleRates()));
info.append(", ");
}
info.append("id=").append(device.getId());
Logging.d(tag, info.toString());
}
}
// Converts media.AudioManager modes into local string representation.
static String modeToString(int mode) {
switch (mode) {
case MODE_IN_CALL:
return "MODE_IN_CALL";
case MODE_IN_COMMUNICATION:
return "MODE_IN_COMMUNICATION";
case MODE_NORMAL:
return "MODE_NORMAL";
case MODE_RINGTONE:
return "MODE_RINGTONE";
default:
return "MODE_INVALID";
}
}
private static String streamTypeToString(int stream) {
switch (stream) {
case AudioManager.STREAM_VOICE_CALL:
return "STREAM_VOICE_CALL";
case AudioManager.STREAM_MUSIC:
return "STREAM_MUSIC";
case AudioManager.STREAM_RING:
return "STREAM_RING";
case AudioManager.STREAM_ALARM:
return "STREAM_ALARM";
case AudioManager.STREAM_NOTIFICATION:
return "STREAM_NOTIFICATION";
case AudioManager.STREAM_SYSTEM:
return "STREAM_SYSTEM";
default:
return "STREAM_INVALID";
}
}
// Converts AudioDeviceInfo types to local string representation.
private static String deviceTypeToString(int type) {
switch (type) {
case AudioDeviceInfo.TYPE_UNKNOWN:
return "TYPE_UNKNOWN";
case AudioDeviceInfo.TYPE_BUILTIN_EARPIECE:
return "TYPE_BUILTIN_EARPIECE";
case AudioDeviceInfo.TYPE_BUILTIN_SPEAKER:
return "TYPE_BUILTIN_SPEAKER";
case AudioDeviceInfo.TYPE_WIRED_HEADSET:
return "TYPE_WIRED_HEADSET";
case AudioDeviceInfo.TYPE_WIRED_HEADPHONES:
return "TYPE_WIRED_HEADPHONES";
case AudioDeviceInfo.TYPE_LINE_ANALOG:
return "TYPE_LINE_ANALOG";
case AudioDeviceInfo.TYPE_LINE_DIGITAL:
return "TYPE_LINE_DIGITAL";
case AudioDeviceInfo.TYPE_BLUETOOTH_SCO:
return "TYPE_BLUETOOTH_SCO";
case AudioDeviceInfo.TYPE_BLUETOOTH_A2DP:
return "TYPE_BLUETOOTH_A2DP";
case AudioDeviceInfo.TYPE_HDMI:
return "TYPE_HDMI";
case AudioDeviceInfo.TYPE_HDMI_ARC:
return "TYPE_HDMI_ARC";
case AudioDeviceInfo.TYPE_USB_DEVICE:
return "TYPE_USB_DEVICE";
case AudioDeviceInfo.TYPE_USB_ACCESSORY:
return "TYPE_USB_ACCESSORY";
case AudioDeviceInfo.TYPE_DOCK:
return "TYPE_DOCK";
case AudioDeviceInfo.TYPE_FM:
return "TYPE_FM";
case AudioDeviceInfo.TYPE_BUILTIN_MIC:
return "TYPE_BUILTIN_MIC";
case AudioDeviceInfo.TYPE_FM_TUNER:
return "TYPE_FM_TUNER";
case AudioDeviceInfo.TYPE_TV_TUNER:
return "TYPE_TV_TUNER";
case AudioDeviceInfo.TYPE_TELEPHONY:
return "TYPE_TELEPHONY";
case AudioDeviceInfo.TYPE_AUX_LINE:
return "TYPE_AUX_LINE";
case AudioDeviceInfo.TYPE_IP:
return "TYPE_IP";
case AudioDeviceInfo.TYPE_BUS:
return "TYPE_BUS";
case AudioDeviceInfo.TYPE_USB_HEADSET:
return "TYPE_USB_HEADSET";
default:
return "TYPE_UNKNOWN";
}
}
// Returns true if the device can record audio via a microphone.
private static boolean hasMicrophone() {
return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
PackageManager.FEATURE_MICROPHONE);
}
}