Android: Add AudioDeviceModule interface and clean up implementation code
This CL introduces sdk/android/api/org/webrtc/audio/AudioDeviceModule.java, the new interface for audio device modules on Android. It also refactors the main implementation, sdk/android/api/org/webrtc/audio/JavaAudioDeviceModule.java, to conform to the new interface.

The old code used global static methods to configure the audio device code. This CL removes all of them in favor of a builder pattern on JavaAudioDeviceModule. The only two dynamic methods left in the interface are setSpeakerMute() and setMicrophoneMute(). Removing the global static state allowed a significant cleanup; e.g. the file sdk/android/src/jni/audio_device/audio_manager.cc has been removed entirely.

The PeerConnectionFactory interface is also updated to allow passing in an external AudioDeviceModule. The current built-in ADM is encapsulated in LegacyAudioDeviceModule.java, which remains the default for now to ensure backwards compatibility.

Bug: webrtc:7452
Change-Id: I64d5f4dba9a004da001f1acb2bd0c1b1f2b64f21
Reviewed-on: https://webrtc-review.googlesource.com/65360
Commit-Queue: Magnus Jedvert <magjed@webrtc.org>
Reviewed-by: Magnus Jedvert <magjed@webrtc.org>
Reviewed-by: Paulina Hensman <phensman@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#22765}
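For orientation, since the new AudioDeviceModule.java itself is outside the diff excerpt below, a minimal sketch of its shape follows. Only setSpeakerMute() and setMicrophoneMute() are confirmed by the description above; the native-pointer accessor and the release() hook are assumptions about what PeerConnectionFactory needs from an externally supplied ADM.

package org.webrtc.audio;

/** Sketch of the new AudioDeviceModule interface (not a verbatim copy of the CL). */
public interface AudioDeviceModule {
  // Assumed: hands a pointer to the native webrtc::AudioDeviceModule to the factory.
  long getNativeAudioDeviceModulePointer();

  // Assumed: releases native resources once the ADM is no longer needed.
  void release();

  // The two remaining dynamic methods named in the commit message.
  void setSpeakerMute(boolean mute);
  void setMicrophoneMute(boolean mute);
}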
Commit 66f1e9eb34 (parent 3ab5c40f72), committed by Commit Bot.
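A minimal sketch of client-side usage after this CL, replacing the removed global static setters with up-front builder configuration. The builder setter names and PeerConnectionFactory.builder().setAudioDeviceModule(...) are assumed from the shape of this change rather than quoted from it.

import android.content.Context;
import org.webrtc.PeerConnectionFactory;
import org.webrtc.audio.AudioDeviceModule;
import org.webrtc.audio.JavaAudioDeviceModule;

final class AdmUsageSketch {
  static PeerConnectionFactory createFactory(Context appContext) {
    // Configuration that used to go through global static setters
    // (e.g. WebRtcAudioUtils.setWebRtcBased*) is now fixed at build time.
    AudioDeviceModule adm = JavaAudioDeviceModule.builder(appContext)
        .setUseHardwareAcousticEchoCanceler(true) // assumed setter name
        .setUseHardwareNoiseSuppressor(true) // assumed setter name
        .createAudioDeviceModule();

    // New in this CL: the factory accepts an externally created ADM.
    PeerConnectionFactory factory = PeerConnectionFactory.builder()
        .setAudioDeviceModule(adm)
        .createPeerConnectionFactory();

    // The only dynamic controls left on the interface:
    adm.setMicrophoneMute(false);
    adm.setSpeakerMute(false);
    return factory;
  }
}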
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-package org.webrtc.audio;
-
-import android.os.Build;
-import org.webrtc.CalledByNative;
-
-public final class BuildInfo {
-  public static String getDevice() {
-    return Build.DEVICE;
-  }
-
-  @CalledByNative
-  public static String getDeviceModel() {
-    return Build.MODEL;
-  }
-
-  public static String getProduct() {
-    return Build.PRODUCT;
-  }
-
-  @CalledByNative
-  public static String getBrand() {
-    return Build.BRAND;
-  }
-
-  @CalledByNative
-  public static String getDeviceManufacturer() {
-    return Build.MANUFACTURER;
-  }
-
-  @CalledByNative
-  public static String getAndroidBuildId() {
-    return Build.ID;
-  }
-
-  @CalledByNative
-  public static String getBuildType() {
-    return Build.TYPE;
-  }
-
-  @CalledByNative
-  public static String getBuildRelease() {
-    return Build.VERSION.RELEASE;
-  }
-
-  @CalledByNative
-  public static int getSdkVersion() {
-    return Build.VERSION.SDK_INT;
-  }
-}

@@ -29,7 +29,7 @@ import org.webrtc.Logging;
 class WebRtcAudioEffects {
   private static final boolean DEBUG = false;
 
-  private static final String TAG = "WebRtcAudioEffects";
+  private static final String TAG = "WebRtcAudioEffectsExternal";
 
   // UUIDs for Software Audio Effects that we want to avoid using.
   // The implementor field will be set to "The Android Open Source Project".
@@ -56,106 +56,20 @@ class WebRtcAudioEffects {
   private boolean shouldEnableAec = false;
   private boolean shouldEnableNs = false;
 
-  // Checks if the device implements Acoustic Echo Cancellation (AEC).
-  // Returns true if the device implements AEC, false otherwise.
+  // Returns true if all conditions for supporting HW Acoustic Echo Cancellation (AEC) are
+  // fulfilled.
   @TargetApi(18)
   public static boolean isAcousticEchoCancelerSupported() {
-    // Note: we're using isAcousticEchoCancelerEffectAvailable() instead of
-    // AcousticEchoCanceler.isAvailable() to avoid the expensive getEffects()
-    // OS API call.
-    return isAcousticEchoCancelerEffectAvailable();
+    return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_AEC, AOSP_ACOUSTIC_ECHO_CANCELER);
   }
 
-  // Checks if the device implements Noise Suppression (NS).
-  // Returns true if the device implements NS, false otherwise.
+  // Returns true if all conditions for supporting HW Noise Suppression (NS) are fulfilled.
   @TargetApi(18)
   public static boolean isNoiseSuppressorSupported() {
-    // Note: we're using isNoiseSuppressorEffectAvailable() instead of
-    // NoiseSuppressor.isAvailable() to avoid the expensive getEffects()
-    // OS API call.
-    return isNoiseSuppressorEffectAvailable();
+    return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_NS, AOSP_NOISE_SUPPRESSOR);
   }
 
-  // Returns true if the device is blacklisted for HW AEC usage.
-  public static boolean isAcousticEchoCancelerBlacklisted() {
-    List<String> blackListedModels = WebRtcAudioUtils.getBlackListedModelsForAecUsage();
-    boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
-    if (isBlacklisted) {
-      Logging.w(TAG, Build.MODEL + " is blacklisted for HW AEC usage!");
-    }
-    return isBlacklisted;
-  }
-
-  // Returns true if the device is blacklisted for HW NS usage.
-  public static boolean isNoiseSuppressorBlacklisted() {
-    List<String> blackListedModels = WebRtcAudioUtils.getBlackListedModelsForNsUsage();
-    boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
-    if (isBlacklisted) {
-      Logging.w(TAG, Build.MODEL + " is blacklisted for HW NS usage!");
-    }
-    return isBlacklisted;
-  }
-
-  // Returns true if the platform AEC should be excluded based on its UUID.
-  // AudioEffect.queryEffects() can throw IllegalStateException.
-  @TargetApi(18)
-  private static boolean isAcousticEchoCancelerExcludedByUUID() {
-    for (Descriptor d : getAvailableEffects()) {
-      if (d.type.equals(AudioEffect.EFFECT_TYPE_AEC)
-          && d.uuid.equals(AOSP_ACOUSTIC_ECHO_CANCELER)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  // Returns true if the platform NS should be excluded based on its UUID.
-  // AudioEffect.queryEffects() can throw IllegalStateException.
-  @TargetApi(18)
-  private static boolean isNoiseSuppressorExcludedByUUID() {
-    for (Descriptor d : getAvailableEffects()) {
-      if (d.type.equals(AudioEffect.EFFECT_TYPE_NS) && d.uuid.equals(AOSP_NOISE_SUPPRESSOR)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  // Returns true if the device supports Acoustic Echo Cancellation (AEC).
-  @TargetApi(18)
-  private static boolean isAcousticEchoCancelerEffectAvailable() {
-    return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_AEC);
-  }
-
-  // Returns true if the device supports Noise Suppression (NS).
-  @TargetApi(18)
-  private static boolean isNoiseSuppressorEffectAvailable() {
-    return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_NS);
-  }
-
-  // Returns true if all conditions for supporting the HW AEC are fulfilled.
-  // It will not be possible to enable the HW AEC if this method returns false.
-  public static boolean canUseAcousticEchoCanceler() {
-    boolean canUseAcousticEchoCanceler = isAcousticEchoCancelerSupported()
-        && !WebRtcAudioUtils.useWebRtcBasedAcousticEchoCanceler()
-        && !isAcousticEchoCancelerBlacklisted() && !isAcousticEchoCancelerExcludedByUUID();
-    Logging.d(TAG, "canUseAcousticEchoCanceler: " + canUseAcousticEchoCanceler);
-    return canUseAcousticEchoCanceler;
-  }
-
-  // Returns true if all conditions for supporting the HW NS are fulfilled.
-  // It will not be possible to enable the HW NS if this method returns false.
-  public static boolean canUseNoiseSuppressor() {
-    boolean canUseNoiseSuppressor = isNoiseSuppressorSupported()
-        && !WebRtcAudioUtils.useWebRtcBasedNoiseSuppressor() && !isNoiseSuppressorBlacklisted()
-        && !isNoiseSuppressorExcludedByUUID();
-    Logging.d(TAG, "canUseNoiseSuppressor: " + canUseNoiseSuppressor);
-    return canUseNoiseSuppressor;
-  }
-
-  public static WebRtcAudioEffects create() {
-    return new WebRtcAudioEffects();
-  }
-
-  private WebRtcAudioEffects() {
+  public WebRtcAudioEffects() {
     Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
   }
 
@@ -165,7 +79,7 @@ class WebRtcAudioEffects {
   // false otherwise.
   public boolean setAEC(boolean enable) {
     Logging.d(TAG, "setAEC(" + enable + ")");
-    if (!canUseAcousticEchoCanceler()) {
+    if (!isAcousticEchoCancelerSupported()) {
       Logging.w(TAG, "Platform AEC is not supported");
       shouldEnableAec = false;
       return false;
@@ -184,7 +98,7 @@ class WebRtcAudioEffects {
   // false otherwise.
   public boolean setNS(boolean enable) {
     Logging.d(TAG, "setNS(" + enable + ")");
-    if (!canUseNoiseSuppressor()) {
+    if (!isNoiseSuppressorSupported()) {
       Logging.w(TAG, "Platform NS is not supported");
       shouldEnableNs = false;
       return false;
@@ -223,7 +137,7 @@ class WebRtcAudioEffects {
       aec = AcousticEchoCanceler.create(audioSession);
       if (aec != null) {
         boolean enabled = aec.getEnabled();
-        boolean enable = shouldEnableAec && canUseAcousticEchoCanceler();
+        boolean enable = shouldEnableAec && isAcousticEchoCancelerSupported();
         if (aec.setEnabled(enable) != AudioEffect.SUCCESS) {
           Logging.e(TAG, "Failed to set the AcousticEchoCanceler state");
         }
@@ -241,7 +155,7 @@ class WebRtcAudioEffects {
       ns = NoiseSuppressor.create(audioSession);
       if (ns != null) {
         boolean enabled = ns.getEnabled();
-        boolean enable = shouldEnableNs && canUseNoiseSuppressor();
+        boolean enable = shouldEnableNs && isNoiseSuppressorSupported();
        if (ns.setEnabled(enable) != AudioEffect.SUCCESS) {
          Logging.e(TAG, "Failed to set the NoiseSuppressor state");
        }
@@ -309,14 +223,15 @@ class WebRtcAudioEffects {
   // Returns true if an effect of the specified type is available. Functionally
   // equivalent to (NoiseSuppressor|AutomaticGainControl|...).isAvailable(), but
   // faster as it avoids the expensive OS call to enumerate effects.
-  private static boolean isEffectTypeAvailable(UUID effectType) {
+  @TargetApi(18)
+  private static boolean isEffectTypeAvailable(UUID effectType, UUID blackListedUuid) {
     Descriptor[] effects = getAvailableEffects();
     if (effects == null) {
       return false;
     }
     for (Descriptor d : effects) {
       if (d.type.equals(effectType)) {
-        return true;
+        return !d.uuid.equals(blackListedUuid);
       }
     }
     return false;

@@ -17,58 +17,16 @@ import android.media.AudioFormat;
 import android.media.AudioManager;
 import android.media.AudioRecord;
 import android.media.AudioTrack;
-import android.os.Build;
-import javax.annotation.Nullable;
 import org.webrtc.Logging;
+import org.webrtc.CalledByNative;
 
-// WebRtcAudioManager handles tasks that uses android.media.AudioManager.
-// At construction, storeAudioParameters() is called and it retrieves
-// fundamental audio parameters like native sample rate and number of channels.
-// The result is then provided to the caller by nativeCacheAudioParameters().
-// It is also possible to call init() to set up the audio environment for best
-// possible "VoIP performance". All settings done in init() are reverted by
-// dispose(). This class can also be used without calling init() if the user
-// prefers to set up the audio environment separately. However, it is
-// recommended to always use AudioManager.MODE_IN_COMMUNICATION.
+/**
+ * This class contains static functions to query sample rate and input/output audio buffer sizes.
+ */
 class WebRtcAudioManager {
-  private static final boolean DEBUG = false;
-
-  private static final String TAG = "WebRtcAudioManager";
+  private static final String TAG = "WebRtcAudioManagerExternal";
 
-  // Use mono as default for both audio directions.
-  private static boolean useStereoOutput = false;
-  private static boolean useStereoInput = false;
-
-  // Call these methods to override the default mono audio modes for the specified direction(s)
-  // (input and/or output).
-  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
-  @SuppressWarnings("NoSynchronizedMethodCheck")
-  public static synchronized void setStereoOutput(boolean enable) {
-    Logging.w(TAG, "Overriding default output behavior: setStereoOutput(" + enable + ')');
-    useStereoOutput = enable;
-  }
-
-  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
-  @SuppressWarnings("NoSynchronizedMethodCheck")
-  public static synchronized void setStereoInput(boolean enable) {
-    Logging.w(TAG, "Overriding default input behavior: setStereoInput(" + enable + ')');
-    useStereoInput = enable;
-  }
-
-  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
-  @SuppressWarnings("NoSynchronizedMethodCheck")
-  @CalledByNative
-  public synchronized boolean getStereoOutput() {
-    return useStereoOutput;
-  }
-
-  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
-  @SuppressWarnings("NoSynchronizedMethodCheck")
-  @CalledByNative
-  public synchronized boolean getStereoInput() {
-    return useStereoInput;
-  }
+  private static final int DEFAULT_SAMPLE_RATE_HZ = 16000;
 
   // Default audio data format is PCM 16 bit per sample.
   // Guaranteed to be supported by all devices.
@@ -76,67 +34,32 @@ class WebRtcAudioManager {
 
   private static final int DEFAULT_FRAME_PER_BUFFER = 256;
 
-  private final AudioManager audioManager;
-  private final int sampleRate;
-  private final int outputBufferSize;
-  private final int inputBufferSize;
-  private final VolumeLogger volumeLogger;
-
-  private boolean initialized = false;
-
-  @CalledByNative
-  WebRtcAudioManager(Context context) {
-    Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
-    this.audioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
-    if (DEBUG) {
-      WebRtcAudioUtils.logDeviceInfo(TAG);
-    }
-    this.volumeLogger = new VolumeLogger(audioManager);
-
-    final int outputChannels = getStereoOutput() ? 2 : 1;
-    final int inputChannels = getStereoInput() ? 2 : 1;
-
-    this.sampleRate = getNativeOutputSampleRate();
-    this.outputBufferSize = isLowLatencyOutputSupported(context)
-        ? getLowLatencyOutputFramesPerBuffer()
-        : getMinOutputFrameSize(sampleRate, outputChannels);
-    this.inputBufferSize = isLowLatencyInputSupported(context)
-        ? getLowLatencyInputFramesPerBuffer()
-        : getMinInputFrameSize(sampleRate, inputChannels);
-
-    WebRtcAudioUtils.logAudioState(TAG);
+  static AudioManager getAudioManager(Context context) {
+    return (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
   }
 
-  @CalledByNative
-  private boolean init() {
-    Logging.d(TAG, "init" + WebRtcAudioUtils.getThreadInfo());
-    if (initialized) {
-      return true;
-    }
-    Logging.d(TAG, "audio mode is: " + WebRtcAudioUtils.modeToString(audioManager.getMode()));
-    initialized = true;
-    volumeLogger.start();
-    return true;
+  static int getOutputBufferSize(
+      Context context, AudioManager audioManager, int sampleRate, int numberOfOutputChannels) {
+    return isLowLatencyOutputSupported(context)
+        ? getLowLatencyFramesPerBuffer(audioManager)
+        : getMinOutputFrameSize(sampleRate, numberOfOutputChannels);
   }
 
-  @CalledByNative
-  private void dispose() {
-    Logging.d(TAG, "dispose" + WebRtcAudioUtils.getThreadInfo());
-    if (!initialized) {
-      return;
-    }
-    volumeLogger.stop();
+  static int getInputBufferSize(
+      Context context, AudioManager audioManager, int sampleRate, int numberOfInputChannels) {
+    return isLowLatencyInputSupported(context)
+        ? getLowLatencyFramesPerBuffer(audioManager)
+        : getMinInputFrameSize(sampleRate, numberOfInputChannels);
   }
 
   // Returns true if low-latency audio output is supported.
-  public static boolean isLowLatencyOutputSupported(Context context) {
+  private static boolean isLowLatencyOutputSupported(Context context) {
     return context.getPackageManager().hasSystemFeature(PackageManager.FEATURE_AUDIO_LOW_LATENCY);
   }
 
   // Returns true if low-latency audio input is supported.
   // TODO(henrika): remove the hardcoded false return value when OpenSL ES
   // input performance has been evaluated and tested more.
-  public static boolean isLowLatencyInputSupported(Context context) {
+  private static boolean isLowLatencyInputSupported(Context context) {
     // TODO(henrika): investigate if some sort of device list is needed here
     // as well. The NDK doc states that: "As of API level 21, lower latency
     // audio input is supported on select devices. To take advantage of this
@@ -144,49 +67,34 @@ class WebRtcAudioManager {
     return WebRtcAudioUtils.runningOnLollipopOrHigher() && isLowLatencyOutputSupported(context);
   }
 
-  // Returns the native output sample rate for this device's output stream.
-  private int getNativeOutputSampleRate() {
+  /**
+   * Returns the native input/output sample rate for this device's output stream.
+   */
+  @CalledByNative
+  static int getSampleRate(AudioManager audioManager) {
     // Override this if we're running on an old emulator image which only
     // supports 8 kHz and doesn't support PROPERTY_OUTPUT_SAMPLE_RATE.
     if (WebRtcAudioUtils.runningOnEmulator()) {
       Logging.d(TAG, "Running emulator, overriding sample rate to 8 kHz.");
       return 8000;
     }
-    // Default can be overriden by WebRtcAudioUtils.setDefaultSampleRateHz().
-    // If so, use that value and return here.
-    if (WebRtcAudioUtils.isDefaultSampleRateOverridden()) {
-      Logging.d(TAG,
-          "Default sample rate is overriden to " + WebRtcAudioUtils.getDefaultSampleRateHz()
-              + " Hz");
-      return WebRtcAudioUtils.getDefaultSampleRateHz();
-    }
-    // No overrides available. Deliver best possible estimate based on default
-    // Android AudioManager APIs.
-    final int sampleRateHz;
-    if (WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
-      sampleRateHz = getSampleRateOnJellyBeanMR10OrHigher();
-    } else {
-      sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz();
-    }
+    // Deliver best possible estimate based on default Android AudioManager APIs.
+    final int sampleRateHz = WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()
+        ? getSampleRateOnJellyBeanMR10OrHigher(audioManager)
+        : DEFAULT_SAMPLE_RATE_HZ;
     Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz");
     return sampleRateHz;
   }
 
-  @CalledByNative
-  int getSampleRate() {
-    return sampleRate;
-  }
-
   @TargetApi(17)
-  private int getSampleRateOnJellyBeanMR10OrHigher() {
+  private static int getSampleRateOnJellyBeanMR10OrHigher(AudioManager audioManager) {
     String sampleRateString = audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
-    return (sampleRateString == null) ? WebRtcAudioUtils.getDefaultSampleRateHz()
-                                      : Integer.parseInt(sampleRateString);
+    return (sampleRateString == null) ? DEFAULT_SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString);
   }
 
   // Returns the native output buffer size for low-latency output streams.
   @TargetApi(17)
-  private int getLowLatencyOutputFramesPerBuffer() {
+  private static int getLowLatencyFramesPerBuffer(AudioManager audioManager) {
     if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
       return DEFAULT_FRAME_PER_BUFFER;
     }
@@ -195,32 +103,6 @@ class WebRtcAudioManager {
     return framesPerBuffer == null ? DEFAULT_FRAME_PER_BUFFER : Integer.parseInt(framesPerBuffer);
   }
 
-  // Returns true if the device supports an audio effect (AEC or NS).
-  // Four conditions must be fulfilled if functions are to return true:
-  // 1) the platform must support the built-in (HW) effect,
-  // 2) explicit use (override) of a WebRTC based version must not be set,
-  // 3) the device must not be blacklisted for use of the effect, and
-  // 4) the UUID of the effect must be approved (some UUIDs can be excluded).
-  @CalledByNative
-  boolean isAcousticEchoCancelerSupported() {
-    return WebRtcAudioEffects.canUseAcousticEchoCanceler();
-  }
-
-  @CalledByNative
-  boolean isNoiseSuppressorSupported() {
-    return WebRtcAudioEffects.canUseNoiseSuppressor();
-  }
-
-  @CalledByNative
-  int getOutputBufferSize() {
-    return outputBufferSize;
-  }
-
-  @CalledByNative
-  int getInputBufferSize() {
-    return inputBufferSize;
-  }
-
   // Returns the minimum output buffer size for Java based audio (AudioTrack).
   // This size can also be used for OpenSL ES implementations on devices that
   // lacks support of low-latency output.
@@ -233,11 +115,6 @@ class WebRtcAudioManager {
         / bytesPerFrame;
   }
 
-  // Returns the native input buffer size for input streams.
-  private int getLowLatencyInputFramesPerBuffer() {
-    return getLowLatencyOutputFramesPerBuffer();
-  }
-
   // Returns the minimum input buffer size for Java based audio (AudioRecord).
   // This size can calso be used for OpenSL ES implementations on devices that
   // lacks support of low-latency input.

@@ -13,6 +13,8 @@ package org.webrtc.audio;
 import android.annotation.TargetApi;
 import android.media.AudioFormat;
 import android.media.AudioRecord;
+import android.media.AudioManager;
+import android.content.Context;
 import android.media.MediaRecorder.AudioSource;
 import android.os.Process;
 import java.lang.System;
@@ -29,9 +31,7 @@ import org.webrtc.audio.JavaAudioDeviceModule.AudioRecordStartErrorCode;
 import org.webrtc.audio.JavaAudioDeviceModule.SamplesReadyCallback;
 
 class WebRtcAudioRecord {
-  private static final boolean DEBUG = false;
-
-  private static final String TAG = "WebRtcAudioRecord";
+  private static final String TAG = "WebRtcAudioRecordExternal";
 
   // Default audio data format is PCM 16 bit per sample.
   // Guaranteed to be supported by all devices.
@@ -52,33 +52,28 @@ class WebRtcAudioRecord {
   // but the wait times out afther this amount of time.
   private static final long AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS = 2000;
 
-  private static final int DEFAULT_AUDIO_SOURCE = getDefaultAudioSource();
-  private static int audioSource = DEFAULT_AUDIO_SOURCE;
+  public static final int DEFAULT_AUDIO_SOURCE = AudioSource.VOICE_COMMUNICATION;
 
-  private final long nativeAudioRecord;
+  private final Context context;
+  private final AudioManager audioManager;
+  private final int audioSource;
 
-  private @Nullable WebRtcAudioEffects effects = null;
+  private long nativeAudioRecord;
+
+  private final WebRtcAudioEffects effects = new WebRtcAudioEffects();
 
   private @Nullable ByteBuffer byteBuffer;
 
   private @Nullable AudioRecord audioRecord = null;
   private @Nullable AudioRecordThread audioThread = null;
 
-  private static volatile boolean microphoneMute = false;
+  private volatile boolean microphoneMute = false;
   private byte[] emptyBytes;
 
-  private static @Nullable AudioRecordErrorCallback errorCallback = null;
-
-  public static void setErrorCallback(AudioRecordErrorCallback errorCallback) {
-    Logging.d(TAG, "Set error callback");
-    WebRtcAudioRecord.errorCallback = errorCallback;
-  }
-
-  private static @Nullable SamplesReadyCallback audioSamplesReadyCallback = null;
-
-  public static void setOnAudioSamplesReady(SamplesReadyCallback callback) {
-    audioSamplesReadyCallback = callback;
-  }
+  private final @Nullable AudioRecordErrorCallback errorCallback;
+  private final @Nullable SamplesReadyCallback audioSamplesReadyCallback;
+  private final boolean isAcousticEchoCancelerSupported;
+  private final boolean isNoiseSuppressorSupported;
 
   /**
    * Audio thread which keeps calling ByteBuffer.read() waiting for audio
@@ -129,12 +124,6 @@ class WebRtcAudioRecord {
           reportWebRtcAudioRecordError(errorMessage);
         }
       }
-      if (DEBUG) {
-        long nowTime = System.nanoTime();
-        long durationInMs = TimeUnit.NANOSECONDS.toMillis((nowTime - lastTime));
-        lastTime = nowTime;
-        Logging.d(TAG, "bytesRead[" + durationInMs + "] " + bytesRead);
-      }
     }
 
     try {
@@ -155,32 +144,55 @@ class WebRtcAudioRecord {
   }
 
   @CalledByNative
-  WebRtcAudioRecord(long nativeAudioRecord) {
-    Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
-    this.nativeAudioRecord = nativeAudioRecord;
-    if (DEBUG) {
-      WebRtcAudioUtils.logDeviceInfo(TAG);
-    }
-    effects = WebRtcAudioEffects.create();
+  WebRtcAudioRecord(Context context, AudioManager audioManager) {
+    this(context, audioManager, DEFAULT_AUDIO_SOURCE, null /* errorCallback */,
+        null /* audioSamplesReadyCallback */, WebRtcAudioEffects.isAcousticEchoCancelerSupported(),
+        WebRtcAudioEffects.isNoiseSuppressorSupported());
+  }
+
+  public WebRtcAudioRecord(Context context, AudioManager audioManager, int audioSource,
+      @Nullable AudioRecordErrorCallback errorCallback,
+      @Nullable SamplesReadyCallback audioSamplesReadyCallback,
+      boolean isAcousticEchoCancelerSupported, boolean isNoiseSuppressorSupported) {
+    if (isAcousticEchoCancelerSupported && !WebRtcAudioEffects.isAcousticEchoCancelerSupported()) {
+      throw new IllegalArgumentException("HW AEC not supported");
+    }
+    if (isNoiseSuppressorSupported && !WebRtcAudioEffects.isNoiseSuppressorSupported()) {
+      throw new IllegalArgumentException("HW NS not supported");
+    }
+    this.context = context;
+    this.audioManager = audioManager;
+    this.audioSource = audioSource;
+    this.errorCallback = errorCallback;
+    this.audioSamplesReadyCallback = audioSamplesReadyCallback;
+    this.isAcousticEchoCancelerSupported = isAcousticEchoCancelerSupported;
+    this.isNoiseSuppressorSupported = isNoiseSuppressorSupported;
+  }
+
+  @CalledByNative
+  public void setNativeAudioRecord(long nativeAudioRecord) {
+    this.nativeAudioRecord = nativeAudioRecord;
+  }
+
+  @CalledByNative
+  boolean isAcousticEchoCancelerSupported() {
+    return isAcousticEchoCancelerSupported;
+  }
+
+  @CalledByNative
+  boolean isNoiseSuppressorSupported() {
+    return isNoiseSuppressorSupported;
   }
 
   @CalledByNative
   private boolean enableBuiltInAEC(boolean enable) {
     Logging.d(TAG, "enableBuiltInAEC(" + enable + ')');
-    if (effects == null) {
-      Logging.e(TAG, "Built-in AEC is not supported on this platform");
-      return false;
-    }
     return effects.setAEC(enable);
   }
 
   @CalledByNative
   private boolean enableBuiltInNS(boolean enable) {
     Logging.d(TAG, "enableBuiltInNS(" + enable + ')');
-    if (effects == null) {
-      Logging.e(TAG, "Built-in NS is not supported on this platform");
-      return false;
-    }
     return effects.setNS(enable);
   }
 
@@ -231,9 +243,7 @@ class WebRtcAudioRecord {
       releaseAudioResources();
       return -1;
     }
-    if (effects != null) {
-      effects.enable(audioRecord.getAudioSessionId());
-    }
+    effects.enable(audioRecord.getAudioSessionId());
     logMainParameters();
     logMainParametersExtended();
     return framesPerBuffer;
@@ -269,12 +279,10 @@ class WebRtcAudioRecord {
     audioThread.stopThread();
     if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS)) {
       Logging.e(TAG, "Join of AudioRecordJavaThread timed out");
-      WebRtcAudioUtils.logAudioState(TAG);
+      WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
     }
     audioThread = null;
-    if (effects != null) {
-      effects.release();
-    }
+    effects.release();
     releaseAudioResources();
     return true;
   }
@@ -314,19 +322,9 @@ class WebRtcAudioRecord {
   @NativeClassQualifiedName("webrtc::android_adm::AudioRecordJni")
   private native void nativeDataIsRecorded(long nativeAudioRecord, int bytes);
 
-  @SuppressWarnings("NoSynchronizedMethodCheck")
-  public static synchronized void setAudioSource(int source) {
-    Logging.w(TAG, "Audio source is changed from: " + audioSource + " to " + source);
-    audioSource = source;
-  }
-
-  private static int getDefaultAudioSource() {
-    return AudioSource.VOICE_COMMUNICATION;
-  }
-
   // Sets all recorded samples to zero if |mute| is true, i.e., ensures that
   // the microphone is muted.
-  public static void setMicrophoneMute(boolean mute) {
+  public void setMicrophoneMute(boolean mute) {
     Logging.w(TAG, "setMicrophoneMute(" + mute + ")");
     microphoneMute = mute;
   }
@@ -342,7 +340,7 @@ class WebRtcAudioRecord {
 
   private void reportWebRtcAudioRecordInitError(String errorMessage) {
     Logging.e(TAG, "Init recording error: " + errorMessage);
-    WebRtcAudioUtils.logAudioState(TAG);
+    WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
     if (errorCallback != null) {
       errorCallback.onWebRtcAudioRecordInitError(errorMessage);
     }
@@ -351,7 +349,7 @@ class WebRtcAudioRecord {
   private void reportWebRtcAudioRecordStartError(
       AudioRecordStartErrorCode errorCode, String errorMessage) {
     Logging.e(TAG, "Start recording error: " + errorCode + ". " + errorMessage);
-    WebRtcAudioUtils.logAudioState(TAG);
+    WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
     if (errorCallback != null) {
       errorCallback.onWebRtcAudioRecordStartError(errorCode, errorMessage);
     }
@@ -359,7 +357,7 @@ class WebRtcAudioRecord {
 
   private void reportWebRtcAudioRecordError(String errorMessage) {
     Logging.e(TAG, "Run-time recording error: " + errorMessage);
-    WebRtcAudioUtils.logAudioState(TAG);
+    WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
     if (errorCallback != null) {
       errorCallback.onWebRtcAudioRecordError(errorMessage);
     }

@@ -21,7 +21,6 @@ import android.os.Process;
 import java.lang.Thread;
 import java.nio.ByteBuffer;
 import javax.annotation.Nullable;
-import org.webrtc.ContextUtils;
 import org.webrtc.Logging;
 import org.webrtc.ThreadUtils;
 import org.webrtc.audio.JavaAudioDeviceModule.AudioTrackErrorCallback;
@@ -30,9 +29,7 @@ import org.webrtc.CalledByNative;
 import org.webrtc.NativeClassQualifiedName;
 
 class WebRtcAudioTrack {
-  private static final boolean DEBUG = false;
-
-  private static final String TAG = "WebRtcAudioTrack";
+  private static final String TAG = "WebRtcAudioTrackExternal";
 
   // Default audio data format is PCM 16 bit per sample.
   // Guaranteed to be supported by all devices.
@@ -51,17 +48,6 @@ class WebRtcAudioTrack {
   // By default, WebRTC creates audio tracks with a usage attribute
   // corresponding to voice communications, such as telephony or VoIP.
   private static final int DEFAULT_USAGE = getDefaultUsageAttribute();
-  private static int usageAttribute = DEFAULT_USAGE;
-
-  // This method overrides the default usage attribute and allows the user
-  // to set it to something else than AudioAttributes.USAGE_VOICE_COMMUNICATION.
-  // NOTE: calling this method will most likely break existing VoIP tuning.
-  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
-  @SuppressWarnings("NoSynchronizedMethodCheck")
-  public static synchronized void setAudioTrackUsageAttribute(int usage) {
-    Logging.w(TAG, "Default usage attribute is changed from: " + DEFAULT_USAGE + " to " + usage);
-    usageAttribute = usage;
-  }
 
   private static int getDefaultUsageAttribute() {
     if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
@@ -77,7 +63,8 @@ class WebRtcAudioTrack {
     return AudioAttributes.USAGE_VOICE_COMMUNICATION;
   }
 
-  private final long nativeAudioTrack;
+  private long nativeAudioTrack;
+  private final Context context;
   private final AudioManager audioManager;
   private final ThreadUtils.ThreadChecker threadChecker = new ThreadUtils.ThreadChecker();
 
@@ -85,18 +72,14 @@ class WebRtcAudioTrack {
 
   private @Nullable AudioTrack audioTrack = null;
   private @Nullable AudioTrackThread audioThread = null;
+  private final VolumeLogger volumeLogger;
 
   // Samples to be played are replaced by zeros if |speakerMute| is set to true.
   // Can be used to ensure that the speaker is fully muted.
-  private static volatile boolean speakerMute = false;
+  private volatile boolean speakerMute = false;
   private byte[] emptyBytes;
 
-  private static @Nullable AudioTrackErrorCallback errorCallback = null;
-
-  public static void setErrorCallback(AudioTrackErrorCallback errorCallback) {
-    Logging.d(TAG, "Set extended error callback");
-    WebRtcAudioTrack.errorCallback = errorCallback;
-  }
+  private final @Nullable AudioTrackErrorCallback errorCallback;
 
   /**
    * Audio thread which keeps calling AudioTrack.write() to stream audio.
@@ -192,15 +175,22 @@ class WebRtcAudioTrack {
   }
 
   @CalledByNative
-  WebRtcAudioTrack(long nativeAudioTrack) {
+  WebRtcAudioTrack(Context context, AudioManager audioManager) {
+    this(context, audioManager, null /* errorCallback */);
+  }
+
+  WebRtcAudioTrack(
+      Context context, AudioManager audioManager, @Nullable AudioTrackErrorCallback errorCallback) {
     threadChecker.detachThread();
     Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
-    this.nativeAudioTrack = nativeAudioTrack;
-    audioManager =
-        (AudioManager) ContextUtils.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
-    if (DEBUG) {
-      WebRtcAudioUtils.logDeviceInfo(TAG);
-    }
+    this.context = context;
+    this.audioManager = audioManager;
+    this.errorCallback = errorCallback;
+    this.volumeLogger = new VolumeLogger(audioManager);
+  }
+
+  @CalledByNative
+  public void setNativeAudioTrack(long nativeAudioTrack) {
+    this.nativeAudioTrack = nativeAudioTrack;
   }
 
   @CalledByNative
@@ -279,6 +269,7 @@ class WebRtcAudioTrack {
   @CalledByNative
   private boolean startPlayout() {
     threadChecker.checkIsOnValidThread();
+    volumeLogger.start();
     Logging.d(TAG, "startPlayout");
     assertTrue(audioTrack != null);
     assertTrue(audioThread == null);
@@ -310,6 +301,7 @@ class WebRtcAudioTrack {
   @CalledByNative
   private boolean stopPlayout() {
     threadChecker.checkIsOnValidThread();
+    volumeLogger.stop();
     Logging.d(TAG, "stopPlayout");
     assertTrue(audioThread != null);
     logUnderrunCount();
@@ -319,7 +311,7 @@ class WebRtcAudioTrack {
     audioThread.interrupt();
     if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS)) {
       Logging.e(TAG, "Join of AudioTrackThread timed out.");
-      WebRtcAudioUtils.logAudioState(TAG);
+      WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
     }
     Logging.d(TAG, "AudioTrackThread has now been stopped.");
     audioThread = null;
@@ -332,7 +324,6 @@ class WebRtcAudioTrack {
   private int getStreamMaxVolume() {
     threadChecker.checkIsOnValidThread();
     Logging.d(TAG, "getStreamMaxVolume");
-    assertTrue(audioManager != null);
     return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
   }
 
@@ -341,7 +332,6 @@ class WebRtcAudioTrack {
   private boolean setStreamVolume(int volume) {
     threadChecker.checkIsOnValidThread();
     Logging.d(TAG, "setStreamVolume(" + volume + ")");
-    assertTrue(audioManager != null);
     if (isVolumeFixed()) {
       Logging.e(TAG, "The device implements a fixed volume policy.");
       return false;
@@ -364,7 +354,6 @@ class WebRtcAudioTrack {
   private int getStreamVolume() {
     threadChecker.checkIsOnValidThread();
     Logging.d(TAG, "getStreamVolume");
-    assertTrue(audioManager != null);
     return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
   }
 
@@ -394,12 +383,9 @@ class WebRtcAudioTrack {
     if (sampleRateInHz != nativeOutputSampleRate) {
       Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
     }
-    if (usageAttribute != DEFAULT_USAGE) {
-      Logging.w(TAG, "A non default usage attribute is used: " + usageAttribute);
-    }
     // Create an audio track where the audio usage is for VoIP and the content type is speech.
     return new AudioTrack(new AudioAttributes.Builder()
-                              .setUsage(usageAttribute)
+                              .setUsage(DEFAULT_USAGE)
                               .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
                               .build(),
         new AudioFormat.Builder()
@@ -466,7 +452,7 @@ class WebRtcAudioTrack {
 
   // Sets all samples to be played out to zero if |mute| is true, i.e.,
   // ensures that the speaker is muted.
-  public static void setSpeakerMute(boolean mute) {
+  public void setSpeakerMute(boolean mute) {
     Logging.w(TAG, "setSpeakerMute(" + mute + ")");
     speakerMute = mute;
   }
@@ -482,7 +468,7 @@ class WebRtcAudioTrack {
 
   private void reportWebRtcAudioTrackInitError(String errorMessage) {
     Logging.e(TAG, "Init playout error: " + errorMessage);
-    WebRtcAudioUtils.logAudioState(TAG);
+    WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
     if (errorCallback != null) {
       errorCallback.onWebRtcAudioTrackInitError(errorMessage);
     }
@@ -491,7 +477,7 @@ class WebRtcAudioTrack {
   private void reportWebRtcAudioTrackStartError(
       AudioTrackStartErrorCode errorCode, String errorMessage) {
     Logging.e(TAG, "Start playout error: " + errorCode + ". " + errorMessage);
-    WebRtcAudioUtils.logAudioState(TAG);
+    WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
     if (errorCallback != null) {
       errorCallback.onWebRtcAudioTrackStartError(errorCode, errorMessage);
     }
@@ -499,7 +485,7 @@ class WebRtcAudioTrack {
 
   private void reportWebRtcAudioTrackError(String errorMessage) {
     Logging.e(TAG, "Run-time playback error: " + errorMessage);
-    WebRtcAudioUtils.logAudioState(TAG);
+    WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
     if (errorCallback != null) {
       errorCallback.onWebRtcAudioTrackError(errorMessage);
     }

@@ -29,127 +29,10 @@ import java.lang.Thread;
 import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
-import org.webrtc.ContextUtils;
 import org.webrtc.Logging;
 
 final class WebRtcAudioUtils {
-  private static final String TAG = "WebRtcAudioUtils";
-
-  // List of devices where it has been verified that the built-in effect
-  // bad and where it makes sense to avoid using it and instead rely on the
-  // native WebRTC version instead. The device name is given by Build.MODEL.
-  private static final String[] BLACKLISTED_AEC_MODELS = new String[] {
-      // It is recommended to maintain a list of blacklisted models outside
-      // this package and instead call setWebRtcBasedAcousticEchoCanceler(true)
-      // from the client for devices where the built-in AEC shall be disabled.
-  };
-  private static final String[] BLACKLISTED_NS_MODELS = new String[] {
-      // It is recommended to maintain a list of blacklisted models outside
-      // this package and instead call setWebRtcBasedNoiseSuppressor(true)
-      // from the client for devices where the built-in NS shall be disabled.
-  };
-
-  // Use 16kHz as the default sample rate. A higher sample rate might prevent
-  // us from supporting communication mode on some older (e.g. ICS) devices.
-  private static final int DEFAULT_SAMPLE_RATE_HZ = 16000;
-  private static int defaultSampleRateHz = DEFAULT_SAMPLE_RATE_HZ;
-  // Set to true if setDefaultSampleRateHz() has been called.
-  private static boolean isDefaultSampleRateOverridden = false;
-
-  // By default, utilize hardware based audio effects for AEC and NS when
-  // available.
-  private static boolean useWebRtcBasedAcousticEchoCanceler = false;
-  private static boolean useWebRtcBasedNoiseSuppressor = false;
-
-  // Call these methods if any hardware based effect shall be replaced by a
-  // software based version provided by the WebRTC stack instead.
-  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
-  @SuppressWarnings("NoSynchronizedMethodCheck")
-  public static synchronized void setWebRtcBasedAcousticEchoCanceler(boolean enable) {
-    useWebRtcBasedAcousticEchoCanceler = enable;
-  }
-
-  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
-  @SuppressWarnings("NoSynchronizedMethodCheck")
-  public static synchronized void setWebRtcBasedNoiseSuppressor(boolean enable) {
-    useWebRtcBasedNoiseSuppressor = enable;
-  }
-
-  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
-  @SuppressWarnings("NoSynchronizedMethodCheck")
-  public static synchronized void setWebRtcBasedAutomaticGainControl(boolean enable) {
-    // TODO(henrika): deprecated; remove when no longer used by any client.
-    Logging.w(TAG, "setWebRtcBasedAutomaticGainControl() is deprecated");
-  }
-
-  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
-  @SuppressWarnings("NoSynchronizedMethodCheck")
-  public static synchronized boolean useWebRtcBasedAcousticEchoCanceler() {
-    if (useWebRtcBasedAcousticEchoCanceler) {
-      Logging.w(TAG, "Overriding default behavior; now using WebRTC AEC!");
-    }
-    return useWebRtcBasedAcousticEchoCanceler;
-  }
-
-  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
-  @SuppressWarnings("NoSynchronizedMethodCheck")
-  public static synchronized boolean useWebRtcBasedNoiseSuppressor() {
-    if (useWebRtcBasedNoiseSuppressor) {
-      Logging.w(TAG, "Overriding default behavior; now using WebRTC NS!");
-    }
-    return useWebRtcBasedNoiseSuppressor;
-  }
-
-  // TODO(henrika): deprecated; remove when no longer used by any client.
-  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
-  @SuppressWarnings("NoSynchronizedMethodCheck")
-  public static synchronized boolean useWebRtcBasedAutomaticGainControl() {
-    // Always return true here to avoid trying to use any built-in AGC.
-    return true;
-  }
-
-  // Returns true if the device supports an audio effect (AEC or NS).
-  // Four conditions must be fulfilled if functions are to return true:
-  // 1) the platform must support the built-in (HW) effect,
-  // 2) explicit use (override) of a WebRTC based version must not be set,
-  // 3) the device must not be blacklisted for use of the effect, and
-  // 4) the UUID of the effect must be approved (some UUIDs can be excluded).
-  public static boolean isAcousticEchoCancelerSupported() {
-    return WebRtcAudioEffects.canUseAcousticEchoCanceler();
-  }
-  public static boolean isNoiseSuppressorSupported() {
-    return WebRtcAudioEffects.canUseNoiseSuppressor();
-  }
-
-  // Call this method if the default handling of querying the native sample
-  // rate shall be overridden. Can be useful on some devices where the
-  // available Android APIs are known to return invalid results.
-  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
-  @SuppressWarnings("NoSynchronizedMethodCheck")
-  public static synchronized void setDefaultSampleRateHz(int sampleRateHz) {
-    isDefaultSampleRateOverridden = true;
-    defaultSampleRateHz = sampleRateHz;
-  }
-
-  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
-  @SuppressWarnings("NoSynchronizedMethodCheck")
-  public static synchronized boolean isDefaultSampleRateOverridden() {
-    return isDefaultSampleRateOverridden;
-  }
-
-  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
-  @SuppressWarnings("NoSynchronizedMethodCheck")
-  public static synchronized int getDefaultSampleRateHz() {
-    return defaultSampleRateHz;
-  }
-
-  public static List<String> getBlackListedModelsForAecUsage() {
-    return Arrays.asList(WebRtcAudioUtils.BLACKLISTED_AEC_MODELS);
-  }
-
-  public static List<String> getBlackListedModelsForNsUsage() {
-    return Arrays.asList(WebRtcAudioUtils.BLACKLISTED_NS_MODELS);
-  }
+  private static final String TAG = "WebRtcAudioUtilsExternal";
 
   public static boolean runningOnJellyBeanMR1OrHigher() {
     // November 2012: Android 4.2. API Level 17.
@@ -214,22 +97,19 @@ final class WebRtcAudioUtils {
   // Logs information about the current audio state. The idea is to call this
   // method when errors are detected to log under what conditions the error
   // occurred. Hopefully it will provide clues to what might be the root cause.
-  static void logAudioState(String tag) {
+  static void logAudioState(String tag, Context context, AudioManager audioManager) {
     logDeviceInfo(tag);
-    final Context context = ContextUtils.getApplicationContext();
-    final AudioManager audioManager =
-        (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
-    logAudioStateBasic(tag, audioManager);
+    logAudioStateBasic(tag, context, audioManager);
     logAudioStateVolume(tag, audioManager);
     logAudioDeviceInfo(tag, audioManager);
   }
 
   // Reports basic audio statistics.
-  private static void logAudioStateBasic(String tag, AudioManager audioManager) {
+  private static void logAudioStateBasic(String tag, Context context, AudioManager audioManager) {
     Logging.d(tag,
         "Audio State: "
             + "audio mode: " + modeToString(audioManager.getMode()) + ", "
-            + "has mic: " + hasMicrophone() + ", "
+            + "has mic: " + hasMicrophone(context) + ", "
             + "mic muted: " + audioManager.isMicrophoneMute() + ", "
             + "music active: " + audioManager.isMusicActive() + ", "
             + "speakerphone: " + audioManager.isSpeakerphoneOn() + ", "
@@ -394,8 +274,7 @@ final class WebRtcAudioUtils {
   }
 
   // Returns true if the device can record audio via a microphone.
-  private static boolean hasMicrophone() {
-    return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
-        PackageManager.FEATURE_MICROPHONE);
+  private static boolean hasMicrophone(Context context) {
+    return context.getPackageManager().hasSystemFeature(PackageManager.FEATURE_MICROPHONE);
   }
 }