Adds logging of audio session status on the recording side in the ADM for Android.
The goal is to be able to retrieve more details about possible microphone
conflicts in cases where Init/Start of audio recording fails. Only
supported on Android N and higher.

Also adds a new boolean UMA histogram called
WebRTC.Audio.SourceMatchesRecordingSession. Its value is stored after the
recording session has been stopped.

Does not affect the media flow or functionality of the ADM. Time to start
audio should not be affected either, since the new check and logging take
place on a separate ExecutorService thread.

See go/webrtc-adm-android for more details and examples.

Bug: webrtc:10971
Change-Id: Ia80c1534e326907a1582824225d5f58caa016922
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/150793
Commit-Queue: Henrik Andreasson <henrika@webrtc.org>
Reviewed-by: Alex Glaznev <glaznev@webrtc.org>
Reviewed-by: Paulina Hensman <phensman@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#29236}
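For context, the conflict detection builds on AudioManager#getActiveRecordingConfigurations(), available since API level 24 (Android N). Below is a minimal sketch of the kind of query the patch performs; the MicConflictProbe class and method names are illustrative only, not part of the change:

import android.media.AudioManager;
import android.media.AudioRecordingConfiguration;
import android.os.Build;
import java.util.List;

// Hypothetical helper: counts active recording sessions other than our own.
// A non-zero result around Init/Start time is the "microphone conflict" signal.
final class MicConflictProbe {
  static int countForeignSessions(AudioManager audioManager, int ownSessionId) {
    if (Build.VERSION.SDK_INT < Build.VERSION_CODES.N) {
      return 0; // Query not supported below Android N.
    }
    List<AudioRecordingConfiguration> configs =
        audioManager.getActiveRecordingConfigurations();
    int foreignSessions = 0;
    for (AudioRecordingConfiguration config : configs) {
      if (config.getClientAudioSessionId() != ownSessionId) {
        foreignSessions++;
      }
    }
    return foreignSessions;
  }
}
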
@@ -12,9 +12,11 @@ package org.webrtc.audio;

import android.annotation.TargetApi;
import android.content.Context;
import android.media.AudioDeviceInfo;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioRecordingConfiguration;
import android.media.MediaRecorder.AudioSource;
import android.os.Build;
import android.os.Process;
@@ -22,6 +24,12 @@ import android.support.annotation.Nullable;
import java.lang.System;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.webrtc.CalledByNative;
import org.webrtc.Logging;
@@ -61,6 +69,11 @@ class WebRtcAudioRecord {
  // Indicates AudioRecord has stopped recording audio.
  private static final int AUDIO_RECORD_STOP = 1;

  // Time to wait before checking recording status after start has been called. Tests have
  // shown that the result can sometimes be invalid (our own status might be missing) if we check
  // directly after start.
  private static final int CHECK_REC_STATUS_DELAY_MS = 100;

  private final Context context;
  private final AudioManager audioManager;
  private final int audioSource;
@@ -75,7 +88,12 @@ class WebRtcAudioRecord {
  private @Nullable AudioRecord audioRecord;
  private @Nullable AudioRecordThread audioThread;

  private @Nullable ScheduledExecutorService executor;
  private @Nullable ScheduledFuture<String> future;

  private volatile boolean microphoneMute;
  private boolean audioSourceMatchesRecordingSession;
  private boolean audioConfigHasBeenVerified;
  private byte[] emptyBytes;

  private final @Nullable AudioRecordErrorCallback errorCallback;
@@ -185,6 +203,7 @@ class WebRtcAudioRecord {
    this.audioSamplesReadyCallback = audioSamplesReadyCallback;
    this.isAcousticEchoCancelerSupported = isAcousticEchoCancelerSupported;
    this.isNoiseSuppressorSupported = isNoiseSuppressorSupported;
    Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
  }

  @CalledByNative
@@ -202,6 +221,19 @@ class WebRtcAudioRecord {
    return isNoiseSuppressorSupported;
  }

  @CalledByNative
  // Returns true if verifyAudioConfig() succeeds. This value is set after a specific delay when
  // startRecording() has been called. Hence, it should preferably be called in combination with
  // stopRecording() to ensure that it has been set properly. |audioConfigHasBeenVerified| is
  // enabled in WebRtcAudioRecord to ensure that the returned value is valid.
  boolean isAudioSourceMatchingRecordingSession() {
    if (!audioConfigHasBeenVerified) {
      Logging.w(TAG, "Audio configuration has not yet been verified");
      return false;
    }
    return audioSourceMatchesRecordingSession;
  }

  @CalledByNative
  private boolean enableBuiltInAEC(boolean enable) {
    Logging.d(TAG, "enableBuiltInAEC(" + enable + ")");
@@ -277,6 +309,16 @@ class WebRtcAudioRecord {
    effects.enable(audioRecord.getAudioSessionId());
    logMainParameters();
    logMainParametersExtended();
    // Check number of active recording sessions. Should be zero but we have seen conflict cases
    // and adding a log for it can help us figure out details about conflicting sessions.
    final int numActiveRecordingSessions =
        logRecordingConfigurations(false /* verifyAudioConfig */);
    if (numActiveRecordingSessions != 0) {
      // Log the conflict as a warning since initialization did in fact succeed. Most likely, the
      // upcoming call to startRecording() will fail under these conditions.
      Logging.w(
          TAG, "Potential microphone conflict. Active sessions: " + numActiveRecordingSessions);
    }
    return framesPerBuffer;
  }

@@ -300,6 +342,7 @@ class WebRtcAudioRecord {
    }
    audioThread = new AudioRecordThread("AudioRecordJavaThread");
    audioThread.start();
    scheduleLogRecordingConfigurationsTask();
    return true;
  }

@@ -307,6 +350,17 @@ class WebRtcAudioRecord {
  private boolean stopRecording() {
    Logging.d(TAG, "stopRecording");
    assertTrue(audioThread != null);
    if (future != null) {
      if (!future.isDone()) {
        // Might be needed if the client calls startRecording(), stopRecording() back-to-back.
        future.cancel(true /* mayInterruptIfRunning */);
      }
      future = null;
    }
    if (executor != null) {
      executor.shutdownNow();
      executor = null;
    }
    audioThread.stopThread();
    if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS)) {
      Logging.e(TAG, "Join of AudioRecordJavaThread timed out");
@@ -347,8 +401,9 @@ class WebRtcAudioRecord {
        + "sample rate: " + audioRecord.getSampleRate());
  }

  @TargetApi(Build.VERSION_CODES.M)
  private void logMainParametersExtended() {
-    if (Build.VERSION.SDK_INT >= 23) {
+    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
      Logging.d(TAG,
          "AudioRecord: "
              // The frame count of the native AudioRecord buffer.
@@ -356,6 +411,35 @@ class WebRtcAudioRecord {
    }
  }

  @TargetApi(Build.VERSION_CODES.N)
  // Checks the number of active recording sessions and logs the states of all active sessions.
  // Returns number of active sessions.
  private int logRecordingConfigurations(boolean verifyAudioConfig) {
    if (Build.VERSION.SDK_INT < Build.VERSION_CODES.N) {
      Logging.w(TAG, "AudioManager#getActiveRecordingConfigurations() requires N or higher");
      return 0;
    }
    // Get a list of the currently active audio recording configurations of the device (can be more
    // than one). An empty list indicates there is no recording active when queried.
    List<AudioRecordingConfiguration> configs = audioManager.getActiveRecordingConfigurations();
    final int numActiveRecordingSessions = configs.size();
    Logging.d(TAG, "Number of active recording sessions: " + numActiveRecordingSessions);
    if (numActiveRecordingSessions > 0) {
      logActiveRecordingConfigs(audioRecord.getAudioSessionId(), configs);
      if (verifyAudioConfig) {
        // Run an extra check to verify that the existing audio source doing the recording (tied
        // to the AudioRecord instance) is matching what the audio recording configuration lists
        // as its client parameters. If these do not match, recording might work but under invalid
        // conditions.
        audioSourceMatchesRecordingSession =
            verifyAudioConfig(audioRecord.getAudioSource(), audioRecord.getAudioSessionId(),
                audioRecord.getFormat(), audioRecord.getRoutedDevice(), configs);
        audioConfigHasBeenVerified = true;
      }
    }
    return numActiveRecordingSessions;
  }

  // Helper method which throws an exception when an assertion has failed.
  private static void assertTrue(boolean condition) {
    if (!condition) {
@@ -390,6 +474,7 @@ class WebRtcAudioRecord {
  private void reportWebRtcAudioRecordInitError(String errorMessage) {
    Logging.e(TAG, "Init recording error: " + errorMessage);
    WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
    logRecordingConfigurations(false /* verifyAudioConfig */);
    if (errorCallback != null) {
      errorCallback.onWebRtcAudioRecordInitError(errorMessage);
    }
@@ -399,6 +484,7 @@ class WebRtcAudioRecord {
      AudioRecordStartErrorCode errorCode, String errorMessage) {
    Logging.e(TAG, "Start recording error: " + errorCode + ". " + errorMessage);
    WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
    logRecordingConfigurations(false /* verifyAudioConfig */);
    if (errorCallback != null) {
      errorCallback.onWebRtcAudioRecordStartError(errorCode, errorMessage);
    }
@@ -413,7 +499,7 @@ class WebRtcAudioRecord {
  }

  private void doAudioRecordStateCallback(int audioState) {
-    Logging.d(TAG, "doAudioRecordStateCallback: " + audioState);
+    Logging.d(TAG, "doAudioRecordStateCallback: " + audioStateToString(audioState));
    if (stateCallback != null) {
      if (audioState == WebRtcAudioRecord.AUDIO_RECORD_START) {
        stateCallback.onWebRtcAudioRecordStart();
@@ -443,4 +529,147 @@ class WebRtcAudioRecord {
        throw new IllegalArgumentException("Bad audio format " + audioFormat);
    }
  }

  // Use an ExecutorService to schedule a task after a given delay where the task consists of
  // checking (by logging) the current status of active recording sessions.
  private void scheduleLogRecordingConfigurationsTask() {
    Logging.d(TAG, "scheduleLogRecordingConfigurationsTask");
    if (Build.VERSION.SDK_INT < Build.VERSION_CODES.N) {
      return;
    }
    if (executor != null) {
      executor.shutdownNow();
    }
    executor = Executors.newSingleThreadScheduledExecutor();

    Callable<String> callable = () -> {
      logRecordingConfigurations(true /* verifyAudioConfig */);
      return "Scheduled task is done";
    };

    if (future != null && !future.isDone()) {
      future.cancel(true /* mayInterruptIfRunning */);
    }
    // Schedule call to logRecordingConfigurations() from executor thread after fixed delay.
    future = executor.schedule(callable, CHECK_REC_STATUS_DELAY_MS, TimeUnit.MILLISECONDS);
  }

  @TargetApi(Build.VERSION_CODES.N)
  private static boolean logActiveRecordingConfigs(
      int session, List<AudioRecordingConfiguration> configs) {
    assertTrue(!configs.isEmpty());
    final Iterator<AudioRecordingConfiguration> it = configs.iterator();
    Logging.d(TAG, "AudioRecordingConfigurations: ");
    while (it.hasNext()) {
      final AudioRecordingConfiguration config = it.next();
      StringBuilder conf = new StringBuilder();
      // The audio source selected by the client.
      final int audioSource = config.getClientAudioSource();
      conf.append(" client audio source=")
          .append(WebRtcAudioUtils.audioSourceToString(audioSource))
          .append(", client session id=")
          .append(config.getClientAudioSessionId())
          // Compare with our own id (based on AudioRecord#getAudioSessionId()).
          .append(" (")
          .append(session)
          .append(")")
          .append("\n");
      // Audio format at which audio is recorded on this Android device. Note that it may differ
      // from the client application recording format (see getClientFormat()).
      AudioFormat format = config.getFormat();
      conf.append(" Device AudioFormat: ")
          .append("channel count=")
          .append(format.getChannelCount())
          .append(", channel index mask=")
          .append(format.getChannelIndexMask())
          // Only AudioFormat#CHANNEL_IN_MONO is guaranteed to work on all devices.
          .append(", channel mask=")
          .append(WebRtcAudioUtils.channelMaskToString(format.getChannelMask()))
          .append(", encoding=")
          .append(WebRtcAudioUtils.audioEncodingToString(format.getEncoding()))
          .append(", sample rate=")
          .append(format.getSampleRate())
          .append("\n");
      // Audio format at which the client application is recording audio.
      format = config.getClientFormat();
      conf.append(" Client AudioFormat: ")
          .append("channel count=")
          .append(format.getChannelCount())
          .append(", channel index mask=")
          .append(format.getChannelIndexMask())
          // Only AudioFormat#CHANNEL_IN_MONO is guaranteed to work on all devices.
          .append(", channel mask=")
          .append(WebRtcAudioUtils.channelMaskToString(format.getChannelMask()))
          .append(", encoding=")
          .append(WebRtcAudioUtils.audioEncodingToString(format.getEncoding()))
          .append(", sample rate=")
          .append(format.getSampleRate())
          .append("\n");
      // Audio input device used for this recording session.
      final AudioDeviceInfo device = config.getAudioDevice();
      if (device != null) {
        assertTrue(device.isSource());
        conf.append(" AudioDevice: ")
            .append("type=")
            .append(WebRtcAudioUtils.deviceTypeToString(device.getType()))
            .append(", id=")
            .append(device.getId());
      }
      Logging.d(TAG, conf.toString());
    }
    return true;
  }

  // Verify that the client audio configuration (device and format) matches the requested
  // configuration (same as AudioRecord's).
  @TargetApi(Build.VERSION_CODES.N)
  private static boolean verifyAudioConfig(int source, int session, AudioFormat format,
      AudioDeviceInfo device, List<AudioRecordingConfiguration> configs) {
    assertTrue(!configs.isEmpty());
    final Iterator<AudioRecordingConfiguration> it = configs.iterator();
    while (it.hasNext()) {
      final AudioRecordingConfiguration config = it.next();
      final AudioDeviceInfo configDevice = config.getAudioDevice();
      if (configDevice == null) {
        continue;
      }
      if ((config.getClientAudioSource() == source)
          && (config.getClientAudioSessionId() == session)
          // Check the client format (should match the format of the AudioRecord instance).
          && (config.getClientFormat().getEncoding() == format.getEncoding())
          && (config.getClientFormat().getSampleRate() == format.getSampleRate())
          && (config.getClientFormat().getChannelMask() == format.getChannelMask())
          && (config.getClientFormat().getChannelIndexMask() == format.getChannelIndexMask())
          // Ensure that the device format is properly configured.
          && (config.getFormat().getEncoding() != AudioFormat.ENCODING_INVALID)
          && (config.getFormat().getSampleRate() > 0)
          // For the channel mask, either the position or index-based value must be valid.
          && ((config.getFormat().getChannelMask() != AudioFormat.CHANNEL_INVALID)
              || (config.getFormat().getChannelIndexMask() != AudioFormat.CHANNEL_INVALID))
          && checkDeviceMatch(configDevice, device)) {
        Logging.d(TAG, "verifyAudioConfig: PASS");
        return true;
      }
    }
    Logging.e(TAG, "verifyAudioConfig: FAILED");
    return false;
  }

  @TargetApi(Build.VERSION_CODES.N)
  // Returns true if device A parameters match those of device B.
  // TODO(henrika): can be improved by adding AudioDeviceInfo#getAddress() but it requires API 29.
  private static boolean checkDeviceMatch(AudioDeviceInfo devA, AudioDeviceInfo devB) {
    return ((devA.getId() == devB.getId()) && (devA.getType() == devB.getType()));
  }

  private static String audioStateToString(int state) {
    switch (state) {
      case WebRtcAudioRecord.AUDIO_RECORD_START:
        return "START";
      case WebRtcAudioRecord.AUDIO_RECORD_STOP:
        return "STOP";
      default:
        return "INVALID";
    }
  }
}

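The heart of the WebRtcAudioRecord changes above is a deferred check: the status query runs on a single-threaded ScheduledExecutorService about 100 ms after start, so it never blocks the media path, and the result is read back at stop time. A standalone sketch of that pattern follows; the DelayedCheckDemo class and its simulated check are hypothetical, and only the executor handling mirrors the patch:

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

// Hypothetical demo of the delayed-verification pattern used by WebRtcAudioRecord.
final class DelayedCheckDemo {
  private static final int CHECK_DELAY_MS = 100;

  private ScheduledExecutorService executor;
  private ScheduledFuture<String> future;
  private volatile boolean verified; // Written from the executor thread.

  void start() {
    // (Re)create the executor so back-to-back start/stop cycles get a fresh thread.
    if (executor != null) {
      executor.shutdownNow();
    }
    executor = Executors.newSingleThreadScheduledExecutor();
    Callable<String> check = () -> {
      verified = true; // Stands in for logRecordingConfigurations(true).
      return "check done";
    };
    if (future != null && !future.isDone()) {
      future.cancel(true /* mayInterruptIfRunning */);
    }
    future = executor.schedule(check, CHECK_DELAY_MS, TimeUnit.MILLISECONDS);
  }

  boolean stop() {
    // Cancel a still-pending check; needed if start() and stop() come back-to-back.
    if (future != null && !future.isDone()) {
      future.cancel(true /* mayInterruptIfRunning */);
    }
    future = null;
    if (executor != null) {
      executor.shutdownNow();
      executor = null;
    }
    return verified;
  }

  public static void main(String[] args) throws InterruptedException {
    DelayedCheckDemo demo = new DelayedCheckDemo();
    demo.start();
    Thread.sleep(200); // Give the scheduled check time to run.
    System.out.println("verified after stop: " + demo.stop());
  }
}
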
@@ -188,6 +188,7 @@ class WebRtcAudioTrack {
    this.errorCallback = errorCallback;
    this.stateCallback = stateCallback;
    this.volumeLogger = new VolumeLogger(audioManager);
    Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
  }

  @CalledByNative
@@ -238,7 +239,7 @@ class WebRtcAudioTrack {
    // Create an AudioTrack object and initialize its associated audio buffer.
    // The size of this buffer determines how long an AudioTrack can play
    // before running out of data.
-    if (Build.VERSION.SDK_INT >= 21) {
+    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
      // If we are on API level 21 or higher, it is possible to use a special AudioTrack
      // constructor that uses AudioAttributes and AudioFormat as input. It allows us to
      // supersede the notion of stream types for defining the behavior of audio playback,
@@ -345,7 +346,7 @@ class WebRtcAudioTrack {
  }

  private boolean isVolumeFixed() {
-    if (Build.VERSION.SDK_INT < 21)
+    if (Build.VERSION.SDK_INT < Build.VERSION_CODES.LOLLIPOP)
      return false;
    return audioManager.isVolumeFixed();
  }
@@ -385,7 +386,7 @@ class WebRtcAudioTrack {
  // Creates an AudioTrack instance using AudioAttributes and AudioFormat as input.
  // It allows certain platforms or routing policies to use this information for more
  // refined volume or routing decisions.
-  @TargetApi(21)
+  @TargetApi(Build.VERSION_CODES.LOLLIPOP)
  private static AudioTrack createAudioTrackOnLollipopOrHigher(
      int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
    Logging.d(TAG, "createAudioTrackOnLollipopOrHigher");
@@ -418,7 +419,7 @@ class WebRtcAudioTrack {
  }

  private void logBufferSizeInFrames() {
-    if (Build.VERSION.SDK_INT >= 23) {
+    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
      Logging.d(TAG,
          "AudioTrack: "
              // The effective size of the AudioTrack buffer that the app writes to.
@@ -427,7 +428,7 @@ class WebRtcAudioTrack {
  }

  private void logBufferCapacityInFrames() {
-    if (Build.VERSION.SDK_INT >= 24) {
+    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
      Logging.d(TAG,
          "AudioTrack: "
              // Maximum size of the AudioTrack buffer in frames.
@@ -447,7 +448,7 @@ class WebRtcAudioTrack {
  // TODO(henrika): keep track of this value in the field and possibly add new
  // UMA stat if needed.
  private void logUnderrunCount() {
-    if (Build.VERSION.SDK_INT >= 24) {
+    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
      Logging.d(TAG, "underrun count: " + audioTrack.getUnderrunCount());
    }
  }

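The WebRtcAudioTrack edits above are a mechanical cleanup rather than new behavior: raw API levels (21, 23, 24) are replaced by the Build.VERSION_CODES constants they correspond to (LOLLIPOP, M, N). A tiny illustrative example (the VersionGuards class is hypothetical, not part of the patch):

import android.os.Build;

final class VersionGuards {
  static boolean supportsUnderrunCount() {
    // Equivalent to "Build.VERSION.SDK_INT >= 24"; the symbolic constant documents
    // that AudioTrack#getUnderrunCount() arrived in Android N.
    return Build.VERSION.SDK_INT >= Build.VERSION_CODES.N;
  }
}
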
@@ -15,9 +15,11 @@ import static android.media.AudioManager.MODE_IN_COMMUNICATION;
import static android.media.AudioManager.MODE_NORMAL;
import static android.media.AudioManager.MODE_RINGTONE;

import android.annotation.TargetApi;
import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioDeviceInfo;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecordingConfiguration;
import android.media.MediaRecorder.AudioSource;
@@ -67,6 +69,130 @@ final class WebRtcAudioUtils {
    logAudioDeviceInfo(tag, audioManager);
  }

  // Converts AudioDeviceInfo types to local string representation.
  static String deviceTypeToString(int type) {
    switch (type) {
      case AudioDeviceInfo.TYPE_UNKNOWN:
        return "TYPE_UNKNOWN";
      case AudioDeviceInfo.TYPE_BUILTIN_EARPIECE:
        return "TYPE_BUILTIN_EARPIECE";
      case AudioDeviceInfo.TYPE_BUILTIN_SPEAKER:
        return "TYPE_BUILTIN_SPEAKER";
      case AudioDeviceInfo.TYPE_WIRED_HEADSET:
        return "TYPE_WIRED_HEADSET";
      case AudioDeviceInfo.TYPE_WIRED_HEADPHONES:
        return "TYPE_WIRED_HEADPHONES";
      case AudioDeviceInfo.TYPE_LINE_ANALOG:
        return "TYPE_LINE_ANALOG";
      case AudioDeviceInfo.TYPE_LINE_DIGITAL:
        return "TYPE_LINE_DIGITAL";
      case AudioDeviceInfo.TYPE_BLUETOOTH_SCO:
        return "TYPE_BLUETOOTH_SCO";
      case AudioDeviceInfo.TYPE_BLUETOOTH_A2DP:
        return "TYPE_BLUETOOTH_A2DP";
      case AudioDeviceInfo.TYPE_HDMI:
        return "TYPE_HDMI";
      case AudioDeviceInfo.TYPE_HDMI_ARC:
        return "TYPE_HDMI_ARC";
      case AudioDeviceInfo.TYPE_USB_DEVICE:
        return "TYPE_USB_DEVICE";
      case AudioDeviceInfo.TYPE_USB_ACCESSORY:
        return "TYPE_USB_ACCESSORY";
      case AudioDeviceInfo.TYPE_DOCK:
        return "TYPE_DOCK";
      case AudioDeviceInfo.TYPE_FM:
        return "TYPE_FM";
      case AudioDeviceInfo.TYPE_BUILTIN_MIC:
        return "TYPE_BUILTIN_MIC";
      case AudioDeviceInfo.TYPE_FM_TUNER:
        return "TYPE_FM_TUNER";
      case AudioDeviceInfo.TYPE_TV_TUNER:
        return "TYPE_TV_TUNER";
      case AudioDeviceInfo.TYPE_TELEPHONY:
        return "TYPE_TELEPHONY";
      case AudioDeviceInfo.TYPE_AUX_LINE:
        return "TYPE_AUX_LINE";
      case AudioDeviceInfo.TYPE_IP:
        return "TYPE_IP";
      case AudioDeviceInfo.TYPE_BUS:
        return "TYPE_BUS";
      case AudioDeviceInfo.TYPE_USB_HEADSET:
        return "TYPE_USB_HEADSET";
      default:
        return "TYPE_UNKNOWN";
    }
  }

  @TargetApi(Build.VERSION_CODES.N)
  public static String audioSourceToString(int source) {
    // AudioSource.VOICE_PERFORMANCE requires API level 29. Use local define instead.
    final int VOICE_PERFORMANCE = 10;
    switch (source) {
      case AudioSource.DEFAULT:
        return "DEFAULT";
      case AudioSource.MIC:
        return "MIC";
      case AudioSource.VOICE_UPLINK:
        return "VOICE_UPLINK";
      case AudioSource.VOICE_DOWNLINK:
        return "VOICE_DOWNLINK";
      case AudioSource.VOICE_CALL:
        return "VOICE_CALL";
      case AudioSource.CAMCORDER:
        return "CAMCORDER";
      case AudioSource.VOICE_RECOGNITION:
        return "VOICE_RECOGNITION";
      case AudioSource.VOICE_COMMUNICATION:
        return "VOICE_COMMUNICATION";
      case AudioSource.UNPROCESSED:
        return "UNPROCESSED";
      case VOICE_PERFORMANCE:
        return "VOICE_PERFORMANCE";
      default:
        return "INVALID";
    }
  }

  public static String channelMaskToString(int mask) {
    // For input or AudioRecord, the mask should be AudioFormat#CHANNEL_IN_MONO or
    // AudioFormat#CHANNEL_IN_STEREO. AudioFormat#CHANNEL_IN_MONO is guaranteed to work on all
    // devices.
    switch (mask) {
      case AudioFormat.CHANNEL_IN_STEREO:
        return "IN_STEREO";
      case AudioFormat.CHANNEL_IN_MONO:
        return "IN_MONO";
      default:
        return "INVALID";
    }
  }

  @TargetApi(Build.VERSION_CODES.N)
  public static String audioEncodingToString(int enc) {
    switch (enc) {
      case AudioFormat.ENCODING_INVALID:
        return "INVALID";
      case AudioFormat.ENCODING_PCM_16BIT:
        return "PCM_16BIT";
      case AudioFormat.ENCODING_PCM_8BIT:
        return "PCM_8BIT";
      case AudioFormat.ENCODING_PCM_FLOAT:
        return "PCM_FLOAT";
      case AudioFormat.ENCODING_AC3:
        return "AC3";
      case AudioFormat.ENCODING_E_AC3:
        return "E_AC3";
      case AudioFormat.ENCODING_DTS:
        return "DTS";
      case AudioFormat.ENCODING_DTS_HD:
        return "DTS_HD";
      case AudioFormat.ENCODING_MP3:
        return "MP3";
      default:
        return "Invalid encoding: " + enc;
    }
  }

  // Reports basic audio statistics.
  private static void logAudioStateBasic(String tag, Context context, AudioManager audioManager) {
    Logging.d(tag,
@@ -181,60 +307,6 @@ final class WebRtcAudioUtils {
    }
  }

-  // Converts AudioDeviceInfo types to local string representation.
-  private static String deviceTypeToString(int type) {
-    switch (type) {
-      case AudioDeviceInfo.TYPE_UNKNOWN:
-        return "TYPE_UNKNOWN";
-      case AudioDeviceInfo.TYPE_BUILTIN_EARPIECE:
-        return "TYPE_BUILTIN_EARPIECE";
-      case AudioDeviceInfo.TYPE_BUILTIN_SPEAKER:
-        return "TYPE_BUILTIN_SPEAKER";
-      case AudioDeviceInfo.TYPE_WIRED_HEADSET:
-        return "TYPE_WIRED_HEADSET";
-      case AudioDeviceInfo.TYPE_WIRED_HEADPHONES:
-        return "TYPE_WIRED_HEADPHONES";
-      case AudioDeviceInfo.TYPE_LINE_ANALOG:
-        return "TYPE_LINE_ANALOG";
-      case AudioDeviceInfo.TYPE_LINE_DIGITAL:
-        return "TYPE_LINE_DIGITAL";
-      case AudioDeviceInfo.TYPE_BLUETOOTH_SCO:
-        return "TYPE_BLUETOOTH_SCO";
-      case AudioDeviceInfo.TYPE_BLUETOOTH_A2DP:
-        return "TYPE_BLUETOOTH_A2DP";
-      case AudioDeviceInfo.TYPE_HDMI:
-        return "TYPE_HDMI";
-      case AudioDeviceInfo.TYPE_HDMI_ARC:
-        return "TYPE_HDMI_ARC";
-      case AudioDeviceInfo.TYPE_USB_DEVICE:
-        return "TYPE_USB_DEVICE";
-      case AudioDeviceInfo.TYPE_USB_ACCESSORY:
-        return "TYPE_USB_ACCESSORY";
-      case AudioDeviceInfo.TYPE_DOCK:
-        return "TYPE_DOCK";
-      case AudioDeviceInfo.TYPE_FM:
-        return "TYPE_FM";
-      case AudioDeviceInfo.TYPE_BUILTIN_MIC:
-        return "TYPE_BUILTIN_MIC";
-      case AudioDeviceInfo.TYPE_FM_TUNER:
-        return "TYPE_FM_TUNER";
-      case AudioDeviceInfo.TYPE_TV_TUNER:
-        return "TYPE_TV_TUNER";
-      case AudioDeviceInfo.TYPE_TELEPHONY:
-        return "TYPE_TELEPHONY";
-      case AudioDeviceInfo.TYPE_AUX_LINE:
-        return "TYPE_AUX_LINE";
-      case AudioDeviceInfo.TYPE_IP:
-        return "TYPE_IP";
-      case AudioDeviceInfo.TYPE_BUS:
-        return "TYPE_BUS";
-      case AudioDeviceInfo.TYPE_USB_HEADSET:
-        return "TYPE_USB_HEADSET";
-      default:
-        return "TYPE_UNKNOWN";
-    }
-  }

  // Returns true if the device can record audio via a microphone.
  private static boolean hasMicrophone(Context context) {
    return context.getPackageManager().hasSystemFeature(PackageManager.FEATURE_MICROPHONE);

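The converters added to WebRtcAudioUtils are made package-visible so that WebRtcAudioRecord can reuse them when formatting AudioRecordingConfiguration logs. A hypothetical usage sketch, assuming the caller lives in the same org.webrtc.audio package (the ConfigFormatter class below is not part of the patch):

import android.media.AudioFormat;
import android.media.AudioRecordingConfiguration;

// Hypothetical one-line summary built from the new string converters.
final class ConfigFormatter {
  static String summarize(AudioRecordingConfiguration config) {
    AudioFormat format = config.getClientFormat();
    return "source=" + WebRtcAudioUtils.audioSourceToString(config.getClientAudioSource())
        + ", mask=" + WebRtcAudioUtils.channelMaskToString(format.getChannelMask())
        + ", encoding=" + WebRtcAudioUtils.audioEncodingToString(format.getEncoding())
        + ", rate=" + format.getSampleRate();
  }
}
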
@@ -158,6 +158,13 @@ int32_t AudioRecordJni::StopRecording() {
  if (!initialized_ || !recording_) {
    return 0;
  }
  const bool session_was_ok =
      Java_WebRtcAudioRecord_isAudioSourceMatchingRecordingSession(
          env_, j_audio_record_);
  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.SourceMatchesRecordingSession",
                        session_was_ok);
  RTC_LOG(INFO) << "HISTOGRAM(WebRTC.Audio.SourceMatchesRecordingSession): "
                << session_was_ok;
  if (!Java_WebRtcAudioRecord_stopRecording(env_, j_audio_record_)) {
    RTC_LOG(LS_ERROR) << "StopRecording failed";
    return -1;
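Note the ordering here: the histogram sample is read via Java_WebRtcAudioRecord_isAudioSourceMatchingRecordingSession() before Java_WebRtcAudioRecord_stopRecording() tears down the Java-side executor, so the recorded value reflects the check that was scheduled roughly 100 ms after start. And because isAudioSourceMatchingRecordingSession() returns false whenever the delayed check never ran, a true sample always means the configuration was actually verified; sessions that were never verified show up as false rather than being dropped from the histogram.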