Add setting to switch between new and legacy AudioDeviceModule in AppRTC

To facilitate testing of both the old and the new AudioDeviceModule
paths, a setting is added to AppRTC. Enable "Use legacy audio device"
in the settings to use the old path.

Bug: webrtc:7452
Change-Id: I221378ac7bb0fa4e543c3fd081c7a322621621a0
Reviewed-on: https://webrtc-review.googlesource.com/64760
Reviewed-by: Henrik Andersson <henrika@webrtc.org>
Commit-Queue: Paulina Hensman <phensman@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#22609}
Author: Paulina Hensman
Date: 2018-03-26 16:10:29 +02:00
Committed by: Commit Bot
parent 6c9a786c60
commit 17071682f7
9 changed files with 252 additions and 67 deletions
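
For manual testing it can also be handy to set the new flag programmatically, since CallActivity reads it as an intent extra (see the CallActivity.java diff below). A minimal sketch, assuming the usual AppRTC convention of passing the room URI as intent data and loopback as an extra; the helper class and room name are illustrative, not part of this change:

import android.content.Context;
import android.content.Intent;
import android.net.Uri;

/** Illustrative helper: starts a loopback call on the legacy audio path. */
final class LegacyAudioLaunchExample {
  static void startLegacyAudioLoopback(Context context) {
    Intent intent = new Intent(context, CallActivity.class);
    // Room URI and loopback extra follow the existing AppRTC launch pattern (assumed here).
    intent.setData(Uri.parse("https://appr.tc/r/legacy-audio-test"));
    intent.putExtra("org.appspot.apprtc.LOOPBACK", true);
    // New extra from this change; false (the default) keeps the new AudioDeviceModule path.
    intent.putExtra(CallActivity.EXTRA_USE_LEGACY_AUDIO_DEVICE, true);
    context.startActivity(intent);
  }
}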

CallActivity.java

@@ -120,6 +120,8 @@ public class CallActivity extends Activity implements AppRTCClient.SignalingEvents
public static final String EXTRA_NEGOTIATED = "org.appspot.apprtc.NEGOTIATED";
public static final String EXTRA_ID = "org.appspot.apprtc.ID";
public static final String EXTRA_ENABLE_RTCEVENTLOG = "org.appspot.apprtc.ENABLE_RTCEVENTLOG";
public static final String EXTRA_USE_LEGACY_AUDIO_DEVICE =
"org.appspot.apprtc.USE_LEGACY_AUDIO_DEVICE";
private static final int CAPTURE_PERMISSION_REQUEST_CODE = 1;
@@ -347,7 +349,8 @@ public class CallActivity extends Activity implements AppRTCClient.SignalingEvents
intent.getBooleanExtra(EXTRA_DISABLE_BUILT_IN_AGC, false),
intent.getBooleanExtra(EXTRA_DISABLE_BUILT_IN_NS, false),
intent.getBooleanExtra(EXTRA_DISABLE_WEBRTC_AGC_AND_HPF, false),
intent.getBooleanExtra(EXTRA_ENABLE_RTCEVENTLOG, false), dataChannelParameters);
intent.getBooleanExtra(EXTRA_ENABLE_RTCEVENTLOG, false),
intent.getBooleanExtra(EXTRA_USE_LEGACY_AUDIO_DEVICE, false), dataChannelParameters);
commandLineRun = intent.getBooleanExtra(EXTRA_CMDLINE, false);
int runTimeMs = intent.getIntExtra(EXTRA_RUNTIME, 0);

ConnectActivity.java

@@ -434,6 +434,10 @@ public class ConnectActivity extends Activity {
CallActivity.EXTRA_ENABLE_RTCEVENTLOG, R.string.pref_enable_rtceventlog_default,
useValuesFromIntent);
boolean useLegacyAudioDevice = sharedPrefGetBoolean(R.string.pref_use_legacy_audio_device_key,
CallActivity.EXTRA_USE_LEGACY_AUDIO_DEVICE, R.string.pref_use_legacy_audio_device_default,
useValuesFromIntent);
// Get datachannel options
boolean dataChannelEnabled = sharedPrefGetBoolean(R.string.pref_enable_datachannel_key,
CallActivity.EXTRA_DATA_CHANNEL_ENABLED, R.string.pref_enable_datachannel_default,
@@ -488,6 +492,7 @@ public class ConnectActivity extends Activity {
intent.putExtra(CallActivity.EXTRA_ENABLE_RTCEVENTLOG, rtcEventLogEnabled);
intent.putExtra(CallActivity.EXTRA_CMDLINE, commandLineRun);
intent.putExtra(CallActivity.EXTRA_RUNTIME, runTimeMs);
intent.putExtra(CallActivity.EXTRA_USE_LEGACY_AUDIO_DEVICE, useLegacyAudioDevice);
intent.putExtra(CallActivity.EXTRA_DATA_CHANNEL_ENABLED, dataChannelEnabled);

PeerConnectionClient.java

@@ -71,6 +71,13 @@ import org.webrtc.VideoSink;
import org.webrtc.VideoSource;
import org.webrtc.VideoTrack;
import org.webrtc.audio.AudioDeviceModule;
import org.webrtc.voiceengine.WebRtcAudioManager;
import org.webrtc.voiceengine.WebRtcAudioRecord;
import org.webrtc.voiceengine.WebRtcAudioRecord.AudioRecordStartErrorCode;
import org.webrtc.voiceengine.WebRtcAudioRecord.WebRtcAudioRecordErrorCallback;
import org.webrtc.voiceengine.WebRtcAudioTrack;
import org.webrtc.voiceengine.WebRtcAudioTrack.AudioTrackStartErrorCode;
import org.webrtc.voiceengine.WebRtcAudioUtils;
/**
* Peer connection client implementation.
@@ -233,6 +240,7 @@ public class PeerConnectionClient {
public final boolean disableBuiltInNS;
public final boolean disableWebRtcAGCAndHPF;
public final boolean enableRtcEventLog;
public final boolean useLegacyAudioDevice;
private final DataChannelParameters dataChannelParameters;
public PeerConnectionParameters(boolean videoCallEnabled, boolean loopback, boolean tracing,
@@ -241,7 +249,7 @@
String audioCodec, boolean noAudioProcessing, boolean aecDump, boolean saveInputAudioToFile,
boolean useOpenSLES, boolean disableBuiltInAEC, boolean disableBuiltInAGC,
boolean disableBuiltInNS, boolean disableWebRtcAGCAndHPF, boolean enableRtcEventLog,
DataChannelParameters dataChannelParameters) {
boolean useLegacyAudioDevice, DataChannelParameters dataChannelParameters) {
this.videoCallEnabled = videoCallEnabled;
this.loopback = loopback;
this.tracing = tracing;
@@ -263,6 +271,7 @@
this.disableBuiltInNS = disableBuiltInNS;
this.disableWebRtcAGCAndHPF = disableWebRtcAGCAndHPF;
this.enableRtcEventLog = enableRtcEventLog;
this.useLegacyAudioDevice = useLegacyAudioDevice;
this.dataChannelParameters = dataChannelParameters;
}
}
@@ -418,8 +427,10 @@ public class PeerConnectionClient {
fieldTrials += DISABLE_WEBRTC_AGC_FIELDTRIAL;
Log.d(TAG, "Disable WebRTC AGC field trial.");
}
fieldTrials += EXTERNAL_ANDROID_AUDIO_DEVICE_FIELDTRIAL;
Log.d(TAG, "Enable WebRTC external Android audio device field trial.");
if (!peerConnectionParameters.useLegacyAudioDevice) {
fieldTrials += EXTERNAL_ANDROID_AUDIO_DEVICE_FIELDTRIAL;
Log.d(TAG, "Enable WebRTC external Android audio device field trial.");
}
// Check preferred video codec.
preferredVideoCodec = VIDEO_CODEC_VP8;
@@ -465,6 +476,121 @@
preferIsac = peerConnectionParameters.audioCodec != null
&& peerConnectionParameters.audioCodec.equals(AUDIO_CODEC_ISAC);
if (peerConnectionParameters.useLegacyAudioDevice) {
setupAudioDeviceLegacy();
} else {
setupAudioDevice();
}
// It is possible to save a copy in raw PCM format on a file by checking
// the "Save input audio to file" checkbox in the Settings UI. A callback
// interface is set when this flag is enabled. As a result, a copy of recorded
// audio samples are provided to this client directly from the native audio
// layer in Java.
if (peerConnectionParameters.saveInputAudioToFile) {
if (!peerConnectionParameters.useOpenSLES) {
Log.d(TAG, "Enable recording of microphone input audio to file");
saveRecordedAudioToFile = new RecordedAudioToFileController(
executor, peerConnectionParameters.useLegacyAudioDevice);
} else {
// TODO(henrika): ensure that the UI reflects that if OpenSL ES is selected,
// then the "Save input audio to file" option shall be grayed out.
Log.e(TAG, "Recording of input audio is not supported for OpenSL ES");
}
}
// Create peer connection factory.
if (options != null) {
Log.d(TAG, "Factory networkIgnoreMask option: " + options.networkIgnoreMask);
}
final boolean enableH264HighProfile =
VIDEO_CODEC_H264_HIGH.equals(peerConnectionParameters.videoCodec);
final VideoEncoderFactory encoderFactory;
final VideoDecoderFactory decoderFactory;
if (peerConnectionParameters.videoCodecHwAcceleration) {
encoderFactory = new DefaultVideoEncoderFactory(
rootEglBase.getEglBaseContext(), true /* enableIntelVp8Encoder */, enableH264HighProfile);
decoderFactory = new DefaultVideoDecoderFactory(rootEglBase.getEglBaseContext());
} else {
encoderFactory = new SoftwareVideoEncoderFactory();
decoderFactory = new SoftwareVideoDecoderFactory();
}
factory = new PeerConnectionFactory(options, encoderFactory, decoderFactory);
Log.d(TAG, "Peer connection factory created.");
}
void setupAudioDeviceLegacy() {
// Enable/disable OpenSL ES playback.
if (!peerConnectionParameters.useOpenSLES) {
Log.d(TAG, "Disable OpenSL ES audio even if device supports it");
WebRtcAudioManager.setBlacklistDeviceForOpenSLESUsage(true /* enable */);
} else {
Log.d(TAG, "Allow OpenSL ES audio if device supports it");
WebRtcAudioManager.setBlacklistDeviceForOpenSLESUsage(false);
}
if (peerConnectionParameters.disableBuiltInAEC) {
Log.d(TAG, "Disable built-in AEC even if device supports it");
WebRtcAudioUtils.setWebRtcBasedAcousticEchoCanceler(true);
} else {
Log.d(TAG, "Enable built-in AEC if device supports it");
WebRtcAudioUtils.setWebRtcBasedAcousticEchoCanceler(false);
}
if (peerConnectionParameters.disableBuiltInNS) {
Log.d(TAG, "Disable built-in NS even if device supports it");
WebRtcAudioUtils.setWebRtcBasedNoiseSuppressor(true);
} else {
Log.d(TAG, "Enable built-in NS if device supports it");
WebRtcAudioUtils.setWebRtcBasedNoiseSuppressor(false);
}
// Set audio record error callbacks.
WebRtcAudioRecord.setErrorCallback(new WebRtcAudioRecordErrorCallback() {
@Override
public void onWebRtcAudioRecordInitError(String errorMessage) {
Log.e(TAG, "onWebRtcAudioRecordInitError: " + errorMessage);
reportError(errorMessage);
}
@Override
public void onWebRtcAudioRecordStartError(
AudioRecordStartErrorCode errorCode, String errorMessage) {
Log.e(TAG, "onWebRtcAudioRecordStartError: " + errorCode + ". " + errorMessage);
reportError(errorMessage);
}
@Override
public void onWebRtcAudioRecordError(String errorMessage) {
Log.e(TAG, "onWebRtcAudioRecordError: " + errorMessage);
reportError(errorMessage);
}
});
WebRtcAudioTrack.setErrorCallback(new WebRtcAudioTrack.ErrorCallback() {
@Override
public void onWebRtcAudioTrackInitError(String errorMessage) {
Log.e(TAG, "onWebRtcAudioTrackInitError: " + errorMessage);
reportError(errorMessage);
}
@Override
public void onWebRtcAudioTrackStartError(
AudioTrackStartErrorCode errorCode, String errorMessage) {
Log.e(TAG, "onWebRtcAudioTrackStartError: " + errorCode + ". " + errorMessage);
reportError(errorMessage);
}
@Override
public void onWebRtcAudioTrackError(String errorMessage) {
Log.e(TAG, "onWebRtcAudioTrackError: " + errorMessage);
reportError(errorMessage);
}
});
}
void setupAudioDevice() {
// Enable/disable OpenSL ES playback.
if (!peerConnectionParameters.useOpenSLES) {
Log.d(TAG, "Disable OpenSL ES audio even if device supports it");
@@ -512,22 +638,6 @@ public class PeerConnectionClient {
}
});
// It is possible to save a copy in raw PCM format on a file by checking
// the "Save input audio to file" checkbox in the Settings UI. A callback
// interface is set when this flag is enabled. As a result, a copy of recorded
// audio samples are provided to this client directly from the native audio
// layer in Java.
if (peerConnectionParameters.saveInputAudioToFile) {
if (!peerConnectionParameters.useOpenSLES) {
Log.d(TAG, "Enable recording of microphone input audio to file");
saveRecordedAudioToFile = new RecordedAudioToFileController(executor);
} else {
// TODO(henrika): ensure that the UI reflects that if OpenSL ES is selected,
// then the "Save input audio to file" option shall be grayed out.
Log.e(TAG, "Recording of input audio is not supported for OpenSL ES");
}
}
AudioDeviceModule.setErrorCallback(new AudioDeviceModule.AudioTrackErrorCallback() {
@Override
public void onWebRtcAudioTrackInitError(String errorMessage) {
@@ -548,27 +658,6 @@ public class PeerConnectionClient {
reportError(errorMessage);
}
});
// Create peer connection factory.
if (options != null) {
Log.d(TAG, "Factory networkIgnoreMask option: " + options.networkIgnoreMask);
}
final boolean enableH264HighProfile =
VIDEO_CODEC_H264_HIGH.equals(peerConnectionParameters.videoCodec);
final VideoEncoderFactory encoderFactory;
final VideoDecoderFactory decoderFactory;
if (peerConnectionParameters.videoCodecHwAcceleration) {
encoderFactory = new DefaultVideoEncoderFactory(
rootEglBase.getEglBaseContext(), true /* enableIntelVp8Encoder */, enableH264HighProfile);
decoderFactory = new DefaultVideoDecoderFactory(rootEglBase.getEglBaseContext());
} else {
encoderFactory = new SoftwareVideoEncoderFactory();
decoderFactory = new SoftwareVideoDecoderFactory();
}
factory = new PeerConnectionFactory(options, encoderFactory, decoderFactory);
Log.d(TAG, "Peer connection factory created.");
}
private void createMediaConstraintsInternal() {
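
Most of the PeerConnectionClient diff above is code motion; the functional change boils down to two branches on the new flag during factory setup: the external-ADM field trial is only appended, and setupAudioDevice() only called, on the new path, while setupAudioDeviceLegacy() wires up the org.webrtc.voiceengine callbacks on the old one. A condensed sketch of the field-trial step, with an illustrative helper name (the real logic lives in createPeerConnectionFactoryInternal() as shown above):

/** Illustrative condensation of the audio-path selection added above. */
final class AudioPathSelectionSketch {
  /** Appends the external AudioDeviceModule field trial only when the new path is used. */
  static String appendAudioDeviceFieldTrial(
      String fieldTrials, boolean useLegacyAudioDevice, String externalAdmFieldTrial) {
    if (!useLegacyAudioDevice) {
      // New path: opt into the external Android AudioDeviceModule via its field trial.
      return fieldTrials + externalAdmFieldTrial;
    }
    // Legacy path: leave the field trial out so the old audio device implementation is used.
    return fieldTrials;
  }
}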

RecordedAudioToFileController.java

@@ -21,14 +21,16 @@ import java.io.IOException;
import java.io.OutputStream;
import java.util.concurrent.ExecutorService;
import org.webrtc.audio.AudioDeviceModule;
import org.webrtc.audio.AudioDeviceModule.AudioSamples;
import org.webrtc.audio.AudioDeviceModule.SamplesReadyCallback;
import org.webrtc.voiceengine.WebRtcAudioRecord;
import org.webrtc.voiceengine.WebRtcAudioRecord.WebRtcAudioRecordSamplesReadyCallback;
/**
* Implements the AudioRecordSamplesReadyCallback interface and writes
* recorded raw audio samples to an output file.
*/
public class RecordedAudioToFileController implements SamplesReadyCallback {
public class RecordedAudioToFileController
implements SamplesReadyCallback, WebRtcAudioRecordSamplesReadyCallback {
private static final String TAG = "RecordedAudioToFile";
private static final long MAX_FILE_SIZE_IN_BYTES = 58348800L;
@@ -37,10 +39,12 @@ public class RecordedAudioToFileController implements SamplesReadyCallback {
@Nullable
private OutputStream rawAudioFileOutputStream = null;
private long fileSizeInBytes = 0;
private boolean useLegacyAudioDevice;
public RecordedAudioToFileController(ExecutorService executor) {
public RecordedAudioToFileController(ExecutorService executor, boolean useLegacyAudioDevice) {
Log.d(TAG, "ctor");
this.executor = executor;
this.useLegacyAudioDevice = useLegacyAudioDevice;
}
/**
@@ -54,7 +58,11 @@ public class RecordedAudioToFileController implements SamplesReadyCallback {
return false;
}
// Register this class as receiver of recorded audio samples for storage.
AudioDeviceModule.setOnAudioSamplesReady(this);
if (useLegacyAudioDevice) {
WebRtcAudioRecord.setOnAudioSamplesReady(this);
} else {
AudioDeviceModule.setOnAudioSamplesReady(this);
}
return true;
}
@@ -65,7 +73,11 @@ public class RecordedAudioToFileController implements SamplesReadyCallback {
public void stop() {
Log.d(TAG, "stop");
// De-register this class as receiver of recorded audio samples for storage.
AudioDeviceModule.setOnAudioSamplesReady(null);
if (useLegacyAudioDevice) {
WebRtcAudioRecord.setOnAudioSamplesReady(null);
} else {
AudioDeviceModule.setOnAudioSamplesReady(null);
}
synchronized (lock) {
if (rawAudioFileOutputStream != null) {
try {
@@ -106,7 +118,41 @@ public class RecordedAudioToFileController implements SamplesReadyCallback {
// Called when new audio samples are ready.
@Override
public void onWebRtcAudioRecordSamplesReady(AudioSamples samples) {
public void onWebRtcAudioRecordSamplesReady(AudioDeviceModule.AudioSamples samples) {
// The native audio layer on Android should use 16-bit PCM format.
if (samples.getAudioFormat() != AudioFormat.ENCODING_PCM_16BIT) {
Log.e(TAG, "Invalid audio format");
return;
}
// Open a new file for the first callback only since it allows us to add
// audio parameters to the file name.
synchronized (lock) {
if (rawAudioFileOutputStream == null) {
openRawAudioOutputFile(samples.getSampleRate(), samples.getChannelCount());
fileSizeInBytes = 0;
}
}
// Append the recorded 16-bit audio samples to the open output file.
executor.execute(() -> {
if (rawAudioFileOutputStream != null) {
try {
// Set a limit on max file size. 58348800 bytes corresponds to
// approximately 10 minutes of recording in mono at 48kHz.
if (fileSizeInBytes < MAX_FILE_SIZE_IN_BYTES) {
// Writes samples.getData().length bytes to output stream.
rawAudioFileOutputStream.write(samples.getData());
fileSizeInBytes += samples.getData().length;
}
} catch (IOException e) {
Log.e(TAG, "Failed to write audio to file: " + e.getMessage());
}
}
});
}
// Called when new audio samples are ready.
@Override
public void onWebRtcAudioRecordSamplesReady(WebRtcAudioRecord.AudioSamples samples) {
// The native audio layer on Android should use 16-bit PCM format.
if (samples.getAudioFormat() != AudioFormat.ENCODING_PCM_16BIT) {
Log.e(TAG, "Invalid audio format");

SettingsActivity.java

@@ -18,6 +18,7 @@ import android.preference.ListPreference;
import android.preference.Preference;
import org.webrtc.Camera2Enumerator;
import org.webrtc.audio.AudioDeviceModule;
import org.webrtc.voiceengine.WebRtcAudioUtils;
/**
* Settings activity for AppRTC.
@@ -62,6 +63,7 @@ public class SettingsActivity extends Activity implements OnSharedPreferenceChangeListener
private String keyprefDataProtocol;
private String keyprefNegotiated;
private String keyprefDataId;
private String keyprefUseLegacyAudioDevice;
@Override
protected void onCreate(Bundle savedInstanceState) {
@@ -105,6 +107,7 @@ public class SettingsActivity extends Activity implements OnSharedPreferenceChangeListener
keyPrefDisplayHud = getString(R.string.pref_displayhud_key);
keyPrefTracing = getString(R.string.pref_tracing_key);
keyprefEnabledRtcEventLog = getString(R.string.pref_enable_rtceventlog_key);
keyprefUseLegacyAudioDevice = getString(R.string.pref_use_legacy_audio_device_key);
// Display the fragment as the main content.
settingsFragment = new SettingsFragment();
@@ -162,6 +165,7 @@ public class SettingsActivity extends Activity implements OnSharedPreferenceChangeListener
updateSummaryB(sharedPreferences, keyPrefDisplayHud);
updateSummaryB(sharedPreferences, keyPrefTracing);
updateSummaryB(sharedPreferences, keyprefEnabledRtcEventLog);
updateSummaryB(sharedPreferences, keyprefUseLegacyAudioDevice);
if (!Camera2Enumerator.isSupported(this)) {
Preference camera2Preference = settingsFragment.findPreference(keyprefCamera2);
@@ -172,28 +176,54 @@ public class SettingsActivity extends Activity implements OnSharedPreferenceChangeListener
// Disable forcing WebRTC based AEC so it won't affect our value.
// Otherwise, if it was enabled, isAcousticEchoCancelerSupported would always return false.
AudioDeviceModule.setWebRtcBasedAcousticEchoCanceler(false);
if (!AudioDeviceModule.isAcousticEchoCancelerSupported()) {
Preference disableBuiltInAECPreference =
settingsFragment.findPreference(keyprefDisableBuiltInAEC);
if (sharedPreferences.getBoolean(keyprefUseLegacyAudioDevice, false)) {
WebRtcAudioUtils.setWebRtcBasedAcousticEchoCanceler(false);
if (!WebRtcAudioUtils.isAcousticEchoCancelerSupported()) {
Preference disableBuiltInAECPreference =
settingsFragment.findPreference(keyprefDisableBuiltInAEC);
disableBuiltInAECPreference.setSummary(getString(R.string.pref_built_in_aec_not_available));
disableBuiltInAECPreference.setEnabled(false);
}
disableBuiltInAECPreference.setSummary(getString(R.string.pref_built_in_aec_not_available));
disableBuiltInAECPreference.setEnabled(false);
}
Preference disableBuiltInAGCPreference =
settingsFragment.findPreference(keyprefDisableBuiltInAGC);
Preference disableBuiltInAGCPreference =
settingsFragment.findPreference(keyprefDisableBuiltInAGC);
disableBuiltInAGCPreference.setSummary(getString(R.string.pref_built_in_agc_not_available));
disableBuiltInAGCPreference.setEnabled(false);
disableBuiltInAGCPreference.setSummary(getString(R.string.pref_built_in_agc_not_available));
disableBuiltInAGCPreference.setEnabled(false);
AudioDeviceModule.setWebRtcBasedNoiseSuppressor(false);
if (!AudioDeviceModule.isNoiseSuppressorSupported()) {
Preference disableBuiltInNSPreference =
settingsFragment.findPreference(keyprefDisableBuiltInNS);
WebRtcAudioUtils.setWebRtcBasedNoiseSuppressor(false);
if (!WebRtcAudioUtils.isNoiseSuppressorSupported()) {
Preference disableBuiltInNSPreference =
settingsFragment.findPreference(keyprefDisableBuiltInNS);
disableBuiltInNSPreference.setSummary(getString(R.string.pref_built_in_ns_not_available));
disableBuiltInNSPreference.setEnabled(false);
disableBuiltInNSPreference.setSummary(getString(R.string.pref_built_in_ns_not_available));
disableBuiltInNSPreference.setEnabled(false);
}
} else {
AudioDeviceModule.setWebRtcBasedAcousticEchoCanceler(false);
if (!AudioDeviceModule.isAcousticEchoCancelerSupported()) {
Preference disableBuiltInAECPreference =
settingsFragment.findPreference(keyprefDisableBuiltInAEC);
disableBuiltInAECPreference.setSummary(getString(R.string.pref_built_in_aec_not_available));
disableBuiltInAECPreference.setEnabled(false);
}
Preference disableBuiltInAGCPreference =
settingsFragment.findPreference(keyprefDisableBuiltInAGC);
disableBuiltInAGCPreference.setSummary(getString(R.string.pref_built_in_agc_not_available));
disableBuiltInAGCPreference.setEnabled(false);
AudioDeviceModule.setWebRtcBasedNoiseSuppressor(false);
if (!AudioDeviceModule.isNoiseSuppressorSupported()) {
Preference disableBuiltInNSPreference =
settingsFragment.findPreference(keyprefDisableBuiltInNS);
disableBuiltInNSPreference.setSummary(getString(R.string.pref_built_in_ns_not_available));
disableBuiltInNSPreference.setEnabled(false);
}
}
}
@@ -243,7 +273,8 @@ public class SettingsActivity extends Activity implements OnSharedPreferenceChangeListener
|| key.equals(keyprefEnableDataChannel)
|| key.equals(keyprefOrdered)
|| key.equals(keyprefNegotiated)
|| key.equals(keyprefEnabledRtcEventLog)) {
|| key.equals(keyprefEnabledRtcEventLog)
|| key.equals(keyprefUseLegacyAudioDevice)) {
updateSummaryB(sharedPreferences, key);
} else if (key.equals(keyprefSpeakerphone)) {
updateSummaryList(sharedPreferences, key);