Allow AudioAttributes to be app/client configurable
WebRtcAudioTrack is hardcoded to configure AudioAttributes with
1. usage=USAGE_VOICE_COMMUNICATION
2. contentType=CONTENT_TYPE_SPEECH

This change allows AudioAttributes to be configured via the
JavaAudioDeviceModule.

Bug: webrtc:12153
Change-Id: I67c7f6e572c5a9f3a8fde674b6600d2adaf17895
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/191941
Commit-Queue: Gaurav Vaish <gvaish@chromium.org>
Reviewed-by: Henrik Andersson <henrika@webrtc.org>
Reviewed-by: Paulina Hensman <phensman@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#32583}
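For illustration, a minimal client-side sketch of the new knob (not part of this CL; builder() and createAudioDeviceModule() are the existing JavaAudioDeviceModule entry points, and the USAGE_MEDIA/CONTENT_TYPE_MUSIC values are arbitrary examples, not defaults):

    import android.content.Context;
    import android.media.AudioAttributes;

    import org.webrtc.audio.AudioDeviceModule;
    import org.webrtc.audio.JavaAudioDeviceModule;

    final class AdmFactory {
      // Sketch: tag WebRTC playout as media instead of voice-call audio.
      static AudioDeviceModule create(Context appContext) {
        AudioAttributes attrs =
            new AudioAttributes.Builder()
                .setUsage(AudioAttributes.USAGE_MEDIA) // replaces USAGE_VOICE_COMMUNICATION
                .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC) // replaces CONTENT_TYPE_SPEECH
                .build();
        return JavaAudioDeviceModule.builder(appContext)
            .setAudioAttributes(attrs) // the setter added by this change
            .createAudioDeviceModule();
      }
    }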
JavaAudioDeviceModule.java

@@ -11,6 +11,7 @@
 package org.webrtc.audio;
 
 import android.content.Context;
+import android.media.AudioAttributes;
 import android.media.AudioDeviceInfo;
 import android.media.AudioManager;
 import android.os.Build;
@@ -47,6 +48,7 @@ public class JavaAudioDeviceModule implements AudioDeviceModule {
     private boolean useHardwareNoiseSuppressor = isBuiltInNoiseSuppressorSupported();
     private boolean useStereoInput;
     private boolean useStereoOutput;
+    private AudioAttributes audioAttributes;
 
     private Builder(Context context) {
       this.context = context;
@@ -193,6 +195,14 @@ public class JavaAudioDeviceModule implements AudioDeviceModule {
       return this;
     }
 
+    /**
+     * Set custom {@link AudioAttributes} to use.
+     */
+    public Builder setAudioAttributes(AudioAttributes audioAttributes) {
+      this.audioAttributes = audioAttributes;
+      return this;
+    }
+
     /**
      * Construct an AudioDeviceModule based on the supplied arguments. The caller takes ownership
      * and is responsible for calling release().
@@ -223,7 +233,7 @@ public class JavaAudioDeviceModule implements AudioDeviceModule {
           audioSource, audioFormat, audioRecordErrorCallback, audioRecordStateCallback,
           samplesReadyCallback, useHardwareAcousticEchoCanceler, useHardwareNoiseSuppressor);
       final WebRtcAudioTrack audioOutput = new WebRtcAudioTrack(
-          context, audioManager, audioTrackErrorCallback, audioTrackStateCallback);
+          context, audioManager, audioAttributes, audioTrackErrorCallback, audioTrackStateCallback);
       return new JavaAudioDeviceModule(context, audioManager, audioInput, audioOutput,
           inputSampleRate, outputSampleRate, useStereoInput, useStereoOutput);
     }
WebRtcAudioTrack.java

@@ -71,6 +71,7 @@ class WebRtcAudioTrack {
 
   private ByteBuffer byteBuffer;
 
+  private @Nullable final AudioAttributes audioAttributes;
   private @Nullable AudioTrack audioTrack;
   private @Nullable AudioTrackThread audioThread;
   private final VolumeLogger volumeLogger;
@@ -162,15 +163,17 @@ class WebRtcAudioTrack {
 
   @CalledByNative
   WebRtcAudioTrack(Context context, AudioManager audioManager) {
-    this(context, audioManager, null /* errorCallback */, null /* stateCallback */);
+    this(context, audioManager, null /* audioAttributes */, null /* errorCallback */,
+        null /* stateCallback */);
   }
 
   WebRtcAudioTrack(Context context, AudioManager audioManager,
-      @Nullable AudioTrackErrorCallback errorCallback,
+      @Nullable AudioAttributes audioAttributes, @Nullable AudioTrackErrorCallback errorCallback,
       @Nullable AudioTrackStateCallback stateCallback) {
     threadChecker.detachThread();
     this.context = context;
     this.audioManager = audioManager;
+    this.audioAttributes = audioAttributes;
     this.errorCallback = errorCallback;
     this.stateCallback = stateCallback;
     this.volumeLogger = new VolumeLogger(audioManager);
@@ -231,8 +234,8 @@ class WebRtcAudioTrack {
       // supersede the notion of stream types for defining the behavior of audio playback,
       // and to allow certain platforms or routing policies to use this information for more
       // refined volume or routing decisions.
-      audioTrack =
-          createAudioTrackOnLollipopOrHigher(sampleRate, channelConfig, minBufferSizeInBytes);
+      audioTrack = createAudioTrackOnLollipopOrHigher(
+          sampleRate, channelConfig, minBufferSizeInBytes, audioAttributes);
     } else {
       // Use default constructor for API levels below 21.
       audioTrack =
@@ -383,8 +386,8 @@ class WebRtcAudioTrack {
   // It allows certain platforms or routing policies to use this information for more
   // refined volume or routing decisions.
   @TargetApi(Build.VERSION_CODES.LOLLIPOP)
-  private static AudioTrack createAudioTrackOnLollipopOrHigher(
-      int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
+  private static AudioTrack createAudioTrackOnLollipopOrHigher(int sampleRateInHz,
+      int channelConfig, int bufferSizeInBytes, @Nullable AudioAttributes overrideAttributes) {
     Logging.d(TAG, "createAudioTrackOnLollipopOrHigher");
     // TODO(henrika): use setPerformanceMode(int) with PERFORMANCE_MODE_LOW_LATENCY to control
     // performance when Android O is supported. Add some logging in the mean time.
@@ -394,11 +397,26 @@ class WebRtcAudioTrack {
     if (sampleRateInHz != nativeOutputSampleRate) {
       Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
     }
+
+    AudioAttributes.Builder attributesBuilder =
+        new AudioAttributes.Builder()
+            .setUsage(DEFAULT_USAGE)
+            .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH);
+
+    if (overrideAttributes != null) {
+      if (overrideAttributes.getUsage() != AudioAttributes.USAGE_UNKNOWN) {
+        attributesBuilder.setUsage(overrideAttributes.getUsage());
+      }
+      if (overrideAttributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN) {
+        attributesBuilder.setContentType(overrideAttributes.getContentType());
+      }
+
+      attributesBuilder.setAllowedCapturePolicy(overrideAttributes.getAllowedCapturePolicy())
+          .setFlags(overrideAttributes.getFlags());
+    }
+
     // Create an audio track where the audio usage is for VoIP and the content type is speech.
-    return new AudioTrack(new AudioAttributes.Builder()
-                              .setUsage(DEFAULT_USAGE)
-                              .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
-                              .build(),
+    return new AudioTrack(attributesBuilder.build(),
         new AudioFormat.Builder()
             .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
             .setSampleRate(sampleRateInHz)
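A note on the fallback logic in this hunk: AudioAttributes.Builder defaults usage to USAGE_UNKNOWN and content type to CONTENT_TYPE_UNKNOWN, so each field of the override replaces the WebRTC default only when the caller set it explicitly. A hypothetical illustration:

    // Override only the usage; leave the content type unset.
    AudioAttributes override =
        new AudioAttributes.Builder().setUsage(AudioAttributes.USAGE_MEDIA).build();
    // override.getContentType() returns CONTENT_TYPE_UNKNOWN, so the merge skips
    // setContentType() and the track keeps CONTENT_TYPE_SPEECH, while getUsage()
    // returns USAGE_MEDIA, which replaces the DEFAULT_USAGE (VoIP) value.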