Android audio code: Replace C++ template with input/output interface
Bug: webrtc:7452
Change-Id: Id816500051e065918bba5c2235d38ad8eb50a8eb
Reviewed-on: https://webrtc-review.googlesource.com/64442
Commit-Queue: Magnus Jedvert <magjed@webrtc.org>
Reviewed-by: Paulina Hensman <phensman@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#22660}
committed by Commit Bot
parent 85eef49fa2
commit 1a18e0ac46
@@ -42,20 +42,6 @@ class WebRtcAudioManager {
   private static boolean useStereoOutput = false;
   private static boolean useStereoInput = false;

-  private static boolean blacklistDeviceForOpenSLESUsage = false;
-  private static boolean blacklistDeviceForOpenSLESUsageIsOverridden = false;
-
-  // Call this method to override the default list of blacklisted devices
-  // specified in WebRtcAudioUtils.BLACKLISTED_OPEN_SL_ES_MODELS.
-  // Allows an app to take control over which devices to exclude from using
-  // the OpenSL ES audio output path
-  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
-  @SuppressWarnings("NoSynchronizedMethodCheck")
-  public static synchronized void setBlacklistDeviceForOpenSLESUsage(boolean enable) {
-    blacklistDeviceForOpenSLESUsageIsOverridden = true;
-    blacklistDeviceForOpenSLESUsage = enable;
-  }
-
   // Call these methods to override the default mono audio modes for the specified direction(s)
   // (input and/or output).
   // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@@ -204,19 +190,7 @@ class WebRtcAudioManager {
     return (audioManager.getMode() == AudioManager.MODE_IN_COMMUNICATION);
   }
-
-  @CalledByNative
-  private static boolean isDeviceBlacklistedForOpenSLESUsage() {
-    boolean blacklisted = blacklistDeviceForOpenSLESUsageIsOverridden
-        ? blacklistDeviceForOpenSLESUsage
-        : WebRtcAudioUtils.deviceIsBlacklistedForOpenSLESUsage();
-    if (blacklisted) {
-      Logging.d(TAG, Build.MODEL + " is blacklisted for OpenSL ES usage!");
-    }
-    return blacklisted;
-  }
-
   // Returns true if low-latency audio output is supported.
   @CalledByNative
   public static boolean isLowLatencyOutputSupported(Context context) {
     return context.getPackageManager().hasSystemFeature(PackageManager.FEATURE_AUDIO_LOW_LATENCY);
   }

@@ -224,7 +198,6 @@ class WebRtcAudioManager {
   // Returns true if low-latency audio input is supported.
   // TODO(henrika): remove the hardcoded false return value when OpenSL ES
   // input performance has been evaluated and tested more.
   @CalledByNative
   public static boolean isLowLatencyInputSupported(Context context) {
     // TODO(henrika): investigate if some sort of device list is needed here
     // as well. The NDK doc states that: "As of API level 21, lower latency

@@ -193,7 +193,7 @@ class WebRtcAudioTrack {

   @CalledByNative
   WebRtcAudioTrack(long nativeAudioTrack) {
-    threadChecker.checkIsOnValidThread();
+    threadChecker.detachThread();
     Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
     this.nativeAudioTrack = nativeAudioTrack;
     audioManager =

@@ -35,16 +35,6 @@ import org.webrtc.Logging;
 final class WebRtcAudioUtils {
   private static final String TAG = "WebRtcAudioUtils";

-  // List of devices where we have seen issues (e.g. bad audio quality) using
-  // the low latency output mode in combination with OpenSL ES.
-  // The device name is given by Build.MODEL.
-  private static final String[] BLACKLISTED_OPEN_SL_ES_MODELS = new String[] {
-      // It is recommended to maintain a list of blacklisted models outside
-      // this package and instead call
-      // WebRtcAudioManager.setBlacklistDeviceForOpenSLESUsage(true)
-      // from the client for devices where OpenSL ES shall be disabled.
-  };
-
   // List of devices where it has been verified that the built-in effect is
   // bad and where it makes sense to avoid using it and instead rely on the
   // native WebRTC version. The device name is given by Build.MODEL.

@@ -207,12 +197,6 @@ final class WebRtcAudioUtils {
     return Build.HARDWARE.equals("goldfish") && Build.BRAND.startsWith("generic_");
   }

-  // Returns true if the device is blacklisted for OpenSL ES usage.
-  public static boolean deviceIsBlacklistedForOpenSLESUsage() {
-    List<String> blackListedModels = Arrays.asList(BLACKLISTED_OPEN_SL_ES_MODELS);
-    return blackListedModels.contains(Build.MODEL);
-  }
-
   // Information about the current build, taken from system properties.
   static void logDeviceInfo(String tag) {
     Logging.d(tag,

@@ -22,6 +22,7 @@
 #include "rtc_base/thread_annotations.h"
 #include "rtc_base/thread_checker.h"
 #include "sdk/android/src/jni/audio_device/aaudio_wrapper.h"
+#include "sdk/android/src/jni/audio_device/audio_device_module.h"

 namespace webrtc {

@@ -30,8 +31,6 @@ class FineAudioBuffer;

 namespace android_adm {

-class AudioManager;
-
 // Implements low-latency 16-bit mono PCM audio output support for Android
 // using the C based AAudio API.
 //

@@ -52,30 +51,31 @@ class AudioManager;
 // where the internal AAudio buffer can be increased when needed. It will
 // reduce the risk of underruns (~glitches) at the expense of an increased
 // latency.
-class AAudioPlayer final : public AAudioObserverInterface,
+class AAudioPlayer final : public AudioOutput,
+                           public AAudioObserverInterface,
                            public rtc::MessageHandler {
  public:
   explicit AAudioPlayer(AudioManager* audio_manager);
   ~AAudioPlayer();

-  int Init();
-  int Terminate();
+  int Init() override;
+  int Terminate() override;

-  int InitPlayout();
-  bool PlayoutIsInitialized() const;
+  int InitPlayout() override;
+  bool PlayoutIsInitialized() const override;

-  int StartPlayout();
-  int StopPlayout();
-  bool Playing() const;
+  int StartPlayout() override;
+  int StopPlayout() override;
+  bool Playing() const override;

-  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;

   // Not implemented in AAudio.
-  bool SpeakerVolumeIsAvailable();
-  int SetSpeakerVolume(uint32_t volume);
-  rtc::Optional<uint32_t> SpeakerVolume() const;
-  rtc::Optional<uint32_t> MaxSpeakerVolume() const;
-  rtc::Optional<uint32_t> MinSpeakerVolume() const;
+  bool SpeakerVolumeIsAvailable() override;
+  int SetSpeakerVolume(uint32_t volume) override;
+  rtc::Optional<uint32_t> SpeakerVolume() const override;
+  rtc::Optional<uint32_t> MaxSpeakerVolume() const override;
+  rtc::Optional<uint32_t> MinSpeakerVolume() const override;

  protected:
   // AAudioObserverInterface implementation.

@@ -20,6 +20,7 @@
 #include "rtc_base/thread.h"
 #include "rtc_base/thread_checker.h"
 #include "sdk/android/src/jni/audio_device/aaudio_wrapper.h"
+#include "sdk/android/src/jni/audio_device/audio_device_module.h"

 namespace webrtc {

@@ -28,8 +29,6 @@ class AudioDeviceBuffer;

 namespace android_adm {

-class AudioManager;
-
 // Implements low-latency 16-bit mono PCM audio input support for Android
 // using the C based AAudio API.
 //

@@ -44,30 +43,29 @@ class AudioManager;
 //
 // TODO(henrika): add comments about device changes and adaptive buffer
 // management.
-class AAudioRecorder : public AAudioObserverInterface,
+class AAudioRecorder : public AudioInput,
+                       public AAudioObserverInterface,
                        public rtc::MessageHandler {
  public:
   explicit AAudioRecorder(AudioManager* audio_manager);
   ~AAudioRecorder();

-  int Init();
-  int Terminate();
+  int Init() override;
+  int Terminate() override;

-  int InitRecording();
-  bool RecordingIsInitialized() const { return initialized_; }
+  int InitRecording() override;
+  bool RecordingIsInitialized() const override { return initialized_; }

-  int StartRecording();
-  int StopRecording();
-  bool Recording() const { return recording_; }
+  int StartRecording() override;
+  int StopRecording() override;
+  bool Recording() const override { return recording_; }

-  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
-
-  double latency_millis() const { return latency_millis_; }
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;

   // TODO(henrika): add support using AAudio APIs when available.
-  int EnableBuiltInAEC(bool enable);
-  int EnableBuiltInAGC(bool enable);
-  int EnableBuiltInNS(bool enable);
+  int EnableBuiltInAEC(bool enable) override;
+  int EnableBuiltInAGC(bool enable) override;
+  int EnableBuiltInNS(bool enable) override;

  protected:
   // AAudioObserverInterface implementation.

@@ -8,16 +8,14 @@
  * be found in the AUTHORS file in the root of the source tree.
  */

-#ifndef SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_DEVICE_TEMPLATE_ANDROID_H_
-#define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_DEVICE_TEMPLATE_ANDROID_H_
+#include "sdk/android/src/jni/audio_device/audio_device_module.h"

 #include <memory>
 #include <utility>

 #include "modules/audio_device/audio_device_buffer.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/refcountedobject.h"
 #include "rtc_base/thread_checker.h"
 #include "sdk/android/src/jni/audio_device/audio_manager.h"
 #include "system_wrappers/include/metrics.h"

 #define CHECKinitialized_() \
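
The CHECKinitialized_() and CHECKinitialized__BOOL() guards used by the methods below have bodies that fall outside these hunks. For orientation, a representative expansion of this kind of guard, assumed to mirror the equivalent macros in modules/audio_device rather than taken from this change:

// Assumed expansions: bail out of an ADM method until Init() has
// succeeded; the _BOOL variant is for methods that return bool.
#define CHECKinitialized_() \
  {                         \
    if (!initialized_) {    \
      return -1;            \
    }                       \
  }
#define CHECKinitialized__BOOL() \
  {                              \
    if (!initialized_) {         \
      return false;              \
    }                            \
  }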
@@ -38,6 +36,8 @@ namespace webrtc {

 namespace android_adm {

+namespace {
+
 // InputType/OutputType can be any class that implements the capturing/rendering
 // part of the AudioDeviceGeneric API.
 // Construction and destruction must be done on one and the same thread. Each
@@ -47,9 +47,7 @@ namespace android_adm {
 // It is possible to call the two static methods (SetAndroidAudioDeviceObjects
 // and ClearAndroidAudioDeviceObjects) from a different thread but both will
 // RTC_CHECK that the calling thread is attached to a Java VM.
-
-template <class InputType, class OutputType>
-class AudioDeviceTemplateAndroid : public AudioDeviceModule {
+class AndroidAudioDeviceModule : public AudioDeviceModule {
  public:
   // For use with UMA logging. Must be kept in sync with histograms.xml in
   // Chrome, located at
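
This is the core of the change: AudioDeviceTemplateAndroid was stamped out once per InputType/OutputType pair at compile time and constructed its own input and output objects, whereas AndroidAudioDeviceModule is a single concrete class that accepts any AudioInput/AudioOutput implementations at run time. A minimal sketch of the two composition styles, with hypothetical names (not WebRTC code):

#include <cstdint>
#include <memory>
#include <utility>

// Compile-time composition: every Input/Output pair produces a distinct
// module type, and the module constructs its own dependencies.
template <class InputType, class OutputType>
class TemplateModule {
 private:
  InputType input_;  // concrete types baked in at compile time
  OutputType output_;
};

// Run-time composition: one module type; dependencies arrive through a
// small interface, so callers can mix, match, or mock implementations.
class Input {
 public:
  virtual ~Input() = default;
  virtual int32_t Init() = 0;
};

class InterfaceModule {
 public:
  explicit InterfaceModule(std::unique_ptr<Input> input)
      : input_(std::move(input)) {}

 private:
  std::unique_ptr<Input> input_;
};

The run-time form trades a virtual call per operation for a single module type and constructor injection, which is what enables the factory function added at the end of this file.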
@@ -62,17 +60,23 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
     NUM_STATUSES = 4
   };

-  AudioDeviceTemplateAndroid(JNIEnv* env,
-                             const JavaParamRef<jobject>& application_context,
-                             AudioDeviceModule::AudioLayer audio_layer)
+  AndroidAudioDeviceModule(AudioDeviceModule::AudioLayer audio_layer,
+                           std::unique_ptr<AudioManager> audio_manager,
+                           std::unique_ptr<AudioInput> audio_input,
+                           std::unique_ptr<AudioOutput> audio_output)
       : audio_layer_(audio_layer),
-        audio_manager_(env, audio_layer, application_context),
+        audio_manager_(std::move(audio_manager)),
+        input_(std::move(audio_input)),
+        output_(std::move(audio_output)),
         initialized_(false) {
+    RTC_CHECK(input_);
+    RTC_CHECK(output_);
+    RTC_CHECK(audio_manager_);
     RTC_LOG(INFO) << __FUNCTION__;
     thread_checker_.DetachFromThread();
   }

-  virtual ~AudioDeviceTemplateAndroid() { RTC_LOG(INFO) << __FUNCTION__; }
+  virtual ~AndroidAudioDeviceModule() { RTC_LOG(INFO) << __FUNCTION__; }

   int32_t ActiveAudioLayer(
       AudioDeviceModule::AudioLayer* audioLayer) const override {
@@ -89,22 +93,20 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   int32_t Init() override {
     RTC_LOG(INFO) << __FUNCTION__;
     RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    output_ = rtc::MakeUnique<OutputType>(&audio_manager_);
-    input_ = rtc::MakeUnique<InputType>(&audio_manager_);
     audio_device_buffer_ = rtc::MakeUnique<AudioDeviceBuffer>();
     AttachAudioBuffer();
     if (initialized_) {
       return 0;
     }
     InitStatus status;
-    if (!audio_manager_.Init()) {
+    if (!audio_manager_->Init()) {
       status = InitStatus::OTHER_ERROR;
     } else if (output_->Init() != 0) {
-      audio_manager_.Close();
+      audio_manager_->Close();
       status = InitStatus::PLAYOUT_ERROR;
     } else if (input_->Init() != 0) {
       output_->Terminate();
-      audio_manager_.Close();
+      audio_manager_->Close();
       status = InitStatus::RECORDING_ERROR;
     } else {
       initialized_ = true;

@@ -127,7 +129,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
     RTC_DCHECK(thread_checker_.CalledOnValidThread());
     int32_t err = input_->Terminate();
     err |= output_->Terminate();
-    err |= !audio_manager_.Close();
+    err |= !audio_manager_->Close();
     initialized_ = false;
     RTC_DCHECK_EQ(err, 0);
     return err;
@@ -250,7 +252,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
       return 0;
     }
     audio_device_buffer_->StartPlayout();
-    if (!audio_manager_.IsCommunicationModeEnabled()) {
+    if (!audio_manager_->IsCommunicationModeEnabled()) {
       RTC_LOG(WARNING)
           << "The application should use MODE_IN_COMMUNICATION audio mode!";
     }

@@ -288,7 +290,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
     if (Recording()) {
       return 0;
     }
-    if (!audio_manager_.IsCommunicationModeEnabled()) {
+    if (!audio_manager_->IsCommunicationModeEnabled()) {
       RTC_LOG(WARNING)
           << "The application should use MODE_IN_COMMUNICATION audio mode!";
     }
@@ -474,7 +476,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   int32_t StereoPlayoutIsAvailable(bool* available) const override {
     RTC_LOG(INFO) << __FUNCTION__;
     CHECKinitialized_();
-    *available = audio_manager_.IsStereoPlayoutSupported();
+    *available = audio_manager_->IsStereoPlayoutSupported();
     RTC_LOG(INFO) << "output: " << *available;
     return 0;
   }

@@ -486,7 +488,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
       RTC_LOG(WARNING) << "recording in stereo is not supported";
       return -1;
     }
-    bool available = audio_manager_.IsStereoPlayoutSupported();
+    bool available = audio_manager_->IsStereoPlayoutSupported();
     // Android does not support changes between mono and stereo on the fly.
     // Instead, the native audio layer is configured via the audio manager
     // to either support mono or stereo. It is allowed to call this method

@@ -506,7 +508,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   int32_t StereoPlayout(bool* enabled) const override {
     RTC_LOG(INFO) << __FUNCTION__;
     CHECKinitialized_();
-    *enabled = audio_manager_.IsStereoPlayoutSupported();
+    *enabled = audio_manager_->IsStereoPlayoutSupported();
     RTC_LOG(INFO) << "output: " << *enabled;
     return 0;
   }

@@ -514,7 +516,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   int32_t StereoRecordingIsAvailable(bool* available) const override {
     RTC_LOG(INFO) << __FUNCTION__;
     CHECKinitialized_();
-    *available = audio_manager_.IsStereoRecordSupported();
+    *available = audio_manager_->IsStereoRecordSupported();
     RTC_LOG(INFO) << "output: " << *available;
     return 0;
   }

@@ -526,7 +528,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
       RTC_LOG(WARNING) << "recording in stereo is not supported";
       return -1;
     }
-    bool available = audio_manager_.IsStereoRecordSupported();
+    bool available = audio_manager_->IsStereoRecordSupported();
     // Android does not support changes between mono and stereo on the fly.
     // Instead, the native audio layer is configured via the audio manager
     // to either support mono or stereo. It is allowed to call this method

@@ -546,7 +548,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   int32_t StereoRecording(bool* enabled) const override {
     RTC_LOG(INFO) << __FUNCTION__;
     CHECKinitialized_();
-    *enabled = audio_manager_.IsStereoRecordSupported();
+    *enabled = audio_manager_->IsStereoRecordSupported();
     RTC_LOG(INFO) << "output: " << *enabled;
     return 0;
   }
@@ -554,7 +556,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   int32_t PlayoutDelay(uint16_t* delay_ms) const override {
     CHECKinitialized_();
     // Best guess we can do is to use half of the estimated total delay.
-    *delay_ms = audio_manager_.GetDelayEstimateInMilliseconds() / 2;
+    *delay_ms = audio_manager_->GetDelayEstimateInMilliseconds() / 2;
     RTC_DCHECK_GT(*delay_ms, 0);
     return 0;
   }

@@ -574,7 +576,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   bool BuiltInAECIsAvailable() const override {
     RTC_LOG(INFO) << __FUNCTION__;
     CHECKinitialized__BOOL();
-    bool isAvailable = audio_manager_.IsAcousticEchoCancelerSupported();
+    bool isAvailable = audio_manager_->IsAcousticEchoCancelerSupported();
     RTC_LOG(INFO) << "output: " << isAvailable;
     return isAvailable;
   }

@@ -598,7 +600,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   bool BuiltInNSIsAvailable() const override {
     RTC_LOG(INFO) << __FUNCTION__;
     CHECKinitialized__BOOL();
-    bool isAvailable = audio_manager_.IsNoiseSuppressorSupported();
+    bool isAvailable = audio_manager_->IsNoiseSuppressorSupported();
     RTC_LOG(INFO) << "output: " << isAvailable;
     return isAvailable;
   }
@@ -644,17 +646,26 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   rtc::ThreadChecker thread_checker_;

   const AudioDeviceModule::AudioLayer audio_layer_;

-  AudioManager audio_manager_;
-  std::unique_ptr<OutputType> output_;
-  std::unique_ptr<InputType> input_;
+  const std::unique_ptr<AudioManager> audio_manager_;
+  const std::unique_ptr<AudioInput> input_;
+  const std::unique_ptr<AudioOutput> output_;
   std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_;

   bool initialized_;
 };

+}  // namespace
+
+rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModuleFromInputAndOutput(
+    AudioDeviceModule::AudioLayer audio_layer,
+    std::unique_ptr<AudioManager> audio_manager,
+    std::unique_ptr<AudioInput> audio_input,
+    std::unique_ptr<AudioOutput> audio_output) {
+  return new rtc::RefCountedObject<AndroidAudioDeviceModule>(
+      audio_layer, std::move(audio_manager), std::move(audio_input),
+      std::move(audio_output));
+}
+
 }  // namespace android_adm

 }  // namespace webrtc
-
-#endif  // SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_DEVICE_TEMPLATE_ANDROID_H_
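
Since AAudioRecorder and AAudioPlayer now implement AudioInput/AudioOutput, any input/output pair can be composed through this one factory. As an illustration only, a hedged sketch of how an AAudio-backed module could be assembled (this CL itself wires up just the Java audio path, in the PeerConnectionFactory hunk near the end; the helper name is hypothetical):

#include <jni.h>
#include <utility>

#include "rtc_base/ptr_util.h"
#include "sdk/android/src/jni/audio_device/aaudio_player.h"
#include "sdk/android/src/jni/audio_device/aaudio_recorder.h"
#include "sdk/android/src/jni/audio_device/audio_device_module.h"

// Hypothetical helper; assumes AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO is
// defined so the AAudio classes are compiled in.
rtc::scoped_refptr<webrtc::AudioDeviceModule> CreateAAudioAdm(
    JNIEnv* env, const webrtc::JavaParamRef<jobject>& application_context) {
  using namespace webrtc::android_adm;
  const auto audio_layer = webrtc::AudioDeviceModule::kAndroidAAudioAudio;
  // The manager is created first; input and output keep raw pointers to
  // it, and the module then takes ownership of all three.
  auto audio_manager =
      rtc::MakeUnique<AudioManager>(env, audio_layer, application_context);
  auto audio_input = rtc::MakeUnique<AAudioRecorder>(audio_manager.get());
  auto audio_output = rtc::MakeUnique<AAudioPlayer>(audio_manager.get());
  return CreateAudioDeviceModuleFromInputAndOutput(
      audio_layer, std::move(audio_manager), std::move(audio_input),
      std::move(audio_output));
}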
sdk/android/src/jni/audio_device/audio_device_module.h (new file, 77 lines)
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_DEVICE_MODULE_H_
+#define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_DEVICE_MODULE_H_
+
+#include <memory>
+
+#include "api/optional.h"
+#include "modules/audio_device/audio_device_buffer.h"
+#include "sdk/android/native_api/jni/scoped_java_ref.h"
+#include "sdk/android/src/jni/audio_device/audio_manager.h"
+
+namespace webrtc {
+
+namespace android_adm {
+
+class AudioManager;
+
+class AudioInput {
+ public:
+  virtual ~AudioInput() {}
+
+  virtual int32_t Init() = 0;
+  virtual int32_t Terminate() = 0;
+
+  virtual int32_t InitRecording() = 0;
+  virtual bool RecordingIsInitialized() const = 0;
+
+  virtual int32_t StartRecording() = 0;
+  virtual int32_t StopRecording() = 0;
+  virtual bool Recording() const = 0;
+
+  virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) = 0;
+
+  virtual int32_t EnableBuiltInAEC(bool enable) = 0;
+  virtual int32_t EnableBuiltInAGC(bool enable) = 0;
+  virtual int32_t EnableBuiltInNS(bool enable) = 0;
+};
+
+class AudioOutput {
+ public:
+  virtual ~AudioOutput() {}
+
+  virtual int32_t Init() = 0;
+  virtual int32_t Terminate() = 0;
+  virtual int32_t InitPlayout() = 0;
+  virtual bool PlayoutIsInitialized() const = 0;
+  virtual int32_t StartPlayout() = 0;
+  virtual int32_t StopPlayout() = 0;
+  virtual bool Playing() const = 0;
+  virtual bool SpeakerVolumeIsAvailable() = 0;
+  virtual int SetSpeakerVolume(uint32_t volume) = 0;
+  virtual rtc::Optional<uint32_t> SpeakerVolume() const = 0;
+  virtual rtc::Optional<uint32_t> MaxSpeakerVolume() const = 0;
+  virtual rtc::Optional<uint32_t> MinSpeakerVolume() const = 0;
+  virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) = 0;
+};
+
+rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModuleFromInputAndOutput(
+    AudioDeviceModule::AudioLayer audio_layer,
+    std::unique_ptr<AudioManager> audio_manager,
+    std::unique_ptr<AudioInput> audio_input,
+    std::unique_ptr<AudioOutput> audio_output);
+
+}  // namespace android_adm
+
+}  // namespace webrtc
+
+#endif  // SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_DEVICE_MODULE_H_
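
One payoff of these pure-virtual interfaces is that recording and playout can now be faked independently of real devices. A sketch of an AudioInput test double (illustrative; not test code from this CL):

#include "sdk/android/src/jni/audio_device/audio_device_module.h"

// A no-op AudioInput: tracks state locally and never touches a device,
// so AndroidAudioDeviceModule can be driven in tests.
class FakeAudioInput : public webrtc::android_adm::AudioInput {
 public:
  int32_t Init() override { return 0; }
  int32_t Terminate() override { return 0; }
  int32_t InitRecording() override {
    initialized_ = true;
    return 0;
  }
  bool RecordingIsInitialized() const override { return initialized_; }
  int32_t StartRecording() override {
    recording_ = true;
    return 0;
  }
  int32_t StopRecording() override {
    recording_ = false;
    return 0;
  }
  bool Recording() const override { return recording_; }
  void AttachAudioBuffer(webrtc::AudioDeviceBuffer* audioBuffer) override {}
  // Report the built-in effects as unsupported.
  int32_t EnableBuiltInAEC(bool enable) override { return -1; }
  int32_t EnableBuiltInAGC(bool enable) override { return -1; }
  int32_t EnableBuiltInNS(bool enable) override { return -1; }

 private:
  bool initialized_ = false;
  bool recording_ = false;
};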
@@ -28,7 +28,7 @@
 #include "sdk/android/src/jni/audio_device/aaudio_player.h"
 #include "sdk/android/src/jni/audio_device/aaudio_recorder.h"
 #endif
-#include "sdk/android/src/jni/audio_device/audio_device_template_android.h"
+#include "sdk/android/src/jni/audio_device/audio_device_module.h"
 #include "sdk/android/src/jni/audio_device/audio_manager.h"
 #include "sdk/android/src/jni/audio_device/audio_record_jni.h"
 #include "sdk/android/src/jni/audio_device/audio_track_jni.h"
@@ -39,64 +39,6 @@ namespace webrtc {

 namespace android_adm {

-#if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
-rtc::scoped_refptr<AudioDeviceModule>
-AudioManager::CreateAAudioAudioDeviceModule(
-    JNIEnv* env,
-    const JavaParamRef<jobject>& application_context) {
-  RTC_LOG(INFO) << __FUNCTION__;
-  return new rtc::RefCountedObject<android_adm::AudioDeviceTemplateAndroid<
-      android_adm::AAudioRecorder, android_adm::AAudioPlayer>>(
-      env, application_context, AudioDeviceModule::kAndroidAAudioAudio);
-}
-#endif
-
-rtc::scoped_refptr<AudioDeviceModule> AudioManager::CreateAudioDeviceModule(
-    JNIEnv* env,
-    const JavaParamRef<jobject>& application_context) {
-  const bool use_opensles_output =
-      !Java_WebRtcAudioManager_isDeviceBlacklistedForOpenSLESUsage(env) &&
-      Java_WebRtcAudioManager_isLowLatencyOutputSupported(env,
-                                                          application_context);
-  const bool use_opensles_input =
-      use_opensles_output && Java_WebRtcAudioManager_isLowLatencyInputSupported(
-                                 env, application_context);
-  return CreateAudioDeviceModule(env, application_context, use_opensles_input,
-                                 use_opensles_output);
-}
-
-rtc::scoped_refptr<AudioDeviceModule> AudioManager::CreateAudioDeviceModule(
-    JNIEnv* env,
-    const JavaParamRef<jobject>& application_context,
-    bool use_opensles_input,
-    bool use_opensles_output) {
-  RTC_LOG(INFO) << __FUNCTION__;
-
-  if (use_opensles_output) {
-    if (use_opensles_input) {
-      // Use OpenSL ES for both playout and recording.
-      return new rtc::RefCountedObject<android_adm::AudioDeviceTemplateAndroid<
-          android_adm::OpenSLESRecorder, android_adm::OpenSLESPlayer>>(
-          env, application_context, AudioDeviceModule::kAndroidOpenSLESAudio);
-    } else {
-      // Use OpenSL ES for output and AudioRecord API for input. This
-      // combination provides low-latency output audio and at the same
-      // time support for HW AEC using the AudioRecord Java API.
-      return new rtc::RefCountedObject<android_adm::AudioDeviceTemplateAndroid<
-          android_adm::AudioRecordJni, android_adm::OpenSLESPlayer>>(
-          env, application_context,
-          AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio);
-    }
-  } else {
-    RTC_DCHECK(!use_opensles_input)
-        << "Combination of OpenSLES input and Java-based output not supported";
-    // Use Java-based audio in both directions.
-    return new rtc::RefCountedObject<android_adm::AudioDeviceTemplateAndroid<
-        android_adm::AudioRecordJni, android_adm::AudioTrackJni>>(
-        env, application_context, AudioDeviceModule::kAndroidJavaAudio);
-  }
-}
-
 // AudioManager implementation
 AudioManager::AudioManager(JNIEnv* env,
                            AudioDeviceModule::AudioLayer audio_layer,

@@ -120,6 +62,8 @@ AudioManager::AudioManager(JNIEnv* env,
                            static_cast<size_t>(output_buffer_size));
   record_parameters_.reset(sample_rate, static_cast<size_t>(input_channels),
                            static_cast<size_t>(input_buffer_size));
+  RTC_CHECK(playout_parameters_.is_valid());
+  RTC_CHECK(record_parameters_.is_valid());
   thread_checker_.DetachFromThread();
 }

@@ -217,12 +161,10 @@ bool AudioManager::IsNoiseSuppressorSupported() const {
 }

 bool AudioManager::IsStereoPlayoutSupported() const {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return (playout_parameters_.channels() == 2);
 }

 bool AudioManager::IsStereoRecordSupported() const {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return (record_parameters_.channels() == 2);
 }

@@ -234,13 +176,11 @@ int AudioManager::GetDelayEstimateInMilliseconds() const {

 const AudioParameters& AudioManager::GetPlayoutAudioParameters() {
   RTC_CHECK(playout_parameters_.is_valid());
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return playout_parameters_;
 }

 const AudioParameters& AudioManager::GetRecordAudioParameters() {
   RTC_CHECK(record_parameters_.is_valid());
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return record_parameters_;
 }

@@ -33,24 +33,6 @@ namespace android_adm {
 // unless Init() is called.
 class AudioManager {
  public:
-#if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
-  static rtc::scoped_refptr<AudioDeviceModule> CreateAAudioAudioDeviceModule(
-      JNIEnv* env,
-      const JavaParamRef<jobject>& application_context);
-#endif
-
-  static rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
-      JNIEnv* env,
-      const JavaParamRef<jobject>& application_context,
-      bool use_opensles_input,
-      bool use_opensles_output);
-
-  // This function has internal logic checking if OpenSLES is blacklisted and
-  // whether it's supported.
-  static rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
-      JNIEnv* env,
-      const JavaParamRef<jobject>& application_context);
-
   AudioManager(JNIEnv* env,
                AudioDeviceModule::AudioLayer audio_layer,
                const JavaParamRef<jobject>& application_context);

@@ -45,13 +45,13 @@ class ScopedHistogramTimer {
   const std::string histogram_name_;
   int64_t start_time_ms_;
 };

 }  // namespace

 // AudioRecordJni implementation.
 AudioRecordJni::AudioRecordJni(AudioManager* audio_manager)
-    : env_(AttachCurrentThreadIfNeeded()),
-      j_audio_record_(
-          Java_WebRtcAudioRecord_Constructor(env_,
+    : j_audio_record_(
+          Java_WebRtcAudioRecord_Constructor(AttachCurrentThreadIfNeeded(),
                                              jni::jlongFromPointer(this))),
       audio_manager_(audio_manager),
       audio_parameters_(audio_manager->GetRecordAudioParameters()),

@@ -64,8 +64,9 @@ AudioRecordJni::AudioRecordJni(AudioManager* audio_manager)
       audio_device_buffer_(nullptr) {
   RTC_LOG(INFO) << "ctor";
   RTC_DCHECK(audio_parameters_.is_valid());
-  // Detach from this thread since we want to use the checker to verify calls
-  // from the Java based audio thread.
+  // Detach from this thread since construction is allowed to happen on a
+  // different thread.
   thread_checker_.DetachFromThread();
   thread_checker_java_.DetachFromThread();
 }

@@ -77,6 +78,7 @@ AudioRecordJni::~AudioRecordJni() {

 int32_t AudioRecordJni::Init() {
   RTC_LOG(INFO) << "Init";
+  env_ = AttachCurrentThreadIfNeeded();
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return 0;
 }

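
The constructor and Init() changes here (and the matching ones in audio_track_jni.cc below) follow a single idiom: detach the checker during construction, then let it bind to whichever thread makes the first checked call. A minimal sketch of the pattern, with a hypothetical class name:

#include <cstdint>

#include "rtc_base/checks.h"
#include "rtc_base/thread_checker.h"

// Construction may happen on any thread. thread_checker_ binds to the
// thread that calls Init() first, and later checked calls must come
// from that same thread.
class ThreadBoundRecorder {
 public:
  ThreadBoundRecorder() { thread_checker_.DetachFromThread(); }

  int32_t Init() {
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    return 0;
  }

 private:
  rtc::ThreadChecker thread_checker_;
};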
@@ -17,6 +17,7 @@
 #include "modules/audio_device/audio_device_buffer.h"
 #include "modules/audio_device/include/audio_device_defines.h"
 #include "rtc_base/thread_checker.h"
+#include "sdk/android/src/jni/audio_device/audio_device_module.h"
 #include "sdk/android/src/jni/audio_device/audio_manager.h"

 namespace webrtc {

@@ -41,26 +42,26 @@ namespace android_adm {
 // This class uses AttachCurrentThreadIfNeeded to attach to a Java VM if needed
 // and detach when the object goes out of scope. Additional thread checking
 // guarantees that no other (possibly non attached) thread is used.
-class AudioRecordJni {
+class AudioRecordJni : public AudioInput {
  public:
   explicit AudioRecordJni(AudioManager* audio_manager);
-  ~AudioRecordJni();
+  ~AudioRecordJni() override;

-  int32_t Init();
-  int32_t Terminate();
+  int32_t Init() override;
+  int32_t Terminate() override;

-  int32_t InitRecording();
-  bool RecordingIsInitialized() const { return initialized_; }
+  int32_t InitRecording() override;
+  bool RecordingIsInitialized() const override { return initialized_; }

-  int32_t StartRecording();
-  int32_t StopRecording();
-  bool Recording() const { return recording_; }
+  int32_t StartRecording() override;
+  int32_t StopRecording() override;
+  bool Recording() const override { return recording_; }

-  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;

-  int32_t EnableBuiltInAEC(bool enable);
-  int32_t EnableBuiltInAGC(bool enable);
-  int32_t EnableBuiltInNS(bool enable);
+  int32_t EnableBuiltInAEC(bool enable) override;
+  int32_t EnableBuiltInAGC(bool enable) override;
+  int32_t EnableBuiltInNS(bool enable) override;

   // Called from Java side so we can cache the address of the Java-managed
   // |byte_buffer| in |direct_buffer_address_|. The size of the buffer

@@ -90,7 +91,7 @@ class AudioRecordJni {
   rtc::ThreadChecker thread_checker_java_;

   // Wraps the Java specific parts of the AudioRecordJni class.
-  JNIEnv* const env_;
+  JNIEnv* env_ = nullptr;
   ScopedJavaGlobalRef<jobject> j_audio_record_;

   // Raw pointer to the audio manager.

@@ -27,9 +27,9 @@ namespace android_adm {

 // TODO(henrika): possibly extend usage of AudioManager and add it as member.
 AudioTrackJni::AudioTrackJni(AudioManager* audio_manager)
-    : env_(AttachCurrentThreadIfNeeded()),
-      j_audio_track_(
-          Java_WebRtcAudioTrack_Constructor(env_, jni::jlongFromPointer(this))),
+    : j_audio_track_(
+          Java_WebRtcAudioTrack_Constructor(AttachCurrentThreadIfNeeded(),
+                                            jni::jlongFromPointer(this))),
       audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
       direct_buffer_address_(nullptr),
       direct_buffer_capacity_in_bytes_(0),

@@ -39,8 +39,9 @@ AudioTrackJni::AudioTrackJni(AudioManager* audio_manager)
       audio_device_buffer_(nullptr) {
   RTC_LOG(INFO) << "ctor";
   RTC_DCHECK(audio_parameters_.is_valid());
-  // Detach from this thread since we want to use the checker to verify calls
-  // from the Java based audio thread.
+  // Detach from this thread since construction is allowed to happen on a
+  // different thread.
   thread_checker_.DetachFromThread();
   thread_checker_java_.DetachFromThread();
 }

@@ -52,6 +53,7 @@ AudioTrackJni::~AudioTrackJni() {

 int32_t AudioTrackJni::Init() {
   RTC_LOG(INFO) << "Init";
+  env_ = AttachCurrentThreadIfNeeded();
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return 0;
 }

@@ -19,6 +19,7 @@
 #include "modules/audio_device/include/audio_device_defines.h"
 #include "rtc_base/thread_checker.h"
 #include "sdk/android/src/jni/audio_device/audio_common.h"
+#include "sdk/android/src/jni/audio_device/audio_device_module.h"
 #include "sdk/android/src/jni/audio_device/audio_manager.h"

 namespace webrtc {

@@ -38,28 +39,28 @@ namespace android_adm {
 // This class uses AttachCurrentThreadIfNeeded to attach to a Java VM if needed
 // and detach when the object goes out of scope. Additional thread checking
 // guarantees that no other (possibly non attached) thread is used.
-class AudioTrackJni {
+class AudioTrackJni : public AudioOutput {
  public:
   explicit AudioTrackJni(AudioManager* audio_manager);
-  ~AudioTrackJni();
+  ~AudioTrackJni() override;

-  int32_t Init();
-  int32_t Terminate();
+  int32_t Init() override;
+  int32_t Terminate() override;

-  int32_t InitPlayout();
-  bool PlayoutIsInitialized() const { return initialized_; }
+  int32_t InitPlayout() override;
+  bool PlayoutIsInitialized() const override { return initialized_; }

-  int32_t StartPlayout();
-  int32_t StopPlayout();
-  bool Playing() const { return playing_; }
+  int32_t StartPlayout() override;
+  int32_t StopPlayout() override;
+  bool Playing() const override { return playing_; }

-  bool SpeakerVolumeIsAvailable();
-  int SetSpeakerVolume(uint32_t volume);
-  rtc::Optional<uint32_t> SpeakerVolume() const;
-  rtc::Optional<uint32_t> MaxSpeakerVolume() const;
-  rtc::Optional<uint32_t> MinSpeakerVolume() const;
+  bool SpeakerVolumeIsAvailable() override;
+  int SetSpeakerVolume(uint32_t volume) override;
+  rtc::Optional<uint32_t> SpeakerVolume() const override;
+  rtc::Optional<uint32_t> MaxSpeakerVolume() const override;
+  rtc::Optional<uint32_t> MinSpeakerVolume() const override;

-  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;

   // Called from Java side so we can cache the address of the Java-managed
   // |byte_buffer| in |direct_buffer_address_|. The size of the buffer

@@ -86,7 +87,7 @@ class AudioTrackJni {
   rtc::ThreadChecker thread_checker_java_;

   // Wraps the Java specific parts of the AudioTrackJni class.
-  JNIEnv* const env_;
+  JNIEnv* env_ = nullptr;
   ScopedJavaGlobalRef<jobject> j_audio_track_;

   // Contains audio parameters provided to this class at construction by the

@@ -22,6 +22,7 @@
 #include "modules/audio_device/include/audio_device_defines.h"
 #include "rtc_base/thread_checker.h"
 #include "sdk/android/src/jni/audio_device/audio_common.h"
+#include "sdk/android/src/jni/audio_device/audio_device_module.h"
 #include "sdk/android/src/jni/audio_device/audio_manager.h"
 #include "sdk/android/src/jni/audio_device/opensles_common.h"

@@ -49,7 +50,7 @@ namespace android_adm {
 // If the device doesn't claim this feature but supports API level 9 (Android
 // platform version 2.3) or later, then we can still use the OpenSL ES APIs but
 // the output latency may be higher.
-class OpenSLESPlayer {
+class OpenSLESPlayer : public AudioOutput {
  public:
   // Beginning with API level 17 (Android 4.2), a buffer count of 2 or more is
   // required for lower latency. Beginning with API level 18 (Android 4.3), a

@@ -60,25 +61,25 @@ class OpenSLESPlayer {
   static const int kNumOfOpenSLESBuffers = 2;

   explicit OpenSLESPlayer(AudioManager* audio_manager);
-  ~OpenSLESPlayer();
+  ~OpenSLESPlayer() override;

-  int Init();
-  int Terminate();
+  int Init() override;
+  int Terminate() override;

-  int InitPlayout();
-  bool PlayoutIsInitialized() const { return initialized_; }
+  int InitPlayout() override;
+  bool PlayoutIsInitialized() const override { return initialized_; }

-  int StartPlayout();
-  int StopPlayout();
-  bool Playing() const { return playing_; }
+  int StartPlayout() override;
+  int StopPlayout() override;
+  bool Playing() const override { return playing_; }

-  bool SpeakerVolumeIsAvailable();
-  int SetSpeakerVolume(uint32_t volume);
-  rtc::Optional<uint32_t> SpeakerVolume() const;
-  rtc::Optional<uint32_t> MaxSpeakerVolume() const;
-  rtc::Optional<uint32_t> MinSpeakerVolume() const;
+  bool SpeakerVolumeIsAvailable() override;
+  int SetSpeakerVolume(uint32_t volume) override;
+  rtc::Optional<uint32_t> SpeakerVolume() const override;
+  rtc::Optional<uint32_t> MaxSpeakerVolume() const override;
+  rtc::Optional<uint32_t> MinSpeakerVolume() const override;

-  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;

  private:
   // These callback methods are called when data is required for playout.

@@ -22,6 +22,7 @@
 #include "modules/audio_device/include/audio_device_defines.h"
 #include "rtc_base/thread_checker.h"
 #include "sdk/android/src/jni/audio_device/audio_common.h"
+#include "sdk/android/src/jni/audio_device/audio_device_module.h"
 #include "sdk/android/src/jni/audio_device/audio_manager.h"
 #include "sdk/android/src/jni/audio_device/opensles_common.h"

@@ -52,7 +53,7 @@ namespace android_adm {
 // for input effects preclude the lower latency path.
 // See https://developer.android.com/ndk/guides/audio/opensl-prog-notes.html
 // for more details.
-class OpenSLESRecorder {
+class OpenSLESRecorder : public AudioInput {
  public:
   // Beginning with API level 17 (Android 4.2), a buffer count of 2 or more is
   // required for lower latency. Beginning with API level 18 (Android 4.3), a

@@ -63,24 +64,24 @@ class OpenSLESRecorder {
   static const int kNumOfOpenSLESBuffers = 2;

   explicit OpenSLESRecorder(AudioManager* audio_manager);
-  ~OpenSLESRecorder();
+  ~OpenSLESRecorder() override;

-  int Init();
-  int Terminate();
+  int Init() override;
+  int Terminate() override;

-  int InitRecording();
-  bool RecordingIsInitialized() const { return initialized_; }
+  int InitRecording() override;
+  bool RecordingIsInitialized() const override { return initialized_; }

-  int StartRecording();
-  int StopRecording();
-  bool Recording() const { return recording_; }
+  int StartRecording() override;
+  int StopRecording() override;
+  bool Recording() const override { return recording_; }

-  void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer);
+  void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) override;

   // TODO(henrika): add support using OpenSL ES APIs when available.
-  int EnableBuiltInAEC(bool enable);
-  int EnableBuiltInAGC(bool enable);
-  int EnableBuiltInNS(bool enable);
+  int EnableBuiltInAEC(bool enable) override;
+  int EnableBuiltInAGC(bool enable) override;
+  int EnableBuiltInNS(bool enable) override;

  private:
   // Obtains the SL Engine Interface from the existing global Engine object.

@@ -27,6 +27,8 @@
 #include "sdk/android/generated_peerconnection_jni/jni/PeerConnectionFactory_jni.h"
 #include "sdk/android/native_api/jni/java_types.h"
 #include "sdk/android/src/jni/audio_device/audio_manager.h"
+#include "sdk/android/src/jni/audio_device/audio_record_jni.h"
+#include "sdk/android/src/jni/audio_device/audio_track_jni.h"
 #include "sdk/android/src/jni/jni_helpers.h"
 #include "sdk/android/src/jni/pc/androidnetworkmonitor.h"
 #include "sdk/android/src/jni/pc/audio.h"

@@ -238,10 +240,23 @@ jlong CreatePeerConnectionFactoryForJava(
     rtc::NetworkMonitorFactory::SetFactory(network_monitor_factory);
   }

-  rtc::scoped_refptr<AudioDeviceModule> adm =
-      field_trial::IsEnabled(kExternalAndroidAudioDeviceFieldTrialName)
-          ? android_adm::AudioManager::CreateAudioDeviceModule(jni, jcontext)
-          : nullptr;
+  rtc::scoped_refptr<AudioDeviceModule> adm = nullptr;
+  if (field_trial::IsEnabled(kExternalAndroidAudioDeviceFieldTrialName)) {
+    // Only Java AudioDeviceModule is supported as an external ADM at the
+    // moment.
+    const AudioDeviceModule::AudioLayer audio_layer =
+        AudioDeviceModule::kAndroidJavaAudio;
+    auto audio_manager =
+        rtc::MakeUnique<android_adm::AudioManager>(jni, audio_layer, jcontext);
+    auto audio_input =
+        rtc::MakeUnique<android_adm::AudioRecordJni>(audio_manager.get());
+    auto audio_output =
+        rtc::MakeUnique<android_adm::AudioTrackJni>(audio_manager.get());
+    adm = CreateAudioDeviceModuleFromInputAndOutput(
+        audio_layer, std::move(audio_manager), std::move(audio_input),
+        std::move(audio_output));
+  }

   rtc::scoped_refptr<AudioMixer> audio_mixer = nullptr;
   std::unique_ptr<CallFactoryInterface> call_factory(CreateCallFactory());
   std::unique_ptr<RtcEventLogFactoryInterface> rtc_event_log_factory(