Adds support for new Windows ADM with limited API support.

Summary of what this CL does:

Existing users can keep using the old ADM for Windows as before.

A new ADM for Windows is created and a dedicated factory method is used
to create it. The old way (using AudioDeviceImpl) is not utilized.

The new ADM is based on a structure where most of the "action" takes
place in new AudioInput/AudioOutput implementations. This is in line
with our mobile platforms and also makes it easier to break out common
parts into a base class.

The AudioDevice unittest has always focused mainly on the "Start/Stop"
parts of the ADM rather than the complete ADM interface. This new ADM
supports all tests in AudioDeviceTest and is therefore tested alongside
the old version. A value-parameterized test is added for Windows builds.

Improves readability and the threading model, and makes the code easier
to maintain.

Uses the previously landed methods in webrtc::webrtc_win::core_audio_utility.

Bug: webrtc:9265
Change-Id: If2894b44528e74a181cf7ad1216f57386ee3a24d
Reviewed-on: https://webrtc-review.googlesource.com/78060
Reviewed-by: Oskar Sundbom <ossu@webrtc.org>
Commit-Queue: Henrik Andreassson <henrika@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23554}
This commit is contained in:
henrika
2018-06-08 16:10:03 +02:00
committed by Commit Bot
parent 488eb98616
commit ec9c745228
19 changed files with 2564 additions and 103 deletions

View File

@ -160,6 +160,57 @@ rtc_source_set("audio_device_name") {
]
}
rtc_source_set("windows_core_audio_utility") {
if (is_win && !build_with_chromium) {
sources = [
"win/core_audio_utility_win.cc",
"win/core_audio_utility_win.h",
]
deps = [
":audio_device_api",
":audio_device_name",
"../../api/units:time_delta",
"../../rtc_base:checks",
"../../rtc_base:rtc_base_approved",
]
}
}
# An ADM with a dedicated factory method which does not depend on the
# audio_device_impl target. The goal is to use this new structure and
# gradually phase out the old design.
# TODO(henrika): currently only supported on Windows.
rtc_source_set("audio_device_module_from_input_and_output") {
visibility = [ ":*" ]
if (is_win && !build_with_chromium) {
sources = [
"include/audio_device_factory.cc",
"include/audio_device_factory.h",
]
sources += [
"win/audio_device_module_win.cc",
"win/audio_device_module_win.h",
"win/core_audio_base_win.cc",
"win/core_audio_base_win.h",
"win/core_audio_input_win.cc",
"win/core_audio_input_win.h",
"win/core_audio_output_win.cc",
"win/core_audio_output_win.h",
]
deps = [
":audio_device_api",
":audio_device_buffer",
":windows_core_audio_utility",
"../../api:optional",
"../../rtc_base:checks",
"../../rtc_base:rtc_base_approved",
]
}
}
# Contains default implementations of webrtc::AudioDeviceModule for Windows,
# Linux, Mac, iOS and Android.
rtc_source_set("audio_device_impl") {
@ -187,9 +238,6 @@ rtc_source_set("audio_device_impl") {
if (rtc_include_internal_audio_device && is_ios) {
deps += [ ":audio_device_ios_objc" ]
}
if (is_win) {
deps += [ ":audio_device_name" ]
}
sources = [
"dummy/audio_device_dummy.cc",
@ -339,8 +387,6 @@ rtc_source_set("audio_device_impl") {
sources += [
"win/audio_device_core_win.cc",
"win/audio_device_core_win.h",
"win/core_audio_utility_win.cc",
"win/core_audio_utility_win.h",
]
libs = [
# Required for the built-in WASAPI AEC.
@ -449,8 +495,12 @@ if (rtc_include_tests) {
if (is_linux || is_mac || is_win) {
sources += [ "audio_device_unittest.cc" ]
}
if (is_win && !rtc_use_dummy_audio_file_devices) {
if (is_win) {
sources += [ "win/core_audio_utility_win_unittest.cc" ]
deps += [
":audio_device_module_from_input_and_output",
":windows_core_audio_utility",
]
}
if (is_android) {
# Need to disable error due to the line in

View File

@ -72,6 +72,15 @@ namespace webrtc {
rtc::scoped_refptr<AudioDeviceModule> AudioDeviceModule::Create(
const AudioLayer audio_layer) {
RTC_LOG(INFO) << __FUNCTION__;
// The "AudioDeviceModule::kWindowsCoreAudio2" audio layer has its own
// dedicated factory method which should be used instead.
if (audio_layer == AudioDeviceModule::kWindowsCoreAudio2) {
RTC_LOG(LS_ERROR) << "Use the CreateWindowsCoreAudioAudioDeviceModule() "
"factory method instead for this option.";
return nullptr;
}
// Create the generic reference counted (platform independent) implementation.
rtc::scoped_refptr<AudioDeviceModuleImpl> audioDevice(
new rtc::RefCountedObject<AudioDeviceModuleImpl>(audio_layer));

View File

@ -10,6 +10,7 @@
#include <algorithm>
#include <cstring>
#include <memory>
#include <numeric>
#include "api/array_view.h"
@ -29,6 +30,10 @@
#include "rtc_base/timeutils.h"
#include "test/gmock.h"
#include "test/gtest.h"
#ifdef WEBRTC_WIN
#include "modules/audio_device/include/audio_device_factory.h"
#include "modules/audio_device/win/core_audio_utility_win.h"
#endif
using ::testing::_;
using ::testing::AtLeast;
@ -445,17 +450,17 @@ class MockAudioTransport : public test::MockAudioTransport {
};
// AudioDeviceTest test fixture.
class AudioDeviceTest : public ::testing::Test {
class AudioDeviceTest
: public ::testing::TestWithParam<webrtc::AudioDeviceModule::AudioLayer> {
protected:
AudioDeviceTest() : event_(false, false) {
AudioDeviceTest() : audio_layer_(GetParam()), event_(false, false) {
#if !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) && \
!defined(WEBRTC_DUMMY_AUDIO_BUILD)
rtc::LogMessage::LogToDebug(rtc::LS_INFO);
// Add extra logging fields here if needed for debugging.
// rtc::LogMessage::LogTimestamps();
// rtc::LogMessage::LogThreads();
audio_device_ =
AudioDeviceModule::Create(AudioDeviceModule::kPlatformDefaultAudio);
rtc::LogMessage::LogTimestamps();
rtc::LogMessage::LogThreads();
audio_device_ = CreateAudioDevice();
EXPECT_NE(audio_device_.get(), nullptr);
AudioDeviceModule::AudioLayer audio_layer;
int got_platform_audio_layer =
@ -506,6 +511,32 @@ class AudioDeviceTest : public ::testing::Test {
return audio_device_;
}
// Creates the ADM under test based on |audio_layer_|, which is set at
// construction from GetParam(). kPlatformDefaultAudio uses the generic
// AudioDeviceModule::Create() factory; kWindowsCoreAudio2 uses the dedicated
// CreateWindowsCoreAudioAudioDeviceModule() factory (Windows only). Any other
// layer yields nullptr.
rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice() {
// Use the default factory for kPlatformDefaultAudio and a special factory
// CreateWindowsCoreAudioAudioDeviceModule() for kWindowsCoreAudio2.
// The value of |audio_layer_| is set at construction by GetParam() and two
// different layers are tested on Windows only.
if (audio_layer_ == AudioDeviceModule::kPlatformDefaultAudio) {
return AudioDeviceModule::Create(audio_layer_);
} else if (audio_layer_ == AudioDeviceModule::kWindowsCoreAudio2) {
#ifdef WEBRTC_WIN
// We must initialize the COM library on the thread before calling any of
// the library functions. All COM functions in the ADM will return
// CO_E_NOTINITIALIZED otherwise. The initializer is kept as a member so the
// thread stays COM-initialized for the lifetime of the fixture.
com_initializer_ = rtc::MakeUnique<webrtc_win::ScopedCOMInitializer>(
webrtc_win::ScopedCOMInitializer::kMTA);
EXPECT_TRUE(com_initializer_->Succeeded());
EXPECT_TRUE(webrtc_win::core_audio_utility::IsSupported());
EXPECT_TRUE(webrtc_win::core_audio_utility::IsMMCSSSupported());
return CreateWindowsCoreAudioAudioDeviceModule();
#else
return nullptr;
#endif
} else {
return nullptr;
}
}
void StartPlayout() {
EXPECT_FALSE(audio_device()->Playing());
EXPECT_EQ(0, audio_device()->InitPlayout());
@ -534,17 +565,70 @@ class AudioDeviceTest : public ::testing::Test {
EXPECT_FALSE(audio_device()->RecordingIsInitialized());
}
// Returns true when the ADM under test is the new Windows Core Audio based
// implementation (active layer reports kWindowsCoreAudio2). When that ADM is
// used, device enumeration prepends the default device and the default
// communication device, so the device lists contain two extra elements.
// Always false on non-Windows platforms.
bool NewWindowsAudioDeviceModuleIsUsed() {
#ifdef WEBRTC_WIN
AudioDeviceModule::AudioLayer audio_layer;
EXPECT_EQ(0, audio_device()->ActiveAudioLayer(&audio_layer));
return audio_layer == AudioDeviceModule::kWindowsCoreAudio2;
#else
return false;
#endif
}
private:
#ifdef WEBRTC_WIN
// Windows Core Audio based ADM needs to run on a COM initialized thread.
std::unique_ptr<webrtc_win::ScopedCOMInitializer> com_initializer_;
#endif
AudioDeviceModule::AudioLayer audio_layer_;
bool requirements_satisfied_ = true;
rtc::Event event_;
rtc::scoped_refptr<AudioDeviceModule> audio_device_;
bool stereo_playout_ = false;
};
// Uses the test fixture to create, initialize and destruct the ADM.
TEST_F(AudioDeviceTest, ConstructDestruct) {}
// Instead of using the test fixture, verify that the different factory methods
// work as intended.
TEST(AudioDeviceTestWin, ConstructDestructWithFactory) {
rtc::scoped_refptr<AudioDeviceModule> audio_device;
// The default factory should work for all platforms when a default ADM is
// requested.
audio_device =
AudioDeviceModule::Create(AudioDeviceModule::kPlatformDefaultAudio);
EXPECT_TRUE(audio_device);
audio_device = nullptr;
#ifdef WEBRTC_WIN
// For Windows, the old factory method creates an ADM where the platform-
// specific parts are implemented by an AudioDeviceGeneric object. Verify
// that the old factory can't be used in combination with the latest audio
// layer AudioDeviceModule::kWindowsCoreAudio2.
audio_device =
AudioDeviceModule::Create(AudioDeviceModule::kWindowsCoreAudio2);
EXPECT_FALSE(audio_device);
audio_device = nullptr;
// Instead, ensure that the new dedicated factory method called
// CreateWindowsCoreAudioAudioDeviceModule() can be used on Windows and that
// it sets the audio layer to kWindowsCoreAudio2 implicitly. Note that, the
// new ADM for Windows must be created on a COM thread.
webrtc_win::ScopedCOMInitializer com_initializer(
webrtc_win::ScopedCOMInitializer::kMTA);
EXPECT_TRUE(com_initializer.Succeeded());
audio_device = CreateWindowsCoreAudioAudioDeviceModule();
EXPECT_TRUE(audio_device);
AudioDeviceModule::AudioLayer audio_layer;
EXPECT_EQ(0, audio_device->ActiveAudioLayer(&audio_layer));
EXPECT_EQ(audio_layer, AudioDeviceModule::kWindowsCoreAudio2);
#endif
}
TEST_F(AudioDeviceTest, InitTerminate) {
// Uses the test fixture to create, initialize and destruct the ADM.
TEST_P(AudioDeviceTest, ConstructDestructDefault) {}
TEST_P(AudioDeviceTest, InitTerminate) {
SKIP_TEST_IF_NOT(requirements_satisfied());
// Initialization is part of the test fixture.
EXPECT_TRUE(audio_device()->Initialized());
@ -552,28 +636,105 @@ TEST_F(AudioDeviceTest, InitTerminate) {
EXPECT_FALSE(audio_device()->Initialized());
}
// Tests Start/Stop playout without any registered audio callback.
TEST_F(AudioDeviceTest, StartStopPlayout) {
// Enumerate all available and active output devices.
TEST_P(AudioDeviceTest, PlayoutDeviceNames) {
SKIP_TEST_IF_NOT(requirements_satisfied());
char device_name[kAdmMaxDeviceNameSize];
char unique_id[kAdmMaxGuidSize];
int num_devices = audio_device()->PlayoutDevices();
if (NewWindowsAudioDeviceModuleIsUsed()) {
num_devices += 2;
}
EXPECT_GT(num_devices, 0);
for (int i = 0; i < num_devices; ++i) {
EXPECT_EQ(0, audio_device()->PlayoutDeviceName(i, device_name, unique_id));
}
EXPECT_EQ(-1, audio_device()->PlayoutDeviceName(num_devices, device_name,
unique_id));
}
// Enumerate all available and active input devices.
TEST_P(AudioDeviceTest, RecordingDeviceNames) {
SKIP_TEST_IF_NOT(requirements_satisfied());
char device_name[kAdmMaxDeviceNameSize];
char unique_id[kAdmMaxGuidSize];
int num_devices = audio_device()->RecordingDevices();
if (NewWindowsAudioDeviceModuleIsUsed()) {
num_devices += 2;
}
EXPECT_GT(num_devices, 0);
for (int i = 0; i < num_devices; ++i) {
EXPECT_EQ(0,
audio_device()->RecordingDeviceName(i, device_name, unique_id));
}
EXPECT_EQ(-1, audio_device()->RecordingDeviceName(num_devices, device_name,
unique_id));
}
// Counts number of active output devices and ensure that all can be selected.
TEST_P(AudioDeviceTest, SetPlayoutDevice) {
SKIP_TEST_IF_NOT(requirements_satisfied());
int num_devices = audio_device()->PlayoutDevices();
if (NewWindowsAudioDeviceModuleIsUsed()) {
num_devices += 2;
}
EXPECT_GT(num_devices, 0);
// Verify that all available playout devices can be set (not enabled yet).
for (int i = 0; i < num_devices; ++i) {
EXPECT_EQ(0, audio_device()->SetPlayoutDevice(i));
}
EXPECT_EQ(-1, audio_device()->SetPlayoutDevice(num_devices));
#ifdef WEBRTC_WIN
// On Windows, verify the alternative method where the user can select device
// by role.
EXPECT_EQ(
0, audio_device()->SetPlayoutDevice(AudioDeviceModule::kDefaultDevice));
EXPECT_EQ(0, audio_device()->SetPlayoutDevice(
AudioDeviceModule::kDefaultCommunicationDevice));
#endif
}
// Counts number of active input devices and ensure that all can be selected.
TEST_P(AudioDeviceTest, SetRecordingDevice) {
SKIP_TEST_IF_NOT(requirements_satisfied());
int num_devices = audio_device()->RecordingDevices();
if (NewWindowsAudioDeviceModuleIsUsed()) {
num_devices += 2;
}
EXPECT_GT(num_devices, 0);
// Verify that all available recording devices can be set (not enabled yet).
for (int i = 0; i < num_devices; ++i) {
EXPECT_EQ(0, audio_device()->SetRecordingDevice(i));
}
EXPECT_EQ(-1, audio_device()->SetRecordingDevice(num_devices));
#ifdef WEBRTC_WIN
// On Windows, verify the alternative method where the user can select device
// by role.
EXPECT_EQ(
0, audio_device()->SetRecordingDevice(AudioDeviceModule::kDefaultDevice));
EXPECT_EQ(0, audio_device()->SetRecordingDevice(
AudioDeviceModule::kDefaultCommunicationDevice));
#endif
}
// Tests Start/Stop playout without any registered audio callback.
TEST_P(AudioDeviceTest, StartStopPlayout) {
SKIP_TEST_IF_NOT(requirements_satisfied());
StartPlayout();
StopPlayout();
StartPlayout();
StopPlayout();
}
// Tests Start/Stop recording without any registered audio callback.
TEST_F(AudioDeviceTest, StartStopRecording) {
TEST_P(AudioDeviceTest, StartStopRecording) {
SKIP_TEST_IF_NOT(requirements_satisfied());
StartRecording();
StopRecording();
StartRecording();
StopRecording();
}
// Tests Init/Stop/Init recording without any registered audio callback.
// See https://bugs.chromium.org/p/webrtc/issues/detail?id=8041 for details
// on why this test is useful.
TEST_F(AudioDeviceTest, InitStopInitRecording) {
TEST_P(AudioDeviceTest, InitStopInitRecording) {
SKIP_TEST_IF_NOT(requirements_satisfied());
EXPECT_EQ(0, audio_device()->InitRecording());
EXPECT_TRUE(audio_device()->RecordingIsInitialized());
@ -583,7 +744,7 @@ TEST_F(AudioDeviceTest, InitStopInitRecording) {
}
// Tests Init/Stop/Init recording while playout is active.
TEST_F(AudioDeviceTest, InitStopInitRecordingWhilePlaying) {
TEST_P(AudioDeviceTest, InitStopInitRecordingWhilePlaying) {
SKIP_TEST_IF_NOT(requirements_satisfied());
StartPlayout();
EXPECT_EQ(0, audio_device()->InitRecording());
@ -595,7 +756,7 @@ TEST_F(AudioDeviceTest, InitStopInitRecordingWhilePlaying) {
}
// Tests Init/Stop/Init playout without any registered audio callback.
TEST_F(AudioDeviceTest, InitStopInitPlayout) {
TEST_P(AudioDeviceTest, InitStopInitPlayout) {
SKIP_TEST_IF_NOT(requirements_satisfied());
EXPECT_EQ(0, audio_device()->InitPlayout());
EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
@ -605,7 +766,7 @@ TEST_F(AudioDeviceTest, InitStopInitPlayout) {
}
// Tests Init/Stop/Init playout while recording is active.
TEST_F(AudioDeviceTest, InitStopInitPlayoutWhileRecording) {
TEST_P(AudioDeviceTest, InitStopInitPlayoutWhileRecording) {
SKIP_TEST_IF_NOT(requirements_satisfied());
StartRecording();
EXPECT_EQ(0, audio_device()->InitPlayout());
@ -621,7 +782,7 @@ TEST_F(AudioDeviceTest, InitStopInitPlayoutWhileRecording) {
// Note that we can't add expectations on audio parameters in EXPECT_CALL
// since parameter are not provided in the each callback. We therefore test and
// verify the parameters in the fake audio transport implementation instead.
TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
TEST_P(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
SKIP_TEST_IF_NOT(requirements_satisfied());
MockAudioTransport mock(TransportType::kPlay);
mock.HandleCallbacks(event(), nullptr, kNumCallbacks);
@ -635,7 +796,7 @@ TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
// Start recording and verify that the native audio layer starts providing real
// audio samples using the RecordedDataIsAvailable() callback.
TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
TEST_P(AudioDeviceTest, StartRecordingVerifyCallbacks) {
SKIP_TEST_IF_NOT(requirements_satisfied());
MockAudioTransport mock(TransportType::kRecord);
mock.HandleCallbacks(event(), nullptr, kNumCallbacks);
@ -650,7 +811,7 @@ TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
// Start playout and recording (full-duplex audio) and verify that audio is
// active in both directions.
TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
TEST_P(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
SKIP_TEST_IF_NOT(requirements_satisfied());
MockAudioTransport mock(TransportType::kPlayAndRecord);
mock.HandleCallbacks(event(), nullptr, kNumCallbacks);
@ -679,7 +840,7 @@ TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
// sequence by running in loopback for a few seconds while measuring the size
// (max and average) of the FIFO. The size of the FIFO is increased by the
// recording side and decreased by the playout side.
TEST_F(AudioDeviceTest, RunPlayoutAndRecordingInFullDuplex) {
TEST_P(AudioDeviceTest, RunPlayoutAndRecordingInFullDuplex) {
SKIP_TEST_IF_NOT(requirements_satisfied());
NiceMock<MockAudioTransport> mock(TransportType::kPlayAndRecord);
FifoAudioStream audio_stream;
@ -716,7 +877,7 @@ TEST_F(AudioDeviceTest, RunPlayoutAndRecordingInFullDuplex) {
// some sort of audio feedback loop. E.g. a headset where the mic is placed
// close to the speaker to ensure highest possible echo. It is also recommended
// to run the test at highest possible output volume.
TEST_F(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
TEST_P(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
SKIP_TEST_IF_NOT(requirements_satisfied());
NiceMock<MockAudioTransport> mock(TransportType::kPlayAndRecord);
LatencyAudioStream audio_stream;
@ -739,4 +900,20 @@ TEST_F(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
audio_stream.PrintResults();
}
#ifdef WEBRTC_WIN
// Test two different audio layers (or rather two different Core Audio
// implementations) for Windows.
INSTANTIATE_TEST_CASE_P(
AudioLayerWin,
AudioDeviceTest,
::testing::Values(AudioDeviceModule::kPlatformDefaultAudio,
AudioDeviceModule::kWindowsCoreAudio2));
#else
// For all platforms but Windows, only test the default audio layer.
INSTANTIATE_TEST_CASE_P(
AudioLayer,
AudioDeviceTest,
::testing::Values(AudioDeviceModule::kPlatformDefaultAudio));
#endif
} // namespace webrtc

View File

@ -65,7 +65,9 @@ void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
while (playout_buffer_.size() < audio_buffer.size()) {
// Get 10ms decoded audio from WebRTC. The ADB knows about number of
// channels; hence we can ask for number of samples per channel here.
audio_device_buffer_->RequestPlayoutData(playout_samples_per_channel_10ms_);
if (audio_device_buffer_->RequestPlayoutData(
playout_samples_per_channel_10ms_) ==
static_cast<int32_t>(playout_samples_per_channel_10ms_)) {
// Append 10ms to the end of the local buffer taking number of channels
// into account.
const size_t num_elements_10ms =
@ -77,6 +79,13 @@ void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
return playout_channels_ * samples_per_channel_10ms;
});
RTC_DCHECK_EQ(num_elements_10ms, written_elements);
} else {
// Provide silence if AudioDeviceBuffer::RequestPlayoutData() fails.
// Can e.g. happen when an AudioTransport has not been registered.
const size_t num_bytes = audio_buffer.size() * sizeof(int16_t);
std::memset(audio_buffer.data(), 0, num_bytes);
return;
}
}
// Provide the requested number of bytes to the consumer.

View File

@ -28,15 +28,16 @@ class AudioDeviceModule : public rtc::RefCountInterface {
enum AudioLayer {
kPlatformDefaultAudio = 0,
kWindowsCoreAudio = 2,
kLinuxAlsaAudio = 3,
kLinuxPulseAudio = 4,
kAndroidJavaAudio = 5,
kAndroidOpenSLESAudio = 6,
kAndroidJavaInputAndOpenSLESOutputAudio = 7,
kAndroidAAudioAudio = 8,
kAndroidJavaInputAndAAudioOutputAudio = 9,
kDummyAudio = 10
kWindowsCoreAudio,
kWindowsCoreAudio2, // experimental
kLinuxAlsaAudio,
kLinuxPulseAudio,
kAndroidJavaAudio,
kAndroidOpenSLESAudio,
kAndroidJavaInputAndOpenSLESOutputAudio,
kAndroidAAudioAudio,
kAndroidJavaInputAndAAudioOutputAudio,
kDummyAudio,
};
enum WindowsDeviceType {

View File

@ -0,0 +1,40 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_device/include/audio_device_factory.h"
#if defined(WEBRTC_WIN)
#include "modules/audio_device/win/audio_device_module_win.h"
#include "modules/audio_device/win/core_audio_input_win.h"
#include "modules/audio_device/win/core_audio_output_win.h"
#include "modules/audio_device/win/core_audio_utility_win.h"
#endif
#include "rtc_base/logging.h"
#include "rtc_base/ptr_util.h"
namespace webrtc {
// Dedicated factory for the new Windows ADM: wires a CoreAudioInput and a
// CoreAudioOutput into an AudioDeviceModule via
// CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput().
// NOTE(review): the calling thread must already be COM initialized (see
// webrtc_win::ScopedCOMInitializer); IsSupported() presumably fails
// otherwise — confirm against core_audio_utility_win.
rtc::scoped_refptr<AudioDeviceModule>
CreateWindowsCoreAudioAudioDeviceModule() {
RTC_DLOG(INFO) << __FUNCTION__;
// Returns NULL if Core Audio is not supported or if COM has not been
// initialized correctly using webrtc_win::ScopedCOMInitializer.
if (!webrtc_win::core_audio_utility::IsSupported()) {
RTC_LOG(LS_ERROR)
<< "Unable to create ADM since Core Audio is not supported";
return nullptr;
}
return CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput(
rtc::MakeUnique<webrtc_win::CoreAudioInput>(),
rtc::MakeUnique<webrtc_win::CoreAudioOutput>());
}
} // namespace webrtc

View File

@ -0,0 +1,40 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_FACTORY_H_
#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_FACTORY_H_
#include "modules/audio_device/include/audio_device.h"
namespace webrtc {
// Creates an AudioDeviceModule (ADM) for Windows based on the Core Audio API.
// The creating thread must be a COM thread; otherwise nullptr will be returned.
// Example (assuming webrtc namespace):
//
// public:
// rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice() {
// // Tell COM that this thread shall live in the MTA.
// com_initializer_ = rtc::MakeUnique<webrtc_win::ScopedCOMInitializer>(
// webrtc_win::ScopedCOMInitializer::kMTA);
// if (!com_initializer_->Succeeded()) {
// return nullptr;
// }
// return CreateWindowsCoreAudioAudioDeviceModule();
// }
//
// private:
// std::unique_ptr<webrtc_win::ScopedCOMInitializer> com_initializer_;
//
rtc::scoped_refptr<AudioDeviceModule> CreateWindowsCoreAudioAudioDeviceModule();
} // namespace webrtc
#endif // MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_FACTORY_H_

View File

@ -0,0 +1,423 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_device/win/audio_device_module_win.h"
#include <utility>
#include "modules/audio_device/audio_device_buffer.h"
#include "modules/audio_device/include/audio_device.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/refcountedobject.h"
#include "rtc_base/stringutils.h"
#include "rtc_base/thread_checker.h"
namespace webrtc {
namespace webrtc_win {
namespace {
// This class combines a generic instance of an AudioInput and a generic
// instance of an AudioOutput to create an AudioDeviceModule. This is mostly
// done by delegating to the audio input/output with some glue code. This class
// also directly implements some of the AudioDeviceModule methods with dummy
// implementations.
//
// An instance must be created, destroyed and used on one and the same thread,
// i.e., all public methods must also be called on the same thread. A thread
// checker will RTC_DCHECK if any method is called on an invalid thread.
// TODO(henrika): is thread checking needed in AudioInput and AudioOutput?
class WindowsAudioDeviceModule : public AudioDeviceModule {
public:
enum class InitStatus {
OK = 0,
PLAYOUT_ERROR = 1,
RECORDING_ERROR = 2,
OTHER_ERROR = 3,
NUM_STATUSES = 4
};
WindowsAudioDeviceModule(std::unique_ptr<AudioInput> audio_input,
std::unique_ptr<AudioOutput> audio_output)
: input_(std::move(audio_input)), output_(std::move(audio_output)) {
RTC_CHECK(input_);
RTC_CHECK(output_);
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
}
~WindowsAudioDeviceModule() override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
Terminate();
}
WindowsAudioDeviceModule(const WindowsAudioDeviceModule&) = delete;
WindowsAudioDeviceModule& operator=(const WindowsAudioDeviceModule&) = delete;
int32_t ActiveAudioLayer(
AudioDeviceModule::AudioLayer* audioLayer) const override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
// TODO(henrika): it might be possible to remove this unique signature.
*audioLayer = AudioDeviceModule::kWindowsCoreAudio2;
return 0;
}
int32_t RegisterAudioCallback(AudioTransport* audioCallback) override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK(audio_device_buffer_);
RTC_DCHECK_RUN_ON(&thread_checker_);
return audio_device_buffer_->RegisterAudioCallback(audioCallback);
}
// Initializes the module: creates the shared AudioDeviceBuffer, attaches it
// to both sides, then initializes output first and input second. Idempotent:
// returns 0 immediately if already initialized. Returns -1 on any failure.
int32_t Init() override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
if (initialized_) {
return 0;
}
audio_device_buffer_ = rtc::MakeUnique<AudioDeviceBuffer>();
AttachAudioBuffer();
InitStatus status;
if (output_->Init() != 0) {
status = InitStatus::PLAYOUT_ERROR;
} else if (input_->Init() != 0) {
// Roll back the already-initialized output side so the module is left in
// a consistent, uninitialized state.
output_->Terminate();
status = InitStatus::RECORDING_ERROR;
} else {
initialized_ = true;
status = InitStatus::OK;
}
if (status != InitStatus::OK) {
RTC_LOG(LS_ERROR) << "Audio device initialization failed";
return -1;
}
return 0;
}
int32_t Terminate() override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
if (!initialized_)
return 0;
int32_t err = input_->Terminate();
err |= output_->Terminate();
initialized_ = false;
RTC_DCHECK_EQ(err, 0);
return err;
}
bool Initialized() const override {
RTC_DCHECK_RUN_ON(&thread_checker_);
return initialized_;
}
int16_t PlayoutDevices() override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
return output_->NumDevices();
}
int16_t RecordingDevices() override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
return input_->NumDevices();
}
// Fetches the name (and, when |guid| is non-null, the unique id) of the
// playout device at |index| from the output side and copies the strings into
// the caller-provided fixed-size C buffers. Returns the output side's result
// code (0 on success).
int32_t PlayoutDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
std::string name_utf8;
std::string guid_utf8;
const bool want_guid = guid != nullptr;
const int ret =
output_->DeviceName(index, &name_utf8, want_guid ? &guid_utf8 : nullptr);
if (want_guid) {
rtc::strcpyn(guid, kAdmMaxGuidSize, guid_utf8.c_str());
}
rtc::strcpyn(name, kAdmMaxDeviceNameSize, name_utf8.c_str());
return ret;
}
// Fetches the name (and, when |guid| is non-null, the unique id) of the
// recording device at |index| from the input side and copies the strings into
// the caller-provided fixed-size C buffers. Returns the input side's result
// code (0 on success).
int32_t RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
std::string name_utf8;
std::string guid_utf8;
const bool want_guid = guid != nullptr;
const int ret =
input_->DeviceName(index, &name_utf8, want_guid ? &guid_utf8 : nullptr);
if (want_guid) {
rtc::strcpyn(guid, kAdmMaxGuidSize, guid_utf8.c_str());
}
rtc::strcpyn(name, kAdmMaxDeviceNameSize, name_utf8.c_str());
return ret;
}
int32_t SetPlayoutDevice(uint16_t index) override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
return output_->SetDevice(index);
}
int32_t SetPlayoutDevice(
AudioDeviceModule::WindowsDeviceType device) override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
return output_->SetDevice(device);
}
int32_t SetRecordingDevice(uint16_t index) override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
return input_->SetDevice(index);
}
int32_t SetRecordingDevice(
AudioDeviceModule::WindowsDeviceType device) override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
return input_->SetDevice(device);
}
int32_t PlayoutIsAvailable(bool* available) override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
*available = true;
return 0;
}
int32_t InitPlayout() override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
return output_->InitPlayout();
}
bool PlayoutIsInitialized() const override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
return output_->PlayoutIsInitialized();
}
int32_t RecordingIsAvailable(bool* available) override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
*available = true;
return 0;
}
int32_t InitRecording() override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
return input_->InitRecording();
}
bool RecordingIsInitialized() const override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
return input_->RecordingIsInitialized();
}
int32_t StartPlayout() override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
return output_->StartPlayout();
}
int32_t StopPlayout() override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
return output_->StopPlayout();
}
bool Playing() const override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
return output_->Playing();
}
int32_t StartRecording() override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
return input_->StartRecording();
}
int32_t StopRecording() override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
return input_->StopRecording();
}
bool Recording() const override {
RTC_LOG(INFO) << __FUNCTION__;
return input_->Recording();
}
int32_t InitSpeaker() override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
RTC_DLOG(LS_WARNING) << "This method has no effect";
return initialized_ ? 0 : -1;
}
bool SpeakerIsInitialized() const override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
RTC_DLOG(LS_WARNING) << "This method has no effect";
return initialized_;
}
int32_t InitMicrophone() override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
RTC_DLOG(LS_WARNING) << "This method has no effect";
return initialized_ ? 0 : -1;
}
bool MicrophoneIsInitialized() const override {
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
RTC_DLOG(LS_WARNING) << "This method has no effect";
return initialized_;
}
int32_t SpeakerVolumeIsAvailable(bool* available) override {
// TODO(henrika): improve support.
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
*available = false;
return 0;
}
int32_t SetSpeakerVolume(uint32_t volume) override { return 0; }
int32_t SpeakerVolume(uint32_t* volume) const override { return 0; }
int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override { return 0; }
int32_t MinSpeakerVolume(uint32_t* minVolume) const override { return 0; }
int32_t MicrophoneVolumeIsAvailable(bool* available) override {
// TODO(henrika): improve support.
RTC_LOG(INFO) << __FUNCTION__;
RTC_DCHECK_RUN_ON(&thread_checker_);
*available = false;
return 0;
}
int32_t SetMicrophoneVolume(uint32_t volume) override { return 0; }
int32_t MicrophoneVolume(uint32_t* volume) const override { return 0; }
int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override { return 0; }
int32_t MinMicrophoneVolume(uint32_t* minVolume) const override { return 0; }
int32_t SpeakerMuteIsAvailable(bool* available) override { return 0; }
int32_t SetSpeakerMute(bool enable) override { return 0; }
int32_t SpeakerMute(bool* enabled) const override { return 0; }
int32_t MicrophoneMuteIsAvailable(bool* available) override { return 0; }
int32_t SetMicrophoneMute(bool enable) override { return 0; }
int32_t MicrophoneMute(bool* enabled) const override { return 0; }
// Stereo playout is always reported as available; the actual channel layout
// is negotiated by the AudioOutput implementation.
int32_t StereoPlayoutIsAvailable(bool* available) const override {
  // TODO(henrika): improve support.
  RTC_LOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  *available = true;
  return 0;
}
// Accepts any request without acting on it; the output side decides the
// channel configuration internally.
int32_t SetStereoPlayout(bool enable) override {
  // TODO(henrika): improve support.
  RTC_LOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  return 0;
}
// Always reports stereo playout as active.
int32_t StereoPlayout(bool* enabled) const override {
  // TODO(henrika): improve support.
  RTC_LOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  *enabled = true;
  return 0;
}
// Stereo recording is always reported as available; see note on playout.
int32_t StereoRecordingIsAvailable(bool* available) const override {
  // TODO(henrika): improve support.
  RTC_LOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  *available = true;
  return 0;
}
// Accepts any request without acting on it; the input side decides the
// channel configuration internally.
int32_t SetStereoRecording(bool enable) override {
  // TODO(henrika): improve support.
  RTC_LOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  return 0;
}
// Always reports stereo recording as active.
int32_t StereoRecording(bool* enabled) const override {
  // TODO(henrika): improve support.
  RTC_LOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  *enabled = true;
  return 0;
}
// Reports the playout delay estimate. This ADM does not yet measure it, so
// report 0 ms. Writing a defined value matters: the method returns success
// and callers will read |*delayMS|, which was previously left uninitialized.
int32_t PlayoutDelay(uint16_t* delayMS) const override {
  *delayMS = 0;
  return 0;
}
// Hardware/OS-provided audio effects (AEC/AGC/NS) are not supported by this
// ADM; the queries return false and the enable calls are accepted as no-ops.
bool BuiltInAECIsAvailable() const override { return false; }
bool BuiltInAGCIsAvailable() const override { return false; }
bool BuiltInNSIsAvailable() const override { return false; }
int32_t EnableBuiltInAEC(bool enable) override { return 0; }
int32_t EnableBuiltInAGC(bool enable) override { return 0; }
int32_t EnableBuiltInNS(bool enable) override { return 0; }
// Hands the shared AudioDeviceBuffer to both the input and output sides.
// Each side uses orthogonal (record vs. play) parts of the buffer, so sharing
// a single instance is safe. Always returns 0.
int32_t AttachAudioBuffer() {
  RTC_DLOG(INFO) << __FUNCTION__;
  output_->AttachAudioBuffer(audio_device_buffer_.get());
  input_->AttachAudioBuffer(audio_device_buffer_.get());
  return 0;
}
private:
  // Ensures that the class is used on the same thread as it is constructed
  // and destroyed on. All public ADM methods above are checked against it.
  rtc::ThreadChecker thread_checker_;
  // Implements the AudioInput interface and deals with audio capturing parts.
  // Owned by this object; const pointer set at construction.
  const std::unique_ptr<AudioInput> input_;
  // Implements the AudioOutput interface and deals with audio rendering parts.
  // Owned by this object; const pointer set at construction.
  const std::unique_ptr<AudioOutput> output_;
  // The AudioDeviceBuffer (ADB) instance is needed for sending/receiving audio
  // to/from the WebRTC layer. Created and owned by this object. Used by
  // both |input_| and |output_| but they use orthogonal parts of the ADB.
  std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_;
  // Set to true after a successful call to Init(). Cleared by Terminate().
  bool initialized_ = false;
};
} // namespace
// Factory which combines a given AudioInput and AudioOutput implementation
// into a reference-counted AudioDeviceModule. Ownership of both parts is
// transferred to the returned module.
rtc::scoped_refptr<AudioDeviceModule>
CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput(
    std::unique_ptr<AudioInput> audio_input,
    std::unique_ptr<AudioOutput> audio_output) {
  RTC_LOG(INFO) << __FUNCTION__;
  rtc::scoped_refptr<AudioDeviceModule> audio_device_module(
      new rtc::RefCountedObject<WindowsAudioDeviceModule>(
          std::move(audio_input), std::move(audio_output)));
  return audio_device_module;
}
} // namespace webrtc_win
} // namespace webrtc

View File

@ -0,0 +1,79 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_MODULE_WIN_H_
#define MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_MODULE_WIN_H_
#include <memory>
#include <string>
#include "modules/audio_device/include/audio_device.h"
#include "rtc_base/scoped_ref_ptr.h"
namespace webrtc {
class AudioDeviceBuffer;
namespace webrtc_win {
// This interface represents the main input-related parts of the complete
// AudioDeviceModule interface.
// This interface represents the main input-related parts of the complete
// AudioDeviceModule interface. Implementations are driven by the combined
// ADM created via CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput.
class AudioInput {
 public:
  virtual ~AudioInput() {}
  // Lifecycle: Init() must succeed before any other call; Terminate()
  // releases all resources. Both return 0 on success.
  virtual int Init() = 0;
  virtual int Terminate() = 0;
  // Device enumeration and selection. SetDevice() must be called before
  // InitRecording().
  virtual int NumDevices() const = 0;
  virtual int SetDevice(int index) = 0;
  virtual int SetDevice(AudioDeviceModule::WindowsDeviceType device) = 0;
  virtual int DeviceName(int index, std::string* name, std::string* guid) = 0;
  // Attaches the buffer used to deliver captured audio to the WebRTC layer.
  // The buffer is owned by the caller.
  virtual void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) = 0;
  // Recording state machine: InitRecording() -> StartRecording() ->
  // StopRecording().
  virtual bool RecordingIsInitialized() const = 0;
  virtual int InitRecording() = 0;
  virtual int StartRecording() = 0;
  virtual int StopRecording() = 0;
  virtual bool Recording() = 0;
  // Queries whether session volume control is supported for this input.
  virtual int VolumeIsAvailable(bool* available) = 0;
};
// This interface represents the main output-related parts of the complete
// AudioDeviceModule interface.
// This interface represents the main output-related parts of the complete
// AudioDeviceModule interface. It mirrors AudioInput but for the rendering
// (playout) direction.
class AudioOutput {
 public:
  virtual ~AudioOutput() {}
  // Lifecycle: Init() must succeed before any other call; Terminate()
  // releases all resources. Both return 0 on success.
  virtual int Init() = 0;
  virtual int Terminate() = 0;
  // Device enumeration and selection. SetDevice() must be called before
  // InitPlayout().
  virtual int NumDevices() const = 0;
  virtual int SetDevice(int index) = 0;
  virtual int SetDevice(AudioDeviceModule::WindowsDeviceType device) = 0;
  virtual int DeviceName(int index, std::string* name, std::string* guid) = 0;
  // Attaches the buffer used to pull audio to render from the WebRTC layer.
  // The buffer is owned by the caller.
  virtual void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) = 0;
  // Playout state machine: InitPlayout() -> StartPlayout() -> StopPlayout().
  virtual bool PlayoutIsInitialized() const = 0;
  virtual int InitPlayout() = 0;
  virtual int StartPlayout() = 0;
  virtual int StopPlayout() = 0;
  virtual bool Playing() = 0;
  // Queries whether session volume control is supported for this output.
  virtual int VolumeIsAvailable(bool* available) = 0;
};
// Combines an AudioInput and an AudioOutput implementation to build an
// AudioDeviceModule. Hides most parts of the full ADM interface.
// Ownership of both implementations is transferred to the returned module.
rtc::scoped_refptr<AudioDeviceModule>
CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput(
    std::unique_ptr<AudioInput> audio_input,
    std::unique_ptr<AudioOutput> audio_output);
} // namespace webrtc_win
} // namespace webrtc
#endif // MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_MODULE_WIN_H_

View File

@ -0,0 +1,439 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_device/win/core_audio_base_win.h"

#include <string>
#include <utility>

#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/ptr_util.h"
#include "rtc_base/win/windows_version.h"
using Microsoft::WRL::ComPtr;
namespace webrtc {
namespace webrtc_win {
namespace {
// Reserved device-list positions for the two synthetic "default" entries that
// are prepended to the list of enumerated physical devices. Index 0 maps to
// the default device, index 1 to the default communications device.
enum DefaultDeviceType {
  kDefault,
  kDefaultCommunications,
  // Number of synthetic entries; also used as the enumeration offset.
  kDefaultDeviceTypeMaxCount,
};
// Converts a CoreAudioBase::Direction enumerator to a human-readable string
// for logging. Never returns null.
const char* DirectionToString(CoreAudioBase::Direction direction) {
  switch (direction) {
    case CoreAudioBase::Direction::kOutput:
      return "Output";
    case CoreAudioBase::Direction::kInput:
      return "Input";
    default:
      return "Unknown";
  }
}
// Thread entry point used by rtc::PlatformThread. |obj| is the CoreAudioBase
// instance that owns (and joins) the thread, so it outlives this call.
void Run(void* obj) {
  RTC_DCHECK(obj);
  // static_cast is the well-defined named cast for converting a void* back
  // to the pointer type it originated from.
  static_cast<CoreAudioBase*>(obj)->ThreadRun();
}
} // namespace
// Constructs the base for either the capture or the render side.
// |callback| is invoked on the internal audio thread each time the audio
// engine signals that a buffer is ready; it is taken by value and moved into
// the member to avoid copying the std::function state twice.
CoreAudioBase::CoreAudioBase(Direction direction, OnDataCallback callback)
    : direction_(direction),
      on_data_callback_(std::move(callback)),
      format_() {
  RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction) << "]";
  // Create the event which the audio engine will signal each time a buffer
  // becomes ready to be processed by the client.
  audio_samples_event_.Set(CreateEvent(nullptr, false, false, nullptr));
  RTC_DCHECK(audio_samples_event_.IsValid());
  // Event to be set in Stop() when rendering/capturing shall stop.
  stop_event_.Set(CreateEvent(nullptr, false, false, nullptr));
  RTC_DCHECK(stop_event_.IsValid());
}
// Destructor. The owning subclass is expected to have stopped streaming
// (StopThread() joins the audio thread) before this runs; the ScopedHandle
// members close their events automatically.
CoreAudioBase::~CoreAudioBase() {
  RTC_DLOG(INFO) << __FUNCTION__;
}
// Maps the stream direction onto the corresponding Core Audio data-flow
// enumerator: render for output, capture for input.
EDataFlow CoreAudioBase::GetDataFlow() const {
  if (direction_ == CoreAudioBase::Direction::kOutput) {
    return eRender;
  }
  return eCapture;
}
int CoreAudioBase::NumberOfActiveDevices() const {
return core_audio_utility::NumberOfActiveDevices(GetDataFlow());
}
// Returns the number of devices as seen by the enumeration API: all active
// devices plus the two synthetic default entries, or 0 if no device exists.
int CoreAudioBase::NumberOfEnumeratedDevices() const {
  const int active_devices = NumberOfActiveDevices();
  if (active_devices <= 0) {
    return 0;
  }
  return active_devices + kDefaultDeviceTypeMaxCount;
}
// True if |index| denotes the synthetic "default device" entry (position 0).
bool CoreAudioBase::IsDefaultDevice(int index) const {
  return index == kDefault;
}
// True if |index| denotes the synthetic "default communications device"
// entry (position 1).
bool CoreAudioBase::IsDefaultCommunicationsDevice(int index) const {
  return index == kDefaultCommunications;
}
// True if |device_id| equals the unique id of the default device for this
// stream direction.
bool CoreAudioBase::IsDefaultDevice(const std::string& device_id) const {
  return (IsInput() &&
          (device_id == core_audio_utility::GetDefaultInputDeviceID())) ||
         (IsOutput() &&
          (device_id == core_audio_utility::GetDefaultOutputDeviceID()));
}
// True if |device_id| equals the unique id of the default communications
// device for this stream direction.
bool CoreAudioBase::IsDefaultCommunicationsDevice(
    const std::string& device_id) const {
  return (IsInput() &&
          (device_id ==
           core_audio_utility::GetCommunicationsInputDeviceID())) ||
         (IsOutput() &&
          (device_id == core_audio_utility::GetCommunicationsOutputDeviceID()));
}
// Direction predicates; exactly one of the two is true for an instance.
bool CoreAudioBase::IsInput() const {
  return direction_ == CoreAudioBase::Direction::kInput;
}
bool CoreAudioBase::IsOutput() const {
  return direction_ == CoreAudioBase::Direction::kOutput;
}
// Maps an enumeration index to a unique device id string. Index 0 and 1 map
// to the default and default-communications devices; higher indices are
// looked up in the enumerated device-name list. Returns an empty string on
// failure.
// NOTE(review): the else-branch indexes |device_names| with the raw |index|,
// i.e. it assumes GetInput/OutputDeviceNames() prepends the two default
// entries so the offsets line up — confirm against core_audio_utility.
std::string CoreAudioBase::GetDeviceID(int index) const {
  if (index >= NumberOfEnumeratedDevices()) {
    RTC_LOG(LS_ERROR) << "Invalid device index";
    return std::string();
  }
  std::string device_id;
  if (IsDefaultDevice(index)) {
    device_id = IsInput() ? core_audio_utility::GetDefaultInputDeviceID()
                          : core_audio_utility::GetDefaultOutputDeviceID();
  } else if (IsDefaultCommunicationsDevice(index)) {
    device_id = IsInput()
                    ? core_audio_utility::GetCommunicationsInputDeviceID()
                    : core_audio_utility::GetCommunicationsOutputDeviceID();
  } else {
    AudioDeviceNames device_names;
    bool ok = IsInput()
                  ? core_audio_utility::GetInputDeviceNames(&device_names)
                  : core_audio_utility::GetOutputDeviceNames(&device_names);
    if (ok) {
      device_id = device_names[index].unique_id;
    }
  }
  return device_id;
}
// Retrieves the friendly name (and optionally the unique id) of the device at
// enumeration position |index|. |name| must be non-null; |guid| may be null.
// Returns 0 on success and -1 on an invalid index or enumeration failure.
int CoreAudioBase::DeviceName(int index,
                              std::string* name,
                              std::string* guid) const {
  RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
                 << "]";
  // Reject negative indices as well: |device_names[index]| below would
  // otherwise read out of bounds.
  if (index < 0 || index > NumberOfEnumeratedDevices() - 1) {
    RTC_LOG(LS_ERROR) << "Invalid device index";
    return -1;
  }
  AudioDeviceNames device_names;
  bool ok = IsInput() ? core_audio_utility::GetInputDeviceNames(&device_names)
                      : core_audio_utility::GetOutputDeviceNames(&device_names);
  if (!ok) {
    RTC_LOG(LS_ERROR) << "Failed to get the device name";
    return -1;
  }
  *name = device_names[index].device_name;
  RTC_DLOG(INFO) << "name: " << *name;
  if (guid != nullptr) {
    *guid = device_names[index].unique_id;
    // Log the string, not the pointer value.
    RTC_DLOG(INFO) << "guid: " << *guid;
  }
  return 0;
}
// Creates and configures an IAudioClient in event-driven shared mode for the
// device selected in |device_id_|. On success, the negotiated audio format is
// stored in |format_|, the endpoint buffer size in
// |endpoint_buffer_size_frames_|, and the client interface in |audio_client_|.
// Returns false and leaves |audio_client_| unset on any failure. Must be
// called before Start().
bool CoreAudioBase::Init() {
  RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
                 << "]";
  RTC_DCHECK(!device_id_.empty());
  RTC_DCHECK(audio_device_buffer_);
  RTC_DCHECK(!audio_client_.Get());
  // Use an existing |device_id_| and set parameters which are required to
  // create an audio client. It is up to the parent class to set |device_id_|.
  // TODO(henrika): improve device notification.
  std::string device_id = device_id_;
  ERole role = eConsole;
  // Translate the two synthetic device ids into the sentinel ids and ERole
  // values that core_audio_utility::CreateClient() understands.
  if (IsDefaultDevice(device_id)) {
    device_id = AudioDeviceName::kDefaultDeviceId;
    role = eConsole;
  } else if (IsDefaultCommunicationsDevice(device_id)) {
    device_id = AudioDeviceName::kDefaultCommunicationsDeviceId;
    role = eCommunications;
  }
  // Create an IAudioClient interface which enables us to create and initialize
  // an audio stream between an audio application and the audio engine.
  ComPtr<IAudioClient> audio_client =
      core_audio_utility::CreateClient(device_id, GetDataFlow(), role);
  if (!audio_client.Get()) {
    return false;
  }
  // Retrieve preferred audio input or output parameters for the given client.
  AudioParameters params;
  if (FAILED(core_audio_utility::GetPreferredAudioParameters(audio_client.Get(),
                                                             &params))) {
    return false;
  }
  // Define the output WAVEFORMATEXTENSIBLE format in |format_|.
  WAVEFORMATEX* format = &format_.Format;
  format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
  format->nChannels = rtc::dchecked_cast<WORD>(params.channels());
  format->nSamplesPerSec = params.sample_rate();
  format->wBitsPerSample = rtc::dchecked_cast<WORD>(params.bits_per_sample());
  format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
  format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
  format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
  // Add the parts which are unique for the WAVE_FORMAT_EXTENSIBLE structure.
  format_.Samples.wValidBitsPerSample =
      rtc::dchecked_cast<WORD>(params.bits_per_sample());
  // TODO(henrika): improve (common for input and output?)
  format_.dwChannelMask = params.channels() == 1
                              ? SPEAKER_FRONT_CENTER
                              : SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT;
  format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
  RTC_DLOG(INFO) << core_audio_utility::WaveFormatExToString(&format_);
  // Verify that the format is supported.
  if (!core_audio_utility::IsFormatSupported(
          audio_client.Get(), AUDCLNT_SHAREMODE_SHARED, &format_)) {
    return false;
  }
  // Initialize the audio stream between the client and the device in shared
  // mode using event-driven buffer handling. The engine will signal
  // |audio_samples_event_| each time a buffer becomes ready.
  if (FAILED(core_audio_utility::SharedModeInitialize(
          audio_client.Get(), &format_, audio_samples_event_,
          &endpoint_buffer_size_frames_))) {
    return false;
  }
  // Check device period and the preferred buffer size and log a warning if
  // WebRTC's buffer size is not an even divisor of the preferred buffer size
  // in Core Audio.
  // TODO(henrika): sort out if a non-perfect match really is an issue.
  REFERENCE_TIME device_period;
  if (FAILED(core_audio_utility::GetDevicePeriod(
          audio_client.Get(), AUDCLNT_SHAREMODE_SHARED, &device_period))) {
    return false;
  }
  const double device_period_in_seconds =
      static_cast<double>(
          core_audio_utility::ReferenceTimeToTimeDelta(device_period).ms()) /
      1000.0L;
  // Round to the nearest whole number of frames per device period.
  const int preferred_frames_per_buffer =
      static_cast<int>(params.sample_rate() * device_period_in_seconds + 0.5);
  RTC_DLOG(INFO) << "preferred_frames_per_buffer: "
                 << preferred_frames_per_buffer;
  if (preferred_frames_per_buffer % params.frames_per_buffer()) {
    RTC_LOG(WARNING) << "Buffer size of " << params.frames_per_buffer()
                     << " is not an even divisor of "
                     << preferred_frames_per_buffer;
  }
  // Store valid COM interfaces.
  audio_client_ = audio_client;
  return true;
}
// Starts the dedicated real-time audio thread and then starts streaming on
// the audio client. Returns false (after cleaning up the thread) if either
// step fails. Requires a successful Init() first so that |audio_client_| is
// valid. NOTE(review): there is no explicit check of |audio_client_| here —
// calling Start() without Init() would dereference a null COM pointer;
// presumably the subclass guarantees the ordering.
bool CoreAudioBase::Start() {
  RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
                 << "]";
  audio_thread_ = rtc::MakeUnique<rtc::PlatformThread>(
      Run, this, IsInput() ? "wasapi_capture_thread" : "wasapi_render_thread",
      rtc::kRealtimePriority);
  audio_thread_->Start();
  if (!audio_thread_->IsRunning()) {
    StopThread();
    RTC_LOG(LS_ERROR) << "Failed to start audio thread";
    return false;
  }
  RTC_DLOG(INFO) << "Started thread with name: " << audio_thread_->name();
  // Start streaming data between the endpoint buffer and the audio engine.
  _com_error error = audio_client_->Start();
  if (error.Error() != S_OK) {
    StopThread();
    RTC_LOG(LS_ERROR) << "IAudioClient::Start failed: "
                      << core_audio_utility::ErrorToString(error);
    return false;
  }
  return true;
}
// Stops streaming on the audio client, joins the audio thread and resets the
// stream so a later Start() begins from position 0. Failures from the COM
// calls are logged but do not abort the shutdown sequence; the method always
// returns true.
bool CoreAudioBase::Stop() {
  RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
                 << "]";
  // Stop streaming and the internal audio thread.
  _com_error error = audio_client_->Stop();
  if (error.Error() != S_OK) {
    RTC_LOG(LS_ERROR) << "IAudioClient::Stop failed: "
                      << core_audio_utility::ErrorToString(error);
  }
  // Join the audio thread before resetting so no callback is in flight.
  StopThread();
  // Flush all pending data and reset the audio clock stream position to 0.
  error = audio_client_->Reset();
  if (error.Error() != S_OK) {
    RTC_LOG(LS_ERROR) << "IAudioClient::Reset failed: "
                      << core_audio_utility::ErrorToString(error);
  }
  if (IsOutput()) {
    // Extra safety check to ensure that the buffers are cleared.
    // If the buffers are not cleared correctly, the next call to Start()
    // would fail with AUDCLNT_E_BUFFER_ERROR at
    // IAudioRenderClient::GetBuffer().
    UINT32 num_queued_frames = 0;
    audio_client_->GetCurrentPadding(&num_queued_frames);
    RTC_DCHECK_EQ(0u, num_queued_frames);
  }
  return true;
}
// Probes whether session volume control works for this stream by creating an
// ISimpleAudioVolume and reading the master volume once. Sets |*available|
// and returns true on success, false on any failure. Previously this method
// returned false even after a successful probe (so callers always reported
// an error) and fell through to overwrite |*available| on the error path.
bool CoreAudioBase::IsVolumeControlAvailable(bool* available) const {
  // A valid IAudioClient is required to access the ISimpleAudioVolume
  // interface properly. It is possible to use
  // IAudioSessionManager::GetSimpleAudioVolume as well but we use the audio
  // client here to ensure that the initialized audio session is visible under
  // group box labeled "Applications" in Sndvol.exe.
  if (!audio_client_.Get()) {
    *available = false;
    return false;
  }
  // Try to create an ISimpleAudioVolume instance.
  ComPtr<ISimpleAudioVolume> audio_volume =
      core_audio_utility::CreateSimpleAudioVolume(audio_client_.Get());
  if (!audio_volume.Get()) {
    RTC_DLOG(LS_ERROR) << "Volume control is not supported";
    *available = false;
    return false;
  }
  // Try to use the valid volume control.
  float volume = 0.0;
  _com_error error = audio_volume->GetMasterVolume(&volume);
  if (error.Error() != S_OK) {
    RTC_LOG(LS_ERROR) << "ISimpleAudioVolume::GetMasterVolume failed: "
                      << core_audio_utility::ErrorToString(error);
    *available = false;
    return false;
  }
  RTC_DLOG(INFO) << "master volume for output audio session: " << volume;
  *available = true;
  return true;
}
// Signals |stop_event_| so ThreadRun() exits its loop, joins and destroys the
// audio thread, and finally re-arms the stop event so a subsequent Start()
// does not terminate immediately. Safe to call when no thread is running.
void CoreAudioBase::StopThread() {
  RTC_DLOG(INFO) << __FUNCTION__;
  if (audio_thread_) {
    if (audio_thread_->IsRunning()) {
      RTC_DLOG(INFO) << "Sets stop_event...";
      SetEvent(stop_event_.Get());
      RTC_DLOG(INFO) << "PlatformThread::Stop...";
      audio_thread_->Stop();
    }
    audio_thread_.reset();
    // Ensure that we don't quit the main thread loop immediately next
    // time Start() is called.
    ResetEvent(stop_event_.Get());
  }
}
void CoreAudioBase::ThreadRun() {
if (!core_audio_utility::IsMMCSSSupported()) {
RTC_LOG(LS_ERROR) << "MMCSS is not supported";
return;
}
RTC_DLOG(INFO) << "ThreadRun starts...";
// TODO(henrika): difference between "Pro Audio" and "Audio"?
ScopedMMCSSRegistration mmcss_registration(L"Pro Audio");
ScopedCOMInitializer com_initializer(ScopedCOMInitializer::kMTA);
RTC_DCHECK(mmcss_registration.Succeeded());
RTC_DCHECK(com_initializer.Succeeded());
RTC_DCHECK(stop_event_.IsValid());
RTC_DCHECK(audio_samples_event_.IsValid());
bool streaming = true;
bool error = false;
HANDLE wait_array[] = {stop_event_.Get(), audio_samples_event_.Get()};
// The device frequency is the frequency generated by the hardware clock in
// the audio device. The GetFrequency() method reports a constant frequency.
UINT64 device_frequency = 0;
if (audio_clock_.Get()) {
RTC_DCHECK(IsOutput());
_com_error result = audio_clock_->GetFrequency(&device_frequency);
if ((error = result.Error()) != S_OK) {
RTC_LOG(LS_ERROR) << "IAudioClock::GetFrequency failed: "
<< core_audio_utility::ErrorToString(error);
}
}
// Keep streaming audio until the stop event or the stream-switch event
// is signaled. An error event can also break the main thread loop.
while (streaming && !error) {
// Wait for a close-down event, stream-switch event or a new render event.
DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array),
wait_array, false, INFINITE);
switch (wait_result) {
case WAIT_OBJECT_0 + 0:
// |stop_event_| has been set.
streaming = false;
break;
case WAIT_OBJECT_0 + 1:
// |audio_samples_event_| has been set.
error = !on_data_callback_(device_frequency);
break;
default:
error = true;
break;
}
}
if (streaming && error) {
RTC_LOG(LS_ERROR) << "WASAPI streaming failed.";
// Stop audio streaming since something has gone wrong in our main thread
// loop. Note that, we are still in a "started" state, hence a Stop() call
// is required to join the thread properly.
audio_client_->Stop();
// TODO(henrika): notify clients that something has gone wrong and that
// this stream should be destroyed instead of reused in the future.
}
RTC_DLOG(INFO) << "...ThreadRun stops";
}
} // namespace webrtc_win
} // namespace webrtc

View File

@ -0,0 +1,102 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_BASE_WIN_H_
#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_BASE_WIN_H_
#include <functional>
#include <memory>
#include <string>
#include "modules/audio_device/win/core_audio_utility_win.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/thread_checker.h"
namespace webrtc {
class AudioDeviceBuffer;
class FineAudioBuffer;
namespace webrtc_win {
// Serves as base class for CoreAudioInput and CoreAudioOutput and supports
// device handling and audio streaming where the direction (input or output)
// is set at constructions by the parent.
// Serves as base class for CoreAudioInput and CoreAudioOutput and supports
// device handling and audio streaming where the direction (input or output)
// is set at constructions by the parent.
class CoreAudioBase {
 public:
  enum class Direction {
    kInput,
    kOutput,
  };
  // Callback definition for notifications of new audio data. For input
  // clients, it means that "new audio data has now been captured", and for
  // output clients, "the output layer now needs new audio data". Returning
  // false signals an error and stops the streaming loop.
  typedef std::function<bool(uint64_t device_frequency)> OnDataCallback;
  explicit CoreAudioBase(Direction direction, OnDataCallback callback);
  ~CoreAudioBase();
  // Maps an enumeration index to a unique device id; empty string on failure.
  std::string GetDeviceID(int index) const;
  // Retrieves friendly name (and optional unique id) of a device.
  int DeviceName(int index, std::string* name, std::string* guid) const;
  // Creates and initializes the IAudioClient for |device_id_|.
  bool Init();
  // Starts the real-time audio thread and the stream; Stop() reverses it.
  bool Start();
  bool Stop();
  // Probes whether audio-session volume control works for this stream.
  bool IsVolumeControlAvailable(bool* available) const;
  Direction direction() const { return direction_; }
  // Body of the real-time audio thread; public so the thread entry point can
  // call it but not intended for direct use.
  void ThreadRun();
  CoreAudioBase(const CoreAudioBase&) = delete;
  CoreAudioBase& operator=(const CoreAudioBase&) = delete;
 protected:
  // Returns number of active devices given the specified |direction_|.
  int NumberOfActiveDevices() const;
  // Returns total number of enumerated audio devices which is the sum of all
  // active devices plus two extra (one default and one default
  // communications). The value in |direction_| determines if capture or
  // render devices are counted.
  int NumberOfEnumeratedDevices() const;
  bool IsInput() const;
  bool IsOutput() const;
  bool IsDefaultDevice(int index) const;
  bool IsDefaultCommunicationsDevice(int index) const;
  bool IsDefaultDevice(const std::string& device_id) const;
  bool IsDefaultCommunicationsDevice(const std::string& device_id) const;
  // eCapture for input streams, eRender for output streams.
  EDataFlow GetDataFlow() const;
  // Checks calls made on the construction thread.
  rtc::ThreadChecker thread_checker_;
  // Checks calls made on the internal real-time audio thread.
  rtc::ThreadChecker thread_checker_audio_;
  const Direction direction_;
  const OnDataCallback on_data_callback_;
  // Not owned; attached by the subclass via AttachAudioBuffer().
  AudioDeviceBuffer* audio_device_buffer_ = nullptr;
  bool initialized_ = false;
  // Unique id of the selected device; set by the subclass before Init().
  std::string device_id_;
  // Audio format negotiated in Init(); shared-mode PCM.
  WAVEFORMATEXTENSIBLE format_ = {};
  uint32_t endpoint_buffer_size_frames_ = 0;
  Microsoft::WRL::ComPtr<IAudioClient> audio_client_;
  Microsoft::WRL::ComPtr<IAudioClock> audio_clock_;
  // Signaled by the audio engine when a buffer is ready (event-driven mode).
  ScopedHandle audio_samples_event_;
  // Signaled in StopThread() to make ThreadRun() exit its loop.
  ScopedHandle stop_event_;
  std::unique_ptr<rtc::PlatformThread> audio_thread_;
 private:
  void StopThread();
};
} // namespace webrtc_win
} // namespace webrtc
#endif // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_BASE_WIN_H_

View File

@ -0,0 +1,342 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_device/win/core_audio_input_win.h"
#include "modules/audio_device/audio_device_buffer.h"
#include "modules/audio_device/fine_audio_buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/ptr_util.h"
using Microsoft::WRL::ComPtr;
namespace webrtc {
namespace webrtc_win {
// Constructs the capture side. The lambda routes the base class's
// buffer-ready notifications (fired on the audio thread) into
// OnDataCallback(); capturing |this| is safe since the thread is joined
// before destruction.
CoreAudioInput::CoreAudioInput()
    : CoreAudioBase(CoreAudioBase::Direction::kInput,
                    [this](uint64_t freq) { return OnDataCallback(freq); }) {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  // The audio-thread checker must attach to the audio thread, not to the
  // construction thread.
  thread_checker_audio_.DetachFromThread();
}
CoreAudioInput::~CoreAudioInput() {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
}
// Resets the capture side to a known stopped state. Always returns 0.
int CoreAudioInput::Init() {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  // StopRecording() releases any previously allocated COM resources and
  // clears |initialized_|.
  StopRecording();
  return 0;
}
// NOTE(review): Terminate() does not call StopRecording() — confirm callers
// always stop recording before terminating, or mirror Init() here.
int CoreAudioInput::Terminate() {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  return 0;
}
// Returns the number of active capture devices on the system.
int CoreAudioInput::NumDevices() const {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  const int num_capture_devices =
      core_audio_utility::NumberOfActiveDevices(eCapture);
  return num_capture_devices;
}
// Selects the capture device at enumeration position |index|. Fails (-1) if
// recording is already initialized or if no device id can be resolved.
int CoreAudioInput::SetDevice(int index) {
  RTC_DLOG(INFO) << __FUNCTION__ << ": " << index;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  if (initialized_) {
    // Device selection is only allowed before InitRecording().
    return -1;
  }
  std::string device_id = GetDeviceID(index);
  RTC_DLOG(INFO) << "index=" << index << " => device_id: " << device_id;
  device_id_ = device_id;
  if (device_id_.empty()) {
    return -1;
  }
  return 0;
}
// Convenience overload: maps the symbolic Windows device type onto the
// corresponding enumeration index (0 = default, 1 = default communications).
int CoreAudioInput::SetDevice(AudioDeviceModule::WindowsDeviceType device) {
  RTC_DLOG(INFO) << __FUNCTION__ << ": " << device;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  int index;
  if (device == AudioDeviceModule::kDefaultDevice) {
    index = 0;
  } else {
    index = 1;
  }
  return SetDevice(index);
}
// Thin wrapper around CoreAudioBase::DeviceName that adds thread checking
// and requires a non-null |name|; |guid| may be null.
int CoreAudioInput::DeviceName(int index,
                               std::string* name,
                               std::string* guid) {
  RTC_DLOG(INFO) << __FUNCTION__ << ": " << index;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_DCHECK(name);
  return CoreAudioBase::DeviceName(index, name, guid);
}
// Stores the (caller-owned) buffer used to deliver captured audio to the
// WebRTC layer. Must be called before InitRecording().
void CoreAudioInput::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  audio_device_buffer_ = audio_buffer;
}
// True between a successful InitRecording() and StopRecording().
bool CoreAudioInput::RecordingIsInitialized() const {
  RTC_DLOG(INFO) << __FUNCTION__ << ": " << initialized_;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  return initialized_;
}
// Prepares the capture stream: creates the IAudioClient (via the base
// class), configures the audio device buffer with the negotiated format,
// creates the FineAudioBuffer adapter and the IAudioCaptureClient, and
// caches the QPC-to-100ns conversion factor used for latency estimation.
// Returns 0 on success, -1 on any failure. Precondition: not initialized,
// not recording, device and buffer already set.
int CoreAudioInput::InitRecording() {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_DCHECK(!initialized_);
  RTC_DCHECK(!Recording());
  RTC_DCHECK(!audio_client_.Get());
  RTC_DCHECK(!audio_capture_client_.Get());
  // Create an IAudioClient and store the valid interface pointer in
  // |audio_client_|. The base class will use optimal input parameters and do
  // an event driven shared mode initialization. The utilized format will be
  // stored in |format_| and can be used for configuration and allocation of
  // audio buffers.
  if (!CoreAudioBase::Init()) {
    return -1;
  }
  RTC_DCHECK(audio_client_.Get());
  // Configure the recording side of the audio device buffer using |format_|
  // after a trivial sanity check of the format structure.
  RTC_DCHECK(audio_device_buffer_);
  WAVEFORMATEX* format = &format_.Format;
  RTC_DCHECK_EQ(format->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
  audio_device_buffer_->SetRecordingSampleRate(format->nSamplesPerSec);
  audio_device_buffer_->SetRecordingChannels(format->nChannels);
  // Create a modified audio buffer class which allows us to supply any number
  // of samples (and not only multiple of 10ms) to match the optimal buffer
  // size per callback used by Core Audio.
  // TODO(henrika): can we share one FineAudioBuffer?
  fine_audio_buffer_ = rtc::MakeUnique<FineAudioBuffer>(audio_device_buffer_);
  // Create an IAudioCaptureClient for an initialized IAudioClient.
  // The IAudioCaptureClient interface enables a client to read input data from
  // a capture endpoint buffer.
  ComPtr<IAudioCaptureClient> audio_capture_client =
      core_audio_utility::CreateCaptureClient(audio_client_.Get());
  if (!audio_capture_client.Get()) {
    return -1;
  }
  // Query performance frequency. If it fails, |qpc_to_100ns_| stays unset and
  // latency estimation is disabled (see EstimateLatencyMillis()).
  LARGE_INTEGER ticks_per_sec = {};
  qpc_to_100ns_.reset();
  if (::QueryPerformanceFrequency(&ticks_per_sec)) {
    double qpc_ticks_per_second =
        rtc::dchecked_cast<double>(ticks_per_sec.QuadPart);
    qpc_to_100ns_ = 10000000.0 / qpc_ticks_per_second;
  }
  // Store valid COM interfaces. Note that, |audio_client_| has already been
  // set in CoreAudioBase::Init().
  audio_capture_client_ = audio_capture_client;
  initialized_ = true;
  return 0;
}
// Starts the capture stream (audio thread + IAudioClient). Returns 0 on
// success, -1 if the underlying Start() fails.
// NOTE(review): when not initialized this returns 0 (success) after only a
// warning — confirm this lenient behavior is what the ADM contract expects.
int CoreAudioInput::StartRecording() {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_DCHECK(!Recording());
  if (!initialized_) {
    RTC_DLOG(LS_WARNING)
        << "Recording can not start since InitRecording must succeed first";
    return 0;
  }
  // Discard any stale samples buffered from a previous session.
  if (fine_audio_buffer_) {
    fine_audio_buffer_->ResetRecord();
  }
  if (!Start()) {
    return -1;
  }
  return 0;
}
// Stops the capture stream and releases resources allocated in
// InitRecording(). Safe to call in any state: a no-op when uninitialized,
// and a pure resource release when initialized but not recording.
// Returns 0 on success, -1 if the underlying Stop() fails.
int CoreAudioInput::StopRecording() {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  if (!initialized_) {
    return 0;
  }
  // Release resources allocated in InitRecording() and then return if this
  // method is called without any active input audio.
  if (!Recording()) {
    RTC_DLOG(WARNING) << "No input stream is active";
    audio_client_.Reset();
    audio_capture_client_.Reset();
    initialized_ = false;
    return 0;
  }
  if (!Stop()) {
    RTC_LOG(LS_ERROR) << "StopRecording failed";
    return -1;
  }
  // TODO(henrika): if we want to support Init(), Start(), Stop(), Init(),
  // Start(), Stop() without close in between, these lines are needed.
  // Not supported on mobile ADMs, hence we can probably live without it.
  // audio_client_.Reset();
  // audio_capture_client_.Reset();
  // audio_device_buffer_->NativeAudioRecordingInterrupted();
  // The audio thread is gone; allow the checker to re-attach to the next one.
  thread_checker_audio_.DetachFromThread();
  qpc_to_100ns_.reset();
  initialized_ = false;
  return 0;
}
// Recording is considered active exactly while the base class's audio thread
// object exists (created in Start(), destroyed in StopThread()).
bool CoreAudioInput::Recording() {
  const bool is_recording = audio_thread_ != nullptr;
  RTC_DLOG(INFO) << __FUNCTION__ << ": " << is_recording;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  return is_recording;
}
// TODO(henrika): finalize support of audio session volume control. As is, we
// are not compatible with the old ADM implementation since it allows accessing
// the volume control with any active audio output stream.
// Delegates to the base-class probe; 0 means the query itself succeeded and
// |*available| is valid, -1 means the query failed.
int CoreAudioInput::VolumeIsAvailable(bool* available) {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  return IsVolumeControlAvailable(available) ? 0 : -1;
}
// Invoked on the real-time audio thread each time the audio engine signals
// that captured data is ready. Drains every pending packet from the WASAPI
// capture buffer and forwards it to the WebRTC layer via |fine_audio_buffer_|.
// Returns true to keep streaming; false on a COM error (which stops the
// streaming loop in CoreAudioBase::ThreadRun()).
bool CoreAudioInput::OnDataCallback(uint64_t device_frequency) {
  RTC_DCHECK_RUN_ON(&thread_checker_audio_);
  UINT32 num_frames_in_next_packet = 0;
  _com_error error =
      audio_capture_client_->GetNextPacketSize(&num_frames_in_next_packet);
  if (error.Error() != S_OK) {
    RTC_LOG(LS_ERROR) << "IAudioCaptureClient::GetNextPacketSize failed: "
                      << core_audio_utility::ErrorToString(error);
    return false;
  }
  // Drain the WASAPI capture buffer fully if audio has been recorded.
  while (num_frames_in_next_packet > 0) {
    uint8_t* audio_data;
    UINT32 num_frames_to_read = 0;
    DWORD flags = 0;
    UINT64 device_position_frames = 0;
    UINT64 capture_time_100ns = 0;
    error = audio_capture_client_->GetBuffer(&audio_data, &num_frames_to_read,
                                             &flags, &device_position_frames,
                                             &capture_time_100ns);
    if (error.Error() == AUDCLNT_S_BUFFER_EMPTY) {
      // The call succeeded but no capture data is available to be read.
      // Return and start waiting for new capture event
      RTC_DCHECK_EQ(num_frames_to_read, 0u);
      return true;
    }
    if (error.Error() != S_OK) {
      RTC_LOG(LS_ERROR) << "IAudioCaptureClient::GetBuffer failed: "
                        << core_audio_utility::ErrorToString(error);
      return false;
    }
    // TODO(henrika): only update the latency estimate N times per second to
    // save resources.
    // TODO(henrika): note that FineAudioBuffer adds latency as well.
    auto opt_record_delay_ms = EstimateLatencyMillis(capture_time_100ns);
    // The data in the packet is not correlated with the previous packet's
    // device position; possibly due to a stream state transition or timing
    // glitch. The behavior of the AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY flag
    // is undefined on the application's first call to GetBuffer after Start.
    if (device_position_frames != 0 &&
        flags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) {
      RTC_DLOG(LS_WARNING) << "AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY";
    }
    // The time at which the device's stream position was recorded is uncertain.
    // Thus, the client might be unable to accurately set a time stamp for the
    // current data packet.
    if (flags & AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR) {
      RTC_DLOG(LS_WARNING) << "AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR";
    }
    // Treat all of the data in the packet as silence and ignore the actual
    // data values when AUDCLNT_BUFFERFLAGS_SILENT is set.
    if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
      rtc::ExplicitZeroMemory(audio_data,
                              format_.Format.nBlockAlign * num_frames_to_read);
      RTC_DLOG(LS_WARNING) << "Captured audio is replaced by silence";
    } else {
      // Copy recorded audio in |audio_data| to the WebRTC sink using the
      // FineAudioBuffer object.
      // TODO(henrika): fix delay estimation.
      int record_delay_ms = 0;
      if (opt_record_delay_ms) {
        record_delay_ms = *opt_record_delay_ms;
        // RTC_DLOG(INFO) << "record_delay_ms: " << record_delay_ms;
      }
      fine_audio_buffer_->DeliverRecordedData(
          rtc::MakeArrayView(reinterpret_cast<const int16_t*>(audio_data),
                             format_.Format.nChannels * num_frames_to_read),
          record_delay_ms);
    }
    // Always release exactly the number of frames obtained from GetBuffer().
    error = audio_capture_client_->ReleaseBuffer(num_frames_to_read);
    if (error.Error() != S_OK) {
      RTC_LOG(LS_ERROR) << "IAudioCaptureClient::ReleaseBuffer failed: "
                        << core_audio_utility::ErrorToString(error);
      return false;
    }
    error =
        audio_capture_client_->GetNextPacketSize(&num_frames_in_next_packet);
    if (error.Error() != S_OK) {
      RTC_LOG(LS_ERROR) << "IAudioCaptureClient::GetNextPacketSize failed: "
                        << core_audio_utility::ErrorToString(error);
      return false;
    }
  }
  return true;
}
rtc::Optional<int> CoreAudioInput::EstimateLatencyMillis(
    uint64_t capture_time_100ns) {
  // Without a valid QPC-to-100ns conversion factor no estimate can be made.
  if (!qpc_to_100ns_) {
    return rtc::nullopt;
  }
  // |capture_time_100ns| is the performance-counter value, already converted
  // into 100ns units, sampled when the audio endpoint device recorded the
  // device position of the first audio frame in the data packet. The delay is
  // derived by sampling the performance counter now, converting it into the
  // same 100ns units, and subtracting |capture_time_100ns|.
  LARGE_INTEGER qpc_now = {};
  if (!::QueryPerformanceCounter(&qpc_now)) {
    return rtc::nullopt;
  }
  const uint64_t now_time_100ns =
      static_cast<uint64_t>(qpc_now.QuadPart) * (*qpc_to_100ns_);
  // Scale the 100ns delta into microseconds (x0.1) with rounding (+0.5) and
  // report the result in milliseconds.
  const webrtc::TimeDelta capture_delay = webrtc::TimeDelta::us(
      0.1 * (now_time_100ns - capture_time_100ns) + 0.5);
  return capture_delay.ms();
}
} // namespace webrtc_win
} // namespace webrtc

View File

@ -0,0 +1,67 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_INPUT_WIN_H_
#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_INPUT_WIN_H_
#include <memory>
#include <string>
#include "api/optional.h"
#include "modules/audio_device/win/audio_device_module_win.h"
#include "modules/audio_device/win/core_audio_base_win.h"
namespace webrtc {
class AudioDeviceBuffer;
class FineAudioBuffer;
namespace webrtc_win {
// Windows specific AudioInput implementation using a CoreAudioBase class where
// an input direction is set at construction. Supports capture device handling
// and streaming of captured audio to a WebRTC client.
class CoreAudioInput final : public CoreAudioBase, public AudioInput {
 public:
  CoreAudioInput();
  ~CoreAudioInput() override;
  // AudioInput implementation.
  int Init() override;
  int Terminate() override;
  int NumDevices() const override;
  int SetDevice(int index) override;
  int SetDevice(AudioDeviceModule::WindowsDeviceType device) override;
  int DeviceName(int index, std::string* name, std::string* guid) override;
  void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) override;
  bool RecordingIsInitialized() const override;
  int InitRecording() override;
  int StartRecording() override;
  int StopRecording() override;
  bool Recording() override;
  int VolumeIsAvailable(bool* available) override;
  CoreAudioInput(const CoreAudioInput&) = delete;
  CoreAudioInput& operator=(const CoreAudioInput&) = delete;
 private:
  // Registered with the base class at construction and invoked on the
  // dedicated audio thread for each capture event. Returns false on failure.
  bool OnDataCallback(uint64_t device_frequency);
  // Converts |capture_time_100ns| (QPC value in 100ns units for the first
  // frame of a captured packet) into an estimated capture delay in
  // milliseconds. Returns rtc::nullopt if |qpc_to_100ns_| is unset or if
  // the performance counter cannot be read.
  rtc::Optional<int> EstimateLatencyMillis(uint64_t capture_time_100ns);
  // Adapts between native buffer sizes and WebRTC's 10ms-chunk delivery.
  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
  // WASAPI interface used to read captured data from the endpoint buffer.
  Microsoft::WRL::ComPtr<IAudioCaptureClient> audio_capture_client_;
  // Conversion factor from raw QPC ticks to 100ns units; unset means that
  // latency estimation is unavailable.
  rtc::Optional<double> qpc_to_100ns_;
};
} // namespace webrtc_win
} // namespace webrtc
#endif // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_INPUT_WIN_H_

View File

@ -0,0 +1,306 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_device/win/core_audio_output_win.h"
#include "modules/audio_device/audio_device_buffer.h"
#include "modules/audio_device/fine_audio_buffer.h"
#include "rtc_base/bind.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/ptr_util.h"
using Microsoft::WRL::ComPtr;
namespace webrtc {
namespace webrtc_win {
// Constructs the render-side implementation; the base class is configured for
// the output direction and OnDataCallback() is registered as the callback
// that is invoked for each render event.
CoreAudioOutput::CoreAudioOutput()
    : CoreAudioBase(CoreAudioBase::Direction::kOutput,
                    [this](uint64_t freq) { return OnDataCallback(freq); }) {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  // Detach so the checker binds to whichever thread first runs the audio
  // callback, not the construction thread.
  thread_checker_audio_.DetachFromThread();
}
CoreAudioOutput::~CoreAudioOutput() {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  // Ensures that any active playout is stopped and resources are released.
  Terminate();
}
// No explicit initialization work is required; always succeeds (returns 0).
int CoreAudioOutput::Init() {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  return 0;
}
// Stops playout (releasing resources allocated in InitPlayout) and returns 0.
int CoreAudioOutput::Terminate() {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  StopPlayout();
  return 0;
}
// Returns the number of active audio rendering (eRender) endpoint devices.
int CoreAudioOutput::NumDevices() const {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  return core_audio_utility::NumberOfActiveDevices(eRender);
}
int CoreAudioOutput::SetDevice(int index) {
RTC_DLOG(INFO) << __FUNCTION__ << ": " << index;
RTC_DCHECK_RUN_ON(&thread_checker_);
if (initialized_) {
return -1;
}
std::string device_id = GetDeviceID(index);
RTC_DLOG(INFO) << "index=" << index << " => device_id: " << device_id;
device_id_ = device_id;
return device_id_.empty() ? -1 : 0;
}
// Selects a default output device. kDefaultDevice maps to index 0; any other
// device type maps to index 1.
int CoreAudioOutput::SetDevice(AudioDeviceModule::WindowsDeviceType device) {
  RTC_DLOG(INFO) << __FUNCTION__ << ": " << device;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  int index = 1;
  if (device == AudioDeviceModule::kDefaultDevice) {
    index = 0;
  }
  return SetDevice(index);
}
// Retrieves the friendly name and unique id of the output device at |index|.
// |name| must be non-null; delegates to the shared base-class implementation.
int CoreAudioOutput::DeviceName(int index,
                                std::string* name,
                                std::string* guid) {
  RTC_DLOG(INFO) << __FUNCTION__ << ": " << index;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_DCHECK(name);
  return CoreAudioBase::DeviceName(index, name, guid);
}
// Stores a raw pointer to the shared audio device buffer; ownership stays
// with the caller (the ADM). Used later in InitPlayout() for configuration.
void CoreAudioOutput::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  audio_device_buffer_ = audio_buffer;
}
// True once InitPlayout() has completed successfully and until StopPlayout()
// resets the state.
bool CoreAudioOutput::PlayoutIsInitialized() const {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  return initialized_;
}
// Prepares all COM interfaces and buffers required for playout. Must be
// called before StartPlayout(). Returns 0 on success, -1 on failure.
int CoreAudioOutput::InitPlayout() {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_DCHECK(!initialized_);
  RTC_DCHECK(!Playing());
  RTC_DCHECK(!audio_client_.Get());
  RTC_DCHECK(!audio_render_client_.Get());
  // Create an IAudioClient client and store the valid interface pointer in
  // |audio_client_|. The base class will use optimal output parameters and do
  // an event driven shared mode initialization. The utilized format will be
  // stored in |format_| and can be used for configuration and allocation of
  // audio buffers.
  if (!CoreAudioBase::Init()) {
    return -1;
  }
  RTC_DCHECK(audio_client_.Get());
  // Configure the playout side of the audio device buffer using |format_|
  // after a trivial sanity check of the format structure.
  RTC_DCHECK(audio_device_buffer_);
  WAVEFORMATEX* format = &format_.Format;
  RTC_DCHECK_EQ(format->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
  audio_device_buffer_->SetPlayoutSampleRate(format->nSamplesPerSec);
  audio_device_buffer_->SetPlayoutChannels(format->nChannels);
  // Create a modified audio buffer class which allows us to ask for any number
  // of samples (and not only multiple of 10ms) to match the optimal
  // buffer size per callback used by Core Audio.
  // TODO(henrika): can we use a shared buffer instead?
  fine_audio_buffer_ = rtc::MakeUnique<FineAudioBuffer>(audio_device_buffer_);
  // Create an IAudioRenderClient for an initialized IAudioClient.
  // The IAudioRenderClient interface enables us to write output data to
  // a rendering endpoint buffer.
  ComPtr<IAudioRenderClient> audio_render_client =
      core_audio_utility::CreateRenderClient(audio_client_.Get());
  if (!audio_render_client.Get())
    return -1;
  // Create an IAudioClock which exposes the stream position/clock; used by
  // EstimateOutputLatencyMillis() to derive the playout delay.
  ComPtr<IAudioClock> audio_clock =
      core_audio_utility::CreateAudioClock(audio_client_.Get());
  if (!audio_clock.Get())
    return -1;
  // Store valid COM interfaces. Note that, |audio_client_| has already been
  // set in CoreAudioBase::Init().
  audio_render_client_ = audio_render_client;
  audio_clock_ = audio_clock;
  initialized_ = true;
  return 0;
}
// Starts streaming of output audio. InitPlayout() must have succeeded first;
// if not, this call is a no-op that returns 0. Returns -1 if the underlying
// stream fails to start.
int CoreAudioOutput::StartPlayout() {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_DCHECK(!Playing());
  if (!initialized_) {
    RTC_DLOG(LS_WARNING)
        << "Playout can not start since InitPlayout must succeed first";
    return 0;
  }
  if (fine_audio_buffer_) {
    fine_audio_buffer_->ResetPlayout();
  }
  // Pre-fill the endpoint buffer with silence to avoid playing out stale data
  // at stream start. A failure here is logged but not fatal.
  if (!core_audio_utility::FillRenderEndpointBufferWithSilence(
          audio_client_.Get(), audio_render_client_.Get())) {
    RTC_LOG(LS_WARNING) << "Failed to prepare output endpoint with silence";
  }
  // Account for the pre-filled silent frames so that the latency estimation
  // in OnDataCallback() stays consistent.
  num_frames_written_ = endpoint_buffer_size_frames_;
  if (!Start()) {
    return -1;
  }
  return 0;
}
// Stops streaming and releases resources allocated in InitPlayout(). Safe to
// call when playout is not initialized (returns 0 immediately).
int CoreAudioOutput::StopPlayout() {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  if (!initialized_) {
    return 0;
  }
  // Release resources allocated in InitPlayout() and then return if this
  // method is called without any active output audio.
  if (!Playing()) {
    RTC_DLOG(WARNING) << "No output stream is active";
    // NOTE(review): |audio_clock_| is not reset here while the other COM
    // interfaces are — confirm whether it should be released as well.
    audio_client_.Reset();
    audio_render_client_.Reset();
    initialized_ = false;
    return 0;
  }
  if (!Stop()) {
    RTC_LOG(LS_ERROR) << "StopPlayout failed";
    return -1;
  }
  // Allow the audio-thread checker to re-bind to a new thread on restart.
  thread_checker_audio_.DetachFromThread();
  initialized_ = false;
  return 0;
}
// Playout is considered active for as long as the audio thread exists.
bool CoreAudioOutput::Playing() {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  const bool is_playing = (audio_thread_ != nullptr);
  return is_playing;
}
// TODO(henrika): finalize support of audio session volume control. As is, we
// are not compatible with the old ADM implementation since it allows accessing
// the volume control with any active audio output stream.
// Sets |*available| to whether a session volume control can be used; returns
// 0 on success and -1 on failure.
int CoreAudioOutput::VolumeIsAvailable(bool* available) {
  RTC_DLOG(INFO) << __FUNCTION__;
  RTC_DCHECK_RUN_ON(&thread_checker_);
  const bool query_ok = IsVolumeControlAvailable(available);
  if (!query_ok) {
    return -1;
  }
  return 0;
}
// Render callback invoked on the dedicated audio thread for each render
// event. Pulls decoded audio from WebRTC via |fine_audio_buffer_| and writes
// it into the shared-mode endpoint buffer. Returns false on any COM failure,
// true otherwise.
bool CoreAudioOutput::OnDataCallback(uint64_t device_frequency) {
  RTC_DCHECK_RUN_ON(&thread_checker_audio_);
  // Get the padding value which indicates the amount of valid unread data that
  // the endpoint buffer currently contains.
  UINT32 num_unread_frames = 0;
  _com_error error = audio_client_->GetCurrentPadding(&num_unread_frames);
  if (error.Error() != S_OK) {
    RTC_LOG(LS_ERROR) << "IAudioClient::GetCurrentPadding failed: "
                      << core_audio_utility::ErrorToString(error);
    return false;
  }
  // Contains how much new data we can write to the buffer without the risk of
  // overwriting previously written data that the audio engine has not yet read
  // from the buffer. I.e., it is the maximum buffer size we can request when
  // calling IAudioRenderClient::GetBuffer().
  UINT32 num_requested_frames =
      endpoint_buffer_size_frames_ - num_unread_frames;
  // Request all available space in the rendering endpoint buffer into which the
  // client can later write an audio packet.
  uint8_t* audio_data;
  error = audio_render_client_->GetBuffer(num_requested_frames, &audio_data);
  if (error.Error() != S_OK) {
    RTC_LOG(LS_ERROR) << "IAudioRenderClient::GetBuffer failed: "
                      << core_audio_utility::ErrorToString(error);
    return false;
  }
  // TODO(henrika): only update the latency estimate N times per second to
  // save resources.
  // TODO(henrika): note that FineAudioBuffer adds latency as well.
  int playout_delay_ms = EstimateOutputLatencyMillis(device_frequency);
  // RTC_DLOG(INFO) << "playout_delay_ms: " << playout_delay_ms;
  // Get audio data from WebRTC and write it to the allocated buffer in
  // |audio_data|.
  fine_audio_buffer_->GetPlayoutData(
      rtc::MakeArrayView(reinterpret_cast<int16_t*>(audio_data),
                         num_requested_frames * format_.Format.nChannels),
      playout_delay_ms);
  // Release the buffer space acquired in IAudioRenderClient::GetBuffer.
  error = audio_render_client_->ReleaseBuffer(num_requested_frames, 0);
  if (error.Error() != S_OK) {
    RTC_LOG(LS_ERROR) << "IAudioRenderClient::ReleaseBuffer failed: "
                      << core_audio_utility::ErrorToString(error);
    return false;
  }
  // Track the total number of frames written; used by the latency estimate.
  num_frames_written_ += num_requested_frames;
  return true;
}
// TODO(henrika): IAudioClock2::GetDevicePosition could perhaps be used here
// instead. Tried it once, but it crashed for capture devices.
// Estimates the playout delay in milliseconds as the difference between the
// number of frames written to the endpoint buffer and the number of frames
// the device has actually played out. Returns 0 if the position query fails.
int CoreAudioOutput::EstimateOutputLatencyMillis(uint64_t device_frequency) {
  UINT64 position = 0;
  UINT64 qpc_position = 0;
  int delay_ms = 0;
  // Get the device position through output parameter |position|. This is the
  // stream position of the sample that is currently playing through the
  // speakers.
  _com_error error = audio_clock_->GetPosition(&position, &qpc_position);
  if (error.Error() == S_OK) {
    // Number of frames already played out through the speaker.
    const uint64_t num_played_out_frames =
        format_.Format.nSamplesPerSec * position / device_frequency;
    // Number of frames that have been written to the buffer but not yet
    // played out corresponding to the estimated latency measured in number
    // of audio frames.
    // NOTE(review): assumes |num_frames_written_| >= |num_played_out_frames|;
    // an unsigned underflow here would produce a huge delay — confirm the
    // invariant holds across stream restarts.
    const uint64_t delay_frames = num_frames_written_ - num_played_out_frames;
    // Convert latency in number of frames into milliseconds.
    webrtc::TimeDelta delay = webrtc::TimeDelta::us(
        delay_frames * kNumMicrosecsPerSec / format_.Format.nSamplesPerSec);
    delay_ms = delay.ms();
  }
  return delay_ms;
}
} // namespace webrtc_win
} // namespace webrtc

View File

@ -0,0 +1,66 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_OUTPUT_WIN_H_
#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_OUTPUT_WIN_H_
#include <memory>
#include <string>
#include "modules/audio_device/win/audio_device_module_win.h"
#include "modules/audio_device/win/core_audio_base_win.h"
namespace webrtc {
class AudioDeviceBuffer;
class FineAudioBuffer;
namespace webrtc_win {
// Windows specific AudioOutput implementation using a CoreAudioBase class where
// an output direction is set at construction. Supports render device handling
// and streaming of decoded audio from a WebRTC client to the native audio
// layer.
class CoreAudioOutput final : public CoreAudioBase, public AudioOutput {
 public:
  CoreAudioOutput();
  ~CoreAudioOutput() override;
  // AudioOutput implementation.
  int Init() override;
  int Terminate() override;
  int NumDevices() const override;
  int SetDevice(int index) override;
  int SetDevice(AudioDeviceModule::WindowsDeviceType device) override;
  int DeviceName(int index, std::string* name, std::string* guid) override;
  void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) override;
  bool PlayoutIsInitialized() const override;
  int InitPlayout() override;
  int StartPlayout() override;
  int StopPlayout() override;
  bool Playing() override;
  int VolumeIsAvailable(bool* available) override;
  CoreAudioOutput(const CoreAudioOutput&) = delete;
  CoreAudioOutput& operator=(const CoreAudioOutput&) = delete;
 private:
  // Registered with the base class at construction; invoked on the dedicated
  // audio thread for each render event. Returns false on failure.
  bool OnDataCallback(uint64_t device_frequency);
  // Estimates the current playout delay in milliseconds based on the stream
  // position reported by the audio clock; returns 0 on query failure.
  int EstimateOutputLatencyMillis(uint64_t device_frequency);
  // Adapts between native buffer sizes and WebRTC's 10ms-chunk delivery.
  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
  // WASAPI interface used to write output data to the endpoint buffer.
  Microsoft::WRL::ComPtr<IAudioRenderClient> audio_render_client_;
  // Total number of frames written to the endpoint buffer since StartPlayout;
  // consumed by EstimateOutputLatencyMillis().
  uint64_t num_frames_written_ = 0;
};
} // namespace webrtc_win
} // namespace webrtc
#endif // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_OUTPUT_WIN_H_

View File

@ -11,6 +11,7 @@
#include "modules/audio_device/win/core_audio_utility_win.h"
#include <Functiondiscoverykeys_devpkey.h>
#include <atlbase.h>
#include <stdio.h>
#include <tchar.h>
@ -24,6 +25,7 @@
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/stringutils.h"
using ATL::CComHeapPtr;
using Microsoft::WRL::ComPtr;
using webrtc::AudioDeviceName;
using webrtc::AudioParameters;
@ -44,13 +46,21 @@ bool LoadAudiosesDll() {
nullptr);
}
bool LoadAvrtDll() {
static const wchar_t* const kAvrtDLL = L"%WINDIR%\\system32\\Avrt.dll";
wchar_t path[MAX_PATH] = {0};
ExpandEnvironmentStringsW(kAvrtDLL, path, arraysize(path));
RTC_DLOG(INFO) << rtc::ToUtf8(path);
return (LoadLibraryExW(path, nullptr, LOAD_WITH_ALTERED_SEARCH_PATH) !=
nullptr);
}
ComPtr<IMMDeviceEnumerator> CreateDeviceEnumeratorInternal(
bool allow_reinitialize) {
ComPtr<IMMDeviceEnumerator> device_enumerator;
_com_error error = ::CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr,
CLSCTX_INPROC_SERVER,
_com_error error =
::CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL,
IID_PPV_ARGS(&device_enumerator));
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "CoCreateInstance failed: " << ErrorToString(error);
}
@ -64,8 +74,7 @@ ComPtr<IMMDeviceEnumerator> CreateDeviceEnumeratorInternal(
error = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
if (error.Error() != S_OK) {
error = ::CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr,
CLSCTX_INPROC_SERVER,
IID_PPV_ARGS(&device_enumerator));
CLSCTX_ALL, IID_PPV_ARGS(&device_enumerator));
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "CoCreateInstance failed: "
<< ErrorToString(error);
@ -150,7 +159,7 @@ ComPtr<IMMDevice> CreateDeviceInternal(const std::string& device_id,
std::string GetDeviceIdInternal(IMMDevice* device) {
// Retrieve unique name of endpoint device.
// Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
ScopedCoMem<WCHAR> device_id;
CComHeapPtr<WCHAR> device_id;
if (SUCCEEDED(device->GetId(&device_id))) {
return rtc::ToUtf8(device_id, wcslen(device_id));
} else {
@ -181,6 +190,45 @@ std::string GetDeviceFriendlyNameInternal(IMMDevice* device) {
}
}
// Activates an IAudioSessionManager2 interface on |audio_device|. Returns a
// null ComPtr if |audio_device| is null or if activation fails; callers must
// check Get() before use.
ComPtr<IAudioSessionManager2> CreateSessionManager2Internal(
    IMMDevice* audio_device) {
  if (!audio_device) {
    return ComPtr<IAudioSessionManager2>();
  }
  ComPtr<IAudioSessionManager2> session_manager;
  const _com_error result =
      audio_device->Activate(__uuidof(IAudioSessionManager2), CLSCTX_ALL,
                             nullptr, &session_manager);
  if (result.Error() != S_OK) {
    RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioSessionManager2) failed: "
                      << ErrorToString(result);
  }
  return session_manager;
}
// Creates an IAudioSessionEnumerator for |audio_device| by first activating
// an IAudioSessionManager2 and then asking it for the session enumerator.
// Returns a null ComPtr if |audio_device| is null or on any failure.
ComPtr<IAudioSessionEnumerator> CreateSessionEnumeratorInternal(
    IMMDevice* audio_device) {
  if (!audio_device) {
    return ComPtr<IAudioSessionEnumerator>();
  }
  ComPtr<IAudioSessionEnumerator> audio_session_enumerator;
  ComPtr<IAudioSessionManager2> audio_session_manager =
      CreateSessionManager2Internal(audio_device);
  if (!audio_session_manager.Get()) {
    return audio_session_enumerator;
  }
  _com_error error =
      audio_session_manager->GetSessionEnumerator(&audio_session_enumerator);
  if (error.Error() != S_OK) {
    // Fix: the failing call is GetSessionEnumerator on the session manager;
    // the previous log text named the wrong interface/method.
    RTC_LOG(LS_ERROR)
        << "IAudioSessionManager2::GetSessionEnumerator failed: "
        << ErrorToString(error);
    return ComPtr<IAudioSessionEnumerator>();
  }
  return audio_session_enumerator;
}
// Creates and activates an IAudioClient COM object given the selected
// endpoint device.
ComPtr<IAudioClient> CreateClientInternal(IMMDevice* audio_device) {
@ -188,8 +236,8 @@ ComPtr<IAudioClient> CreateClientInternal(IMMDevice* audio_device) {
return ComPtr<IAudioClient>();
ComPtr<IAudioClient> audio_client;
_com_error error = audio_device->Activate(
__uuidof(IAudioClient), CLSCTX_INPROC_SERVER, nullptr, &audio_client);
_com_error error = audio_device->Activate(__uuidof(IAudioClient), CLSCTX_ALL,
nullptr, &audio_client);
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioClient) failed: "
<< ErrorToString(error);
@ -202,8 +250,8 @@ ComPtr<IAudioClient2> CreateClient2Internal(IMMDevice* audio_device) {
return ComPtr<IAudioClient2>();
ComPtr<IAudioClient2> audio_client;
_com_error error = audio_device->Activate(
__uuidof(IAudioClient2), CLSCTX_INPROC_SERVER, nullptr, &audio_client);
_com_error error = audio_device->Activate(__uuidof(IAudioClient2), CLSCTX_ALL,
nullptr, &audio_client);
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioClient2) failed: "
<< ErrorToString(error);
@ -346,10 +394,16 @@ HRESULT GetPreferredAudioParametersInternal(IAudioClient* client,
namespace core_audio_utility {
bool IsSupported() {
  RTC_DLOG(INFO) << "IsSupported";
  // The support check is performed once and cached; the result cannot change
  // while the process is running.
  static bool g_is_supported = IsSupportedInternal();
  return g_is_supported;
}
// MMCSS is considered supported if Avrt.dll can be loaded from the system
// directory. Note that this loads the DLL on every call.
bool IsMMCSSSupported() {
  RTC_DLOG(INFO) << "IsMMCSSSupported";
  return LoadAvrtDll();
}
int NumberOfActiveDevices(EDataFlow data_flow) {
// Generate a collection of active audio endpoint devices for the specified
// data-flow direction.
@ -468,6 +522,67 @@ bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names) {
return GetDeviceNamesInternal(eRender, device_names);
}
// Public wrapper; see CreateSessionManager2Internal(). Returns a null ComPtr
// on failure.
ComPtr<IAudioSessionManager2> CreateSessionManager2(IMMDevice* device) {
  RTC_DLOG(INFO) << "CreateSessionManager2";
  return CreateSessionManager2Internal(device);
}
// Public wrapper; see CreateSessionEnumeratorInternal(). Returns a null
// ComPtr on failure.
Microsoft::WRL::ComPtr<IAudioSessionEnumerator> CreateSessionEnumerator(
    IMMDevice* device) {
  RTC_DLOG(INFO) << "CreateSessionEnumerator";
  return CreateSessionEnumeratorInternal(device);
}
// Counts the number of audio sessions on |device| whose state is
// AudioSessionStateActive. Returns 0 on any failure (including a null
// |device|).
int NumberOfActiveSessions(IMMDevice* device) {
  RTC_DLOG(INFO) << "NumberOfActiveSessions";
  ComPtr<IAudioSessionEnumerator> session_enumerator =
      CreateSessionEnumerator(device);
  // Fix: CreateSessionEnumerator() returns a null ComPtr for a null |device|
  // or a failed activation; calling GetCount() on it would dereference a
  // null interface pointer.
  if (!session_enumerator.Get()) {
    return 0;
  }
  // Iterate over all audio sessions for the given device.
  int session_count = 0;
  _com_error error = session_enumerator->GetCount(&session_count);
  if (error.Error() != S_OK) {
    RTC_LOG(LS_ERROR) << "IAudioSessionEnumerator::GetCount failed: "
                      << ErrorToString(error);
    return 0;
  }
  RTC_DLOG(INFO) << "Total number of audio sessions: " << session_count;
  int num_active = 0;
  for (int session = 0; session < session_count; session++) {
    // Acquire the session control interface.
    ComPtr<IAudioSessionControl> session_control;
    error = session_enumerator->GetSession(session, &session_control);
    if (error.Error() != S_OK) {
      RTC_LOG(LS_ERROR) << "IAudioSessionEnumerator::GetSession failed: "
                        << ErrorToString(error);
      return 0;
    }
    // Log the display name of the audio session for debugging purposes.
    CComHeapPtr<WCHAR> display_name;
    if (SUCCEEDED(session_control->GetDisplayName(&display_name))) {
      RTC_DLOG(INFO) << "display name: "
                     << rtc::ToUtf8(display_name, wcslen(display_name));
    }
    // Get the current state and check if the state is active or not.
    AudioSessionState state;
    error = session_control->GetState(&state);
    if (error.Error() != S_OK) {
      RTC_LOG(LS_ERROR) << "IAudioSessionControl::GetState failed: "
                        << ErrorToString(error);
      return 0;
    }
    if (state == AudioSessionStateActive) {
      ++num_active;
    }
  }
  RTC_DLOG(INFO) << "Number of active audio sessions: " << num_active;
  return num_active;
}
ComPtr<IAudioClient> CreateClient(const std::string& device_id,
EDataFlow data_flow,
ERole role) {
@ -624,11 +739,12 @@ HRESULT SharedModeInitialize(IAudioClient* client,
uint32_t* endpoint_buffer_size) {
RTC_DLOG(INFO) << "SharedModeInitialize";
RTC_DCHECK(client);
// Use default flags (i.e, don't set AUDCLNT_STREAMFLAGS_NOPERSIST) to
// ensure that the volume level and muting state for a rendering session
// are persistent across system restarts. The volume level and muting
// state for a capture session are never persistent.
DWORD stream_flags = 0;
// The AUDCLNT_STREAMFLAGS_NOPERSIST flag disables persistence of the volume
// and mute settings for a session that contains rendering streams.
// By default, the volume level and muting state for a rendering session are
// persistent across system restarts. The volume level and muting state for a
// capture session are never persistent.
DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
// Enable event-driven streaming if a valid event handle is provided.
// After the stream starts, the audio engine will signal the event handle
@ -739,6 +855,22 @@ ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client) {
return audio_clock;
}
// Acquires an ISimpleAudioVolume service from an initialized |client|.
// Returns a null ComPtr on failure; |client| must be non-null.
ComPtr<ISimpleAudioVolume> CreateSimpleAudioVolume(IAudioClient* client) {
  RTC_DLOG(INFO) << "CreateSimpleAudioVolume";
  RTC_DCHECK(client);
  // Get access to the ISimpleAudioVolume interface. This interface enables a
  // client to control the master volume level of an audio session.
  ComPtr<ISimpleAudioVolume> simple_audio_volume;
  _com_error error = client->GetService(IID_PPV_ARGS(&simple_audio_volume));
  if (error.Error() != S_OK) {
    RTC_LOG(LS_ERROR)
        << "IAudioClient::GetService(IID_ISimpleAudioVolume) failed: "
        << ErrorToString(error);
    return ComPtr<ISimpleAudioVolume>();
  }
  return simple_audio_volume;
}
bool FillRenderEndpointBufferWithSilence(IAudioClient* client,
IAudioRenderClient* render_client) {
RTC_DLOG(INFO) << "FillRenderEndpointBufferWithSilence";

View File

@ -12,6 +12,7 @@
#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_UTILITY_WIN_H_
#include <Audioclient.h>
#include <Audiopolicy.h>
#include <Mmdeviceapi.h>
#include <avrt.h>
#include <comdef.h>
@ -26,6 +27,8 @@
#include "modules/audio_device/include/audio_device_defines.h"
#include "rtc_base/logging.h"
#pragma comment(lib, "Avrt.lib")
namespace webrtc {
namespace webrtc_win {
@ -78,6 +81,10 @@ class ScopedMMCSSRegistration {
class ScopedCOMInitializer {
public:
// Enum value provided to initialize the thread as an MTA instead of STA.
// There are two types of apartments, Single Threaded Apartments (STAs)
// and Multi Threaded Apartments (MTAs). Within a given process there can
// be multiple STA’s but there is only one MTA. STA is typically used by
// "GUI applications" and MTA by "worker threads" with no UI message loop.
enum SelectMTA { kMTA };
// Constructor for STA initialization.
@ -107,9 +114,23 @@ class ScopedCOMInitializer {
void Initialize(COINIT init) {
// Initializes the COM library for use by the calling thread, sets the
// thread's concurrency model, and creates a new apartment for the thread
// if one is required.
// if one is required. CoInitializeEx must be called at least once, and is
// usually called only once, for each thread that uses the COM library.
hr_ = CoInitializeEx(NULL, init);
RTC_CHECK_NE(RPC_E_CHANGED_MODE, hr_) << "Invalid COM thread model change";
RTC_CHECK_NE(RPC_E_CHANGED_MODE, hr_)
<< "Invalid COM thread model change (MTA->STA)";
// Multiple calls to CoInitializeEx by the same thread are allowed as long
// as they pass the same concurrency flag, but subsequent valid calls
// return S_FALSE. To close the COM library gracefully on a thread, each
// successful call to CoInitializeEx, including any call that returns
// S_FALSE, must be balanced by a corresponding call to CoUninitialize.
if (hr_ == S_OK) {
RTC_DLOG(INFO)
<< "The COM library was initialized successfully on this thread";
} else if (hr_ == S_FALSE) {
RTC_DLOG(WARNING)
<< "The COM library is already initialized on this thread";
}
}
HRESULT hr_;
};
@ -263,6 +284,12 @@ namespace core_audio_utility {
// other methods in this class.
bool IsSupported();
// Returns true if Multimedia Class Scheduler service (MMCSS) is supported.
// The MMCSS enables multimedia applications to ensure that their time-sensitive
// processing receives prioritized access to CPU resources without denying CPU
// resources to lower-priority applications.
bool IsMMCSSSupported();
// The MMDevice API lets clients discover the audio endpoint devices in the
// system and determine which devices are suitable for the application to use.
// Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
@ -320,7 +347,22 @@ bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names);
// device. Header files Audioclient.h and Audiopolicy.h define the WASAPI
// interfaces.
// Create an IAudioClient instance for a specific device or the default
// Creates an IAudioSessionManager2 interface for the specified |device|.
// This interface provides access to e.g. the IAudioSessionEnumerator
Microsoft::WRL::ComPtr<IAudioSessionManager2> CreateSessionManager2(
IMMDevice* device);
// Creates an IAudioSessionEnumerator interface for the specified |device|.
// The client can use the interface to enumerate audio sessions on the audio
// device
Microsoft::WRL::ComPtr<IAudioSessionEnumerator> CreateSessionEnumerator(
IMMDevice* device);
// Number of active audio sessions for the given |device|. Expired or inactive
// sessions are not included.
int NumberOfActiveSessions(IMMDevice* device);
// Creates an IAudioClient instance for a specific device or the default
// device specified by data-flow direction and role.
Microsoft::WRL::ComPtr<IAudioClient> CreateClient(const std::string& device_id,
EDataFlow data_flow,
@ -384,7 +426,6 @@ HRESULT GetPreferredAudioParameters(IAudioClient* client,
// TODO(henrika):
// - use IAudioClient2::SetClientProperties before calling this method
// - IAudioClient::Initialize(your_format, AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM
// |
// AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY)
HRESULT SharedModeInitialize(IAudioClient* client,
const WAVEFORMATEXTENSIBLE* format,
@ -410,12 +451,17 @@ Microsoft::WRL::ComPtr<IAudioCaptureClient> CreateCaptureClient(
// data rate and the current position in the stream.
Microsoft::WRL::ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client);
// Creates an ISimpleAudioVolume interface for an existing IAudioClient given by
// |client|. This interface enables a client to control the master volume level
// of an active audio session.
Microsoft::WRL::ComPtr<ISimpleAudioVolume> CreateSimpleAudioVolume(
IAudioClient* client);
// Fills up the endpoint rendering buffer with silence for an existing
// IAudioClient given by |client| and a corresponding IAudioRenderClient
// given by |render_client|.
bool FillRenderEndpointBufferWithSilence(IAudioClient* client,
IAudioRenderClient* render_client);
// Transforms a WAVEFORMATEXTENSIBLE struct to a human-readable string.
std::string WaveFormatExToString(const WAVEFORMATEXTENSIBLE* format);

View File

@ -11,8 +11,11 @@
#include "modules/audio_device/win/core_audio_utility_win.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/logging.h"
#include "rtc_base/win/windows_version.h"
#include "test/gtest.h"
#include "system_wrappers/include/sleep.h"
using Microsoft::WRL::ComPtr;
using webrtc::AudioDeviceName;
@ -53,7 +56,8 @@ bool ShouldAbortTest(bool requirements_satisfied,
// CoreAudioUtilityWinTest test fixture.
class CoreAudioUtilityWinTest : public ::testing::Test {
protected:
CoreAudioUtilityWinTest() {
CoreAudioUtilityWinTest()
: com_init_(webrtc_win::ScopedCOMInitializer::kMTA) {
// We must initialize the COM library on a thread before we calling any of
// the library functions. All COM functions will return CO_E_NOTINITIALIZED
// otherwise.
@ -242,31 +246,107 @@ TEST_F(CoreAudioUtilityWinTest, GetOutputDeviceNames) {
2 + core_audio_utility::NumberOfActiveDevices(eRender));
}
TEST_F(CoreAudioUtilityWinTest, CreateSessionManager2) {
  // IAudioSessionManager2 requires Windows 7 or later; skip otherwise.
  ABORT_TEST_IF_NOT(DevicesAvailable() &&
                    rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN7);
  EDataFlow data_flow[] = {eRender, eCapture};
  // Obtain reference to an IAudioSessionManager2 interface for a default audio
  // endpoint device specified by two different data flows and the |eConsole|
  // role.
  for (size_t i = 0; i < arraysize(data_flow); ++i) {
    ComPtr<IMMDevice> device(core_audio_utility::CreateDevice(
        AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole));
    EXPECT_TRUE(device.Get());
    ComPtr<IAudioSessionManager2> session_manager =
        core_audio_utility::CreateSessionManager2(device.Get());
    EXPECT_TRUE(session_manager.Get());
  }
}
TEST_F(CoreAudioUtilityWinTest, CreateSessionEnumerator) {
  // Session APIs require Windows 7 or later; skip otherwise.
  ABORT_TEST_IF_NOT(DevicesAvailable() &&
                    rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN7);
  EDataFlow data_flow[] = {eRender, eCapture};
  // Obtain reference to an IAudioSessionEnumerator interface for a default
  // audio endpoint device specified by two different data flows and the
  // |eConsole| role.
  for (size_t i = 0; i < arraysize(data_flow); ++i) {
    ComPtr<IMMDevice> device(core_audio_utility::CreateDevice(
        AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole));
    EXPECT_TRUE(device.Get());
    ComPtr<IAudioSessionEnumerator> session_enumerator =
        core_audio_utility::CreateSessionEnumerator(device.Get());
    EXPECT_TRUE(session_enumerator.Get());
    // Perform a sanity test of the interface by asking for the total number
    // of audio sessions that are open on the audio device. Note that, we do
    // not check if the session is active or not.
    int session_count = 0;
    EXPECT_TRUE(SUCCEEDED(session_enumerator->GetCount(&session_count)));
    EXPECT_GE(session_count, 0);
  }
}
TEST_F(CoreAudioUtilityWinTest, NumberOfActiveSessions) {
ABORT_TEST_IF_NOT(DevicesAvailable() &&
rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN7);
EDataFlow data_flow[] = {eRender, eCapture};
// Count number of active audio session for a default audio endpoint device
// specified by two different data flows and the |eConsole| role.
// Ensure that the number of active audio sessions is less than or equal to
// the total number of audio sessions on that same device.
for (size_t i = 0; i < arraysize(data_flow); ++i) {
// Create an audio endpoint device.
ComPtr<IMMDevice> device(core_audio_utility::CreateDevice(
AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole));
EXPECT_TRUE(device.Get());
// Ask for total number of audio sessions on the created device.
ComPtr<IAudioSessionEnumerator> session_enumerator =
core_audio_utility::CreateSessionEnumerator(device.Get());
EXPECT_TRUE(session_enumerator.Get());
int total_session_count = 0;
EXPECT_TRUE(SUCCEEDED(session_enumerator->GetCount(&total_session_count)));
EXPECT_GE(total_session_count, 0);
// Use NumberOfActiveSessions and get number of active audio sessions.
int active_session_count =
core_audio_utility::NumberOfActiveSessions(device.Get());
EXPECT_LE(active_session_count, total_session_count);
}
}
TEST_F(CoreAudioUtilityWinTest, CreateClient) {
ABORT_TEST_IF_NOT(DevicesAvailable());
EDataFlow data[] = {eRender, eCapture};
EDataFlow data_flow[] = {eRender, eCapture};
// Obtain reference to an IAudioClient interface for a default audio endpoint
// device specified by two different data flows and the |eConsole| role.
for (size_t i = 0; i < arraysize(data); ++i) {
for (size_t i = 0; i < arraysize(data_flow); ++i) {
ComPtr<IAudioClient> client = core_audio_utility::CreateClient(
AudioDeviceName::kDefaultDeviceId, data[i], eConsole);
AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
EXPECT_TRUE(client.Get());
}
}
// TODO(henrik): enable when support for Windows version querying is added.
TEST_F(CoreAudioUtilityWinTest, DISABLED_CreateClient2) {
ABORT_TEST_IF_NOT(DevicesAvailable());
TEST_F(CoreAudioUtilityWinTest, CreateClient2) {
ABORT_TEST_IF_NOT(DevicesAvailable() &&
rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN10);
EDataFlow data[] = {eRender, eCapture};
EDataFlow data_flow[] = {eRender, eCapture};
// Obtain reference to an IAudioClient2 interface for a default audio endpoint
// device specified by two different data flows and the |eConsole| role.
for (size_t i = 0; i < arraysize(data); ++i) {
for (size_t i = 0; i < arraysize(data_flow); ++i) {
ComPtr<IAudioClient2> client = core_audio_utility::CreateClient2(
AudioDeviceName::kDefaultDeviceId, data[i], eConsole);
AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
EXPECT_TRUE(client.Get());
}
}
@ -325,16 +405,16 @@ TEST_F(CoreAudioUtilityWinTest, IsFormatSupported) {
TEST_F(CoreAudioUtilityWinTest, GetDevicePeriod) {
ABORT_TEST_IF_NOT(DevicesAvailable());
EDataFlow data[] = {eRender, eCapture};
EDataFlow data_flow[] = {eRender, eCapture};
// Verify that the device periods are valid for the default render and
// capture devices.
for (size_t i = 0; i < arraysize(data); ++i) {
for (size_t i = 0; i < arraysize(data_flow); ++i) {
ComPtr<IAudioClient> client;
REFERENCE_TIME shared_time_period = 0;
REFERENCE_TIME exclusive_time_period = 0;
client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
data[i], eConsole);
data_flow[i], eConsole);
EXPECT_TRUE(client.Get());
EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetDevicePeriod(
client.Get(), AUDCLNT_SHAREMODE_SHARED, &shared_time_period)));
@ -349,25 +429,25 @@ TEST_F(CoreAudioUtilityWinTest, GetDevicePeriod) {
TEST_F(CoreAudioUtilityWinTest, GetPreferredAudioParameters) {
ABORT_TEST_IF_NOT(DevicesAvailable());
EDataFlow data[] = {eRender, eCapture};
EDataFlow data_flow[] = {eRender, eCapture};
// Verify that the preferred audio parameters are OK for the default render
// and capture devices.
for (size_t i = 0; i < arraysize(data); ++i) {
for (size_t i = 0; i < arraysize(data_flow); ++i) {
webrtc::AudioParameters params;
EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetPreferredAudioParameters(
AudioDeviceName::kDefaultDeviceId, data[i] == eRender, &params)));
AudioDeviceName::kDefaultDeviceId, data_flow[i] == eRender, &params)));
EXPECT_TRUE(params.is_valid());
EXPECT_TRUE(params.is_complete());
}
// Verify that the preferred audio parameters are OK for the default
// communication devices.
for (size_t i = 0; i < arraysize(data); ++i) {
for (size_t i = 0; i < arraysize(data_flow); ++i) {
webrtc::AudioParameters params;
EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetPreferredAudioParameters(
AudioDeviceName::kDefaultCommunicationsDeviceId, data[i] == eRender,
&params)));
AudioDeviceName::kDefaultCommunicationsDeviceId,
data_flow[i] == eRender, &params)));
EXPECT_TRUE(params.is_valid());
EXPECT_TRUE(params.is_complete());
}
@ -443,23 +523,23 @@ TEST_F(CoreAudioUtilityWinTest, SharedModeInitialize) {
TEST_F(CoreAudioUtilityWinTest, CreateRenderAndCaptureClients) {
ABORT_TEST_IF_NOT(DevicesAvailable());
EDataFlow data[] = {eRender, eCapture};
EDataFlow data_flow[] = {eRender, eCapture};
WAVEFORMATPCMEX format;
uint32_t endpoint_buffer_size = 0;
for (size_t i = 0; i < arraysize(data); ++i) {
for (size_t i = 0; i < arraysize(data_flow); ++i) {
ComPtr<IAudioClient> client;
ComPtr<IAudioRenderClient> render_client;
ComPtr<IAudioCaptureClient> capture_client;
// Create a default client for the given data-flow direction.
client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
data[i], eConsole);
data_flow[i], eConsole);
EXPECT_TRUE(client.Get());
EXPECT_TRUE(SUCCEEDED(
core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
if (data[i] == eRender) {
if (data_flow[i] == eRender) {
// It is not possible to create a render client using an unitialized
// client interface.
render_client = core_audio_utility::CreateRenderClient(client.Get());
@ -471,7 +551,7 @@ TEST_F(CoreAudioUtilityWinTest, CreateRenderAndCaptureClients) {
render_client = core_audio_utility::CreateRenderClient(client.Get());
EXPECT_TRUE(render_client.Get());
EXPECT_GT(endpoint_buffer_size, 0u);
} else if (data[i] == eCapture) {
} else if (data_flow[i] == eCapture) {
// It is not possible to create a capture client using an unitialized
// client interface.
capture_client = core_audio_utility::CreateCaptureClient(client.Get());
@ -490,18 +570,18 @@ TEST_F(CoreAudioUtilityWinTest, CreateRenderAndCaptureClients) {
TEST_F(CoreAudioUtilityWinTest, CreateAudioClock) {
ABORT_TEST_IF_NOT(DevicesAvailable());
EDataFlow data[] = {eRender, eCapture};
EDataFlow data_flow[] = {eRender, eCapture};
WAVEFORMATPCMEX format;
uint32_t endpoint_buffer_size = 0;
for (size_t i = 0; i < arraysize(data); ++i) {
for (size_t i = 0; i < arraysize(data_flow); ++i) {
ComPtr<IAudioClient> client;
ComPtr<IAudioClock> audio_clock;
// Create a default client for the given data-flow direction.
client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
data[i], eConsole);
data_flow[i], eConsole);
EXPECT_TRUE(client.Get());
EXPECT_TRUE(SUCCEEDED(
core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
@ -525,6 +605,55 @@ TEST_F(CoreAudioUtilityWinTest, CreateAudioClock) {
}
}
TEST_F(CoreAudioUtilityWinTest, CreateSimpleAudioVolume) {
ABORT_TEST_IF_NOT(DevicesAvailable());
EDataFlow data_flow[] = {eRender, eCapture};
WAVEFORMATPCMEX format;
uint32_t endpoint_buffer_size = 0;
for (size_t i = 0; i < arraysize(data_flow); ++i) {
ComPtr<IAudioClient> client;
ComPtr<ISimpleAudioVolume> simple_audio_volume;
// Create a default client for the given data-flow direction.
client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
data_flow[i], eConsole);
EXPECT_TRUE(client.Get());
EXPECT_TRUE(SUCCEEDED(
core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
// It is not possible to create an audio volume using an uninitialized
// client interface.
simple_audio_volume =
core_audio_utility::CreateSimpleAudioVolume(client.Get());
EXPECT_FALSE(simple_audio_volume.Get());
// Do a proper initialization and verify that it works this time.
core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
&endpoint_buffer_size);
simple_audio_volume =
core_audio_utility::CreateSimpleAudioVolume(client.Get());
EXPECT_TRUE(simple_audio_volume.Get());
EXPECT_GT(endpoint_buffer_size, 0u);
// Use the audio volume interface and validate that it works. The volume
// level should be value in the range 0.0 to 1.0 at first call.
float volume = 0.0;
EXPECT_TRUE(SUCCEEDED(simple_audio_volume->GetMasterVolume(&volume)));
EXPECT_GE(volume, 0.0);
EXPECT_LE(volume, 1.0);
// Next, set a new volume and verify that the setter does its job.
const float target_volume = 0.5;
EXPECT_TRUE(SUCCEEDED(
simple_audio_volume->SetMasterVolume(target_volume, nullptr)));
EXPECT_TRUE(SUCCEEDED(simple_audio_volume->GetMasterVolume(&volume)));
EXPECT_EQ(volume, target_volume);
}
}
TEST_F(CoreAudioUtilityWinTest, FillRenderEndpointBufferWithSilence) {
ABORT_TEST_IF_NOT(DevicesAvailable());

View File

@ -31,8 +31,12 @@ namespace {
// implementations.
//
// An instance can be created on any thread, but must then be used on one and
// the same thread. All public methods must also be called on the same thread. A
// thread checker will RTC_DCHECK if any method is called on an invalid thread.
// the same thread. All public methods must also be called on the same thread.
// A thread checker will RTC_DCHECK if any method is called on an invalid
// thread.
// TODO(henrika): it might be useful to also support a scenario where the ADM
// is constructed on thread T1, used on thread T2 and destructed on T2 or T3.
// If so, care must be taken to ensure that only T2 is a COM thread.
class AndroidAudioDeviceModule : public AudioDeviceModule {
public:
// For use with UMA logging. Must be kept in sync with histograms.xml in