Use backticks not vertical bars to denote variables in comments for /modules/audio_device

Bug: webrtc:12338
Change-Id: I27ad3a5fe6e765379e4e4f42783558c5522bab38
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/227091
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34620}
Author: Artem Titov
Date: 2021-07-28 20:03:05 +02:00
Committed by: WebRTC LUCI CQ
Parent: 53adc7b1c8
Commit: 0146a34b3f
35 changed files with 181 additions and 181 deletions
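The change itself is mechanical: comment references to variables previously written as |name| now use `name`. A minimal before/after sketch of the convention (the function and its parameters are illustrative, not taken from the diffs below):

// Before: copy |num_frames| samples from |source| into |destination|.
// After:  copy `num_frames` samples from `source` into `destination`.
void CopySamples(const int16_t* source, int16_t* destination, size_t num_frames);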


@@ -184,7 +184,7 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,
  }
  // Read audio data from the WebRTC source using the FineAudioBuffer object
- // and write that data into |audio_data| to be played out by AAudio.
+ // and write that data into `audio_data` to be played out by AAudio.
  // Prime output with zeros during a short initial phase to avoid distortion.
  // TODO(henrika): do more work to figure out of if the initial forced silence
  // period is really needed.


@@ -76,8 +76,8 @@ class AAudioPlayer final : public AAudioObserverInterface,
  protected:
  // AAudioObserverInterface implementation.
- // For an output stream, this function should render and write |num_frames|
- // of data in the streams current data format to the |audio_data| buffer.
+ // For an output stream, this function should render and write `num_frames`
+ // of data in the streams current data format to the `audio_data` buffer.
  // Called on a real-time thread owned by AAudio.
  aaudio_data_callback_result_t OnDataCallback(void* audio_data,
  int32_t num_frames) override;


@@ -146,7 +146,7 @@ void AAudioRecorder::OnErrorCallback(aaudio_result_t error) {
  }
  }
- // Read and process |num_frames| of data from the |audio_data| buffer.
+ // Read and process `num_frames` of data from the `audio_data` buffer.
  // TODO(henrika): possibly add trace here to be included in systrace.
  // See https://developer.android.com/studio/profile/systrace-commandline.html.
  aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
@@ -180,7 +180,7 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
  RTC_DLOG(INFO) << "input latency: " << latency_millis_
  << ", num_frames: " << num_frames;
  }
- // Copy recorded audio in |audio_data| to the WebRTC sink using the
+ // Copy recorded audio in `audio_data` to the WebRTC sink using the
  // FineAudioBuffer object.
  fine_audio_buffer_->DeliverRecordedData(
  rtc::MakeArrayView(static_cast<const int16_t*>(audio_data),


@@ -69,8 +69,8 @@ class AAudioRecorder : public AAudioObserverInterface,
  protected:
  // AAudioObserverInterface implementation.
- // For an input stream, this function should read |num_frames| of recorded
- // data, in the stream's current data format, from the |audio_data| buffer.
+ // For an input stream, this function should read `num_frames` of recorded
+ // data, in the stream's current data format, from the `audio_data` buffer.
  // Called on a real-time thread owned by AAudio.
  aaudio_data_callback_result_t OnDataCallback(void* audio_data,
  int32_t num_frames) override;


@@ -68,7 +68,7 @@ static const int kFilePlayTimeInSec = 5;
  static const size_t kBitsPerSample = 16;
  static const size_t kBytesPerSample = kBitsPerSample / 8;
  // Run the full-duplex test during this time (unit is in seconds).
- // Note that first |kNumIgnoreFirstCallbacks| are ignored.
+ // Note that first `kNumIgnoreFirstCallbacks` are ignored.
  static const int kFullDuplexTimeInSec = 5;
  // Wait for the callback sequence to stabilize by ignoring this amount of the
  // initial callbacks (avoids initial FIFO access).
@@ -127,7 +127,7 @@ class FileAudioStream : public AudioStreamInterface {
  void Write(const void* source, size_t num_frames) override {}
  // Read samples from file stored in memory (at construction) and copy
- // |num_frames| (<=> 10ms) to the |destination| byte buffer.
+ // `num_frames` (<=> 10ms) to the `destination` byte buffer.
  void Read(void* destination, size_t num_frames) override {
  memcpy(destination, static_cast<int16_t*>(&file_[file_pos_]),
  num_frames * sizeof(int16_t));
@@ -171,7 +171,7 @@ class FifoAudioStream : public AudioStreamInterface {
  ~FifoAudioStream() { Flush(); }
- // Allocate new memory, copy |num_frames| samples from |source| into memory
+ // Allocate new memory, copy `num_frames` samples from `source` into memory
  // and add pointer to the memory location to end of the list.
  // Increases the size of the FIFO by one element.
  void Write(const void* source, size_t num_frames) override {
@@ -192,8 +192,8 @@ class FifoAudioStream : public AudioStreamInterface {
  total_written_elements_ += size;
  }
- // Read pointer to data buffer from front of list, copy |num_frames| of stored
- // data into |destination| and delete the utilized memory allocation.
+ // Read pointer to data buffer from front of list, copy `num_frames` of stored
+ // data into `destination` and delete the utilized memory allocation.
  // Decreases the size of the FIFO by one element.
  void Read(void* destination, size_t num_frames) override {
  ASSERT_EQ(num_frames, frames_per_buffer_);
@@ -251,7 +251,7 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
  rec_count_(0),
  pulse_time_(0) {}
- // Insert periodic impulses in first two samples of |destination|.
+ // Insert periodic impulses in first two samples of `destination`.
  void Read(void* destination, size_t num_frames) override {
  ASSERT_EQ(num_frames, frames_per_buffer_);
  if (play_count_ == 0) {
@@ -272,14 +272,14 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
  }
  }
- // Detect received impulses in |source|, derive time between transmission and
+ // Detect received impulses in `source`, derive time between transmission and
  // detection and add the calculated delay to list of latencies.
  void Write(const void* source, size_t num_frames) override {
  ASSERT_EQ(num_frames, frames_per_buffer_);
  rec_count_++;
  if (pulse_time_ == 0) {
  // Avoid detection of new impulse response until a new impulse has
- // been transmitted (sets |pulse_time_| to value larger than zero).
+ // been transmitted (sets `pulse_time_` to value larger than zero).
  return;
  }
  const int16_t* ptr16 = static_cast<const int16_t*>(source);
@@ -298,7 +298,7 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
  // Total latency is the difference between transmit time and detection
  // tome plus the extra delay within the buffer in which we detected the
  // received impulse. It is transmitted at sample 0 but can be received
- // at sample N where N > 0. The term |extra_delay| accounts for N and it
+ // at sample N where N > 0. The term `extra_delay` accounts for N and it
  // is a value between 0 and 10ms.
  latencies_.push_back(now_time - pulse_time_ + extra_delay);
  pulse_time_ = 0;
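The latency test above transmits an impulse in the playout path and measures when it shows up in the recording path. A minimal sketch of that bookkeeping, assuming illustrative names (pulse_time_ms, detection_index, frames_per_buffer) rather than the actual test members:

#include <cstdint>

// Latency = detection time - transmit time + the extra in-buffer delay, since
// the impulse is written at sample 0 but may be detected at sample N > 0.
int64_t EstimateLatencyMs(int64_t pulse_time_ms,    // when the impulse was queued for playout
                          int64_t now_time_ms,      // when the impulse was detected in the recording
                          int detection_index,      // sample N within the 10 ms record buffer
                          int frames_per_buffer) {  // samples per 10 ms buffer
  // Spread the 10 ms buffer duration over the detection offset, so the extra
  // delay is a value between 0 and 10 ms.
  const int64_t extra_delay_ms = 10 * detection_index / frames_per_buffer;
  return now_time_ms - pulse_time_ms + extra_delay_ms;
}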


@@ -98,7 +98,7 @@ void AudioManager::SetActiveAudioLayer(
  // The delay estimate can take one of two fixed values depending on if the
  // device supports low-latency output or not. However, it is also possible
  // that the user explicitly selects the high-latency audio path, hence we use
- // the selected |audio_layer| here to set the delay estimate.
+ // the selected `audio_layer` here to set the delay estimate.
  delay_estimate_in_milliseconds_ =
  (audio_layer == AudioDeviceModule::kAndroidJavaAudio)
  ? kHighLatencyModeDelayEstimateInMilliseconds


@@ -270,8 +270,8 @@ void AudioRecordJni::OnDataIsRecorded(int length) {
  audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
  frames_per_buffer_);
  // We provide one (combined) fixed delay estimate for the APM and use the
- // |playDelayMs| parameter only. Components like the AEC only sees the sum
- // of |playDelayMs| and |recDelayMs|, hence the distributions does not matter.
+ // `playDelayMs` parameter only. Components like the AEC only sees the sum
+ // of `playDelayMs` and `recDelayMs`, hence the distributions does not matter.
  audio_device_buffer_->SetVQEData(total_delay_in_milliseconds_, 0);
  if (audio_device_buffer_->DeliverRecordedData() == -1) {
  RTC_LOG(INFO) << "AudioDeviceBuffer::DeliverRecordedData failed";


@@ -87,8 +87,8 @@ class AudioRecordJni {
  private:
  // Called from Java side so we can cache the address of the Java-manged
- // |byte_buffer| in |direct_buffer_address_|. The size of the buffer
- // is also stored in |direct_buffer_capacity_in_bytes_|.
+ // `byte_buffer` in `direct_buffer_address_`. The size of the buffer
+ // is also stored in `direct_buffer_capacity_in_bytes_`.
  // This method will be called by the WebRtcAudioRecord constructor, i.e.,
  // on the same thread that this object is created on.
  static void JNICALL CacheDirectBufferAddress(JNIEnv* env,
@@ -98,8 +98,8 @@ class AudioRecordJni {
  void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);
  // Called periodically by the Java based WebRtcAudioRecord object when
- // recording has started. Each call indicates that there are |length| new
- // bytes recorded in the memory area |direct_buffer_address_| and it is
+ // recording has started. Each call indicates that there are `length` new
+ // bytes recorded in the memory area `direct_buffer_address_` and it is
  // now time to send these to the consumer.
  // This method is called on a high-priority thread from Java. The name of
  // the thread is 'AudioRecordThread'.
@@ -142,10 +142,10 @@ class AudioRecordJni {
  // possible values. See audio_common.h for details.
  int total_delay_in_milliseconds_;
- // Cached copy of address to direct audio buffer owned by |j_audio_record_|.
+ // Cached copy of address to direct audio buffer owned by `j_audio_record_`.
  void* direct_buffer_address_;
- // Number of bytes in the direct audio buffer owned by |j_audio_record_|.
+ // Number of bytes in the direct audio buffer owned by `j_audio_record_`.
  size_t direct_buffer_capacity_in_bytes_;
  // Number audio frames per audio buffer. Each audio frame corresponds to


@@ -88,8 +88,8 @@ class AudioTrackJni {
  private:
  // Called from Java side so we can cache the address of the Java-manged
- // |byte_buffer| in |direct_buffer_address_|. The size of the buffer
- // is also stored in |direct_buffer_capacity_in_bytes_|.
+ // `byte_buffer` in `direct_buffer_address_`. The size of the buffer
+ // is also stored in `direct_buffer_capacity_in_bytes_`.
  // Called on the same thread as the creating thread.
  static void JNICALL CacheDirectBufferAddress(JNIEnv* env,
  jobject obj,
@@ -98,8 +98,8 @@ class AudioTrackJni {
  void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);
  // Called periodically by the Java based WebRtcAudioTrack object when
- // playout has started. Each call indicates that |length| new bytes should
- // be written to the memory area |direct_buffer_address_| for playout.
+ // playout has started. Each call indicates that `length` new bytes should
+ // be written to the memory area `direct_buffer_address_` for playout.
  // This method is called on a high-priority thread from Java. The name of
  // the thread is 'AudioTrackThread'.
  static void JNICALL GetPlayoutData(JNIEnv* env,
@@ -133,10 +133,10 @@ class AudioTrackJni {
  // AudioManager.
  const AudioParameters audio_parameters_;
- // Cached copy of address to direct audio buffer owned by |j_audio_track_|.
+ // Cached copy of address to direct audio buffer owned by `j_audio_track_`.
  void* direct_buffer_address_;
- // Number of bytes in the direct audio buffer owned by |j_audio_track_|.
+ // Number of bytes in the direct audio buffer owned by `j_audio_track_`.
  size_t direct_buffer_capacity_in_bytes_;
  // Number of audio frames per audio buffer. Each audio frame corresponds to


@@ -64,7 +64,7 @@ class BuildInfo {
  SdkCode GetSdkVersion();
  private:
- // Helper method which calls a static getter method with |name| and returns
+ // Helper method which calls a static getter method with `name` and returns
  // a string from Java.
  std::string GetStringFromJava(const char* name);


@@ -23,7 +23,7 @@ import org.webrtc.Logging;
  // This class wraps control of three different platform effects. Supported
  // effects are: AcousticEchoCanceler (AEC) and NoiseSuppressor (NS).
  // Calling enable() will active all effects that are
- // supported by the device if the corresponding |shouldEnableXXX| member is set.
+ // supported by the device if the corresponding `shouldEnableXXX` member is set.
  public class WebRtcAudioEffects {
  private static final boolean DEBUG = false;
@@ -162,7 +162,7 @@ public class WebRtcAudioEffects {
  }
  // Call this method to enable or disable the platform AEC. It modifies
- // |shouldEnableAec| which is used in enable() where the actual state
+ // `shouldEnableAec` which is used in enable() where the actual state
  // of the AEC effect is modified. Returns true if HW AEC is supported and
  // false otherwise.
  public boolean setAEC(boolean enable) {
@@ -181,7 +181,7 @@ public class WebRtcAudioEffects {
  }
  // Call this method to enable or disable the platform NS. It modifies
- // |shouldEnableNs| which is used in enable() where the actual state
+ // `shouldEnableNs` which is used in enable() where the actual state
  // of the NS effect is modified. Returns true if HW NS is supported and
  // false otherwise.
  public boolean setNS(boolean enable) {
@@ -269,7 +269,7 @@ public class WebRtcAudioEffects {
  }
  }
- // Returns true for effect types in |type| that are of "VoIP" types:
+ // Returns true for effect types in `type` that are of "VoIP" types:
  // Acoustic Echo Canceler (AEC) or Automatic Gain Control (AGC) or
  // Noise Suppressor (NS). Note that, an extra check for support is needed
  // in each comparison since some devices includes effects in the
@@ -306,7 +306,7 @@ public class WebRtcAudioEffects {
  }
  // Returns true if an effect of the specified type is available. Functionally
- // equivalent to (NoiseSuppressor|AutomaticGainControl|...).isAvailable(), but
+ // equivalent to (NoiseSuppressor`AutomaticGainControl`...).isAvailable(), but
  // faster as it avoids the expensive OS call to enumerate effects.
  private static boolean isEffectTypeAvailable(UUID effectType) {
  Descriptor[] effects = getAvailableEffects();


@@ -366,7 +366,7 @@ public class WebRtcAudioRecord {
  return AudioSource.VOICE_COMMUNICATION;
  }
- // Sets all recorded samples to zero if |mute| is true, i.e., ensures that
+ // Sets all recorded samples to zero if `mute` is true, i.e., ensures that
  // the microphone is muted.
  public static void setMicrophoneMute(boolean mute) {
  Logging.w(TAG, "setMicrophoneMute(" + mute + ")");


@@ -78,7 +78,7 @@ public class WebRtcAudioTrack {
  private @Nullable AudioTrack audioTrack;
  private @Nullable AudioTrackThread audioThread;
- // Samples to be played are replaced by zeros if |speakerMute| is set to true.
+ // Samples to be played are replaced by zeros if `speakerMute` is set to true.
  // Can be used to ensure that the speaker is fully muted.
  private static volatile boolean speakerMute;
  private byte[] emptyBytes;
@@ -239,9 +239,9 @@ public class WebRtcAudioTrack {
  Logging.d(TAG, "minBufferSizeInBytes: " + minBufferSizeInBytes);
  // For the streaming mode, data must be written to the audio sink in
  // chunks of size (given by byteBuffer.capacity()) less than or equal
- // to the total buffer size |minBufferSizeInBytes|. But, we have seen
+ // to the total buffer size `minBufferSizeInBytes`. But, we have seen
  // reports of "getMinBufferSize(): error querying hardware". Hence, it
- // can happen that |minBufferSizeInBytes| contains an invalid value.
+ // can happen that `minBufferSizeInBytes` contains an invalid value.
  if (minBufferSizeInBytes < byteBuffer.capacity()) {
  reportWebRtcAudioTrackInitError("AudioTrack.getMinBufferSize returns an invalid value.");
  return -1;
@@ -481,7 +481,7 @@ public class WebRtcAudioTrack {
  private native void nativeGetPlayoutData(int bytes, long nativeAudioRecord);
- // Sets all samples to be played out to zero if |mute| is true, i.e.,
+ // Sets all samples to be played out to zero if `mute` is true, i.e.,
  // ensures that the speaker is muted.
  public static void setSpeakerMute(boolean mute) {
  Logging.w(TAG, "setSpeakerMute(" + mute + ")");


@@ -86,7 +86,7 @@ class OpenSLESPlayer {
  // Reads audio data in PCM format using the AudioDeviceBuffer.
  // Can be called both on the main thread (during Start()) and from the
  // internal audio thread while output streaming is active.
- // If the |silence| flag is set, the audio is filled with zeros instead of
+ // If the `silence` flag is set, the audio is filled with zeros instead of
  // asking the WebRTC layer for real audio data. This procedure is also known
  // as audio priming.
  void EnqueuePlayoutData(bool silence);
@@ -97,7 +97,7 @@ class OpenSLESPlayer {
  // Obtaines the SL Engine Interface from the existing global Engine object.
  // The interface exposes creation methods of all the OpenSL ES object types.
- // This method defines the |engine_| member variable.
+ // This method defines the `engine_` member variable.
  bool ObtainEngineInterface();
  // Creates/destroys the output mix object.


@@ -83,7 +83,7 @@ class OpenSLESRecorder {
  private:
  // Obtaines the SL Engine Interface from the existing global Engine object.
  // The interface exposes creation methods of all the OpenSL ES object types.
- // This method defines the |engine_| member variable.
+ // This method defines the `engine_` member variable.
  bool ObtainEngineInterface();
  // Creates/destroys the audio recorder and the simple-buffer queue object.
@@ -104,7 +104,7 @@ class OpenSLESRecorder {
  // Wraps calls to SLAndroidSimpleBufferQueueState::Enqueue() and it can be
  // called both on the main thread (but before recording has started) and from
  // the internal audio thread while input streaming is active. It uses
- // |simple_buffer_queue_| but no lock is needed since the initial calls from
+ // `simple_buffer_queue_` but no lock is needed since the initial calls from
  // the main thread and the native callback thread are mutually exclusive.
  bool EnqueueAudioBuffer();


@@ -160,10 +160,10 @@ void AudioDeviceBuffer::StopRecording() {
  // recorded. Measurements (max of absolute level) are taken twice per second,
  // which means that if e.g 10 seconds of audio has been recorded, a total of
  // 20 level estimates must all be identical to zero to trigger the histogram.
- // |only_silence_recorded_| can only be cleared on the native audio thread
+ // `only_silence_recorded_` can only be cleared on the native audio thread
  // that drives audio capture but we know by design that the audio has stopped
  // when this method is called, hence there should not be aby conflicts. Also,
- // the fact that |only_silence_recorded_| can be affected during the complete
+ // the fact that `only_silence_recorded_` can be affected during the complete
  // call makes chances of conflicts with potentially one last callback very
  // small.
  const size_t time_since_start = rtc::TimeSince(rec_start_time_);
@@ -245,7 +245,7 @@ int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audio_buffer,
  // Returns the largest absolute value in a signed 16-bit vector.
  max_abs = WebRtcSpl_MaxAbsValueW16(rec_buffer_.data(), rec_buffer_.size());
  rec_stat_count_ = 0;
- // Set |only_silence_recorded_| to false as soon as at least one detection
+ // Set `only_silence_recorded_` to false as soon as at least one detection
  // of a non-zero audio packet is found. It can only be restored to true
  // again by restarting the call.
  if (max_abs > 0) {
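The silence bookkeeping described in the comments above amounts to a sticky flag that starts out true and is cleared the first time any non-zero sample is seen. A minimal sketch, with illustrative names rather than the actual AudioDeviceBuffer members:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

bool only_silence_recorded = true;

// Clears the flag as soon as one recorded packet contains a non-zero sample.
// Once cleared, it can only be restored by restarting the call.
void UpdateSilenceFlag(const int16_t* samples, size_t num_samples) {
  int max_abs = 0;
  for (size_t i = 0; i < num_samples; ++i) {
    max_abs = std::max(max_abs, std::abs(static_cast<int>(samples[i])));
  }
  if (max_abs > 0)
    only_silence_recorded = false;
}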


@@ -162,14 +162,14 @@ class FifoAudioStream : public AudioStream {
  // channel configuration. No conversion is needed.
  std::copy(buffer.begin(), buffer.end(), destination.begin());
  } else if (destination.size() == 2 * buffer.size()) {
- // Recorded input signal in |buffer| is in mono. Do channel upmix to
+ // Recorded input signal in `buffer` is in mono. Do channel upmix to
  // match stereo output (1 -> 2).
  for (size_t i = 0; i < buffer.size(); ++i) {
  destination[2 * i] = buffer[i];
  destination[2 * i + 1] = buffer[i];
  }
  } else if (buffer.size() == 2 * destination.size()) {
- // Recorded input signal in |buffer| is in stereo. Do channel downmix
+ // Recorded input signal in `buffer` is in stereo. Do channel downmix
  // to match mono output (2 -> 1).
  for (size_t i = 0; i < destination.size(); ++i) {
  destination[i] =
@@ -219,7 +219,7 @@ class LatencyAudioStream : public AudioStream {
  write_thread_checker_.Detach();
  }
- // Insert periodic impulses in first two samples of |destination|.
+ // Insert periodic impulses in first two samples of `destination`.
  void Read(rtc::ArrayView<int16_t> destination) override {
  RTC_DCHECK_RUN_ON(&read_thread_checker_);
  if (read_count_ == 0) {
@@ -240,7 +240,7 @@ class LatencyAudioStream : public AudioStream {
  }
  }
- // Detect received impulses in |source|, derive time between transmission and
+ // Detect received impulses in `source`, derive time between transmission and
  // detection and add the calculated delay to list of latencies.
  void Write(rtc::ArrayView<const int16_t> source) override {
  RTC_DCHECK_RUN_ON(&write_thread_checker_);
@@ -249,7 +249,7 @@ class LatencyAudioStream : public AudioStream {
  write_count_++;
  if (!pulse_time_) {
  // Avoid detection of new impulse response until a new impulse has
- // been transmitted (sets |pulse_time_| to value larger than zero).
+ // been transmitted (sets `pulse_time_` to value larger than zero).
  return;
  }
  // Find index (element position in vector) of the max element.
@@ -267,7 +267,7 @@ class LatencyAudioStream : public AudioStream {
  // Total latency is the difference between transmit time and detection
  // tome plus the extra delay within the buffer in which we detected the
  // received impulse. It is transmitted at sample 0 but can be received
- // at sample N where N > 0. The term |extra_delay| accounts for N and it
+ // at sample N where N > 0. The term `extra_delay` accounts for N and it
  // is a value between 0 and 10ms.
  latencies_.push_back(now_time - *pulse_time_ + extra_delay);
  pulse_time_.reset();
@@ -586,7 +586,7 @@ class MAYBE_AudioDeviceTest
  rtc::scoped_refptr<AudioDeviceModuleForTest> CreateAudioDevice() {
  // Use the default factory for kPlatformDefaultAudio and a special factory
  // CreateWindowsCoreAudioAudioDeviceModuleForTest() for kWindowsCoreAudio2.
- // The value of |audio_layer_| is set at construction by GetParam() and two
+ // The value of `audio_layer_` is set at construction by GetParam() and two
  // different layers are tested on Windows only.
  if (audio_layer_ == AudioDeviceModule::kPlatformDefaultAudio) {
  return AudioDeviceModule::CreateForTest(audio_layer_,


@@ -28,8 +28,8 @@ namespace webrtc {
  // and plays out into a file.
  class FileAudioDevice : public AudioDeviceGeneric {
  public:
- // Constructs a file audio device with |id|. It will read audio from
- // |inputFilename| and record output audio to |outputFilename|.
+ // Constructs a file audio device with `id`. It will read audio from
+ // `inputFilename` and record output audio to `outputFilename`.
  //
  // The input file should be a readable 48k stereo raw file, and the output
  // file should point to a writable location. The output format will also be


@@ -113,7 +113,7 @@ void FineAudioBuffer::DeliverRecordedData(
  record_buffer_.AppendData(audio_buffer.data(), audio_buffer.size());
  // Consume samples from buffer in chunks of 10ms until there is not
  // enough data left. The number of remaining samples in the cache is given by
- // the new size of the internal |record_buffer_|.
+ // the new size of the internal `record_buffer_`.
  const size_t num_elements_10ms =
  record_channels_ * record_samples_per_channel_10ms_;
  while (record_buffer_.size() >= num_elements_10ms) {


@@ -29,7 +29,7 @@ class AudioDeviceBuffer;
  // accumulated 10ms worth of data to the ADB every second call.
  class FineAudioBuffer {
  public:
- // |device_buffer| is a buffer that provides 10ms of audio data.
+ // `device_buffer` is a buffer that provides 10ms of audio data.
  FineAudioBuffer(AudioDeviceBuffer* audio_device_buffer);
  ~FineAudioBuffer();
@@ -42,18 +42,18 @@ class FineAudioBuffer {
  bool IsReadyForPlayout() const;
  bool IsReadyForRecord() const;
- // Copies audio samples into |audio_buffer| where number of requested
+ // Copies audio samples into `audio_buffer` where number of requested
  // elements is specified by |audio_buffer.size()|. The producer will always
  // fill up the audio buffer and if no audio exists, the buffer will contain
- // silence instead. The provided delay estimate in |playout_delay_ms| should
+ // silence instead. The provided delay estimate in `playout_delay_ms` should
  // contain an estimate of the latency between when an audio frame is read from
  // WebRTC and when it is played out on the speaker.
  void GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
  int playout_delay_ms);
- // Consumes the audio data in |audio_buffer| and sends it to the WebRTC layer
+ // Consumes the audio data in `audio_buffer` and sends it to the WebRTC layer
  // in chunks of 10ms. The sum of the provided delay estimate in
- // |record_delay_ms| and the latest |playout_delay_ms| in GetPlayoutData()
+ // `record_delay_ms` and the latest `playout_delay_ms` in GetPlayoutData()
  // are given to the AEC in the audio processing module.
  // They can be fixed values on most platforms and they are ignored if an
  // external (hardware/built-in) AEC is used.
@@ -72,11 +72,11 @@ class FineAudioBuffer {
  // time of this object.
  AudioDeviceBuffer* const audio_device_buffer_;
  // Number of audio samples per channel per 10ms. Set once at construction
- // based on parameters in |audio_device_buffer|.
+ // based on parameters in `audio_device_buffer`.
  const size_t playout_samples_per_channel_10ms_;
  const size_t record_samples_per_channel_10ms_;
  // Number of audio channels. Set once at construction based on parameters in
- // |audio_device_buffer|.
+ // `audio_device_buffer`.
  const size_t playout_channels_;
  const size_t record_channels_;
  // Storage for output samples from which a consumer can read audio buffers
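The buffering strategy spelled out in these comments is simply to cache recorded samples and hand them to the consumer in complete 10 ms blocks. A minimal sketch of that loop, assuming a plain std::vector cache and a placeholder deliver_10ms callback instead of the real AudioDeviceBuffer API:

#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

void DrainInTenMsChunks(std::vector<int16_t>& cache,
                        size_t channels,
                        size_t samples_per_channel_10ms,
                        const std::function<void(const int16_t*, size_t)>& deliver_10ms) {
  const size_t num_elements_10ms = channels * samples_per_channel_10ms;
  // Hand out complete 10 ms chunks; anything smaller stays cached until the
  // next call appends enough samples.
  while (cache.size() >= num_elements_10ms) {
    deliver_10ms(cache.data(), num_elements_10ms);
    cache.erase(cache.begin(), cache.begin() + num_elements_10ms);
  }
}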


@@ -36,7 +36,7 @@ const int kSamplesPer10Ms = kSampleRate * 10 / 1000;
  // E.g. if there are two buffers of size 3, buffer 1 would contain 0,1,2 and
  // buffer 2 would contain 3,4,5. Note that SCHAR_MAX is 127 so wrap-around
  // will happen.
- // |buffer| is the audio buffer to verify.
+ // `buffer` is the audio buffer to verify.
  bool VerifyBuffer(const int16_t* buffer, int buffer_number, int size) {
  int start_value = (buffer_number * size) % SCHAR_MAX;
  for (int i = 0; i < size; ++i) {
@@ -51,9 +51,9 @@ bool VerifyBuffer(const int16_t* buffer, int buffer_number, int size) {
  // called (which is done implicitly when calling GetBufferData). It writes the
  // sequence 0,1,..SCHAR_MAX-1,0,1,... to the buffer. Note that this is likely a
  // buffer of different size than the one VerifyBuffer verifies.
- // |iteration| is the number of calls made to UpdateBuffer prior to this call.
- // |samples_per_10_ms| is the number of samples that should be written to the
- // buffer (|arg0|).
+ // `iteration` is the number of calls made to UpdateBuffer prior to this call.
+ // `samples_per_10_ms` is the number of samples that should be written to the
+ // buffer (`arg0`).
  ACTION_P2(UpdateBuffer, iteration, samples_per_10_ms) {
  int16_t* buffer = static_cast<int16_t*>(arg0);
  int start_value = (iteration * samples_per_10_ms) % SCHAR_MAX;
@@ -64,7 +64,7 @@ ACTION_P2(UpdateBuffer, iteration, samples_per_10_ms) {
  return samples_per_10_ms / kChannels;
  }
- // Writes a periodic ramp pattern to the supplied |buffer|. See UpdateBuffer()
+ // Writes a periodic ramp pattern to the supplied `buffer`. See UpdateBuffer()
  // for details.
  void UpdateInputBuffer(int16_t* buffer, int iteration, int size) {
  int start_value = (iteration * size) % SCHAR_MAX;
@@ -74,7 +74,7 @@ void UpdateInputBuffer(int16_t* buffer, int iteration, int size) {
  }
  // Action macro which verifies that the recorded 10ms chunk of audio data
- // (in |arg0|) contains the correct reference values even if they have been
+ // (in `arg0`) contains the correct reference values even if they have been
  // supplied using a buffer size that is smaller or larger than 10ms.
  // See VerifyBuffer() for details.
  ACTION_P2(VerifyInputBuffer, iteration, samples_per_10_ms) {


@@ -20,7 +20,7 @@ namespace webrtc {
  // Creates an AudioDeviceModule (ADM) for Windows based on the Core Audio API.
  // The creating thread must be a COM thread; otherwise nullptr will be returned.
- // By default |automatic_restart| is set to true and it results in support for
+ // By default `automatic_restart` is set to true and it results in support for
  // automatic restart of audio if e.g. the existing device is removed. If set to
  // false, no attempt to restart audio is performed under these conditions.
  //


@@ -48,10 +48,10 @@ class TestAudioDeviceModuleImpl
  : public webrtc_impl::AudioDeviceModuleDefault<TestAudioDeviceModule> {
  public:
  // Creates a new TestAudioDeviceModule. When capturing or playing, 10 ms audio
- // frames will be processed every 10ms / |speed|.
- // |capturer| is an object that produces audio data. Can be nullptr if this
+ // frames will be processed every 10ms / `speed`.
+ // `capturer` is an object that produces audio data. Can be nullptr if this
  // device is never used for recording.
- // |renderer| is an object that receives audio data that would have been
+ // `renderer` is an object that receives audio data that would have been
  // played out. Can be nullptr if this device is never used for playing.
  // Use one of the Create... functions to get these instances.
  TestAudioDeviceModuleImpl(TaskQueueFactory* task_queue_factory,
@@ -142,13 +142,13 @@ class TestAudioDeviceModuleImpl
  }
  // Blocks until the Renderer refuses to receive data.
- // Returns false if |timeout_ms| passes before that happens.
+ // Returns false if `timeout_ms` passes before that happens.
  bool WaitForPlayoutEnd(int timeout_ms = rtc::Event::kForever) override {
  return done_rendering_.Wait(timeout_ms);
  }
  // Blocks until the Recorder stops producing data.
- // Returns false if |timeout_ms| passes before that happens.
+ // Returns false if `timeout_ms` passes before that happens.
  bool WaitForRecordingEnd(int timeout_ms = rtc::Event::kForever) override {
  return done_capturing_.Wait(timeout_ms);
  }


@@ -42,7 +42,7 @@ class TestAudioDeviceModule : public AudioDeviceModule {
  virtual int SamplingFrequency() const = 0;
  // Returns the number of channels of captured audio data.
  virtual int NumChannels() const = 0;
- // Replaces the contents of |buffer| with 10ms of captured audio data
+ // Replaces the contents of `buffer` with 10ms of captured audio data
  // (see TestAudioDeviceModule::SamplesPerFrame). Returns true if the
  // capturer can keep producing data, or false when the capture finishes.
  virtual bool Capture(rtc::BufferT<int16_t>* buffer) = 0;
@@ -73,10 +73,10 @@ class TestAudioDeviceModule : public AudioDeviceModule {
  ~TestAudioDeviceModule() override {}
  // Creates a new TestAudioDeviceModule. When capturing or playing, 10 ms audio
- // frames will be processed every 10ms / |speed|.
- // |capturer| is an object that produces audio data. Can be nullptr if this
+ // frames will be processed every 10ms / `speed`.
+ // `capturer` is an object that produces audio data. Can be nullptr if this
  // device is never used for recording.
- // |renderer| is an object that receives audio data that would have been
+ // `renderer` is an object that receives audio data that would have been
  // played out. Can be nullptr if this device is never used for playing.
  // Use one of the Create... functions to get these instances.
  static rtc::scoped_refptr<TestAudioDeviceModule> Create(
@@ -85,9 +85,9 @@ class TestAudioDeviceModule : public AudioDeviceModule {
  std::unique_ptr<Renderer> renderer,
  float speed = 1);
- // Returns a Capturer instance that generates a signal of |num_channels|
+ // Returns a Capturer instance that generates a signal of `num_channels`
  // channels where every second frame is zero and every second frame is evenly
- // distributed random noise with max amplitude |max_amplitude|.
+ // distributed random noise with max amplitude `max_amplitude`.
  static std::unique_ptr<PulsedNoiseCapturer> CreatePulsedNoiseCapturer(
  int16_t max_amplitude,
  int sampling_frequency_in_hz,
@@ -109,7 +109,7 @@ class TestAudioDeviceModule : public AudioDeviceModule {
  // Returns a Capturer instance that gets its data from a file.
  // Automatically detects sample rate and num of channels.
- // |repeat| - if true, the file will be replayed from the start when we reach
+ // `repeat` - if true, the file will be replayed from the start when we reach
  // the end of file.
  static std::unique_ptr<Capturer> CreateWavFileReader(std::string filename,
  bool repeat = false);
@@ -140,10 +140,10 @@ class TestAudioDeviceModule : public AudioDeviceModule {
  bool Recording() const override = 0;
  // Blocks until the Renderer refuses to receive data.
- // Returns false if |timeout_ms| passes before that happens.
+ // Returns false if `timeout_ms` passes before that happens.
  virtual bool WaitForPlayoutEnd(int timeout_ms = rtc::Event::kForever) = 0;
  // Blocks until the Recorder stops producing data.
- // Returns false if |timeout_ms| passes before that happens.
+ // Returns false if `timeout_ms` passes before that happens.
  virtual bool WaitForRecordingEnd(int timeout_ms = rtc::Event::kForever) = 0;
  };


@@ -1169,7 +1169,7 @@ int32_t AudioDeviceLinuxPulse::StartPlayout() {
  _startPlay = true;
  }
- // Both |_startPlay| and |_playing| needs protction since they are also
+ // Both `_startPlay` and `_playing` needs protction since they are also
  // accessed on the playout thread.
  // The audio thread will signal when playout has started.


@@ -1365,7 +1365,7 @@ int32_t AudioDeviceMac::StopRecording() {
  } else {
  // We signal a stop for a shared device even when rendering has
  // not yet ended. This is to ensure the IOProc will return early as
- // intended (by checking |_recording|) before accessing
+ // intended (by checking `_recording`) before accessing
  // resources we free below (e.g. the capture converter).
  //
  // In the case of a shared devcie, the IOProc will verify
@@ -1476,7 +1476,7 @@ int32_t AudioDeviceMac::StopPlayout() {
  if (_playing && renderDeviceIsAlive == 1) {
  // We signal a stop for a shared device even when capturing has not
  // yet ended. This is to ensure the IOProc will return early as
- // intended (by checking |_playing|) before accessing resources we
+ // intended (by checking `_playing`) before accessing resources we
  // free below (e.g. the render converter).
  //
  // In the case of a shared device, the IOProc will verify capturing


@@ -3000,8 +3000,8 @@ DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() {
  dmoBuffer.pBuffer->AddRef();
  // Poll the DMO for AEC processed capture data. The DMO will
- // copy available data to |dmoBuffer|, and should only return
- // 10 ms frames. The value of |dwStatus| should be ignored.
+ // copy available data to `dmoBuffer`, and should only return
+ // 10 ms frames. The value of `dwStatus` should be ignored.
  hr = _dmo->ProcessOutput(0, 1, &dmoBuffer, &dwStatus);
  SAFE_RELEASE(dmoBuffer.pBuffer);
  dwStatus = dmoBuffer.dwStatus;


@@ -499,7 +499,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest {
  // The AudioDeviceBuffer (ADB) instance is needed for sending/receiving audio
  // to/from the WebRTC layer. Created and owned by this object. Used by
- // both |input_| and |output_| but they use orthogonal parts of the ADB.
+ // both `input_` and `output_` but they use orthogonal parts of the ADB.
  std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_;
  // Set to true after a successful call to Init(). Cleared by Terminate().


@@ -35,7 +35,7 @@ namespace {
  // TODO(henrika): more research is needed before we can enable low-latency.
  const bool kEnableLowLatencyIfSupported = false;
- // Each unit of reference time is 100 nanoseconds, hence |kReftimesPerSec|
+ // Each unit of reference time is 100 nanoseconds, hence `kReftimesPerSec`
  // corresponds to one second.
  // TODO(henrika): possibly add usage in Init().
  // const REFERENCE_TIME kReferenceTimesPerSecond = 10000000;
@@ -230,9 +230,9 @@ bool CoreAudioBase::IsDefaultCommunicationsDevice(int index) const {
  }
  bool CoreAudioBase::IsDefaultDeviceId(const std::string& device_id) const {
- // Returns true if |device_id| corresponds to the id of the default
+ // Returns true if `device_id` corresponds to the id of the default
  // device. Note that, if only one device is available (or if the user has not
- // explicitly set a default device), |device_id| will also math
+ // explicitly set a default device), `device_id` will also math
  // IsDefaultCommunicationsDeviceId().
  return (IsInput() &&
  (device_id == core_audio_utility::GetDefaultInputDeviceID())) ||
@@ -242,9 +242,9 @@ bool CoreAudioBase::IsDefaultDeviceId(const std::string& device_id) const {
  bool CoreAudioBase::IsDefaultCommunicationsDeviceId(
  const std::string& device_id) const {
- // Returns true if |device_id| corresponds to the id of the default
+ // Returns true if `device_id` corresponds to the id of the default
  // communication device. Note that, if only one device is available (or if
- // the user has not explicitly set a communication device), |device_id| will
+ // the user has not explicitly set a communication device), `device_id` will
  // also math IsDefaultDeviceId().
  return (IsInput() &&
  (device_id ==
@@ -341,9 +341,9 @@ bool CoreAudioBase::Init() {
  RTC_DCHECK(!audio_client_);
  RTC_DCHECK(!audio_session_control_.Get());
- // Use an existing combination of |device_index_| and |device_id_| to set
+ // Use an existing combination of `device_index_` and `device_id_` to set
  // parameters which are required to create an audio client. It is up to the
- // parent class to set |device_index_| and |device_id_|.
+ // parent class to set `device_index_` and `device_id_`.
  std::string device_id = AudioDeviceName::kDefaultDeviceId;
  ERole role = ERole();
  if (IsDefaultDevice(device_index_)) {
@@ -400,7 +400,7 @@ bool CoreAudioBase::Init() {
  return false;
  }
- // Define the output WAVEFORMATEXTENSIBLE format in |format_|.
+ // Define the output WAVEFORMATEXTENSIBLE format in `format_`.
  WAVEFORMATEX* format = &format_.Format;
  format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
  // Check the preferred channel configuration and request implicit channel
@@ -475,7 +475,7 @@ bool CoreAudioBase::Init() {
  // Initialize the audio stream between the client and the device in shared
  // mode using event-driven buffer handling. Also, using 0 as requested
  // buffer size results in a default (minimum) endpoint buffer size.
- // TODO(henrika): possibly increase |requested_buffer_size| to add
+ // TODO(henrika): possibly increase `requested_buffer_size` to add
  // robustness.
  const REFERENCE_TIME requested_buffer_size = 0;
  if (FAILED(core_audio_utility::SharedModeInitialize(
@@ -905,15 +905,15 @@ void CoreAudioBase::ThreadRun() {
  wait_array, false, INFINITE);
  switch (wait_result) {
  case WAIT_OBJECT_0 + 0:
- // |stop_event_| has been set.
+ // `stop_event_` has been set.
  streaming = false;
  break;
  case WAIT_OBJECT_0 + 1:
- // |restart_event_| has been set.
+ // `restart_event_` has been set.
  error = !HandleRestartEvent();
  break;
  case WAIT_OBJECT_0 + 2:
- // |audio_samples_event_| has been set.
+ // `audio_samples_event_` has been set.
  error = !on_data_callback_(device_frequency);
  break;
  default:

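The comments in the hunk above describe how the audio thread waits on three events (stop, restart, new audio samples) and dispatches on the wait result. A minimal sketch of that Win32 pattern, with illustrative handle names and the real restart and data-callback logic omitted:

#include <windows.h>

// Illustrative only: wait on the three events named in the comments above and
// dispatch in the same order (stop, restart, new audio samples).
void RunAudioThreadLoop(HANDLE stop_event,
                        HANDLE restart_event,
                        HANDLE audio_samples_event) {
  const HANDLE wait_array[] = {stop_event, restart_event, audio_samples_event};
  bool streaming = true;
  while (streaming) {
    DWORD wait_result =
        ::WaitForMultipleObjects(3, wait_array, FALSE, INFINITE);
    switch (wait_result) {
      case WAIT_OBJECT_0 + 0:  // `stop_event` was set.
        streaming = false;
        break;
      case WAIT_OBJECT_0 + 1:  // `restart_event` was set.
        // HandleRestartEvent() would run here in the real implementation.
        break;
      case WAIT_OBJECT_0 + 2:  // `audio_samples_event` was set.
        // on_data_callback_(device_frequency) would run here.
        break;
      default:
        streaming = false;  // Unexpected wait result; stop streaming.
        break;
    }
  }
}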

@ -63,7 +63,7 @@ class CoreAudioBase : public IAudioSessionEvents {
// Callback definition for notifications of run-time error messages. It can // Callback definition for notifications of run-time error messages. It can
// be called e.g. when an active audio device is removed and an audio stream // be called e.g. when an active audio device is removed and an audio stream
// is disconnected (|error| is then set to kStreamDisconnected). Both input // is disconnected (`error` is then set to kStreamDisconnected). Both input
// and output clients implement OnErrorCallback() and will trigger an // and output clients implement OnErrorCallback() and will trigger an
// internal restart sequence for kStreamDisconnected. // internal restart sequence for kStreamDisconnected.
// This method is currently always called on the audio thread. // This method is currently always called on the audio thread.
@ -103,13 +103,13 @@ class CoreAudioBase : public IAudioSessionEvents {
// Releases all allocated COM resources in the base class. // Releases all allocated COM resources in the base class.
void ReleaseCOMObjects(); void ReleaseCOMObjects();
// Returns number of active devices given the specified |direction_| set // Returns number of active devices given the specified `direction_` set
// by the parent (input or output). // by the parent (input or output).
int NumberOfActiveDevices() const; int NumberOfActiveDevices() const;
// Returns total number of enumerated audio devices which is the sum of all // Returns total number of enumerated audio devices which is the sum of all
// active devices plus two extra (one default and one default // active devices plus two extra (one default and one default
// communications). The value in |direction_| determines if capture or // communications). The value in `direction_` determines if capture or
// render devices are counted. // render devices are counted.
int NumberOfEnumeratedDevices() const; int NumberOfEnumeratedDevices() const;

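The comments above state that the enumerated device count is the number of active devices plus two extra entries (default and default communications). A small sketch of that relationship, expressed as a hypothetical free function rather than the class member:

// Hypothetical helper: total enumerated devices equals active devices plus the
// two virtual entries (default and default communications), assuming at least
// one real device exists.
int NumberOfEnumeratedDevicesSketch(int number_of_active_devices) {
  return number_of_active_devices > 0 ? number_of_active_devices + 2 : 0;
}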

@ -105,17 +105,17 @@ int CoreAudioInput::InitRecording() {
RTC_DCHECK(!audio_capture_client_); RTC_DCHECK(!audio_capture_client_);
// Creates an IAudioClient instance and stores the valid interface pointer in // Creates an IAudioClient instance and stores the valid interface pointer in
// |audio_client3_|, |audio_client2_|, or |audio_client_| depending on // `audio_client3_`, `audio_client2_`, or `audio_client_` depending on
// platform support. The base class will use optimal input parameters and do // platform support. The base class will use optimal input parameters and do
// an event driven shared mode initialization. The utilized format will be // an event driven shared mode initialization. The utilized format will be
// stored in |format_| and can be used for configuration and allocation of // stored in `format_` and can be used for configuration and allocation of
// audio buffers. // audio buffers.
if (!CoreAudioBase::Init()) { if (!CoreAudioBase::Init()) {
return -1; return -1;
} }
RTC_DCHECK(audio_client_); RTC_DCHECK(audio_client_);
// Configure the recording side of the audio device buffer using |format_| // Configure the recording side of the audio device buffer using `format_`
// after a trivial sanity check of the format structure. // after a trivial sanity check of the format structure.
RTC_DCHECK(audio_device_buffer_); RTC_DCHECK(audio_device_buffer_);
WAVEFORMATEX* format = &format_.Format; WAVEFORMATEX* format = &format_.Format;
@ -353,7 +353,7 @@ bool CoreAudioInput::OnDataCallback(uint64_t device_frequency) {
format_.Format.nBlockAlign * num_frames_to_read); format_.Format.nBlockAlign * num_frames_to_read);
RTC_DLOG(LS_WARNING) << "Captured audio is replaced by silence"; RTC_DLOG(LS_WARNING) << "Captured audio is replaced by silence";
} else { } else {
// Copy recorded audio in |audio_data| to the WebRTC sink using the // Copy recorded audio in `audio_data` to the WebRTC sink using the
// FineAudioBuffer object. // FineAudioBuffer object.
fine_audio_buffer_->DeliverRecordedData( fine_audio_buffer_->DeliverRecordedData(
rtc::MakeArrayView(reinterpret_cast<const int16_t*>(audio_data), rtc::MakeArrayView(reinterpret_cast<const int16_t*>(audio_data),
@ -397,13 +397,13 @@ absl::optional<int> CoreAudioInput::EstimateLatencyMillis(
if (!qpc_to_100ns_) { if (!qpc_to_100ns_) {
return absl::nullopt; return absl::nullopt;
} }
// Input parameter |capture_time_100ns| contains the performance counter at // Input parameter `capture_time_100ns` contains the performance counter at
// the time that the audio endpoint device recorded the device position of // the time that the audio endpoint device recorded the device position of
// the first audio frame in the data packet converted into 100ns units. // the first audio frame in the data packet converted into 100ns units.
// We derive a delay estimate by: // We derive a delay estimate by:
// - sampling the current performance counter (qpc_now_raw), // - sampling the current performance counter (qpc_now_raw),
// - converting it into 100ns time units (now_time_100ns), and // - converting it into 100ns time units (now_time_100ns), and
// - subtracting |capture_time_100ns| from now_time_100ns. // - subtracting `capture_time_100ns` from now_time_100ns.
LARGE_INTEGER perf_counter_now = {}; LARGE_INTEGER perf_counter_now = {};
if (!::QueryPerformanceCounter(&perf_counter_now)) { if (!::QueryPerformanceCounter(&perf_counter_now)) {
return absl::nullopt; return absl::nullopt;

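The comments above outline the capture-latency estimate: sample the performance counter, convert it to 100 ns units, and subtract the capture time reported by the audio endpoint. A simplified sketch of that arithmetic, written as a standalone helper that derives the conversion factor from the QPC frequency on every call:

#include <windows.h>

#include <cstdint>

// Illustrative sketch of the estimate described above. `capture_time_100ns` is
// the endpoint-reported capture time in 100 ns units.
double EstimateCaptureLatencyMillis(uint64_t capture_time_100ns) {
  LARGE_INTEGER frequency = {};
  LARGE_INTEGER now = {};
  if (!::QueryPerformanceFrequency(&frequency) ||
      !::QueryPerformanceCounter(&now)) {
    return 0.0;
  }
  // One second corresponds to 10,000,000 units of 100 ns.
  const double qpc_to_100ns = 10000000.0 / frequency.QuadPart;
  const double now_time_100ns = now.QuadPart * qpc_to_100ns;
  const double delay_100ns = now_time_100ns - capture_time_100ns;
  return delay_100ns / 10000.0;  // 100 ns units -> milliseconds.
}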

@ -102,17 +102,17 @@ int CoreAudioOutput::InitPlayout() {
RTC_DCHECK(!audio_render_client_); RTC_DCHECK(!audio_render_client_);
// Creates an IAudioClient instance and stores the valid interface pointer in // Creates an IAudioClient instance and stores the valid interface pointer in
// |audio_client3_|, |audio_client2_|, or |audio_client_| depending on // `audio_client3_`, `audio_client2_`, or `audio_client_` depending on
// platform support. The base class will use optimal output parameters and do // platform support. The base class will use optimal output parameters and do
// an event driven shared mode initialization. The utilized format will be // an event driven shared mode initialization. The utilized format will be
// stored in |format_| and can be used for configuration and allocation of // stored in `format_` and can be used for configuration and allocation of
// audio buffers. // audio buffers.
if (!CoreAudioBase::Init()) { if (!CoreAudioBase::Init()) {
return -1; return -1;
} }
RTC_DCHECK(audio_client_); RTC_DCHECK(audio_client_);
// Configure the playout side of the audio device buffer using |format_| // Configure the playout side of the audio device buffer using `format_`
// after a trivial sanity check of the format structure. // after a trivial sanity check of the format structure.
RTC_DCHECK(audio_device_buffer_); RTC_DCHECK(audio_device_buffer_);
WAVEFORMATEX* format = &format_.Format; WAVEFORMATEX* format = &format_.Format;
@ -334,7 +334,7 @@ bool CoreAudioOutput::OnDataCallback(uint64_t device_frequency) {
} }
// Get audio data from WebRTC and write it to the allocated buffer in // Get audio data from WebRTC and write it to the allocated buffer in
// |audio_data|. The playout latency is not updated for each callback. // `audio_data`. The playout latency is not updated for each callback.
fine_audio_buffer_->GetPlayoutData( fine_audio_buffer_->GetPlayoutData(
rtc::MakeArrayView(reinterpret_cast<int16_t*>(audio_data), rtc::MakeArrayView(reinterpret_cast<int16_t*>(audio_data),
num_requested_frames * format_.Format.nChannels), num_requested_frames * format_.Format.nChannels),
@ -360,7 +360,7 @@ int CoreAudioOutput::EstimateOutputLatencyMillis(uint64_t device_frequency) {
UINT64 position = 0; UINT64 position = 0;
UINT64 qpc_position = 0; UINT64 qpc_position = 0;
int delay_ms = 0; int delay_ms = 0;
// Get the device position through output parameter |position|. This is the // Get the device position through output parameter `position`. This is the
// stream position of the sample that is currently playing through the // stream position of the sample that is currently playing through the
// speakers. // speakers.
_com_error error = audio_clock_->GetPosition(&position, &qpc_position); _com_error error = audio_clock_->GetPosition(&position, &qpc_position);

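The comment above notes that IAudioClock::GetPosition() returns the stream position of the sample currently playing. One way to turn that into a playout-latency estimate is to compare it with the amount of audio already written to the device; the inputs below are illustrative parameters, not the actual member variables:

#include <cstdint>

// Hedged sketch: latency is the audio written to the device but not yet
// played. `position` and `device_frequency` would come from IAudioClock in
// the real code; `num_frames_written` and `sample_rate` are assumed inputs.
int EstimatePlayoutLatencyMillis(uint64_t num_frames_written,
                                 int sample_rate,
                                 uint64_t position,
                                 uint64_t device_frequency) {
  if (sample_rate <= 0 || device_frequency == 0) {
    return 0;
  }
  const double written_seconds =
      static_cast<double>(num_frames_written) / sample_rate;
  const double played_seconds =
      static_cast<double>(position) / device_frequency;
  const double delay_seconds = written_seconds - played_seconds;
  return delay_seconds > 0.0 ? static_cast<int>(delay_seconds * 1000.0 + 0.5)
                             : 0;
}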

@ -38,9 +38,9 @@ using core_audio_utility::ErrorToString;
// Converts from channel mask to list of included channels. // Converts from channel mask to list of included channels.
// Each audio data format contains channels for one or more of the positions // Each audio data format contains channels for one or more of the positions
// listed below. The number of channels simply equals the number of nonzero // listed below. The number of channels simply equals the number of nonzero
// flag bits in the |channel_mask|. The relative positions of the channels // flag bits in the `channel_mask`. The relative positions of the channels
// within each block of audio data always follow the same relative ordering // within each block of audio data always follow the same relative ordering
// as the flag bits in the table below. For example, if |channel_mask| contains // as the flag bits in the table below. For example, if `channel_mask` contains
// the value 0x00000033, the format defines four audio channels that are // the value 0x00000033, the format defines four audio channels that are
// assigned for playback to the front-left, front-right, back-left, // assigned for playback to the front-left, front-right, back-left,
// and back-right speakers, respectively. The channel data should be interleaved // and back-right speakers, respectively. The channel data should be interleaved
@ -278,8 +278,8 @@ bool IsDeviceActive(IMMDevice* device) {
return SUCCEEDED(device->GetState(&state)) && (state & DEVICE_STATE_ACTIVE); return SUCCEEDED(device->GetState(&state)) && (state & DEVICE_STATE_ACTIVE);
} }
// Retrieve an audio device specified by |device_id| or a default device // Retrieve an audio device specified by `device_id` or a default device
// specified by data-flow direction and role if |device_id| is default. // specified by data-flow direction and role if `device_id` is default.
ComPtr<IMMDevice> CreateDeviceInternal(const std::string& device_id, ComPtr<IMMDevice> CreateDeviceInternal(const std::string& device_id,
EDataFlow data_flow, EDataFlow data_flow,
ERole role) { ERole role) {
@ -500,7 +500,7 @@ bool GetDeviceNamesInternal(EDataFlow data_flow,
} }
// Loop over all active devices and add friendly name and unique id to the // Loop over all active devices and add friendly name and unique id to the
// |device_names| queue. For now, devices are added at indexes 0, 1, ..., N-1 // `device_names` queue. For now, devices are added at indexes 0, 1, ..., N-1
// but they will be moved to 2,3,..., N+1 at the next stage when default and // but they will be moved to 2,3,..., N+1 at the next stage when default and
// default communication devices are added at index 0 and 1. // default communication devices are added at index 0 and 1.
ComPtr<IMMDevice> audio_device; ComPtr<IMMDevice> audio_device;
@ -611,7 +611,7 @@ HRESULT GetPreferredAudioParametersInternal(IAudioClient* client,
return hr; return hr;
int sample_rate = mix_format.Format.nSamplesPerSec; int sample_rate = mix_format.Format.nSamplesPerSec;
// Override default sample rate if |fixed_sample_rate| is set and different // Override default sample rate if `fixed_sample_rate` is set and different
// from the default rate. // from the default rate.
if (fixed_sample_rate > 0 && fixed_sample_rate != sample_rate) { if (fixed_sample_rate > 0 && fixed_sample_rate != sample_rate) {
RTC_DLOG(INFO) << "Using fixed sample rate instead of the preferred: " RTC_DLOG(INFO) << "Using fixed sample rate instead of the preferred: "
@ -909,7 +909,7 @@ HRESULT SetClientProperties(IAudioClient2* client) {
props.eCategory = AudioCategory_Communications; props.eCategory = AudioCategory_Communications;
// Hardware-offloaded audio processing allows the main audio processing tasks // Hardware-offloaded audio processing allows the main audio processing tasks
// to be performed outside the computer's main CPU. Check support and log the // to be performed outside the computer's main CPU. Check support and log the
// result but hard-code |bIsOffload| to FALSE for now. // result but hard-code `bIsOffload` to FALSE for now.
// TODO(henrika): evaluate hardware-offloading. Might complicate usage of // TODO(henrika): evaluate hardware-offloading. Might complicate usage of
// IAudioClient::GetMixFormat(). // IAudioClient::GetMixFormat().
BOOL supports_offload = FALSE; BOOL supports_offload = FALSE;
@ -989,7 +989,7 @@ HRESULT GetSharedModeMixFormat(IAudioClient* client,
// The GetMixFormat method retrieves the stream format that the audio engine // The GetMixFormat method retrieves the stream format that the audio engine
// uses for its internal processing of shared-mode streams. The method // uses for its internal processing of shared-mode streams. The method
// allocates the storage for the structure and this memory will be released // allocates the storage for the structure and this memory will be released
// when |mix_format| goes out of scope. The GetMixFormat method retrieves a // when `mix_format` goes out of scope. The GetMixFormat method retrieves a
// format descriptor that is in the form of a WAVEFORMATEXTENSIBLE structure // format descriptor that is in the form of a WAVEFORMATEXTENSIBLE structure
// instead of a standalone WAVEFORMATEX structure. The method outputs a // instead of a standalone WAVEFORMATEX structure. The method outputs a
// pointer to the WAVEFORMATEX structure that is embedded at the start of // pointer to the WAVEFORMATEX structure that is embedded at the start of
@ -1017,7 +1017,7 @@ HRESULT GetSharedModeMixFormat(IAudioClient* client,
return AUDCLNT_E_UNSUPPORTED_FORMAT; return AUDCLNT_E_UNSUPPORTED_FORMAT;
} }
// Log a warning for the rare case where |mix_format| only contains a // Log a warning for the rare case where `mix_format` only contains a
// stand-alone WAVEFORMATEX structure but don't return. // stand-alone WAVEFORMATEX structure but don't return.
if (!wrapped_format.IsExtensible()) { if (!wrapped_format.IsExtensible()) {
RTC_DLOG(WARNING) RTC_DLOG(WARNING)
@ -1079,8 +1079,8 @@ HRESULT GetDevicePeriod(IAudioClient* client,
REFERENCE_TIME* device_period) { REFERENCE_TIME* device_period) {
RTC_DLOG(INFO) << "GetDevicePeriod"; RTC_DLOG(INFO) << "GetDevicePeriod";
RTC_DCHECK(client); RTC_DCHECK(client);
// The |default_period| parameter specifies the default scheduling period // The `default_period` parameter specifies the default scheduling period
// for a shared-mode stream. The |minimum_period| parameter specifies the // for a shared-mode stream. The `minimum_period` parameter specifies the
// minimum scheduling period for an exclusive-mode stream. // minimum scheduling period for an exclusive-mode stream.
// The time is expressed in 100-nanosecond units. // The time is expressed in 100-nanosecond units.
REFERENCE_TIME default_period = 0; REFERENCE_TIME default_period = 0;
@ -1203,8 +1203,8 @@ HRESULT SharedModeInitialize(IAudioClient* client,
} }
RTC_DLOG(INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags); RTC_DLOG(INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags);
// Initialize the shared mode client for minimal delay if |buffer_duration| // Initialize the shared mode client for minimal delay if `buffer_duration`
// is 0 or possibly a higher delay (more robust) if |buffer_duration| is // is 0 or possibly a higher delay (more robust) if `buffer_duration` is
// larger than 0. The actual size is given by IAudioClient::GetBufferSize(). // larger than 0. The actual size is given by IAudioClient::GetBufferSize().
_com_error error = client->Initialize( _com_error error = client->Initialize(
AUDCLNT_SHAREMODE_SHARED, stream_flags, buffer_duration, 0, AUDCLNT_SHAREMODE_SHARED, stream_flags, buffer_duration, 0,
@ -1294,7 +1294,7 @@ HRESULT SharedModeInitializeLowLatency(IAudioClient3* client,
// Initialize the shared mode client for lowest possible latency. // Initialize the shared mode client for lowest possible latency.
// It is assumed that GetSharedModeEnginePeriod() has been used to query the // It is assumed that GetSharedModeEnginePeriod() has been used to query the
// smallest possible engine period and that it is given by |period_in_frames|. // smallest possible engine period and that it is given by `period_in_frames`.
_com_error error = client->InitializeSharedAudioStream( _com_error error = client->InitializeSharedAudioStream(
stream_flags, period_in_frames, stream_flags, period_in_frames,
reinterpret_cast<const WAVEFORMATEX*>(format), nullptr); reinterpret_cast<const WAVEFORMATEX*>(format), nullptr);

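The comment at the top of the file above defines the channel count as the number of nonzero flag bits in the channel mask, with 0x00000033 mapping to four channels (front-left, front-right, back-left, back-right). A small sketch of that rule:

#include <cstdint>

// Counts the set bits in `channel_mask`; each set bit is one channel.
// For example, ChannelCountFromMask(0x00000033) returns 4.
int ChannelCountFromMask(uint32_t channel_mask) {
  int channels = 0;
  while (channel_mask != 0) {
    channels += channel_mask & 1u;
    channel_mask >>= 1;
  }
  return channels;
}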

@ -34,7 +34,7 @@ namespace webrtc {
namespace webrtc_win { namespace webrtc_win {
// Utility class which registers a thread with MMCSS in the constructor and // Utility class which registers a thread with MMCSS in the constructor and
// deregisters MMCSS in the destructor. The task name is given by |task_name|. // deregisters MMCSS in the destructor. The task name is given by `task_name`.
// The Multimedia Class Scheduler service (MMCSS) enables multimedia // The Multimedia Class Scheduler service (MMCSS) enables multimedia
// applications to ensure that their time-sensitive processing receives // applications to ensure that their time-sensitive processing receives
// prioritized access to CPU resources without denying CPU resources to // prioritized access to CPU resources without denying CPU resources to
@ -84,7 +84,7 @@ class ScopedMMCSSRegistration {
explicit ScopedMMCSSRegistration(const wchar_t* task_name) { explicit ScopedMMCSSRegistration(const wchar_t* task_name) {
RTC_DLOG(INFO) << "ScopedMMCSSRegistration: " << rtc::ToUtf8(task_name); RTC_DLOG(INFO) << "ScopedMMCSSRegistration: " << rtc::ToUtf8(task_name);
// Register the calling thread with MMCSS for the supplied |task_name|. // Register the calling thread with MMCSS for the supplied `task_name`.
DWORD mmcss_task_index = 0; DWORD mmcss_task_index = 0;
mmcss_handle_ = AvSetMmThreadCharacteristicsW(task_name, &mmcss_task_index); mmcss_handle_ = AvSetMmThreadCharacteristicsW(task_name, &mmcss_task_index);
if (mmcss_handle_ == nullptr) { if (mmcss_handle_ == nullptr) {
@ -304,7 +304,7 @@ bool IsMMCSSSupported();
// Header file Mmdeviceapi.h defines the interfaces in the MMDevice API. // Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
// Number of active audio devices in the specified data flow direction. // Number of active audio devices in the specified data flow direction.
// Set |data_flow| to eAll to retrieve the total number of active audio // Set `data_flow` to eAll to retrieve the total number of active audio
// devices. // devices.
int NumberOfActiveDevices(EDataFlow data_flow); int NumberOfActiveDevices(EDataFlow data_flow);
@ -327,7 +327,7 @@ std::string GetCommunicationsInputDeviceID();
std::string GetCommunicationsOutputDeviceID(); std::string GetCommunicationsOutputDeviceID();
// Creates an IMMDevice interface corresponding to the unique device id in // Creates an IMMDevice interface corresponding to the unique device id in
// |device_id|, or by data-flow direction and role if |device_id| is set to // `device_id`, or by data-flow direction and role if `device_id` is set to
// AudioDeviceName::kDefaultDeviceId. // AudioDeviceName::kDefaultDeviceId.
Microsoft::WRL::ComPtr<IMMDevice> CreateDevice(const std::string& device_id, Microsoft::WRL::ComPtr<IMMDevice> CreateDevice(const std::string& device_id,
EDataFlow data_flow, EDataFlow data_flow,
@ -339,8 +339,8 @@ Microsoft::WRL::ComPtr<IMMDevice> CreateDevice(const std::string& device_id,
webrtc::AudioDeviceName GetDeviceName(IMMDevice* device); webrtc::AudioDeviceName GetDeviceName(IMMDevice* device);
// Gets the user-friendly name of the endpoint device which is represented // Gets the user-friendly name of the endpoint device which is represented
// by a unique id in |device_id|, or by data-flow direction and role if // by a unique id in `device_id`, or by data-flow direction and role if
// |device_id| is set to AudioDeviceName::kDefaultDeviceId. // `device_id` is set to AudioDeviceName::kDefaultDeviceId.
std::string GetFriendlyName(const std::string& device_id, std::string GetFriendlyName(const std::string& device_id,
EDataFlow data_flow, EDataFlow data_flow,
ERole role); ERole role);
@ -349,11 +349,11 @@ std::string GetFriendlyName(const std::string& device_id,
EDataFlow GetDataFlow(IMMDevice* device); EDataFlow GetDataFlow(IMMDevice* device);
// Enumerates all input devices and adds the names (friendly name and unique // Enumerates all input devices and adds the names (friendly name and unique
// device id) to the list in |device_names|. // device id) to the list in `device_names`.
bool GetInputDeviceNames(webrtc::AudioDeviceNames* device_names); bool GetInputDeviceNames(webrtc::AudioDeviceNames* device_names);
// Enumerates all output devices and adds the names (friendly name and unique // Enumerates all output devices and adds the names (friendly name and unique
// device id) to the list in |device_names|. // device id) to the list in `device_names`.
bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names); bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names);
// The Windows Audio Session API (WASAPI) enables client applications to // The Windows Audio Session API (WASAPI) enables client applications to
@ -361,18 +361,18 @@ bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names);
// device. Header files Audioclient.h and Audiopolicy.h define the WASAPI // device. Header files Audioclient.h and Audiopolicy.h define the WASAPI
// interfaces. // interfaces.
// Creates an IAudioSessionManager2 interface for the specified |device|. // Creates an IAudioSessionManager2 interface for the specified `device`.
// This interface provides access to e.g. the IAudioSessionEnumerator // This interface provides access to e.g. the IAudioSessionEnumerator
Microsoft::WRL::ComPtr<IAudioSessionManager2> CreateSessionManager2( Microsoft::WRL::ComPtr<IAudioSessionManager2> CreateSessionManager2(
IMMDevice* device); IMMDevice* device);
// Creates an IAudioSessionEnumerator interface for the specified |device|. // Creates an IAudioSessionEnumerator interface for the specified `device`.
// The client can use the interface to enumerate audio sessions on the audio // The client can use the interface to enumerate audio sessions on the audio
// device // device
Microsoft::WRL::ComPtr<IAudioSessionEnumerator> CreateSessionEnumerator( Microsoft::WRL::ComPtr<IAudioSessionEnumerator> CreateSessionEnumerator(
IMMDevice* device); IMMDevice* device);
// Number of active audio sessions for the given |device|. Expired or inactive // Number of active audio sessions for the given `device`. Expired or inactive
// sessions are not included. // sessions are not included.
int NumberOfActiveSessions(IMMDevice* device); int NumberOfActiveSessions(IMMDevice* device);
@ -387,15 +387,15 @@ Microsoft::WRL::ComPtr<IAudioClient3>
CreateClient3(const std::string& device_id, EDataFlow data_flow, ERole role); CreateClient3(const std::string& device_id, EDataFlow data_flow, ERole role);
// Sets the AudioCategory_Communications category. Should be called before // Sets the AudioCategory_Communications category. Should be called before
// GetSharedModeMixFormat() and IsFormatSupported(). The |client| argument must // GetSharedModeMixFormat() and IsFormatSupported(). The `client` argument must
// be an IAudioClient2 or IAudioClient3 interface pointer, hence only supported // be an IAudioClient2 or IAudioClient3 interface pointer, hence only supported
// on Windows 8 and above. // on Windows 8 and above.
// TODO(henrika): evaluate effect (if any). // TODO(henrika): evaluate effect (if any).
HRESULT SetClientProperties(IAudioClient2* client); HRESULT SetClientProperties(IAudioClient2* client);
// Returns the buffer size limits of the hardware audio engine in // Returns the buffer size limits of the hardware audio engine in
// 100-nanosecond units given a specified |format|. Does not require prior // 100-nanosecond units given a specified `format`. Does not require prior
// audio stream initialization. The |client| argument must be an IAudioClient2 // audio stream initialization. The `client` argument must be an IAudioClient2
// or IAudioClient3 interface pointer, hence only supported on Windows 8 and // or IAudioClient3 interface pointer, hence only supported on Windows 8 and
// above. // above.
// TODO(henrika): always fails with AUDCLNT_E_OFFLOAD_MODE_ONLY. // TODO(henrika): always fails with AUDCLNT_E_OFFLOAD_MODE_ONLY.
@ -412,29 +412,29 @@ HRESULT GetBufferSizeLimits(IAudioClient2* client,
HRESULT GetSharedModeMixFormat(IAudioClient* client, HRESULT GetSharedModeMixFormat(IAudioClient* client,
WAVEFORMATEXTENSIBLE* format); WAVEFORMATEXTENSIBLE* format);
// Returns true if the specified |client| supports the format in |format| // Returns true if the specified `client` supports the format in `format`
// for the given |share_mode| (shared or exclusive). The client can call this // for the given `share_mode` (shared or exclusive). The client can call this
// method before calling IAudioClient::Initialize. // method before calling IAudioClient::Initialize.
bool IsFormatSupported(IAudioClient* client, bool IsFormatSupported(IAudioClient* client,
AUDCLNT_SHAREMODE share_mode, AUDCLNT_SHAREMODE share_mode,
const WAVEFORMATEXTENSIBLE* format); const WAVEFORMATEXTENSIBLE* format);
// For a shared-mode stream, the audio engine periodically processes the // For a shared-mode stream, the audio engine periodically processes the
// data in the endpoint buffer at the period obtained in |device_period|. // data in the endpoint buffer at the period obtained in `device_period`.
// For an exclusive mode stream, |device_period| corresponds to the minimum // For an exclusive mode stream, `device_period` corresponds to the minimum
// time interval between successive processing by the endpoint device. // time interval between successive processing by the endpoint device.
// This period plus the stream latency between the buffer and endpoint device // This period plus the stream latency between the buffer and endpoint device
// represents the minimum possible latency that an audio application can // represents the minimum possible latency that an audio application can
// achieve. The time in |device_period| is expressed in 100-nanosecond units. // achieve. The time in `device_period` is expressed in 100-nanosecond units.
HRESULT GetDevicePeriod(IAudioClient* client, HRESULT GetDevicePeriod(IAudioClient* client,
AUDCLNT_SHAREMODE share_mode, AUDCLNT_SHAREMODE share_mode,
REFERENCE_TIME* device_period); REFERENCE_TIME* device_period);
// Returns the range of periodicities supported by the engine for the specified // Returns the range of periodicities supported by the engine for the specified
// stream |format|. The periodicity of the engine is the rate at which the // stream `format`. The periodicity of the engine is the rate at which the
// engine wakes an event-driven audio client to transfer audio data to or from // engine wakes an event-driven audio client to transfer audio data to or from
// the engine. Can be used for low-latency support on some devices. // the engine. Can be used for low-latency support on some devices.
// The |client| argument must be an IAudioClient3 interface pointer, hence only // The `client` argument must be an IAudioClient3 interface pointer, hence only
// supported on Windows 10 and above. // supported on Windows 10 and above.
HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3, HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3,
const WAVEFORMATEXTENSIBLE* format, const WAVEFORMATEXTENSIBLE* format,
@ -443,14 +443,14 @@ HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3,
uint32_t* min_period_in_frames, uint32_t* min_period_in_frames,
uint32_t* max_period_in_frames); uint32_t* max_period_in_frames);
// Get the preferred audio parameters for the given |client| corresponding to // Get the preferred audio parameters for the given `client` corresponding to
// the stream format that the audio engine uses for its internal processing of // the stream format that the audio engine uses for its internal processing of
// shared-mode streams. The acquired values should only be utilized for shared // shared-mode streams. The acquired values should only be utilized for shared
// mode streams since there are no preferred settings for an exclusive mode // mode streams since there are no preferred settings for an exclusive mode
// stream. // stream.
HRESULT GetPreferredAudioParameters(IAudioClient* client, HRESULT GetPreferredAudioParameters(IAudioClient* client,
webrtc::AudioParameters* params); webrtc::AudioParameters* params);
// As above but override the preferred sample rate and use |sample_rate| // As above but override the preferred sample rate and use `sample_rate`
// instead. Intended mainly for testing purposes and in combination with rate // instead. Intended mainly for testing purposes and in combination with rate
// conversion. // conversion.
HRESULT GetPreferredAudioParameters(IAudioClient* client, HRESULT GetPreferredAudioParameters(IAudioClient* client,
@ -461,20 +461,20 @@ HRESULT GetPreferredAudioParameters(IAudioClient* client,
// the client must initialize it once, and only once, to initialize the audio // the client must initialize it once, and only once, to initialize the audio
// stream between the client and the device. In shared mode, the client // stream between the client and the device. In shared mode, the client
// connects indirectly through the audio engine which does the mixing. // connects indirectly through the audio engine which does the mixing.
// If a valid event is provided in |event_handle|, the client will be // If a valid event is provided in `event_handle`, the client will be
// initialized for event-driven buffer handling. If |event_handle| is set to // initialized for event-driven buffer handling. If `event_handle` is set to
// nullptr, event-driven buffer handling is not utilized. To achieve the // nullptr, event-driven buffer handling is not utilized. To achieve the
// minimum stream latency between the client application and audio endpoint // minimum stream latency between the client application and audio endpoint
// device, set |buffer_duration| to 0. A client has the option of requesting a // device, set `buffer_duration` to 0. A client has the option of requesting a
// buffer size that is larger than what is strictly necessary to make timing // buffer size that is larger than what is strictly necessary to make timing
// glitches rare or nonexistent. Increasing the buffer size does not necessarily // glitches rare or nonexistent. Increasing the buffer size does not necessarily
// increase the stream latency. Each unit of reference time is 100 nanoseconds. // increase the stream latency. Each unit of reference time is 100 nanoseconds.
// The |auto_convert_pcm| parameter can be used for testing purposes to ensure // The `auto_convert_pcm` parameter can be used for testing purposes to ensure
// that the sample rate of the client side does not have to match the audio // that the sample rate of the client side does not have to match the audio
// engine mix format. If |auto_convert_pcm| is set to true, a rate converter // engine mix format. If `auto_convert_pcm` is set to true, a rate converter
// will be inserted to convert between the sample rate in |format| and the // will be inserted to convert between the sample rate in `format` and the
// preferred rate given by GetPreferredAudioParameters(). // preferred rate given by GetPreferredAudioParameters().
// The output parameter |endpoint_buffer_size| contains the size of the // The output parameter `endpoint_buffer_size` contains the size of the
// endpoint buffer and it is expressed as the number of audio frames the // endpoint buffer and it is expressed as the number of audio frames the
// buffer can hold. // buffer can hold.
HRESULT SharedModeInitialize(IAudioClient* client, HRESULT SharedModeInitialize(IAudioClient* client,
@ -486,7 +486,7 @@ HRESULT SharedModeInitialize(IAudioClient* client,
// Works as SharedModeInitialize() but adds support for using smaller engine // Works as SharedModeInitialize() but adds support for using smaller engine
// periods than the default period. // periods than the default period.
// The |client| argument must be an IAudioClient3 interface pointer, hence only // The `client` argument must be an IAudioClient3 interface pointer, hence only
// supported on Windows 10 and above. // supported on Windows 10 and above.
// TODO(henrika): can probably be merged into SharedModeInitialize() to avoid // TODO(henrika): can probably be merged into SharedModeInitialize() to avoid
// duplicating code. Keeping as separate method for now until decided if we // duplicating code. Keeping as separate method for now until decided if we
@ -499,43 +499,43 @@ HRESULT SharedModeInitializeLowLatency(IAudioClient3* client,
uint32_t* endpoint_buffer_size); uint32_t* endpoint_buffer_size);
// Creates an IAudioRenderClient client for an existing IAudioClient given by // Creates an IAudioRenderClient client for an existing IAudioClient given by
// |client|. The IAudioRenderClient interface enables a client to write // `client`. The IAudioRenderClient interface enables a client to write
// output data to a rendering endpoint buffer. The methods in this interface // output data to a rendering endpoint buffer. The methods in this interface
// manage the movement of data packets that contain audio-rendering data. // manage the movement of data packets that contain audio-rendering data.
Microsoft::WRL::ComPtr<IAudioRenderClient> CreateRenderClient( Microsoft::WRL::ComPtr<IAudioRenderClient> CreateRenderClient(
IAudioClient* client); IAudioClient* client);
// Creates an IAudioCaptureClient client for an existing IAudioClient given by // Creates an IAudioCaptureClient client for an existing IAudioClient given by
// |client|. The IAudioCaptureClient interface enables a client to read // `client`. The IAudioCaptureClient interface enables a client to read
// input data from a capture endpoint buffer. The methods in this interface // input data from a capture endpoint buffer. The methods in this interface
// manage the movement of data packets that contain capture data. // manage the movement of data packets that contain capture data.
Microsoft::WRL::ComPtr<IAudioCaptureClient> CreateCaptureClient( Microsoft::WRL::ComPtr<IAudioCaptureClient> CreateCaptureClient(
IAudioClient* client); IAudioClient* client);
// Creates an IAudioClock interface for an existing IAudioClient given by // Creates an IAudioClock interface for an existing IAudioClient given by
// |client|. The IAudioClock interface enables a client to monitor a stream's // `client`. The IAudioClock interface enables a client to monitor a stream's
// data rate and the current position in the stream. // data rate and the current position in the stream.
Microsoft::WRL::ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client); Microsoft::WRL::ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client);
// Creates an AudioSessionControl interface for an existing IAudioClient given // Creates an AudioSessionControl interface for an existing IAudioClient given
// by |client|. The IAudioSessionControl interface enables a client to configure the // by `client`. The IAudioSessionControl interface enables a client to configure the
// control parameters for an audio session and to monitor events in the session. // control parameters for an audio session and to monitor events in the session.
Microsoft::WRL::ComPtr<IAudioSessionControl> CreateAudioSessionControl( Microsoft::WRL::ComPtr<IAudioSessionControl> CreateAudioSessionControl(
IAudioClient* client); IAudioClient* client);
// Creates an ISimpleAudioVolume interface for an existing IAudioClient given by // Creates an ISimpleAudioVolume interface for an existing IAudioClient given by
// |client|. This interface enables a client to control the master volume level // `client`. This interface enables a client to control the master volume level
// of an active audio session. // of an active audio session.
Microsoft::WRL::ComPtr<ISimpleAudioVolume> CreateSimpleAudioVolume( Microsoft::WRL::ComPtr<ISimpleAudioVolume> CreateSimpleAudioVolume(
IAudioClient* client); IAudioClient* client);
// Fills up the endpoint rendering buffer with silence for an existing // Fills up the endpoint rendering buffer with silence for an existing
// IAudioClient given by |client| and a corresponding IAudioRenderClient // IAudioClient given by `client` and a corresponding IAudioRenderClient
// given by |render_client|. // given by `render_client`.
bool FillRenderEndpointBufferWithSilence(IAudioClient* client, bool FillRenderEndpointBufferWithSilence(IAudioClient* client,
IAudioRenderClient* render_client); IAudioRenderClient* render_client);
// Prints/logs all fields of the format structure in |format|. // Prints/logs all fields of the format structure in `format`.
// Also supports extended versions (WAVEFORMATEXTENSIBLE). // Also supports extended versions (WAVEFORMATEXTENSIBLE).
std::string WaveFormatToString(const WaveFormatWrapper format); std::string WaveFormatToString(const WaveFormatWrapper format);
@ -543,8 +543,8 @@ std::string WaveFormatToString(const WaveFormatWrapper format);
// generic webrtc::TimeDelta which then can be converted to any time unit. // generic webrtc::TimeDelta which then can be converted to any time unit.
webrtc::TimeDelta ReferenceTimeToTimeDelta(REFERENCE_TIME time); webrtc::TimeDelta ReferenceTimeToTimeDelta(REFERENCE_TIME time);
// Converts size expressed in number of audio frames, |num_frames|, into // Converts size expressed in number of audio frames, `num_frames`, into
// milliseconds given a specified |sample_rate|. // milliseconds given a specified `sample_rate`.
double FramesToMilliseconds(uint32_t num_frames, uint16_t sample_rate); double FramesToMilliseconds(uint32_t num_frames, uint16_t sample_rate);
// Converts a COM error into a human-readable string. // Converts a COM error into a human-readable string.

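The comments near the end of the header above describe two conversions: a REFERENCE_TIME in 100-nanosecond units into a time value, and a frame count plus sample rate into milliseconds. Minimal sketches of both, returning plain doubles instead of webrtc::TimeDelta for brevity:

#include <cstdint>

// 1 ms equals 10,000 units of 100 ns.
double ReferenceTimeToMilliseconds(int64_t reference_time_100ns) {
  return reference_time_100ns / 10000.0;
}

// `num_frames` at `sample_rate` frames per second lasts this many milliseconds.
double FramesToMillisecondsSketch(uint32_t num_frames, uint16_t sample_rate) {
  return sample_rate > 0 ? 1000.0 * num_frames / sample_rate : 0.0;
}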

@ -107,7 +107,7 @@ TEST_F(CoreAudioUtilityWinTest, WaveFormatWrapper) {
TEST_F(CoreAudioUtilityWinTest, WaveFormatWrapperExtended) { TEST_F(CoreAudioUtilityWinTest, WaveFormatWrapperExtended) {
// Use default constructor for WAVEFORMATEXTENSIBLE and verify that it // Use default constructor for WAVEFORMATEXTENSIBLE and verify that it
// results in the same size as for WAVEFORMATEX even if the size of |format_ex| // results in the same size as for WAVEFORMATEX even if the size of `format_ex`
// equals the size of WAVEFORMATEXTENSIBLE. // equals the size of WAVEFORMATEXTENSIBLE.
WAVEFORMATEXTENSIBLE format_ex = {}; WAVEFORMATEXTENSIBLE format_ex = {};
core_audio_utility::WaveFormatWrapper wave_format_ex(&format_ex); core_audio_utility::WaveFormatWrapper wave_format_ex(&format_ex);
@ -319,7 +319,7 @@ TEST_F(CoreAudioUtilityWinTest, CreateSessionManager2) {
EDataFlow data_flow[] = {eRender, eCapture}; EDataFlow data_flow[] = {eRender, eCapture};
// Obtain reference to an IAudioSessionManager2 interface for a default audio // Obtain reference to an IAudioSessionManager2 interface for a default audio
// endpoint device specified by two different data flows and the |eConsole| // endpoint device specified by two different data flows and the `eConsole`
// role. // role.
for (size_t i = 0; i < arraysize(data_flow); ++i) { for (size_t i = 0; i < arraysize(data_flow); ++i) {
ComPtr<IMMDevice> device(core_audio_utility::CreateDevice( ComPtr<IMMDevice> device(core_audio_utility::CreateDevice(
@ -339,7 +339,7 @@ TEST_F(CoreAudioUtilityWinTest, CreateSessionEnumerator) {
// Obtain reference to an IAudioSessionEnumerator interface for a default // Obtain reference to an IAudioSessionEnumerator interface for a default
// audio endpoint device specified by two different data flows and the // audio endpoint device specified by two different data flows and the
// |eConsole| role. // `eConsole` role.
for (size_t i = 0; i < arraysize(data_flow); ++i) { for (size_t i = 0; i < arraysize(data_flow); ++i) {
ComPtr<IMMDevice> device(core_audio_utility::CreateDevice( ComPtr<IMMDevice> device(core_audio_utility::CreateDevice(
AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole)); AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole));
@ -364,7 +364,7 @@ TEST_F(CoreAudioUtilityWinTest, NumberOfActiveSessions) {
EDataFlow data_flow[] = {eRender, eCapture}; EDataFlow data_flow[] = {eRender, eCapture};
// Count number of active audio session for a default audio endpoint device // Count number of active audio session for a default audio endpoint device
// specified by two different data flows and the |eConsole| role. // specified by two different data flows and the `eConsole` role.
// Ensure that the number of active audio sessions is less than or equal to // Ensure that the number of active audio sessions is less than or equal to
// the total number of audio sessions on that same device. // the total number of audio sessions on that same device.
for (size_t i = 0; i < arraysize(data_flow); ++i) { for (size_t i = 0; i < arraysize(data_flow); ++i) {
@ -394,7 +394,7 @@ TEST_F(CoreAudioUtilityWinTest, CreateClient) {
EDataFlow data_flow[] = {eRender, eCapture}; EDataFlow data_flow[] = {eRender, eCapture};
// Obtain reference to an IAudioClient interface for a default audio endpoint // Obtain reference to an IAudioClient interface for a default audio endpoint
// device specified by two different data flows and the |eConsole| role. // device specified by two different data flows and the `eConsole` role.
for (size_t i = 0; i < arraysize(data_flow); ++i) { for (size_t i = 0; i < arraysize(data_flow); ++i) {
ComPtr<IAudioClient> client = core_audio_utility::CreateClient( ComPtr<IAudioClient> client = core_audio_utility::CreateClient(
AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole); AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
@ -409,7 +409,7 @@ TEST_F(CoreAudioUtilityWinTest, CreateClient2) {
EDataFlow data_flow[] = {eRender, eCapture}; EDataFlow data_flow[] = {eRender, eCapture};
// Obtain reference to an IAudioClient2 interface for a default audio endpoint // Obtain reference to an IAudioClient2 interface for a default audio endpoint
// device specified by two different data flows and the |eConsole| role. // device specified by two different data flows and the `eConsole` role.
for (size_t i = 0; i < arraysize(data_flow); ++i) { for (size_t i = 0; i < arraysize(data_flow); ++i) {
ComPtr<IAudioClient2> client2 = core_audio_utility::CreateClient2( ComPtr<IAudioClient2> client2 = core_audio_utility::CreateClient2(
AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole); AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
@ -424,7 +424,7 @@ TEST_F(CoreAudioUtilityWinTest, CreateClient3) {
EDataFlow data_flow[] = {eRender, eCapture}; EDataFlow data_flow[] = {eRender, eCapture};
// Obtain reference to an IAudioClient3 interface for a default audio endpoint // Obtain reference to an IAudioClient3 interface for a default audio endpoint
// device specified by two different data flows and the |eConsole| role. // device specified by two different data flows and the `eConsole` role.
for (size_t i = 0; i < arraysize(data_flow); ++i) { for (size_t i = 0; i < arraysize(data_flow); ++i) {
ComPtr<IAudioClient3> client3 = core_audio_utility::CreateClient3( ComPtr<IAudioClient3> client3 = core_audio_utility::CreateClient3(
AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole); AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);