Remove usage of INFO alias for LS_INFO in log messages

Bug: webrtc:13362
Change-Id: Ifda893861a036a85c045cd366f9eab33c62ebde0
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/237221
Reviewed-by: Niels Moller <nisse@webrtc.org>
Commit-Queue: Harald Alvestrand <hta@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#35310}
Author: Harald Alvestrand
Date: 2021-11-04 12:01:23 +00:00
Committed by: WebRTC LUCI CQ
Parent: bd9106d88f
Commit: 97597c0f51
70 changed files with 1140 additions and 1125 deletions
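As a quick illustration of the pattern applied throughout the diff below: every RTC_LOG(INFO) / RTC_DLOG(INFO) call is rewritten to name the severity explicitly as LS_INFO, which INFO was an alias for, so the emitted log output is unchanged. A minimal sketch, assuming the usual rtc_base/logging.h macros; the helper function and variable names here are made up for illustration and do not appear in the patch:

#include "rtc_base/logging.h"  // RTC_LOG, RTC_DLOG and the LS_* severities

// Hypothetical helper, not from this patch, showing the before/after pattern.
void LogUnderruns(int underrun_count) {
  // Before: INFO is a legacy alias that resolves to rtc::LS_INFO.
  // RTC_LOG(INFO) << "#detected underruns: " << underrun_count;

  // After: spell out the severity explicitly; the logged line is identical.
  RTC_LOG(LS_INFO) << "#detected underruns: " << underrun_count;

  // Debug-only logging follows the same rule.
  RTC_DLOG(LS_INFO) << "#detected underruns (debug): " << underrun_count;
}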


@@ -28,19 +28,19 @@ enum AudioDeviceMessageType : uint32_t {
AAudioPlayer::AAudioPlayer(const AudioParameters& audio_parameters)
: main_thread_(rtc::Thread::Current()),
aaudio_(audio_parameters, AAUDIO_DIRECTION_OUTPUT, this) {
RTC_LOG(INFO) << "ctor";
RTC_LOG(LS_INFO) << "ctor";
thread_checker_aaudio_.Detach();
}
AAudioPlayer::~AAudioPlayer() {
RTC_LOG(INFO) << "dtor";
RTC_LOG(LS_INFO) << "dtor";
RTC_DCHECK_RUN_ON(&main_thread_checker_);
Terminate();
RTC_LOG(INFO) << "#detected underruns: " << underrun_count_;
RTC_LOG(LS_INFO) << "#detected underruns: " << underrun_count_;
}
int AAudioPlayer::Init() {
RTC_LOG(INFO) << "Init";
RTC_LOG(LS_INFO) << "Init";
RTC_DCHECK_RUN_ON(&main_thread_checker_);
if (aaudio_.audio_parameters().channels() == 2) {
RTC_DLOG(LS_WARNING) << "Stereo mode is enabled";
@@ -49,14 +49,14 @@ int AAudioPlayer::Init() {
}
int AAudioPlayer::Terminate() {
RTC_LOG(INFO) << "Terminate";
RTC_LOG(LS_INFO) << "Terminate";
RTC_DCHECK_RUN_ON(&main_thread_checker_);
StopPlayout();
return 0;
}
int AAudioPlayer::InitPlayout() {
RTC_LOG(INFO) << "InitPlayout";
RTC_LOG(LS_INFO) << "InitPlayout";
RTC_DCHECK_RUN_ON(&main_thread_checker_);
RTC_DCHECK(!initialized_);
RTC_DCHECK(!playing_);
@@ -73,7 +73,7 @@ bool AAudioPlayer::PlayoutIsInitialized() const {
}
int AAudioPlayer::StartPlayout() {
RTC_LOG(INFO) << "StartPlayout";
RTC_LOG(LS_INFO) << "StartPlayout";
RTC_DCHECK_RUN_ON(&main_thread_checker_);
RTC_DCHECK(!playing_);
if (!initialized_) {
@@ -94,7 +94,7 @@ int AAudioPlayer::StartPlayout() {
}
int AAudioPlayer::StopPlayout() {
RTC_LOG(INFO) << "StopPlayout";
RTC_LOG(LS_INFO) << "StopPlayout";
RTC_DCHECK_RUN_ON(&main_thread_checker_);
if (!initialized_ || !playing_) {
return 0;
@@ -115,7 +115,7 @@ bool AAudioPlayer::Playing() const {
}
void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
RTC_DLOG(INFO) << "AttachAudioBuffer";
RTC_DLOG(LS_INFO) << "AttachAudioBuffer";
RTC_DCHECK_RUN_ON(&main_thread_checker_);
audio_device_buffer_ = audioBuffer;
const AudioParameters audio_parameters = aaudio_.audio_parameters();
@@ -173,9 +173,9 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,
// Log device id in first data callback to ensure that a valid device is
// utilized.
if (first_data_callback_) {
RTC_LOG(INFO) << "--- First output data callback: "
"device id="
<< aaudio_.device_id();
RTC_LOG(LS_INFO) << "--- First output data callback: "
"device id="
<< aaudio_.device_id();
first_data_callback_ = false;
}
@@ -195,8 +195,8 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,
latency_millis_ = aaudio_.EstimateLatencyMillis();
// TODO(henrika): use for development only.
if (aaudio_.frames_written() % (1000 * aaudio_.frames_per_burst()) == 0) {
RTC_DLOG(INFO) << "output latency: " << latency_millis_
<< ", num_frames: " << num_frames;
RTC_DLOG(LS_INFO) << "output latency: " << latency_millis_
<< ", num_frames: " << num_frames;
}
// Read audio data from the WebRTC source using the FineAudioBuffer object
@@ -231,7 +231,7 @@ void AAudioPlayer::OnMessage(rtc::Message* msg) {
void AAudioPlayer::HandleStreamDisconnected() {
RTC_DCHECK_RUN_ON(&main_thread_checker_);
RTC_DLOG(INFO) << "HandleStreamDisconnected";
RTC_DLOG(LS_INFO) << "HandleStreamDisconnected";
if (!initialized_ || !playing_) {
return;
}


@@ -29,19 +29,19 @@ enum AudioDeviceMessageType : uint32_t {
AAudioRecorder::AAudioRecorder(const AudioParameters& audio_parameters)
: main_thread_(rtc::Thread::Current()),
aaudio_(audio_parameters, AAUDIO_DIRECTION_INPUT, this) {
RTC_LOG(INFO) << "ctor";
RTC_LOG(LS_INFO) << "ctor";
thread_checker_aaudio_.Detach();
}
AAudioRecorder::~AAudioRecorder() {
RTC_LOG(INFO) << "dtor";
RTC_LOG(LS_INFO) << "dtor";
RTC_DCHECK(thread_checker_.IsCurrent());
Terminate();
RTC_LOG(INFO) << "detected owerflows: " << overflow_count_;
RTC_LOG(LS_INFO) << "detected owerflows: " << overflow_count_;
}
int AAudioRecorder::Init() {
RTC_LOG(INFO) << "Init";
RTC_LOG(LS_INFO) << "Init";
RTC_DCHECK(thread_checker_.IsCurrent());
if (aaudio_.audio_parameters().channels() == 2) {
RTC_DLOG(LS_WARNING) << "Stereo mode is enabled";
@@ -50,14 +50,14 @@ int AAudioRecorder::Init() {
}
int AAudioRecorder::Terminate() {
RTC_LOG(INFO) << "Terminate";
RTC_LOG(LS_INFO) << "Terminate";
RTC_DCHECK(thread_checker_.IsCurrent());
StopRecording();
return 0;
}
int AAudioRecorder::InitRecording() {
RTC_LOG(INFO) << "InitRecording";
RTC_LOG(LS_INFO) << "InitRecording";
RTC_DCHECK(thread_checker_.IsCurrent());
RTC_DCHECK(!initialized_);
RTC_DCHECK(!recording_);
@@ -73,7 +73,7 @@ bool AAudioRecorder::RecordingIsInitialized() const {
}
int AAudioRecorder::StartRecording() {
RTC_LOG(INFO) << "StartRecording";
RTC_LOG(LS_INFO) << "StartRecording";
RTC_DCHECK(thread_checker_.IsCurrent());
RTC_DCHECK(initialized_);
RTC_DCHECK(!recording_);
@@ -90,7 +90,7 @@ int AAudioRecorder::StartRecording() {
}
int AAudioRecorder::StopRecording() {
RTC_LOG(INFO) << "StopRecording";
RTC_LOG(LS_INFO) << "StopRecording";
RTC_DCHECK(thread_checker_.IsCurrent());
if (!initialized_ || !recording_) {
return 0;
@@ -109,7 +109,7 @@ bool AAudioRecorder::Recording() const {
}
void AAudioRecorder::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
RTC_LOG(INFO) << "AttachAudioBuffer";
RTC_LOG(LS_INFO) << "AttachAudioBuffer";
RTC_DCHECK(thread_checker_.IsCurrent());
audio_device_buffer_ = audioBuffer;
const AudioParameters audio_parameters = aaudio_.audio_parameters();
@@ -131,13 +131,13 @@ bool AAudioRecorder::IsNoiseSuppressorSupported() const {
}
int AAudioRecorder::EnableBuiltInAEC(bool enable) {
RTC_LOG(INFO) << "EnableBuiltInAEC: " << enable;
RTC_LOG(LS_INFO) << "EnableBuiltInAEC: " << enable;
RTC_LOG(LS_ERROR) << "Not implemented";
return -1;
}
int AAudioRecorder::EnableBuiltInNS(bool enable) {
RTC_LOG(INFO) << "EnableBuiltInNS: " << enable;
RTC_LOG(LS_INFO) << "EnableBuiltInNS: " << enable;
RTC_LOG(LS_ERROR) << "Not implemented";
return -1;
}
@@ -165,14 +165,14 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
int32_t num_frames) {
// TODO(henrika): figure out why we sometimes hit this one.
// RTC_DCHECK(thread_checker_aaudio_.IsCurrent());
// RTC_LOG(INFO) << "OnDataCallback: " << num_frames;
// RTC_LOG(LS_INFO) << "OnDataCallback: " << num_frames;
// Drain the input buffer at first callback to ensure that it does not
// contain any old data. Will also ensure that the lowest possible latency
// is obtained.
if (first_data_callback_) {
RTC_LOG(INFO) << "--- First input data callback: "
"device id="
<< aaudio_.device_id();
RTC_LOG(LS_INFO) << "--- First input data callback: "
"device id="
<< aaudio_.device_id();
aaudio_.ClearInputStream(audio_data, num_frames);
first_data_callback_ = false;
}
@@ -188,8 +188,8 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
latency_millis_ = aaudio_.EstimateLatencyMillis();
// TODO(henrika): use for development only.
if (aaudio_.frames_read() % (1000 * aaudio_.frames_per_burst()) == 0) {
RTC_DLOG(INFO) << "input latency: " << latency_millis_
<< ", num_frames: " << num_frames;
RTC_DLOG(LS_INFO) << "input latency: " << latency_millis_
<< ", num_frames: " << num_frames;
}
// Copy recorded audio in `audio_data` to the WebRTC sink using the
// FineAudioBuffer object.
@@ -215,7 +215,7 @@ void AAudioRecorder::OnMessage(rtc::Message* msg) {
void AAudioRecorder::HandleStreamDisconnected() {
RTC_DCHECK_RUN_ON(&thread_checker_);
RTC_LOG(INFO) << "HandleStreamDisconnected";
RTC_LOG(LS_INFO) << "HandleStreamDisconnected";
if (!initialized_ || !recording_) {
return;
}


@@ -137,20 +137,20 @@ AAudioWrapper::AAudioWrapper(const AudioParameters& audio_parameters,
: audio_parameters_(audio_parameters),
direction_(direction),
observer_(observer) {
RTC_LOG(INFO) << "ctor";
RTC_LOG(LS_INFO) << "ctor";
RTC_DCHECK(observer_);
aaudio_thread_checker_.Detach();
RTC_LOG(INFO) << audio_parameters_.ToString();
RTC_LOG(LS_INFO) << audio_parameters_.ToString();
}
AAudioWrapper::~AAudioWrapper() {
RTC_LOG(INFO) << "dtor";
RTC_LOG(LS_INFO) << "dtor";
RTC_DCHECK(thread_checker_.IsCurrent());
RTC_DCHECK(!stream_);
}
bool AAudioWrapper::Init() {
RTC_LOG(INFO) << "Init";
RTC_LOG(LS_INFO) << "Init";
RTC_DCHECK(thread_checker_.IsCurrent());
// Creates a stream builder which can be used to open an audio stream.
ScopedStreamBuilder builder;
@@ -174,7 +174,7 @@ bool AAudioWrapper::Init() {
}
bool AAudioWrapper::Start() {
RTC_LOG(INFO) << "Start";
RTC_LOG(LS_INFO) << "Start";
RTC_DCHECK(thread_checker_.IsCurrent());
// TODO(henrika): this state check might not be needed.
aaudio_stream_state_t current_state = AAudioStream_getState(stream_);
@@ -190,7 +190,7 @@ bool AAudioWrapper::Start() {
}
bool AAudioWrapper::Stop() {
RTC_LOG(INFO) << "Stop: " << DirectionToString(direction());
RTC_LOG(LS_INFO) << "Stop: " << DirectionToString(direction());
RTC_DCHECK(thread_checker_.IsCurrent());
// Asynchronous request for the stream to stop.
RETURN_ON_ERROR(AAudioStream_requestStop(stream_), false);
@@ -240,7 +240,7 @@ double AAudioWrapper::EstimateLatencyMillis() const {
// Returns new buffer size or a negative error value if buffer size could not
// be increased.
bool AAudioWrapper::IncreaseOutputBufferSize() {
RTC_LOG(INFO) << "IncreaseBufferSize";
RTC_LOG(LS_INFO) << "IncreaseBufferSize";
RTC_DCHECK(stream_);
RTC_DCHECK(aaudio_thread_checker_.IsCurrent());
RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_OUTPUT);
@@ -255,20 +255,20 @@ bool AAudioWrapper::IncreaseOutputBufferSize() {
<< ") is higher than max: " << max_buffer_size;
return false;
}
RTC_LOG(INFO) << "Updating buffer size to: " << buffer_size
<< " (max=" << max_buffer_size << ")";
RTC_LOG(LS_INFO) << "Updating buffer size to: " << buffer_size
<< " (max=" << max_buffer_size << ")";
buffer_size = AAudioStream_setBufferSizeInFrames(stream_, buffer_size);
if (buffer_size < 0) {
RTC_LOG(LS_ERROR) << "Failed to change buffer size: "
<< AAudio_convertResultToText(buffer_size);
return false;
}
RTC_LOG(INFO) << "Buffer size changed to: " << buffer_size;
RTC_LOG(LS_INFO) << "Buffer size changed to: " << buffer_size;
return true;
}
void AAudioWrapper::ClearInputStream(void* audio_data, int32_t num_frames) {
RTC_LOG(INFO) << "ClearInputStream";
RTC_LOG(LS_INFO) << "ClearInputStream";
RTC_DCHECK(stream_);
RTC_DCHECK(aaudio_thread_checker_.IsCurrent());
RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_INPUT);
@@ -357,7 +357,7 @@ int64_t AAudioWrapper::frames_read() const {
}
void AAudioWrapper::SetStreamConfiguration(AAudioStreamBuilder* builder) {
RTC_LOG(INFO) << "SetStreamConfiguration";
RTC_LOG(LS_INFO) << "SetStreamConfiguration";
RTC_DCHECK(builder);
RTC_DCHECK(thread_checker_.IsCurrent());
// Request usage of default primary output/input device.
@@ -390,7 +390,7 @@ void AAudioWrapper::SetStreamConfiguration(AAudioStreamBuilder* builder) {
}
bool AAudioWrapper::OpenStream(AAudioStreamBuilder* builder) {
RTC_LOG(INFO) << "OpenStream";
RTC_LOG(LS_INFO) << "OpenStream";
RTC_DCHECK(builder);
AAudioStream* stream = nullptr;
RETURN_ON_ERROR(AAudioStreamBuilder_openStream(builder, &stream), false);
@@ -400,7 +400,7 @@ bool AAudioWrapper::OpenStream(AAudioStreamBuilder* builder) {
}
void AAudioWrapper::CloseStream() {
RTC_LOG(INFO) << "CloseStream";
RTC_LOG(LS_INFO) << "CloseStream";
RTC_DCHECK(stream_);
LOG_ON_ERROR(AAudioStream_close(stream_));
stream_ = nullptr;
@@ -419,16 +419,16 @@ void AAudioWrapper::LogStreamConfiguration() {
ss << ", direction=" << DirectionToString(direction());
ss << ", device id=" << AAudioStream_getDeviceId(stream_);
ss << ", frames per callback=" << frames_per_callback();
RTC_LOG(INFO) << ss.str();
RTC_LOG(LS_INFO) << ss.str();
}
void AAudioWrapper::LogStreamState() {
RTC_LOG(INFO) << "AAudio stream state: "
<< AAudio_convertStreamStateToText(stream_state());
RTC_LOG(LS_INFO) << "AAudio stream state: "
<< AAudio_convertStreamStateToText(stream_state());
}
bool AAudioWrapper::VerifyStreamConfiguration() {
RTC_LOG(INFO) << "VerifyStreamConfiguration";
RTC_LOG(LS_INFO) << "VerifyStreamConfiguration";
RTC_DCHECK(stream_);
// TODO(henrika): should we verify device ID as well?
if (AAudioStream_getSampleRate(stream_) != audio_parameters().sample_rate()) {
@@ -466,16 +466,16 @@ bool AAudioWrapper::VerifyStreamConfiguration() {
}
bool AAudioWrapper::OptimizeBuffers() {
RTC_LOG(INFO) << "OptimizeBuffers";
RTC_LOG(LS_INFO) << "OptimizeBuffers";
RTC_DCHECK(stream_);
// Maximum number of frames that can be filled without blocking.
RTC_LOG(INFO) << "max buffer capacity in frames: "
<< buffer_capacity_in_frames();
RTC_LOG(LS_INFO) << "max buffer capacity in frames: "
<< buffer_capacity_in_frames();
// Query the number of frames that the application should read or write at
// one time for optimal performance.
int32_t frames_per_burst = AAudioStream_getFramesPerBurst(stream_);
RTC_LOG(INFO) << "frames per burst for optimal performance: "
<< frames_per_burst;
RTC_LOG(LS_INFO) << "frames per burst for optimal performance: "
<< frames_per_burst;
frames_per_burst_ = frames_per_burst;
if (direction() == AAUDIO_DIRECTION_INPUT) {
// There is no point in calling setBufferSizeInFrames() for input streams
@@ -492,7 +492,7 @@ bool AAudioWrapper::OptimizeBuffers() {
return false;
}
// Maximum number of frames that can be filled without blocking.
RTC_LOG(INFO) << "buffer burst size in frames: " << buffer_size;
RTC_LOG(LS_INFO) << "buffer burst size in frames: " << buffer_size;
return true;
}


@@ -70,26 +70,26 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
initialized_(false) {
RTC_CHECK(input_);
RTC_CHECK(output_);
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
thread_checker_.Detach();
}
~AndroidAudioDeviceModule() override { RTC_DLOG(INFO) << __FUNCTION__; }
~AndroidAudioDeviceModule() override { RTC_DLOG(LS_INFO) << __FUNCTION__; }
int32_t ActiveAudioLayer(
AudioDeviceModule::AudioLayer* audioLayer) const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
*audioLayer = audio_layer_;
return 0;
}
int32_t RegisterAudioCallback(AudioTransport* audioCallback) override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
return audio_device_buffer_->RegisterAudioCallback(audioCallback);
}
int32_t Init() override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
RTC_DCHECK(thread_checker_.IsCurrent());
audio_device_buffer_ =
std::make_unique<AudioDeviceBuffer>(task_queue_factory_.get());
@@ -118,7 +118,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
}
int32_t Terminate() override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
if (!initialized_)
return 0;
RTC_DCHECK(thread_checker_.IsCurrent());
@@ -132,19 +132,19 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
}
bool Initialized() const override {
RTC_DLOG(INFO) << __FUNCTION__ << ":" << initialized_;
RTC_DLOG(LS_INFO) << __FUNCTION__ << ":" << initialized_;
return initialized_;
}
int16_t PlayoutDevices() override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_LOG(INFO) << "output: " << 1;
RTC_DLOG(LS_INFO) << __FUNCTION__;
RTC_LOG(LS_INFO) << "output: " << 1;
return 1;
}
int16_t RecordingDevices() override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(INFO) << "output: " << 1;
RTC_DLOG(LS_INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << "output: " << 1;
return 1;
}
@@ -163,7 +163,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
int32_t SetPlayoutDevice(uint16_t index) override {
// OK to use but it has no effect currently since device selection is
// done using Andoid APIs instead.
RTC_DLOG(INFO) << __FUNCTION__ << "(" << index << ")";
RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << index << ")";
return 0;
}
@@ -175,7 +175,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
int32_t SetRecordingDevice(uint16_t index) override {
// OK to use but it has no effect currently since device selection is
// done using Andoid APIs instead.
RTC_DLOG(INFO) << __FUNCTION__ << "(" << index << ")";
RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << index << ")";
return 0;
}
@@ -185,66 +185,66 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
}
int32_t PlayoutIsAvailable(bool* available) override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
*available = true;
RTC_DLOG(INFO) << "output: " << *available;
RTC_DLOG(LS_INFO) << "output: " << *available;
return 0;
}
int32_t InitPlayout() override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
if (!initialized_)
return -1;
if (PlayoutIsInitialized()) {
return 0;
}
int32_t result = output_->InitPlayout();
RTC_DLOG(INFO) << "output: " << result;
RTC_DLOG(LS_INFO) << "output: " << result;
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitPlayoutSuccess",
static_cast<int>(result == 0));
return result;
}
bool PlayoutIsInitialized() const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
return output_->PlayoutIsInitialized();
}
int32_t RecordingIsAvailable(bool* available) override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
*available = true;
RTC_DLOG(INFO) << "output: " << *available;
RTC_DLOG(LS_INFO) << "output: " << *available;
return 0;
}
int32_t InitRecording() override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
if (!initialized_)
return -1;
if (RecordingIsInitialized()) {
return 0;
}
int32_t result = input_->InitRecording();
RTC_DLOG(INFO) << "output: " << result;
RTC_DLOG(LS_INFO) << "output: " << result;
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitRecordingSuccess",
static_cast<int>(result == 0));
return result;
}
bool RecordingIsInitialized() const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
return input_->RecordingIsInitialized();
}
int32_t StartPlayout() override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
if (!initialized_)
return -1;
if (Playing()) {
return 0;
}
int32_t result = output_->StartPlayout();
RTC_DLOG(INFO) << "output: " << result;
RTC_DLOG(LS_INFO) << "output: " << result;
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartPlayoutSuccess",
static_cast<int>(result == 0));
if (result == 0) {
@@ -256,34 +256,34 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
}
int32_t StopPlayout() override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
if (!initialized_)
return -1;
if (!Playing())
return 0;
RTC_LOG(INFO) << __FUNCTION__;
RTC_LOG(LS_INFO) << __FUNCTION__;
audio_device_buffer_->StopPlayout();
int32_t result = output_->StopPlayout();
RTC_DLOG(INFO) << "output: " << result;
RTC_DLOG(LS_INFO) << "output: " << result;
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopPlayoutSuccess",
static_cast<int>(result == 0));
return result;
}
bool Playing() const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
return output_->Playing();
}
int32_t StartRecording() override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
if (!initialized_)
return -1;
if (Recording()) {
return 0;
}
int32_t result = input_->StartRecording();
RTC_DLOG(INFO) << "output: " << result;
RTC_DLOG(LS_INFO) << "output: " << result;
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartRecordingSuccess",
static_cast<int>(result == 0));
if (result == 0) {
@@ -295,74 +295,74 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
}
int32_t StopRecording() override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
if (!initialized_)
return -1;
if (!Recording())
return 0;
audio_device_buffer_->StopRecording();
int32_t result = input_->StopRecording();
RTC_DLOG(INFO) << "output: " << result;
RTC_DLOG(LS_INFO) << "output: " << result;
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopRecordingSuccess",
static_cast<int>(result == 0));
return result;
}
bool Recording() const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
return input_->Recording();
}
int32_t InitSpeaker() override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
return initialized_ ? 0 : -1;
}
bool SpeakerIsInitialized() const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
return initialized_;
}
int32_t InitMicrophone() override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
return initialized_ ? 0 : -1;
}
bool MicrophoneIsInitialized() const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
return initialized_;
}
int32_t SpeakerVolumeIsAvailable(bool* available) override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
if (!initialized_)
return -1;
*available = output_->SpeakerVolumeIsAvailable();
RTC_DLOG(INFO) << "output: " << *available;
RTC_DLOG(LS_INFO) << "output: " << *available;
return 0;
}
int32_t SetSpeakerVolume(uint32_t volume) override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
if (!initialized_)
return -1;
return output_->SetSpeakerVolume(volume);
}
int32_t SpeakerVolume(uint32_t* output_volume) const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
if (!initialized_)
return -1;
absl::optional<uint32_t> volume = output_->SpeakerVolume();
if (!volume)
return -1;
*output_volume = *volume;
RTC_DLOG(INFO) << "output: " << *volume;
RTC_DLOG(LS_INFO) << "output: " << *volume;
return 0;
}
int32_t MaxSpeakerVolume(uint32_t* output_max_volume) const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
if (!initialized_)
return -1;
absl::optional<uint32_t> max_volume = output_->MaxSpeakerVolume();
@@ -373,7 +373,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
}
int32_t MinSpeakerVolume(uint32_t* output_min_volume) const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
if (!initialized_)
return -1;
absl::optional<uint32_t> min_volume = output_->MinSpeakerVolume();
@@ -384,71 +384,71 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
}
int32_t MicrophoneVolumeIsAvailable(bool* available) override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
*available = false;
RTC_DLOG(INFO) << "output: " << *available;
RTC_DLOG(LS_INFO) << "output: " << *available;
return -1;
}
int32_t SetMicrophoneVolume(uint32_t volume) override {
RTC_DLOG(INFO) << __FUNCTION__ << "(" << volume << ")";
RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")";
RTC_CHECK_NOTREACHED();
}
int32_t MicrophoneVolume(uint32_t* volume) const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
RTC_CHECK_NOTREACHED();
}
int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
RTC_CHECK_NOTREACHED();
}
int32_t MinMicrophoneVolume(uint32_t* minVolume) const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
RTC_CHECK_NOTREACHED();
}
int32_t SpeakerMuteIsAvailable(bool* available) override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
RTC_CHECK_NOTREACHED();
}
int32_t SetSpeakerMute(bool enable) override {
RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")";
RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
RTC_CHECK_NOTREACHED();
}
int32_t SpeakerMute(bool* enabled) const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
RTC_CHECK_NOTREACHED();
}
int32_t MicrophoneMuteIsAvailable(bool* available) override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
RTC_CHECK_NOTREACHED();
}
int32_t SetMicrophoneMute(bool enable) override {
RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")";
RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
RTC_CHECK_NOTREACHED();
}
int32_t MicrophoneMute(bool* enabled) const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
RTC_CHECK_NOTREACHED();
}
int32_t StereoPlayoutIsAvailable(bool* available) const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
*available = is_stereo_playout_supported_;
RTC_DLOG(INFO) << "output: " << *available;
RTC_DLOG(LS_INFO) << "output: " << *available;
return 0;
}
int32_t SetStereoPlayout(bool enable) override {
RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")";
RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
// Android does not support changes between mono and stero on the fly. The
// use of stereo or mono is determined by the audio layer. It is allowed
// to call this method if that same state is not modified.
@@ -461,21 +461,21 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
}
int32_t StereoPlayout(bool* enabled) const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
*enabled = is_stereo_playout_supported_;
RTC_DLOG(INFO) << "output: " << *enabled;
RTC_DLOG(LS_INFO) << "output: " << *enabled;
return 0;
}
int32_t StereoRecordingIsAvailable(bool* available) const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
*available = is_stereo_record_supported_;
RTC_DLOG(INFO) << "output: " << *available;
RTC_DLOG(LS_INFO) << "output: " << *available;
return 0;
}
int32_t SetStereoRecording(bool enable) override {
RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")";
RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
// Android does not support changes between mono and stero on the fly. The
// use of stereo or mono is determined by the audio layer. It is allowed
// to call this method if that same state is not modified.
@@ -488,9 +488,9 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
}
int32_t StereoRecording(bool* enabled) const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
*enabled = is_stereo_record_supported_;
RTC_DLOG(INFO) << "output: " << *enabled;
RTC_DLOG(LS_INFO) << "output: " << *enabled;
return 0;
}
@@ -514,18 +514,18 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
// a "Not Implemented" log will be filed. This non-perfect state will remain
// until I have added full support for audio effects based on OpenSL ES APIs.
bool BuiltInAECIsAvailable() const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
if (!initialized_)
return false;
bool isAvailable = input_->IsAcousticEchoCancelerSupported();
RTC_DLOG(INFO) << "output: " << isAvailable;
RTC_DLOG(LS_INFO) << "output: " << isAvailable;
return isAvailable;
}
// Not implemented for any input device on Android.
bool BuiltInAGCIsAvailable() const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(INFO) << "output: " << false;
RTC_DLOG(LS_INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << "output: " << false;
return false;
}
@@ -534,38 +534,38 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
// TODO(henrika): add implementation for OpenSL ES based audio as well.
// In addition, see comments for BuiltInAECIsAvailable().
bool BuiltInNSIsAvailable() const override {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
if (!initialized_)
return false;
bool isAvailable = input_->IsNoiseSuppressorSupported();
RTC_DLOG(INFO) << "output: " << isAvailable;
RTC_DLOG(LS_INFO) << "output: " << isAvailable;
return isAvailable;
}
// TODO(henrika): add implementation for OpenSL ES based audio as well.
int32_t EnableBuiltInAEC(bool enable) override {
RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")";
RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
if (!initialized_)
return -1;
RTC_CHECK(BuiltInAECIsAvailable()) << "HW AEC is not available";
int32_t result = input_->EnableBuiltInAEC(enable);
RTC_DLOG(INFO) << "output: " << result;
RTC_DLOG(LS_INFO) << "output: " << result;
return result;
}
int32_t EnableBuiltInAGC(bool enable) override {
RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")";
RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
RTC_CHECK_NOTREACHED();
}
// TODO(henrika): add implementation for OpenSL ES based audio as well.
int32_t EnableBuiltInNS(bool enable) override {
RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")";
RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
if (!initialized_)
return -1;
RTC_CHECK(BuiltInNSIsAvailable()) << "HW NS is not available";
int32_t result = input_->EnableBuiltInNS(enable);
RTC_DLOG(INFO) << "output: " << result;
RTC_DLOG(LS_INFO) << "output: " << result;
return result;
}
@@ -576,7 +576,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
}
int32_t AttachAudioBuffer() {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
output_->AttachAudioBuffer(audio_device_buffer_.get());
input_->AttachAudioBuffer(audio_device_buffer_.get());
return 0;
@@ -640,7 +640,7 @@ rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModuleFromInputAndOutput(
uint16_t playout_delay_ms,
std::unique_ptr<AudioInput> audio_input,
std::unique_ptr<AudioOutput> audio_output) {
RTC_DLOG(INFO) << __FUNCTION__;
RTC_DLOG(LS_INFO) << __FUNCTION__;
return rtc::make_ref_counted<AndroidAudioDeviceModule>(
audio_layer, is_stereo_playout_supported, is_stereo_record_supported,
playout_delay_ms, std::move(audio_input), std::move(audio_output));


@@ -38,7 +38,7 @@ class ScopedHistogramTimer {
~ScopedHistogramTimer() {
const int64_t life_time_ms = rtc::TimeSince(start_time_ms_);
RTC_HISTOGRAM_COUNTS_1000(histogram_name_, life_time_ms);
RTC_LOG(INFO) << histogram_name_ << ": " << life_time_ms;
RTC_LOG(LS_INFO) << histogram_name_ << ": " << life_time_ms;
}
private:
@@ -68,7 +68,7 @@ AudioRecordJni::AudioRecordJni(JNIEnv* env,
initialized_(false),
recording_(false),
audio_device_buffer_(nullptr) {
RTC_LOG(INFO) << "ctor";
RTC_LOG(LS_INFO) << "ctor";
RTC_DCHECK(audio_parameters_.is_valid());
Java_WebRtcAudioRecord_setNativeAudioRecord(env, j_audio_record_,
jni::jlongFromPointer(this));
@@ -79,20 +79,20 @@
}
AudioRecordJni::~AudioRecordJni() {
RTC_LOG(INFO) << "dtor";
RTC_LOG(LS_INFO) << "dtor";
RTC_DCHECK(thread_checker_.IsCurrent());
Terminate();
}
int32_t AudioRecordJni::Init() {
RTC_LOG(INFO) << "Init";
RTC_LOG(LS_INFO) << "Init";
env_ = AttachCurrentThreadIfNeeded();
RTC_DCHECK(thread_checker_.IsCurrent());
return 0;
}
int32_t AudioRecordJni::Terminate() {
RTC_LOG(INFO) << "Terminate";
RTC_LOG(LS_INFO) << "Terminate";
RTC_DCHECK(thread_checker_.IsCurrent());
StopRecording();
thread_checker_.Detach();
@@ -100,7 +100,7 @@ int32_t AudioRecordJni::Terminate() {
}
int32_t AudioRecordJni::InitRecording() {
RTC_LOG(INFO) << "InitRecording";
RTC_LOG(LS_INFO) << "InitRecording";
RTC_DCHECK(thread_checker_.IsCurrent());
if (initialized_) {
// Already initialized.
@@ -118,7 +118,7 @@ int32_t AudioRecordJni::InitRecording() {
return -1;
}
frames_per_buffer_ = static_cast<size_t>(frames_per_buffer);
RTC_LOG(INFO) << "frames_per_buffer: " << frames_per_buffer_;
RTC_LOG(LS_INFO) << "frames_per_buffer: " << frames_per_buffer_;
const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t);
RTC_CHECK_EQ(direct_buffer_capacity_in_bytes_,
frames_per_buffer_ * bytes_per_frame);
@@ -132,7 +132,7 @@ bool AudioRecordJni::RecordingIsInitialized() const {
}
int32_t AudioRecordJni::StartRecording() {
RTC_LOG(INFO) << "StartRecording";
RTC_LOG(LS_INFO) << "StartRecording";
RTC_DCHECK(thread_checker_.IsCurrent());
if (recording_) {
// Already recording.
@@ -153,7 +153,7 @@ int32_t AudioRecordJni::StartRecording() {
}
int32_t AudioRecordJni::StopRecording() {
RTC_LOG(INFO) << "StopRecording";
RTC_LOG(LS_INFO) << "StopRecording";
RTC_DCHECK(thread_checker_.IsCurrent());
if (!initialized_ || !recording_) {
return 0;
@@ -166,8 +166,9 @@ int32_t AudioRecordJni::StopRecording() {
env_, j_audio_record_);
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.SourceMatchesRecordingSession",
session_was_ok);
RTC_LOG(INFO) << "HISTOGRAM(WebRTC.Audio.SourceMatchesRecordingSession): "
<< session_was_ok;
RTC_LOG(LS_INFO)
<< "HISTOGRAM(WebRTC.Audio.SourceMatchesRecordingSession): "
<< session_was_ok;
}
if (!Java_WebRtcAudioRecord_stopRecording(env_, j_audio_record_)) {
RTC_LOG(LS_ERROR) << "StopRecording failed";
@@ -188,14 +189,14 @@ bool AudioRecordJni::Recording() const {
}
void AudioRecordJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
RTC_LOG(INFO) << "AttachAudioBuffer";
RTC_LOG(LS_INFO) << "AttachAudioBuffer";
RTC_DCHECK(thread_checker_.IsCurrent());
audio_device_buffer_ = audioBuffer;
const int sample_rate_hz = audio_parameters_.sample_rate();
RTC_LOG(INFO) << "SetRecordingSampleRate(" << sample_rate_hz << ")";
RTC_LOG(LS_INFO) << "SetRecordingSampleRate(" << sample_rate_hz << ")";
audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz);
const size_t channels = audio_parameters_.channels();
RTC_LOG(INFO) << "SetRecordingChannels(" << channels << ")";
RTC_LOG(LS_INFO) << "SetRecordingChannels(" << channels << ")";
audio_device_buffer_->SetRecordingChannels(channels);
}
@@ -212,7 +213,7 @@ bool AudioRecordJni::IsNoiseSuppressorSupported() const {
}
int32_t AudioRecordJni::EnableBuiltInAEC(bool enable) {
RTC_LOG(INFO) << "EnableBuiltInAEC(" << enable << ")";
RTC_LOG(LS_INFO) << "EnableBuiltInAEC(" << enable << ")";
RTC_DCHECK(thread_checker_.IsCurrent());
return Java_WebRtcAudioRecord_enableBuiltInAEC(env_, j_audio_record_, enable)
? 0
@@ -220,7 +221,7 @@ int32_t AudioRecordJni::EnableBuiltInAEC(bool enable) {
}
int32_t AudioRecordJni::EnableBuiltInNS(bool enable) {
RTC_LOG(INFO) << "EnableBuiltInNS(" << enable << ")";
RTC_LOG(LS_INFO) << "EnableBuiltInNS(" << enable << ")";
RTC_DCHECK(thread_checker_.IsCurrent());
return Java_WebRtcAudioRecord_enableBuiltInNS(env_, j_audio_record_, enable)
? 0
@@ -231,12 +232,12 @@ void AudioRecordJni::CacheDirectBufferAddress(
JNIEnv* env,
const JavaParamRef<jobject>& j_caller,
const JavaParamRef<jobject>& byte_buffer) {
RTC_LOG(INFO) << "OnCacheDirectBufferAddress";
RTC_LOG(LS_INFO) << "OnCacheDirectBufferAddress";
RTC_DCHECK(thread_checker_.IsCurrent());
RTC_DCHECK(!direct_buffer_address_);
direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer.obj());
jlong capacity = env->GetDirectBufferCapacity(byte_buffer.obj());
RTC_LOG(INFO) << "direct buffer capacity: " << capacity;
RTC_LOG(LS_INFO) << "direct buffer capacity: " << capacity;
direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
}
@@ -257,7 +258,7 @@ void AudioRecordJni::DataIsRecorded(JNIEnv* env,
// of `playDelayMs` and `recDelayMs`, hence the distributions does not matter.
audio_device_buffer_->SetVQEData(total_delay_ms_, 0);
if (audio_device_buffer_->DeliverRecordedData() == -1) {
RTC_LOG(INFO) << "AudioDeviceBuffer::DeliverRecordedData failed";
RTC_LOG(LS_INFO) << "AudioDeviceBuffer::DeliverRecordedData failed";
}
}


@@ -44,7 +44,7 @@ AudioTrackJni::AudioTrackJni(JNIEnv* env,
initialized_(false),
playing_(false),
audio_device_buffer_(nullptr) {
RTC_LOG(INFO) << "ctor";
RTC_LOG(LS_INFO) << "ctor";
RTC_DCHECK(audio_parameters_.is_valid());
Java_WebRtcAudioTrack_setNativeAudioTrack(env, j_audio_track_,
jni::jlongFromPointer(this));
@@ -55,20 +55,20 @@
}
AudioTrackJni::~AudioTrackJni() {
RTC_LOG(INFO) << "dtor";
RTC_LOG(LS_INFO) << "dtor";
RTC_DCHECK(thread_checker_.IsCurrent());
Terminate();
}
int32_t AudioTrackJni::Init() {
RTC_LOG(INFO) << "Init";
RTC_LOG(LS_INFO) << "Init";
env_ = AttachCurrentThreadIfNeeded();
RTC_DCHECK(thread_checker_.IsCurrent());
return 0;
}
int32_t AudioTrackJni::Terminate() {
RTC_LOG(INFO) << "Terminate";
RTC_LOG(LS_INFO) << "Terminate";
RTC_DCHECK(thread_checker_.IsCurrent());
StopPlayout();
thread_checker_.Detach();
@@ -76,7 +76,7 @@ int32_t AudioTrackJni::Terminate() {
}
int32_t AudioTrackJni::InitPlayout() {
RTC_LOG(INFO) << "InitPlayout";
RTC_LOG(LS_INFO) << "InitPlayout";
RTC_DCHECK(thread_checker_.IsCurrent());
if (initialized_) {
// Already initialized.
@@ -126,7 +126,7 @@ bool AudioTrackJni::PlayoutIsInitialized() const {
}
int32_t AudioTrackJni::StartPlayout() {
RTC_LOG(INFO) << "StartPlayout";
RTC_LOG(LS_INFO) << "StartPlayout";
RTC_DCHECK(thread_checker_.IsCurrent());
if (playing_) {
// Already playing.
@@ -146,7 +146,7 @@ int32_t AudioTrackJni::StartPlayout() {
}
int32_t AudioTrackJni::StopPlayout() {
RTC_LOG(INFO) << "StopPlayout";
RTC_LOG(LS_INFO) << "StopPlayout";
RTC_DCHECK(thread_checker_.IsCurrent());
if (!initialized_ || !playing_) {
return 0;
@@ -185,7 +185,7 @@ bool AudioTrackJni::SpeakerVolumeIsAvailable() {
}
int AudioTrackJni::SetSpeakerVolume(uint32_t volume) {
RTC_LOG(INFO) << "SetSpeakerVolume(" << volume << ")";
RTC_LOG(LS_INFO) << "SetSpeakerVolume(" << volume << ")";
RTC_DCHECK(thread_checker_.IsCurrent());
return Java_WebRtcAudioTrack_setStreamVolume(env_, j_audio_track_,
static_cast<int>(volume))
@@ -207,7 +207,7 @@ absl::optional<uint32_t> AudioTrackJni::SpeakerVolume() const {
RTC_DCHECK(thread_checker_.IsCurrent());
const uint32_t volume =
Java_WebRtcAudioTrack_getStreamVolume(env_, j_audio_track_);
RTC_LOG(INFO) << "SpeakerVolume: " << volume;
RTC_LOG(LS_INFO) << "SpeakerVolume: " << volume;
return volume;
}
@@ -217,30 +217,30 @@ int AudioTrackJni::GetPlayoutUnderrunCount() {
// TODO(henrika): possibly add stereo support.
void AudioTrackJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
RTC_LOG(INFO) << "AttachAudioBuffer";
RTC_LOG(LS_INFO) << "AttachAudioBuffer";
RTC_DCHECK(thread_checker_.IsCurrent());
audio_device_buffer_ = audioBuffer;
const int sample_rate_hz = audio_parameters_.sample_rate();
RTC_LOG(INFO) << "SetPlayoutSampleRate(" << sample_rate_hz << ")";
RTC_LOG(LS_INFO) << "SetPlayoutSampleRate(" << sample_rate_hz << ")";
audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz);
const size_t channels = audio_parameters_.channels();
RTC_LOG(INFO) << "SetPlayoutChannels(" << channels << ")";
RTC_LOG(LS_INFO) << "SetPlayoutChannels(" << channels << ")";
audio_device_buffer_->SetPlayoutChannels(channels);
}
void AudioTrackJni::CacheDirectBufferAddress(
JNIEnv* env,
const JavaParamRef<jobject>& byte_buffer) {
RTC_LOG(INFO) << "OnCacheDirectBufferAddress";
RTC_LOG(LS_INFO) << "OnCacheDirectBufferAddress";
RTC_DCHECK(thread_checker_.IsCurrent());
RTC_DCHECK(!direct_buffer_address_);
direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer.obj());
jlong capacity = env->GetDirectBufferCapacity(byte_buffer.obj());
RTC_LOG(INFO) << "direct buffer capacity: " << capacity;
RTC_LOG(LS_INFO) << "direct buffer capacity: " << capacity;
direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t);
frames_per_buffer_ = direct_buffer_capacity_in_bytes_ / bytes_per_frame;
RTC_LOG(INFO) << "frames_per_buffer: " << frames_per_buffer_;
RTC_LOG(LS_INFO) << "frames_per_buffer: " << frames_per_buffer_;
}
// This method is called on a high-priority thread from Java. The name of


@@ -107,7 +107,7 @@ OpenSLEngineManager::OpenSLEngineManager() {
}
SLObjectItf OpenSLEngineManager::GetOpenSLEngine() {
RTC_LOG(INFO) << "GetOpenSLEngine";
RTC_LOG(LS_INFO) << "GetOpenSLEngine";
RTC_DCHECK(thread_checker_.IsCurrent());
// OpenSL ES for Android only supports a single engine per application.
// If one already has been created, return existing object instead of