Fixes logging levels in WebRtcAudioXXX.java classes

BUG=NONE
R=magjed@webrtc.org

Review URL: https://codereview.webrtc.org/1363673005 .

Cr-Commit-Position: refs/heads/master@{#10082}
Author: henrika
Date:   2015-09-28 09:23:18 +02:00
Parent: d6d27e7340
Commit: 69984f0533

2 changed files with 29 additions and 37 deletions
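In essence, the commit demotes routine lifecycle logs in both classes from warning (Logging.w) to debug (Logging.d) severity, and removes the per-class Logd/Loge wrappers in favor of direct org.webrtc.Logging calls. A minimal sketch of the resulting convention, using only the Logging.d/Logging.e calls visible in the diff (the class below is illustrative, not part of the commit):

import org.webrtc.Logging;

class AudioLoggingConventionSketch {
  private static final String TAG = "WebRtcAudioRecord";

  void startRecording() {
    // Routine state changes log at debug severity, not as warnings.
    Logging.d(TAG, "startRecording");
  }

  void reportMissingEffect() {
    // Genuine failures keep their error severity.
    Logging.e(TAG, "Built-in AEC is not supported on this platform");
  }
}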

WebRtcAudioRecord.java

@@ -70,7 +70,7 @@ class WebRtcAudioRecord {
@Override
public void run() {
Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
Logging.w(TAG, "AudioRecordThread" + WebRtcAudioUtils.getThreadInfo());
Logging.d(TAG, "AudioRecordThread" + WebRtcAudioUtils.getThreadInfo());
assertTrue(audioRecord.getRecordingState()
== AudioRecord.RECORDSTATE_RECORDING);
@@ -90,7 +90,7 @@ class WebRtcAudioRecord {
long durationInMs =
TimeUnit.NANOSECONDS.toMillis((nowTime - lastTime));
lastTime = nowTime;
Logging.w(TAG, "bytesRead[" + durationInMs + "] " + bytesRead);
Logging.d(TAG, "bytesRead[" + durationInMs + "] " + bytesRead);
}
}
@@ -114,7 +114,7 @@ class WebRtcAudioRecord {
}
WebRtcAudioRecord(Context context, long nativeAudioRecord) {
Logging.w(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
this.context = context;
this.nativeAudioRecord = nativeAudioRecord;
if (DEBUG) {
@@ -124,7 +124,7 @@ class WebRtcAudioRecord {
}
private boolean enableBuiltInAEC(boolean enable) {
Logging.w(TAG, "enableBuiltInAEC(" + enable + ')');
Logging.d(TAG, "enableBuiltInAEC(" + enable + ')');
if (effects == null) {
Logging.e(TAG,"Built-in AEC is not supported on this platform");
return false;
@@ -133,7 +133,7 @@ class WebRtcAudioRecord {
}
private boolean enableBuiltInAGC(boolean enable) {
Logging.w(TAG, "enableBuiltInAGC(" + enable + ')');
Logging.d(TAG, "enableBuiltInAGC(" + enable + ')');
if (effects == null) {
Logging.e(TAG,"Built-in AGC is not supported on this platform");
return false;
@@ -142,7 +142,7 @@ class WebRtcAudioRecord {
}
private boolean enableBuiltInNS(boolean enable) {
Logging.w(TAG, "enableBuiltInNS(" + enable + ')');
Logging.d(TAG, "enableBuiltInNS(" + enable + ')');
if (effects == null) {
Logging.e(TAG,"Built-in NS is not supported on this platform");
return false;
@@ -151,7 +151,7 @@ class WebRtcAudioRecord {
}
private int initRecording(int sampleRate, int channels) {
Logging.w(TAG, "initRecording(sampleRate=" + sampleRate + ", channels=" +
Logging.d(TAG, "initRecording(sampleRate=" + sampleRate + ", channels=" +
channels + ")");
if (!WebRtcAudioUtils.hasPermission(
context, android.Manifest.permission.RECORD_AUDIO)) {
@@ -165,7 +165,7 @@ class WebRtcAudioRecord {
final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
final int framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
Logging.w(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
// Rather than passing the ByteBuffer with every callback (requiring
// the potentially expensive GetDirectBufferAddress) we simply have the
// native class cache the address to the memory once.
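An aside on the comment above: the point of the one-time handshake is that a direct buffer's backing memory is stable, so the native side can resolve its address once instead of calling GetDirectBufferAddress on every callback. A condensed sketch of the Java half, using the field and native-method names visible in this diff (the helper name itself is hypothetical):

private void cacheBufferAddressOnce(int bytesPerFrame, int framesPerBuffer) {
  // Allocate a direct buffer whose backing memory will not move...
  byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
  // ...then let native code resolve and cache its raw address exactly once.
  nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord);
}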
@@ -183,14 +183,14 @@ class WebRtcAudioRecord {
Logging.e(TAG, "AudioRecord.getMinBufferSize failed: " + minBufferSize);
return -1;
}
Logging.w(TAG, "AudioRecord.getMinBufferSize: " + minBufferSize);
Logging.d(TAG, "AudioRecord.getMinBufferSize: " + minBufferSize);
// Use a larger buffer size than the minimum required when creating the
// AudioRecord instance to ensure smooth recording under load. It has been
// verified that it does not increase the actual recording latency.
int bufferSizeInBytes =
Math.max(BUFFER_SIZE_FACTOR * minBufferSize, byteBuffer.capacity());
Logging.w(TAG, "bufferSizeInBytes: " + bufferSizeInBytes);
Logging.d(TAG, "bufferSizeInBytes: " + bufferSizeInBytes);
try {
audioRecord = new AudioRecord(AudioSource.VOICE_COMMUNICATION,
sampleRate,
@@ -206,7 +206,7 @@ class WebRtcAudioRecord {
Logging.e(TAG,"Failed to create a new AudioRecord instance");
return -1;
}
Logging.w(TAG, "AudioRecord "
Logging.d(TAG, "AudioRecord "
+ "session ID: " + audioRecord.getAudioSessionId() + ", "
+ "audio format: " + audioRecord.getAudioFormat() + ", "
+ "channels: " + audioRecord.getChannelCount() + ", "
@@ -227,7 +227,7 @@ class WebRtcAudioRecord {
}
private boolean startRecording() {
Logging.w(TAG, "startRecording");
Logging.d(TAG, "startRecording");
assertTrue(audioRecord != null);
assertTrue(audioThread == null);
try {
@@ -246,7 +246,7 @@ class WebRtcAudioRecord {
}
private boolean stopRecording() {
Logging.w(TAG, "stopRecording");
Logging.d(TAG, "stopRecording");
assertTrue(audioThread != null);
audioThread.joinThread();
audioThread = null;

WebRtcAudioTrack.java

@@ -61,7 +61,7 @@ class WebRtcAudioTrack {
@Override
public void run() {
Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
Logd("AudioTrackThread" + WebRtcAudioUtils.getThreadInfo());
Logging.d(TAG, "AudioTrackThread" + WebRtcAudioUtils.getThreadInfo());
try {
// In MODE_STREAM mode we can optionally prime the output buffer by
@@ -71,7 +71,7 @@ class WebRtcAudioTrack {
audioTrack.play();
assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING);
} catch (IllegalStateException e) {
Loge("AudioTrack.play failed: " + e.getMessage());
Logging.e(TAG, "AudioTrack.play failed: " + e.getMessage());
return;
}
@@ -99,7 +99,7 @@ class WebRtcAudioTrack {
sizeInBytes);
}
if (bytesWritten != sizeInBytes) {
Loge("AudioTrack.write failed: " + bytesWritten);
Logging.e(TAG, "AudioTrack.write failed: " + bytesWritten);
if (bytesWritten == AudioTrack.ERROR_INVALID_OPERATION) {
keepAlive = false;
}
@@ -117,7 +117,7 @@ class WebRtcAudioTrack {
try {
audioTrack.stop();
} catch (IllegalStateException e) {
Loge("AudioTrack.stop failed: " + e.getMessage());
Logging.e(TAG, "AudioTrack.stop failed: " + e.getMessage());
}
assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED);
audioTrack.flush();
@@ -136,7 +136,7 @@ class WebRtcAudioTrack {
}
WebRtcAudioTrack(Context context, long nativeAudioTrack) {
Logd("ctor" + WebRtcAudioUtils.getThreadInfo());
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
this.context = context;
this.nativeAudioTrack = nativeAudioTrack;
audioManager = (AudioManager) context.getSystemService(
@@ -147,12 +147,12 @@ class WebRtcAudioTrack {
}
private void initPlayout(int sampleRate, int channels) {
Logd("initPlayout(sampleRate=" + sampleRate + ", channels=" +
channels + ")");
Logging.d(TAG, "initPlayout(sampleRate=" + sampleRate + ", channels="
+ channels + ")");
final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
byteBuffer = ByteBuffer.allocateDirect(
bytesPerFrame * (sampleRate / BUFFERS_PER_SECOND));
Logd("byteBuffer.capacity: " + byteBuffer.capacity());
Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
// Rather than passing the ByteBuffer with every callback (requiring
// the potentially expensive GetDirectBufferAddress) we simply have the
// native class cache the address to the memory once.
@@ -166,7 +166,7 @@ class WebRtcAudioTrack {
sampleRate,
AudioFormat.CHANNEL_OUT_MONO,
AudioFormat.ENCODING_PCM_16BIT);
Logd("AudioTrack.getMinBufferSize: " + minBufferSizeInBytes);
Logging.d(TAG, "AudioTrack.getMinBufferSize: " + minBufferSizeInBytes);
assertTrue(audioTrack == null);
// For the streaming mode, data must be written to the audio sink in
@@ -184,7 +184,7 @@ class WebRtcAudioTrack {
minBufferSizeInBytes,
AudioTrack.MODE_STREAM);
} catch (IllegalArgumentException e) {
- Logd(e.getMessage());
+ Logging.d(TAG, e.getMessage());
return;
}
assertTrue(audioTrack.getState() == AudioTrack.STATE_INITIALIZED);
@@ -193,7 +193,7 @@ class WebRtcAudioTrack {
}
private boolean startPlayout() {
Logd("startPlayout");
Logging.d(TAG, "startPlayout");
assertTrue(audioTrack != null);
assertTrue(audioThread == null);
audioThread = new AudioTrackThread("AudioTrackJavaThread");
@@ -202,7 +202,7 @@ class WebRtcAudioTrack {
}
private boolean stopPlayout() {
Logd("stopPlayout");
Logging.d(TAG, "stopPlayout");
assertTrue(audioThread != null);
audioThread.joinThread();
audioThread = null;
@@ -215,18 +215,18 @@ class WebRtcAudioTrack {
/** Get max possible volume index for a phone call audio stream. */
private int getStreamMaxVolume() {
Logd("getStreamMaxVolume");
Logging.d(TAG, "getStreamMaxVolume");
assertTrue(audioManager != null);
return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
}
/** Set current volume level for a phone call audio stream. */
private boolean setStreamVolume(int volume) {
Logd("setStreamVolume(" + volume + ")");
Logging.d(TAG, "setStreamVolume(" + volume + ")");
assertTrue(audioManager != null);
if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
if (audioManager.isVolumeFixed()) {
Loge("The device implements a fixed volume policy.");
Logging.e(TAG, "The device implements a fixed volume policy.");
return false;
}
}
@@ -236,7 +236,7 @@ class WebRtcAudioTrack {
/** Get current volume level for a phone call audio stream. */
private int getStreamVolume() {
Logd("getStreamVolume");
Logging.d(TAG, "getStreamVolume");
assertTrue(audioManager != null);
return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
}
@@ -248,14 +248,6 @@ class WebRtcAudioTrack {
}
}
- private static void Logd(String msg) {
-   Logging.d(TAG, msg);
- }
- private static void Loge(String msg) {
-   Logging.e(TAG, msg);
- }
private native void nativeCacheDirectBufferAddress(
ByteBuffer byteBuffer, long nativeAudioRecord);