Stop using LOG macros in favor of RTC_ prefixed macros.

This CL has been generated with the following script:

for m in PLOG \
  LOG_TAG \
  LOG_GLEM \
  LOG_GLE_EX \
  LOG_GLE \
  LAST_SYSTEM_ERROR \
  LOG_ERRNO_EX \
  LOG_ERRNO \
  LOG_ERR_EX \
  LOG_ERR \
  LOG_V \
  LOG_F \
  LOG_T_F \
  LOG_E \
  LOG_T \
  LOG_CHECK_LEVEL_V \
  LOG_CHECK_LEVEL \
  LOG
do
  git grep -l $m | xargs sed -i "s,\b$m\b,RTC_$m,g"
done
git checkout rtc_base/logging.h
git cl format
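
The \b word-boundary anchors keep the substitution from touching identifiers that
merely contain one of the macro names (DLOG, RTC_LOG, or LOG_GLE while the plain
LOG pass runs), and the trailing git checkout presumably restores rtc_base/logging.h
so the original macro definitions themselves are left untouched. A minimal sketch of
the effect with GNU sed; the sample line below is made up for illustration and is
not taken from the tree:

  # \bLOG\b does not match inside DLOG or RTC_LOG ('D' and '_' are word
  # characters), so only the bare LOG(...) call gains the RTC_ prefix.
  echo 'LOG(LS_INFO) << "hi"; DLOG(LS_INFO) << "x"; RTC_LOG(LS_ERROR) << "y";' \
    | sed 's,\bLOG\b,RTC_LOG,g'
  # -> RTC_LOG(LS_INFO) << "hi"; DLOG(LS_INFO) << "x"; RTC_LOG(LS_ERROR) << "y";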

Bug: webrtc:8452
Change-Id: I1a53ef3e0a5ef6e244e62b2e012b864914784600
Reviewed-on: https://webrtc-review.googlesource.com/21325
Reviewed-by: Niels Möller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Commit-Queue: Mirko Bonadei <mbonadei@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#20617}
Author: Mirko Bonadei
Date: 2017-11-09 11:09:25 +01:00
Committed by: Commit Bot
Commit: 675513b96a (parent: 34fa309129)
407 changed files with 5753 additions and 5371 deletions

@@ -74,7 +74,7 @@ H264Encoder* H264Encoder::Create(const cricket::VideoCodec& codec) {
RTC_DCHECK(H264Encoder::IsSupported());
#if defined(WEBRTC_USE_H264)
RTC_CHECK(g_rtc_use_h264);
LOG(LS_INFO) << "Creating H264EncoderImpl.";
RTC_LOG(LS_INFO) << "Creating H264EncoderImpl.";
return new H264EncoderImpl(codec);
#else
RTC_NOTREACHED();
@@ -90,7 +90,7 @@ H264Decoder* H264Decoder::Create() {
RTC_DCHECK(H264Decoder::IsSupported());
#if defined(WEBRTC_USE_H264)
RTC_CHECK(g_rtc_use_h264);
LOG(LS_INFO) << "Creating H264DecoderImpl.";
RTC_LOG(LS_INFO) << "Creating H264DecoderImpl.";
return new H264DecoderImpl();
#else
RTC_NOTREACHED();

@@ -119,7 +119,7 @@ int H264DecoderImpl::AVGetBuffer2(
int ret = av_image_check_size(static_cast<unsigned int>(width),
static_cast<unsigned int>(height), 0, nullptr);
if (ret < 0) {
LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height;
RTC_LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height;
decoder->ReportError();
return ret;
}
@@ -244,14 +244,14 @@ int32_t H264DecoderImpl::InitDecode(const VideoCodec* codec_settings,
if (!codec) {
// This is an indication that FFmpeg has not been initialized or it has not
// been compiled/initialized with the correct set of codecs.
LOG(LS_ERROR) << "FFmpeg H.264 decoder not found.";
RTC_LOG(LS_ERROR) << "FFmpeg H.264 decoder not found.";
Release();
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
}
int res = avcodec_open2(av_context_.get(), codec, nullptr);
if (res < 0) {
LOG(LS_ERROR) << "avcodec_open2 error: " << res;
RTC_LOG(LS_ERROR) << "avcodec_open2 error: " << res;
Release();
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
@@ -283,8 +283,9 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (!decoded_image_callback_) {
LOG(LS_WARNING) << "InitDecode() has been called, but a callback function "
"has not been set with RegisterDecodeCompleteCallback()";
RTC_LOG(LS_WARNING)
<< "InitDecode() has been called, but a callback function "
"has not been set with RegisterDecodeCompleteCallback()";
ReportError();
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
@@ -323,14 +324,14 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
int result = avcodec_send_packet(av_context_.get(), &packet);
if (result < 0) {
LOG(LS_ERROR) << "avcodec_send_packet error: " << result;
RTC_LOG(LS_ERROR) << "avcodec_send_packet error: " << result;
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
}
result = avcodec_receive_frame(av_context_.get(), av_frame_.get());
if (result < 0) {
LOG(LS_ERROR) << "avcodec_receive_frame error: " << result;
RTC_LOG(LS_ERROR) << "avcodec_receive_frame error: " << result;
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
}

@@ -111,9 +111,10 @@ static void RtpFragmentize(EncodedImage* encoded_image,
VideoType::kI420, frame_buffer.width(), frame_buffer.height());
if (encoded_image->_size < required_size) {
// Encoded data > unencoded data. Allocate required bytes.
LOG(LS_WARNING) << "Encoding produced more bytes than the original image "
<< "data! Original bytes: " << encoded_image->_size
<< ", encoded bytes: " << required_size << ".";
RTC_LOG(LS_WARNING)
<< "Encoding produced more bytes than the original image "
<< "data! Original bytes: " << encoded_image->_size
<< ", encoded bytes: " << required_size << ".";
encoded_image->_size = required_size;
}
encoded_image->_buffer = new uint8_t[encoded_image->_size];
@@ -209,7 +210,7 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
// Create encoder.
if (WelsCreateSVCEncoder(&openh264_encoder_) != 0) {
// Failed to create encoder.
LOG(LS_ERROR) << "Failed to create OpenH264 encoder";
RTC_LOG(LS_ERROR) << "Failed to create OpenH264 encoder";
RTC_DCHECK(!openh264_encoder_);
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
@@ -243,7 +244,7 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
// Initialize.
if (openh264_encoder_->InitializeExt(&encoder_params) != 0) {
LOG(LS_ERROR) << "Failed to initialize OpenH264 encoder";
RTC_LOG(LS_ERROR) << "Failed to initialize OpenH264 encoder";
Release();
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
@@ -309,8 +310,9 @@ int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (!encoded_image_callback_) {
LOG(LS_WARNING) << "InitEncode() has been called, but a callback function "
<< "has not been set with RegisterEncodeCompleteCallback()";
RTC_LOG(LS_WARNING)
<< "InitEncode() has been called, but a callback function "
<< "has not been set with RegisterEncodeCompleteCallback()";
ReportError();
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
@@ -355,8 +357,8 @@ int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
// Encode!
int enc_ret = openh264_encoder_->EncodeFrame(&picture, &info);
if (enc_ret != 0) {
LOG(LS_ERROR) << "OpenH264 frame encoding failed, EncodeFrame returned "
<< enc_ret << ".";
RTC_LOG(LS_ERROR) << "OpenH264 frame encoding failed, EncodeFrame returned "
<< enc_ret << ".";
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -449,8 +451,8 @@ SEncParamExt H264EncoderImpl::CreateEncoderParams() const {
encoder_params.iTargetBitrate;
encoder_params.sSpatialLayers[0].iMaxSpatialBitrate =
encoder_params.iMaxBitrate;
LOG(INFO) << "OpenH264 version is " << OPENH264_MAJOR << "."
<< OPENH264_MINOR;
RTC_LOG(INFO) << "OpenH264 version is " << OPENH264_MAJOR << "."
<< OPENH264_MINOR;
switch (packetization_mode_) {
case H264PacketizationMode::SingleNalUnit:
// Limit the size of the packets produced.

@@ -66,7 +66,7 @@ int StereoEncoderAdapter::InitEncode(const VideoCodec* inst,
factory_->CreateVideoEncoder(format);
const int rv = encoder->InitEncode(inst, number_of_cores, max_payload_size);
if (rv) {
LOG(LS_ERROR) << "Failed to create stere codec index " << i;
RTC_LOG(LS_ERROR) << "Failed to create stere codec index " << i;
return rv;
}
adapter_callbacks_.emplace_back(new AdapterEncodedImageCallback(

@@ -494,15 +494,16 @@ bool DefaultTemporalLayersChecker::CheckTemporalConfig(
if (pattern_idx_ == temporal_ids_.size()) {
// All non key-frame buffers should be updated each pattern cycle.
if (!last_.is_keyframe && !last_.is_updated_this_cycle) {
LOG(LS_ERROR) << "Last buffer was not updated during pattern cycle.";
RTC_LOG(LS_ERROR) << "Last buffer was not updated during pattern cycle.";
return false;
}
if (!arf_.is_keyframe && !arf_.is_updated_this_cycle) {
LOG(LS_ERROR) << "Arf buffer was not updated during pattern cycle.";
RTC_LOG(LS_ERROR) << "Arf buffer was not updated during pattern cycle.";
return false;
}
if (!golden_.is_keyframe && !golden_.is_updated_this_cycle) {
LOG(LS_ERROR) << "Golden buffer was not updated during pattern cycle.";
RTC_LOG(LS_ERROR)
<< "Golden buffer was not updated during pattern cycle.";
return false;
}
last_.is_updated_this_cycle = false;
@@ -512,9 +513,9 @@ bool DefaultTemporalLayersChecker::CheckTemporalConfig(
}
uint8_t expected_tl_idx = temporal_ids_[pattern_idx_];
if (frame_config.packetizer_temporal_idx != expected_tl_idx) {
LOG(LS_ERROR) << "Frame has an incorrect temporal index. Expected: "
<< static_cast<int>(expected_tl_idx) << " Actual: "
<< static_cast<int>(frame_config.packetizer_temporal_idx);
RTC_LOG(LS_ERROR) << "Frame has an incorrect temporal index. Expected: "
<< static_cast<int>(expected_tl_idx) << " Actual: "
<< static_cast<int>(frame_config.packetizer_temporal_idx);
return false;
}
@@ -555,8 +556,8 @@ bool DefaultTemporalLayersChecker::CheckTemporalConfig(
}
if (need_sync != frame_config.layer_sync) {
LOG(LS_ERROR) << "Sync bit is set incorrectly on a frame. Expected: "
<< need_sync << " Actual: " << frame_config.layer_sync;
RTC_LOG(LS_ERROR) << "Sync bit is set incorrectly on a frame. Expected: "
<< need_sync << " Actual: " << frame_config.layer_sync;
return false;
}
@@ -565,10 +566,11 @@ bool DefaultTemporalLayersChecker::CheckTemporalConfig(
for (i = 0; i < dependencies.size(); ++i) {
if (temporal_dependencies_[pattern_idx_].find(dependencies[i]) ==
temporal_dependencies_[pattern_idx_].end()) {
LOG(LS_ERROR) << "Illegal temporal dependency out of defined pattern "
"from position "
<< static_cast<int>(pattern_idx_) << " to position "
<< static_cast<int>(dependencies[i]);
RTC_LOG(LS_ERROR)
<< "Illegal temporal dependency out of defined pattern "
"from position "
<< static_cast<int>(pattern_idx_) << " to position "
<< static_cast<int>(dependencies[i]);
return false;
}
}

@@ -48,7 +48,7 @@ bool TemporalLayersChecker::CheckAndUpdateBufferState(
}
if (!frame_is_keyframe && !state->is_keyframe &&
state->temporal_layer > temporal_layer) {
LOG(LS_ERROR) << "Frame is referencing higher temporal layer.";
RTC_LOG(LS_ERROR) << "Frame is referencing higher temporal layer.";
return false;
}
}
@@ -72,9 +72,9 @@ bool TemporalLayersChecker::CheckTemporalConfig(
if (frame_config.packetizer_temporal_idx >= num_temporal_layers_ ||
(frame_config.packetizer_temporal_idx == kNoTemporalIdx &&
num_temporal_layers_ > 1)) {
LOG(LS_ERROR) << "Incorrect temporal layer set for frame: "
<< frame_config.packetizer_temporal_idx
<< " num_temporal_layers: " << num_temporal_layers_;
RTC_LOG(LS_ERROR) << "Incorrect temporal layer set for frame: "
<< frame_config.packetizer_temporal_idx
<< " num_temporal_layers: " << num_temporal_layers_;
return false;
}
@@ -86,7 +86,7 @@ bool TemporalLayersChecker::CheckTemporalConfig(
&last_, &need_sync, frame_is_keyframe,
frame_config.packetizer_temporal_idx, frame_config.last_buffer_flags,
sequence_number_, &lowest_sequence_referenced)) {
LOG(LS_ERROR) << "Error in the Last buffer";
RTC_LOG(LS_ERROR) << "Error in the Last buffer";
return false;
}
if (!CheckAndUpdateBufferState(&golden_, &need_sync, frame_is_keyframe,
@@ -94,22 +94,22 @@ bool TemporalLayersChecker::CheckTemporalConfig(
frame_config.golden_buffer_flags,
sequence_number_,
&lowest_sequence_referenced)) {
LOG(LS_ERROR) << "Error in the Golden buffer";
RTC_LOG(LS_ERROR) << "Error in the Golden buffer";
return false;
}
if (!CheckAndUpdateBufferState(
&arf_, &need_sync, frame_is_keyframe,
frame_config.packetizer_temporal_idx, frame_config.arf_buffer_flags,
sequence_number_, &lowest_sequence_referenced)) {
LOG(LS_ERROR) << "Error in the Arf buffer";
RTC_LOG(LS_ERROR) << "Error in the Arf buffer";
return false;
}
if (lowest_sequence_referenced < last_sync_sequence_number_ &&
!frame_is_keyframe) {
LOG(LS_ERROR) << "Reference past the last sync frame. Referenced "
<< lowest_sequence_referenced << ", but sync was at "
<< last_sync_sequence_number_;
RTC_LOG(LS_ERROR) << "Reference past the last sync frame. Referenced "
<< lowest_sequence_referenced << ", but sync was at "
<< last_sync_sequence_number_;
return false;
}
@@ -126,8 +126,8 @@ bool TemporalLayersChecker::CheckTemporalConfig(
}
if (need_sync != frame_config.layer_sync) {
LOG(LS_ERROR) << "Sync bit is set incorrectly on a frame. Expected: "
<< need_sync << " Actual: " << frame_config.layer_sync;
RTC_LOG(LS_ERROR) << "Sync bit is set incorrectly on a frame. Expected: "
<< need_sync << " Actual: " << frame_config.layer_sync;
return false;
}
return true;

@@ -70,7 +70,7 @@ Vp9FrameBufferPool::GetFrameBuffer(size_t min_size) {
available_buffer = new rtc::RefCountedObject<Vp9FrameBuffer>();
allocated_buffers_.push_back(available_buffer);
if (allocated_buffers_.size() > max_num_buffers_) {
LOG(LS_WARNING)
RTC_LOG(LS_WARNING)
<< allocated_buffers_.size() << " Vp9FrameBuffers have been "
<< "allocated by a Vp9FrameBufferPool (exceeding what is "
<< "considered reasonable, " << max_num_buffers_ << ").";

@@ -128,8 +128,8 @@ bool VP9EncoderImpl::SetSvcRates() {
if (ExplicitlyConfiguredSpatialLayers()) {
if (num_temporal_layers_ > 1) {
LOG(LS_ERROR) << "Multiple temporal layers when manually specifying "
"spatial layers not implemented yet!";
RTC_LOG(LS_ERROR) << "Multiple temporal layers when manually specifying "
"spatial layers not implemented yet!";
return false;
}
int total_bitrate_bps = 0;
@@ -150,7 +150,7 @@ bool VP9EncoderImpl::SetSvcRates() {
for (i = 0; i < num_spatial_layers_; ++i) {
if (svc_params_.scaling_factor_num[i] <= 0 ||
svc_params_.scaling_factor_den[i] <= 0) {
LOG(LS_ERROR) << "Scaling factors not specified!";
RTC_LOG(LS_ERROR) << "Scaling factors not specified!";
return false;
}
rate_ratio[i] =
@@ -178,8 +178,8 @@ bool VP9EncoderImpl::SetSvcRates() {
config_->layer_target_bitrate[i * num_temporal_layers_ + 2] =
config_->ss_target_bitrate[i];
} else {
LOG(LS_ERROR) << "Unsupported number of temporal layers: "
<< num_temporal_layers_;
RTC_LOG(LS_ERROR) << "Unsupported number of temporal layers: "
<< num_temporal_layers_;
return false;
}
}
@@ -861,8 +861,8 @@ VP9DecoderImpl::~VP9DecoderImpl() {
// The frame buffers are reference counted and frames are exposed after
// decoding. There may be valid usage cases where previous frames are still
// referenced after ~VP9DecoderImpl that is not a leak.
LOG(LS_INFO) << num_buffers_in_use << " Vp9FrameBuffers are still "
<< "referenced during ~VP9DecoderImpl.";
RTC_LOG(LS_INFO) << num_buffers_in_use << " Vp9FrameBuffers are still "
<< "referenced during ~VP9DecoderImpl.";
}
}