Concatenate string literals at compile time.

This CL was generated by running:
git ls-files | grep ".cc" | xargs perl -i -ne 'BEGIN {undef $/}; s/("[\s\n]*<<[\s\n]*")/" "/g; print;'; git cl format

After that I manually edited modules/audio_processing/gain_controller2.cc to preserve its original
formatting.

The primary benefit of this change is a small reduction in binary size.
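For illustration only (this snippet is not from the CL itself; it mirrors the first hunk below, and RTC_LOG is the WebRTC logging macro from rtc_base/logging.h), the rewrite relies on the compiler concatenating adjacent string literals, so each merged pair removes one operator<< call at the call site:

// Before: two literals streamed with two operator<< calls.
RTC_LOG(LS_WARNING) << "InitEncode() has been called, but a callback function "
                    << "has not been set with RegisterEncodeCompleteCallback()";

// After: adjacent literals are merged into a single literal at compile time,
// so only one operator<< call is emitted.
RTC_LOG(LS_WARNING) << "InitEncode() has been called, but a callback function "
                       "has not been set with RegisterEncodeCompleteCallback()";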

Bug: None
Change-Id: I689fa7ba9c717c314bb167e5d592c3c4e0871e29
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/165961
Reviewed-by: Alessio Bazzica <alessiob@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Commit-Queue: Jonas Olsson <jonasolsson@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#30251}
Jonas Olsson
2020-01-14 12:11:31 +01:00
committed by Commit Bot
parent 6153e15d31
commit b2b2031457
98 changed files with 544 additions and 353 deletions

View File

@@ -378,7 +378,7 @@ int32_t H264EncoderImpl::Encode(
   if (!encoded_image_callback_) {
     RTC_LOG(LS_WARNING)
         << "InitEncode() has been called, but a callback function "
-        << "has not been set with RegisterEncodeCompleteCallback()";
+           "has not been set with RegisterEncodeCompleteCallback()";
     ReportError();
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }

View File

@@ -72,9 +72,11 @@ Vp9FrameBufferPool::GetFrameBuffer(size_t min_size) {
     allocated_buffers_.push_back(available_buffer);
     if (allocated_buffers_.size() > max_num_buffers_) {
       RTC_LOG(LS_WARNING)
-          << allocated_buffers_.size() << " Vp9FrameBuffers have been "
-          << "allocated by a Vp9FrameBufferPool (exceeding what is "
-          << "considered reasonable, " << max_num_buffers_ << ").";
+          << allocated_buffers_.size()
+          << " Vp9FrameBuffers have been "
+             "allocated by a Vp9FrameBufferPool (exceeding what is "
+             "considered reasonable, "
+          << max_num_buffers_ << ").";
       // TODO(phoglund): this limit is being hit in tests since Oct 5 2016.
       // See https://bugs.chromium.org/p/webrtc/issues/detail?id=6484.

View File

@@ -1032,7 +1032,8 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
   if (rv != VPX_CODEC_OK) {
     RTC_LOG(LS_ERROR) << "Encoding error: " << vpx_codec_err_to_string(rv)
                       << "\n"
-                      << "Details: " << vpx_codec_error(encoder_) << "\n"
+                         "Details: "
+                      << vpx_codec_error(encoder_) << "\n"
                       << vpx_codec_error_detail(encoder_);
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
@@ -1608,8 +1609,9 @@ VP9DecoderImpl::~VP9DecoderImpl() {
     // The frame buffers are reference counted and frames are exposed after
     // decoding. There may be valid usage cases where previous frames are still
     // referenced after ~VP9DecoderImpl that is not a leak.
-    RTC_LOG(LS_INFO) << num_buffers_in_use << " Vp9FrameBuffers are still "
-                     << "referenced during ~VP9DecoderImpl.";
+    RTC_LOG(LS_INFO) << num_buffers_in_use
+                     << " Vp9FrameBuffers are still "
+                        "referenced during ~VP9DecoderImpl.";
   }
 }

View File

@@ -297,7 +297,7 @@ bool VCMDecodingState::UsingFlexibleMode(const VCMFrameBuffer* frame) const {
       frame->CodecSpecific()->codecSpecific.VP9.flexible_mode;
   if (is_flexible_mode && frame->PictureId() == kNoPictureId) {
     RTC_LOG(LS_WARNING) << "Frame is marked as using flexible mode but no"
-                        << "picture id is set.";
+                           "picture id is set.";
     return false;
   }
   return is_flexible_mode;

View File

@@ -350,7 +350,8 @@ bool FrameBuffer::HasBadRenderTiming(const EncodedFrame& frame,
     int frame_delay = static_cast<int>(std::abs(render_time_ms - now_ms));
     RTC_LOG(LS_WARNING)
         << "A frame about to be decoded is out of the configured "
-        << "delay bounds (" << frame_delay << " > " << kMaxVideoDelayMs
+           "delay bounds ("
+        << frame_delay << " > " << kMaxVideoDelayMs
         << "). Resetting the video jitter buffer.";
     return true;
   }
@@ -482,14 +483,14 @@ int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame) {
                           << id.picture_id << ":"
                           << static_cast<int>(id.spatial_layer)
                           << ") but buffer is full, clearing"
-                          << " buffer and inserting the frame.";
+                             " buffer and inserting the frame.";
       ClearFramesAndHistory();
     } else {
       RTC_LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) ("
                           << id.picture_id << ":"
                           << static_cast<int>(id.spatial_layer)
                           << ") could not be inserted due to the frame "
-                          << "buffer being full, dropping frame.";
+                             "buffer being full, dropping frame.";
       return last_continuous_picture_id;
     }
   }
@@ -662,7 +663,7 @@ bool FrameBuffer::UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame,
           << "Frame with (picture_id:spatial_id) (" << id.picture_id << ":"
           << static_cast<int>(id.spatial_layer)
           << ") depends on a non-decoded frame more previous than"
-          << " the last decoded frame, dropping frame.";
+             " the last decoded frame, dropping frame.";
       last_log_non_decoded_ms_ = now_ms;
     }
     return false;

View File

@@ -357,8 +357,8 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
     if (has_h264_idr && (!has_h264_sps || !has_h264_pps)) {
       RTC_LOG(LS_WARNING)
           << "Received H.264-IDR frame "
-          << "(SPS: " << has_h264_sps << ", PPS: " << has_h264_pps
-          << "). Treating as "
+             "(SPS: "
+          << has_h264_sps << ", PPS: " << has_h264_pps << "). Treating as "
           << (sps_pps_idr_is_h264_keyframe_ ? "delta" : "key")
           << " frame since WebRTC-SpsPpsIdrIsH264Keyframe is "
           << (sps_pps_idr_is_h264_keyframe_ ? "enabled." : "disabled");

View File

@@ -114,7 +114,8 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
     int frame_delay = static_cast<int>(std::abs(render_time_ms - now_ms));
     RTC_LOG(LS_WARNING)
         << "A frame about to be decoded is out of the configured "
-        << "delay bounds (" << frame_delay << " > " << max_video_delay_ms_
+           "delay bounds ("
+        << frame_delay << " > " << max_video_delay_ms_
         << "). Resetting the video jitter buffer.";
     timing_error = true;
   } else if (static_cast<int>(timing_->TargetVideoDelay()) >

View File

@@ -384,7 +384,7 @@ RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameVp8(
                         << " and packet range [" << frame->first_seq_num()
                         << ", " << frame->last_seq_num()
                         << "] already received, "
-                        << " dropping frame.";
+                           " dropping frame.";
     return kDrop;
   }
@@ -585,8 +585,9 @@ bool RtpFrameReferenceFinder::MissingRequiredFrameVp9(uint16_t picture_id,
     size_t temporal_idx = info.gof->temporal_idx[gof_idx];
     if (temporal_idx >= kMaxTemporalLayers) {
-      RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers << " temporal "
-                          << "layers are supported.";
+      RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers
+                          << " temporal "
+                             "layers are supported.";
       return true;
     }
@@ -628,8 +629,9 @@ void RtpFrameReferenceFinder::FrameReceivedVp9(uint16_t picture_id,
     size_t temporal_idx = info->gof->temporal_idx[gof_idx];
     if (temporal_idx >= kMaxTemporalLayers) {
-      RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers << " temporal "
-                          << "layers are supported.";
+      RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers
+                          << " temporal "
+                             "layers are supported.";
       return;
     }
@@ -646,8 +648,9 @@ void RtpFrameReferenceFinder::FrameReceivedVp9(uint16_t picture_id,
     size_t temporal_idx = info->gof->temporal_idx[gof_idx];
     if (temporal_idx >= kMaxTemporalLayers) {
-      RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers << " temporal "
-                          << "layers are supported.";
+      RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers
+                          << " temporal "
+                             "layers are supported.";
       return;
     }
@@ -783,7 +786,7 @@ RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameH264(
                         << " and packet range [" << frame->first_seq_num()
                         << ", " << frame->last_seq_num()
                         << "] already received, "
-                        << " dropping frame.";
+                           " dropping frame.";
     return kDrop;
   }