Stop using LOG macros in favor of RTC_ prefixed macros.
This CL has been generated with the following script:

for m in PLOG \
  LOG_TAG \
  LOG_GLEM \
  LOG_GLE_EX \
  LOG_GLE \
  LAST_SYSTEM_ERROR \
  LOG_ERRNO_EX \
  LOG_ERRNO \
  LOG_ERR_EX \
  LOG_ERR \
  LOG_V \
  LOG_F \
  LOG_T_F \
  LOG_E \
  LOG_T \
  LOG_CHECK_LEVEL_V \
  LOG_CHECK_LEVEL \
  LOG
do
  git grep -l $m | xargs sed -i "s,\b$m\b,RTC_$m,g"
done
git checkout rtc_base/logging.h
git cl format

Bug: webrtc:8452
Change-Id: I1a53ef3e0a5ef6e244e62b2e012b864914784600
Reviewed-on: https://webrtc-review.googlesource.com/21325
Reviewed-by: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Commit-Queue: Mirko Bonadei <mbonadei@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#20617}
Committed-by: Commit Bot
Parent: 34fa309129
Commit: 675513b96a
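For reviewers who want to sanity-check the mechanical rewrite locally, a grep along these lines (illustrative only, not part of the CL; adjust the pathspecs to the directories you care about) should come back empty everywhere except rtc_base/logging.h, where the unprefixed macros are still defined:

  # List any remaining bare LOG/LOG_F/... call sites. RTC_-prefixed uses are
  # skipped because the character right before "LOG" must not be a word
  # character or underscore.
  git grep -nE '(^|[^_A-Za-z0-9])LOG(_F|_V|_E|_T|_T_F)?\(' -- '*.cc' '*.h' \
    | grep -v rtc_base/logging.h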
@@ -45,7 +45,7 @@ static std::unique_ptr<VCMGenericDecoder> CreateDecoder(VideoCodecType type) {
default:
break;
}
- LOG(LS_WARNING) << "No internal decoder of this type exists.";
+ RTC_LOG(LS_WARNING) << "No internal decoder of this type exists.";
return std::unique_ptr<VCMGenericDecoder>();
}

@@ -149,7 +149,7 @@ bool VCMCodecDataBase::SetSendCodec(const VideoCodec* send_codec,
encoded_frame_callback_->SetInternalSource(internal_source_);
if (ptr_encoder_->InitEncode(&send_codec_, number_of_cores_,
max_payload_size_) < 0) {
- LOG(LS_ERROR) << "Failed to initialize video encoder.";
+ RTC_LOG(LS_ERROR) << "Failed to initialize video encoder.";
DeleteEncoder();
return false;
}

@@ -395,13 +395,13 @@ std::unique_ptr<VCMGenericDecoder> VCMCodecDataBase::CreateAndInitDecoder(
const VCMEncodedFrame& frame,
VideoCodec* new_codec) const {
uint8_t payload_type = frame.PayloadType();
- LOG(LS_INFO) << "Initializing decoder with payload type '"
- << static_cast<int>(payload_type) << "'.";
+ RTC_LOG(LS_INFO) << "Initializing decoder with payload type '"
+ << static_cast<int>(payload_type) << "'.";
RTC_DCHECK(new_codec);
const VCMDecoderMapItem* decoder_item = FindDecoderItem(payload_type);
if (!decoder_item) {
- LOG(LS_ERROR) << "Can't find a decoder associated with payload type: "
- << static_cast<int>(payload_type);
+ RTC_LOG(LS_ERROR) << "Can't find a decoder associated with payload type: "
+ << static_cast<int>(payload_type);
return nullptr;
}
std::unique_ptr<VCMGenericDecoder> ptr_decoder;

@@ -74,7 +74,7 @@ H264Encoder* H264Encoder::Create(const cricket::VideoCodec& codec) {
RTC_DCHECK(H264Encoder::IsSupported());
#if defined(WEBRTC_USE_H264)
RTC_CHECK(g_rtc_use_h264);
- LOG(LS_INFO) << "Creating H264EncoderImpl.";
+ RTC_LOG(LS_INFO) << "Creating H264EncoderImpl.";
return new H264EncoderImpl(codec);
#else
RTC_NOTREACHED();

@@ -90,7 +90,7 @@ H264Decoder* H264Decoder::Create() {
RTC_DCHECK(H264Decoder::IsSupported());
#if defined(WEBRTC_USE_H264)
RTC_CHECK(g_rtc_use_h264);
- LOG(LS_INFO) << "Creating H264DecoderImpl.";
+ RTC_LOG(LS_INFO) << "Creating H264DecoderImpl.";
return new H264DecoderImpl();
#else
RTC_NOTREACHED();

@@ -119,7 +119,7 @@ int H264DecoderImpl::AVGetBuffer2(
int ret = av_image_check_size(static_cast<unsigned int>(width),
static_cast<unsigned int>(height), 0, nullptr);
if (ret < 0) {
- LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height;
+ RTC_LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height;
decoder->ReportError();
return ret;
}

@@ -244,14 +244,14 @@ int32_t H264DecoderImpl::InitDecode(const VideoCodec* codec_settings,
if (!codec) {
// This is an indication that FFmpeg has not been initialized or it has not
// been compiled/initialized with the correct set of codecs.
- LOG(LS_ERROR) << "FFmpeg H.264 decoder not found.";
+ RTC_LOG(LS_ERROR) << "FFmpeg H.264 decoder not found.";
Release();
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
}
int res = avcodec_open2(av_context_.get(), codec, nullptr);
if (res < 0) {
- LOG(LS_ERROR) << "avcodec_open2 error: " << res;
+ RTC_LOG(LS_ERROR) << "avcodec_open2 error: " << res;
Release();
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
@@ -283,8 +283,9 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (!decoded_image_callback_) {
- LOG(LS_WARNING) << "InitDecode() has been called, but a callback function "
- "has not been set with RegisterDecodeCompleteCallback()";
+ RTC_LOG(LS_WARNING)
+ << "InitDecode() has been called, but a callback function "
+ "has not been set with RegisterDecodeCompleteCallback()";
ReportError();
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}

@@ -323,14 +324,14 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,

int result = avcodec_send_packet(av_context_.get(), &packet);
if (result < 0) {
- LOG(LS_ERROR) << "avcodec_send_packet error: " << result;
+ RTC_LOG(LS_ERROR) << "avcodec_send_packet error: " << result;
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
}

result = avcodec_receive_frame(av_context_.get(), av_frame_.get());
if (result < 0) {
- LOG(LS_ERROR) << "avcodec_receive_frame error: " << result;
+ RTC_LOG(LS_ERROR) << "avcodec_receive_frame error: " << result;
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
}

@@ -111,9 +111,10 @@ static void RtpFragmentize(EncodedImage* encoded_image,
VideoType::kI420, frame_buffer.width(), frame_buffer.height());
if (encoded_image->_size < required_size) {
// Encoded data > unencoded data. Allocate required bytes.
- LOG(LS_WARNING) << "Encoding produced more bytes than the original image "
- << "data! Original bytes: " << encoded_image->_size
- << ", encoded bytes: " << required_size << ".";
+ RTC_LOG(LS_WARNING)
+ << "Encoding produced more bytes than the original image "
+ << "data! Original bytes: " << encoded_image->_size
+ << ", encoded bytes: " << required_size << ".";
encoded_image->_size = required_size;
}
encoded_image->_buffer = new uint8_t[encoded_image->_size];
@@ -209,7 +210,7 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
// Create encoder.
if (WelsCreateSVCEncoder(&openh264_encoder_) != 0) {
// Failed to create encoder.
- LOG(LS_ERROR) << "Failed to create OpenH264 encoder";
+ RTC_LOG(LS_ERROR) << "Failed to create OpenH264 encoder";
RTC_DCHECK(!openh264_encoder_);
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;

@@ -243,7 +244,7 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,

// Initialize.
if (openh264_encoder_->InitializeExt(&encoder_params) != 0) {
- LOG(LS_ERROR) << "Failed to initialize OpenH264 encoder";
+ RTC_LOG(LS_ERROR) << "Failed to initialize OpenH264 encoder";
Release();
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;

@@ -309,8 +310,9 @@ int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (!encoded_image_callback_) {
- LOG(LS_WARNING) << "InitEncode() has been called, but a callback function "
- << "has not been set with RegisterEncodeCompleteCallback()";
+ RTC_LOG(LS_WARNING)
+ << "InitEncode() has been called, but a callback function "
+ << "has not been set with RegisterEncodeCompleteCallback()";
ReportError();
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}

@@ -355,8 +357,8 @@ int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
// Encode!
int enc_ret = openh264_encoder_->EncodeFrame(&picture, &info);
if (enc_ret != 0) {
- LOG(LS_ERROR) << "OpenH264 frame encoding failed, EncodeFrame returned "
- << enc_ret << ".";
+ RTC_LOG(LS_ERROR) << "OpenH264 frame encoding failed, EncodeFrame returned "
+ << enc_ret << ".";
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -449,8 +451,8 @@ SEncParamExt H264EncoderImpl::CreateEncoderParams() const {
encoder_params.iTargetBitrate;
encoder_params.sSpatialLayers[0].iMaxSpatialBitrate =
encoder_params.iMaxBitrate;
- LOG(INFO) << "OpenH264 version is " << OPENH264_MAJOR << "."
- << OPENH264_MINOR;
+ RTC_LOG(INFO) << "OpenH264 version is " << OPENH264_MAJOR << "."
+ << OPENH264_MINOR;
switch (packetization_mode_) {
case H264PacketizationMode::SingleNalUnit:
// Limit the size of the packets produced.

@@ -66,7 +66,7 @@ int StereoEncoderAdapter::InitEncode(const VideoCodec* inst,
factory_->CreateVideoEncoder(format);
const int rv = encoder->InitEncode(inst, number_of_cores, max_payload_size);
if (rv) {
- LOG(LS_ERROR) << "Failed to create stere codec index " << i;
+ RTC_LOG(LS_ERROR) << "Failed to create stere codec index " << i;
return rv;
}
adapter_callbacks_.emplace_back(new AdapterEncodedImageCallback(

@@ -494,15 +494,16 @@ bool DefaultTemporalLayersChecker::CheckTemporalConfig(
if (pattern_idx_ == temporal_ids_.size()) {
// All non key-frame buffers should be updated each pattern cycle.
if (!last_.is_keyframe && !last_.is_updated_this_cycle) {
- LOG(LS_ERROR) << "Last buffer was not updated during pattern cycle.";
+ RTC_LOG(LS_ERROR) << "Last buffer was not updated during pattern cycle.";
return false;
}
if (!arf_.is_keyframe && !arf_.is_updated_this_cycle) {
- LOG(LS_ERROR) << "Arf buffer was not updated during pattern cycle.";
+ RTC_LOG(LS_ERROR) << "Arf buffer was not updated during pattern cycle.";
return false;
}
if (!golden_.is_keyframe && !golden_.is_updated_this_cycle) {
- LOG(LS_ERROR) << "Golden buffer was not updated during pattern cycle.";
+ RTC_LOG(LS_ERROR)
+ << "Golden buffer was not updated during pattern cycle.";
return false;
}
last_.is_updated_this_cycle = false;
@@ -512,9 +513,9 @@ bool DefaultTemporalLayersChecker::CheckTemporalConfig(
}
uint8_t expected_tl_idx = temporal_ids_[pattern_idx_];
if (frame_config.packetizer_temporal_idx != expected_tl_idx) {
- LOG(LS_ERROR) << "Frame has an incorrect temporal index. Expected: "
- << static_cast<int>(expected_tl_idx) << " Actual: "
- << static_cast<int>(frame_config.packetizer_temporal_idx);
+ RTC_LOG(LS_ERROR) << "Frame has an incorrect temporal index. Expected: "
+ << static_cast<int>(expected_tl_idx) << " Actual: "
+ << static_cast<int>(frame_config.packetizer_temporal_idx);
return false;
}

@@ -555,8 +556,8 @@ bool DefaultTemporalLayersChecker::CheckTemporalConfig(
}

if (need_sync != frame_config.layer_sync) {
- LOG(LS_ERROR) << "Sync bit is set incorrectly on a frame. Expected: "
- << need_sync << " Actual: " << frame_config.layer_sync;
+ RTC_LOG(LS_ERROR) << "Sync bit is set incorrectly on a frame. Expected: "
+ << need_sync << " Actual: " << frame_config.layer_sync;
return false;
}

@@ -565,10 +566,11 @@ bool DefaultTemporalLayersChecker::CheckTemporalConfig(
for (i = 0; i < dependencies.size(); ++i) {
if (temporal_dependencies_[pattern_idx_].find(dependencies[i]) ==
temporal_dependencies_[pattern_idx_].end()) {
- LOG(LS_ERROR) << "Illegal temporal dependency out of defined pattern "
- "from position "
- << static_cast<int>(pattern_idx_) << " to position "
- << static_cast<int>(dependencies[i]);
+ RTC_LOG(LS_ERROR)
+ << "Illegal temporal dependency out of defined pattern "
+ "from position "
+ << static_cast<int>(pattern_idx_) << " to position "
+ << static_cast<int>(dependencies[i]);
return false;
}
}

@@ -48,7 +48,7 @@ bool TemporalLayersChecker::CheckAndUpdateBufferState(
}
if (!frame_is_keyframe && !state->is_keyframe &&
state->temporal_layer > temporal_layer) {
- LOG(LS_ERROR) << "Frame is referencing higher temporal layer.";
+ RTC_LOG(LS_ERROR) << "Frame is referencing higher temporal layer.";
return false;
}
}

@@ -72,9 +72,9 @@ bool TemporalLayersChecker::CheckTemporalConfig(
if (frame_config.packetizer_temporal_idx >= num_temporal_layers_ ||
(frame_config.packetizer_temporal_idx == kNoTemporalIdx &&
num_temporal_layers_ > 1)) {
- LOG(LS_ERROR) << "Incorrect temporal layer set for frame: "
- << frame_config.packetizer_temporal_idx
- << " num_temporal_layers: " << num_temporal_layers_;
+ RTC_LOG(LS_ERROR) << "Incorrect temporal layer set for frame: "
+ << frame_config.packetizer_temporal_idx
+ << " num_temporal_layers: " << num_temporal_layers_;
return false;
}

@@ -86,7 +86,7 @@ bool TemporalLayersChecker::CheckTemporalConfig(
&last_, &need_sync, frame_is_keyframe,
frame_config.packetizer_temporal_idx, frame_config.last_buffer_flags,
sequence_number_, &lowest_sequence_referenced)) {
- LOG(LS_ERROR) << "Error in the Last buffer";
+ RTC_LOG(LS_ERROR) << "Error in the Last buffer";
return false;
}
if (!CheckAndUpdateBufferState(&golden_, &need_sync, frame_is_keyframe,

@@ -94,22 +94,22 @@ bool TemporalLayersChecker::CheckTemporalConfig(
frame_config.golden_buffer_flags,
sequence_number_,
&lowest_sequence_referenced)) {
- LOG(LS_ERROR) << "Error in the Golden buffer";
+ RTC_LOG(LS_ERROR) << "Error in the Golden buffer";
return false;
}
if (!CheckAndUpdateBufferState(
&arf_, &need_sync, frame_is_keyframe,
frame_config.packetizer_temporal_idx, frame_config.arf_buffer_flags,
sequence_number_, &lowest_sequence_referenced)) {
- LOG(LS_ERROR) << "Error in the Arf buffer";
+ RTC_LOG(LS_ERROR) << "Error in the Arf buffer";
return false;
}

if (lowest_sequence_referenced < last_sync_sequence_number_ &&
!frame_is_keyframe) {
- LOG(LS_ERROR) << "Reference past the last sync frame. Referenced "
- << lowest_sequence_referenced << ", but sync was at "
- << last_sync_sequence_number_;
+ RTC_LOG(LS_ERROR) << "Reference past the last sync frame. Referenced "
+ << lowest_sequence_referenced << ", but sync was at "
+ << last_sync_sequence_number_;
return false;
}

@@ -126,8 +126,8 @@ bool TemporalLayersChecker::CheckTemporalConfig(
}

if (need_sync != frame_config.layer_sync) {
- LOG(LS_ERROR) << "Sync bit is set incorrectly on a frame. Expected: "
- << need_sync << " Actual: " << frame_config.layer_sync;
+ RTC_LOG(LS_ERROR) << "Sync bit is set incorrectly on a frame. Expected: "
- << need_sync << " Actual: " << frame_config.layer_sync;
return false;
}
return true;

@@ -70,7 +70,7 @@ Vp9FrameBufferPool::GetFrameBuffer(size_t min_size) {
available_buffer = new rtc::RefCountedObject<Vp9FrameBuffer>();
allocated_buffers_.push_back(available_buffer);
if (allocated_buffers_.size() > max_num_buffers_) {
- LOG(LS_WARNING)
+ RTC_LOG(LS_WARNING)
<< allocated_buffers_.size() << " Vp9FrameBuffers have been "
<< "allocated by a Vp9FrameBufferPool (exceeding what is "
<< "considered reasonable, " << max_num_buffers_ << ").";

@@ -128,8 +128,8 @@ bool VP9EncoderImpl::SetSvcRates() {

if (ExplicitlyConfiguredSpatialLayers()) {
if (num_temporal_layers_ > 1) {
- LOG(LS_ERROR) << "Multiple temporal layers when manually specifying "
- "spatial layers not implemented yet!";
+ RTC_LOG(LS_ERROR) << "Multiple temporal layers when manually specifying "
+ "spatial layers not implemented yet!";
return false;
}
int total_bitrate_bps = 0;

@@ -150,7 +150,7 @@ bool VP9EncoderImpl::SetSvcRates() {
for (i = 0; i < num_spatial_layers_; ++i) {
if (svc_params_.scaling_factor_num[i] <= 0 ||
svc_params_.scaling_factor_den[i] <= 0) {
- LOG(LS_ERROR) << "Scaling factors not specified!";
+ RTC_LOG(LS_ERROR) << "Scaling factors not specified!";
return false;
}
rate_ratio[i] =

@@ -178,8 +178,8 @@ bool VP9EncoderImpl::SetSvcRates() {
config_->layer_target_bitrate[i * num_temporal_layers_ + 2] =
config_->ss_target_bitrate[i];
} else {
- LOG(LS_ERROR) << "Unsupported number of temporal layers: "
- << num_temporal_layers_;
+ RTC_LOG(LS_ERROR) << "Unsupported number of temporal layers: "
+ << num_temporal_layers_;
return false;
}
}
@@ -861,8 +861,8 @@ VP9DecoderImpl::~VP9DecoderImpl() {
// The frame buffers are reference counted and frames are exposed after
// decoding. There may be valid usage cases where previous frames are still
// referenced after ~VP9DecoderImpl that is not a leak.
- LOG(LS_INFO) << num_buffers_in_use << " Vp9FrameBuffers are still "
- << "referenced during ~VP9DecoderImpl.";
+ RTC_LOG(LS_INFO) << num_buffers_in_use << " Vp9FrameBuffers are still "
+ << "referenced during ~VP9DecoderImpl.";
}
}

@@ -81,15 +81,15 @@ void VCMDecodingState::SetState(const VCMFrameBuffer* frame) {
for (const NaluInfo& nalu : frame->GetNaluInfos()) {
if (nalu.type == H264::NaluType::kPps) {
if (nalu.pps_id < 0) {
- LOG(LS_WARNING) << "Received pps without pps id.";
+ RTC_LOG(LS_WARNING) << "Received pps without pps id.";
} else if (nalu.sps_id < 0) {
- LOG(LS_WARNING) << "Received pps without sps id.";
+ RTC_LOG(LS_WARNING) << "Received pps without sps id.";
} else {
received_pps_[nalu.pps_id] = nalu.sps_id;
}
} else if (nalu.type == H264::NaluType::kSps) {
if (nalu.sps_id < 0) {
- LOG(LS_WARNING) << "Received sps without sps id.";
+ RTC_LOG(LS_WARNING) << "Received sps without sps id.";
} else {
received_sps_.insert(nalu.sps_id);
}

@@ -295,8 +295,8 @@ bool VCMDecodingState::UsingFlexibleMode(const VCMFrameBuffer* frame) const {
frame->CodecSpecific()->codecType == kVideoCodecVP9 &&
frame->CodecSpecific()->codecSpecific.VP9.flexible_mode;
if (is_flexible_mode && frame->PictureId() == kNoPictureId) {
- LOG(LS_WARNING) << "Frame is marked as using flexible mode but no"
- << "picture id is set.";
+ RTC_LOG(LS_WARNING) << "Frame is marked as using flexible mode but no"
+ << "picture id is set.";
return false;
}
return is_flexible_mode;

@@ -326,16 +326,16 @@ bool VCMDecodingState::HaveSpsAndPps(const std::vector<NaluInfo>& nalus) const {
switch (nalu.type) {
case H264::NaluType::kPps:
if (nalu.pps_id < 0) {
- LOG(LS_WARNING) << "Received pps without pps id.";
+ RTC_LOG(LS_WARNING) << "Received pps without pps id.";
} else if (nalu.sps_id < 0) {
- LOG(LS_WARNING) << "Received pps without sps id.";
+ RTC_LOG(LS_WARNING) << "Received pps without sps id.";
} else {
new_pps[nalu.pps_id] = nalu.sps_id;
}
break;
case H264::NaluType::kSps:
if (nalu.sps_id < 0) {
- LOG(LS_WARNING) << "Received sps without sps id.";
+ RTC_LOG(LS_WARNING) << "Received sps without sps id.";
} else {
new_sps.insert(nalu.sps_id);
}

@@ -122,8 +122,8 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
(requiredSizeBytes % kBufferIncStepSizeBytes > 0);
const uint32_t newSize = _size + increments * kBufferIncStepSizeBytes;
if (newSize > kMaxJBFrameSizeBytes) {
- LOG(LS_ERROR) << "Failed to insert packet due to frame being too "
- "big.";
+ RTC_LOG(LS_ERROR) << "Failed to insert packet due to frame being too "
+ "big.";
return kSizeError;
}
VerifyAndAllocate(newSize);

@@ -178,16 +178,15 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(
!frame_is_higher_spatial_layer_of_last_decoded_frame) {
// TODO(brandtr): Consider clearing the entire buffer when we hit
// these conditions.
- LOG(LS_WARNING) << "Frame with (timestamp:picture_id:spatial_id) ("
- << frame->timestamp << ":" << frame->picture_id << ":"
- << static_cast<int>(frame->spatial_layer) << ")"
- << " sent to decoder after frame with"
- << " (timestamp:picture_id:spatial_id) ("
- << last_decoded_frame_timestamp_ << ":"
- << last_decoded_frame_key.picture_id << ":"
- << static_cast<int>(
- last_decoded_frame_key.spatial_layer)
- << ").";
+ RTC_LOG(LS_WARNING)
+ << "Frame with (timestamp:picture_id:spatial_id) ("
+ << frame->timestamp << ":" << frame->picture_id << ":"
+ << static_cast<int>(frame->spatial_layer) << ")"
+ << " sent to decoder after frame with"
+ << " (timestamp:picture_id:spatial_id) ("
+ << last_decoded_frame_timestamp_ << ":"
+ << last_decoded_frame_key.picture_id << ":"
+ << static_cast<int>(last_decoded_frame_key.spatial_layer) << ").";
}
}

@@ -218,15 +217,15 @@ bool FrameBuffer::HasBadRenderTiming(const FrameObject& frame, int64_t now_ms) {
}
if (std::abs(render_time_ms - now_ms) > kMaxVideoDelayMs) {
int frame_delay = static_cast<int>(std::abs(render_time_ms - now_ms));
- LOG(LS_WARNING) << "A frame about to be decoded is out of the configured "
- << "delay bounds (" << frame_delay << " > "
- << kMaxVideoDelayMs
- << "). Resetting the video jitter buffer.";
+ RTC_LOG(LS_WARNING)
+ << "A frame about to be decoded is out of the configured "
+ << "delay bounds (" << frame_delay << " > " << kMaxVideoDelayMs
+ << "). Resetting the video jitter buffer.";
return true;
}
if (static_cast<int>(timing_->TargetVideoDelay()) > kMaxVideoDelayMs) {
- LOG(LS_WARNING) << "The video target delay has grown larger than "
- << kMaxVideoDelayMs << " ms.";
+ RTC_LOG(LS_WARNING) << "The video target delay has grown larger than "
+ << kMaxVideoDelayMs << " ms.";
return true;
}
return false;
@@ -302,17 +301,19 @@ int FrameBuffer::InsertFrame(std::unique_ptr<FrameObject> frame) {
: last_continuous_frame_it_->first.picture_id;

if (!ValidReferences(*frame)) {
- LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) (" << key.picture_id
- << ":" << static_cast<int>(key.spatial_layer)
- << ") has invalid frame references, dropping frame.";
+ RTC_LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) ("
+ << key.picture_id << ":"
+ << static_cast<int>(key.spatial_layer)
+ << ") has invalid frame references, dropping frame.";
return last_continuous_picture_id;
}

if (num_frames_buffered_ >= kMaxFramesBuffered) {
- LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) (" << key.picture_id
- << ":" << static_cast<int>(key.spatial_layer)
- << ") could not be inserted due to the frame "
- << "buffer being full, dropping frame.";
+ RTC_LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) ("
+ << key.picture_id << ":"
+ << static_cast<int>(key.spatial_layer)
+ << ") could not be inserted due to the frame "
+ << "buffer being full, dropping frame.";
return last_continuous_picture_id;
}

@@ -325,18 +326,19 @@ int FrameBuffer::InsertFrame(std::unique_ptr<FrameObject> frame) {
// reconfiguration or some other reason. Even though this is not according
// to spec we can still continue to decode from this frame if it is a
// keyframe.
- LOG(LS_WARNING) << "A jump in picture id was detected, clearing buffer.";
+ RTC_LOG(LS_WARNING)
+ << "A jump in picture id was detected, clearing buffer.";
ClearFramesAndHistory();
last_continuous_picture_id = -1;
} else {
- LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) ("
- << key.picture_id << ":"
- << static_cast<int>(key.spatial_layer)
- << ") inserted after frame ("
- << last_decoded_frame_it_->first.picture_id << ":"
- << static_cast<int>(
- last_decoded_frame_it_->first.spatial_layer)
- << ") was handed off for decoding, dropping frame.";
+ RTC_LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) ("
+ << key.picture_id << ":"
+ << static_cast<int>(key.spatial_layer)
+ << ") inserted after frame ("
+ << last_decoded_frame_it_->first.picture_id << ":"
+ << static_cast<int>(
+ last_decoded_frame_it_->first.spatial_layer)
+ << ") was handed off for decoding, dropping frame.";
return last_continuous_picture_id;
}
}

@@ -347,7 +349,8 @@ int FrameBuffer::InsertFrame(std::unique_ptr<FrameObject> frame) {
if (!frames_.empty() &&
key < frames_.begin()->first &&
frames_.rbegin()->first < key) {
- LOG(LS_WARNING) << "A jump in picture id was detected, clearing buffer.";
+ RTC_LOG(LS_WARNING)
+ << "A jump in picture id was detected, clearing buffer.";
ClearFramesAndHistory();
last_continuous_picture_id = -1;
}

@@ -355,9 +358,10 @@ int FrameBuffer::InsertFrame(std::unique_ptr<FrameObject> frame) {
auto info = frames_.insert(std::make_pair(key, FrameInfo())).first;

if (info->second.frame) {
- LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) (" << key.picture_id
- << ":" << static_cast<int>(key.spatial_layer)
- << ") already inserted, dropping frame.";
+ RTC_LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) ("
+ << key.picture_id << ":"
+ << static_cast<int>(key.spatial_layer)
+ << ") already inserted, dropping frame.";
return last_continuous_picture_id;
}

@@ -476,7 +480,7 @@ bool FrameBuffer::UpdateFrameInfoWithIncomingFrame(const FrameObject& frame,
if (ref_info == frames_.end()) {
int64_t now_ms = clock_->TimeInMilliseconds();
if (last_log_non_decoded_ms_ + kLogNonDecodedIntervalMs < now_ms) {
- LOG(LS_WARNING)
+ RTC_LOG(LS_WARNING)
<< "Frame with (picture_id:spatial_id) (" << key.picture_id << ":"
<< static_cast<int>(key.spatial_layer)
<< ") depends on a non-decoded frame more previous than"

@@ -78,8 +78,8 @@ void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
}

if (frameInfo == NULL) {
- LOG(LS_WARNING) << "Too many frames backed up in the decoder, dropping "
- "this one.";
+ RTC_LOG(LS_WARNING) << "Too many frames backed up in the decoder, dropping "
+ "this one.";
return;
}

@@ -234,10 +234,10 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {

_callback->OnDecoderImplementationName(decoder_->ImplementationName());
if (ret < WEBRTC_VIDEO_CODEC_OK) {
- LOG(LS_WARNING) << "Failed to decode frame with timestamp "
- << frame.TimeStamp() << ", error code: " << ret;
- _callback->Pop(frame.TimeStamp());
- return ret;
+ RTC_LOG(LS_WARNING) << "Failed to decode frame with timestamp "
+ << frame.TimeStamp() << ", error code: " << ret;
+ _callback->Pop(frame.TimeStamp());
+ return ret;
} else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI) {
// No output

@@ -60,9 +60,9 @@ int32_t VCMGenericEncoder::InitEncode(const VideoCodec* settings,
vcm_encoded_frame_callback_->OnFrameRateChanged(settings->maxFramerate);

if (encoder_->InitEncode(settings, number_of_cores, max_payload_size) != 0) {
- LOG(LS_ERROR) << "Failed to initialize the encoder associated with "
- "payload name: "
- << settings->plName;
+ RTC_LOG(LS_ERROR) << "Failed to initialize the encoder associated with "
+ "payload name: "
+ << settings->plName;
return -1;
}
vcm_encoded_frame_callback_->Reset();
@@ -103,19 +103,19 @@ void VCMGenericEncoder::SetEncoderParameters(const EncoderParameters& params) {
if (channel_parameters_have_changed) {
int res = encoder_->SetChannelParameters(params.loss_rate, params.rtt);
if (res != 0) {
- LOG(LS_WARNING) << "Error set encoder parameters (loss = "
- << params.loss_rate << ", rtt = " << params.rtt
- << "): " << res;
+ RTC_LOG(LS_WARNING) << "Error set encoder parameters (loss = "
+ << params.loss_rate << ", rtt = " << params.rtt
+ << "): " << res;
}
}
if (rates_have_changed) {
int res = encoder_->SetRateAllocation(params.target_bitrate,
params.input_frame_rate);
if (res != 0) {
- LOG(LS_WARNING) << "Error set encoder rate (total bitrate bps = "
- << params.target_bitrate.get_sum_bps()
- << ", framerate = " << params.input_frame_rate
- << "): " << res;
+ RTC_LOG(LS_WARNING) << "Error set encoder rate (total bitrate bps = "
+ << params.target_bitrate.get_sum_bps()
+ << ", framerate = " << params.input_frame_rate
+ << "): " << res;
}
vcm_encoded_frame_callback_->OnFrameRateChanged(params.input_frame_rate);
for (size_t i = 0; i < streams_or_svc_num_; ++i) {

@@ -225,8 +225,8 @@ void VCMEncodedFrameCallback::OnEncodeStarted(int64_t capture_time_ms,
.capture_time_ms) >= 0);
if (timing_frames_info_[simulcast_svc_idx].encode_start_list.size() ==
kMaxEncodeStartTimeListSize) {
- LOG(LS_WARNING) << "Too many frames in the encode_start_list."
- " Did encoder stall?";
+ RTC_LOG(LS_WARNING) << "Too many frames in the encode_start_list."
+ " Did encoder stall?";
post_encode_callback_->OnDroppedFrame(DropReason::kDroppedByEncoder);
timing_frames_info_[simulcast_svc_idx].encode_start_list.pop_front();
}

@@ -29,20 +29,20 @@ namespace webrtc {

bool H264SpropParameterSets::DecodeSprop(const std::string& sprop) {
size_t separator_pos = sprop.find(',');
- LOG(LS_INFO) << "Parsing sprop \"" << sprop << "\"";
+ RTC_LOG(LS_INFO) << "Parsing sprop \"" << sprop << "\"";
if ((separator_pos <= 0) || (separator_pos >= sprop.length() - 1)) {
- LOG(LS_WARNING) << "Invalid seperator position " << separator_pos << " *"
- << sprop << "*";
+ RTC_LOG(LS_WARNING) << "Invalid seperator position " << separator_pos
+ << " *" << sprop << "*";
return false;
}
std::string sps_str = sprop.substr(0, separator_pos);
std::string pps_str = sprop.substr(separator_pos + 1, std::string::npos);
if (!DecodeAndConvert(sps_str, &sps_)) {
- LOG(LS_WARNING) << "Failed to decode sprop/sps *" << sprop << "*";
+ RTC_LOG(LS_WARNING) << "Failed to decode sprop/sps *" << sprop << "*";
return false;
}
if (!DecodeAndConvert(pps_str, &pps_)) {
- LOG(LS_WARNING) << "Failed to decode sprop/pps *" << sprop << "*";
+ RTC_LOG(LS_WARNING) << "Failed to decode sprop/pps *" << sprop << "*";
return false;
}
return true;

@@ -60,20 +60,20 @@ H264SpsPpsTracker::PacketAction H264SpsPpsTracker::CopyAndFixBitstream(
// to prepend the SPS/PPS to the bitstream with start codes.
if (video_header.is_first_packet_in_frame) {
if (nalu.pps_id == -1) {
- LOG(LS_WARNING) << "No PPS id in IDR nalu.";
+ RTC_LOG(LS_WARNING) << "No PPS id in IDR nalu.";
return kRequestKeyframe;
}

pps = pps_data_.find(nalu.pps_id);
if (pps == pps_data_.end()) {
- LOG(LS_WARNING) << "No PPS with id << " << nalu.pps_id
- << " received";
+ RTC_LOG(LS_WARNING)
+ << "No PPS with id << " << nalu.pps_id << " received";
return kRequestKeyframe;
}

sps = sps_data_.find(pps->second.sps_id);
if (sps == sps_data_.end()) {
- LOG(LS_WARNING)
+ RTC_LOG(LS_WARNING)
<< "No SPS with id << " << pps->second.sps_id << " received";
return kRequestKeyframe;
}

@@ -159,8 +159,8 @@ H264SpsPpsTracker::PacketAction H264SpsPpsTracker::CopyAndFixBitstream(
codec_header->nalus[codec_header->nalus_length++] = sps_info;
codec_header->nalus[codec_header->nalus_length++] = pps_info;
} else {
- LOG(LS_WARNING) << "Not enough space in H.264 codec header to insert "
- "SPS/PPS provided out-of-band.";
+ RTC_LOG(LS_WARNING) << "Not enough space in H.264 codec header to insert "
+ "SPS/PPS provided out-of-band.";
}
}

@@ -202,21 +202,21 @@ void H264SpsPpsTracker::InsertSpsPpsNalus(const std::vector<uint8_t>& sps,
const std::vector<uint8_t>& pps) {
constexpr size_t kNaluHeaderOffset = 1;
if (sps.size() < kNaluHeaderOffset) {
- LOG(LS_WARNING) << "SPS size " << sps.size() << " is smaller than "
- << kNaluHeaderOffset;
+ RTC_LOG(LS_WARNING) << "SPS size " << sps.size() << " is smaller than "
+ << kNaluHeaderOffset;
return;
}
if ((sps[0] & 0x1f) != H264::NaluType::kSps) {
- LOG(LS_WARNING) << "SPS Nalu header missing";
+ RTC_LOG(LS_WARNING) << "SPS Nalu header missing";
return;
}
if (pps.size() < kNaluHeaderOffset) {
- LOG(LS_WARNING) << "PPS size " << pps.size() << " is smaller than "
- << kNaluHeaderOffset;
+ RTC_LOG(LS_WARNING) << "PPS size " << pps.size() << " is smaller than "
+ << kNaluHeaderOffset;
return;
}
if ((pps[0] & 0x1f) != H264::NaluType::kPps) {
- LOG(LS_WARNING) << "SPS Nalu header missing";
+ RTC_LOG(LS_WARNING) << "SPS Nalu header missing";
return;
}
rtc::Optional<SpsParser::SpsState> parsed_sps = SpsParser::ParseSps(
@@ -225,11 +225,11 @@ void H264SpsPpsTracker::InsertSpsPpsNalus(const std::vector<uint8_t>& sps,
pps.data() + kNaluHeaderOffset, pps.size() - kNaluHeaderOffset);

if (!parsed_sps) {
- LOG(LS_WARNING) << "Failed to parse SPS.";
+ RTC_LOG(LS_WARNING) << "Failed to parse SPS.";
}

if (!parsed_pps) {
- LOG(LS_WARNING) << "Failed to parse PPS.";
+ RTC_LOG(LS_WARNING) << "Failed to parse PPS.";
}

if (!parsed_pps || !parsed_sps) {

@@ -253,9 +253,9 @@ void H264SpsPpsTracker::InsertSpsPpsNalus(const std::vector<uint8_t>& sps,
pps_info.data.reset(pps_data);
pps_data_[parsed_pps->id] = std::move(pps_info);

- LOG(LS_INFO) << "Inserted SPS id " << parsed_sps->id << " and PPS id "
- << parsed_pps->id << " (referencing SPS " << parsed_pps->sps_id
- << ")";
+ RTC_LOG(LS_INFO) << "Inserted SPS id " << parsed_sps->id << " and PPS id "
+ << parsed_pps->id << " (referencing SPS "
+ << parsed_pps->sps_id << ")";
}

} // namespace video_coding

@@ -605,7 +605,7 @@ VCMFrameBufferEnum VCMJitterBuffer::GetFrame(const VCMPacket& packet,
*frame = GetEmptyFrame();
if (*frame == NULL) {
// No free frame! Try to reclaim some...
- LOG(LS_WARNING) << "Unable to get empty frame; Recycling.";
+ RTC_LOG(LS_WARNING) << "Unable to get empty frame; Recycling.";
bool found_key_frame = RecycleFramesUntilKeyFrame();
*frame = GetEmptyFrame();
RTC_CHECK(*frame);

@@ -655,7 +655,7 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
FindAndInsertContinuousFramesWithState(last_decoded_state_);

if (num_consecutive_old_packets_ > kMaxConsecutiveOldPackets) {
- LOG(LS_WARNING)
+ RTC_LOG(LS_WARNING)
<< num_consecutive_old_packets_
<< " consecutive old packets received. Flushing the jitter buffer.";
Flush();
@@ -996,9 +996,9 @@ std::vector<uint16_t> VCMJitterBuffer::GetNackList(bool* request_key_frame) {
int non_continuous_incomplete_duration =
NonContinuousOrIncompleteDuration();
if (non_continuous_incomplete_duration > 90 * max_incomplete_time_ms_) {
- LOG_F(LS_WARNING) << "Too long non-decodable duration: "
- << non_continuous_incomplete_duration << " > "
- << 90 * max_incomplete_time_ms_;
+ RTC_LOG_F(LS_WARNING) << "Too long non-decodable duration: "
+ << non_continuous_incomplete_duration << " > "
+ << 90 * max_incomplete_time_ms_;
FrameList::reverse_iterator rit = find_if(
incomplete_frames_.rbegin(), incomplete_frames_.rend(), IsKeyFrame);
if (rit == incomplete_frames_.rend()) {

@@ -1052,12 +1052,13 @@ bool VCMJitterBuffer::UpdateNackList(uint16_t sequence_number) {
"seqnum", i);
}
if (TooLargeNackList() && !HandleTooLargeNackList()) {
- LOG(LS_WARNING) << "Requesting key frame due to too large NACK list.";
+ RTC_LOG(LS_WARNING) << "Requesting key frame due to too large NACK list.";
return false;
}
if (MissingTooOldPacket(sequence_number) &&
!HandleTooOldPackets(sequence_number)) {
- LOG(LS_WARNING) << "Requesting key frame due to missing too old packets";
+ RTC_LOG(LS_WARNING)
+ << "Requesting key frame due to missing too old packets";
return false;
}
} else {

@@ -1075,9 +1076,9 @@ bool VCMJitterBuffer::TooLargeNackList() const {
bool VCMJitterBuffer::HandleTooLargeNackList() {
// Recycle frames until the NACK list is small enough. It is likely cheaper to
// request a key frame than to retransmit this many missing packets.
- LOG_F(LS_WARNING) << "NACK list has grown too large: "
- << missing_sequence_numbers_.size() << " > "
- << max_nack_list_size_;
+ RTC_LOG_F(LS_WARNING) << "NACK list has grown too large: "
+ << missing_sequence_numbers_.size() << " > "
+ << max_nack_list_size_;
bool key_frame_found = false;
while (TooLargeNackList()) {
key_frame_found = RecycleFramesUntilKeyFrame();

@@ -1101,9 +1102,9 @@ bool VCMJitterBuffer::HandleTooOldPackets(uint16_t latest_sequence_number) {
bool key_frame_found = false;
const uint16_t age_of_oldest_missing_packet =
latest_sequence_number - *missing_sequence_numbers_.begin();
- LOG_F(LS_WARNING) << "NACK list contains too old sequence numbers: "
- << age_of_oldest_missing_packet << " > "
- << max_packet_age_to_nack_;
+ RTC_LOG_F(LS_WARNING) << "NACK list contains too old sequence numbers: "
+ << age_of_oldest_missing_packet << " > "
+ << max_packet_age_to_nack_;
while (MissingTooOldPacket(latest_sequence_number)) {
key_frame_found = RecycleFramesUntilKeyFrame();
}
@@ -1163,7 +1164,7 @@ bool VCMJitterBuffer::RecycleFramesUntilKeyFrame() {
}
TRACE_EVENT_INSTANT0("webrtc", "JB::RecycleFramesUntilKeyFrame");
if (key_frame_found) {
- LOG(LS_INFO) << "Found key frame while dropping frames.";
+ RTC_LOG(LS_INFO) << "Found key frame while dropping frames.";
// Reset last decoded state to make sure the next frame decoded is a key
// frame, and start NACKing from here.
last_decoded_state_.Reset();

@@ -1195,7 +1196,7 @@ void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
if (frame.FrameType() == kVideoFrameKey) {
++receive_statistics_.key_frames;
if (receive_statistics_.key_frames == 1) {
- LOG(LS_INFO) << "Received first complete key frame";
+ RTC_LOG(LS_INFO) << "Received first complete key frame";
}
} else {
++receive_statistics_.delta_frames;

@@ -196,8 +196,8 @@ void NackModule::AddPacketsToNack(uint16_t seq_num_start,

if (nack_list_.size() + num_new_nacks > kMaxNackPackets) {
nack_list_.clear();
- LOG(LS_WARNING) << "NACK list full, clearing NACK"
- " list and requesting keyframe.";
+ RTC_LOG(LS_WARNING) << "NACK list full, clearing NACK"
+ " list and requesting keyframe.";
keyframe_request_sender_->RequestKeyFrame();
return;
}

@@ -223,8 +223,8 @@ std::vector<uint16_t> NackModule::GetNackBatch(NackFilterOptions options) {
++it->second.retries;
it->second.sent_at_time = now_ms;
if (it->second.retries >= kMaxNackRetries) {
- LOG(LS_WARNING) << "Sequence number " << it->second.seq_num
- << " removed from NACK list due to max retries.";
+ RTC_LOG(LS_WARNING) << "Sequence number " << it->second.seq_num
+ << " removed from NACK list due to max retries.";
it = nack_list_.erase(it);
} else {
++it;

@@ -237,8 +237,8 @@ std::vector<uint16_t> NackModule::GetNackBatch(NackFilterOptions options) {
++it->second.retries;
it->second.sent_at_time = now_ms;
if (it->second.retries >= kMaxNackRetries) {
- LOG(LS_WARNING) << "Sequence number " << it->second.seq_num
- << " removed from NACK list due to max retries.";
+ RTC_LOG(LS_WARNING) << "Sequence number " << it->second.seq_num
- << " removed from NACK list due to max retries.";
it = nack_list_.erase(it);
} else {
++it;

@@ -206,8 +206,8 @@ rtc::Optional<int64_t> PacketBuffer::LastReceivedKeyframePacketMs() const {

bool PacketBuffer::ExpandBufferSize() {
if (size_ == max_size_) {
- LOG(LS_WARNING) << "PacketBuffer is already at max size (" << max_size_
- << "), failed to increase size. Clearing PacketBuffer.";
+ RTC_LOG(LS_WARNING) << "PacketBuffer is already at max size (" << max_size_
+ << "), failed to increase size. Clearing PacketBuffer.";
Clear();
return false;
}

@@ -225,7 +225,7 @@ bool PacketBuffer::ExpandBufferSize() {
size_ = new_size;
sequence_buffer_ = std::move(new_sequence_buffer);
data_buffer_ = std::move(new_data_buffer);
- LOG(LS_INFO) << "PacketBuffer size expanded to " << new_size;
+ RTC_LOG(LS_INFO) << "PacketBuffer size expanded to " << new_size;
return true;
}

@@ -344,7 +344,7 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
ss << "Treating as key frame since "
"WebRTC-SpsPpsIdrIsH264Keyframe is disabled.";
}
- LOG(LS_WARNING) << ss.str();
+ RTC_LOG(LS_WARNING) << ss.str();
}

// Now that we have decided whether to treat this frame as a key frame

@@ -419,9 +419,9 @@ bool PacketBuffer::GetBitstream(const RtpFrameObject& frame,
RTC_DCHECK_EQ(data_buffer_[index].seqNum, sequence_buffer_[index].seq_num);
size_t length = data_buffer_[index].sizeBytes;
if (destination + length > destination_end) {
- LOG(LS_WARNING) << "Frame (" << frame.picture_id << ":"
- << static_cast<int>(frame.spatial_layer) << ")"
- << " bitstream buffer is not large enough.";
+ RTC_LOG(LS_WARNING) << "Frame (" << frame.picture_id << ":"
+ << static_cast<int>(frame.spatial_layer) << ")"
+ << " bitstream buffer is not large enough.";
return false;
}

@@ -165,15 +165,16 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
timing_error = true;
} else if (std::abs(render_time_ms - now_ms) > max_video_delay_ms_) {
int frame_delay = static_cast<int>(std::abs(render_time_ms - now_ms));
- LOG(LS_WARNING) << "A frame about to be decoded is out of the configured "
- << "delay bounds (" << frame_delay << " > "
- << max_video_delay_ms_
- << "). Resetting the video jitter buffer.";
+ RTC_LOG(LS_WARNING)
+ << "A frame about to be decoded is out of the configured "
+ << "delay bounds (" << frame_delay << " > " << max_video_delay_ms_
+ << "). Resetting the video jitter buffer.";
timing_error = true;
} else if (static_cast<int>(timing_->TargetVideoDelay()) >
max_video_delay_ms_) {
- LOG(LS_WARNING) << "The video target delay has grown larger than "
- << max_video_delay_ms_ << " ms. Resetting jitter buffer.";
+ RTC_LOG(LS_WARNING) << "The video target delay has grown larger than "
+ << max_video_delay_ms_
+ << " ms. Resetting jitter buffer.";
timing_error = true;
}

@@ -204,9 +204,10 @@ RtpFrameReferenceFinder::ManageFrameGeneric(RtpFrameObject* frame,
// that this frame indirectly references.
auto seq_num_it = last_seq_num_gop_.upper_bound(frame->last_seq_num());
if (seq_num_it == last_seq_num_gop_.begin()) {
- LOG(LS_WARNING) << "Generic frame with packet range ["
- << frame->first_seq_num() << ", " << frame->last_seq_num()
- << "] has no GoP, dropping frame.";
+ RTC_LOG(LS_WARNING) << "Generic frame with packet range ["
+ << frame->first_seq_num() << ", "
+ << frame->last_seq_num()
+ << "] has no GoP, dropping frame.";
return kDrop;
}
seq_num_it--;

@@ -244,7 +245,8 @@ RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameVp8(
RtpFrameObject* frame) {
rtc::Optional<RTPVideoTypeHeader> rtp_codec_header = frame->GetCodecHeader();
if (!rtp_codec_header) {
- LOG(LS_WARNING) << "Failed to get codec header from frame, dropping frame.";
+ RTC_LOG(LS_WARNING)
+ << "Failed to get codec header from frame, dropping frame.";
return kDrop;
}

@@ -351,10 +353,11 @@ RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameVp8(

if (!(AheadOf<uint16_t, kPicIdLength>(frame->picture_id,
layer_info_it->second[layer]))) {
- LOG(LS_WARNING) << "Frame with picture id " << frame->picture_id
- << " and packet range [" << frame->first_seq_num() << ", "
- << frame->last_seq_num() << "] already received, "
- << " dropping frame.";
+ RTC_LOG(LS_WARNING) << "Frame with picture id " << frame->picture_id
+ << " and packet range [" << frame->first_seq_num()
+ << ", " << frame->last_seq_num()
+ << "] already received, "
+ << " dropping frame.";
return kDrop;
}

@@ -397,7 +400,8 @@ RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameVp9(
RtpFrameObject* frame) {
rtc::Optional<RTPVideoTypeHeader> rtp_codec_header = frame->GetCodecHeader();
if (!rtp_codec_header) {
- LOG(LS_WARNING) << "Failed to get codec header from frame, dropping frame.";
+ RTC_LOG(LS_WARNING)
+ << "Failed to get codec header from frame, dropping frame.";
return kDrop;
}

@@ -432,8 +436,9 @@ RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameVp9(
if (codec_header.ss_data_available) {
// Scalability structures can only be sent with tl0 frames.
if (codec_header.temporal_idx != 0) {
- LOG(LS_WARNING) << "Received scalability structure on a non base layer"
- " frame. Scalability structure ignored.";
+ RTC_LOG(LS_WARNING)
+ << "Received scalability structure on a non base layer"
+ " frame. Scalability structure ignored.";
} else {
current_ss_idx_ = Add<kMaxGofSaved>(current_ss_idx_, 1);
scalability_structures_[current_ss_idx_] = codec_header.gof;

@@ -453,7 +458,7 @@ RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameVp9(
if (frame->frame_type() == kVideoFrameKey) {
// When using GOF all keyframes must include the scalability structure.
if (!codec_header.ss_data_available)
- LOG(LS_WARNING) << "Received keyframe without scalability structure";
+ RTC_LOG(LS_WARNING) << "Received keyframe without scalability structure";

frame->num_references = 0;
GofInfo info = gof_info_.find(codec_header.tl0_pic_idx)->second;

@@ -437,7 +437,7 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
}

if (packets_.size() == kMaxPacketsInSession) {
- LOG(LS_ERROR) << "Max number of packets per frame has been reached.";
+ RTC_LOG(LS_ERROR) << "Max number of packets per frame has been reached.";
return -1;
}

@@ -478,8 +478,9 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
first_packet_seq_num_ = static_cast<int>(packet.seqNum);
} else if (first_packet_seq_num_ != -1 &&
IsNewerSequenceNumber(first_packet_seq_num_, packet.seqNum)) {
- LOG(LS_WARNING) << "Received packet with a sequence number which is out "
- "of frame boundaries";
+ RTC_LOG(LS_WARNING)
+ << "Received packet with a sequence number which is out "
+ "of frame boundaries";
return -3;
} else if (frame_type_ == kEmptyFrame && packet.frameType != kEmptyFrame) {
// Update the frame type with the type of the first media packet.

@@ -492,8 +493,9 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
last_packet_seq_num_ = static_cast<int>(packet.seqNum);
} else if (last_packet_seq_num_ != -1 &&
IsNewerSequenceNumber(packet.seqNum, last_packet_seq_num_)) {
- LOG(LS_WARNING) << "Received packet with a sequence number which is out "
- "of frame boundaries";
+ RTC_LOG(LS_WARNING)
+ << "Received packet with a sequence number which is out "
+ "of frame boundaries";
return -3;
}
}

@@ -50,7 +50,7 @@ std::unique_ptr<IvfFileWriter> IvfFileWriter::Wrap(rtc::File file,

bool IvfFileWriter::WriteHeader() {
if (!file_.Seek(0)) {
- LOG(LS_WARNING) << "Unable to rewind ivf output file.";
+ RTC_LOG(LS_WARNING) << "Unable to rewind ivf output file.";
return false;
}

@@ -82,7 +82,7 @@ bool IvfFileWriter::WriteHeader() {
ivf_header[11] = '4';
break;
default:
- LOG(LS_ERROR) << "Unknown CODEC type: " << codec_type_;
+ RTC_LOG(LS_ERROR) << "Unknown CODEC type: " << codec_type_;
return false;
}

@@ -98,7 +98,7 @@ bool IvfFileWriter::WriteHeader() {
ByteWriter<uint32_t>::WriteLittleEndian(&ivf_header[28], 0); // Reserved.

if (file_.Write(ivf_header, kIvfHeaderSize) < kIvfHeaderSize) {
- LOG(LS_ERROR) << "Unable to write IVF header for ivf output file.";
+ RTC_LOG(LS_ERROR) << "Unable to write IVF header for ivf output file.";
return false;
}

@@ -124,10 +124,11 @@ bool IvfFileWriter::InitFromFirstFrame(const EncodedImage& encoded_image,

const char* codec_name =
CodecTypeToPayloadString(codec_type_);
- LOG(LS_WARNING) << "Created IVF file for codec data of type " << codec_name
- << " at resolution " << width_ << " x " << height_
- << ", using " << (using_capture_timestamps_ ? "1" : "90")
- << "kHz clock resolution.";
+ RTC_LOG(LS_WARNING) << "Created IVF file for codec data of type "
+ << codec_name << " at resolution " << width_ << " x "
+ << height_ << ", using "
+ << (using_capture_timestamps_ ? "1" : "90")
+ << "kHz clock resolution.";
return true;
}

@@ -143,7 +144,7 @@ bool IvfFileWriter::WriteFrame(const EncodedImage& encoded_image,
if ((encoded_image._encodedWidth > 0 || encoded_image._encodedHeight > 0) &&
(encoded_image._encodedHeight != height_ ||
encoded_image._encodedWidth != width_)) {
- LOG(LS_WARNING)
+ RTC_LOG(LS_WARNING)
<< "Incomig frame has diffferent resolution then previous: (" << width_
<< "x" << height_ << ") -> (" << encoded_image._encodedWidth << "x"
<< encoded_image._encodedHeight << ")";
@@ -153,16 +154,16 @@ bool IvfFileWriter::WriteFrame(const EncodedImage& encoded_image,
? encoded_image.capture_time_ms_
: wrap_handler_.Unwrap(encoded_image._timeStamp);
if (last_timestamp_ != -1 && timestamp <= last_timestamp_) {
- LOG(LS_WARNING) << "Timestamp no increasing: " << last_timestamp_ << " -> "
- << timestamp;
+ RTC_LOG(LS_WARNING) << "Timestamp no increasing: " << last_timestamp_
+ << " -> " << timestamp;
}
last_timestamp_ = timestamp;

const size_t kFrameHeaderSize = 12;
if (byte_limit_ != 0 &&
bytes_written_ + kFrameHeaderSize + encoded_image._length > byte_limit_) {
- LOG(LS_WARNING) << "Closing IVF file due to reaching size limit: "
- << byte_limit_ << " bytes.";
+ RTC_LOG(LS_WARNING) << "Closing IVF file due to reaching size limit: "
+ << byte_limit_ << " bytes.";
Close();
return false;
}

@@ -173,7 +174,7 @@ bool IvfFileWriter::WriteFrame(const EncodedImage& encoded_image,
if (file_.Write(frame_header, kFrameHeaderSize) < kFrameHeaderSize ||
file_.Write(encoded_image._buffer, encoded_image._length) <
encoded_image._length) {
- LOG(LS_ERROR) << "Unable to write frame to file.";
+ RTC_LOG(LS_ERROR) << "Unable to write frame to file.";
return false;
}

@@ -73,13 +73,13 @@ static VideoEncoder::QpThresholds CodecTypeToDefaultThresholds(
class QualityScaler::CheckQPTask : public rtc::QueuedTask {
public:
explicit CheckQPTask(QualityScaler* scaler) : scaler_(scaler) {
- LOG(LS_INFO) << "Created CheckQPTask. Scheduling on queue...";
+ RTC_LOG(LS_INFO) << "Created CheckQPTask. Scheduling on queue...";
rtc::TaskQueue::Current()->PostDelayedTask(
std::unique_ptr<rtc::QueuedTask>(this), scaler_->GetSamplingPeriodMs());
}
void Stop() {
RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
- LOG(LS_INFO) << "Stopping QP Check task.";
+ RTC_LOG(LS_INFO) << "Stopping QP Check task.";
stop_ = true;
}

@@ -122,8 +122,8 @@ QualityScaler::QualityScaler(AdaptationObserverInterface* observer,
RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
RTC_DCHECK(observer_ != nullptr);
check_qp_task_ = new CheckQPTask(this);
- LOG(LS_INFO) << "QP thresholds: low: " << thresholds_.low
- << ", high: " << thresholds_.high;
+ RTC_LOG(LS_INFO) << "QP thresholds: low: " << thresholds_.low
+ << ", high: " << thresholds_.high;
}

QualityScaler::~QualityScaler() {

@@ -168,7 +168,7 @@ void QualityScaler::CheckQP() {
// Check if we should scale up or down based on QP.
const rtc::Optional<int> avg_qp = average_qp_.GetAverage();
if (avg_qp) {
- LOG(LS_INFO) << "Checking average QP " << *avg_qp;
+ RTC_LOG(LS_INFO) << "Checking average QP " << *avg_qp;
if (*avg_qp > thresholds_.high) {
ReportQPHigh();
return;

@@ -162,7 +162,7 @@ static void ParseFilterHeader(VP8BitReader* br) {

bool GetQp(const uint8_t* buf, size_t length, int* qp) {
if (length < kCommonPayloadHeaderLength) {
- LOG(LS_WARNING) << "Failed to get QP, invalid length.";
+ RTC_LOG(LS_WARNING) << "Failed to get QP, invalid length.";
return false;
}
VP8BitReader br;

@@ -175,7 +175,7 @@ bool GetQp(const uint8_t* buf, size_t length, int* qp) {
header_length = kKeyPayloadHeaderLength;
}
if (header_length + partition_length > length) {
- LOG(LS_WARNING) << "Failed to get QP, invalid length: " << length;
+ RTC_LOG(LS_WARNING) << "Failed to get QP, invalid length: " << length;
return false;
}
buf += header_length;

@@ -193,7 +193,7 @@ bool GetQp(const uint8_t* buf, size_t length, int* qp) {
// Base QP.
const int base_q0 = VP8GetValue(&br, 7);
if (br.eof_ == 1) {
- LOG(LS_WARNING) << "Failed to get QP, end of file reached.";
+ RTC_LOG(LS_WARNING) << "Failed to get QP, end of file reached.";
return false;
}
*qp = base_q0;

@@ -35,7 +35,7 @@ bool Vp9ReadProfile(rtc::BitBuffer* br, uint8_t* profile) {
uint32_t reserved_bit;
RETURN_FALSE_IF_ERROR(br->ReadBits(&reserved_bit, 1));
if (reserved_bit) {
- LOG(LS_WARNING) << "Failed to get QP. Unsupported bitstream profile.";
+ RTC_LOG(LS_WARNING) << "Failed to get QP. Unsupported bitstream profile.";
return false;
}
}

@@ -46,7 +46,7 @@ bool Vp9ReadSyncCode(rtc::BitBuffer* br) {
uint32_t sync_code;
RETURN_FALSE_IF_ERROR(br->ReadBits(&sync_code, 24));
if (sync_code != 0x498342) {
- LOG(LS_WARNING) << "Failed to get QP. Invalid sync code.";
+ RTC_LOG(LS_WARNING) << "Failed to get QP. Invalid sync code.";
return false;
}
return true;

@@ -71,7 +71,7 @@ bool Vp9ReadColorConfig(rtc::BitBuffer* br, uint8_t profile) {
uint32_t reserved_bit;
RETURN_FALSE_IF_ERROR(br->ReadBits(&reserved_bit, 1));
if (reserved_bit) {
- LOG(LS_WARNING) << "Failed to get QP. Reserved bit set.";
+ RTC_LOG(LS_WARNING) << "Failed to get QP. Reserved bit set.";
return false;
}
}

@@ -80,12 +80,12 @@ bool Vp9ReadColorConfig(rtc::BitBuffer* br, uint8_t profile) {
uint32_t reserved_bit;
RETURN_FALSE_IF_ERROR(br->ReadBits(&reserved_bit, 1));
if (reserved_bit) {
- LOG(LS_WARNING) << "Failed to get QP. Reserved bit set.";
+ RTC_LOG(LS_WARNING) << "Failed to get QP. Reserved bit set.";
return false;
}
} else {
- LOG(LS_WARNING) << "Failed to get QP. 4:4:4 color not supported in "
- "profile 0 or 2.";
+ RTC_LOG(LS_WARNING) << "Failed to get QP. 4:4:4 color not supported in "
+ "profile 0 or 2.";
return false;
}
}
@ -173,7 +173,7 @@ bool GetQp(const uint8_t* buf, size_t length, int* qp) {
|
||||
uint32_t frame_marker;
|
||||
RETURN_FALSE_IF_ERROR(br.ReadBits(&frame_marker, 2));
|
||||
if (frame_marker != 0x2) {
|
||||
LOG(LS_WARNING) << "Failed to get QP. Frame marker should be 2.";
|
||||
RTC_LOG(LS_WARNING) << "Failed to get QP. Frame marker should be 2.";
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
@ -131,7 +131,8 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec(
streams.back().temporal_layer_thresholds_bps.size() + 1);

if (nack_enabled && !TemporalLayersConfigured(streams)) {
LOG(LS_INFO) << "No temporal layers and nack enabled -> resilience off";
RTC_LOG(LS_INFO)
<< "No temporal layers and nack enabled -> resilience off";
video_codec.VP8()->resilience = kResilienceOff;
}
break;
@ -151,8 +152,8 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec(

if (nack_enabled && !TemporalLayersConfigured(streams) &&
video_codec.VP9()->numberOfSpatialLayers == 1) {
LOG(LS_INFO) << "No temporal or spatial layers and nack enabled -> "
<< "resilience off";
RTC_LOG(LS_INFO) << "No temporal or spatial layers and nack enabled -> "
<< "resilience off";
video_codec.VP9()->resilienceOn = false;
}
break;
@ -264,9 +264,9 @@ int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
clock_->TimeInMilliseconds());

if (first_frame_received_()) {
LOG(LS_INFO) << "Received first "
<< (frame->Complete() ? "complete" : "incomplete")
<< " decodable video frame";
RTC_LOG(LS_INFO) << "Received first "
<< (frame->Complete() ? "complete" : "incomplete")
<< " decodable video frame";
}

const int32_t ret = Decode(*frame);
@ -69,8 +69,8 @@ int32_t VideoSender::RegisterSendCodec(const VideoCodec* sendCodec,
current_codec_ = *sendCodec;

if (!ret) {
LOG(LS_ERROR) << "Failed to initialize set encoder with payload name '"
<< sendCodec->plName << "'.";
RTC_LOG(LS_ERROR) << "Failed to initialize set encoder with payload name '"
<< sendCodec->plName << "'.";
return VCM_CODEC_ERROR;
}

@ -110,10 +110,10 @@ int32_t VideoSender::RegisterSendCodec(const VideoCodec* sendCodec,
encoder_has_internal_source_ = _encoder->InternalSource();
}

LOG(LS_VERBOSE) << " max bitrate " << sendCodec->maxBitrate
<< " start bitrate " << sendCodec->startBitrate
<< " max frame rate " << sendCodec->maxFramerate
<< " max payload size " << maxPayloadSize;
RTC_LOG(LS_VERBOSE) << " max bitrate " << sendCodec->maxBitrate
<< " start bitrate " << sendCodec->startBitrate
<< " max frame rate " << sendCodec->maxFramerate
<< " max payload size " << maxPayloadSize;
_mediaOpt.SetEncodingData(sendCodec->maxBitrate * 1000,
sendCodec->startBitrate * 1000,
sendCodec->maxFramerate);
@ -296,12 +296,12 @@ int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
return VCM_UNINITIALIZED;
SetEncoderParameters(encoder_params, encoder_has_internal_source);
if (_mediaOpt.DropFrame()) {
LOG(LS_VERBOSE) << "Drop Frame "
<< "target bitrate "
<< encoder_params.target_bitrate.get_sum_bps()
<< " loss rate " << encoder_params.loss_rate << " rtt "
<< encoder_params.rtt << " input frame rate "
<< encoder_params.input_frame_rate;
RTC_LOG(LS_VERBOSE) << "Drop Frame "
<< "target bitrate "
<< encoder_params.target_bitrate.get_sum_bps()
<< " loss rate " << encoder_params.loss_rate << " rtt "
<< encoder_params.rtt << " input frame rate "
<< encoder_params.input_frame_rate;
post_encode_callback_->OnDroppedFrame(
EncodedImageCallback::DropReason::kDroppedByMediaOptimizations);
return VCM_OK;
@ -310,7 +310,8 @@ int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
// processing so frame size always matches.
if (!_codecDataBase.MatchesCurrentResolution(videoFrame.width(),
videoFrame.height())) {
LOG(LS_ERROR) << "Incoming frame doesn't match set resolution. Dropping.";
RTC_LOG(LS_ERROR)
<< "Incoming frame doesn't match set resolution. Dropping.";
return VCM_PARAMETER_ERROR;
}
VideoFrame converted_frame = videoFrame;
@ -327,7 +328,7 @@ int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
converted_frame.video_frame_buffer()->ToI420());

if (!converted_buffer) {
LOG(LS_ERROR) << "Frame conversion failed, dropping frame.";
RTC_LOG(LS_ERROR) << "Frame conversion failed, dropping frame.";
return VCM_PARAMETER_ERROR;
}
converted_frame = VideoFrame(converted_buffer,
@ -338,7 +339,7 @@ int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
int32_t ret =
_encoder->Encode(converted_frame, codecSpecificInfo, next_frame_types);
if (ret < 0) {
LOG(LS_ERROR) << "Failed to encode frame. Error code: " << ret;
RTC_LOG(LS_ERROR) << "Failed to encode frame. Error code: " << ret;
return ret;
}
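
For reference, a minimal sketch of what a call site looks like after the rename. This is illustrative only and not part of the CL: the helper function and its arguments are hypothetical, while RTC_LOG, the LS_* severities, and the ostream-style streaming match the usage shown in the hunks above; the include path assumes the macros come from rtc_base/logging.h.

// Illustrative sketch, not part of this CL. Hypothetical helper showing the
// RTC_LOG call-site pattern produced by the rename.
#include "rtc_base/logging.h"  // Assumed location of RTC_LOG and LS_* severities.

void LogEncodeResult(int ret, int width, int height) {
  if (ret < 0) {
    // Error severity; the message is streamed like an ostream.
    RTC_LOG(LS_ERROR) << "Failed to encode frame. Error code: " << ret;
    return;
  }
  // Verbose severity for routine diagnostics.
  RTC_LOG(LS_VERBOSE) << "Encoded " << width << "x" << height
                      << " frame, status " << ret;
}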