Add RTC_ prefix to (D)CHECKs and related macros.

We must remove the dependency on Chromium, i.e. we can't use Chromium's base/logging.h. That means WebRTC needs to define these macros itself, even when built as part of Chromium, at which point both projects define the same names and the macros get redefined (a sketch follows the list below).

Alternative solutions:
* Check whether e.g. CHECK is already defined, and skip our definition in that case. This makes us depend on include order in Chromium, which is not acceptable.
* Disallow use of the macros in WebRTC headers. Error prone: if someone adds one there by mistake it may compile fine, but later break when a header is added or include order changes in Chromium. That would be confusing and hard to enforce.
* Ensure that headers included by an embedder don't pull in our macros. This would require some heavy refactoring to be maintainable and enforceable.
* Changing Chromium to accommodate this is obviously not an option.
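
For illustration, a minimal sketch of the clash and why prefixing avoids it (the macro body below is simplified and illustrative, not the real definition from either project):

#include <cstdio>
#include <cstdlib>

// Chromium's base/logging.h defines the unprefixed names, e.g. DCHECK(x).
// If webrtc/base/checks.h also defined DCHECK, any translation unit that
// transitively included both headers would hit a macro redefinition, or
// silently keep whichever definition happened to come last.
// A prefixed name cannot collide, regardless of include order:
#define RTC_DCHECK(condition)                                        \
  do {                                                               \
    if (!(condition)) {                                              \
      std::fprintf(stderr, "Check failed: %s (%s:%d)\n", #condition, \
                   __FILE__, __LINE__);                              \
      std::abort();                                                  \
    }                                                                \
  } while (0)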

BUG=chromium:468375
NOTRY=true

Review URL: https://codereview.webrtc.org/1335923002

Cr-Commit-Position: refs/heads/master@{#9964}
henrikg authored 2015-09-17 00:24:34 -07:00, committed by Commit bot
parent c0ac6cad00
commit 91d6edef35
232 changed files with 1665 additions and 1646 deletions

View File

@@ -36,7 +36,7 @@ bool IsH264CodecSupported() {
 }
 
 H264Encoder* H264Encoder::Create() {
-  DCHECK(H264Encoder::IsSupported());
+  RTC_DCHECK(H264Encoder::IsSupported());
 #if defined(WEBRTC_IOS) && defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
   return new H264VideoToolboxEncoder();
 #else
@@ -50,7 +50,7 @@ bool H264Encoder::IsSupported() {
 }
 
 H264Decoder* H264Decoder::Create() {
-  DCHECK(H264Decoder::IsSupported());
+  RTC_DCHECK(H264Decoder::IsSupported());
 #if defined(WEBRTC_IOS) && defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
   return new H264VideoToolboxDecoder();
 #else

View File

@@ -47,9 +47,9 @@ struct FrameDecodeParams {
 // instead once the pipeline supports it.
 rtc::scoped_refptr<webrtc::VideoFrameBuffer> VideoFrameBufferForPixelBuffer(
     CVPixelBufferRef pixel_buffer) {
-  DCHECK(pixel_buffer);
-  DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
-         kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
+  RTC_DCHECK(pixel_buffer);
+  RTC_DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
+             kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
   size_t width = CVPixelBufferGetWidthOfPlane(pixel_buffer, 0);
   size_t height = CVPixelBufferGetHeightOfPlane(pixel_buffer, 0);
   // TODO(tkchin): Use a frame buffer pool.
@@ -125,7 +125,7 @@ int H264VideoToolboxDecoder::Decode(
     const RTPFragmentationHeader* fragmentation,
     const CodecSpecificInfo* codec_specific_info,
     int64_t render_time_ms) {
-  DCHECK(input_image._buffer);
+  RTC_DCHECK(input_image._buffer);
 
   CMSampleBufferRef sample_buffer = nullptr;
   if (!H264AnnexBBufferToCMSampleBuffer(input_image._buffer,
@@ -134,7 +134,7 @@ int H264VideoToolboxDecoder::Decode(
                                         &sample_buffer)) {
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
-  DCHECK(sample_buffer);
+  RTC_DCHECK(sample_buffer);
   // Check if the video format has changed, and reinitialize decoder if needed.
   CMVideoFormatDescriptionRef description =
       CMSampleBufferGetFormatDescription(sample_buffer);
@@ -160,7 +160,7 @@ int H264VideoToolboxDecoder::Decode(
 
 int H264VideoToolboxDecoder::RegisterDecodeCompleteCallback(
     DecodedImageCallback* callback) {
-  DCHECK(!callback_);
+  RTC_DCHECK(!callback_);
   callback_ = callback;
   return WEBRTC_VIDEO_CODEC_OK;
 }
@@ -238,7 +238,7 @@ int H264VideoToolboxDecoder::ResetDecompressionSession() {
 }
 
 void H264VideoToolboxDecoder::ConfigureDecompressionSession() {
-  DCHECK(decompression_session_);
+  RTC_DCHECK(decompression_session_);
 #if defined(WEBRTC_IOS)
   VTSessionSetProperty(decompression_session_,
                        kVTDecompressionPropertyKey_RealTime, kCFBooleanTrue);

View File

@@ -35,7 +35,7 @@ inline CFDictionaryRef CreateCFDictionary(CFTypeRef* keys,
 
 // Copies characters from a CFStringRef into a std::string.
 std::string CFStringToString(const CFStringRef cf_string) {
-  DCHECK(cf_string);
+  RTC_DCHECK(cf_string);
   std::string std_string;
   // Get the size needed for UTF8 plus terminating character.
   size_t buffer_size =
@@ -123,13 +123,13 @@ struct FrameEncodeParams {
 
 // TODO(tkchin): See if encoder will accept i420 frames and compare performance.
 bool CopyVideoFrameToPixelBuffer(const webrtc::VideoFrame& frame,
                                 CVPixelBufferRef pixel_buffer) {
-  DCHECK(pixel_buffer);
-  DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
-         kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
-  DCHECK(CVPixelBufferGetHeightOfPlane(pixel_buffer, 0) ==
-         static_cast<size_t>(frame.height()));
-  DCHECK(CVPixelBufferGetWidthOfPlane(pixel_buffer, 0) ==
-         static_cast<size_t>(frame.width()));
+  RTC_DCHECK(pixel_buffer);
+  RTC_DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
+             kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
+  RTC_DCHECK(CVPixelBufferGetHeightOfPlane(pixel_buffer, 0) ==
+             static_cast<size_t>(frame.height()));
+  RTC_DCHECK(CVPixelBufferGetWidthOfPlane(pixel_buffer, 0) ==
+             static_cast<size_t>(frame.width()));
   CVReturn cvRet = CVPixelBufferLockBaseAddress(pixel_buffer, 0);
   if (cvRet != kCVReturnSuccess) {
@@ -224,8 +224,8 @@ H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
 int H264VideoToolboxEncoder::InitEncode(const VideoCodec* codec_settings,
                                         int number_of_cores,
                                         size_t max_payload_size) {
-  DCHECK(codec_settings);
-  DCHECK_EQ(codec_settings->codecType, kVideoCodecH264);
+  RTC_DCHECK(codec_settings);
+  RTC_DCHECK_EQ(codec_settings->codecType, kVideoCodecH264);
   // TODO(tkchin): We may need to enforce width/height dimension restrictions
   // to match what the encoder supports.
   width_ = codec_settings->width;
@@ -266,7 +266,7 @@ int H264VideoToolboxEncoder::Encode(
     // that the pool is empty.
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
-  DCHECK(pixel_buffer);
+  RTC_DCHECK(pixel_buffer);
   if (!internal::CopyVideoFrameToPixelBuffer(input_image, pixel_buffer)) {
     LOG(LS_ERROR) << "Failed to copy frame data.";
     CVBufferRelease(pixel_buffer);
@@ -397,7 +397,7 @@ int H264VideoToolboxEncoder::ResetCompressionSession() {
 }
 
 void H264VideoToolboxEncoder::ConfigureCompressionSession() {
-  DCHECK(compression_session_);
+  RTC_DCHECK(compression_session_);
   internal::SetVTSessionProperty(compression_session_,
                                  kVTCompressionPropertyKey_RealTime, true);
   internal::SetVTSessionProperty(compression_session_,

View File

@@ -29,8 +29,8 @@ bool H264CMSampleBufferToAnnexBBuffer(
     bool is_keyframe,
     rtc::Buffer* annexb_buffer,
     webrtc::RTPFragmentationHeader** out_header) {
-  DCHECK(avcc_sample_buffer);
-  DCHECK(out_header);
+  RTC_DCHECK(avcc_sample_buffer);
+  RTC_DCHECK(out_header);
   *out_header = nullptr;
 
   // Get format description from the sample buffer.
@@ -51,8 +51,8 @@ bool H264CMSampleBufferToAnnexBBuffer(
     return false;
   }
   // TODO(tkchin): handle other potential sizes.
-  DCHECK_EQ(nalu_header_size, 4);
-  DCHECK_EQ(param_set_count, 2u);
+  RTC_DCHECK_EQ(nalu_header_size, 4);
+  RTC_DCHECK_EQ(param_set_count, 2u);
 
   // Truncate any previous data in the buffer without changing its capacity.
   annexb_buffer->SetSize(0);
@@ -122,7 +122,7 @@ bool H264CMSampleBufferToAnnexBBuffer(
     // The size type here must match |nalu_header_size|, we expect 4 bytes.
     // Read the length of the next packet of data. Must convert from big endian
     // to host endian.
-    DCHECK_GE(bytes_remaining, (size_t)nalu_header_size);
+    RTC_DCHECK_GE(bytes_remaining, (size_t)nalu_header_size);
     uint32_t* uint32_data_ptr = reinterpret_cast<uint32*>(data_ptr);
     uint32_t packet_size = CFSwapInt32BigToHost(*uint32_data_ptr);
     // Update buffer.
@@ -137,12 +137,12 @@ bool H264CMSampleBufferToAnnexBBuffer(
     bytes_remaining -= bytes_written;
     data_ptr += bytes_written;
   }
-  DCHECK_EQ(bytes_remaining, (size_t)0);
+  RTC_DCHECK_EQ(bytes_remaining, (size_t)0);
 
   rtc::scoped_ptr<webrtc::RTPFragmentationHeader> header;
   header.reset(new webrtc::RTPFragmentationHeader());
   header->VerifyAndAllocateFragmentationHeader(frag_offsets.size());
-  DCHECK_EQ(frag_lengths.size(), frag_offsets.size());
+  RTC_DCHECK_EQ(frag_lengths.size(), frag_offsets.size());
   for (size_t i = 0; i < frag_offsets.size(); ++i) {
     header->fragmentationOffset[i] = frag_offsets[i];
     header->fragmentationLength[i] = frag_lengths[i];
@@ -159,8 +159,8 @@ bool H264AnnexBBufferToCMSampleBuffer(
     size_t annexb_buffer_size,
     CMVideoFormatDescriptionRef video_format,
    CMSampleBufferRef* out_sample_buffer) {
-  DCHECK(annexb_buffer);
-  DCHECK(out_sample_buffer);
+  RTC_DCHECK(annexb_buffer);
+  RTC_DCHECK(out_sample_buffer);
   *out_sample_buffer = nullptr;
 
   // The buffer we receive via RTP has 00 00 00 01 start code artifically
@@ -193,7 +193,7 @@ bool H264AnnexBBufferToCMSampleBuffer(
       return false;
     }
   } else {
-    DCHECK(video_format);
+    RTC_DCHECK(video_format);
     description = video_format;
     // We don't need to retain, but it makes logic easier since we are creating
     // in the other block.
@@ -241,7 +241,7 @@ bool H264AnnexBBufferToCMSampleBuffer(
     CFRelease(contiguous_buffer);
     return false;
   }
-  DCHECK(block_buffer_size == reader.BytesRemaining());
+  RTC_DCHECK(block_buffer_size == reader.BytesRemaining());
 
   // Write Avcc NALUs into block buffer memory.
   AvccBufferWriter writer(reinterpret_cast<uint8_t*>(data_ptr),
@@ -272,7 +272,7 @@ bool H264AnnexBBufferToCMSampleBuffer(
 AnnexBBufferReader::AnnexBBufferReader(const uint8_t* annexb_buffer,
                                        size_t length)
     : start_(annexb_buffer), offset_(0), next_offset_(0), length_(length) {
-  DCHECK(annexb_buffer);
+  RTC_DCHECK(annexb_buffer);
   offset_ = FindNextNaluHeader(start_, length_, 0);
   next_offset_ =
       FindNextNaluHeader(start_, length_, offset_ + sizeof(kAnnexBHeaderBytes));
@@ -280,8 +280,8 @@ AnnexBBufferReader::AnnexBBufferReader(const uint8_t* annexb_buffer,
 
 bool AnnexBBufferReader::ReadNalu(const uint8_t** out_nalu,
                                   size_t* out_length) {
-  DCHECK(out_nalu);
-  DCHECK(out_length);
+  RTC_DCHECK(out_nalu);
+  RTC_DCHECK(out_length);
   *out_nalu = nullptr;
   *out_length = 0;
 
@@ -304,7 +304,7 @@ size_t AnnexBBufferReader::BytesRemaining() const {
 size_t AnnexBBufferReader::FindNextNaluHeader(const uint8_t* start,
                                               size_t length,
                                               size_t offset) const {
-  DCHECK(start);
+  RTC_DCHECK(start);
   if (offset + sizeof(kAnnexBHeaderBytes) > length) {
     return length;
   }
@@ -329,7 +329,7 @@ size_t AnnexBBufferReader::FindNextNaluHeader(const uint8_t* start,
 
 AvccBufferWriter::AvccBufferWriter(uint8_t* const avcc_buffer, size_t length)
     : start_(avcc_buffer), offset_(0), length_(length) {
-  DCHECK(avcc_buffer);
+  RTC_DCHECK(avcc_buffer);
 }
 
 bool AvccBufferWriter::WriteNalu(const uint8_t* data, size_t data_size) {

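As an aside on the hunk above that reads NALU lengths: each AVCC packet is prefixed with a 4-byte big-endian length, which CFSwapInt32BigToHost converts to host order. A portable standalone equivalent of that read (illustrative sketch, not from the WebRTC sources):

#include <cstdint>

// Reads the 4-byte big-endian NALU length prefix byte-wise, so it gives the
// same result on any host endianness and avoids the unaligned uint32 load
// that the reinterpret_cast in the original relies on.
uint32_t ReadAvccNaluLength(const uint8_t* data) {
  return (static_cast<uint32_t>(data[0]) << 24) |
         (static_cast<uint32_t>(data[1]) << 16) |
         (static_cast<uint32_t>(data[2]) << 8) |
         static_cast<uint32_t>(data[3]);
}
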
View File

@@ -220,14 +220,14 @@ bool ScreenshareLayers::TimeToSync(int64_t timestamp) const {
     RTC_NOTREACHED();
     return false;
   }
-  DCHECK_NE(-1, layers_[0].last_qp);
+  RTC_DCHECK_NE(-1, layers_[0].last_qp);
   if (layers_[1].last_qp == -1) {
     // First frame in TL1 should only depend on TL0 since there are no
     // previous frames in TL1.
     return true;
   }
 
-  DCHECK_NE(-1, last_sync_timestamp_);
+  RTC_DCHECK_NE(-1, last_sync_timestamp_);
   int64_t timestamp_diff = timestamp - last_sync_timestamp_;
   if (timestamp_diff > kMaxTimeBetweenSyncs) {
     // After a certain time, force a sync frame.

View File

@@ -725,8 +725,8 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
   // |raw_images_[0]|, the resolution of these frames must match. Note that
   // |input_image| might be scaled from |frame|. In that case, the resolution of
   // |raw_images_[0]| should have been updated in UpdateCodecFrameSize.
-  DCHECK_EQ(input_image.width(), static_cast<int>(raw_images_[0].d_w));
-  DCHECK_EQ(input_image.height(), static_cast<int>(raw_images_[0].d_h));
+  RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_images_[0].d_w));
+  RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_images_[0].d_h));
 
   // Image in vpx_image_t format.
   // Input image is const. VP8's raw image is not defined as const.

View File

@@ -34,7 +34,7 @@ void Vp9FrameBufferPool::Vp9FrameBuffer::SetSize(size_t size) {
 
 bool Vp9FrameBufferPool::InitializeVpxUsePool(
     vpx_codec_ctx* vpx_codec_context) {
-  DCHECK(vpx_codec_context);
+  RTC_DCHECK(vpx_codec_context);
   // Tell libvpx to use this pool.
   if (vpx_codec_set_frame_buffer_functions(
           // In which context to use these callback functions.
@@ -53,7 +53,7 @@ bool Vp9FrameBufferPool::InitializeVpxUsePool(
 
 rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer>
 Vp9FrameBufferPool::GetFrameBuffer(size_t min_size) {
-  DCHECK_GT(min_size, 0u);
+  RTC_DCHECK_GT(min_size, 0u);
   rtc::scoped_refptr<Vp9FrameBuffer> available_buffer = nullptr;
   {
     rtc::CritScope cs(&buffers_lock_);
@@ -101,8 +101,8 @@ void Vp9FrameBufferPool::ClearPool() {
 int32 Vp9FrameBufferPool::VpxGetFrameBuffer(void* user_priv,
                                             size_t min_size,
                                             vpx_codec_frame_buffer* fb) {
-  DCHECK(user_priv);
-  DCHECK(fb);
+  RTC_DCHECK(user_priv);
+  RTC_DCHECK(fb);
   Vp9FrameBufferPool* pool = static_cast<Vp9FrameBufferPool*>(user_priv);
   rtc::scoped_refptr<Vp9FrameBuffer> buffer = pool->GetFrameBuffer(min_size);
 
@@ -120,8 +120,8 @@ int32 Vp9FrameBufferPool::VpxGetFrameBuffer(void* user_priv,
 // static
 int32 Vp9FrameBufferPool::VpxReleaseFrameBuffer(void* user_priv,
                                                 vpx_codec_frame_buffer* fb) {
-  DCHECK(user_priv);
-  DCHECK(fb);
+  RTC_DCHECK(user_priv);
+  RTC_DCHECK(fb);
   Vp9FrameBuffer* buffer = static_cast<Vp9FrameBuffer*>(fb->priv);
   if (buffer != nullptr) {
     buffer->Release();

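For context on the first hunk above: the pool hands its buffers to libvpx through vpx_codec_set_frame_buffer_functions. A minimal sketch of that callback contract, using plain calloc/free in place of the pool (illustrative only, not the WebRTC implementation):

#include <cstdint>
#include <cstdlib>
#include "vpx/vpx_decoder.h"
#include "vpx/vpx_frame_buffer.h"

// Called by libvpx when it needs a buffer of at least |min_size| bytes.
static int GetFrameBuffer(void* user_priv, size_t min_size,
                          vpx_codec_frame_buffer_t* fb) {
  uint8_t* data = static_cast<uint8_t*>(std::calloc(min_size, 1));
  if (data == nullptr)
    return -1;  // A negative return tells libvpx the allocation failed.
  fb->data = data;
  fb->size = min_size;
  fb->priv = data;  // Handed back to us in the release callback below.
  return 0;
}

// Called by libvpx when it is done decoding into the buffer.
static int ReleaseFrameBuffer(void* user_priv, vpx_codec_frame_buffer_t* fb) {
  std::free(fb->priv);
  fb->priv = nullptr;
  return 0;
}

// Registration, mirroring InitializeVpxUsePool (ctx already initialized):
//   vpx_codec_set_frame_buffer_functions(ctx, &GetFrameBuffer,
//                                        &ReleaseFrameBuffer, nullptr);
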
View File

@@ -441,8 +441,8 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
   if (frame_types && frame_types->size() > 0) {
     frame_type = (*frame_types)[0];
   }
-  DCHECK_EQ(input_image.width(), static_cast<int>(raw_->d_w));
-  DCHECK_EQ(input_image.height(), static_cast<int>(raw_->d_h));
+  RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_->d_w));
+  RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_->d_h));
 
   // Set input image for use in the callback.
   // This was necessary since you need some information from input_image.