Use clang-format -style=chromium to correct the format in webrtc/modules/interface/module_common_types.h

R=andrew@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/2979004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@5036 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
xians@webrtc.org
2013-10-25 18:15:09 +00:00
parent e4e5683b41
commit c94abd313e

View File

@ -21,20 +21,18 @@
#include "webrtc/typedefs.h"
#ifdef _WIN32
#pragma warning(disable:4351) // remove warning "new behavior: elements of array
// 'array' will be default initialized"
// Remove warning "new behavior: elements of array will be default initialized".
#pragma warning(disable : 4351)
#endif
namespace webrtc {
struct RTPHeaderExtension
{
struct RTPHeaderExtension {
int32_t transmissionTimeOffset;
uint32_t absoluteSendTime;
};
struct RTPHeader
{
struct RTPHeader {
bool markerBit;
uint8_t payloadType;
uint16_t sequenceNumber;
@ -48,24 +46,31 @@ struct RTPHeader
RTPHeaderExtension extension;
};
struct RTPAudioHeader
{
struct RTPAudioHeader {
uint8_t numEnergy; // number of valid entries in arrOfEnergy
uint8_t arrOfEnergy[kRtpCsrcSize]; // one energy byte (0-9) per channel
bool isCNG; // is this CNG
uint8_t channel; // number of channels 2 = stereo
};
enum {kNoPictureId = -1};
enum {kNoTl0PicIdx = -1};
enum {kNoTemporalIdx = -1};
enum {kNoKeyIdx = -1};
enum {kNoSimulcastIdx = 0};
enum {
kNoPictureId = -1
};
enum {
kNoTl0PicIdx = -1
};
enum {
kNoTemporalIdx = -1
};
enum {
kNoKeyIdx = -1
};
enum {
kNoSimulcastIdx = 0
};
struct RTPVideoHeaderVP8
{
void InitRTPVideoHeaderVP8()
{
struct RTPVideoHeaderVP8 {
void InitRTPVideoHeaderVP8() {
nonReference = false;
pictureId = kNoPictureId;
tl0PicIdx = kNoTl0PicIdx;
@ -89,19 +94,16 @@ struct RTPVideoHeaderVP8
bool beginningOfPartition; // True if this packet is the first
// in a VP8 partition. Otherwise false
};
union RTPVideoTypeHeader
{
union RTPVideoTypeHeader {
RTPVideoHeaderVP8 VP8;
};
enum RtpVideoCodecTypes
{
enum RtpVideoCodecTypes {
kRtpVideoNone,
kRtpVideoGeneric,
kRtpVideoVp8
};
struct RTPVideoHeader
{
struct RTPVideoHeader {
uint16_t width; // size
uint16_t height;
@ -111,47 +113,39 @@ struct RTPVideoHeader
RtpVideoCodecTypes codec;
RTPVideoTypeHeader codecHeader;
};
union RTPTypeHeader
{
union RTPTypeHeader {
RTPAudioHeader Audio;
RTPVideoHeader Video;
};
struct WebRtcRTPHeader
{
struct WebRtcRTPHeader {
RTPHeader header;
FrameType frameType;
RTPTypeHeader type;
};
class RTPFragmentationHeader
{
class RTPFragmentationHeader {
public:
RTPFragmentationHeader() :
fragmentationVectorSize(0),
RTPFragmentationHeader()
: fragmentationVectorSize(0),
fragmentationOffset(NULL),
fragmentationLength(NULL),
fragmentationTimeDiff(NULL),
fragmentationPlType(NULL)
{};
fragmentationPlType(NULL) {};
~RTPFragmentationHeader()
{
~RTPFragmentationHeader() {
delete[] fragmentationOffset;
delete[] fragmentationLength;
delete[] fragmentationTimeDiff;
delete[] fragmentationPlType;
}
void CopyFrom(const RTPFragmentationHeader& src)
{
if(this == &src)
{
void CopyFrom(const RTPFragmentationHeader& src) {
if (this == &src) {
return;
}
if(src.fragmentationVectorSize != fragmentationVectorSize)
{
if (src.fragmentationVectorSize != fragmentationVectorSize) {
// new size of vectors
// delete old
@ -164,23 +158,18 @@ public:
delete[] fragmentationPlType;
fragmentationPlType = NULL;
if(src.fragmentationVectorSize > 0)
{
if (src.fragmentationVectorSize > 0) {
// allocate new
if(src.fragmentationOffset)
{
if (src.fragmentationOffset) {
fragmentationOffset = new uint32_t[src.fragmentationVectorSize];
}
if(src.fragmentationLength)
{
if (src.fragmentationLength) {
fragmentationLength = new uint32_t[src.fragmentationVectorSize];
}
if(src.fragmentationTimeDiff)
{
if (src.fragmentationTimeDiff) {
fragmentationTimeDiff = new uint16_t[src.fragmentationVectorSize];
}
if(src.fragmentationPlType)
{
if (src.fragmentationPlType) {
fragmentationPlType = new uint8_t[src.fragmentationVectorSize];
}
}
@ -188,36 +177,29 @@ public:
fragmentationVectorSize = src.fragmentationVectorSize;
}
if(src.fragmentationVectorSize > 0)
{
if (src.fragmentationVectorSize > 0) {
// copy values
if(src.fragmentationOffset)
{
if (src.fragmentationOffset) {
memcpy(fragmentationOffset, src.fragmentationOffset,
src.fragmentationVectorSize * sizeof(uint32_t));
}
if(src.fragmentationLength)
{
if (src.fragmentationLength) {
memcpy(fragmentationLength, src.fragmentationLength,
src.fragmentationVectorSize * sizeof(uint32_t));
}
if(src.fragmentationTimeDiff)
{
if (src.fragmentationTimeDiff) {
memcpy(fragmentationTimeDiff, src.fragmentationTimeDiff,
src.fragmentationVectorSize * sizeof(uint16_t));
}
if(src.fragmentationPlType)
{
if (src.fragmentationPlType) {
memcpy(fragmentationPlType, src.fragmentationPlType,
src.fragmentationVectorSize * sizeof(uint8_t));
}
}
}
void VerifyAndAllocateFragmentationHeader(const uint16_t size)
{
if(fragmentationVectorSize < size)
{
void VerifyAndAllocateFragmentationHeader(const uint16_t size) {
if (fragmentationVectorSize < size) {
uint16_t oldVectorSize = fragmentationVectorSize;
{
// offset
@ -226,7 +208,8 @@ public:
memset(fragmentationOffset + oldVectorSize, 0,
sizeof(uint32_t) * (size - oldVectorSize));
// copy old values
memcpy(fragmentationOffset,oldOffsets, sizeof(uint32_t) * oldVectorSize);
memcpy(fragmentationOffset, oldOffsets,
sizeof(uint32_t) * oldVectorSize);
delete[] oldOffsets;
}
// length
@ -274,8 +257,7 @@ private:
DISALLOW_COPY_AND_ASSIGN(RTPFragmentationHeader);
};
struct RTCPVoIPMetric
{
struct RTCPVoIPMetric {
// RFC 3611 4.7
uint8_t lossRate;
uint8_t discardRate;
@ -327,11 +309,10 @@ class CallStatsObserver {
};
// class describing a complete, or parts of an encoded frame.
class EncodedVideoData
{
class EncodedVideoData {
public:
EncodedVideoData() :
payloadType(0),
EncodedVideoData()
: payloadType(0),
timeStamp(0),
renderTimeMs(0),
encodedWidth(0),
@ -343,11 +324,9 @@ public:
bufferSize(0),
fragmentationHeader(),
frameType(kVideoFrameDelta),
codec(kVideoCodecUnknown)
{};
codec(kVideoCodecUnknown) {};
EncodedVideoData(const EncodedVideoData& data)
{
EncodedVideoData(const EncodedVideoData& data) {
payloadType = data.payloadType;
timeStamp = data.timeStamp;
renderTimeMs = data.renderTimeMs;
@ -359,27 +338,20 @@ public:
fragmentationHeader.CopyFrom(data.fragmentationHeader);
frameType = data.frameType;
codec = data.codec;
if (data.payloadSize > 0)
{
if (data.payloadSize > 0) {
payloadData = new uint8_t[data.payloadSize];
memcpy(payloadData, data.payloadData, data.payloadSize);
}
else
{
} else {
payloadData = NULL;
}
}
~EncodedVideoData()
{
~EncodedVideoData() {
delete[] payloadData;
};
EncodedVideoData& operator=(const EncodedVideoData& data)
{
if (this == &data)
{
EncodedVideoData& operator=(const EncodedVideoData& data) {
if (this == &data) {
return *this;
}
payloadType = data.payloadType;
@ -393,8 +365,7 @@ public:
fragmentationHeader.CopyFrom(data.fragmentationHeader);
frameType = data.frameType;
codec = data.codec;
if (data.payloadSize > 0)
{
if (data.payloadSize > 0) {
delete[] payloadData;
payloadData = new uint8_t[data.payloadSize];
memcpy(payloadData, data.payloadData, data.payloadSize);
@ -402,10 +373,8 @@ public:
}
return *this;
};
void VerifyAndAllocate( const uint32_t size)
{
if (bufferSize < size)
{
void VerifyAndAllocate(const uint32_t size) {
if (bufferSize < size) {
uint8_t* oldPayload = payloadData;
payloadData = new uint8_t[size];
memcpy(payloadData, oldPayload, sizeof(uint8_t) * payloadSize);
@ -435,8 +404,7 @@ struct VideoContentMetrics {
: motion_magnitude(0.0f),
spatial_pred_err(0.0f),
spatial_pred_err_h(0.0f),
spatial_pred_err_v(0.0f) {
}
spatial_pred_err_v(0.0f) {}
void Reset() {
motion_magnitude = 0.0f;
@ -459,40 +427,42 @@ struct VideoContentMetrics {
*
*
*************************************************/
class VideoFrame
{
class VideoFrame {
public:
VideoFrame();
~VideoFrame();
/**
* Verifies that current allocated buffer size is larger than or equal to the input size.
* If the current buffer size is smaller, a new allocation is made and the old buffer data
* Verifies that current allocated buffer size is larger than or equal to the
* input size.
* If the current buffer size is smaller, a new allocation is made and the old
* buffer data
* is copied to the new buffer.
* Buffer size is updated to minimumSize.
*/
int32_t VerifyAndAllocate(const uint32_t minimumSize);
/**
* Update length of data buffer in frame. Function verifies that new length is less or
* Update length of data buffer in frame. Function verifies that new length
* is less or
* equal to allocated size.
*/
int32_t SetLength(const uint32_t newLength);
/*
* Swap buffer and size data
*/
int32_t Swap(uint8_t*& newMemory,
uint32_t& newLength,
uint32_t& newSize);
int32_t Swap(uint8_t*& newMemory, uint32_t& newLength, uint32_t& newSize);
/*
* Swap buffer and size data
*/
int32_t SwapFrame(VideoFrame& videoFrame);
/**
* Copy buffer: If newLength is bigger than allocated size, a new buffer of size length
* Copy buffer: If newLength is bigger than allocated size, a new buffer of
* size length
* is allocated.
*/
int32_t CopyFrame(const VideoFrame& videoFrame);
/**
* Copy buffer: If newLength is bigger than allocated size, a new buffer of size length
* Copy buffer: If newLength is bigger than allocated size, a new buffer of
* size length
* is allocated.
*/
int32_t CopyFrame(uint32_t length, const uint8_t* sourceBuffer);
@ -542,17 +512,16 @@ public:
/**
* Set render time in milliseconds
*/
void SetRenderTime(const int64_t renderTimeMs) {_renderTimeMs = renderTimeMs;}
void SetRenderTime(const int64_t renderTimeMs) {
_renderTimeMs = renderTimeMs;
}
/**
* Get render time in milliseconds
*/
int64_t RenderTimeMs() const { return _renderTimeMs; }
private:
void Set(uint8_t* buffer,
uint32_t size,
uint32_t length,
uint32_t timeStamp);
void Set(uint8_t* buffer, uint32_t size, uint32_t length, uint32_t timeStamp);
uint8_t* _buffer; // Pointer to frame buffer
uint32_t _bufferSize; // Allocated buffer size
@ -564,49 +533,35 @@ private:
}; // end of VideoFrame class declaration
// inline implementation of VideoFrame class:
inline
VideoFrame::VideoFrame():
_buffer(0),
inline VideoFrame::VideoFrame()
: _buffer(0),
_bufferSize(0),
_bufferLength(0),
_timeStamp(0),
_width(0),
_height(0),
_renderTimeMs(0)
{
_renderTimeMs(0) {
//
}
inline
VideoFrame::~VideoFrame()
{
if(_buffer)
{
inline VideoFrame::~VideoFrame() {
if (_buffer) {
delete[] _buffer;
_buffer = NULL;
}
}
inline
int32_t
VideoFrame::VerifyAndAllocate(const uint32_t minimumSize)
{
if (minimumSize < 1)
{
inline int32_t VideoFrame::VerifyAndAllocate(const uint32_t minimumSize) {
if (minimumSize < 1) {
return -1;
}
if(minimumSize > _bufferSize)
{
if (minimumSize > _bufferSize) {
// create buffer of sufficient size
uint8_t* newBufferBuffer = new uint8_t[minimumSize];
if(_buffer)
{
if (_buffer) {
// copy old data
memcpy(newBufferBuffer, _buffer, _bufferSize);
delete[] _buffer;
}
else
{
} else {
memset(newBufferBuffer, 0, minimumSize * sizeof(uint8_t));
}
_buffer = newBufferBuffer;
@ -615,22 +570,15 @@ VideoFrame::VerifyAndAllocate(const uint32_t minimumSize)
return 0;
}
inline
int32_t
VideoFrame::SetLength(const uint32_t newLength)
{
if (newLength >_bufferSize )
{ // can't accommodate new value
inline int32_t VideoFrame::SetLength(const uint32_t newLength) {
if (newLength > _bufferSize) { // can't accommodate new value
return -1;
}
_bufferLength = newLength;
return 0;
}
inline
int32_t
VideoFrame::SwapFrame(VideoFrame& videoFrame)
{
inline int32_t VideoFrame::SwapFrame(VideoFrame& videoFrame) {
uint32_t tmpTimeStamp = _timeStamp;
uint32_t tmpWidth = _width;
uint32_t tmpHeight = _height;
@ -646,13 +594,12 @@ VideoFrame::SwapFrame(VideoFrame& videoFrame)
videoFrame._height = tmpHeight;
videoFrame._renderTimeMs = tmpRenderTime;
return Swap(videoFrame._buffer, videoFrame._bufferLength, videoFrame._bufferSize);
return Swap(videoFrame._buffer, videoFrame._bufferLength,
videoFrame._bufferSize);
}
inline
int32_t
VideoFrame::Swap(uint8_t*& newMemory, uint32_t& newLength, uint32_t& newSize)
{
inline int32_t VideoFrame::Swap(uint8_t*& newMemory, uint32_t& newLength,
uint32_t& newSize) {
uint8_t* tmpBuffer = _buffer;
uint32_t tmpLength = _bufferLength;
uint32_t tmpSize = _bufferSize;
@ -665,15 +612,11 @@ VideoFrame::Swap(uint8_t*& newMemory, uint32_t& newLength, uint32_t& newSize)
return 0;
}
inline
int32_t
VideoFrame::CopyFrame(uint32_t length, const uint8_t* sourceBuffer)
{
if (length > _bufferSize)
{
inline int32_t VideoFrame::CopyFrame(uint32_t length,
const uint8_t* sourceBuffer) {
if (length > _bufferSize) {
int32_t ret = VerifyAndAllocate(length);
if (ret < 0)
{
if (ret < 0) {
return ret;
}
}
@ -682,12 +625,8 @@ VideoFrame::CopyFrame(uint32_t length, const uint8_t* sourceBuffer)
return 0;
}
inline
int32_t
VideoFrame::CopyFrame(const VideoFrame& videoFrame)
{
if(CopyFrame(videoFrame.Length(), videoFrame.Buffer()) != 0)
{
inline int32_t VideoFrame::CopyFrame(const VideoFrame& videoFrame) {
if (CopyFrame(videoFrame.Length(), videoFrame.Buffer()) != 0) {
return -1;
}
_timeStamp = videoFrame._timeStamp;
@ -697,10 +636,7 @@ VideoFrame::CopyFrame(const VideoFrame& videoFrame)
return 0;
}
inline
void
VideoFrame::Free()
{
inline void VideoFrame::Free() {
_timeStamp = 0;
_bufferLength = 0;
_bufferSize = 0;
@ -708,14 +644,12 @@ VideoFrame::Free()
_width = 0;
_renderTimeMs = 0;
if(_buffer)
{
if (_buffer) {
delete[] _buffer;
_buffer = NULL;
}
}
/* This class holds up to 60 ms of super-wideband (32 kHz) stereo audio. It
* allows for adding and subtracting frames while keeping track of the resulting
* states.
@ -750,17 +684,11 @@ public:
AudioFrame();
virtual ~AudioFrame() {}
// |Interleaved_| is assumed to be unchanged with this UpdateFrame() method.
void UpdateFrame(
int id,
uint32_t timestamp,
const int16_t* data,
int samples_per_channel,
int sample_rate_hz,
SpeechType speech_type,
VADActivity vad_activity,
int num_channels = 1,
uint32_t energy = -1);
// |interleaved_| is not changed by this method.
void UpdateFrame(int id, uint32_t timestamp, const int16_t* data,
int samples_per_channel, int sample_rate_hz,
SpeechType speech_type, VADActivity vad_activity,
int num_channels = 1, uint32_t energy = -1);
AudioFrame& Append(const AudioFrame& rhs);
@ -787,8 +715,7 @@ private:
DISALLOW_COPY_AND_ASSIGN(AudioFrame);
};
inline
AudioFrame::AudioFrame()
inline AudioFrame::AudioFrame()
: id_(-1),
timestamp_(0),
data_(),
@ -800,16 +727,11 @@ AudioFrame::AudioFrame()
energy_(0xffffffff),
interleaved_(true) {}
inline
void AudioFrame::UpdateFrame(
int id,
uint32_t timestamp,
inline void AudioFrame::UpdateFrame(int id, uint32_t timestamp,
const int16_t* data,
int samples_per_channel,
int sample_rate_hz,
int samples_per_channel, int sample_rate_hz,
SpeechType speech_type,
VADActivity vad_activity,
int num_channels,
VADActivity vad_activity, int num_channels,
uint32_t energy) {
id_ = id;
timestamp_ = timestamp;
@ -830,8 +752,7 @@ void AudioFrame::UpdateFrame(
}
inline void AudioFrame::CopyFrom(const AudioFrame& src) {
if(this == &src)
return;
if (this == &src) return;
id_ = src.id_;
timestamp_ = src.timestamp_;
@ -848,16 +769,13 @@ inline void AudioFrame::CopyFrom(const AudioFrame& src) {
memcpy(data_, src.data_, sizeof(int16_t) * length);
}
inline
void AudioFrame::Mute() {
inline void AudioFrame::Mute() {
memset(data_, 0, samples_per_channel_ * num_channels_ * sizeof(int16_t));
}
inline
AudioFrame& AudioFrame::operator>>=(const int rhs) {
inline AudioFrame& AudioFrame::operator>>=(const int rhs) {
assert((num_channels_ > 0) && (num_channels_ < 3));
if((num_channels_ > 2) || (num_channels_ < 1))
return *this;
if ((num_channels_ > 2) || (num_channels_ < 1)) return *this;
for (int i = 0; i < samples_per_channel_ * num_channels_; i++) {
data_[i] = static_cast<int16_t>(data_[i] >> rhs);
@ -865,15 +783,12 @@ AudioFrame& AudioFrame::operator>>=(const int rhs) {
return *this;
}
inline
AudioFrame& AudioFrame::Append(const AudioFrame& rhs) {
inline AudioFrame& AudioFrame::Append(const AudioFrame& rhs) {
// Sanity check
assert((num_channels_ > 0) && (num_channels_ < 3));
assert(interleaved_ == rhs.interleaved_);
if((num_channels_ > 2) || (num_channels_ < 1))
return *this;
if(num_channels_ != rhs.num_channels_)
return *this;
if ((num_channels_ > 2) || (num_channels_ < 1)) return *this;
if (num_channels_ != rhs.num_channels_) return *this;
if ((vad_activity_ == kVadActive) || rhs.vad_activity_ == kVadActive) {
vad_activity_ = kVadActive;
@ -892,15 +807,12 @@ AudioFrame& AudioFrame::Append(const AudioFrame& rhs) {
return *this;
}
inline
AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) {
inline AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) {
// Sanity check
assert((num_channels_ > 0) && (num_channels_ < 3));
assert(interleaved_ == rhs.interleaved_);
if((num_channels_ > 2) || (num_channels_ < 1))
return *this;
if(num_channels_ != rhs.num_channels_)
return *this;
if ((num_channels_ > 2) || (num_channels_ < 1)) return *this;
if (num_channels_ != rhs.num_channels_) return *this;
bool noPrevData = false;
if (samples_per_channel_ != rhs.samples_per_channel_) {
@ -919,8 +831,7 @@ AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) {
vad_activity_ = kVadUnknown;
}
if(speech_type_ != rhs.speech_type_)
speech_type_ = kUndefined;
if (speech_type_ != rhs.speech_type_) speech_type_ = kUndefined;
if (noPrevData) {
memcpy(data_, rhs.data_,
@ -928,8 +839,8 @@ AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) {
} else {
// IMPROVEMENT this can be done very fast in assembly
for (int i = 0; i < samples_per_channel_ * num_channels_; i++) {
int32_t wrapGuard = static_cast<int32_t>(data_[i]) +
static_cast<int32_t>(rhs.data_[i]);
int32_t wrapGuard =
static_cast<int32_t>(data_[i]) + static_cast<int32_t>(rhs.data_[i]);
if (wrapGuard < -32768) {
data_[i] = -32768;
} else if (wrapGuard > 32767) {
@ -943,13 +854,11 @@ AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) {
return *this;
}
inline
AudioFrame& AudioFrame::operator-=(const AudioFrame& rhs) {
inline AudioFrame& AudioFrame::operator-=(const AudioFrame& rhs) {
// Sanity check
assert((num_channels_ > 0) && (num_channels_ < 3));
assert(interleaved_ == rhs.interleaved_);
if((num_channels_ > 2)|| (num_channels_ < 1))
return *this;
if ((num_channels_ > 2) || (num_channels_ < 1)) return *this;
if ((samples_per_channel_ != rhs.samples_per_channel_) ||
(num_channels_ != rhs.num_channels_)) {
@ -961,8 +870,8 @@ AudioFrame& AudioFrame::operator-=(const AudioFrame& rhs) {
speech_type_ = kUndefined;
for (int i = 0; i < samples_per_channel_ * num_channels_; i++) {
int32_t wrapGuard = static_cast<int32_t>(data_[i]) -
static_cast<int32_t>(rhs.data_[i]);
int32_t wrapGuard =
static_cast<int32_t>(data_[i]) - static_cast<int32_t>(rhs.data_[i]);
if (wrapGuard < -32768) {
data_[i] = -32768;
} else if (wrapGuard > 32767) {
@ -988,13 +897,13 @@ inline bool IsNewerTimestamp(uint32_t timestamp, uint32_t prev_timestamp) {
inline uint16_t LatestSequenceNumber(uint16_t sequence_number1,
uint16_t sequence_number2) {
return IsNewerSequenceNumber(sequence_number1, sequence_number2) ?
sequence_number1 : sequence_number2;
return IsNewerSequenceNumber(sequence_number1, sequence_number2)
? sequence_number1
: sequence_number2;
}
inline uint32_t LatestTimestamp(uint32_t timestamp1, uint32_t timestamp2) {
return IsNewerTimestamp(timestamp1, timestamp2) ? timestamp1 :
timestamp2;
return IsNewerTimestamp(timestamp1, timestamp2) ? timestamp1 : timestamp2;
}
} // namespace webrtc