pkasting@chromium.org
2014-11-20 22:28:14 +00:00
parent edc6e57a92
commit 4591fbd09f
341 changed files with 2610 additions and 2613 deletions

View File

@@ -43,7 +43,7 @@ public:
     // Note: packet should contain the RTP/RTCP part of the packet. I.e. the
     // first bytes of packet should be the RTP/RTCP header.
     virtual int32_t DumpPacket(const uint8_t* packet,
-                               uint16_t packetLength) = 0;
+                               size_t packetLength) = 0;
 protected:
     virtual ~RtpDump();

View File

@@ -54,7 +54,7 @@ int32_t AudioCoder::SetDecodeCodec(const CodecInst& codecInst,
 int32_t AudioCoder::Decode(AudioFrame& decodedAudio,
                            uint32_t sampFreqHz,
                            const int8_t* incomingPayload,
-                           int32_t payloadLength)
+                           size_t payloadLength)
 {
     if (payloadLength > 0)
     {
@@ -79,7 +79,7 @@ int32_t AudioCoder::PlayoutData(AudioFrame& decodedAudio,
 int32_t AudioCoder::Encode(const AudioFrame& audio,
                            int8_t* encodedData,
-                           uint32_t& encodedLengthInBytes)
+                           size_t& encodedLengthInBytes)
 {
     // Fake a timestamp in case audio doesn't contain a correct timestamp.
     // Make a local copy of the audio frame since audio is const
@@ -109,7 +109,7 @@ int32_t AudioCoder::SendData(
     uint8_t /* payloadType */,
     uint32_t /* timeStamp */,
     const uint8_t* payloadData,
-    uint16_t payloadSize,
+    size_t payloadSize,
     const RTPFragmentationHeader* /* fragmentation*/)
 {
     memcpy(_encodedData,payloadData,sizeof(uint8_t) * payloadSize);

View File

@@ -34,12 +34,12 @@ public:
                            ACMAMRPackingFormat amrFormat = AMRBandwidthEfficient);
     int32_t Decode(AudioFrame& decodedAudio, uint32_t sampFreqHz,
-                   const int8_t* incomingPayload, int32_t payloadLength);
+                   const int8_t* incomingPayload, size_t payloadLength);
     int32_t PlayoutData(AudioFrame& decodedAudio, uint16_t& sampFreqHz);
     int32_t Encode(const AudioFrame& audio, int8_t* encodedData,
-                   uint32_t& encodedLengthInBytes);
+                   size_t& encodedLengthInBytes);
 protected:
     virtual int32_t SendData(
@@ -47,7 +47,7 @@ protected:
         uint8_t payloadType,
         uint32_t timeStamp,
         const uint8_t* payloadData,
-        uint16_t payloadSize,
+        size_t payloadSize,
         const RTPFragmentationHeader* fragmentation) OVERRIDE;
 private:
@@ -57,7 +57,7 @@ private:
     uint32_t _encodeTimestamp;
     int8_t* _encodedData;
-    uint32_t _encodedLengthInBytes;
+    size_t _encodedLengthInBytes;
     uint32_t _decodeTimestamp;
 };

View File

@@ -124,7 +124,7 @@ int32_t FilePlayerImpl::Get10msAudioFromFile(
         unresampledAudioFrame.sample_rate_hz_ = _codec.plfreq;
         // L16 is un-encoded data. Just pull 10 ms.
-        uint32_t lengthInBytes =
+        size_t lengthInBytes =
             sizeof(unresampledAudioFrame.data_);
         if (_fileModule.PlayoutAudioData(
             (int8_t*)unresampledAudioFrame.data_,
@@ -147,11 +147,11 @@ int32_t FilePlayerImpl::Get10msAudioFromFile(
         // expects a full frame. If the frame size is larger than 10 ms,
         // PlayoutAudioData(..) data should be called proportionally less often.
         int16_t encodedBuffer[MAX_AUDIO_BUFFER_IN_SAMPLES];
-        uint32_t encodedLengthInBytes = 0;
+        size_t encodedLengthInBytes = 0;
         if(++_numberOf10MsInDecoder >= _numberOf10MsPerFrame)
         {
             _numberOf10MsInDecoder = 0;
-            uint32_t bytesFromFile = sizeof(encodedBuffer);
+            size_t bytesFromFile = sizeof(encodedBuffer);
             if (_fileModule.PlayoutAudioData((int8_t*)encodedBuffer,
                                              bytesFromFile) == -1)
             {
@@ -581,7 +581,7 @@ int32_t VideoFilePlayerImpl::TimeUntilNextVideoFrame()
     if(_fileFormat == kFileFormatAviFile)
     {
         // Get next video frame
-        uint32_t encodedBufferLengthInBytes = _encodedData.bufferSize;
+        size_t encodedBufferLengthInBytes = _encodedData.bufferSize;
         if(_fileModule.PlayoutAVIVideoData(
             reinterpret_cast< int8_t*>(_encodedData.payloadData),
             encodedBufferLengthInBytes) != 0)
@@ -656,7 +656,7 @@ int32_t VideoFilePlayerImpl::SetUpVideoDecoder()
     // Size of unencoded data (I420) should be the largest possible frame size
     // in a file.
-    const uint32_t KReadBufferSize = 3 * video_codec_info_.width *
+    const size_t KReadBufferSize = 3 * video_codec_info_.width *
                                      video_codec_info_.height / 2;
     _encodedData.VerifyAndAllocate(KReadBufferSize);
     _encodedData.encodedHeight = video_codec_info_.height;
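A note on the buffer math in the hunk above: I420 stores a full-resolution Y plane plus quarter-resolution U and V planes, so an unencoded frame needs width * height + 2 * (width / 2) * (height / 2) = 3 * width * height / 2 bytes, which is where KReadBufferSize (and AviRecorder::CalcI420FrameSize further down) comes from. A minimal standalone sketch; the 640x480 resolution is illustrative and not taken from this change:

    #include <cstddef>
    #include <cstdio>

    // I420: Y is width*height bytes; U and V are each (width/2)*(height/2) bytes.
    size_t I420FrameSize(size_t width, size_t height) {
        return width * height + 2 * ((width / 2) * (height / 2));
    }

    int main() {
        // 640x480 -> 307200 + 2 * 76800 = 460800 bytes, i.e. 3 * 640 * 480 / 2.
        std::printf("%zu\n", I420FrameSize(640, 480));
        return 0;
    }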

View File

@@ -227,7 +227,7 @@ int32_t FileRecorderImpl::RecordAudioToFile(
     // NOTE: stereo recording is only supported for WAV files.
     // TODO (hellner): WAV expect PCM in little endian byte order. Not
     // "encoding" with PCM coder should be a problem for big endian systems.
-    uint32_t encodedLenInBytes = 0;
+    size_t encodedLenInBytes = 0;
     if (_fileFormat == kFileFormatPreencodedFile ||
         STR_CASE_CMP(codec_info_.plname, "L16") != 0)
     {
@@ -272,9 +272,8 @@ int32_t FileRecorderImpl::RecordAudioToFile(
     uint16_t msOfData =
         ptrAudioFrame->samples_per_channel_ /
         uint16_t(ptrAudioFrame->sample_rate_hz_ / 1000);
-    if (WriteEncodedAudioData(_audioBuffer,
-                              (uint16_t)encodedLenInBytes,
-                              msOfData, playoutTS) == -1)
+    if (WriteEncodedAudioData(_audioBuffer, encodedLenInBytes, msOfData,
+                              playoutTS) == -1)
     {
         return -1;
     }
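The second hunk above also drops the explicit (uint16_t) cast on encodedLenInBytes, which fits the wider size_t migration: the length is no longer narrowed on its way into WriteEncodedAudioData. A self-contained sketch of the truncation hazard such a cast carries; the 70000-byte length is purely illustrative:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
        size_t encodedLenInBytes = 70000;                   // does not fit in 16 bits
        uint16_t truncated = (uint16_t)encodedLenInBytes;   // old-style narrowing cast
        // Prints "70000 vs 4464": the high bits are silently discarded.
        std::printf("%zu vs %u\n", encodedLenInBytes, (unsigned)truncated);
        return 0;
    }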
@@ -309,7 +308,7 @@ int32_t FileRecorderImpl::codec_info(CodecInst& codecInst) const
 int32_t FileRecorderImpl::WriteEncodedAudioData(
     const int8_t* audioBuffer,
-    uint16_t bufferLength,
+    size_t bufferLength,
     uint16_t /*millisecondsOfData*/,
     const TickTime* /*playoutTS*/)
 {
@@ -398,7 +397,7 @@ int32_t AviRecorder::StopRecording()
     return FileRecorderImpl::StopRecording();
 }
-int32_t AviRecorder::CalcI420FrameSize( ) const
+size_t AviRecorder::CalcI420FrameSize( ) const
 {
     return 3 * _videoCodecInst.width * _videoCodecInst.height / 2;
 }
@@ -641,8 +640,8 @@ int32_t AviRecorder::EncodeAndWriteVideoToFile(I420VideoFrame& videoFrame)
     if( STR_CASE_CMP(_videoCodecInst.plName, "I420") == 0)
     {
-        int length = CalcBufferSize(kI420, videoFrame.width(),
-                                    videoFrame.height());
+        size_t length =
+            CalcBufferSize(kI420, videoFrame.width(), videoFrame.height());
         _videoEncodedData.VerifyAndAllocate(length);
         // I420 is raw data. No encoding needed (each sample is represented by
@@ -681,7 +680,7 @@ int32_t AviRecorder::EncodeAndWriteVideoToFile(I420VideoFrame& videoFrame)
 // happens in AviRecorder::Process().
 int32_t AviRecorder::WriteEncodedAudioData(
     const int8_t* audioBuffer,
-    uint16_t bufferLength,
+    size_t bufferLength,
     uint16_t millisecondsOfData,
     const TickTime* playoutTS)
 {

View File

@@ -86,7 +86,7 @@ public:
 protected:
     virtual int32_t WriteEncodedAudioData(
         const int8_t* audioBuffer,
-        uint16_t bufferLength,
+        size_t bufferLength,
         uint16_t millisecondsOfData,
         const TickTime* playoutTS);
@@ -111,7 +111,7 @@ class AudioFrameFileInfo
 {
 public:
     AudioFrameFileInfo(const int8_t* audioData,
-                       const uint16_t audioSize,
+                       const size_t audioSize,
                        const uint16_t audioMS,
                        const TickTime& playoutTS)
         : _audioData(), _audioSize(audioSize), _audioMS(audioMS),
@@ -127,7 +127,7 @@ class AudioFrameFileInfo
     };
     // TODO (hellner): either turn into a struct or provide get/set functions.
    int8_t _audioData[MAX_AUDIO_BUFFER_IN_BYTES];
-    uint16_t _audioSize;
+    size_t _audioSize;
     uint16_t _audioMS;
     TickTime _playoutTS;
 };
@@ -151,7 +151,7 @@ public:
 protected:
     virtual int32_t WriteEncodedAudioData(
         const int8_t* audioBuffer,
-        uint16_t bufferLength,
+        size_t bufferLength,
         uint16_t millisecondsOfData,
         const TickTime* playoutTS);
 private:
@@ -165,7 +165,7 @@ private:
     int32_t EncodeAndWriteVideoToFile(I420VideoFrame& videoFrame);
     int32_t ProcessAudio();
-    int32_t CalcI420FrameSize() const;
+    size_t CalcI420FrameSize() const;
     int32_t SetUpVideoEncoder();
     VideoCodec _videoCodecInst;
@@ -178,7 +178,7 @@ private:
     FrameScaler* _frameScaler;
     VideoCoder* _videoEncoder;
-    int32_t _videoMaxPayloadSize;
+    size_t _videoMaxPayloadSize;
     EncodedVideoData _videoEncodedData;
     ThreadWrapper* _thread;

View File

@@ -12,6 +12,7 @@
 #include <assert.h>
 #include <stdio.h>
+#include <limits>
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/logging.h"
@@ -145,7 +146,7 @@ bool RtpDumpImpl::IsActive() const
     return _file.Open();
 }
-int32_t RtpDumpImpl::DumpPacket(const uint8_t* packet, uint16_t packetLength)
+int32_t RtpDumpImpl::DumpPacket(const uint8_t* packet, size_t packetLength)
 {
     CriticalSectionScoped lock(_critSect);
     if (!IsActive())
@@ -158,7 +159,9 @@ int32_t RtpDumpImpl::DumpPacket(const uint8_t* packet, uint16_t packetLength)
         return -1;
     }
-    if (packetLength < 1)
+    rtpDumpPktHdr_t hdr;
+    size_t total_size = packetLength + sizeof hdr;
+    if (packetLength < 1 || total_size > std::numeric_limits<uint16_t>::max())
     {
         return -1;
     }
@@ -167,11 +170,8 @@ int32_t RtpDumpImpl::DumpPacket(const uint8_t* packet, uint16_t packetLength)
     // considered RTP (without further verification).
     bool isRTCP = RTCP(packet);
-    rtpDumpPktHdr_t hdr;
-    uint32_t offset;
     // Offset is relative to when recording was started.
-    offset = GetTimeInMS();
+    uint32_t offset = GetTimeInMS();
     if (offset < _startTime)
     {
         // Compensate for wraparound.
@@ -181,7 +181,7 @@ int32_t RtpDumpImpl::DumpPacket(const uint8_t* packet, uint16_t packetLength)
     }
     hdr.offset = RtpDumpHtonl(offset);
-    hdr.length = RtpDumpHtons((uint16_t)(packetLength + sizeof(hdr)));
+    hdr.length = RtpDumpHtons((uint16_t)(total_size));
     if (isRTCP)
     {
         hdr.plen = 0;
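The new total_size guard in this file exists because the rtpdump record header stores the record length in a 16-bit field (hdr.length is written through RtpDumpHtons as a uint16_t), so once packetLength is a size_t, anything that would not fit in that field has to be rejected up front. A minimal standalone sketch of that boundary check; the struct here is a simplified stand-in, not the exact rtpDumpPktHdr_t layout:

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    // Simplified stand-in for the on-disk record header; its length field is 16 bits.
    struct PktHdrSketch {
        uint16_t length;  // total record length (header + packet)
        uint16_t plen;    // original packet length, 0 for RTCP
        uint32_t offset;  // milliseconds since recording started
    };

    // Mirrors the new early-out: reject empty packets and packets whose record
    // would overflow the 16-bit length field.
    bool FitsRecordHeader(size_t packetLength) {
        size_t total_size = packetLength + sizeof(PktHdrSketch);
        return packetLength >= 1 &&
               total_size <= std::numeric_limits<uint16_t>::max();
    }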

View File

@@ -26,7 +26,7 @@ public:
     virtual int32_t Stop() OVERRIDE;
     virtual bool IsActive() const OVERRIDE;
     virtual int32_t DumpPacket(const uint8_t* packet,
-                               uint16_t packetLength) OVERRIDE;
+                               size_t packetLength) OVERRIDE;
 private:
     // Return the system time in ms.
     inline uint32_t GetTimeInMS() const;

View File

@@ -113,7 +113,7 @@ int32_t VideoCoder::SendData(
     const uint32_t timeStamp,
     int64_t capture_time_ms,
     const uint8_t* payloadData,
-    uint32_t payloadSize,
+    size_t payloadSize,
     const RTPFragmentationHeader& fragmentationHeader,
     const RTPVideoHeader* /*rtpVideoHdr*/)
 {

View File

@@ -53,7 +53,7 @@ private:
         uint32_t /*timeStamp*/,
         int64_t capture_time_ms,
         const uint8_t* payloadData,
-        uint32_t payloadSize,
+        size_t payloadSize,
         const RTPFragmentationHeader& /* fragmentationHeader*/,
         const RTPVideoHeader* rtpTypeHdr) OVERRIDE;