commit 4591fbd09f
parent edc6e57a92
Author: pkasting@chromium.org
Date:   2014-11-20 22:28:14 +00:00
341 changed files with 2610 additions and 2613 deletions

View File

@ -467,8 +467,8 @@ public:
// Return value : VCM_OK, on success.
// < 0, on error.
virtual int32_t IncomingPacket(const uint8_t* incomingPayload,
uint32_t payloadLength,
const WebRtcRTPHeader& rtpInfo) = 0;
size_t payloadLength,
const WebRtcRTPHeader& rtpInfo) = 0;
// Minimum playout delay (Used for lip-sync). This is the minimum delay required
// to sync with audio. Not included in VideoCodingModule::Delay()

View File

@ -75,7 +75,7 @@ class VCMPacketizationCallback {
uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
uint32_t payloadSize,
size_t payloadSize,
const RTPFragmentationHeader& fragmentationHeader,
const RTPVideoHeader* rtpVideoHdr) = 0;
protected:

View File

@ -25,6 +25,10 @@
#include "webrtc/modules/video_coding/main/source/internal_defines.h"
#include "webrtc/system_wrappers/interface/logging.h"
namespace {
const size_t kDefaultPayloadSize = 1440;
}
namespace webrtc {
VideoCodecVP8 VideoEncoder::GetDefaultVp8Settings() {
@ -227,12 +231,12 @@ void VCMCodecDataBase::ResetSender() {
bool VCMCodecDataBase::SetSendCodec(
const VideoCodec* send_codec,
int number_of_cores,
int max_payload_size,
size_t max_payload_size,
VCMEncodedFrameCallback* encoded_frame_callback) {
if (!send_codec) {
return false;
}
if (max_payload_size <= 0) {
if (max_payload_size == 0) {
max_payload_size = kDefaultPayloadSize;
}
if (number_of_cores <= 0) {

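With max_payload_size now a size_t, the old "if (max_payload_size <= 0)" guard could only ever trigger for zero (unsigned values are never negative), so the sentinel check is spelled out as "== 0" and kDefaultPayloadSize moves into an unnamed namespace as a size_t. A minimal standalone sketch of the same pattern, using hypothetical names:

#include <cstddef>
#include <cstdio>

namespace {
const size_t kDefaultPayloadSize = 1440;
}  // namespace

// Hypothetical helper mirroring the check above: 0 means "use the default".
size_t ClampPayloadSize(size_t max_payload_size) {
  // For an unsigned type, "<= 0" is just a roundabout "== 0".
  if (max_payload_size == 0)
    max_payload_size = kDefaultPayloadSize;
  return max_payload_size;
}

int main() {
  std::printf("%zu\n", ClampPayloadSize(0));    // prints 1440
  std::printf("%zu\n", ClampPayloadSize(500));  // prints 500
  return 0;
}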
View File

@ -22,10 +22,6 @@
namespace webrtc {
enum VCMCodecDBProperties {
kDefaultPayloadSize = 1440
};
struct VCMDecoderMapItem {
public:
VCMDecoderMapItem(VideoCodec* settings,
@ -70,7 +66,7 @@ class VCMCodecDataBase {
// Returns true if the codec was successfully registered, false otherwise.
bool SetSendCodec(const VideoCodec* send_codec,
int number_of_cores,
int max_payload_size,
size_t max_payload_size,
VCMEncodedFrameCallback* encoded_frame_callback);
// Gets the current send codec. Relevant for internal codecs only.
@ -175,7 +171,7 @@ class VCMCodecDataBase {
uint8_t payload_type) const;
int number_of_cores_;
int max_payload_size_;
size_t max_payload_size_;
bool periodic_key_frames_;
bool pending_encoder_reset_;
bool current_enc_is_external_;

View File

@ -56,7 +56,7 @@ public:
/**
* Get frame length
*/
uint32_t Length() const {return _length;}
size_t Length() const {return _length;}
/**
* Get frame timestamp (90kHz)
*/

View File

@ -268,11 +268,11 @@ VCMFrameBuffer::PrepareForDecode(bool continuous) {
_sessionInfo.BuildVP8FragmentationHeader(_buffer, _length,
&_fragmentation);
} else {
int bytes_removed = _sessionInfo.MakeDecodable();
size_t bytes_removed = _sessionInfo.MakeDecodable();
_length -= bytes_removed;
}
#else
int bytes_removed = _sessionInfo.MakeDecodable();
size_t bytes_removed = _sessionInfo.MakeDecodable();
_length -= bytes_removed;
#endif
// Transfer frame information to EncodedFrame and create any codec

View File

@ -82,7 +82,7 @@ int32_t VCMGenericEncoder::Release()
int32_t
VCMGenericEncoder::InitEncode(const VideoCodec* settings,
int32_t numberOfCores,
uint32_t maxPayloadSize)
size_t maxPayloadSize)
{
_bitRate = settings->startBitrate * 1000;
_frameRate = settings->maxFramerate;
@ -218,7 +218,7 @@ VCMEncodedFrameCallback::Encoded(
FrameType frameType = VCMEncodedFrame::ConvertFrameType(encodedImage._frameType);
uint32_t encodedBytes = 0;
size_t encodedBytes = 0;
if (_sendCallback != NULL)
{
encodedBytes = encodedImage._length;

View File

@ -84,7 +84,7 @@ public:
*/
int32_t InitEncode(const VideoCodec* settings,
int32_t numberOfCores,
uint32_t maxPayloadSize);
size_t maxPayloadSize);
/**
* Encode raw image
* inputFrame : Frame containing raw image

View File

@ -62,14 +62,14 @@ void UpdateProtectionCallback(
} // namespace
struct MediaOptimization::EncodedFrameSample {
EncodedFrameSample(int size_bytes,
EncodedFrameSample(size_t size_bytes,
uint32_t timestamp,
int64_t time_complete_ms)
: size_bytes(size_bytes),
timestamp(timestamp),
time_complete_ms(time_complete_ms) {}
uint32_t size_bytes;
size_t size_bytes;
uint32_t timestamp;
int64_t time_complete_ms;
};
@ -369,7 +369,7 @@ VCMFrameCount MediaOptimization::SentFrameCount() {
return count;
}
int32_t MediaOptimization::UpdateWithEncodedData(int encoded_length,
int32_t MediaOptimization::UpdateWithEncodedData(size_t encoded_length,
uint32_t timestamp,
FrameType encoded_frame_type) {
CriticalSectionScoped lock(crit_sect_.get());
@ -532,7 +532,7 @@ void MediaOptimization::UpdateSentBitrate(int64_t now_ms) {
avg_sent_bit_rate_bps_ = 0;
return;
}
int framesize_sum = 0;
size_t framesize_sum = 0;
for (FrameSampleList::iterator it = encoded_frame_samples_.begin();
it != encoded_frame_samples_.end();
++it) {

View File

@ -77,7 +77,7 @@ class MediaOptimization {
void UpdateContentData(const VideoContentMetrics* content_metrics);
// Informs Media Optimization of encoding output: Length and frame type.
int32_t UpdateWithEncodedData(int encoded_length,
int32_t UpdateWithEncodedData(size_t encoded_length,
uint32_t timestamp,
FrameType encoded_frame_type);

View File

@ -30,12 +30,11 @@ class TestMediaOptimization : public ::testing::Test {
next_timestamp_(0) {}
// This method mimics what happens in VideoSender::AddVideoFrame.
void AddFrameAndAdvanceTime(int bitrate_bps, bool expect_frame_drop) {
ASSERT_GE(bitrate_bps, 0);
void AddFrameAndAdvanceTime(uint32_t bitrate_bps, bool expect_frame_drop) {
bool frame_dropped = media_opt_.DropFrame();
EXPECT_EQ(expect_frame_drop, frame_dropped);
if (!frame_dropped) {
int bytes_per_frame = bitrate_bps * frame_time_ms_ / (8 * 1000);
size_t bytes_per_frame = bitrate_bps * frame_time_ms_ / (8 * 1000);
ASSERT_EQ(VCM_OK, media_opt_.UpdateWithEncodedData(
bytes_per_frame, next_timestamp_, kVideoFrameDelta));
}
@ -54,14 +53,14 @@ TEST_F(TestMediaOptimization, VerifyMuting) {
// Enable video suspension with these limits.
// Suspend the video when the rate is below 50 kbps and resume when it gets
// above 50 + 10 kbps again.
const int kThresholdBps = 50000;
const int kWindowBps = 10000;
const uint32_t kThresholdBps = 50000;
const uint32_t kWindowBps = 10000;
media_opt_.SuspendBelowMinBitrate(kThresholdBps, kWindowBps);
// The video should not be suspended from the start.
EXPECT_FALSE(media_opt_.IsVideoSuspended());
int target_bitrate_kbps = 100;
uint32_t target_bitrate_kbps = 100;
media_opt_.SetTargetRates(target_bitrate_kbps * 1000,
0, // Lossrate.
100,

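With bitrate_bps changed to uint32_t, the ASSERT_GE(bitrate_bps, 0) precondition becomes vacuous and is dropped, and the per-frame byte count is carried in a size_t. A rough worked example of that expression, assuming a ~30 fps frame interval of 33 ms (the actual frame_time_ms_ value is not shown in this hunk):

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t bitrate_bps = 100000;  // 100 kbps, an example value
  const int64_t frame_time_ms = 33;     // assumed ~30 fps frame interval
  // Bits in one frame = rate * duration / 1000; divide by 8 for bytes.
  size_t bytes_per_frame = bitrate_bps * frame_time_ms / (8 * 1000);
  std::printf("%zu bytes per frame\n", bytes_per_frame);  // prints 412
  return 0;
}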
View File

@ -35,7 +35,7 @@ VCMPacket::VCMPacket()
}
VCMPacket::VCMPacket(const uint8_t* ptr,
const uint32_t size,
const size_t size,
const WebRtcRTPHeader& rtpHeader) :
payloadType(rtpHeader.header.payloadType),
timestamp(rtpHeader.header.timestamp),
@ -57,7 +57,11 @@ VCMPacket::VCMPacket(const uint8_t* ptr,
CopyCodecSpecifics(rtpHeader.type.Video);
}
VCMPacket::VCMPacket(const uint8_t* ptr, uint32_t size, uint16_t seq, uint32_t ts, bool mBit) :
VCMPacket::VCMPacket(const uint8_t* ptr,
size_t size,
uint16_t seq,
uint32_t ts,
bool mBit) :
payloadType(0),
timestamp(ts),
ntp_time_ms_(0),

View File

@ -21,10 +21,10 @@ class VCMPacket {
public:
VCMPacket();
VCMPacket(const uint8_t* ptr,
const uint32_t size,
const size_t size,
const WebRtcRTPHeader& rtpHeader);
VCMPacket(const uint8_t* ptr,
uint32_t size,
size_t size,
uint16_t seqNum,
uint32_t timestamp,
bool markerBit);
@ -37,7 +37,7 @@ public:
int64_t ntp_time_ms_;
uint16_t seqNum;
const uint8_t* dataPtr;
uint32_t sizeBytes;
size_t sizeBytes;
bool markerBit;
FrameType frameType;

View File

@ -239,11 +239,11 @@ void VCMQmResolution::UpdateCodecParameters(float frame_rate, uint16_t width,
}
// Update rate data after every encoded frame.
void VCMQmResolution::UpdateEncodedSize(int encoded_size,
void VCMQmResolution::UpdateEncodedSize(size_t encoded_size,
FrameType encoded_frame_type) {
frame_cnt_++;
// Convert to Kbps.
float encoded_size_kbits = static_cast<float>((encoded_size * 8.0) / 1000.0);
float encoded_size_kbits = 8.0f * static_cast<float>(encoded_size) / 1000.0f;
// Update the buffer level:
// Note this is not the actual encoder buffer level.

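The kilobit conversion in UpdateEncodedSize is plain arithmetic, bytes times 8 divided by 1000, now written with one explicit cast from size_t to float. A tiny worked example with an assumed 1500-byte frame:

#include <cstddef>
#include <cstdio>

int main() {
  size_t encoded_size = 1500;  // example encoded-frame size in bytes
  float encoded_size_kbits = 8.0f * static_cast<float>(encoded_size) / 1000.0f;
  std::printf("%.1f kbits\n", encoded_size_kbits);  // prints 12.0
  return 0;
}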
View File

@ -216,7 +216,7 @@ class VCMQmResolution : public VCMQmMethod {
// Update with actual bit rate (size of the latest encoded frame)
// and frame type, after every encoded frame.
void UpdateEncodedSize(int encoded_size,
void UpdateEncodedSize(size_t encoded_size,
FrameType encoded_frame_type);
// Update with new target bitrate, actual encoder sent rate, frame_rate,

View File

@ -46,7 +46,7 @@ class QmSelectTest : public ::testing::Test {
int native_height,
int num_layers);
void UpdateQmEncodedFrame(int* encoded_size, int num_updates);
void UpdateQmEncodedFrame(size_t* encoded_size, size_t num_updates);
void UpdateQmRateData(int* target_rate,
int* encoder_sent_rate,
@ -315,8 +315,8 @@ TEST_F(QmSelectTest, DownActionBufferUnderflow) {
// Update with encoded size over a number of frames.
// per-frame bandwidth = 15 = 450/30: simulate (decoder) buffer underflow:
int encoded_size[] = {200, 100, 50, 30, 60, 40, 20, 30, 20, 40};
UpdateQmEncodedFrame(encoded_size, 10);
size_t encoded_size[] = {200, 100, 50, 30, 60, 40, 20, 30, 20, 40};
UpdateQmEncodedFrame(encoded_size, GTEST_ARRAY_SIZE_(encoded_size));
// Update rates for a sequence of intervals.
int target_rate[] = {300, 300, 300};
@ -359,8 +359,8 @@ TEST_F(QmSelectTest, NoActionBufferStable) {
// Update with encoded size over a number of frames.
// per-frame bandwidth = 15 = 450/30: simulate stable (decoder) buffer levels.
int32_t encoded_size[] = {40, 10, 10, 16, 18, 20, 17, 20, 16, 15};
UpdateQmEncodedFrame(encoded_size, 10);
size_t encoded_size[] = {40, 10, 10, 16, 18, 20, 17, 20, 16, 15};
UpdateQmEncodedFrame(encoded_size, GTEST_ARRAY_SIZE_(encoded_size));
// Update rates for a sequence of intervals.
int target_rate[] = {350, 350, 350};
@ -1262,11 +1262,12 @@ void QmSelectTest::UpdateQmContentData(float motion_metric,
qm_resolution_->UpdateContent(content_metrics_);
}
void QmSelectTest::UpdateQmEncodedFrame(int* encoded_size, int num_updates) {
void QmSelectTest::UpdateQmEncodedFrame(size_t* encoded_size,
size_t num_updates) {
FrameType frame_type = kVideoFrameDelta;
for (int i = 0; i < num_updates; ++i) {
for (size_t i = 0; i < num_updates; ++i) {
// Convert to bytes.
int32_t encoded_size_update = 1000 * encoded_size[i] / 8;
size_t encoded_size_update = 1000 * encoded_size[i] / 8;
qm_resolution_->UpdateEncodedSize(encoded_size_update, frame_type);
}
}

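Replacing the hand-counted 10 with GTEST_ARRAY_SIZE_(encoded_size) ties the update count to the array itself, so adding or removing samples cannot silently desynchronize the two; the macro is the usual sizeof(array)/sizeof(element) idiom. A minimal sketch of the same idea with a hypothetical template helper:

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for GTEST_ARRAY_SIZE_: element count of a C array.
template <typename T, size_t N>
size_t ArraySize(const T (&)[N]) {
  return N;
}

int main() {
  size_t encoded_size[] = {200, 100, 50, 30, 60, 40, 20, 30, 20, 40};
  std::printf("%zu updates\n", ArraySize(encoded_size));  // prints 10
  return 0;
}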
View File

@ -110,8 +110,8 @@ void VCMSessionInfo::Reset() {
last_packet_seq_num_ = -1;
}
int VCMSessionInfo::SessionLength() const {
int length = 0;
size_t VCMSessionInfo::SessionLength() const {
size_t length = 0;
for (PacketIteratorConst it = packets_.begin(); it != packets_.end(); ++it)
length += (*it).sizeBytes;
return length;
@ -121,13 +121,13 @@ int VCMSessionInfo::NumPackets() const {
return packets_.size();
}
int VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
PacketIterator packet_it) {
size_t VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
PacketIterator packet_it) {
VCMPacket& packet = *packet_it;
PacketIterator it;
// Calculate the offset into the frame buffer for this packet.
int offset = 0;
size_t offset = 0;
for (it = packets_.begin(); it != packet_it; ++it)
offset += (*it).sizeBytes;
@ -145,7 +145,7 @@ int VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
size_t required_length = 0;
const uint8_t* nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
while (nalu_ptr < packet_buffer + packet.sizeBytes) {
uint32_t length = BufferToUWord16(nalu_ptr);
size_t length = BufferToUWord16(nalu_ptr);
required_length +=
length + (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
nalu_ptr += kLengthFieldLength + length;
@ -154,7 +154,7 @@ int VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
uint8_t* frame_buffer_ptr = frame_buffer + offset;
while (nalu_ptr < packet_buffer + packet.sizeBytes) {
uint32_t length = BufferToUWord16(nalu_ptr);
size_t length = BufferToUWord16(nalu_ptr);
nalu_ptr += kLengthFieldLength;
frame_buffer_ptr += Insert(nalu_ptr,
length,
@ -276,9 +276,9 @@ VCMSessionInfo::PacketIterator VCMSessionInfo::FindNaluEnd(
return --packet_it;
}
int VCMSessionInfo::DeletePacketData(PacketIterator start,
PacketIterator end) {
int bytes_to_delete = 0; // The number of bytes to delete.
size_t VCMSessionInfo::DeletePacketData(PacketIterator start,
PacketIterator end) {
size_t bytes_to_delete = 0; // The number of bytes to delete.
PacketIterator packet_after_end = end;
++packet_after_end;
@ -290,20 +290,20 @@ int VCMSessionInfo::DeletePacketData(PacketIterator start,
(*it).dataPtr = NULL;
}
if (bytes_to_delete > 0)
ShiftSubsequentPackets(end, -bytes_to_delete);
ShiftSubsequentPackets(end, -static_cast<int>(bytes_to_delete));
return bytes_to_delete;
}
int VCMSessionInfo::BuildVP8FragmentationHeader(
size_t VCMSessionInfo::BuildVP8FragmentationHeader(
uint8_t* frame_buffer,
int frame_buffer_length,
size_t frame_buffer_length,
RTPFragmentationHeader* fragmentation) {
int new_length = 0;
size_t new_length = 0;
// Allocate space for max number of partitions
fragmentation->VerifyAndAllocateFragmentationHeader(kMaxVP8Partitions);
fragmentation->fragmentationVectorSize = 0;
memset(fragmentation->fragmentationLength, 0,
kMaxVP8Partitions * sizeof(uint32_t));
kMaxVP8Partitions * sizeof(size_t));
if (packets_.empty())
return new_length;
PacketIterator it = FindNextPartitionBeginning(packets_.begin());
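In DeletePacketData above, bytes_to_delete is now a size_t while ShiftSubsequentPackets evidently still takes a signed offset; negating an unsigned value directly stays unsigned (it wraps to a huge positive number), hence the -static_cast<int>(bytes_to_delete). A standalone sketch of the wrap, with an arbitrary example value:

#include <cstddef>
#include <cstdio>

int main() {
  size_t bytes_to_delete = 100;
  // Unary minus on an unsigned type yields an unsigned result: SIZE_MAX - 99.
  size_t wrapped = -bytes_to_delete;
  // Casting to int before negating keeps the shift signed and negative.
  int shift = -static_cast<int>(bytes_to_delete);
  std::printf("wrapped=%zu shift=%d\n", wrapped, shift);
  return 0;
}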
@ -314,11 +314,11 @@ int VCMSessionInfo::BuildVP8FragmentationHeader(
fragmentation->fragmentationOffset[partition_id] =
(*it).dataPtr - frame_buffer;
assert(fragmentation->fragmentationOffset[partition_id] <
static_cast<uint32_t>(frame_buffer_length));
frame_buffer_length);
fragmentation->fragmentationLength[partition_id] =
(*partition_end).dataPtr + (*partition_end).sizeBytes - (*it).dataPtr;
assert(fragmentation->fragmentationLength[partition_id] <=
static_cast<uint32_t>(frame_buffer_length));
frame_buffer_length);
new_length += fragmentation->fragmentationLength[partition_id];
++partition_end;
it = FindNextPartitionBeginning(partition_end);
@ -385,8 +385,8 @@ bool VCMSessionInfo::InSequence(const PacketIterator& packet_it,
(*packet_it).seqNum));
}
int VCMSessionInfo::MakeDecodable() {
int return_length = 0;
size_t VCMSessionInfo::MakeDecodable() {
size_t return_length = 0;
if (packets_.empty()) {
return 0;
}
@ -511,13 +511,13 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
// The insert operation invalidates the iterator |rit|.
PacketIterator packet_list_it = packets_.insert(rit.base(), packet);
int returnLength = InsertBuffer(frame_buffer, packet_list_it);
size_t returnLength = InsertBuffer(frame_buffer, packet_list_it);
UpdateCompleteSession();
if (decode_error_mode == kWithErrors)
decodable_ = true;
else if (decode_error_mode == kSelectiveErrors)
UpdateDecodableSession(frame_data);
return returnLength;
return static_cast<int>(returnLength);
}
void VCMSessionInfo::InformOfEmptyPacket(uint16_t seq_num) {

View File

@ -56,15 +56,15 @@ class VCMSessionInfo {
// Builds fragmentation headers for VP8, each fragment being a decodable
// VP8 partition. Returns the total number of bytes which are decodable. Is
// used instead of MakeDecodable for VP8.
int BuildVP8FragmentationHeader(uint8_t* frame_buffer,
int frame_buffer_length,
RTPFragmentationHeader* fragmentation);
size_t BuildVP8FragmentationHeader(uint8_t* frame_buffer,
size_t frame_buffer_length,
RTPFragmentationHeader* fragmentation);
// Makes the frame decodable. I.e., only contain decodable NALUs. All
// non-decodable NALUs will be deleted and packets will be moved to in
// memory to remove any empty space.
// Returns the number of bytes deleted from the session.
int MakeDecodable();
size_t MakeDecodable();
// Sets decodable_ to false.
// Used by the dual decoder. After the mode is changed to kNoErrors from
@ -72,7 +72,7 @@ class VCMSessionInfo {
// decodable and are not complete are marked as non-decodable.
void SetNotDecodableIfIncomplete();
int SessionLength() const;
size_t SessionLength() const;
int NumPackets() const;
bool HaveFirstPacket() const;
bool HaveLastPacket() const;
@ -114,8 +114,8 @@ class VCMSessionInfo {
PacketIterator FindPartitionEnd(PacketIterator it) const;
static bool InSequence(const PacketIterator& it,
const PacketIterator& prev_it);
int InsertBuffer(uint8_t* frame_buffer,
PacketIterator packetIterator);
size_t InsertBuffer(uint8_t* frame_buffer,
PacketIterator packetIterator);
size_t Insert(const uint8_t* buffer,
size_t length,
bool insert_start_code,
@ -124,8 +124,8 @@ class VCMSessionInfo {
PacketIterator FindNaluEnd(PacketIterator packet_iter) const;
// Deletes the data of all packets between |start| and |end|, inclusively.
// Note that this function doesn't delete the actual packets.
int DeletePacketData(PacketIterator start,
PacketIterator end);
size_t DeletePacketData(PacketIterator start,
PacketIterator end);
void UpdateCompleteSession();
// When enabled, determine if session is decodable, i.e. incomplete but

View File

@ -34,20 +34,20 @@ class TestSessionInfo : public ::testing::Test {
}
void FillPacket(uint8_t start_value) {
for (int i = 0; i < packet_buffer_size(); ++i)
for (size_t i = 0; i < packet_buffer_size(); ++i)
packet_buffer_[i] = start_value + i;
}
void VerifyPacket(uint8_t* start_ptr, uint8_t start_value) {
for (int j = 0; j < packet_buffer_size(); ++j) {
for (size_t j = 0; j < packet_buffer_size(); ++j) {
ASSERT_EQ(start_value + j, start_ptr[j]);
}
}
int packet_buffer_size() const {
size_t packet_buffer_size() const {
return sizeof(packet_buffer_) / sizeof(packet_buffer_[0]);
}
int frame_buffer_size() const {
size_t frame_buffer_size() const {
return sizeof(frame_buffer_) / sizeof(frame_buffer_[0]);
}
@ -77,10 +77,10 @@ class TestVP8Partitions : public TestSessionInfo {
bool VerifyPartition(int partition_id,
int packets_expected,
int start_value) {
EXPECT_EQ(static_cast<uint32_t>(packets_expected * packet_buffer_size()),
EXPECT_EQ(packets_expected * packet_buffer_size(),
fragmentation_.fragmentationLength[partition_id]);
for (int i = 0; i < packets_expected; ++i) {
int packet_index = fragmentation_.fragmentationOffset[partition_id] +
size_t packet_index = fragmentation_.fragmentationOffset[partition_id] +
i * packet_buffer_size();
if (packet_index + packet_buffer_size() > frame_buffer_size())
return false;
@ -154,10 +154,8 @@ TEST_F(TestSessionInfo, TestSimpleAPIs) {
packet_.frameType = kVideoFrameKey;
FillPacket(0);
EXPECT_EQ(packet_buffer_size(),
session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data));
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
EXPECT_FALSE(session_.HaveLastPacket());
EXPECT_EQ(kVideoFrameKey, session_.FrameType());
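In these assertions packet_buffer_size() now returns size_t while InsertPacket still returns an int byte count, so the actual value is cast to size_t and the expected value is kept in the first argument slot (gtest's EXPECT_EQ(expected, actual) convention); without the cast, the signed/unsigned comparison inside the macro trips -Wsign-compare. A minimal sketch of the pattern, with hypothetical stand-ins for the session API (build against gtest/gtest_main):

#include <cstddef>
#include <gtest/gtest.h>

// Hypothetical stand-ins: the insert call reports bytes consumed (or < 0 on
// error) as an int, while the buffer size helper is unsigned.
int InsertPacketStub() { return 1500; }
size_t PacketBufferSizeStub() { return 1500; }

TEST(SignCompareSketch, CastActualToExpectedType) {
  EXPECT_EQ(PacketBufferSizeStub(),
            static_cast<size_t>(InsertPacketStub()));
}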
@ -165,10 +163,8 @@ TEST_F(TestSessionInfo, TestSimpleAPIs) {
packet_.markerBit = true;
packet_.seqNum += 1;
EXPECT_EQ(packet_buffer_size(),
session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data));
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
EXPECT_TRUE(session_.HaveLastPacket());
EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
EXPECT_EQ(0xFFFE, session_.LowSequenceNumber());
@ -193,31 +189,26 @@ TEST_F(TestSessionInfo, NormalOperation) {
packet_.isFirstPacket = true;
packet_.markerBit = false;
FillPacket(0);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
packet_.isFirstPacket = false;
for (int i = 1; i < 9; ++i) {
packet_.seqNum += 1;
FillPacket(i);
ASSERT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
ASSERT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors,
frame_data)));
}
packet_.seqNum += 1;
packet_.markerBit = true;
FillPacket(9);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
EXPECT_EQ(10 * packet_buffer_size(), session_.SessionLength());
for (int i = 0; i < 10; ++i) {
@ -231,11 +222,10 @@ TEST_F(TestSessionInfo, ErrorsEqualDecodableState) {
packet_.isFirstPacket = false;
packet_.markerBit = false;
FillPacket(3);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kWithErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kWithErrors,
frame_data)));
EXPECT_TRUE(session_.decodable());
}
@ -246,21 +236,19 @@ TEST_F(TestSessionInfo, SelectiveDecodableState) {
FillPacket(1);
frame_data.rolling_average_packets_per_frame = 11;
frame_data.rtt_ms = 150;
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kSelectiveErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kSelectiveErrors,
frame_data)));
EXPECT_FALSE(session_.decodable());
packet_.seqNum -= 1;
FillPacket(0);
packet_.isFirstPacket = true;
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kSelectiveErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kSelectiveErrors,
frame_data)));
EXPECT_TRUE(session_.decodable());
packet_.isFirstPacket = false;
@ -268,21 +256,19 @@ TEST_F(TestSessionInfo, SelectiveDecodableState) {
for (int i = 2; i < 8; ++i) {
packet_.seqNum += 1;
FillPacket(i);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kSelectiveErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kSelectiveErrors,
frame_data)));
EXPECT_TRUE(session_.decodable());
}
packet_.seqNum += 1;
FillPacket(8);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kSelectiveErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kSelectiveErrors,
frame_data)));
EXPECT_TRUE(session_.decodable());
}
@ -291,11 +277,9 @@ TEST_F(TestSessionInfo, OutOfBoundsPackets1PacketFrame) {
packet_.isFirstPacket = true;
packet_.markerBit = true;
FillPacket(1);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
packet_.seqNum = 0x0004;
packet_.isFirstPacket = true;
@ -320,11 +304,9 @@ TEST_F(TestSessionInfo, SetMarkerBitOnce) {
packet_.isFirstPacket = false;
packet_.markerBit = true;
FillPacket(1);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
++packet_.seqNum;
packet_.isFirstPacket = true;
packet_.markerBit = true;
@ -342,10 +324,8 @@ TEST_F(TestSessionInfo, OutOfBoundsPacketsBase) {
packet_.markerBit = false;
FillPacket(1);
EXPECT_EQ(packet_buffer_size(),
session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data));
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
// Insert an older packet with a first packet set.
packet_.seqNum = 0x0004;
packet_.isFirstPacket = true;
@ -360,10 +340,8 @@ TEST_F(TestSessionInfo, OutOfBoundsPacketsBase) {
packet_.markerBit = true;
FillPacket(1);
EXPECT_EQ(packet_buffer_size(),
session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data));
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
packet_.seqNum = 0x0008;
packet_.isFirstPacket = false;
packet_.markerBit = true;
@ -380,29 +358,23 @@ TEST_F(TestSessionInfo, OutOfBoundsPacketsWrap) {
packet_.markerBit = false;
FillPacket(1);
EXPECT_EQ(packet_buffer_size(),
session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data));
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
packet_.seqNum = 0x0004;
packet_.isFirstPacket = false;
packet_.markerBit = true;
FillPacket(1);
EXPECT_EQ(packet_buffer_size(),
session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data));
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
packet_.seqNum = 0x0002;
packet_.isFirstPacket = false;
packet_.markerBit = false;
FillPacket(1);
ASSERT_EQ(packet_buffer_size(),
session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data));
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
packet_.seqNum = 0xFFF0;
packet_.isFirstPacket = false;
packet_.markerBit = false;
@ -431,20 +403,16 @@ TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
packet_.markerBit = false;
FillPacket(1);
EXPECT_EQ(packet_buffer_size(),
session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data));
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
// Insert an older packet with a first packet set.
packet_.seqNum = 0x0005;
packet_.isFirstPacket = true;
packet_.markerBit = false;
FillPacket(1);
EXPECT_EQ(packet_buffer_size(),
session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data));
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
packet_.seqNum = 0x0004;
packet_.isFirstPacket = false;
packet_.markerBit = false;
@ -458,19 +426,15 @@ TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
packet_.markerBit = false;
FillPacket(1);
EXPECT_EQ(packet_buffer_size(),
session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data));
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
packet_.seqNum = 0x0008;
packet_.isFirstPacket = false;
packet_.markerBit = true;
FillPacket(1);
EXPECT_EQ(packet_buffer_size(),
session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data));
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
packet_.seqNum = 0x0009;
packet_.isFirstPacket = false;
@ -493,11 +457,9 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss) {
FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -507,11 +469,9 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss) {
packet_header_.header.sequenceNumber += 2;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -521,18 +481,15 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss) {
packet_header_.header.sequenceNumber += 1;
FillPacket(3);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
// One packet should be removed (end of partition 0).
EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
frame_buffer_size(),
&fragmentation_),
2 * packet_buffer_size());
EXPECT_EQ(2 * packet_buffer_size(),
session_.BuildVP8FragmentationHeader(
frame_buffer_, frame_buffer_size(), &fragmentation_));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 1, 0));
SCOPED_TRACE("Calling VerifyPartition");
@ -550,11 +507,9 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss2) {
FillPacket(1);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -564,11 +519,9 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss2) {
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -578,11 +531,9 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss2) {
packet_header_.header.sequenceNumber += 1;
FillPacket(3);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -592,18 +543,15 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss2) {
packet_header_.header.sequenceNumber += 2;
FillPacket(5);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
// One packet should be removed (end of partition 2), 3 left.
EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
frame_buffer_size(),
&fragmentation_),
3 * packet_buffer_size());
EXPECT_EQ(3 * packet_buffer_size(),
session_.BuildVP8FragmentationHeader(
frame_buffer_, frame_buffer_size(), &fragmentation_));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 1));
SCOPED_TRACE("Calling VerifyPartition");
@ -621,11 +569,9 @@ TEST_F(TestVP8Partitions, TwoPartitionsNoLossWrap) {
FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -635,11 +581,9 @@ TEST_F(TestVP8Partitions, TwoPartitionsNoLossWrap) {
packet_header_.header.sequenceNumber += 1;
FillPacket(1);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -649,11 +593,9 @@ TEST_F(TestVP8Partitions, TwoPartitionsNoLossWrap) {
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -663,18 +605,15 @@ TEST_F(TestVP8Partitions, TwoPartitionsNoLossWrap) {
packet_header_.header.sequenceNumber += 1;
FillPacket(3);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
// No packet should be removed.
EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
frame_buffer_size(),
&fragmentation_),
4 * packet_buffer_size());
EXPECT_EQ(4 * packet_buffer_size(),
session_.BuildVP8FragmentationHeader(
frame_buffer_, frame_buffer_size(), &fragmentation_));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 0));
SCOPED_TRACE("Calling VerifyPartition");
@ -692,11 +631,9 @@ TEST_F(TestVP8Partitions, TwoPartitionsLossWrap) {
FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -706,11 +643,9 @@ TEST_F(TestVP8Partitions, TwoPartitionsLossWrap) {
packet_header_.header.sequenceNumber += 1;
FillPacket(1);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -720,11 +655,9 @@ TEST_F(TestVP8Partitions, TwoPartitionsLossWrap) {
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -734,18 +667,15 @@ TEST_F(TestVP8Partitions, TwoPartitionsLossWrap) {
packet_header_.header.sequenceNumber += 2;
FillPacket(3);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
// One packet should be removed from the last partition
EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
frame_buffer_size(),
&fragmentation_),
3 * packet_buffer_size());
EXPECT_EQ(3 * packet_buffer_size(),
session_.BuildVP8FragmentationHeader(
frame_buffer_, frame_buffer_size(), &fragmentation_));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 0));
SCOPED_TRACE("Calling VerifyPartition");
@ -764,11 +694,9 @@ TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
FillPacket(1);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -778,11 +706,9 @@ TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -792,11 +718,9 @@ TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
packet_header_.header.sequenceNumber += 3;
FillPacket(5);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -806,18 +730,15 @@ TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
packet_header_.header.sequenceNumber += 1;
FillPacket(6);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
// No packet should be removed.
EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
frame_buffer_size(),
&fragmentation_),
4 * packet_buffer_size());
EXPECT_EQ(4 * packet_buffer_size(),
session_.BuildVP8FragmentationHeader(
frame_buffer_, frame_buffer_size(), &fragmentation_));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 1));
SCOPED_TRACE("Calling VerifyPartition");
@ -835,11 +756,9 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
FillPacket(1);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -850,11 +769,9 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -864,11 +781,9 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
packet_header_.header.sequenceNumber += 2;
FillPacket(4);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -878,11 +793,9 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
packet_header_.header.sequenceNumber += 1;
FillPacket(5);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -892,11 +805,9 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
packet_header_.header.sequenceNumber += 1;
FillPacket(6);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -906,18 +817,15 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
packet_header_.header.sequenceNumber += 1;
FillPacket(7);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
// 2 partitions left. 2 packets removed from second partition
EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
frame_buffer_size(),
&fragmentation_),
4 * packet_buffer_size());
EXPECT_EQ(4 * packet_buffer_size(),
session_.BuildVP8FragmentationHeader(
frame_buffer_, frame_buffer_size(), &fragmentation_));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 1));
SCOPED_TRACE("Calling VerifyPartition");
@ -935,11 +843,9 @@ TEST_F(TestVP8Partitions, AggregationOverTwoPackets) {
FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -949,11 +855,9 @@ TEST_F(TestVP8Partitions, AggregationOverTwoPackets) {
packet_header_.header.sequenceNumber += 1;
FillPacket(1);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@ -963,18 +867,15 @@ TEST_F(TestVP8Partitions, AggregationOverTwoPackets) {
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(session_.InsertPacket(*packet,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
delete packet;
// No packets removed.
EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
frame_buffer_size(),
&fragmentation_),
3 * packet_buffer_size());
EXPECT_EQ(3 * packet_buffer_size(),
session_.BuildVP8FragmentationHeader(
frame_buffer_, frame_buffer_size(), &fragmentation_));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 0));
// This partition is aggregated in partition 0
@ -996,8 +897,8 @@ TEST_F(TestNalUnits, OnlyReceivedEmptyPacket) {
kNoErrors,
frame_data));
EXPECT_EQ(0, session_.MakeDecodable());
EXPECT_EQ(0, session_.SessionLength());
EXPECT_EQ(0U, session_.MakeDecodable());
EXPECT_EQ(0U, session_.SessionLength());
}
TEST_F(TestNalUnits, OneIsolatedNaluLoss) {
@ -1006,24 +907,20 @@ TEST_F(TestNalUnits, OneIsolatedNaluLoss) {
packet_.seqNum = 0;
packet_.markerBit = false;
FillPacket(0);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
packet_.isFirstPacket = false;
packet_.completeNALU = kNaluComplete;
packet_.seqNum += 2;
packet_.markerBit = true;
FillPacket(2);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
EXPECT_EQ(0, session_.MakeDecodable());
EXPECT_EQ(0U, session_.MakeDecodable());
EXPECT_EQ(2 * packet_buffer_size(), session_.SessionLength());
SCOPED_TRACE("Calling VerifyNalu");
EXPECT_TRUE(VerifyNalu(0, 1, 0));
@ -1037,22 +934,18 @@ TEST_F(TestNalUnits, LossInMiddleOfNalu) {
packet_.seqNum = 0;
packet_.markerBit = false;
FillPacket(0);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
packet_.isFirstPacket = false;
packet_.completeNALU = kNaluEnd;
packet_.seqNum += 2;
packet_.markerBit = true;
FillPacket(2);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
EXPECT_EQ(packet_buffer_size(), session_.MakeDecodable());
EXPECT_EQ(packet_buffer_size(), session_.SessionLength());
@ -1066,22 +959,18 @@ TEST_F(TestNalUnits, StartAndEndOfLastNalUnitLost) {
packet_.seqNum = 0;
packet_.markerBit = false;
FillPacket(0);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
packet_.isFirstPacket = false;
packet_.completeNALU = kNaluIncomplete;
packet_.seqNum += 2;
packet_.markerBit = false;
FillPacket(1);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
EXPECT_EQ(packet_buffer_size(), session_.MakeDecodable());
EXPECT_EQ(packet_buffer_size(), session_.SessionLength());
@ -1096,35 +985,29 @@ TEST_F(TestNalUnits, ReorderWrapNoLoss) {
packet_.seqNum += 1;
packet_.markerBit = false;
FillPacket(1);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
packet_.isFirstPacket = true;
packet_.completeNALU = kNaluComplete;
packet_.seqNum -= 1;
packet_.markerBit = false;
FillPacket(0);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
packet_.isFirstPacket = false;
packet_.completeNALU = kNaluEnd;
packet_.seqNum += 2;
packet_.markerBit = true;
FillPacket(2);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
EXPECT_EQ(0, session_.MakeDecodable());
EXPECT_EQ(0U, session_.MakeDecodable());
EXPECT_EQ(3 * packet_buffer_size(), session_.SessionLength());
SCOPED_TRACE("Calling VerifyNalu");
EXPECT_TRUE(VerifyNalu(0, 1, 0));
@ -1136,25 +1019,21 @@ TEST_F(TestNalUnits, WrapLosses) {
packet_.completeNALU = kNaluIncomplete;
packet_.markerBit = false;
FillPacket(1);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
packet_.isFirstPacket = false;
packet_.completeNALU = kNaluEnd;
packet_.seqNum += 2;
packet_.markerBit = true;
FillPacket(2);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
EXPECT_EQ(2 * packet_buffer_size(), session_.MakeDecodable());
EXPECT_EQ(0, session_.SessionLength());
EXPECT_EQ(0U, session_.SessionLength());
}
TEST_F(TestNalUnits, ReorderWrapLosses) {
@ -1165,25 +1044,21 @@ TEST_F(TestNalUnits, ReorderWrapLosses) {
packet_.seqNum += 2;
packet_.markerBit = true;
FillPacket(2);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
packet_.seqNum -= 2;
packet_.isFirstPacket = false;
packet_.completeNALU = kNaluIncomplete;
packet_.markerBit = false;
FillPacket(1);
EXPECT_EQ(session_.InsertPacket(packet_,
frame_buffer_,
kNoErrors,
frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
kNoErrors, frame_data)));
EXPECT_EQ(2 * packet_buffer_size(), session_.MakeDecodable());
EXPECT_EQ(0, session_.SessionLength());
EXPECT_EQ(0U, session_.SessionLength());
}
} // namespace webrtc

View File

@ -301,7 +301,7 @@ class VideoCodingModuleImpl : public VideoCodingModule {
}
virtual int32_t IncomingPacket(const uint8_t* incomingPayload,
uint32_t payloadLength,
size_t payloadLength,
const WebRtcRTPHeader& rtpInfo) OVERRIDE {
return receiver_->IncomingPacket(incomingPayload, payloadLength, rtpInfo);
}

View File

@ -160,7 +160,7 @@ class VideoReceiver {
VideoCodecType ReceiveCodec() const;
int32_t IncomingPacket(const uint8_t* incomingPayload,
uint32_t payloadLength,
size_t payloadLength,
const WebRtcRTPHeader& rtpInfo);
int32_t SetMinimumPlayoutDelay(uint32_t minPlayoutDelayMs);
int32_t SetRenderDelay(uint32_t timeMS);

View File

@ -631,7 +631,7 @@ VideoCodecType VideoReceiver::ReceiveCodec() const {
// Incoming packet from network parsed and ready for decode, non blocking.
int32_t VideoReceiver::IncomingPacket(const uint8_t* incomingPayload,
uint32_t payloadLength,
size_t payloadLength,
const WebRtcRTPHeader& rtpInfo) {
if (rtpInfo.frameType == kVideoFrameKey) {
TRACE_EVENT1("webrtc",

View File

@ -49,7 +49,6 @@ class TestVideoReceiver : public ::testing::Test {
}
void InsertAndVerifyPaddingFrame(const uint8_t* payload,
int length,
WebRtcRTPHeader* header) {
ASSERT_TRUE(header != NULL);
for (int j = 0; j < 5; ++j) {
@ -63,7 +62,7 @@ class TestVideoReceiver : public ::testing::Test {
}
void InsertAndVerifyDecodableFrame(const uint8_t* payload,
int length,
size_t length,
WebRtcRTPHeader* header) {
ASSERT_TRUE(header != NULL);
EXPECT_EQ(0, receiver_->IncomingPacket(payload, length, *header));
@ -87,7 +86,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyFrames) {
EXPECT_EQ(0, receiver_->SetVideoProtection(kProtectionNack, true));
EXPECT_EQ(
0, receiver_->RegisterPacketRequestCallback(&packet_request_callback_));
const unsigned int kPaddingSize = 220;
const size_t kPaddingSize = 220;
const uint8_t payload[kPaddingSize] = {0};
WebRtcRTPHeader header;
memset(&header, 0, sizeof(header));
@ -100,7 +99,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyFrames) {
header.type.Video.codec = kRtpVideoVp8;
for (int i = 0; i < 10; ++i) {
EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
InsertAndVerifyPaddingFrame(payload, 0, &header);
InsertAndVerifyPaddingFrame(payload, &header);
clock_.AdvanceTimeMilliseconds(33);
header.header.timestamp += 3000;
}
@ -110,8 +109,8 @@ TEST_F(TestVideoReceiver, PaddingOnlyFramesWithLosses) {
EXPECT_EQ(0, receiver_->SetVideoProtection(kProtectionNack, true));
EXPECT_EQ(
0, receiver_->RegisterPacketRequestCallback(&packet_request_callback_));
const unsigned int kFrameSize = 1200;
const unsigned int kPaddingSize = 220;
const size_t kFrameSize = 1200;
const size_t kPaddingSize = 220;
const uint8_t payload[kFrameSize] = {0};
WebRtcRTPHeader header;
memset(&header, 0, sizeof(header));
@ -150,7 +149,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyFramesWithLosses) {
} else {
EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
}
InsertAndVerifyPaddingFrame(payload, 0, &header);
InsertAndVerifyPaddingFrame(payload, &header);
}
clock_.AdvanceTimeMilliseconds(33);
header.header.timestamp += 3000;
@ -161,8 +160,8 @@ TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
EXPECT_EQ(0, receiver_->SetVideoProtection(kProtectionNack, true));
EXPECT_EQ(
0, receiver_->RegisterPacketRequestCallback(&packet_request_callback_));
const unsigned int kFrameSize = 1200;
const unsigned int kPaddingSize = 220;
const size_t kFrameSize = 1200;
const size_t kPaddingSize = 220;
const uint8_t payload[kFrameSize] = {0};
WebRtcRTPHeader header;
memset(&header, 0, sizeof(header));
@ -195,7 +194,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
header.type.Video.isFirstPacket = false;
header.header.markerBit = false;
for (int j = 0; j < 2; ++j) {
// InsertAndVerifyPaddingFrame(payload, 0, &header);
// InsertAndVerifyPaddingFrame(payload, &header);
clock_.AdvanceTimeMilliseconds(33);
header.header.timestamp += 3000;
}

View File

@ -91,7 +91,7 @@ class PacketizationCallback : public VCMPacketizationCallback {
uint32_t timestamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
uint32_t payload_size,
size_t payload_size,
const RTPFragmentationHeader& fragmentation_header,
const RTPVideoHeader* rtp_video_header) OVERRIDE {
assert(rtp_video_header);
@ -127,10 +127,10 @@ class PacketizationCallback : public VCMPacketizationCallback {
struct FrameData {
FrameData() {}
FrameData(uint32_t payload_size, const RTPVideoHeader& rtp_video_header)
FrameData(size_t payload_size, const RTPVideoHeader& rtp_video_header)
: payload_size(payload_size), rtp_video_header(rtp_video_header) {}
uint32_t payload_size;
size_t payload_size;
RTPVideoHeader rtp_video_header;
};
@ -152,8 +152,8 @@ class PacketizationCallback : public VCMPacketizationCallback {
return frames;
}
int SumPayloadBytesWithinTemporalLayer(int temporal_layer) {
int payload_size = 0;
size_t SumPayloadBytesWithinTemporalLayer(int temporal_layer) {
size_t payload_size = 0;
for (size_t i = 0; i < frame_data_.size(); ++i) {
EXPECT_EQ(kRtpVideoVp8, frame_data_[i].rtp_video_header.codec);
const uint8_t temporal_idx =

View File

@ -279,8 +279,9 @@ GenericCodecTest::Perform(CmdArgs& args)
const float nBitrates = sizeof(bitRate)/sizeof(*bitRate);
float _bitRate = 0;
int _frameCnt = 0;
float totalBytesOneSec = 0;//, totalBytesTenSec;
float totalBytes, actualBitrate;
size_t totalBytesOneSec = 0;//, totalBytesTenSec;
size_t totalBytes;
float actualBitrate;
VCMFrameCount frameCount; // testing frame type counters
// start test
NumberOfCodecs = _vcm->NumberOfCodecs();
@ -478,7 +479,7 @@ GenericCodecTest::Print()
}
}
float
size_t
GenericCodecTest::WaitForEncodedFrame() const
{
int64_t startTime = _clock->TimeInMilliseconds();
@ -499,17 +500,17 @@ GenericCodecTest::IncrementDebugClock(float frameRate)
}
int
RTPSendCallback_SizeTest::SendPacket(int channel, const void *data, int len)
RTPSendCallback_SizeTest::SendPacket(int channel, const void *data, size_t len)
{
_nPackets++;
_payloadSizeSum += len;
// Make sure no payloads (len - header size) are larger than maxPayloadSize
TEST(len > 0 && static_cast<uint32_t>(len - 12) <= _maxPayloadSize);
TEST(len > 0 && len - 12 <= _maxPayloadSize);
return 0;
}
void
RTPSendCallback_SizeTest::SetMaxPayloadSize(uint32_t maxPayloadSize)
RTPSendCallback_SizeTest::SetMaxPayloadSize(size_t maxPayloadSize)
{
_maxPayloadSize = maxPayloadSize;
}
@ -533,12 +534,12 @@ RTPSendCallback_SizeTest::AveragePayloadSize() const
int32_t
VCMEncComplete_KeyReqTest::SendData(
const FrameType frameType,
const uint8_t payloadType,
const uint32_t timeStamp,
FrameType frameType,
uint8_t payloadType,
uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
const uint32_t payloadSize,
size_t payloadSize,
const RTPFragmentationHeader& /*fragmentationHeader*/,
const webrtc::RTPVideoHeader* /*videoHdr*/)
{

View File

@ -41,7 +41,7 @@ public:
~GenericCodecTest();
static int RunTest(CmdArgs& args);
int32_t Perform(CmdArgs& args);
float WaitForEncodedFrame() const;
size_t WaitForEncodedFrame() const;
private:
void Setup(CmdArgs& args);
@ -75,14 +75,18 @@ class RTPSendCallback_SizeTest : public webrtc::Transport
public:
// constructor input: (receive side) rtp module to send encoded data to
RTPSendCallback_SizeTest() : _maxPayloadSize(0), _payloadSizeSum(0), _nPackets(0) {}
virtual int SendPacket(int channel, const void *data, int len) OVERRIDE;
virtual int SendRTCPPacket(int channel, const void *data, int len) OVERRIDE {return 0;}
void SetMaxPayloadSize(uint32_t maxPayloadSize);
virtual int SendPacket(int channel, const void *data, size_t len) OVERRIDE;
virtual int SendRTCPPacket(int channel,
const void *data,
size_t len) OVERRIDE {
return 0;
}
void SetMaxPayloadSize(size_t maxPayloadSize);
void Reset();
float AveragePayloadSize() const;
private:
uint32_t _maxPayloadSize;
uint32_t _payloadSizeSum;
size_t _maxPayloadSize;
size_t _payloadSizeSum;
uint32_t _nPackets;
};
@ -91,12 +95,12 @@ class VCMEncComplete_KeyReqTest : public webrtc::VCMPacketizationCallback
public:
VCMEncComplete_KeyReqTest(webrtc::VideoCodingModule &vcm) : _vcm(vcm), _seqNo(0), _timeStamp(0) {}
virtual int32_t SendData(
const webrtc::FrameType frameType,
const uint8_t payloadType,
webrtc::FrameType frameType,
uint8_t payloadType,
uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
const uint32_t payloadSize,
size_t payloadSize,
const webrtc::RTPFragmentationHeader& fragmentationHeader,
const webrtc::RTPVideoHeader* videoHdr) OVERRIDE;
private:

View File

@ -308,7 +308,7 @@ MediaOptTest::Perform()
_vcm->RegisterReceiveCallback(&receiveCallback);
_frameCnt = 0;
_sumEncBytes = 0.0;
_sumEncBytes = 0;
_numFramesDropped = 0;
int half_width = (_width + 1) / 2;
int half_height = (_height + 1) / 2;
@ -338,7 +338,7 @@ MediaOptTest::Perform()
printf ("Decode error in frame # %d",_frameCnt);
}
float encBytes = encodeCompleteCallback->EncodedBytes();
size_t encBytes = encodeCompleteCallback->EncodedBytes();
if (encBytes == 0)
{
_numFramesDropped += 1;

View File

@ -80,7 +80,7 @@ private:
double _lossRate;
uint32_t _renderDelayMs;
int32_t _frameCnt;
float _sumEncBytes;
size_t _sumEncBytes;
int32_t _numFramesDropped;
std::string _codecName;
webrtc::VideoCodecType _sendCodecType;

View File

@ -30,7 +30,7 @@ TransportCallback::~TransportCallback()
}
int
TransportCallback::SendPacket(int channel, const void *data, int len)
TransportCallback::SendPacket(int channel, const void *data, size_t len)
{
_sendCount++;
_totalSentLength += len;

View File

@ -52,7 +52,7 @@ class TransportCallback:public RTPSendCompleteCallback
// Add packets to list
// Incorporate network conditions - delay and packet loss
// Actual transmission will occur on a separate thread
virtual int SendPacket(int channel, const void *data, int len) OVERRIDE;
virtual int SendPacket(int channel, const void *data, size_t len) OVERRIDE;
// Send to the receiver packets which are ready to be submitted
int TransportPackets();
};

View File

@ -71,12 +71,12 @@ void VCMNTEncodeCompleteCallback::RegisterTransportCallback(
int32_t
VCMNTEncodeCompleteCallback::SendData(
const FrameType frameType,
const uint8_t payloadType,
const uint32_t timeStamp,
FrameType frameType,
uint8_t payloadType,
uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
const uint32_t payloadSize,
size_t payloadSize,
const RTPFragmentationHeader& /*fragmentationHeader*/,
const webrtc::RTPVideoHeader* videoHdr)
@ -131,7 +131,7 @@ VCMNTEncodeCompleteCallback::RegisterReceiverVCM(VideoCodingModule *vcm)
_VCMReceiver = vcm;
return;
}
int32_t
size_t
VCMNTEncodeCompleteCallback::EncodedBytes()
{
return _encodedBytes;
@ -144,13 +144,13 @@ VCMNTEncodeCompleteCallback::SkipCnt()
}
// Decoded Frame Callback Implementation
VCMNTDecodeCompleCallback::~VCMNTDecodeCompleCallback()
VCMNTDecodeCompleteCallback::~VCMNTDecodeCompleteCallback()
{
if (_decodedFile)
fclose(_decodedFile);
}
int32_t
VCMNTDecodeCompleCallback::FrameToRender(webrtc::I420VideoFrame& videoFrame)
VCMNTDecodeCompleteCallback::FrameToRender(webrtc::I420VideoFrame& videoFrame)
{
if (videoFrame.width() != _currentWidth ||
videoFrame.height() != _currentHeight)
@ -167,13 +167,13 @@ VCMNTDecodeCompleCallback::FrameToRender(webrtc::I420VideoFrame& videoFrame)
if (PrintI420VideoFrame(videoFrame, _decodedFile) < 0) {
return -1;
}
_decodedBytes+= webrtc::CalcBufferSize(webrtc::kI420,
videoFrame.width(), videoFrame.height());
_decodedBytes += webrtc::CalcBufferSize(webrtc::kI420, videoFrame.width(),
videoFrame.height());
return VCM_OK;
}
int32_t
VCMNTDecodeCompleCallback::DecodedBytes()
size_t
VCMNTDecodeCompleteCallback::DecodedBytes()
{
return _decodedBytes;
}
@ -260,7 +260,7 @@ NormalTest::Perform(const CmdArgs& args)
// register a decoder (same codec for decoder and encoder )
TEST(_vcm->RegisterReceiveCodec(&_sendCodec, 1) == VCM_OK);
/* Callback Settings */
VCMNTDecodeCompleCallback _decodeCallback(_outname);
VCMNTDecodeCompleteCallback _decodeCallback(_outname);
_vcm->RegisterReceiveCallback(&_decodeCallback);
VCMNTEncodeCompleteCallback _encodeCompleteCallback(_encodedFile, *this);
_vcm->RegisterTransportCallback(&_encodeCompleteCallback);

View File

@ -33,12 +33,12 @@ class VCMNTEncodeCompleteCallback : public webrtc::VCMPacketizationCallback
// process encoded data received from the encoder,
// pass stream to the VCMReceiver module
virtual int32_t SendData(
const webrtc::FrameType frameType,
const uint8_t payloadType,
const uint32_t timeStamp,
webrtc::FrameType frameType,
uint8_t payloadType,
uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
const uint32_t payloadSize,
size_t payloadSize,
const webrtc::RTPFragmentationHeader& fragmentationHeader,
const webrtc::RTPVideoHeader* videoHdr) OVERRIDE;
@ -46,15 +46,15 @@ class VCMNTEncodeCompleteCallback : public webrtc::VCMPacketizationCallback
// Currently - encode and decode with the same vcm module.
void RegisterReceiverVCM(webrtc::VideoCodingModule *vcm);
// Return sum of encoded data (all frames in the sequence)
int32_t EncodedBytes();
size_t EncodedBytes();
// return number of encoder-skipped frames
uint32_t SkipCnt();;
uint32_t SkipCnt();
// conversion function for payload type (needed for the callback function)
// RTPVideoVideoCodecTypes ConvertPayloadType(uint8_t payloadType);
private:
FILE* _encodedFile;
uint32_t _encodedBytes;
size_t _encodedBytes;
uint32_t _skipCnt;
webrtc::VideoCodingModule* _VCMReceiver;
webrtc::FrameType _frameType;
@ -62,29 +62,29 @@ class VCMNTEncodeCompleteCallback : public webrtc::VCMPacketizationCallback
NormalTest& _test;
}; // end of VCMEncodeCompleteCallback
class VCMNTDecodeCompleCallback: public webrtc::VCMReceiveCallback
class VCMNTDecodeCompleteCallback: public webrtc::VCMReceiveCallback
{
public:
VCMNTDecodeCompleCallback(std::string outname): // or should it get a name?
_decodedFile(NULL),
_outname(outname),
_decodedBytes(0),
_currentWidth(0),
_currentHeight(0) {}
virtual ~VCMNTDecodeCompleCallback();
VCMNTDecodeCompleteCallback(std::string outname) // or should it get a name?
: _decodedFile(NULL),
_outname(outname),
_decodedBytes(0),
_currentWidth(0),
_currentHeight(0) {}
virtual ~VCMNTDecodeCompleteCallback();
void SetUserReceiveCallback(webrtc::VCMReceiveCallback* receiveCallback);
// will write decoded frame into file
virtual int32_t FrameToRender(webrtc::I420VideoFrame& videoFrame) OVERRIDE;
int32_t DecodedBytes();
size_t DecodedBytes();
private:
FILE* _decodedFile;
std::string _outname;
int _decodedBytes;
size_t _decodedBytes;
int _currentWidth;
int _currentHeight;
}; // end of VCMDecodeCompleCallback class
}; // end of VCMNTDecodeCompleteCallback class
class NormalTest
{
@ -119,7 +119,7 @@ protected:
std::string _inname;
std::string _outname;
std::string _encodedName;
int32_t _sumEncBytes;
size_t _sumEncBytes;
FILE* _sourceFile;
FILE* _decodedFile;
FILE* _encodedFile;

View File

@ -212,7 +212,7 @@ QualityModesTest::Perform(const CmdArgs& args)
// register a decoder (same codec for decoder and encoder )
TEST(_vcm->RegisterReceiveCodec(&codec, 2) == VCM_OK);
/* Callback Settings */
VCMQMDecodeCompleCallback _decodeCallback(
VCMQMDecodeCompleteCallback _decodeCallback(
_decodedFile, _nativeFrameRate, feature_table_name_);
_vcm->RegisterReceiveCallback(&_decodeCallback);
VCMNTEncodeCompleteCallback _encodeCompleteCallback(_encodedFile, *this);
@ -449,7 +449,7 @@ QMTestVideoSettingsCallback::SetVideoQMSettings(const uint32_t frameRate,
}
// Decoded Frame Callback Implementation
VCMQMDecodeCompleCallback::VCMQMDecodeCompleCallback(
VCMQMDecodeCompleteCallback::VCMQMDecodeCompleteCallback(
FILE* decodedFile, int frame_rate, std::string feature_table_name):
_decodedFile(decodedFile),
_decodedBytes(0),
@ -468,7 +468,7 @@ feature_table_name_(feature_table_name)
//
}
VCMQMDecodeCompleCallback::~VCMQMDecodeCompleCallback()
VCMQMDecodeCompleteCallback::~VCMQMDecodeCompleteCallback()
{
// if (_interpolator != NULL)
// {
@ -483,7 +483,7 @@ VCMQMDecodeCompleCallback::~VCMQMDecodeCompleCallback()
}
int32_t
VCMQMDecodeCompleCallback::FrameToRender(I420VideoFrame& videoFrame)
VCMQMDecodeCompleteCallback::FrameToRender(I420VideoFrame& videoFrame)
{
++frames_cnt_since_drop_;
@ -537,19 +537,19 @@ VCMQMDecodeCompleCallback::FrameToRender(I420VideoFrame& videoFrame)
return VCM_OK;
}
int32_t VCMQMDecodeCompleCallback::DecodedBytes()
size_t VCMQMDecodeCompleteCallback::DecodedBytes()
{
return _decodedBytes;
}
void VCMQMDecodeCompleCallback::SetOriginalFrameDimensions(int32_t width,
int32_t height)
void VCMQMDecodeCompleteCallback::SetOriginalFrameDimensions(int32_t width,
int32_t height)
{
_origWidth = width;
_origHeight = height;
}
int32_t VCMQMDecodeCompleCallback::buildInterpolator()
int32_t VCMQMDecodeCompleteCallback::buildInterpolator()
{
uint32_t decFrameLength = _origWidth*_origHeight*3 >> 1;
if (_decBuffer != NULL)
@ -569,7 +569,7 @@ int32_t VCMQMDecodeCompleCallback::buildInterpolator()
// frame (or several consecutive frames from the end) must have been dropped. If
// this is the case, the last frame is repeated so that there are as many
// frames rendered as there are number of frames encoded.
void VCMQMDecodeCompleCallback::WriteEnd(int input_frame_count)
void VCMQMDecodeCompleteCallback::WriteEnd(int input_frame_count)
{
int num_missing_frames = input_frame_count - _frameCnt;
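// [Editor's note] Illustrative sketch, not part of this CL: the repetition
// described in the comment above could look like the loop below, where
// last_rendered_frame_ is a hypothetical copy of the most recently written
// frame:
//   for (int i = 0; i < num_missing_frames; ++i)
//     PrintI420VideoFrame(last_rendered_frame_, _decodedFile);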

View File

@ -51,18 +51,18 @@ private:
}; // end of QualityModesTest class
class VCMQMDecodeCompleCallback: public webrtc::VCMReceiveCallback
class VCMQMDecodeCompleteCallback: public webrtc::VCMReceiveCallback
{
public:
VCMQMDecodeCompleCallback(
VCMQMDecodeCompleteCallback(
FILE* decodedFile,
int frame_rate,
std::string feature_table_name);
virtual ~VCMQMDecodeCompleCallback();
virtual ~VCMQMDecodeCompleteCallback();
void SetUserReceiveCallback(webrtc::VCMReceiveCallback* receiveCallback);
// will write decoded frame into file
int32_t FrameToRender(webrtc::I420VideoFrame& videoFrame);
int32_t DecodedBytes();
size_t DecodedBytes();
void SetOriginalFrameDimensions(int32_t width, int32_t height);
int32_t buildInterpolator();
// Check if last frame is dropped, if so, repeat the last rendered frame.
@ -70,7 +70,7 @@ public:
private:
FILE* _decodedFile;
uint32_t _decodedBytes;
size_t _decodedBytes;
// QualityModesTest& _test;
int _origWidth;
int _origHeight;
@ -86,7 +86,7 @@ private:
}; // end of VCMQMDecodeCompleCallback class
}; // end of VCMQMDecodeCompleteCallback class
class QMTestVideoSettingsCallback : public webrtc::VCMQMSettingsCallback
{

View File

@ -29,7 +29,7 @@ class RtpDataCallback : public webrtc::NullRtpData {
virtual int32_t OnReceivedPayloadData(
const uint8_t* payload_data,
const uint16_t payload_size,
const size_t payload_size,
const webrtc::WebRtcRTPHeader* rtp_header) OVERRIDE {
return vcm_->IncomingPacket(payload_data, payload_size, *rtp_header);
}

View File

@ -41,7 +41,7 @@ enum {
class RawRtpPacket {
public:
RawRtpPacket(const uint8_t* data, uint32_t length, uint32_t ssrc,
RawRtpPacket(const uint8_t* data, size_t length, uint32_t ssrc,
uint16_t seq_num)
: data_(new uint8_t[length]),
length_(length),
@ -53,7 +53,7 @@ class RawRtpPacket {
}
const uint8_t* data() const { return data_.get(); }
uint32_t length() const { return length_; }
size_t length() const { return length_; }
int64_t resend_time_ms() const { return resend_time_ms_; }
void set_resend_time_ms(int64_t timeMs) { resend_time_ms_ = timeMs; }
uint32_t ssrc() const { return ssrc_; }
@ -61,7 +61,7 @@ class RawRtpPacket {
private:
scoped_ptr<uint8_t[]> data_;
uint32_t length_;
size_t length_;
int64_t resend_time_ms_;
uint32_t ssrc_;
uint16_t seq_num_;
@ -251,7 +251,7 @@ class SsrcHandlers {
return 0;
}
void IncomingPacket(const uint8_t* data, uint32_t length) {
void IncomingPacket(const uint8_t* data, size_t length) {
for (HandlerMapIt it = handlers_.begin(); it != handlers_.end(); ++it) {
if (!it->second->rtp_header_parser_->IsRtcp(data, length)) {
RTPHeader header;
@ -375,14 +375,10 @@ class RtpPlayerImpl : public RtpPlayerInterface {
if (reordering_ && reorder_buffer_.get() == NULL) {
reorder_buffer_.reset(
new RawRtpPacket(next_packet_.data,
static_cast<uint32_t>(next_packet_.length),
0,
0));
new RawRtpPacket(next_packet_.data, next_packet_.length, 0, 0));
return 0;
}
int ret = SendPacket(next_packet_.data,
static_cast<uint32_t>(next_packet_.length));
int ret = SendPacket(next_packet_.data, next_packet_.length);
if (reorder_buffer_.get()) {
SendPacket(reorder_buffer_->data(), reorder_buffer_->length());
reorder_buffer_.reset(NULL);
@ -421,7 +417,7 @@ class RtpPlayerImpl : public RtpPlayerInterface {
}
private:
int SendPacket(const uint8_t* data, uint32_t length) {
int SendPacket(const uint8_t* data, size_t length) {
assert(data);
assert(length > 0);

View File

@ -57,7 +57,7 @@ VCMEncodeCompleteCallback::SendData(
const uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
const uint32_t payloadSize,
const size_t payloadSize,
const RTPFragmentationHeader& fragmentationHeader,
const RTPVideoHeader* videoHdr)
{
@ -106,7 +106,7 @@ VCMEncodeCompleteCallback::SendData(
return ret;
}
float
size_t
VCMEncodeCompleteCallback::EncodedBytes()
{
return _encodedBytes;
@ -147,12 +147,12 @@ VCMEncodeCompleteCallback::ResetByteCount()
int32_t
VCMRTPEncodeCompleteCallback::SendData(
const FrameType frameType,
const uint8_t payloadType,
const uint32_t timeStamp,
FrameType frameType,
uint8_t payloadType,
uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
const uint32_t payloadSize,
size_t payloadSize,
const RTPFragmentationHeader& fragmentationHeader,
const RTPVideoHeader* videoHdr)
{
@ -169,11 +169,11 @@ VCMRTPEncodeCompleteCallback::SendData(
videoHdr);
}
float
size_t
VCMRTPEncodeCompleteCallback::EncodedBytes()
{
// only good for one call - after which will reset value;
float tmp = _encodedBytes;
size_t tmp = _encodedBytes;
_encodedBytes = 0;
return tmp;
}
@ -197,12 +197,12 @@ VCMDecodeCompleteCallback::FrameToRender(I420VideoFrame& videoFrame)
if (PrintI420VideoFrame(videoFrame, _decodedFile) < 0) {
return -1;
}
_decodedBytes+= CalcBufferSize(kI420, videoFrame.width(),
videoFrame.height());
_decodedBytes += CalcBufferSize(kI420, videoFrame.width(),
videoFrame.height());
return VCM_OK;
}
int32_t
size_t
VCMDecodeCompleteCallback::DecodedBytes()
{
return _decodedBytes;
@ -248,7 +248,7 @@ RTPSendCompleteCallback::~RTPSendCompleteCallback()
}
int
RTPSendCompleteCallback::SendPacket(int channel, const void *data, int len)
RTPSendCompleteCallback::SendPacket(int channel, const void *data, size_t len)
{
_sendCount++;
_totalSentLength += len;
@ -319,11 +319,13 @@ RTPSendCompleteCallback::SendPacket(int channel, const void *data, int len)
delete packet;
packet = NULL;
}
return len; // OK
return static_cast<int>(len); // OK
}
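// [Editor's note] Illustrative sketch, not part of this CL: the Transport
// callbacks now take size_t lengths but keep their int return value, so the
// success path narrows explicitly while errors stay negative. Hypothetical
// minimal implementation:
static int FakeTransportSend(const void* data, size_t len) {
  if (data == NULL || len == 0)
    return -1;                    // error path stays a negative int
  return static_cast<int>(len);   // success: bytes "sent", narrowed to int
}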
int
RTPSendCompleteCallback::SendRTCPPacket(int channel, const void *data, int len)
RTPSendCompleteCallback::SendRTCPPacket(int channel,
const void *data,
size_t len)
{
// Incorporate network conditions
return SendPacket(channel, data, len);

View File

@ -44,12 +44,12 @@ public:
void RegisterTransportCallback(VCMPacketizationCallback* transport);
// Process encoded data received from the encoder, pass stream to the
// VCMReceiver module
virtual int32_t SendData(const FrameType frameType,
const uint8_t payloadType,
const uint32_t timeStamp,
virtual int32_t SendData(FrameType frameType,
uint8_t payloadType,
uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
const uint32_t payloadSize,
size_t payloadSize,
const RTPFragmentationHeader& fragmentationHeader,
const RTPVideoHeader* videoHdr) OVERRIDE;
// Register existing VCM. Currently - encode and decode under same module.
@ -57,7 +57,7 @@ public:
// Return size of last encoded frame data (all frames in the sequence)
// Good for only one call - after which will reset value
// (to allow detection of frame drop)
float EncodedBytes();
size_t EncodedBytes();
// Return encode complete (true/false)
bool EncodeComplete();
// Inform callback of codec used
@ -77,7 +77,7 @@ public:
private:
FILE* _encodedFile;
float _encodedBytes;
size_t _encodedBytes;
VideoCodingModule* _VCMReceiver;
FrameType _frameType;
uint16_t _seqNo;
@ -101,17 +101,17 @@ public:
virtual ~VCMRTPEncodeCompleteCallback() {}
// Process encoded data received from the encoder, pass stream to the
// RTP module
virtual int32_t SendData(const FrameType frameType,
const uint8_t payloadType,
const uint32_t timeStamp,
virtual int32_t SendData(FrameType frameType,
uint8_t payloadType,
uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
const uint32_t payloadSize,
size_t payloadSize,
const RTPFragmentationHeader& fragmentationHeader,
const RTPVideoHeader* videoHdr) OVERRIDE;
// Return size of last encoded frame. Value good for one call
// (resets to zero after call to inform test of frame drop)
float EncodedBytes();
size_t EncodedBytes();
// Return encode complete (true/false)
bool EncodeComplete();
// Inform callback of codec used
@ -126,7 +126,7 @@ public:
}
private:
float _encodedBytes;
size_t _encodedBytes;
FrameType _frameType;
bool _encodeComplete;
RtpRtcp* _RTPModule;
@ -145,10 +145,10 @@ public:
virtual ~VCMDecodeCompleteCallback() {}
// Write decoded frame into file
virtual int32_t FrameToRender(webrtc::I420VideoFrame& videoFrame) OVERRIDE;
int32_t DecodedBytes();
size_t DecodedBytes();
private:
FILE* _decodedFile;
uint32_t _decodedBytes;
FILE* _decodedFile;
size_t _decodedBytes;
}; // end of VCMDecodeCompleCallback class
// Transport callback
@ -165,9 +165,11 @@ public:
void SetRtpModule(RtpRtcp* rtp_module) { _rtp = rtp_module; }
// Send Packet to receive side RTP module
virtual int SendPacket(int channel, const void *data, int len) OVERRIDE;
virtual int SendPacket(int channel, const void *data, size_t len) OVERRIDE;
// Send RTCP Packet to receive side RTP module
virtual int SendRTCPPacket(int channel, const void *data, int len) OVERRIDE;
virtual int SendRTCPPacket(int channel,
const void *data,
size_t len) OVERRIDE;
// Set percentage of channel loss in the network
void SetLossPct(double lossPct);
// Set average size of burst loss
@ -181,7 +183,7 @@ public:
// Return send count
int SendCount() {return _sendCount; }
// Return accumulated length in bytes of transmitted packets
uint32_t TotalSentLength() {return _totalSentLength;}
size_t TotalSentLength() {return _totalSentLength;}
protected:
// Randomly decide whether to drop packets, based on the channel model
bool PacketLoss();
@ -198,7 +200,7 @@ protected:
uint32_t _networkDelayMs;
double _jitterVar;
bool _prevLossState;
uint32_t _totalSentLength;
size_t _totalSentLength;
std::list<RtpPacket*> _rtpPackets;
RtpDump* _rtpDump;
};

View File

@ -51,7 +51,7 @@ double NormalDist(double mean, double stdDev);
struct RtpPacket {
uint8_t data[1650]; // max packet size
int32_t length;
size_t length;
int64_t receiveTime;
};

View File

@ -53,13 +53,13 @@ class VcmPayloadSinkFactory::VcmPayloadSink
// PayloadSinkInterface
virtual int32_t OnReceivedPayloadData(
const uint8_t* payload_data,
const uint16_t payload_size,
const size_t payload_size,
const WebRtcRTPHeader* rtp_header) OVERRIDE {
return vcm_->IncomingPacket(payload_data, payload_size, *rtp_header);
}
virtual bool OnRecoveredPacket(const uint8_t* packet,
int packet_length) OVERRIDE {
size_t packet_length) OVERRIDE {
// We currently don't handle FEC.
return true;
}

View File

@ -55,7 +55,7 @@ _frameRate(frameRate)
assert(frameRate > 0);
}
int32_t
size_t
VideoSource::GetFrameLength() const
{
return webrtc::CalcBufferSize(_type, _width, _height);

View File

@ -69,7 +69,7 @@ public:
// Returns the filename with the path (including the leading slash) removed.
std::string GetName() const;
int32_t GetFrameLength() const;
size_t GetFrameLength() const;
private:
std::string _fileName;