Report encoded frame size in VideoSendStream.

Implements reporting transmitted frame size in WebRtcVideoEngine2.

R=mflodman@webrtc.org, stefan@webrtc.org
BUG=4033

Review URL: https://webrtc-codereview.appspot.com/33399004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7772 4adac7df-926f-26a2-2b94-8c16560cd09d
Author: pbos@webrtc.org
Date:   2014-12-01 15:23:21 +00:00
Commit: 273a414b0e (parent 1db20a4180)

55 changed files with 442 additions and 251 deletions
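
The API change threading through every file below is that EncodedImageCallback::Encoded() now takes its EncodedImage by const reference rather than by non-const reference, so callbacks can read out the encoded frame size but can no longer mutate the frame in place. As a minimal sketch of a conforming callback after this commit, assuming only the signature visible in the hunks below (the FrameSizeObserver name and its byte counter are illustrative, not part of this change):

    // Illustrative only: records the size of the most recent encoded frame.
    class FrameSizeObserver : public webrtc::EncodedImageCallback {
     public:
      FrameSizeObserver() : last_frame_bytes_(0) {}
      virtual int32_t Encoded(
          const webrtc::EncodedImage& encoded_image,
          const webrtc::CodecSpecificInfo* codec_specific_info,
          const webrtc::RTPFragmentationHeader* fragmentation) OVERRIDE {
        last_frame_bytes_ = encoded_image._length;  // Read-only access suffices.
        return 0;  // Mirrors the `return 0;` the callbacks below use on success.
      }
      size_t last_frame_bytes() const { return last_frame_bytes_; }

     private:
      size_t last_frame_bytes_;
    };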

View File

@@ -21,7 +21,7 @@ namespace webrtc {
 class MockEncodedImageCallback : public EncodedImageCallback {
  public:
-  MOCK_METHOD3(Encoded, int32_t(EncodedImage& encodedImage,
+  MOCK_METHOD3(Encoded, int32_t(const EncodedImage& encodedImage,
                                 const CodecSpecificInfo* codecSpecificInfo,
                                 const RTPFragmentationHeader* fragmentation));
 };

View File

@@ -36,7 +36,6 @@ PacketManipulatorImpl::~PacketManipulatorImpl() {
 int PacketManipulatorImpl::ManipulatePackets(
     webrtc::EncodedImage* encoded_image) {
-  assert(encoded_image);
   int nbr_packets_dropped = 0;
   // There's no need to build a copy of the image data since viewing an
   // EncodedImage object, setting the length to a new lower value represents

View File

@@ -82,8 +82,7 @@ class PacketManipulator {
   // If packets are dropped from frame data, the completedFrame field will be
   // set to false.
   // Returns the number of packets being dropped.
-  virtual int
-      ManipulatePackets(webrtc::EncodedImage* encoded_image) = 0;
+  virtual int ManipulatePackets(webrtc::EncodedImage* encoded_image) = 0;
 };
 class PacketManipulatorImpl : public PacketManipulator {
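
Read together with the bookkeeping in video_processor.cc further down (stat.total_packets = _length / packet_size_in_bytes + 1), the contract here is that the manipulator may shrink encoded_image->_length in place to simulate lost trailing packets, and returns how many packets it dropped. A hedged usage sketch against this interface; the helper name, the packet-size parameter, and the assert are assumptions, not this commit's code:

    #include <assert.h>

    // Illustrative only: run one frame through simulated packet loss.
    int SimulateLossForFrame(PacketManipulator* manipulator,
                             webrtc::EncodedImage* image,
                             size_t packet_size_in_bytes) {
      // Same packet-count arithmetic as video_processor.cc below:
      // integer division, plus one final (possibly partial) packet.
      int total_packets =
          static_cast<int>(image->_length / packet_size_in_bytes + 1);
      int dropped = manipulator->ManipulatePackets(image);
      assert(dropped <= total_packets);
      // image->_length may now be smaller; callers must treat the frame as
      // possibly truncated from here on.
      return dropped;
    }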

View File

@@ -223,12 +223,12 @@ bool VideoProcessorImpl::ProcessFrame(int frame_number) {
   }
 }
-void VideoProcessorImpl::FrameEncoded(EncodedImage* encoded_image) {
+void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
   // Timestamp is frame number, so this gives us #dropped frames.
-  int num_dropped_from_prev_encode = encoded_image->_timeStamp -
+  int num_dropped_from_prev_encode = encoded_image._timeStamp -
       prev_time_stamp_ - 1;
   num_dropped_frames_ += num_dropped_from_prev_encode;
-  prev_time_stamp_ = encoded_image->_timeStamp;
+  prev_time_stamp_ = encoded_image._timeStamp;
   if (num_dropped_from_prev_encode > 0) {
     // For dropped frames, we write out the last decoded frame to avoid getting
     // out of sync for the computation of PSNR and SSIM.
@@ -238,25 +238,25 @@ void VideoProcessorImpl::FrameEncoded(EncodedImage* encoded_image) {
   }
   // Frame is not dropped, so update the encoded frame size
   // (encoder callback is only called for non-zero length frames).
-  encoded_frame_size_ = encoded_image->_length;
+  encoded_frame_size_ = encoded_image._length;
   TickTime encode_stop = TickTime::Now();
-  int frame_number = encoded_image->_timeStamp;
+  int frame_number = encoded_image._timeStamp;
   FrameStatistic& stat = stats_->stats_[frame_number];
   stat.encode_time_in_us = GetElapsedTimeMicroseconds(encode_start_,
                                                       encode_stop);
   stat.encoding_successful = true;
-  stat.encoded_frame_length_in_bytes = encoded_image->_length;
-  stat.frame_number = encoded_image->_timeStamp;
-  stat.frame_type = encoded_image->_frameType;
-  stat.bit_rate_in_kbps = encoded_image->_length * bit_rate_factor_;
-  stat.total_packets = encoded_image->_length /
+  stat.encoded_frame_length_in_bytes = encoded_image._length;
+  stat.frame_number = encoded_image._timeStamp;
+  stat.frame_type = encoded_image._frameType;
+  stat.bit_rate_in_kbps = encoded_image._length * bit_rate_factor_;
+  stat.total_packets = encoded_image._length /
       config_.networking_config.packet_size_in_bytes + 1;
   // Perform packet loss if criteria is fulfilled:
   bool exclude_this_frame = false;
   // Only keyframes can be excluded
-  if (encoded_image->_frameType == kKeyFrame) {
+  if (encoded_image._frameType == kKeyFrame) {
     switch (config_.exclude_frame_types) {
       case kExcludeOnlyFirstKeyFrame:
         if (!first_key_frame_has_been_excluded_) {
@@ -271,9 +271,15 @@ void VideoProcessorImpl::FrameEncoded(EncodedImage* encoded_image) {
         assert(false);
     }
   }
+  scoped_ptr<uint8_t[]> copied_buffer(new uint8_t[encoded_image._length]);
+  memcpy(copied_buffer.get(), encoded_image._buffer, encoded_image._length);
+  EncodedImage copied_image;
+  memcpy(&copied_image, &encoded_image, sizeof(copied_image));
+  copied_image._size = copied_image._length;
+  copied_image._buffer = copied_buffer.get();
   if (!exclude_this_frame) {
     stat.packets_dropped =
-        packet_manipulator_->ManipulatePackets(encoded_image);
+        packet_manipulator_->ManipulatePackets(&copied_image);
   }
   // Keep track of if frames are lost due to packet loss so we can tell
@@ -281,8 +287,8 @@
   decode_start_ = TickTime::Now();
   // TODO(kjellander): Pass fragmentation header to the decoder when
   // CL 172001 has been submitted and PacketManipulator supports this.
-  int32_t decode_result = decoder_->Decode(*encoded_image, last_frame_missing_,
-                                           NULL);
+  int32_t decode_result =
+      decoder_->Decode(copied_image, last_frame_missing_, NULL);
   stat.decode_return_code = decode_result;
   if (decode_result != WEBRTC_VIDEO_CODEC_OK) {
     // Write the last successful frame to the output file to avoid getting it out
@@ -290,7 +296,7 @@
     frame_writer_->WriteFrame(last_successful_frame_buffer_);
   }
   // save status for losses so we can inform the decoder for the next frame:
-  last_frame_missing_ = encoded_image->_length == 0;
+  last_frame_missing_ = copied_image._length == 0;
 }
 void VideoProcessorImpl::FrameDecoded(const I420VideoFrame& image) {
@@ -399,10 +405,10 @@ const char* VideoCodecTypeToStr(webrtc::VideoCodecType e) {
 // Callbacks
 int32_t
 VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
-    EncodedImage& encoded_image,
+    const EncodedImage& encoded_image,
     const webrtc::CodecSpecificInfo* codec_specific_info,
     const webrtc::RTPFragmentationHeader* fragmentation) {
-  video_processor_->FrameEncoded(&encoded_image);  // Forward to parent class.
+  video_processor_->FrameEncoded(encoded_image);  // Forward to parent class.
   return 0;
 }
 int32_t
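
The copying block added above is the crux of this file's change: FrameEncoded() now receives the image as const, but PacketManipulator::ManipulatePackets() still mutates its argument (lowering _length to simulate loss), so the processor first makes a writable clone — a shallow copy of the struct for the metadata plus a privately owned copy of the payload buffer. The same pattern as a standalone helper, offered as a sketch only (the function name and parameters are assumptions, not this commit's API):

    #include <string.h>

    // Illustrative only: clone a const EncodedImage so it can be mutated.
    void CloneEncodedImage(const webrtc::EncodedImage& in,
                           webrtc::EncodedImage* out,
                           scoped_ptr<uint8_t[]>* storage) {
      storage->reset(new uint8_t[in._length]);
      memcpy(storage->get(), in._buffer, in._length);
      *out = in;                      // Shallow copy: timestamp, frame type, ...
      out->_buffer = storage->get();  // Re-point at the privately owned bytes.
      out->_size = in._length;        // Capacity equals the payload length.
    }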

View File

@@ -168,7 +168,7 @@ class VideoProcessorImpl : public VideoProcessor {
  private:
   // Invoked by the callback when a frame has completed encoding.
-  void FrameEncoded(webrtc::EncodedImage* encodedImage);
+  void FrameEncoded(const webrtc::EncodedImage& encodedImage);
   // Invoked by the callback when a frame has completed decoding.
   void FrameDecoded(const webrtc::I420VideoFrame& image);
   // Used for getting a 32-bit integer representing time
@@ -226,9 +226,9 @@ class VideoProcessorImpl : public VideoProcessor {
    explicit VideoProcessorEncodeCompleteCallback(VideoProcessorImpl* vp)
        : video_processor_(vp) {}
    virtual int32_t Encoded(
-       webrtc::EncodedImage& encoded_image,
-       const webrtc::CodecSpecificInfo* codec_specific_info = NULL,
-       const webrtc::RTPFragmentationHeader* fragmentation = NULL) OVERRIDE;
+       const webrtc::EncodedImage& encoded_image,
+       const webrtc::CodecSpecificInfo* codec_specific_info,
+       const webrtc::RTPFragmentationHeader* fragmentation) OVERRIDE;
   private:
    VideoProcessorImpl* video_processor_;
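
Note that the rewritten declaration above also drops the "= NULL" defaults from the overriding Encoded(). Presumably this is the usual virtual-function hygiene: default arguments are bound by the static type at the call site, not by the dynamic type, so restating or varying them on overrides is a trap. A self-contained illustration of the pitfall (not from this commit's files):

    #include <iostream>

    struct Base {
      virtual ~Base() {}
      virtual void Report(int bytes = 100) { std::cout << bytes << "\n"; }
    };

    struct Derived : Base {
      // Virtual dispatch picks this body, but the default argument still
      // comes from the static type of the calling expression.
      virtual void Report(int bytes = 200) { std::cout << bytes << "\n"; }
    };

    int main() {
      Derived d;
      Base* b = &d;
      b->Report();  // Runs Derived::Report, yet prints 100.
      d.Report();   // Prints 200.
      return 0;
    }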

View File

@@ -223,12 +223,10 @@ size_t VideoEncodeCompleteCallback::EncodedBytes()
     return _encodedBytes;
 }
-int32_t
-VideoEncodeCompleteCallback::Encoded(EncodedImage& encodedImage,
-        const webrtc::CodecSpecificInfo* codecSpecificInfo,
-        const webrtc::RTPFragmentationHeader*
-        fragmentation)
-{
+int32_t VideoEncodeCompleteCallback::Encoded(
+    const EncodedImage& encodedImage,
+    const webrtc::CodecSpecificInfo* codecSpecificInfo,
+    const webrtc::RTPFragmentationHeader* fragmentation) {
     _test.Encoded(encodedImage);
     VideoFrame *newBuffer = new VideoFrame();
     newBuffer->VerifyAndAllocate(encodedImage._size);
@@ -564,7 +562,7 @@ void NormalAsyncTest::CodecSpecific_InitBitrate()
 }
 void NormalAsyncTest::CopyEncodedImage(VideoFrame& dest,
-                                       EncodedImage& src,
+                                       const EncodedImage& src,
                                        void* /*codecSpecificInfo*/) const
 {
     dest.CopyFrame(src._length, src._buffer);

View File

@@ -85,7 +85,7 @@ public:
    CopyCodecSpecificInfo(
        const webrtc::CodecSpecificInfo* codecSpecificInfo) const;
    virtual void CopyEncodedImage(webrtc::VideoFrame& dest,
-                                 webrtc::EncodedImage& src,
+                                 const webrtc::EncodedImage& src,
                                  void* /*codecSpecificInfo*/) const;
    virtual webrtc::CodecSpecificInfo* CreateEncoderSpecificInfo() const
    {
@@ -149,10 +149,9 @@ public:
      _encodedBytes(0)
    {}
-   int32_t
-   Encoded(webrtc::EncodedImage& encodedImage,
-           const webrtc::CodecSpecificInfo* codecSpecificInfo = NULL,
-           const webrtc::RTPFragmentationHeader* fragmentation = NULL);
+   int32_t Encoded(const webrtc::EncodedImage& encodedImage,
+                   const webrtc::CodecSpecificInfo* codecSpecificInfo,
+                   const webrtc::RTPFragmentationHeader* fragmentation);
    size_t EncodedBytes();
 private:
    FILE* _encodedFile;

View File

@@ -91,7 +91,7 @@ UnitTest::~UnitTest()
 }
 int32_t
-UnitTestEncodeCompleteCallback::Encoded(EncodedImage& encodedImage,
+UnitTestEncodeCompleteCallback::Encoded(const EncodedImage& encodedImage,
                          const webrtc::CodecSpecificInfo* codecSpecificInfo,
                          const webrtc::RTPFragmentationHeader*
                          fragmentation)

View File

@@ -79,9 +79,9 @@ public:
                void* decoderSpecificInfo = NULL) :
      _encodedVideoBuffer(buffer),
      _encodeComplete(false) {}
-   int32_t Encoded(webrtc::EncodedImage& encodedImage,
+   int32_t Encoded(const webrtc::EncodedImage& encodedImage,
                    const webrtc::CodecSpecificInfo* codecSpecificInfo,
-                   const webrtc::RTPFragmentationHeader* fragmentation = NULL);
+                   const webrtc::RTPFragmentationHeader* fragmentation);
    bool EncodeComplete();
    // Note that this only makes sense if an encode has been completed
    webrtc::VideoFrameType EncodedFrameType() const;

View File

@@ -33,7 +33,7 @@ class Vp8UnitTestEncodeCompleteCallback : public webrtc::EncodedImageCallback {
                void* decoderSpecificInfo)
       : encoded_video_frame_(frame),
         encode_complete_(false) {}
-  int Encoded(EncodedImage& encodedImage,
+  int Encoded(const EncodedImage& encodedImage,
               const CodecSpecificInfo* codecSpecificInfo,
               const RTPFragmentationHeader*);
   bool EncodeComplete();
@@ -46,7 +46,7 @@ class Vp8UnitTestEncodeCompleteCallback : public webrtc::EncodedImageCallback {
   VideoFrameType encoded_frame_type_;
 };
-int Vp8UnitTestEncodeCompleteCallback::Encoded(EncodedImage& encodedImage,
+int Vp8UnitTestEncodeCompleteCallback::Encoded(const EncodedImage& encodedImage,
     const CodecSpecificInfo* codecSpecificInfo,
     const RTPFragmentationHeader* fragmentation) {
   encoded_video_frame_->VerifyAndAllocate(encodedImage._size);

View File

@@ -25,7 +25,7 @@ class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
       : encoded_file_(encoded_file),
         encoded_bytes_(0) {}
   ~Vp8SequenceCoderEncodeCallback();
-  int Encoded(webrtc::EncodedImage& encoded_image,
+  int Encoded(const webrtc::EncodedImage& encoded_image,
               const webrtc::CodecSpecificInfo* codecSpecificInfo,
               const webrtc::RTPFragmentationHeader*);
   // Returns the encoded image.
@@ -42,7 +42,7 @@ Vp8SequenceCoderEncodeCallback::~Vp8SequenceCoderEncodeCallback() {
   encoded_image_._buffer = NULL;
 }
 int Vp8SequenceCoderEncodeCallback::Encoded(
-    webrtc::EncodedImage& encoded_image,
+    const webrtc::EncodedImage& encoded_image,
     const webrtc::CodecSpecificInfo* codecSpecificInfo,
     const webrtc::RTPFragmentationHeader* fragmentation) {
   if (encoded_image_._size < encoded_image._size) {
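
The truncated condition on the last line guards a grow-only buffer: this callback keeps its own copy of the latest encoded frame and reallocates only when an incoming frame exceeds the current capacity. The source is cut off here; one plausible shape of that branch, offered purely as an illustration of the pattern and not as the file's actual code:

    // Illustration only; the real body is not shown above.
    if (encoded_image_._size < encoded_image._size) {
      delete [] encoded_image_._buffer;
      encoded_image_._buffer = new uint8_t[encoded_image._size];
      encoded_image_._size = encoded_image._size;
    }
    memcpy(encoded_image_._buffer, encoded_image._buffer,
           encoded_image._length);
    encoded_image_._length = encoded_image._length;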