Lint fix for webrtc/modules/video_coding PART 2!

Trying to submit all changes at once proved impossible since there were
too many changes in too many files. The changes to PRESUBMIT.py
will be uploaded in the last CL.
(original CL: https://codereview.webrtc.org/1528503003/)

BUG=webrtc:5309
TBR=mflodman@webrtc.org

Review URL: https://codereview.webrtc.org/1543503002

Cr-Commit-Position: refs/heads/master@{#11102}
Author: philipel
Date: 2015-12-21 04:12:39 -08:00
Committed by: Commit bot
Parent: ff483617a4
Commit: 9d3ab61325

48 changed files with 3744 additions and 10210 deletions

webrtc/modules/video_coding/content_metrics_processing.cc

@@ -38,15 +38,15 @@ int VCMContentMetricsProcessing::Reset() {
   recursive_avg_->Reset();
   uniform_avg_->Reset();
   frame_cnt_uniform_avg_ = 0;
   avg_motion_level_ = 0.0f;
   avg_spatial_level_ = 0.0f;
   return VCM_OK;
 }

 void VCMContentMetricsProcessing::UpdateFrameRate(uint32_t frameRate) {
   // Update factor for recursive averaging.
-  recursive_avg_factor_ = static_cast<float> (1000.0f) /
+  recursive_avg_factor_ = static_cast<float>(1000.0f) /
       static_cast<float>(frameRate * kQmMinIntervalMs);
 }

 VideoContentMetrics* VCMContentMetricsProcessing::LongTermAvgData() {
@@ -58,10 +58,10 @@ VideoContentMetrics* VCMContentMetricsProcessing::ShortTermAvgData() {
     return NULL;
   }
   // Two metrics are used: motion and spatial level.
-  uniform_avg_->motion_magnitude = avg_motion_level_ /
-      static_cast<float>(frame_cnt_uniform_avg_);
-  uniform_avg_->spatial_pred_err = avg_spatial_level_ /
-      static_cast<float>(frame_cnt_uniform_avg_);
+  uniform_avg_->motion_magnitude =
+      avg_motion_level_ / static_cast<float>(frame_cnt_uniform_avg_);
+  uniform_avg_->spatial_pred_err =
+      avg_spatial_level_ / static_cast<float>(frame_cnt_uniform_avg_);
   return uniform_avg_;
 }
@@ -73,7 +73,7 @@ void VCMContentMetricsProcessing::ResetShortTermAvgData() {
 }

 int VCMContentMetricsProcessing::UpdateContentData(
-    const VideoContentMetrics *contentMetrics) {
+    const VideoContentMetrics* contentMetrics) {
   if (contentMetrics == NULL) {
     return VCM_OK;
   }
@@ -81,7 +81,7 @@ int VCMContentMetricsProcessing::UpdateContentData(
 }

 int VCMContentMetricsProcessing::ProcessContent(
-    const VideoContentMetrics *contentMetrics) {
+    const VideoContentMetrics* contentMetrics) {
   // Update the recursive averaged metrics: average is over longer window
   // of time: over QmMinIntervalMs ms.
   UpdateRecursiveAvg(contentMetrics);
@@ -92,34 +92,33 @@ int VCMContentMetricsProcessing::ProcessContent(
 }

 void VCMContentMetricsProcessing::UpdateUniformAvg(
-    const VideoContentMetrics *contentMetrics) {
+    const VideoContentMetrics* contentMetrics) {
   // Update frame counter.
   frame_cnt_uniform_avg_ += 1;
   // Update averaged metrics: motion and spatial level are used.
   avg_motion_level_ += contentMetrics->motion_magnitude;
   avg_spatial_level_ += contentMetrics->spatial_pred_err;
   return;
 }

 void VCMContentMetricsProcessing::UpdateRecursiveAvg(
-    const VideoContentMetrics *contentMetrics) {
+    const VideoContentMetrics* contentMetrics) {
   // Spatial metrics: 2x2, 1x2(H), 2x1(V).
-  recursive_avg_->spatial_pred_err = (1 - recursive_avg_factor_) *
-      recursive_avg_->spatial_pred_err +
+  recursive_avg_->spatial_pred_err =
+      (1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err +
       recursive_avg_factor_ * contentMetrics->spatial_pred_err;
-  recursive_avg_->spatial_pred_err_h = (1 - recursive_avg_factor_) *
-      recursive_avg_->spatial_pred_err_h +
+  recursive_avg_->spatial_pred_err_h =
+      (1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err_h +
       recursive_avg_factor_ * contentMetrics->spatial_pred_err_h;
-  recursive_avg_->spatial_pred_err_v = (1 - recursive_avg_factor_) *
-      recursive_avg_->spatial_pred_err_v +
+  recursive_avg_->spatial_pred_err_v =
+      (1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err_v +
       recursive_avg_factor_ * contentMetrics->spatial_pred_err_v;
   // Motion metric: Derived from NFD (normalized frame difference).
-  recursive_avg_->motion_magnitude = (1 - recursive_avg_factor_) *
-      recursive_avg_->motion_magnitude +
+  recursive_avg_->motion_magnitude =
+      (1 - recursive_avg_factor_) * recursive_avg_->motion_magnitude +
       recursive_avg_factor_ * contentMetrics->motion_magnitude;
 }
-} // namespace
+}  // namespace webrtc
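Note: the recursive average reformatted above is an exponentially weighted moving average whose weight comes from UpdateFrameRate(): factor = 1000 / (frameRate * kQmMinIntervalMs). A minimal standalone sketch of the same update rule (the free function and the inlined constant are illustrative, not part of this CL):

#include <cstdint>

// Exponential moving average as in UpdateRecursiveAvg(): each new sample
// gets weight `factor`, the accumulated history gets weight (1 - factor).
// With kQmMinIntervalMs = 10000 and frameRate in frames/second, the factor
// is chosen so the average spans roughly one QM interval.
float UpdateRecursiveAverage(float avg, float sample, uint32_t frameRate) {
  const float factor = 1000.0f / static_cast<float>(frameRate * 10000);
  return (1 - factor) * avg + factor * sample;
}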

webrtc/modules/video_coding/content_metrics_processing.h

@@ -18,14 +18,10 @@ namespace webrtc {
 struct VideoContentMetrics;

 // QM interval time (in ms)
-enum {
-  kQmMinIntervalMs = 10000
-};
+enum { kQmMinIntervalMs = 10000 };

 // Flag for NFD metric vs motion metric
-enum {
-  kNfdMetric = 1
-};
+enum { kNfdMetric = 1 };

 /**********************************/
 /* Content Metrics Processing */
@@ -36,7 +32,7 @@ class VCMContentMetricsProcessing {
   ~VCMContentMetricsProcessing();

   // Update class with latest metrics.
-  int UpdateContentData(const VideoContentMetrics *contentMetrics);
+  int UpdateContentData(const VideoContentMetrics* contentMetrics);

   // Reset the short-term averaged content data.
   void ResetShortTermAvgData();
@@ -57,13 +53,13 @@ class VCMContentMetricsProcessing {
  private:
   // Compute working average.
-  int ProcessContent(const VideoContentMetrics *contentMetrics);
+  int ProcessContent(const VideoContentMetrics* contentMetrics);

   // Update the recursive averaged metrics: longer time average (~5/10 secs).
-  void UpdateRecursiveAvg(const VideoContentMetrics *contentMetrics);
+  void UpdateRecursiveAvg(const VideoContentMetrics* contentMetrics);

   // Update the uniform averaged metrics: shorter time average (~RTCP report).
-  void UpdateUniformAvg(const VideoContentMetrics *contentMetrics);
+  void UpdateUniformAvg(const VideoContentMetrics* contentMetrics);

   VideoContentMetrics* recursive_avg_;
   VideoContentMetrics* uniform_avg_;

webrtc/modules/video_coding/decoding_state.cc

@@ -166,8 +166,8 @@ void VCMDecodingState::UpdateSyncState(const VCMFrameBuffer* frame) {
       full_sync_ = ContinuousPictureId(frame->PictureId());
     }
     } else {
-      full_sync_ = ContinuousSeqNum(static_cast<uint16_t>(
-          frame->GetLowSeqNum()));
+      full_sync_ =
+          ContinuousSeqNum(static_cast<uint16_t>(frame->GetLowSeqNum()));
     }
   }
 }
@@ -229,8 +229,7 @@ bool VCMDecodingState::ContinuousSeqNum(uint16_t seq_num) const {
   return seq_num == static_cast<uint16_t>(sequence_num_ + 1);
 }

-bool VCMDecodingState::ContinuousLayer(int temporal_id,
-                                       int tl0_pic_id) const {
+bool VCMDecodingState::ContinuousLayer(int temporal_id, int tl0_pic_id) const {
   // First, check if applicable.
   if (temporal_id == kNoTemporalIdx || tl0_pic_id == kNoTl0PicIdx)
     return false;

webrtc/modules/video_coding/decoding_state.h

@@ -64,13 +64,13 @@ class VCMDecodingState {
   // Keep state of last decoded frame.
   // TODO(mikhal/stefan): create designated classes to handle these types.
   uint16_t sequence_num_;
   uint32_t time_stamp_;
   int picture_id_;
   int temporal_id_;
   int tl0_pic_id_;
-  bool full_sync_; // Sync flag when temporal layers are used.
+  bool full_sync_;  // Sync flag when temporal layers are used.
   bool in_initial_state_;

   // Used to check references in flexible mode.
   bool frame_decoded_[kFrameDecodedLength];

webrtc/modules/video_coding/encoded_frame.cc

@@ -24,7 +24,7 @@ VCMEncodedFrame::VCMEncodedFrame()
       _fragmentation(),
       _rotation(kVideoRotation_0),
       _rotation_set(false) {
-    _codecSpecificInfo.codecType = kVideoCodecUnknown;
+  _codecSpecificInfo.codecType = kVideoCodecUnknown;
 }

 VCMEncodedFrame::VCMEncodedFrame(const webrtc::EncodedImage& rhs)
@@ -36,15 +36,14 @@ VCMEncodedFrame::VCMEncodedFrame(const webrtc::EncodedImage& rhs)
       _fragmentation(),
       _rotation(kVideoRotation_0),
       _rotation_set(false) {
   _codecSpecificInfo.codecType = kVideoCodecUnknown;
   _buffer = NULL;
   _size = 0;
   _length = 0;
-  if (rhs._buffer != NULL)
-  {
-      VerifyAndAllocate(rhs._length);
-      memcpy(_buffer, rhs._buffer, rhs._length);
-  }
+  if (rhs._buffer != NULL) {
+    VerifyAndAllocate(rhs._length);
+    memcpy(_buffer, rhs._buffer, rhs._length);
+  }
 }

 VCMEncodedFrame::VCMEncodedFrame(const VCMEncodedFrame& rhs)
@@ -60,49 +59,43 @@ VCMEncodedFrame::VCMEncodedFrame(const VCMEncodedFrame& rhs)
   _buffer = NULL;
   _size = 0;
   _length = 0;
-  if (rhs._buffer != NULL)
-  {
-      VerifyAndAllocate(rhs._length);
-      memcpy(_buffer, rhs._buffer, rhs._length);
-      _length = rhs._length;
-  }
+  if (rhs._buffer != NULL) {
+    VerifyAndAllocate(rhs._length);
+    memcpy(_buffer, rhs._buffer, rhs._length);
+    _length = rhs._length;
+  }
   _fragmentation.CopyFrom(rhs._fragmentation);
 }

-VCMEncodedFrame::~VCMEncodedFrame()
-{
-    Free();
-}
+VCMEncodedFrame::~VCMEncodedFrame() {
+  Free();
+}

-void VCMEncodedFrame::Free()
-{
-    Reset();
-    if (_buffer != NULL)
-    {
-        delete [] _buffer;
-        _buffer = NULL;
-    }
-}
+void VCMEncodedFrame::Free() {
+  Reset();
+  if (_buffer != NULL) {
+    delete[] _buffer;
+    _buffer = NULL;
+  }
+}

-void VCMEncodedFrame::Reset()
-{
-    _renderTimeMs = -1;
-    _timeStamp = 0;
-    _payloadType = 0;
-    _frameType = kVideoFrameDelta;
-    _encodedWidth = 0;
-    _encodedHeight = 0;
-    _completeFrame = false;
-    _missingFrame = false;
-    _length = 0;
-    _codecSpecificInfo.codecType = kVideoCodecUnknown;
-    _codec = kVideoCodecUnknown;
-    _rotation = kVideoRotation_0;
-    _rotation_set = false;
-}
+void VCMEncodedFrame::Reset() {
+  _renderTimeMs = -1;
+  _timeStamp = 0;
+  _payloadType = 0;
+  _frameType = kVideoFrameDelta;
+  _encodedWidth = 0;
+  _encodedHeight = 0;
+  _completeFrame = false;
+  _missingFrame = false;
+  _length = 0;
+  _codecSpecificInfo.codecType = kVideoCodecUnknown;
+  _codec = kVideoCodecUnknown;
+  _rotation = kVideoRotation_0;
+  _rotation_set = false;
+}

-void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header)
-{
+void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
   if (header) {
     switch (header->codec) {
       case kRtpVideoVp8: {
@@ -215,21 +208,18 @@ const RTPFragmentationHeader* VCMEncodedFrame::FragmentationHeader() const {
   return &_fragmentation;
 }

-void VCMEncodedFrame::VerifyAndAllocate(size_t minimumSize)
-{
-    if(minimumSize > _size)
-    {
-        // create buffer of sufficient size
-        uint8_t* newBuffer = new uint8_t[minimumSize];
-        if(_buffer)
-        {
-            // copy old data
-            memcpy(newBuffer, _buffer, _size);
-            delete [] _buffer;
-        }
-        _buffer = newBuffer;
-        _size = minimumSize;
-    }
-}
+void VCMEncodedFrame::VerifyAndAllocate(size_t minimumSize) {
+  if (minimumSize > _size) {
+    // create buffer of sufficient size
+    uint8_t* newBuffer = new uint8_t[minimumSize];
+    if (_buffer) {
+      // copy old data
+      memcpy(newBuffer, _buffer, _size);
+      delete[] _buffer;
+    }
+    _buffer = newBuffer;
+    _size = minimumSize;
+  }
+}

-} // namespace webrtc
+}  // namespace webrtc
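Note: VerifyAndAllocate() above is a grow-and-copy buffer: it reallocates only when the requested size exceeds the current capacity, and preserves the existing bytes. The same pattern as a standalone helper (hypothetical function, not from the CL):

#include <cstdint>
#include <cstring>

// Grow *buffer to at least minimum_size, copying the old contents first.
// Mirrors VCMEncodedFrame::VerifyAndAllocate(); note that only the
// capacity (*size) changes here -- the number of bytes actually in use
// is tracked separately by the caller (e.g. _length).
void VerifyAndAllocateBuffer(uint8_t** buffer,
                             size_t* size,
                             size_t minimum_size) {
  if (minimum_size <= *size)
    return;  // Capacity already sufficient; existing data untouched.
  uint8_t* new_buffer = new uint8_t[minimum_size];
  if (*buffer != NULL) {
    memcpy(new_buffer, *buffer, *size);  // Preserve old data.
    delete[] *buffer;
  }
  *buffer = new_buffer;
  *size = minimum_size;
}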

webrtc/modules/video_coding/encoded_frame.h

@@ -19,109 +19,114 @@
 #include "webrtc/modules/video_coding/include/video_codec_interface.h"
 #include "webrtc/modules/video_coding/include/video_coding_defines.h"

-namespace webrtc
-{
+namespace webrtc {

-class VCMEncodedFrame : protected EncodedImage
-{
-public:
-    VCMEncodedFrame();
-    VCMEncodedFrame(const webrtc::EncodedImage& rhs);
-    VCMEncodedFrame(const VCMEncodedFrame& rhs);
-    ~VCMEncodedFrame();
+class VCMEncodedFrame : protected EncodedImage {
+ public:
+  VCMEncodedFrame();
+  explicit VCMEncodedFrame(const webrtc::EncodedImage& rhs);
+  VCMEncodedFrame(const VCMEncodedFrame& rhs);
+  ~VCMEncodedFrame();
   /**
    *   Delete VideoFrame and resets members to zero
    */
   void Free();
   /**
    *   Set render time in milliseconds
    */
-  void SetRenderTime(const int64_t renderTimeMs) {_renderTimeMs = renderTimeMs;}
+  void SetRenderTime(const int64_t renderTimeMs) {
+    _renderTimeMs = renderTimeMs;
+  }
   /**
    *   Set the encoded frame size
    */
-  void SetEncodedSize(uint32_t width, uint32_t height)
-  { _encodedWidth = width; _encodedHeight = height; }
+  void SetEncodedSize(uint32_t width, uint32_t height) {
+    _encodedWidth = width;
+    _encodedHeight = height;
+  }
   /**
    *   Get the encoded image
    */
-  const webrtc::EncodedImage& EncodedImage() const
-  { return static_cast<const webrtc::EncodedImage&>(*this); }
+  const webrtc::EncodedImage& EncodedImage() const {
+    return static_cast<const webrtc::EncodedImage&>(*this);
+  }
   /**
    *   Get pointer to frame buffer
    */
-  const uint8_t* Buffer() const {return _buffer;}
+  const uint8_t* Buffer() const { return _buffer; }
   /**
    *   Get frame length
    */
-  size_t Length() const {return _length;}
+  size_t Length() const { return _length; }
   /**
    *   Get frame timestamp (90kHz)
    */
-  uint32_t TimeStamp() const {return _timeStamp;}
+  uint32_t TimeStamp() const { return _timeStamp; }
   /**
    *   Get render time in milliseconds
    */
-  int64_t RenderTimeMs() const {return _renderTimeMs;}
+  int64_t RenderTimeMs() const { return _renderTimeMs; }
   /**
    *   Get frame type
    */
   webrtc::FrameType FrameType() const { return _frameType; }
   /**
    *   Get frame rotation
    */
   VideoRotation rotation() const { return _rotation; }
   /**
    *   True if this frame is complete, false otherwise
    */
   bool Complete() const { return _completeFrame; }
   /**
    *   True if there's a frame missing before this frame
    */
   bool MissingFrame() const { return _missingFrame; }
   /**
    *   Payload type of the encoded payload
    */
   uint8_t PayloadType() const { return _payloadType; }
   /**
    *   Get codec specific info.
    *   The returned pointer is only valid as long as the VCMEncodedFrame
    *   is valid. Also, VCMEncodedFrame owns the pointer and will delete
    *   the object.
    */
-  const CodecSpecificInfo* CodecSpecific() const {return &_codecSpecificInfo;}
+  const CodecSpecificInfo* CodecSpecific() const { return &_codecSpecificInfo; }

   const RTPFragmentationHeader* FragmentationHeader() const;

-protected:
+ protected:
   /**
-   * Verifies that current allocated buffer size is larger than or equal to the input size.
-   * If the current buffer size is smaller, a new allocation is made and the old buffer data
-   * is copied to the new buffer.
+   * Verifies that current allocated buffer size is larger than or equal to the
+   * input size.
+   * If the current buffer size is smaller, a new allocation is made and the old
+   * buffer data
+   * is copied to the new buffer.
    * Buffer size is updated to minimumSize.
    */
   void VerifyAndAllocate(size_t minimumSize);

   void Reset();

   void CopyCodecSpecific(const RTPVideoHeader* header);

   int64_t _renderTimeMs;
   uint8_t _payloadType;
   bool _missingFrame;
   CodecSpecificInfo _codecSpecificInfo;
   webrtc::VideoCodecType _codec;
   RTPFragmentationHeader _fragmentation;
   VideoRotation _rotation;

   // Video rotation is only set along with the last packet for each frame
   // (same as marker bit). This |_rotation_set| is only for debugging purpose
   // to ensure we don't set it twice for a frame.
   bool _rotation_set;
 };

-} // namespace webrtc
+}  // namespace webrtc

-#endif // WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_

File diff suppressed because it is too large.

webrtc/modules/video_coding/frame_buffer.cc

@@ -20,39 +20,30 @@
 namespace webrtc {

 VCMFrameBuffer::VCMFrameBuffer()
-    :
-    _state(kStateEmpty),
-    _nackCount(0),
-    _latestPacketTimeMs(-1) {
-}
+    : _state(kStateEmpty), _nackCount(0), _latestPacketTimeMs(-1) {}

-VCMFrameBuffer::~VCMFrameBuffer() {
-}
+VCMFrameBuffer::~VCMFrameBuffer() {}

 VCMFrameBuffer::VCMFrameBuffer(const VCMFrameBuffer& rhs)
-    :
-    VCMEncodedFrame(rhs),
-    _state(rhs._state),
-    _sessionInfo(),
-    _nackCount(rhs._nackCount),
-    _latestPacketTimeMs(rhs._latestPacketTimeMs) {
+    : VCMEncodedFrame(rhs),
+      _state(rhs._state),
+      _sessionInfo(),
+      _nackCount(rhs._nackCount),
+      _latestPacketTimeMs(rhs._latestPacketTimeMs) {
   _sessionInfo = rhs._sessionInfo;
   _sessionInfo.UpdateDataPointers(rhs._buffer, _buffer);
 }

-webrtc::FrameType
-VCMFrameBuffer::FrameType() const {
+webrtc::FrameType VCMFrameBuffer::FrameType() const {
   return _sessionInfo.FrameType();
 }

-int32_t
-VCMFrameBuffer::GetLowSeqNum() const {
+int32_t VCMFrameBuffer::GetLowSeqNum() const {
   return _sessionInfo.LowSequenceNumber();
 }

-int32_t
-VCMFrameBuffer::GetHighSeqNum() const {
+int32_t VCMFrameBuffer::GetHighSeqNum() const {
   return _sessionInfo.HighSequenceNumber();
 }

 int VCMFrameBuffer::PictureId() const {
@@ -84,214 +75,196 @@ void VCMFrameBuffer::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
       gof_info.temporal_up_switch[idx];
 }

-bool
-VCMFrameBuffer::IsSessionComplete() const {
+bool VCMFrameBuffer::IsSessionComplete() const {
   return _sessionInfo.complete();
 }

 // Insert packet
-VCMFrameBufferEnum
-VCMFrameBuffer::InsertPacket(const VCMPacket& packet,
-                             int64_t timeInMs,
-                             VCMDecodeErrorMode decode_error_mode,
-                             const FrameData& frame_data) {
+VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
+    const VCMPacket& packet,
+    int64_t timeInMs,
+    VCMDecodeErrorMode decode_error_mode,
+    const FrameData& frame_data) {
   assert(!(NULL == packet.dataPtr && packet.sizeBytes > 0));
   if (packet.dataPtr != NULL) {
     _payloadType = packet.payloadType;
   }

   if (kStateEmpty == _state) {
     // First packet (empty and/or media) inserted into this frame.
     // store some info and set some initial values.
     _timeStamp = packet.timestamp;
     // We only take the ntp timestamp of the first packet of a frame.
     ntp_time_ms_ = packet.ntp_time_ms_;
     _codec = packet.codec;
     if (packet.frameType != kEmptyFrame) {
       // first media packet
       SetState(kStateIncomplete);
     }
   }

-  uint32_t requiredSizeBytes = Length() + packet.sizeBytes +
-      (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
+  uint32_t requiredSizeBytes =
+      Length() + packet.sizeBytes +
+      (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
   if (requiredSizeBytes >= _size) {
     const uint8_t* prevBuffer = _buffer;
-    const uint32_t increments = requiredSizeBytes /
-                                kBufferIncStepSizeBytes +
-                                (requiredSizeBytes %
-                                 kBufferIncStepSizeBytes > 0);
-    const uint32_t newSize = _size +
-                             increments * kBufferIncStepSizeBytes;
+    const uint32_t increments =
+        requiredSizeBytes / kBufferIncStepSizeBytes +
+        (requiredSizeBytes % kBufferIncStepSizeBytes > 0);
+    const uint32_t newSize = _size + increments * kBufferIncStepSizeBytes;
     if (newSize > kMaxJBFrameSizeBytes) {
       LOG(LS_ERROR) << "Failed to insert packet due to frame being too "
                        "big.";
       return kSizeError;
     }
     VerifyAndAllocate(newSize);
     _sessionInfo.UpdateDataPointers(prevBuffer, _buffer);
   }

   if (packet.width > 0 && packet.height > 0) {
     _encodedWidth = packet.width;
     _encodedHeight = packet.height;
   }

   // Don't copy payload specific data for empty packets (e.g padding packets).
   if (packet.sizeBytes > 0)
     CopyCodecSpecific(&packet.codecSpecificHeader);

-  int retVal = _sessionInfo.InsertPacket(packet, _buffer,
-                                         decode_error_mode,
-                                         frame_data);
+  int retVal =
+      _sessionInfo.InsertPacket(packet, _buffer, decode_error_mode, frame_data);
   if (retVal == -1) {
     return kSizeError;
   } else if (retVal == -2) {
     return kDuplicatePacket;
   } else if (retVal == -3) {
     return kOutOfBoundsPacket;
   }

   // update length
   _length = Length() + static_cast<uint32_t>(retVal);

   _latestPacketTimeMs = timeInMs;

   // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
   // ts_126114v120700p.pdf Section 7.4.5.
   // The MTSI client shall add the payload bytes as defined in this clause
   // onto the last RTP packet in each group of packets which make up a key
   // frame (I-frame or IDR frame in H.264 (AVC), or an IRAP picture in H.265
   // (HEVC)).
   if (packet.markerBit) {
     RTC_DCHECK(!_rotation_set);
     _rotation = packet.codecSpecificHeader.rotation;
     _rotation_set = true;
   }

   if (_sessionInfo.complete()) {
     SetState(kStateComplete);
     return kCompleteSession;
   } else if (_sessionInfo.decodable()) {
     SetState(kStateDecodable);
     return kDecodableSession;
   }
   return kIncomplete;
 }

-int64_t
-VCMFrameBuffer::LatestPacketTimeMs() const {
+int64_t VCMFrameBuffer::LatestPacketTimeMs() const {
   return _latestPacketTimeMs;
 }

-void
-VCMFrameBuffer::IncrementNackCount() {
+void VCMFrameBuffer::IncrementNackCount() {
   _nackCount++;
 }

-int16_t
-VCMFrameBuffer::GetNackCount() const {
+int16_t VCMFrameBuffer::GetNackCount() const {
   return _nackCount;
 }

-bool
-VCMFrameBuffer::HaveFirstPacket() const {
+bool VCMFrameBuffer::HaveFirstPacket() const {
   return _sessionInfo.HaveFirstPacket();
 }

-bool
-VCMFrameBuffer::HaveLastPacket() const {
+bool VCMFrameBuffer::HaveLastPacket() const {
   return _sessionInfo.HaveLastPacket();
 }

-int
-VCMFrameBuffer::NumPackets() const {
+int VCMFrameBuffer::NumPackets() const {
   return _sessionInfo.NumPackets();
 }

-void
-VCMFrameBuffer::Reset() {
+void VCMFrameBuffer::Reset() {
   _length = 0;
   _timeStamp = 0;
   _sessionInfo.Reset();
   _payloadType = 0;
   _nackCount = 0;
   _latestPacketTimeMs = -1;
   _state = kStateEmpty;
   VCMEncodedFrame::Reset();
 }

 // Set state of frame
-void
-VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) {
+void VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) {
   if (_state == state) {
     return;
   }
   switch (state) {
     case kStateIncomplete:
       // we can go to this state from state kStateEmpty
       assert(_state == kStateEmpty);
       // Do nothing, we received a packet
       break;

     case kStateComplete:
-      assert(_state == kStateEmpty ||
-             _state == kStateIncomplete ||
+      assert(_state == kStateEmpty || _state == kStateIncomplete ||
              _state == kStateDecodable);
       break;

     case kStateEmpty:
       // Should only be set to empty through Reset().
       assert(false);
       break;

     case kStateDecodable:
-      assert(_state == kStateEmpty ||
-             _state == kStateIncomplete);
+      assert(_state == kStateEmpty || _state == kStateIncomplete);
       break;
   }
   _state = state;
 }

 // Get current state of frame
-VCMFrameBufferStateEnum
-VCMFrameBuffer::GetState() const {
+VCMFrameBufferStateEnum VCMFrameBuffer::GetState() const {
   return _state;
 }

 // Get current state of frame
-VCMFrameBufferStateEnum
-VCMFrameBuffer::GetState(uint32_t& timeStamp) const {
+VCMFrameBufferStateEnum VCMFrameBuffer::GetState(uint32_t& timeStamp) const {
   timeStamp = TimeStamp();
   return GetState();
 }

-bool
-VCMFrameBuffer::IsRetransmitted() const {
+bool VCMFrameBuffer::IsRetransmitted() const {
   return _sessionInfo.session_nack();
 }

-void
-VCMFrameBuffer::PrepareForDecode(bool continuous) {
+void VCMFrameBuffer::PrepareForDecode(bool continuous) {
 #ifdef INDEPENDENT_PARTITIONS
   if (_codec == kVideoCodecVP8) {
-    _length =
-        _sessionInfo.BuildVP8FragmentationHeader(_buffer, _length,
-                                                 &_fragmentation);
+    _length = _sessionInfo.BuildVP8FragmentationHeader(_buffer, _length,
+                                                       &_fragmentation);
   } else {
     size_t bytes_removed = _sessionInfo.MakeDecodable();
     _length -= bytes_removed;
   }
 #else
   size_t bytes_removed = _sessionInfo.MakeDecodable();
   _length -= bytes_removed;
 #endif

   // Transfer frame information to EncodedFrame and create any codec
   // specific information.
   _frameType = _sessionInfo.FrameType();
   _completeFrame = _sessionInfo.complete();
   _missingFrame = !continuous;
 }

 }  // namespace webrtc
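Note: InsertPacket() above grows the frame buffer in whole steps of kBufferIncStepSizeBytes using an integer ceiling division. The computation in isolation (standalone helper and parameter names are illustrative, not from the CL):

#include <cstdint>

// newSize = currentSize + ceil(requiredSize / step) * step, written with
// integer arithmetic: (requiredSize % step > 0) contributes 1 exactly when
// there is a remainder, as in VCMFrameBuffer::InsertPacket().
uint32_t GrownBufferSize(uint32_t currentSize,
                         uint32_t requiredSize,
                         uint32_t step) {
  const uint32_t increments = requiredSize / step + (requiredSize % step > 0);
  return currentSize + increments * step;
}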

webrtc/modules/video_coding/generic_decoder.cc

@@ -17,7 +17,7 @@
 namespace webrtc {

-VCMDecodedFrameCallback::VCMDecodedFrameCallback(VCMTiming& timing,
+VCMDecodedFrameCallback::VCMDecodedFrameCallback(VCMTiming* timing,
                                                  Clock* clock)
     : _critSect(CriticalSectionWrapper::CreateCriticalSection()),
       _clock(clock),
@@ -26,22 +26,19 @@ VCMDecodedFrameCallback::VCMDecodedFrameCallback(VCMTiming* timing,
       _timestampMap(kDecoderFrameMemoryLength),
       _lastReceivedPictureID(0) {}

-VCMDecodedFrameCallback::~VCMDecodedFrameCallback()
-{
-    delete _critSect;
-}
+VCMDecodedFrameCallback::~VCMDecodedFrameCallback() {
+  delete _critSect;
+}

 void VCMDecodedFrameCallback::SetUserReceiveCallback(
-    VCMReceiveCallback* receiveCallback)
-{
-    CriticalSectionScoped cs(_critSect);
-    _receiveCallback = receiveCallback;
-}
+    VCMReceiveCallback* receiveCallback) {
+  CriticalSectionScoped cs(_critSect);
+  _receiveCallback = receiveCallback;
+}

-VCMReceiveCallback* VCMDecodedFrameCallback::UserReceiveCallback()
-{
-    CriticalSectionScoped cs(_critSect);
-    return _receiveCallback;
-}
+VCMReceiveCallback* VCMDecodedFrameCallback::UserReceiveCallback() {
+  CriticalSectionScoped cs(_critSect);
+  return _receiveCallback;
+}

 int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage) {
@@ -50,66 +47,57 @@ int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage) {
 int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
                                          int64_t decode_time_ms) {
   TRACE_EVENT_INSTANT1("webrtc", "VCMDecodedFrameCallback::Decoded",
                        "timestamp", decodedImage.timestamp());
   // TODO(holmer): We should improve this so that we can handle multiple
   // callbacks from one call to Decode().
   VCMFrameInformation* frameInfo;
   VCMReceiveCallback* callback;
   {
     CriticalSectionScoped cs(_critSect);
     frameInfo = _timestampMap.Pop(decodedImage.timestamp());
     callback = _receiveCallback;
   }

   if (frameInfo == NULL) {
     LOG(LS_WARNING) << "Too many frames backed up in the decoder, dropping "
                        "this one.";
     return WEBRTC_VIDEO_CODEC_OK;
   }

   const int64_t now_ms = _clock->TimeInMilliseconds();
   if (decode_time_ms < 0) {
     decode_time_ms =
         static_cast<int32_t>(now_ms - frameInfo->decodeStartTimeMs);
   }
-  _timing.StopDecodeTimer(
-      decodedImage.timestamp(),
-      decode_time_ms,
-      now_ms,
-      frameInfo->renderTimeMs);
+  _timing->StopDecodeTimer(decodedImage.timestamp(), decode_time_ms, now_ms,
+                           frameInfo->renderTimeMs);

-  if (callback != NULL)
-  {
+  if (callback != NULL) {
     decodedImage.set_render_time_ms(frameInfo->renderTimeMs);
     decodedImage.set_rotation(frameInfo->rotation);
     callback->FrameToRender(decodedImage);
   }
   return WEBRTC_VIDEO_CODEC_OK;
 }

-int32_t
-VCMDecodedFrameCallback::ReceivedDecodedReferenceFrame(
-    const uint64_t pictureId)
-{
+int32_t VCMDecodedFrameCallback::ReceivedDecodedReferenceFrame(
+    const uint64_t pictureId) {
   CriticalSectionScoped cs(_critSect);
-  if (_receiveCallback != NULL)
-  {
+  if (_receiveCallback != NULL) {
     return _receiveCallback->ReceivedDecodedReferenceFrame(pictureId);
   }
   return -1;
 }

-int32_t
-VCMDecodedFrameCallback::ReceivedDecodedFrame(const uint64_t pictureId)
-{
+int32_t VCMDecodedFrameCallback::ReceivedDecodedFrame(
+    const uint64_t pictureId) {
   _lastReceivedPictureID = pictureId;
   return 0;
 }

-uint64_t VCMDecodedFrameCallback::LastReceivedPictureID() const
-{
+uint64_t VCMDecodedFrameCallback::LastReceivedPictureID() const {
   return _lastReceivedPictureID;
 }

 void VCMDecodedFrameCallback::OnDecoderImplementationName(
@@ -125,14 +113,12 @@ void VCMDecodedFrameCallback::Map(uint32_t timestamp,
   _timestampMap.Add(timestamp, frameInfo);
 }

-int32_t VCMDecodedFrameCallback::Pop(uint32_t timestamp)
-{
-    CriticalSectionScoped cs(_critSect);
-    if (_timestampMap.Pop(timestamp) == NULL)
-    {
-        return VCM_GENERAL_ERROR;
-    }
-    return VCM_OK;
+int32_t VCMDecodedFrameCallback::Pop(uint32_t timestamp) {
+  CriticalSectionScoped cs(_critSect);
+  if (_timestampMap.Pop(timestamp) == NULL) {
+    return VCM_GENERAL_ERROR;
+  }
+  return VCM_OK;
 }

 VCMGenericDecoder::VCMGenericDecoder(VideoDecoder* decoder, bool isExternal)
@@ -147,12 +133,11 @@ VCMGenericDecoder::VCMGenericDecoder(VideoDecoder* decoder, bool isExternal)
 VCMGenericDecoder::~VCMGenericDecoder() {}

 int32_t VCMGenericDecoder::InitDecode(const VideoCodec* settings,
-                                      int32_t numberOfCores)
-{
-    TRACE_EVENT0("webrtc", "VCMGenericDecoder::InitDecode");
-    _codecType = settings->codecType;
+                                      int32_t numberOfCores) {
+  TRACE_EVENT0("webrtc", "VCMGenericDecoder::InitDecode");
+  _codecType = settings->codecType;

   return _decoder->InitDecode(settings, numberOfCores);
 }

 int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
@@ -169,16 +154,13 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
                            frame.CodecSpecific(), frame.RenderTimeMs());
   _callback->OnDecoderImplementationName(_decoder->ImplementationName());
-  if (ret < WEBRTC_VIDEO_CODEC_OK)
-  {
+  if (ret < WEBRTC_VIDEO_CODEC_OK) {
     LOG(LS_WARNING) << "Failed to decode frame with timestamp "
                     << frame.TimeStamp() << ", error code: " << ret;
     _callback->Pop(frame.TimeStamp());
     return ret;
-  }
-  else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
-           ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI)
-  {
+  } else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
+             ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI) {
     // No output
     _callback->Pop(frame.TimeStamp());
   }
@@ -207,4 +189,4 @@ bool VCMGenericDecoder::PrefersLateDecoding() const {
   return _decoder->PrefersLateDecoding();
 }

-} // namespace
+}  // namespace webrtc
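Note: in Decoded() above, a negative decode_time_ms means the decoder did not measure its own decode time, so it is derived from the wall clock and the recorded decode start. The fallback in isolation (hypothetical helper, not from the CL):

#include <cstdint>

// Returns the reported decode time if valid, otherwise derives it from
// when decoding started, as VCMDecodedFrameCallback::Decoded() does.
int64_t EffectiveDecodeTimeMs(int64_t reported_ms,
                              int64_t now_ms,
                              int64_t decode_start_ms) {
  return reported_ms >= 0 ? reported_ms : now_ms - decode_start_ms;
}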

webrtc/modules/video_coding/generic_decoder.h

@@ -17,31 +17,29 @@
 #include "webrtc/modules/video_coding/timestamp_map.h"
 #include "webrtc/modules/video_coding/timing.h"

-namespace webrtc
-{
+namespace webrtc {

 class VCMReceiveCallback;

 enum { kDecoderFrameMemoryLength = 10 };

-struct VCMFrameInformation
-{
-    int64_t renderTimeMs;
-    int64_t decodeStartTimeMs;
-    void* userData;
-    VideoRotation rotation;
+struct VCMFrameInformation {
+  int64_t renderTimeMs;
+  int64_t decodeStartTimeMs;
+  void* userData;
+  VideoRotation rotation;
 };

-class VCMDecodedFrameCallback : public DecodedImageCallback
-{
-public:
-    VCMDecodedFrameCallback(VCMTiming& timing, Clock* clock);
+class VCMDecodedFrameCallback : public DecodedImageCallback {
+ public:
+  VCMDecodedFrameCallback(VCMTiming* timing, Clock* clock);
   virtual ~VCMDecodedFrameCallback();
   void SetUserReceiveCallback(VCMReceiveCallback* receiveCallback);
   VCMReceiveCallback* UserReceiveCallback();

-  virtual int32_t Decoded(VideoFrame& decodedImage);
-  virtual int32_t Decoded(VideoFrame& decodedImage, int64_t decode_time_ms);
+  virtual int32_t Decoded(VideoFrame& decodedImage);  // NOLINT
+  virtual int32_t Decoded(VideoFrame& decodedImage,  // NOLINT
+                          int64_t decode_time_ms);
   virtual int32_t ReceivedDecodedReferenceFrame(const uint64_t pictureId);
   virtual int32_t ReceivedDecodedFrame(const uint64_t pictureId);
@@ -51,65 +49,63 @@ public:
   void Map(uint32_t timestamp, VCMFrameInformation* frameInfo);
   int32_t Pop(uint32_t timestamp);

-private:
+ private:
   // Protect |_receiveCallback| and |_timestampMap|.
   CriticalSectionWrapper* _critSect;
   Clock* _clock;
   VCMReceiveCallback* _receiveCallback GUARDED_BY(_critSect);
-  VCMTiming& _timing;
+  VCMTiming* _timing;
   VCMTimestampMap _timestampMap GUARDED_BY(_critSect);
   uint64_t _lastReceivedPictureID;
 };

-class VCMGenericDecoder
-{
-  friend class VCMCodecDataBase;
-public:
-  VCMGenericDecoder(VideoDecoder* decoder, bool isExternal = false);
-  ~VCMGenericDecoder();
+class VCMGenericDecoder {
+  friend class VCMCodecDataBase;
+
+ public:
+  explicit VCMGenericDecoder(VideoDecoder* decoder, bool isExternal = false);
+  ~VCMGenericDecoder();

   /**
    * Initialize the decoder with the information from the VideoCodec
    */
-  int32_t InitDecode(const VideoCodec* settings,
-                     int32_t numberOfCores);
+  int32_t InitDecode(const VideoCodec* settings, int32_t numberOfCores);

   /**
    * Decode to a raw I420 frame,
    *
    * inputVideoBuffer reference to encoded video frame
    */
   int32_t Decode(const VCMEncodedFrame& inputFrame, int64_t nowMs);

   /**
    * Free the decoder memory
    */
   int32_t Release();

   /**
    * Reset the decoder state, prepare for a new call
    */
   int32_t Reset();

   /**
    * Set decode callback. Deregistering while decoding is illegal.
    */
   int32_t RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback);

   bool External() const;
   bool PrefersLateDecoding() const;

-private:
+ private:
   VCMDecodedFrameCallback* _callback;
   VCMFrameInformation _frameInfos[kDecoderFrameMemoryLength];
   uint32_t _nextFrameInfoIdx;
   VideoDecoder* const _decoder;
   VideoCodecType _codecType;
   bool _isExternal;
   bool _keyFrameDecoded;
 };

 }  // namespace webrtc

 #endif  // WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
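Note: one of the lint fixes in this header is marking VCMGenericDecoder's constructor explicit (cpplint's runtime/explicit check). Because the bool parameter has a default, the constructor is callable with one argument and would otherwise act as an implicit VideoDecoder* -> VCMGenericDecoder conversion. A toy illustration (the class and function below are illustrative only, not from the CL):

class Wrapper {
 public:
  // `explicit` blocks silent one-argument conversions; the defaulted
  // second parameter is what makes this constructor single-argument.
  explicit Wrapper(int* resource, bool owned = false)
      : resource_(resource), owned_(owned) {}

 private:
  int* resource_;
  bool owned_;
};

void Use(const Wrapper& w);

// int x = 0;
// Use(&x);           // Compile error with `explicit`: no silent conversion.
// Use(Wrapper(&x));  // OK: the construction is intentional and visible.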

webrtc/modules/video_coding/generic_encoder.cc

@@ -8,12 +8,15 @@
  * be found in the AUTHORS file in the root of the source tree.
  */

+#include "webrtc/modules/video_coding/generic_encoder.h"
+
+#include <vector>
+
 #include "webrtc/base/checks.h"
 #include "webrtc/base/logging.h"
 #include "webrtc/base/trace_event.h"
 #include "webrtc/engine_configurations.h"
 #include "webrtc/modules/video_coding/encoded_frame.h"
-#include "webrtc/modules/video_coding/generic_encoder.h"
 #include "webrtc/modules/video_coding/media_optimization.h"
 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
@@ -28,8 +31,7 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
       rtp->codec = kRtpVideoVp8;
       rtp->codecHeader.VP8.InitRTPVideoHeaderVP8();
       rtp->codecHeader.VP8.pictureId = info->codecSpecific.VP8.pictureId;
-      rtp->codecHeader.VP8.nonReference =
-          info->codecSpecific.VP8.nonReference;
+      rtp->codecHeader.VP8.nonReference = info->codecSpecific.VP8.nonReference;
       rtp->codecHeader.VP8.temporalIdx = info->codecSpecific.VP8.temporalIdx;
       rtp->codecHeader.VP8.layerSync = info->codecSpecific.VP8.layerSync;
       rtp->codecHeader.VP8.tl0PicIdx = info->codecSpecific.VP8.tl0PicIdx;
@@ -89,7 +91,7 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
   }
 }
 }  // namespace

-//#define DEBUG_ENCODER_BIT_STREAM
+// #define DEBUG_ENCODER_BIT_STREAM

 VCMGenericEncoder::VCMGenericEncoder(
     VideoEncoder* encoder,
@@ -195,10 +197,8 @@ EncoderParameters VCMGenericEncoder::GetEncoderParameters() const {
   return encoder_params_;
 }

-int32_t
-VCMGenericEncoder::SetPeriodicKeyFrames(bool enable)
-{
-    return encoder_->SetPeriodicKeyFrames(enable);
+int32_t VCMGenericEncoder::SetPeriodicKeyFrames(bool enable) {
+  return encoder_->SetPeriodicKeyFrames(enable);
 }

 int32_t VCMGenericEncoder::RequestFrame(
@@ -207,10 +207,8 @@ int32_t VCMGenericEncoder::RequestFrame(
   return encoder_->Encode(image, NULL, &frame_types);
 }

-bool
-VCMGenericEncoder::InternalSource() const
-{
-    return internal_source_;
+bool VCMGenericEncoder::InternalSource() const {
+  return internal_source_;
 }

 void VCMGenericEncoder::OnDroppedFrame() {
@@ -225,9 +223,9 @@ int VCMGenericEncoder::GetTargetFramerate() {
   return encoder_->GetTargetFramerate();
 }

 /***************************
  * Callback Implementation
  ***************************/

 VCMEncodedFrameCallback::VCMEncodedFrameCallback(
     EncodedImageCallback* post_encode_callback)
     : send_callback_(),
@@ -242,22 +240,20 @@ VCMEncodedFrameCallback::VCMEncodedFrameCallback(
 #endif
 {
 #ifdef DEBUG_ENCODER_BIT_STREAM
   _bitStreamAfterEncoder = fopen("encoderBitStream.bit", "wb");
 #endif
 }

-VCMEncodedFrameCallback::~VCMEncodedFrameCallback()
-{
+VCMEncodedFrameCallback::~VCMEncodedFrameCallback() {
 #ifdef DEBUG_ENCODER_BIT_STREAM
   fclose(_bitStreamAfterEncoder);
 #endif
 }

-int32_t
-VCMEncodedFrameCallback::SetTransportCallback(VCMPacketizationCallback* transport)
-{
-    send_callback_ = transport;
-    return VCM_OK;
+int32_t VCMEncodedFrameCallback::SetTransportCallback(
+    VCMPacketizationCallback* transport) {
+  send_callback_ = transport;
+  return VCM_OK;
 }

 int32_t VCMEncodedFrameCallback::Encoded(

webrtc/modules/video_coding/generic_encoder.h

@@ -11,11 +11,12 @@
 #ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
 #define WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_

+#include <stdio.h>
+#include <vector>
+
 #include "webrtc/modules/video_coding/include/video_codec_interface.h"
 #include "webrtc/modules/video_coding/include/video_coding_defines.h"

-#include <stdio.h>
-
 #include "webrtc/base/criticalsection.h"
 #include "webrtc/base/scoped_ptr.h"
@@ -36,10 +37,10 @@ struct EncoderParameters {
 /*************************************/
 /* VCMEncodeFrameCallback class */
 /***********************************/
-class VCMEncodedFrameCallback : public EncodedImageCallback
-{
-public:
-    VCMEncodedFrameCallback(EncodedImageCallback* post_encode_callback);
+class VCMEncodedFrameCallback : public EncodedImageCallback {
+ public:
+  explicit VCMEncodedFrameCallback(
+      EncodedImageCallback* post_encode_callback);
   virtual ~VCMEncodedFrameCallback();

   /*
@@ -56,16 +57,21 @@ public:
   /**
    * Set media Optimization
    */
-  void SetMediaOpt (media_optimization::MediaOptimization* mediaOpt);
+  void SetMediaOpt(media_optimization::MediaOptimization* mediaOpt);

-  void SetPayloadType(uint8_t payloadType) { _payloadType = payloadType; };
-  void SetInternalSource(bool internalSource) { _internalSource = internalSource; };
+  void SetPayloadType(uint8_t payloadType) {
+    _payloadType = payloadType;
+  }
+
+  void SetInternalSource(bool internalSource) {
+    _internalSource = internalSource;
+  }

   void SetRotation(VideoRotation rotation) { _rotation = rotation; }

   void SignalLastEncoderImplementationUsed(
       const char* encoder_implementation_name);

-private:
+ private:
   VCMPacketizationCallback* send_callback_;
   media_optimization::MediaOptimization* _mediaOpt;
   uint8_t _payloadType;
@@ -77,68 +83,67 @@ private:
 #ifdef DEBUG_ENCODER_BIT_STREAM
   FILE* _bitStreamAfterEncoder;
 #endif
-};// end of VCMEncodeFrameCallback class
+};  // end of VCMEncodeFrameCallback class

 /******************************/
 /* VCMGenericEncoder class */
 /******************************/
-class VCMGenericEncoder
-{
+class VCMGenericEncoder {
   friend class VCMCodecDataBase;

-public:
+ public:
   VCMGenericEncoder(VideoEncoder* encoder,
                     VideoEncoderRateObserver* rate_observer,
                     VCMEncodedFrameCallback* encoded_frame_callback,
                     bool internalSource);
   ~VCMGenericEncoder();
   /**
    * Free encoder memory
    */
   int32_t Release();
   /**
    * Initialize the encoder with the information from the VideoCodec
    */
   int32_t InitEncode(const VideoCodec* settings,
                      int32_t numberOfCores,
                      size_t maxPayloadSize);
   /**
    * Encode raw image
    * inputFrame        : Frame containing raw image
    * codecSpecificInfo : Specific codec data
    * cameraFrameRate   : Request or information from the remote side
    * frameType         : The requested frame type to encode
    */
   int32_t Encode(const VideoFrame& inputFrame,
                  const CodecSpecificInfo* codecSpecificInfo,
                  const std::vector<FrameType>& frameTypes);

   void SetEncoderParameters(const EncoderParameters& params);
   EncoderParameters GetEncoderParameters() const;

   int32_t SetPeriodicKeyFrames(bool enable);

   int32_t RequestFrame(const std::vector<FrameType>& frame_types);

   bool InternalSource() const;

   void OnDroppedFrame();

   bool SupportsNativeHandle() const;

   int GetTargetFramerate();

-private:
+ private:
   VideoEncoder* const encoder_;
   VideoEncoderRateObserver* const rate_observer_;
   VCMEncodedFrameCallback* const vcm_encoded_frame_callback_;
   const bool internal_source_;
   mutable rtc::CriticalSection params_lock_;
   EncoderParameters encoder_params_ GUARDED_BY(params_lock_);
   VideoRotation rotation_;
   bool is_screenshare_;
-}; // end of VCMGenericEncoder class
+};  // end of VCMGenericEncoder class

 }  // namespace webrtc

 #endif  // WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_

webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h

@@ -20,14 +20,13 @@ namespace webrtc {
 class MockVCMFrameTypeCallback : public VCMFrameTypeCallback {
  public:
   MOCK_METHOD0(RequestKeyFrame, int32_t());
-  MOCK_METHOD1(SliceLossIndicationRequest,
-               int32_t(const uint64_t pictureId));
+  MOCK_METHOD1(SliceLossIndicationRequest, int32_t(const uint64_t pictureId));
 };

 class MockPacketRequestCallback : public VCMPacketRequestCallback {
  public:
-  MOCK_METHOD2(ResendPackets, int32_t(const uint16_t* sequenceNumbers,
-                                      uint16_t length));
+  MOCK_METHOD2(ResendPackets,
+               int32_t(const uint16_t* sequenceNumbers, uint16_t length));
 };

 }  // namespace webrtc
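Note: these gmock mocks back the video_coding unit tests; a minimal usage sketch (the test name and expectation below are illustrative, not from the CL):

#include <stdint.h>

#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h"

using ::testing::_;
using ::testing::Return;

// Illustrative test: expect one resend request covering two packets.
TEST(MockVcmCallbacksTest, ResendPacketsExpectation) {
  webrtc::MockPacketRequestCallback callback;
  EXPECT_CALL(callback, ResendPackets(_, 2)).WillOnce(Return(0));

  const uint16_t sequence_numbers[2] = {11, 12};
  EXPECT_EQ(0, callback.ResendPackets(sequence_numbers, 2));
}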

webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h

@ -12,6 +12,7 @@
#define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_ #define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
#include <string> #include <string>
#include <vector>
#include "testing/gmock/include/gmock/gmock.h" #include "testing/gmock/include/gmock/gmock.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h" #include "webrtc/modules/video_coding/include/video_codec_interface.h"
@ -21,17 +22,19 @@ namespace webrtc {
class MockEncodedImageCallback : public EncodedImageCallback {
 public:
  MOCK_METHOD3(Encoded,
               int32_t(const EncodedImage& encodedImage,
                       const CodecSpecificInfo* codecSpecificInfo,
                       const RTPFragmentationHeader* fragmentation));
};

class MockVideoEncoder : public VideoEncoder {
 public:
  MOCK_CONST_METHOD2(Version, int32_t(int8_t* version, int32_t length));
  MOCK_METHOD3(InitEncode,
               int32_t(const VideoCodec* codecSettings,
                       int32_t numberOfCores,
                       size_t maxPayloadSize));
  MOCK_METHOD3(Encode,
               int32_t(const VideoFrame& inputImage,
                       const CodecSpecificInfo* codecSpecificInfo,
@ -47,24 +50,25 @@ class MockVideoEncoder : public VideoEncoder {

class MockDecodedImageCallback : public DecodedImageCallback {
 public:
  MOCK_METHOD1(Decoded, int32_t(VideoFrame& decodedImage));  // NOLINT
  MOCK_METHOD2(Decoded,
               int32_t(VideoFrame& decodedImage,  // NOLINT
                       int64_t decode_time_ms));
  MOCK_METHOD1(ReceivedDecodedReferenceFrame,
               int32_t(const uint64_t pictureId));
  MOCK_METHOD1(ReceivedDecodedFrame, int32_t(const uint64_t pictureId));
};

class MockVideoDecoder : public VideoDecoder {
 public:
  MOCK_METHOD2(InitDecode,
               int32_t(const VideoCodec* codecSettings, int32_t numberOfCores));
  MOCK_METHOD5(Decode,
               int32_t(const EncodedImage& inputImage,
                       bool missingFrames,
                       const RTPFragmentationHeader* fragmentation,
                       const CodecSpecificInfo* codecSpecificInfo,
                       int64_t renderTimeMs));
  MOCK_METHOD1(RegisterDecodeCompleteCallback,
               int32_t(DecodedImageCallback* callback));
  MOCK_METHOD0(Release, int32_t());
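Note: the classes above are plain gmock mocks; a minimal usage sketch (a
hypothetical test body, not part of this CL) could look like this:

// Hypothetical gmock usage sketch; assumes a gtest target linking this header.
TEST(MockVideoEncoderTest, InitEncodeIsForwarded) {
  webrtc::MockVideoEncoder encoder;
  webrtc::VideoCodec settings = {};  // zero-initialized codec settings
  // Expect exactly one InitEncode() call and make it report success.
  EXPECT_CALL(encoder, InitEncode(&settings, 1, 1200))
      .WillOnce(::testing::Return(WEBRTC_VIDEO_CODEC_OK));
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, 1, 1200));
}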


@ -21,10 +21,9 @@
#include "webrtc/video_encoder.h" #include "webrtc/video_encoder.h"
#include "webrtc/video_frame.h" #include "webrtc/video_frame.h"
namespace webrtc namespace webrtc {
{
class RTPFragmentationHeader; // forward declaration class RTPFragmentationHeader; // forward declaration
// Note: if any pointers are added to this struct, it must be fitted // Note: if any pointers are added to this struct, it must be fitted
// with a copy-constructor. See below. // with a copy-constructor. See below.
@ -90,12 +89,11 @@ union CodecSpecificInfoUnion {
// Note: if any pointers are added to this struct or its sub-structs, it // Note: if any pointers are added to this struct or its sub-structs, it
// must be fitted with a copy-constructor. This is because it is copied // must be fitted with a copy-constructor. This is because it is copied
// in the copy-constructor of VCMEncodedFrame. // in the copy-constructor of VCMEncodedFrame.
struct CodecSpecificInfo struct CodecSpecificInfo {
{ VideoCodecType codecType;
VideoCodecType codecType; CodecSpecificInfoUnion codecSpecific;
CodecSpecificInfoUnion codecSpecific;
}; };
} // namespace webrtc } // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_ #endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
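Note: because CodecSpecificInfo is pointer-free, the compiler-generated copy
in VCMEncodedFrame is safe and callers simply fill it by value. A sketch,
where the VP8 member name is an assumption based on CodecSpecificInfoUnion:

// Sketch only; the fields inside codecSpecific.VP8 are assumed, not shown above.
webrtc::CodecSpecificInfo info;
info.codecType = webrtc::kVideoCodecVP8;
info.codecSpecific.VP8.pictureId = webrtc::kNoPictureId;  // assumed field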


@ -27,8 +27,7 @@
#include "webrtc/system_wrappers/include/event_wrapper.h" #include "webrtc/system_wrappers/include/event_wrapper.h"
#include "webrtc/video_frame.h" #include "webrtc/video_frame.h"
namespace webrtc namespace webrtc {
{
class Clock; class Clock;
class EncodedImageCallback; class EncodedImageCallback;
@ -47,494 +46,524 @@ class EventFactoryImpl : public EventFactory {
public: public:
virtual ~EventFactoryImpl() {} virtual ~EventFactoryImpl() {}
virtual EventWrapper* CreateEvent() { virtual EventWrapper* CreateEvent() { return EventWrapper::Create(); }
return EventWrapper::Create();
}
}; };
// Used to indicate which decode with errors mode should be used. // Used to indicate which decode with errors mode should be used.
enum VCMDecodeErrorMode { enum VCMDecodeErrorMode {
kNoErrors, // Never decode with errors. Video will freeze kNoErrors, // Never decode with errors. Video will freeze
// if nack is disabled. // if nack is disabled.
kSelectiveErrors, // Frames that are determined decodable in kSelectiveErrors, // Frames that are determined decodable in
// VCMSessionInfo may be decoded with missing // VCMSessionInfo may be decoded with missing
// packets. As not all incomplete frames will be // packets. As not all incomplete frames will be
// decodable, video will freeze if nack is disabled. // decodable, video will freeze if nack is disabled.
kWithErrors // Release frames as needed. Errors may be kWithErrors // Release frames as needed. Errors may be
// introduced as some encoded frames may not be // introduced as some encoded frames may not be
// complete. // complete.
}; };
class VideoCodingModule : public Module class VideoCodingModule : public Module {
{ public:
public: enum SenderNackMode { kNackNone, kNackAll, kNackSelective };
enum SenderNackMode {
kNackNone,
kNackAll,
kNackSelective
};
enum ReceiverRobustness { enum ReceiverRobustness { kNone, kHardNack, kSoftNack, kReferenceSelection };
kNone,
kHardNack,
kSoftNack,
kReferenceSelection
};
static VideoCodingModule* Create( static VideoCodingModule* Create(
Clock* clock, Clock* clock,
VideoEncoderRateObserver* encoder_rate_observer, VideoEncoderRateObserver* encoder_rate_observer,
VCMQMSettingsCallback* qm_settings_callback); VCMQMSettingsCallback* qm_settings_callback);
static VideoCodingModule* Create(Clock* clock, EventFactory* event_factory); static VideoCodingModule* Create(Clock* clock, EventFactory* event_factory);
static void Destroy(VideoCodingModule* module); static void Destroy(VideoCodingModule* module);
// Get number of supported codecs // Get number of supported codecs
// //
// Return value : Number of supported codecs // Return value : Number of supported codecs
static uint8_t NumberOfCodecs(); static uint8_t NumberOfCodecs();
// Get supported codec settings with using id // Get supported codec settings with using id
// //
// Input: // Input:
// - listId : Id or index of the codec to look up // - listId : Id or index of the codec to look up
// - codec : Memory where the codec settings will be stored // - codec : Memory where the codec settings will be stored
// //
// Return value : VCM_OK, on success // Return value : VCM_OK, on success
// VCM_PARAMETER_ERROR if codec not supported or id too high // VCM_PARAMETER_ERROR if codec not supported or id too
static int32_t Codec(const uint8_t listId, VideoCodec* codec); // high
static int32_t Codec(const uint8_t listId, VideoCodec* codec);
// Get supported codec settings using codec type // Get supported codec settings using codec type
// //
// Input: // Input:
// - codecType : The codec type to get settings for // - codecType : The codec type to get settings for
// - codec : Memory where the codec settings will be stored // - codec : Memory where the codec settings will be stored
// //
// Return value : VCM_OK, on success // Return value : VCM_OK, on success
// VCM_PARAMETER_ERROR if codec not supported // VCM_PARAMETER_ERROR if codec not supported
static int32_t Codec(VideoCodecType codecType, VideoCodec* codec); static int32_t Codec(VideoCodecType codecType, VideoCodec* codec);
/* /*
* Sender * Sender
*/ */
// Registers a codec to be used for encoding. Calling this // Registers a codec to be used for encoding. Calling this
// API multiple times overwrites any previously registered codecs. // API multiple times overwrites any previously registered codecs.
// //
// NOTE: Must be called on the thread that constructed the VCM instance. // NOTE: Must be called on the thread that constructed the VCM instance.
// //
// Input: // Input:
// - sendCodec : Settings for the codec to be registered. // - sendCodec : Settings for the codec to be registered.
// - numberOfCores : The number of cores the codec is allowed // - numberOfCores : The number of cores the codec is allowed
// to use. // to use.
// - maxPayloadSize : The maximum size each payload is allowed // - maxPayloadSize : The maximum size each payload is allowed
// to have. Usually MTU - overhead. // to have. Usually MTU - overhead.
// //
// Return value : VCM_OK, on success. // Return value : VCM_OK, on success.
// < 0, on error. // < 0, on error.
virtual int32_t RegisterSendCodec(const VideoCodec* sendCodec, virtual int32_t RegisterSendCodec(const VideoCodec* sendCodec,
uint32_t numberOfCores, uint32_t numberOfCores,
uint32_t maxPayloadSize) = 0; uint32_t maxPayloadSize) = 0;
// Get the current send codec in use. // Get the current send codec in use.
// //
// If a codec has not been set yet, the |id| property of the return value // If a codec has not been set yet, the |id| property of the return value
// will be 0 and |name| empty. // will be 0 and |name| empty.
// //
// NOTE: This method intentionally does not hold locks and minimizes data // NOTE: This method intentionally does not hold locks and minimizes data
// copying. It must be called on the thread where the VCM was constructed. // copying. It must be called on the thread where the VCM was constructed.
virtual const VideoCodec& GetSendCodec() const = 0; virtual const VideoCodec& GetSendCodec() const = 0;
// DEPRECATED: Use GetSendCodec() instead. // DEPRECATED: Use GetSendCodec() instead.
// //
// API to get the current send codec in use. // API to get the current send codec in use.
// //
// Input: // Input:
// - currentSendCodec : Address where the sendCodec will be written. // - currentSendCodec : Address where the sendCodec will be written.
// //
// Return value : VCM_OK, on success. // Return value : VCM_OK, on success.
// < 0, on error. // < 0, on error.
// //
// NOTE: The returned codec information is not guaranteed to be current when // NOTE: The returned codec information is not guaranteed to be current when
// the call returns. This method acquires a lock that is aligned with // the call returns. This method acquires a lock that is aligned with
// video encoding, so it should be assumed to be allowed to block for // video encoding, so it should be assumed to be allowed to block for
// several milliseconds. // several milliseconds.
virtual int32_t SendCodec(VideoCodec* currentSendCodec) const = 0; virtual int32_t SendCodec(VideoCodec* currentSendCodec) const = 0;
// DEPRECATED: Use GetSendCodec() instead. // DEPRECATED: Use GetSendCodec() instead.
// //
// API to get the current send codec type // API to get the current send codec type
// //
// Return value : Codec type, on success. // Return value : Codec type, on success.
// kVideoCodecUnknown, on error or if no send codec is set // kVideoCodecUnknown, on error or if no send codec is set
// NOTE: Same notes apply as for SendCodec() above. // NOTE: Same notes apply as for SendCodec() above.
virtual VideoCodecType SendCodec() const = 0; virtual VideoCodecType SendCodec() const = 0;
// Register an external encoder object. This can not be used together with // Register an external encoder object. This can not be used together with
// external decoder callbacks. // external decoder callbacks.
// //
// Input: // Input:
// - externalEncoder : Encoder object to be used for encoding frames inserted // - externalEncoder : Encoder object to be used for encoding frames
// with the AddVideoFrame API. // inserted
// - payloadType : The payload type bound which this encoder is bound to. // with the AddVideoFrame API.
// // - payloadType : The payload type bound which this encoder is bound
// Return value : VCM_OK, on success. // to.
// < 0, on error. //
// TODO(pbos): Remove return type when unused elsewhere. // Return value : VCM_OK, on success.
virtual int32_t RegisterExternalEncoder(VideoEncoder* externalEncoder, // < 0, on error.
uint8_t payloadType, // TODO(pbos): Remove return type when unused elsewhere.
bool internalSource = false) = 0; virtual int32_t RegisterExternalEncoder(VideoEncoder* externalEncoder,
uint8_t payloadType,
bool internalSource = false) = 0;
// API to get currently configured encoder target bitrate in bits/s. // API to get currently configured encoder target bitrate in bits/s.
// //
// Return value : 0, on success. // Return value : 0, on success.
// < 0, on error. // < 0, on error.
virtual int Bitrate(unsigned int* bitrate) const = 0; virtual int Bitrate(unsigned int* bitrate) const = 0;
// API to get currently configured encoder target frame rate. // API to get currently configured encoder target frame rate.
// //
// Return value : 0, on success. // Return value : 0, on success.
// < 0, on error. // < 0, on error.
virtual int FrameRate(unsigned int* framerate) const = 0; virtual int FrameRate(unsigned int* framerate) const = 0;
// Sets the parameters describing the send channel. These parameters are inputs to the // Sets the parameters describing the send channel. These parameters are
// Media Optimization inside the VCM and also specifies the target bit rate for the // inputs to the
// encoder. Bit rate used by NACK should already be compensated for by the user. // Media Optimization inside the VCM and also specifies the target bit rate
// // for the
// Input: // encoder. Bit rate used by NACK should already be compensated for by the
// - target_bitrate : The target bitrate for VCM in bits/s. // user.
// - lossRate : Fractions of lost packets the past second. //
// (loss rate in percent = 100 * packetLoss / 255) // Input:
// - rtt : Current round-trip time in ms. // - target_bitrate : The target bitrate for VCM in bits/s.
// // - lossRate : Fractions of lost packets the past second.
// Return value : VCM_OK, on success. // (loss rate in percent = 100 * packetLoss /
// < 0, on error. // 255)
virtual int32_t SetChannelParameters(uint32_t target_bitrate, // - rtt : Current round-trip time in ms.
uint8_t lossRate, //
int64_t rtt) = 0; // Return value : VCM_OK, on success.
// < 0, on error.
virtual int32_t SetChannelParameters(uint32_t target_bitrate,
uint8_t lossRate,
int64_t rtt) = 0;
// Sets the parameters describing the receive channel. These parameters are inputs to the // Sets the parameters describing the receive channel. These parameters are
// Media Optimization inside the VCM. // inputs to the
// // Media Optimization inside the VCM.
// Input: //
// - rtt : Current round-trip time in ms. // Input:
// with the most amount available bandwidth in a conference // - rtt : Current round-trip time in ms.
// scenario // with the most amount available bandwidth in
// // a conference
// Return value : VCM_OK, on success. // scenario
// < 0, on error. //
virtual int32_t SetReceiveChannelParameters(int64_t rtt) = 0; // Return value : VCM_OK, on success.
// < 0, on error.
virtual int32_t SetReceiveChannelParameters(int64_t rtt) = 0;
// Register a transport callback which will be called to deliver the encoded data and // Register a transport callback which will be called to deliver the encoded
// side information. // data and
// // side information.
// Input: //
// - transport : The callback object to register. // Input:
// // - transport : The callback object to register.
// Return value : VCM_OK, on success. //
// < 0, on error. // Return value : VCM_OK, on success.
virtual int32_t RegisterTransportCallback(VCMPacketizationCallback* transport) = 0; // < 0, on error.
virtual int32_t RegisterTransportCallback(
VCMPacketizationCallback* transport) = 0;
// Register video output information callback which will be called to deliver information // Register video output information callback which will be called to deliver
// about the video stream produced by the encoder, for instance the average frame rate and // information
// bit rate. // about the video stream produced by the encoder, for instance the average
// // frame rate and
// Input: // bit rate.
// - outputInformation : The callback object to register. //
// // Input:
// Return value : VCM_OK, on success. // - outputInformation : The callback object to register.
// < 0, on error. //
virtual int32_t RegisterSendStatisticsCallback( // Return value : VCM_OK, on success.
VCMSendStatisticsCallback* sendStats) = 0; // < 0, on error.
virtual int32_t RegisterSendStatisticsCallback(
VCMSendStatisticsCallback* sendStats) = 0;
// Register a video protection callback which will be called to deliver // Register a video protection callback which will be called to deliver
// the requested FEC rate and NACK status (on/off). // the requested FEC rate and NACK status (on/off).
// //
// Input: // Input:
// - protection : The callback object to register. // - protection : The callback object to register.
// //
// Return value : VCM_OK, on success. // Return value : VCM_OK, on success.
// < 0, on error. // < 0, on error.
virtual int32_t RegisterProtectionCallback(VCMProtectionCallback* protection) = 0; virtual int32_t RegisterProtectionCallback(
VCMProtectionCallback* protection) = 0;
// Enable or disable a video protection method. // Enable or disable a video protection method.
// //
// Input: // Input:
// - videoProtection : The method to enable or disable. // - videoProtection : The method to enable or disable.
// - enable : True if the method should be enabled, false if // - enable : True if the method should be enabled, false if
// it should be disabled. // it should be disabled.
// //
// Return value : VCM_OK, on success. // Return value : VCM_OK, on success.
// < 0, on error. // < 0, on error.
virtual int32_t SetVideoProtection(VCMVideoProtection videoProtection, virtual int32_t SetVideoProtection(VCMVideoProtection videoProtection,
bool enable) = 0; bool enable) = 0;
// Add one raw video frame to the encoder. This function does all the necessary // Add one raw video frame to the encoder. This function does all the
// processing, then decides what frame type to encode, or if the frame should be // necessary
// dropped. If the frame should be encoded it passes the frame to the encoder // processing, then decides what frame type to encode, or if the frame should
// before it returns. // be
// // dropped. If the frame should be encoded it passes the frame to the encoder
// Input: // before it returns.
// - videoFrame : Video frame to encode. //
// - codecSpecificInfo : Extra codec information, e.g., pre-parsed in-band signaling. // Input:
// // - videoFrame : Video frame to encode.
// Return value : VCM_OK, on success. // - codecSpecificInfo : Extra codec information, e.g., pre-parsed
// < 0, on error. // in-band signaling.
virtual int32_t AddVideoFrame( //
const VideoFrame& videoFrame, // Return value : VCM_OK, on success.
const VideoContentMetrics* contentMetrics = NULL, // < 0, on error.
const CodecSpecificInfo* codecSpecificInfo = NULL) = 0; virtual int32_t AddVideoFrame(
const VideoFrame& videoFrame,
const VideoContentMetrics* contentMetrics = NULL,
const CodecSpecificInfo* codecSpecificInfo = NULL) = 0;
// Next frame encoded should be an intra frame (keyframe). // Next frame encoded should be an intra frame (keyframe).
// //
// Return value : VCM_OK, on success. // Return value : VCM_OK, on success.
// < 0, on error. // < 0, on error.
virtual int32_t IntraFrameRequest(int stream_index) = 0; virtual int32_t IntraFrameRequest(int stream_index) = 0;
// Frame Dropper enable. Can be used to disable the frame dropping when the encoder // Frame Dropper enable. Can be used to disable the frame dropping when the
// over-uses its bit rate. This API is designed to be used when the encoded frames // encoder
// are supposed to be stored to an AVI file, or when the I420 codec is used and the // over-uses its bit rate. This API is designed to be used when the encoded
// target bit rate shouldn't affect the frame rate. // frames
// // are supposed to be stored to an AVI file, or when the I420 codec is used
// Input: // and the
// - enable : True to enable the setting, false to disable it. // target bit rate shouldn't affect the frame rate.
// //
// Return value : VCM_OK, on success. // Input:
// < 0, on error. // - enable : True to enable the setting, false to disable it.
virtual int32_t EnableFrameDropper(bool enable) = 0; //
// Return value : VCM_OK, on success.
// < 0, on error.
virtual int32_t EnableFrameDropper(bool enable) = 0;
/*
* Receiver
*/
/* // Register possible receive codecs, can be called multiple times for
* Receiver // different codecs.
*/ // The module will automatically switch between registered codecs depending on
// the
// payload type of incoming frames. The actual decoder will be created when
// needed.
//
// Input:
// - receiveCodec : Settings for the codec to be registered.
// - numberOfCores : Number of CPU cores that the decoder is allowed
// to use.
// - requireKeyFrame : Set this to true if you don't want any delta
// frames
// to be decoded until the first key frame has been
// decoded.
//
// Return value : VCM_OK, on success.
// < 0, on error.
virtual int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec,
int32_t numberOfCores,
bool requireKeyFrame = false) = 0;
// Register possible receive codecs, can be called multiple times for different codecs. // Register an externally defined decoder/renderer object. Can be a decoder
// The module will automatically switch between registered codecs depending on the // only or a
// payload type of incoming frames. The actual decoder will be created when needed. // decoder coupled with a renderer. Note that RegisterReceiveCodec must be
// // called to
// Input: // be used for decoding incoming streams.
// - receiveCodec : Settings for the codec to be registered. //
// - numberOfCores : Number of CPU cores that the decoder is allowed to use. // Input:
// - requireKeyFrame : Set this to true if you don't want any delta frames // - externalDecoder : The external decoder/renderer object.
// to be decoded until the first key frame has been decoded. // - payloadType : The payload type which this decoder should
// // be
// Return value : VCM_OK, on success. // registered to.
// < 0, on error. //
virtual int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec, virtual void RegisterExternalDecoder(VideoDecoder* externalDecoder,
int32_t numberOfCores, uint8_t payloadType) = 0;
bool requireKeyFrame = false) = 0;
// Register an externally defined decoder/renderer object. Can be a decoder only or a // Register a receive callback. Will be called whenever there is a new frame
// decoder coupled with a renderer. Note that RegisterReceiveCodec must be called to // ready
// be used for decoding incoming streams. // for rendering.
// //
// Input: // Input:
// - externalDecoder : The external decoder/renderer object. // - receiveCallback : The callback object to be used by the
// - payloadType : The payload type which this decoder should be // module when a
// registered to. // frame is ready for rendering.
// // De-register with a NULL pointer.
virtual void RegisterExternalDecoder(VideoDecoder* externalDecoder, //
uint8_t payloadType) = 0; // Return value : VCM_OK, on success.
// < 0, on error.
virtual int32_t RegisterReceiveCallback(
VCMReceiveCallback* receiveCallback) = 0;
// Register a receive callback. Will be called whenever there is a new frame ready // Register a receive statistics callback which will be called to deliver
// for rendering. // information
// // about the video stream received by the receiving side of the VCM, for
// Input: // instance the
// - receiveCallback : The callback object to be used by the module when a // average frame rate and bit rate.
// frame is ready for rendering. //
// De-register with a NULL pointer. // Input:
// // - receiveStats : The callback object to register.
// Return value : VCM_OK, on success. //
// < 0, on error. // Return value : VCM_OK, on success.
virtual int32_t RegisterReceiveCallback(VCMReceiveCallback* receiveCallback) = 0; // < 0, on error.
virtual int32_t RegisterReceiveStatisticsCallback(
VCMReceiveStatisticsCallback* receiveStats) = 0;
// Register a receive statistics callback which will be called to deliver information // Register a decoder timing callback which will be called to deliver
// about the video stream received by the receiving side of the VCM, for instance the // information about the timing of the decoder in the receiving side of the
// average frame rate and bit rate. // VCM, for instance the current and maximum frame decode latency.
// //
// Input: // Input:
// - receiveStats : The callback object to register. // - decoderTiming : The callback object to register.
// //
// Return value : VCM_OK, on success. // Return value : VCM_OK, on success.
// < 0, on error. // < 0, on error.
virtual int32_t RegisterReceiveStatisticsCallback( virtual int32_t RegisterDecoderTimingCallback(
VCMReceiveStatisticsCallback* receiveStats) = 0; VCMDecoderTimingCallback* decoderTiming) = 0;
// Register a decoder timing callback which will be called to deliver // Register a frame type request callback. This callback will be called when
// information about the timing of the decoder in the receiving side of the // the
// VCM, for instance the current and maximum frame decode latency. // module needs to request specific frame types from the send side.
// //
// Input: // Input:
// - decoderTiming : The callback object to register. // - frameTypeCallback : The callback object to be used by the
// // module when
// Return value : VCM_OK, on success. // requesting a specific type of frame from
// < 0, on error. // the send side.
virtual int32_t RegisterDecoderTimingCallback( // De-register with a NULL pointer.
VCMDecoderTimingCallback* decoderTiming) = 0; //
// Return value : VCM_OK, on success.
// < 0, on error.
virtual int32_t RegisterFrameTypeCallback(
VCMFrameTypeCallback* frameTypeCallback) = 0;
// Register a frame type request callback. This callback will be called when the // Registers a callback which is called whenever the receive side of the VCM
// module needs to request specific frame types from the send side. // encounters holes in the packet sequence and needs packets to be
// // retransmitted.
// Input: //
// - frameTypeCallback : The callback object to be used by the module when // Input:
// requesting a specific type of frame from the send side. // - callback : The callback to be registered in the VCM.
// De-register with a NULL pointer. //
// // Return value : VCM_OK, on success.
// Return value : VCM_OK, on success. // <0, on error.
// < 0, on error. virtual int32_t RegisterPacketRequestCallback(
virtual int32_t RegisterFrameTypeCallback( VCMPacketRequestCallback* callback) = 0;
VCMFrameTypeCallback* frameTypeCallback) = 0;
// Registers a callback which is called whenever the receive side of the VCM // Waits for the next frame in the jitter buffer to become complete
// encounters holes in the packet sequence and needs packets to be retransmitted. // (waits no longer than maxWaitTimeMs), then passes it to the decoder for
// // decoding.
// Input: // Should be called as often as possible to get the most out of the decoder.
// - callback : The callback to be registered in the VCM. //
// // Return value : VCM_OK, on success.
// Return value : VCM_OK, on success. // < 0, on error.
// <0, on error. virtual int32_t Decode(uint16_t maxWaitTimeMs = 200) = 0;
virtual int32_t RegisterPacketRequestCallback(
VCMPacketRequestCallback* callback) = 0;
// Waits for the next frame in the jitter buffer to become complete // Registers a callback which conveys the size of the render buffer.
// (waits no longer than maxWaitTimeMs), then passes it to the decoder for decoding. virtual int RegisterRenderBufferSizeCallback(
// Should be called as often as possible to get the most out of the decoder. VCMRenderBufferSizeCallback* callback) = 0;
//
// Return value : VCM_OK, on success.
// < 0, on error.
virtual int32_t Decode(uint16_t maxWaitTimeMs = 200) = 0;
// Registers a callback which conveys the size of the render buffer. // Reset the decoder state to the initial state.
virtual int RegisterRenderBufferSizeCallback( //
VCMRenderBufferSizeCallback* callback) = 0; // Return value : VCM_OK, on success.
// < 0, on error.
virtual int32_t ResetDecoder() = 0;
// Reset the decoder state to the initial state. // API to get the codec which is currently used for decoding by the module.
// //
// Return value : VCM_OK, on success. // Input:
// < 0, on error. // - currentReceiveCodec : Settings for the codec to be registered.
virtual int32_t ResetDecoder() = 0; //
// Return value : VCM_OK, on success.
// < 0, on error.
virtual int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const = 0;
// API to get the codec which is currently used for decoding by the module. // API to get the codec type currently used for decoding by the module.
// //
// Input: // Return value : codecy type, on success.
// - currentReceiveCodec : Settings for the codec to be registered. // kVideoCodecUnknown, on error or if no receive codec is
// // registered
// Return value : VCM_OK, on success. virtual VideoCodecType ReceiveCodec() const = 0;
// < 0, on error.
virtual int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const = 0;
// API to get the codec type currently used for decoding by the module. // Insert a parsed packet into the receiver side of the module. Will be placed
// // in the
// Return value : codecy type, on success. // jitter buffer waiting for the frame to become complete. Returns as soon as
// kVideoCodecUnknown, on error or if no receive codec is registered // the packet
virtual VideoCodecType ReceiveCodec() const = 0; // has been placed in the jitter buffer.
//
// Input:
// - incomingPayload : Payload of the packet.
// - payloadLength : Length of the payload.
// - rtpInfo : The parsed header.
//
// Return value : VCM_OK, on success.
// < 0, on error.
virtual int32_t IncomingPacket(const uint8_t* incomingPayload,
size_t payloadLength,
const WebRtcRTPHeader& rtpInfo) = 0;
// Insert a parsed packet into the receiver side of the module. Will be placed in the // Minimum playout delay (Used for lip-sync). This is the minimum delay
// jitter buffer waiting for the frame to become complete. Returns as soon as the packet // required
// has been placed in the jitter buffer. // to sync with audio. Not included in VideoCodingModule::Delay()
// // Defaults to 0 ms.
// Input: //
// - incomingPayload : Payload of the packet. // Input:
// - payloadLength : Length of the payload. // - minPlayoutDelayMs : Additional delay in ms.
// - rtpInfo : The parsed header. //
// // Return value : VCM_OK, on success.
// Return value : VCM_OK, on success. // < 0, on error.
// < 0, on error. virtual int32_t SetMinimumPlayoutDelay(uint32_t minPlayoutDelayMs) = 0;
virtual int32_t IncomingPacket(const uint8_t* incomingPayload,
size_t payloadLength,
const WebRtcRTPHeader& rtpInfo) = 0;
// Minimum playout delay (Used for lip-sync). This is the minimum delay required // Set the time required by the renderer to render a frame.
// to sync with audio. Not included in VideoCodingModule::Delay() //
// Defaults to 0 ms. // Input:
// // - timeMS : The time in ms required by the renderer to render a
// Input: // frame.
// - minPlayoutDelayMs : Additional delay in ms. //
// // Return value : VCM_OK, on success.
// Return value : VCM_OK, on success. // < 0, on error.
// < 0, on error. virtual int32_t SetRenderDelay(uint32_t timeMS) = 0;
virtual int32_t SetMinimumPlayoutDelay(uint32_t minPlayoutDelayMs) = 0;
// Set the time required by the renderer to render a frame. // The total delay desired by the VCM. Can be less than the minimum
// // delay set with SetMinimumPlayoutDelay.
// Input: //
// - timeMS : The time in ms required by the renderer to render a frame. // Return value : Total delay in ms, on success.
// // < 0, on error.
// Return value : VCM_OK, on success. virtual int32_t Delay() const = 0;
// < 0, on error.
virtual int32_t SetRenderDelay(uint32_t timeMS) = 0;
// The total delay desired by the VCM. Can be less than the minimum // Returns the number of packets discarded by the jitter buffer due to being
// delay set with SetMinimumPlayoutDelay. // too late. This can include duplicated packets which arrived after the
// // frame was sent to the decoder. Therefore packets which were prematurely
// Return value : Total delay in ms, on success. // NACKed will be counted.
// < 0, on error. virtual uint32_t DiscardedPackets() const = 0;
virtual int32_t Delay() const = 0;
// Returns the number of packets discarded by the jitter buffer due to being // Robustness APIs
// too late. This can include duplicated packets which arrived after the
// frame was sent to the decoder. Therefore packets which were prematurely
// NACKed will be counted.
virtual uint32_t DiscardedPackets() const = 0;
// Set the receiver robustness mode. The mode decides how the receiver
// responds to losses in the stream. The type of counter-measure (soft or
// hard NACK, dual decoder, RPS, etc.) is selected through the
// robustnessMode parameter. The errorMode parameter decides if it is
// allowed to display frames corrupted by losses. Note that not all
// combinations of the two parameters are feasible. An error will be
// returned for invalid combinations.
// Input:
// - robustnessMode : selected robustness mode.
// - errorMode : selected error mode.
//
// Return value : VCM_OK, on success;
// < 0, on error.
virtual int SetReceiverRobustnessMode(ReceiverRobustness robustnessMode,
VCMDecodeErrorMode errorMode) = 0;
// Robustness APIs // Set the decode error mode. The mode decides which errors (if any) are
// allowed in decodable frames. Note that setting decode_error_mode to
// anything other than kWithErrors without enabling nack will cause
// long-term freezes (resulting from frequent key frame requests) if
// packet loss occurs.
virtual void SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) = 0;
// Set the receiver robustness mode. The mode decides how the receiver // Sets the maximum number of sequence numbers that we are allowed to NACK
// responds to losses in the stream. The type of counter-measure (soft or // and the oldest sequence number that we will consider to NACK. If a
// hard NACK, dual decoder, RPS, etc.) is selected through the // sequence number older than |max_packet_age_to_nack| is missing
// robustnessMode parameter. The errorMode parameter decides if it is // a key frame will be requested. A key frame will also be requested if the
// allowed to display frames corrupted by losses. Note that not all // time of incomplete or non-continuous frames in the jitter buffer is above
// combinations of the two parameters are feasible. An error will be // |max_incomplete_time_ms|.
// returned for invalid combinations. virtual void SetNackSettings(size_t max_nack_list_size,
// Input: int max_packet_age_to_nack,
// - robustnessMode : selected robustness mode. int max_incomplete_time_ms) = 0;
// - errorMode : selected error mode.
//
// Return value : VCM_OK, on success;
// < 0, on error.
virtual int SetReceiverRobustnessMode(ReceiverRobustness robustnessMode,
VCMDecodeErrorMode errorMode) = 0;
// Set the decode error mode. The mode decides which errors (if any) are // Setting a desired delay to the VCM receiver. Video rendering will be
// allowed in decodable frames. Note that setting decode_error_mode to // delayed by at least desired_delay_ms.
// anything other than kWithErrors without enabling nack will cause virtual int SetMinReceiverDelay(int desired_delay_ms) = 0;
// long-term freezes (resulting from frequent key frame requests) if
// packet loss occurs.
virtual void SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) = 0;
// Sets the maximum number of sequence numbers that we are allowed to NACK // Lets the sender suspend video when the rate drops below
// and the oldest sequence number that we will consider to NACK. If a // |threshold_bps|, and turns back on when the rate goes back up above
// sequence number older than |max_packet_age_to_nack| is missing // |threshold_bps| + |window_bps|.
// a key frame will be requested. A key frame will also be requested if the virtual void SuspendBelowMinBitrate() = 0;
// time of incomplete or non-continuous frames in the jitter buffer is above
// |max_incomplete_time_ms|.
virtual void SetNackSettings(size_t max_nack_list_size,
int max_packet_age_to_nack,
int max_incomplete_time_ms) = 0;
// Setting a desired delay to the VCM receiver. Video rendering will be // Returns true if SuspendBelowMinBitrate is engaged and the video has been
// delayed by at least desired_delay_ms. // suspended due to bandwidth limitations; otherwise false.
virtual int SetMinReceiverDelay(int desired_delay_ms) = 0; virtual bool VideoSuspended() const = 0;
// Lets the sender suspend video when the rate drops below virtual void RegisterPreDecodeImageCallback(
// |threshold_bps|, and turns back on when the rate goes back up above EncodedImageCallback* observer) = 0;
// |threshold_bps| + |window_bps|. virtual void RegisterPostEncodeImageCallback(
virtual void SuspendBelowMinBitrate() = 0; EncodedImageCallback* post_encode_callback) = 0;
// Releases pending decode calls, permitting faster thread shutdown.
// Returns true if SuspendBelowMinBitrate is engaged and the video has been virtual void TriggerDecoderShutdown() = 0;
// suspended due to bandwidth limitations; otherwise false.
virtual bool VideoSuspended() const = 0;
virtual void RegisterPreDecodeImageCallback(
EncodedImageCallback* observer) = 0;
virtual void RegisterPostEncodeImageCallback(
EncodedImageCallback* post_encode_callback) = 0;
// Releases pending decode calls, permitting faster thread shutdown.
virtual void TriggerDecoderShutdown() = 0;
}; };
} // namespace webrtc } // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_H_ #endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_H_
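Note: pulled together, the receive half of this interface is driven roughly as
below; a minimal sketch with error handling, locking and threading omitted,
and with event_factory, payload, payload_length and rtp_info assumed to exist:

// Receive-side usage sketch (assumptions noted above).
webrtc::VideoCodingModule* vcm = webrtc::VideoCodingModule::Create(
    webrtc::Clock::GetRealTimeClock(), event_factory);
webrtc::VideoCodec codec;
webrtc::VideoCodingModule::Codec(webrtc::kVideoCodecVP8, &codec);
vcm->RegisterReceiveCodec(&codec, 1);  // one decoder core
// For every incoming RTP packet, after header parsing:
vcm->IncomingPacket(payload, payload_length, rtp_info);
// On the decode thread: wait up to 50 ms for a complete frame, then decode.
vcm->Decode(50);
webrtc::VideoCodingModule::Destroy(vcm);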


@ -18,23 +18,23 @@
namespace webrtc {

// Error codes
#define VCM_FRAME_NOT_READY 3
#define VCM_REQUEST_SLI 2
#define VCM_MISSING_CALLBACK 1
#define VCM_OK 0
#define VCM_GENERAL_ERROR -1
#define VCM_LEVEL_EXCEEDED -2
#define VCM_MEMORY -3
#define VCM_PARAMETER_ERROR -4
#define VCM_UNKNOWN_PAYLOAD -5
#define VCM_CODEC_ERROR -6
#define VCM_UNINITIALIZED -7
#define VCM_NO_CODEC_REGISTERED -8
#define VCM_JITTER_BUFFER_ERROR -9
#define VCM_OLD_PACKET_ERROR -10
#define VCM_NO_FRAME_DECODED -11
#define VCM_ERROR_REQUEST_SLI -12
#define VCM_NOT_IMPLEMENTED -20

enum { kDefaultStartBitrateKbps = 300 };
@ -65,16 +65,15 @@ class VCMPacketizationCallback {
  virtual void OnEncoderImplementationName(const char* implementation_name) {}

 protected:
  virtual ~VCMPacketizationCallback() {}
};

// Callback class used for passing decoded frames which are ready to be
// rendered.
class VCMReceiveCallback {
 public:
  virtual int32_t FrameToRender(VideoFrame& videoFrame) = 0;  // NOLINT
  virtual int32_t ReceivedDecodedReferenceFrame(const uint64_t pictureId) {
    return -1;
  }
  // Called when the current receive codec changes.
@ -82,23 +81,23 @@ class VCMReceiveCallback {
  virtual void OnDecoderImplementationName(const char* implementation_name) {}

 protected:
  virtual ~VCMReceiveCallback() {}
};

// Callback class used for informing the user of the bit rate and frame rate
// produced by the encoder.
class VCMSendStatisticsCallback {
 public:
  virtual int32_t SendStatistics(const uint32_t bitRate,
                                 const uint32_t frameRate) = 0;

 protected:
  virtual ~VCMSendStatisticsCallback() {}
};

// Callback class used for informing the user of the incoming bit rate and
// frame rate.
class VCMReceiveStatisticsCallback {
 public:
  virtual void OnReceiveRatesUpdated(uint32_t bitRate, uint32_t frameRate) = 0;
@ -106,8 +105,7 @@ class VCMReceiveStatisticsCallback {
  virtual void OnFrameCountsUpdated(const FrameCounts& frame_counts) = 0;

 protected:
  virtual ~VCMReceiveStatisticsCallback() {}
};

// Callback class used for informing the user of decode timing info.
@ -136,8 +134,7 @@ class VCMProtectionCallback {
                              uint32_t* sent_fec_rate_bps) = 0;

 protected:
  virtual ~VCMProtectionCallback() {}
};

class VideoEncoderRateObserver {
@ -146,31 +143,30 @@ class VideoEncoderRateObserver {
  virtual void OnSetRates(uint32_t bitrate_bps, int framerate) = 0;
};

// Callback class used for telling the user what frame type is needed to
// continue decoding.
// Typically a key frame when the stream has been corrupted in some way.
class VCMFrameTypeCallback {
 public:
  virtual int32_t RequestKeyFrame() = 0;
  virtual int32_t SliceLossIndicationRequest(const uint64_t pictureId) {
    return -1;
  }

 protected:
  virtual ~VCMFrameTypeCallback() {}
};

// Callback class used for telling the user about which packet sequence numbers
// are currently missing and need to be resent.
class VCMPacketRequestCallback {
 public:
  virtual int32_t ResendPackets(const uint16_t* sequenceNumbers,
                                uint16_t length) = 0;

 protected:
  virtual ~VCMPacketRequestCallback() {}
};

// Callback used to inform the user of the desired resolution
@ -178,14 +174,13 @@ class VCMPacketRequestCallback {
class VCMQMSettingsCallback {
 public:
  virtual int32_t SetVideoQMSettings(const uint32_t frameRate,
                                     const uint32_t width,
                                     const uint32_t height) = 0;

  virtual void SetTargetFramerate(int frame_rate) = 0;

 protected:
  virtual ~VCMQMSettingsCallback() {}
};

// Callback class used for telling the user about the size (in time) of the
@ -195,10 +190,9 @@ class VCMRenderBufferSizeCallback {
  virtual void RenderBufferSizeMs(int buffer_size_ms) = 0;

 protected:
  virtual ~VCMRenderBufferSizeCallback() {}
};

}  // namespace webrtc

#endif  // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_
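Note: a receiver that forwards these NACKs over RTCP would implement
VCMPacketRequestCallback along these lines; RtcpSender and SendNack() are
placeholder names, not APIs from this CL:

// Sketch; RtcpSender/SendNack are hypothetical placeholders.
class NackForwarder : public webrtc::VCMPacketRequestCallback {
 public:
  explicit NackForwarder(RtcpSender* rtcp) : rtcp_(rtcp) {}
  int32_t ResendPackets(const uint16_t* sequenceNumbers,
                        uint16_t length) override {
    rtcp_->SendNack(sequenceNumbers, length);  // hand off to RTCP machinery
    return VCM_OK;
  }

 private:
  RtcpSender* const rtcp_;
};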


@ -29,4 +29,4 @@
#define WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE -13
#define WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT -14

#endif  // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_


@ -12,103 +12,96 @@
namespace webrtc {

VCMInterFrameDelay::VCMInterFrameDelay(int64_t currentWallClock) {
  Reset(currentWallClock);
}

// Resets the delay estimate
void VCMInterFrameDelay::Reset(int64_t currentWallClock) {
  _zeroWallClock = currentWallClock;
  _wrapArounds = 0;
  _prevWallClock = 0;
  _prevTimestamp = 0;
  _dTS = 0;
}

// Calculates the delay of a frame with the given timestamp.
// This method is called when the frame is complete.
bool VCMInterFrameDelay::CalculateDelay(uint32_t timestamp,
                                        int64_t* delay,
                                        int64_t currentWallClock) {
  if (_prevWallClock == 0) {
    // First set of data, initialization, wait for next frame
    _prevWallClock = currentWallClock;
    _prevTimestamp = timestamp;
    *delay = 0;
    return true;
  }

  int32_t prevWrapArounds = _wrapArounds;
  CheckForWrapArounds(timestamp);

  // This will be -1 for backward wrap arounds and +1 for forward wrap arounds
  int32_t wrapAroundsSincePrev = _wrapArounds - prevWrapArounds;

  // Account for reordering in jitter variance estimate in the future?
  // Note that this also captures incomplete frames which are grabbed
  // for decoding after a later frame has been complete, i.e. real
  // packet losses.
  if ((wrapAroundsSincePrev == 0 && timestamp < _prevTimestamp) ||
      wrapAroundsSincePrev < 0) {
    *delay = 0;
    return false;
  }

  // Compute the compensated timestamp difference and convert it to ms and
  // round it to closest integer.
  _dTS = static_cast<int64_t>(
      (timestamp + wrapAroundsSincePrev * (static_cast<int64_t>(1) << 32) -
       _prevTimestamp) /
          90.0 +
      0.5);

  // frameDelay is the difference of dT and dTS -- i.e. the difference of
  // the wall clock time difference and the timestamp difference between
  // two following frames.
  *delay = static_cast<int64_t>(currentWallClock - _prevWallClock - _dTS);

  _prevTimestamp = timestamp;
  _prevWallClock = currentWallClock;

  return true;
}

// Returns the current difference between incoming timestamps
uint32_t VCMInterFrameDelay::CurrentTimeStampDiffMs() const {
  if (_dTS < 0) {
    return 0;
  }
  return static_cast<uint32_t>(_dTS);
}

// Investigates if the timestamp clock has overflowed since the last timestamp
// and keeps track of the number of wrap arounds since reset.
void VCMInterFrameDelay::CheckForWrapArounds(uint32_t timestamp) {
  if (timestamp < _prevTimestamp) {
    // This difference will probably be less than -2^31 if we have had a wrap
    // around (e.g. timestamp = 1, _previousTimestamp = 2^32 - 1). Since it is
    // cast to a Word32, it should be positive.
    if (static_cast<int32_t>(timestamp - _prevTimestamp) > 0) {
      // Forward wrap around
      _wrapArounds++;
    }
    // This difference will probably be less than -2^31 if we have had a
    // backward wrap around. Since it is cast to a Word32, it should be
    // positive.
  } else if (static_cast<int32_t>(_prevTimestamp - timestamp) > 0) {
    // Backward wrap around
    _wrapArounds--;
  }
}
}  // namespace webrtc
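Note: with 90 kHz RTP timestamps the expected wall-clock spacing of two frames
is (timestamp delta) / 90 ms, so any extra wall-clock gap surfaces in *delay.
A small worked sketch with invented values:

// Frames 3000 ticks (~33 ms) apart in RTP time, 40 ms apart on the wall clock.
webrtc::VCMInterFrameDelay estimator(/*currentWallClock=*/10000);
int64_t delay = 0;
estimator.CalculateDelay(90000, &delay, 10000);  // first frame: delay == 0
estimator.CalculateDelay(93000, &delay, 10040);  // delay == 40 - 33 = 7 ms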


@ -13,54 +13,55 @@
#include "webrtc/typedefs.h" #include "webrtc/typedefs.h"
namespace webrtc namespace webrtc {
{
class VCMInterFrameDelay class VCMInterFrameDelay {
{ public:
public: explicit VCMInterFrameDelay(int64_t currentWallClock);
VCMInterFrameDelay(int64_t currentWallClock);
// Resets the estimate. Zeros are given as parameters. // Resets the estimate. Zeros are given as parameters.
void Reset(int64_t currentWallClock); void Reset(int64_t currentWallClock);
// Calculates the delay of a frame with the given timestamp. // Calculates the delay of a frame with the given timestamp.
// This method is called when the frame is complete. // This method is called when the frame is complete.
// //
// Input: // Input:
// - timestamp : RTP timestamp of a received frame // - timestamp : RTP timestamp of a received frame
// - *delay : Pointer to memory where the result should be stored // - *delay : Pointer to memory where the result should be
// - currentWallClock : The current time in milliseconds. // stored
// Should be -1 for normal operation, only used for testing. // - currentWallClock : The current time in milliseconds.
// Return value : true if OK, false when reordered timestamps // Should be -1 for normal operation, only used
bool CalculateDelay(uint32_t timestamp, // for testing.
int64_t *delay, // Return value : true if OK, false when reordered timestamps
int64_t currentWallClock); bool CalculateDelay(uint32_t timestamp,
int64_t* delay,
int64_t currentWallClock);
// Returns the current difference between incoming timestamps // Returns the current difference between incoming timestamps
// //
// Return value : Wrap-around compensated difference between incoming // Return value : Wrap-around compensated difference between
// timestamps. // incoming
uint32_t CurrentTimeStampDiffMs() const; // timestamps.
uint32_t CurrentTimeStampDiffMs() const;
private: private:
// Controls if the RTP timestamp counter has had a wrap around // Controls if the RTP timestamp counter has had a wrap around
// between the current and the previously received frame. // between the current and the previously received frame.
// //
// Input: // Input:
// - timestmap : RTP timestamp of the current frame. // - timestmap : RTP timestamp of the current frame.
void CheckForWrapArounds(uint32_t timestamp); void CheckForWrapArounds(uint32_t timestamp);
int64_t _zeroWallClock; // Local timestamp of the first video packet received int64_t _zeroWallClock; // Local timestamp of the first video packet received
int32_t _wrapArounds; // Number of wrapArounds detected int32_t _wrapArounds; // Number of wrapArounds detected
// The previous timestamp passed to the delay estimate // The previous timestamp passed to the delay estimate
uint32_t _prevTimestamp; uint32_t _prevTimestamp;
// The previous wall clock timestamp used by the delay estimate // The previous wall clock timestamp used by the delay estimate
int64_t _prevWallClock; int64_t _prevWallClock;
// Wrap-around compensated difference between incoming timestamps // Wrap-around compensated difference between incoming timestamps
int64_t _dTS; int64_t _dTS;
}; };
} // namespace webrtc } // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_ #endif // WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_


@ -13,14 +13,12 @@
#include "webrtc/typedefs.h" #include "webrtc/typedefs.h"
namespace webrtc namespace webrtc {
{
#define MASK_32_BITS(x) (0xFFFFFFFF & (x)) #define MASK_32_BITS(x) (0xFFFFFFFF & (x))
inline uint32_t MaskWord64ToUWord32(int64_t w64) inline uint32_t MaskWord64ToUWord32(int64_t w64) {
{ return static_cast<uint32_t>(MASK_32_BITS(w64));
return static_cast<uint32_t>(MASK_32_BITS(w64));
} }
#define VCM_MAX(a, b) (((a) > (b)) ? (a) : (b)) #define VCM_MAX(a, b) (((a) > (b)) ? (a) : (b))
@ -34,11 +32,10 @@ inline uint32_t MaskWord64ToUWord32(int64_t w64)
#define VCM_NO_RECEIVER_ID 0 #define VCM_NO_RECEIVER_ID 0
inline int32_t VCMId(const int32_t vcmId, const int32_t receiverId = 0) inline int32_t VCMId(const int32_t vcmId, const int32_t receiverId = 0) {
{ return static_cast<int32_t>((vcmId << 16) + receiverId);
return static_cast<int32_t>((vcmId << 16) + receiverId);
} }
} // namespace webrtc } // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_INTERNAL_DEFINES_H_ #endif // WEBRTC_MODULES_VIDEO_CODING_INTERNAL_DEFINES_H_
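Note: VCMId simply packs the module id into the upper 16 bits and the receiver
id into the lower 16, e.g.:

int32_t id = webrtc::VCMId(2, 5);  // (2 << 16) + 5 == 0x00020005 == 131077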


@ -93,7 +93,7 @@ int FrameList::RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
}

void FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
                                        UnorderedFrameList* free_frames) {
  while (!empty()) {
    VCMFrameBuffer* oldest_frame = Front();
    bool remove_frame = false;
@ -431,8 +431,8 @@ void VCMJitterBuffer::IncomingRateStatistics(unsigned int* framerate,
  if (incoming_bit_count_ == 0) {
    *bitrate = 0;
  } else {
    *bitrate =
        10 * ((100 * incoming_bit_count_) / static_cast<unsigned int>(diff));
  }
  incoming_bit_rate_ = *bitrate;
@ -473,8 +473,8 @@ bool VCMJitterBuffer::CompleteSequenceWithNextFrame() {
// Returns immediately or a |max_wait_time_ms| ms event hang waiting for a
// complete frame, |max_wait_time_ms| decided by caller.
bool VCMJitterBuffer::NextCompleteTimestamp(uint32_t max_wait_time_ms,
                                            uint32_t* timestamp) {
  crit_sect_->Enter();
  if (!running_) {
    crit_sect_->Leave();
@ -484,13 +484,13 @@ bool VCMJitterBuffer::NextCompleteTimestamp(
  if (decodable_frames_.empty() ||
      decodable_frames_.Front()->GetState() != kStateComplete) {
    const int64_t end_wait_time_ms =
        clock_->TimeInMilliseconds() + max_wait_time_ms;
    int64_t wait_time_ms = max_wait_time_ms;
    while (wait_time_ms > 0) {
      crit_sect_->Leave();
      const EventTypeWrapper ret =
          frame_event_->Wait(static_cast<uint32_t>(wait_time_ms));
      crit_sect_->Enter();
      if (ret == kEventSignaled) {
        // Are we shutting down the jitter buffer?
@ -548,8 +548,8 @@ bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(uint32_t* timestamp) {
    // If we have exactly one frame in the buffer, release it only if it is
    // complete. We know decodable_frames_ is not empty due to the previous
    // check.
    if (decodable_frames_.size() == 1 && incomplete_frames_.empty() &&
        oldest_frame->GetState() != kStateComplete) {
      return false;
    }
  }
@ -588,8 +588,7 @@ VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
    } else {
      // Wait for this one to get complete.
      waiting_for_completion_.frame_size = frame->Length();
      waiting_for_completion_.latest_packet_time = frame->LatestPacketTimeMs();
      waiting_for_completion_.timestamp = frame->TimeStamp();
    }
  }
@ -742,8 +741,8 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
      frame->InsertPacket(packet, now_ms, decode_error_mode_, frame_data);

  if (previous_state != kStateComplete) {
    TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(), "timestamp",
                             frame->TimeStamp());
  }

  if (buffer_state > 0) {
@ -760,8 +759,8 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
        buffer_state = kFlushIndicator;
      }

      latest_received_sequence_number_ =
          LatestSequenceNumber(latest_received_sequence_number_, packet.seqNum);
    }
  }
@ -794,8 +793,8 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
    } else {
      incomplete_frames_.InsertFrame(frame);
      // If NACKs are enabled, keyframes are triggered by |GetNackList|.
      if (nack_mode_ == kNoNack &&
          NonContinuousOrIncompleteDuration() >
              90 * kMaxDiscontinuousFramesTime) {
        return kFlushIndicator;
      }
    }
@ -809,8 +809,9 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
    } else {
      incomplete_frames_.InsertFrame(frame);
      // If NACKs are enabled, keyframes are triggered by |GetNackList|.
      if (nack_mode_ == kNoNack &&
          NonContinuousOrIncompleteDuration() >
              90 * kMaxDiscontinuousFramesTime) {
        return kFlushIndicator;
      }
    }
@ -831,12 +832,14 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
    case kFlushIndicator:
      free_frames_.push_back(frame);
      return kFlushIndicator;
    default:
      assert(false);
  }
  return buffer_state;
}

bool VCMJitterBuffer::IsContinuousInState(
    const VCMFrameBuffer& frame,
    const VCMDecodingState& decoding_state) const {
  // Is this frame (complete or decodable) and continuous?
  // kStateDecodable will never be set when decode_error_mode_ is false
@ -854,7 +857,7 @@ bool VCMJitterBuffer::IsContinuous(const VCMFrameBuffer& frame) const {
VCMDecodingState decoding_state; VCMDecodingState decoding_state;
decoding_state.CopyFrom(last_decoded_state_); decoding_state.CopyFrom(last_decoded_state_);
for (FrameList::const_iterator it = decodable_frames_.begin(); for (FrameList::const_iterator it = decodable_frames_.begin();
it != decodable_frames_.end(); ++it) { it != decodable_frames_.end(); ++it) {
VCMFrameBuffer* decodable_frame = it->second; VCMFrameBuffer* decodable_frame = it->second;
if (IsNewerTimestamp(decodable_frame->TimeStamp(), frame.TimeStamp())) { if (IsNewerTimestamp(decodable_frame->TimeStamp(), frame.TimeStamp())) {
break; break;
@ -887,7 +890,7 @@ void VCMJitterBuffer::FindAndInsertContinuousFramesWithState(
// 1. Continuous base or sync layer. // 1. Continuous base or sync layer.
// 2. The end of the list was reached. // 2. The end of the list was reached.
for (FrameList::iterator it = incomplete_frames_.begin(); for (FrameList::iterator it = incomplete_frames_.begin();
it != incomplete_frames_.end();) { it != incomplete_frames_.end();) {
VCMFrameBuffer* frame = it->second; VCMFrameBuffer* frame = it->second;
if (IsNewerTimestamp(original_decoded_state.time_stamp(), if (IsNewerTimestamp(original_decoded_state.time_stamp(),
frame->TimeStamp())) { frame->TimeStamp())) {
@ -997,16 +1000,18 @@ std::vector<uint16_t> VCMJitterBuffer::GetNackList(bool* request_key_frame) {
if (last_decoded_state_.in_initial_state()) { if (last_decoded_state_.in_initial_state()) {
VCMFrameBuffer* next_frame = NextFrame(); VCMFrameBuffer* next_frame = NextFrame();
const bool first_frame_is_key = next_frame && const bool first_frame_is_key = next_frame &&
next_frame->FrameType() == kVideoFrameKey && next_frame->FrameType() == kVideoFrameKey &&
next_frame->HaveFirstPacket(); next_frame->HaveFirstPacket();
if (!first_frame_is_key) { if (!first_frame_is_key) {
bool have_non_empty_frame = decodable_frames_.end() != find_if( bool have_non_empty_frame =
decodable_frames_.begin(), decodable_frames_.end(), decodable_frames_.end() != find_if(decodable_frames_.begin(),
HasNonEmptyState); decodable_frames_.end(),
HasNonEmptyState);
if (!have_non_empty_frame) { if (!have_non_empty_frame) {
have_non_empty_frame = incomplete_frames_.end() != find_if( have_non_empty_frame =
incomplete_frames_.begin(), incomplete_frames_.end(), incomplete_frames_.end() != find_if(incomplete_frames_.begin(),
HasNonEmptyState); incomplete_frames_.end(),
HasNonEmptyState);
} }
bool found_key_frame = RecycleFramesUntilKeyFrame(); bool found_key_frame = RecycleFramesUntilKeyFrame();
if (!found_key_frame) { if (!found_key_frame) {
@ -1025,8 +1030,8 @@ std::vector<uint16_t> VCMJitterBuffer::GetNackList(bool* request_key_frame) {
LOG_F(LS_WARNING) << "Too long non-decodable duration: " LOG_F(LS_WARNING) << "Too long non-decodable duration: "
<< non_continuous_incomplete_duration << " > " << non_continuous_incomplete_duration << " > "
<< 90 * max_incomplete_time_ms_; << 90 * max_incomplete_time_ms_;
FrameList::reverse_iterator rit = find_if(incomplete_frames_.rbegin(), FrameList::reverse_iterator rit = find_if(
incomplete_frames_.rend(), IsKeyFrame); incomplete_frames_.rbegin(), incomplete_frames_.rend(), IsKeyFrame);
if (rit == incomplete_frames_.rend()) { if (rit == incomplete_frames_.rend()) {
// Request a key frame if we don't have one already. // Request a key frame if we don't have one already.
*request_key_frame = true; *request_key_frame = true;
@ -1066,8 +1071,7 @@ bool VCMJitterBuffer::UpdateNackList(uint16_t sequence_number) {
// Make sure we don't add packets which are already too old to be decoded. // Make sure we don't add packets which are already too old to be decoded.
if (!last_decoded_state_.in_initial_state()) { if (!last_decoded_state_.in_initial_state()) {
latest_received_sequence_number_ = LatestSequenceNumber( latest_received_sequence_number_ = LatestSequenceNumber(
latest_received_sequence_number_, latest_received_sequence_number_, last_decoded_state_.sequence_num());
last_decoded_state_.sequence_num());
} }
if (IsNewerSequenceNumber(sequence_number, if (IsNewerSequenceNumber(sequence_number,
latest_received_sequence_number_)) { latest_received_sequence_number_)) {
@ -1117,8 +1121,8 @@ bool VCMJitterBuffer::MissingTooOldPacket(
if (missing_sequence_numbers_.empty()) { if (missing_sequence_numbers_.empty()) {
return false; return false;
} }
const uint16_t age_of_oldest_missing_packet = latest_sequence_number - const uint16_t age_of_oldest_missing_packet =
*missing_sequence_numbers_.begin(); latest_sequence_number - *missing_sequence_numbers_.begin();
// Recycle frames if the NACK list contains too old sequence numbers as // Recycle frames if the NACK list contains too old sequence numbers as
// the packets may have already been dropped by the sender. // the packets may have already been dropped by the sender.
return age_of_oldest_missing_packet > max_packet_age_to_nack_; return age_of_oldest_missing_packet > max_packet_age_to_nack_;
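The age computation above relies on unsigned 16-bit wraparound: even when the RTP sequence counter wraps past 65535, latest minus oldest still yields the forward distance. A minimal standalone sketch (example values are hypothetical):

#include <cassert>
#include <cstdint>

int main() {
  const uint16_t oldest_missing = 65530;  // Just before the wrap.
  const uint16_t latest = 5;              // Just after the wrap.
  // Modular arithmetic on uint16_t gives the forward distance, 11.
  const uint16_t age = latest - oldest_missing;
  assert(age == 11);
  return 0;
}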
@@ -1126,8 +1130,8 @@ bool VCMJitterBuffer::MissingTooOldPacket(
bool VCMJitterBuffer::HandleTooOldPackets(uint16_t latest_sequence_number) {
  bool key_frame_found = false;
  const uint16_t age_of_oldest_missing_packet =
      latest_sequence_number - *missing_sequence_numbers_.begin();
  LOG_F(LS_WARNING) << "NACK list contains too old sequence numbers: "
                    << age_of_oldest_missing_packet << " > "
                    << max_packet_age_to_nack_;
@@ -1141,9 +1145,9 @@ void VCMJitterBuffer::DropPacketsFromNackList(
    uint16_t last_decoded_sequence_number) {
  // Erase all sequence numbers from the NACK list which we won't need any
  // longer.
  missing_sequence_numbers_.erase(
      missing_sequence_numbers_.begin(),
      missing_sequence_numbers_.upper_bound(last_decoded_sequence_number));
}
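Since the set is ordered oldest-to-newest, upper_bound finds the first entry newer than the last decoded sequence number, so the erase drops exactly the stale entries. A standalone sketch of the same idiom with a plain std::set<int> (ordering simplified, values hypothetical):

#include <cassert>
#include <set>

int main() {
  std::set<int> missing = {10, 12, 15, 20};
  const int last_decoded = 15;
  // Drop every entry <= last_decoded; upper_bound(15) points at 20.
  missing.erase(missing.begin(), missing.upper_bound(last_decoded));
  assert(missing == std::set<int>({20}));
  return 0;
}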
int64_t VCMJitterBuffer::LastDecodedTimestamp() const {
@@ -1227,11 +1231,11 @@ void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
  incoming_frame_count_++;
  if (frame.FrameType() == kVideoFrameKey) {
    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
                            "KeyComplete");
  } else {
    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
                            "DeltaComplete");
  }
  // Update receive statistics. We count all layers, thus when you use layers
@@ -1249,13 +1253,13 @@ void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
void VCMJitterBuffer::UpdateAveragePacketsPerFrame(int current_number_packets) {
  if (frame_counter_ > kFastConvergeThreshold) {
    average_packets_per_frame_ =
        average_packets_per_frame_ * (1 - kNormalConvergeMultiplier) +
        current_number_packets * kNormalConvergeMultiplier;
  } else if (frame_counter_ > 0) {
    average_packets_per_frame_ =
        average_packets_per_frame_ * (1 - kFastConvergeMultiplier) +
        current_number_packets * kFastConvergeMultiplier;
    frame_counter_++;
  } else {
    average_packets_per_frame_ = current_number_packets;
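Both branches above are exponential moving averages, avg <- avg * (1 - alpha) + sample * alpha, with a larger alpha (0.4) while the filter is warming up and a smaller one (0.2) afterwards. A freestanding sketch of the same update step (sample values hypothetical):

#include <cassert>
#include <cmath>

int main() {
  double avg = 10.0;         // Current average packets per frame.
  const int sample = 20;     // Packets in the newest frame.
  const double alpha = 0.2;  // Corresponds to kNormalConvergeMultiplier.
  avg = avg * (1 - alpha) + sample * alpha;  // 10*0.8 + 20*0.2 = 12.
  assert(std::fabs(avg - 12.0) < 1e-9);
  return 0;
}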
@@ -1277,7 +1281,7 @@ void VCMJitterBuffer::CleanUpOldOrEmptyFrames() {
// Must be called from within |crit_sect_|.
bool VCMJitterBuffer::IsPacketRetransmitted(const VCMPacket& packet) const {
  return missing_sequence_numbers_.find(packet.seqNum) !=
         missing_sequence_numbers_.end();
}

// Must be called under the critical section |crit_sect_|. Should never be
@@ -1309,18 +1313,16 @@ void VCMJitterBuffer::UpdateJitterEstimate(const VCMFrameBuffer& frame,
// Must be called under the critical section |crit_sect_|. Should never be
// called with retransmitted frames, they must be filtered out before this
// function is called.
void VCMJitterBuffer::UpdateJitterEstimate(int64_t latest_packet_time_ms,
                                           uint32_t timestamp,
                                           unsigned int frame_size,
                                           bool incomplete_frame) {
  if (latest_packet_time_ms == -1) {
    return;
  }
  int64_t frame_delay;
  bool not_reordered = inter_frame_delay_.CalculateDelay(
      timestamp, &frame_delay, latest_packet_time_ms);
  // Filter out frames which have been reordered in time by the network
  if (not_reordered) {
    // Update the jitter estimate with the new samples


@@ -30,10 +30,7 @@
namespace webrtc {

enum VCMNackMode { kNack, kNoNack };

// forward declarations
class Clock;
@@ -54,8 +51,7 @@ struct VCMJitterSample {
class TimestampLessThan {
 public:
  bool operator()(uint32_t timestamp1, uint32_t timestamp2) const {
    return IsNewerTimestamp(timestamp2, timestamp1);
  }
};
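The comparator inverts the newer-than test, so a map keyed this way iterates from oldest to newest even across the 32-bit RTP timestamp wrap. A sketch with a hypothetical stand-in predicate (IsNewer is an illustration, not webrtc's IsNewerTimestamp):

#include <cassert>
#include <cstdint>
#include <map>

// Hypothetical wraparound-aware "t1 is newer than t2" predicate.
bool IsNewer(uint32_t t1, uint32_t t2) {
  return static_cast<int32_t>(t1 - t2) > 0;
}

struct OldestFirst {
  bool operator()(uint32_t a, uint32_t b) const { return IsNewer(b, a); }
};

int main() {
  std::map<uint32_t, int, OldestFirst> frames;
  frames[0xFFFFFF00u] = 1;  // Pre-wrap timestamp: the oldest.
  frames[0x00000010u] = 2;  // Post-wrap timestamp: the newest.
  assert(frames.begin()->second == 1);  // Iteration starts at the oldest.
  return 0;
}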
@@ -68,7 +64,7 @@ class FrameList
  VCMFrameBuffer* Front() const;
  VCMFrameBuffer* Back() const;
  int RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
                                 UnorderedFrameList* free_frames);
  void CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
                               UnorderedFrameList* free_frames);
  void Reset(UnorderedFrameList* free_frames);
@@ -141,8 +137,7 @@ class VCMJitterBuffer {
  int num_discarded_packets() const;

  // Statistics, Calculate frame and bit rates.
  void IncomingRateStatistics(unsigned int* framerate, unsigned int* bitrate);

  // Checks if the packet sequence will be complete if the next frame would be
  // grabbed for decoding. That is, if a frame has been lost between the
@@ -177,8 +172,7 @@ class VCMJitterBuffer {
  // Inserts a packet into a frame returned from GetFrame().
  // If the return value is <= 0, |frame| is invalidated and the pointer must
  // be dropped after this function returns.
  VCMFrameBufferEnum InsertPacket(const VCMPacket& packet, bool* retransmitted);

  // Returns the estimated jitter in milliseconds.
  uint32_t EstimatedJitterMs();
@@ -192,7 +186,8 @@ class VCMJitterBuffer {
  // |low_rtt_nack_threshold_ms| is an RTT threshold in ms below which we expect
  // to rely on NACK only, and therefore are using larger buffers to have time
  // to wait for retransmissions.
  void SetNackMode(VCMNackMode mode,
                   int64_t low_rtt_nack_threshold_ms,
                   int64_t high_rtt_nack_threshold_ms);

  void SetNackSettings(size_t max_nack_list_size,
@@ -209,7 +204,7 @@ class VCMJitterBuffer {
  // session. Changes will not influence frames already in the buffer.
  void SetDecodeErrorMode(VCMDecodeErrorMode error_mode);
  int64_t LastDecodedTimestamp() const;
  VCMDecodeErrorMode decode_error_mode() const { return decode_error_mode_; }

  // Used to compute time of complete continuous frames. Returns the timestamps
  // corresponding to the start and end of the continuous complete buffer.
@@ -220,8 +215,8 @@ class VCMJitterBuffer {
 private:
  class SequenceNumberLessThan {
   public:
    bool operator()(const uint16_t& sequence_number1,
                    const uint16_t& sequence_number2) const {
      return IsNewerSequenceNumber(sequence_number2, sequence_number1);
    }
  };


@@ -19,11 +19,11 @@ namespace webrtc {
static const float kFastConvergeMultiplier = 0.4f;
static const float kNormalConvergeMultiplier = 0.2f;

enum { kMaxNumberOfFrames = 300 };
enum { kStartNumberOfFrames = 6 };
enum { kMaxVideoDelayMs = 10000 };
enum { kPacketsPerFrameMultiplier = 5 };
enum { kFastConvergeThreshold = 5 };

enum VCMJitterBufferEnum {
  kMaxConsecutiveOldFrames = 60,
@@ -36,36 +36,36 @@ enum VCMJitterBufferEnum {
};

enum VCMFrameBufferEnum {
  kOutOfBoundsPacket = -7,
  kNotInitialized = -6,
  kOldPacket = -5,
  kGeneralError = -4,
  kFlushIndicator = -3,  // Indicator that a flush has occurred.
  kTimeStampError = -2,
  kSizeError = -1,
  kNoError = 0,
  kIncomplete = 1,        // Frame incomplete.
  kCompleteSession = 3,   // at least one layer in the frame complete.
  kDecodableSession = 4,  // Frame incomplete, but ready to be decoded
  kDuplicatePacket = 5    // We're receiving a duplicate packet.
};

enum VCMFrameBufferStateEnum {
  kStateEmpty,       // frame popped by the RTP receiver
  kStateIncomplete,  // frame that have one or more packet(s) stored
  kStateComplete,    // frame that have all packets
  kStateDecodable    // Hybrid mode - frame can be decoded
};

enum { kH264StartCodeLengthBytes = 4 };

// Used to indicate if a received packet contain a complete NALU (or equivalent)
enum VCMNaluCompleteness {
  kNaluUnset = 0,     // Packet has not been filled.
  kNaluComplete = 1,  // Packet can be decoded as is.
  kNaluStart,         // Packet contain beginning of NALU
  kNaluIncomplete,    // Packet is not beginning or end of NALU
  kNaluEnd,           // Packet is the end of a NALU
};

}  // namespace webrtc

File diff suppressed because it is too large.


@@ -8,16 +8,18 @@
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/video_coding/jitter_estimator.h"

#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <string>

#include "webrtc/modules/video_coding/internal_defines.h"
#include "webrtc/modules/video_coding/rtt_filter.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/field_trial.h"

namespace webrtc {
@@ -48,267 +50,243 @@ VCMJitterEstimator::VCMJitterEstimator(const Clock* clock,
  Reset();
}

VCMJitterEstimator::~VCMJitterEstimator() {}

VCMJitterEstimator& VCMJitterEstimator::operator=(
    const VCMJitterEstimator& rhs) {
  if (this != &rhs) {
    memcpy(_thetaCov, rhs._thetaCov, sizeof(_thetaCov));
    memcpy(_Qcov, rhs._Qcov, sizeof(_Qcov));

    _vcmId = rhs._vcmId;
    _receiverId = rhs._receiverId;
    _avgFrameSize = rhs._avgFrameSize;
    _varFrameSize = rhs._varFrameSize;
    _maxFrameSize = rhs._maxFrameSize;
    _fsSum = rhs._fsSum;
    _fsCount = rhs._fsCount;
    _lastUpdateT = rhs._lastUpdateT;
    _prevEstimate = rhs._prevEstimate;
    _prevFrameSize = rhs._prevFrameSize;
    _avgNoise = rhs._avgNoise;
    _alphaCount = rhs._alphaCount;
    _filterJitterEstimate = rhs._filterJitterEstimate;
    _startupCount = rhs._startupCount;
    _latestNackTimestamp = rhs._latestNackTimestamp;
    _nackCount = rhs._nackCount;
    _rttFilter = rhs._rttFilter;
  }
  return *this;
}

// Resets the JitterEstimate
void VCMJitterEstimator::Reset() {
  _theta[0] = 1 / (512e3 / 8);
  _theta[1] = 0;
  _varNoise = 4.0;

  _thetaCov[0][0] = 1e-4;
  _thetaCov[1][1] = 1e2;
  _thetaCov[0][1] = _thetaCov[1][0] = 0;
  _Qcov[0][0] = 2.5e-10;
  _Qcov[1][1] = 1e-10;
  _Qcov[0][1] = _Qcov[1][0] = 0;
  _avgFrameSize = 500;
  _maxFrameSize = 500;
  _varFrameSize = 100;
  _lastUpdateT = -1;
  _prevEstimate = -1.0;
  _prevFrameSize = 0;
  _avgNoise = 0.0;
  _alphaCount = 1;
  _filterJitterEstimate = 0.0;
  _latestNackTimestamp = 0;
  _nackCount = 0;
  _fsSum = 0;
  _fsCount = 0;
  _startupCount = 0;
  _rttFilter.Reset();
  fps_counter_.Reset();
}

void VCMJitterEstimator::ResetNackCount() {
  _nackCount = 0;
}

// Updates the estimates with the new measurements
void VCMJitterEstimator::UpdateEstimate(int64_t frameDelayMS,
                                        uint32_t frameSizeBytes,
                                        bool incompleteFrame /* = false */) {
  if (frameSizeBytes == 0) {
    return;
  }
  int deltaFS = frameSizeBytes - _prevFrameSize;
  if (_fsCount < kFsAccuStartupSamples) {
    _fsSum += frameSizeBytes;
    _fsCount++;
  } else if (_fsCount == kFsAccuStartupSamples) {
    // Give the frame size filter
    _avgFrameSize = static_cast<double>(_fsSum) / static_cast<double>(_fsCount);
    _fsCount++;
  }
  if (!incompleteFrame || frameSizeBytes > _avgFrameSize) {
    double avgFrameSize = _phi * _avgFrameSize + (1 - _phi) * frameSizeBytes;
    if (frameSizeBytes < _avgFrameSize + 2 * sqrt(_varFrameSize)) {
      // Only update the average frame size if this sample wasn't a
      // key frame
      _avgFrameSize = avgFrameSize;
    }
    // Update the variance anyway since we want to capture cases where we
    // only get key frames.
    _varFrameSize = VCM_MAX(_phi * _varFrameSize +
                                (1 - _phi) * (frameSizeBytes - avgFrameSize) *
                                    (frameSizeBytes - avgFrameSize),
                            1.0);
  }

  // Update max frameSize estimate
  _maxFrameSize =
      VCM_MAX(_psi * _maxFrameSize, static_cast<double>(frameSizeBytes));

  if (_prevFrameSize == 0) {
    _prevFrameSize = frameSizeBytes;
    return;
  }
  _prevFrameSize = frameSizeBytes;

  // Only update the Kalman filter if the sample is not considered
  // an extreme outlier. Even if it is an extreme outlier from a
  // delay point of view, if the frame size also is large the
  // deviation is probably due to an incorrect line slope.
  double deviation = DeviationFromExpectedDelay(frameDelayMS, deltaFS);

  if (fabs(deviation) < _numStdDevDelayOutlier * sqrt(_varNoise) ||
      frameSizeBytes >
          _avgFrameSize + _numStdDevFrameSizeOutlier * sqrt(_varFrameSize)) {
    // Update the variance of the deviation from the
    // line given by the Kalman filter
    EstimateRandomJitter(deviation, incompleteFrame);
    // Prevent updating with frames which have been congested by a large
    // frame, and therefore arrives almost at the same time as that frame.
    // This can occur when we receive a large frame (key frame) which
    // has been delayed. The next frame is of normal size (delta frame),
    // and thus deltaFS will be << 0. This removes all frame samples
    // which arrives after a key frame.
    if ((!incompleteFrame || deviation >= 0.0) &&
        static_cast<double>(deltaFS) > -0.25 * _maxFrameSize) {
      // Update the Kalman filter with the new data
      KalmanEstimateChannel(frameDelayMS, deltaFS);
    }
  } else {
    int nStdDev =
        (deviation >= 0) ? _numStdDevDelayOutlier : -_numStdDevDelayOutlier;
    EstimateRandomJitter(nStdDev * sqrt(_varNoise), incompleteFrame);
  }
  // Post process the total estimated jitter
  if (_startupCount >= kStartupDelaySamples) {
    PostProcessEstimate();
  } else {
    _startupCount++;
  }
}

// Updates the nack/packet ratio
void VCMJitterEstimator::FrameNacked() {
  // Wait until _nackLimit retransmissions has been received,
  // then always add ~1 RTT delay.
  // TODO(holmer): Should we ever remove the additional delay if the
  // the packet losses seem to have stopped? We could for instance scale
  // the number of RTTs to add with the amount of retransmissions in a given
  // time interval, or similar.
  if (_nackCount < _nackLimit) {
    _nackCount++;
  }
}

// Updates Kalman estimate of the channel
// The caller is expected to sanity check the inputs.
void VCMJitterEstimator::KalmanEstimateChannel(int64_t frameDelayMS,
                                               int32_t deltaFSBytes) {
  double Mh[2];
  double hMh_sigma;
  double kalmanGain[2];
  double measureRes;
  double t00, t01;

  // Kalman filtering

  // Prediction
  // M = M + Q
  _thetaCov[0][0] += _Qcov[0][0];
  _thetaCov[0][1] += _Qcov[0][1];
  _thetaCov[1][0] += _Qcov[1][0];
  _thetaCov[1][1] += _Qcov[1][1];

  // Kalman gain
  // K = M*h'/(sigma2n + h*M*h') = M*h'/(1 + h*M*h')
  // h = [dFS 1]
  // Mh = M*h'
  // hMh_sigma = h*M*h' + R
  Mh[0] = _thetaCov[0][0] * deltaFSBytes + _thetaCov[0][1];
  Mh[1] = _thetaCov[1][0] * deltaFSBytes + _thetaCov[1][1];
  // sigma weights measurements with a small deltaFS as noisy and
  // measurements with large deltaFS as good
  if (_maxFrameSize < 1.0) {
    return;
  }
  double sigma = (300.0 * exp(-fabs(static_cast<double>(deltaFSBytes)) /
                              (1e0 * _maxFrameSize)) +
                  1) *
                 sqrt(_varNoise);
  if (sigma < 1.0) {
    sigma = 1.0;
  }
  hMh_sigma = deltaFSBytes * Mh[0] + Mh[1] + sigma;
  if ((hMh_sigma < 1e-9 && hMh_sigma >= 0) ||
      (hMh_sigma > -1e-9 && hMh_sigma <= 0)) {
    assert(false);
    return;
  }
  kalmanGain[0] = Mh[0] / hMh_sigma;
  kalmanGain[1] = Mh[1] / hMh_sigma;

  // Correction
  // theta = theta + K*(dT - h*theta)
  measureRes = frameDelayMS - (deltaFSBytes * _theta[0] + _theta[1]);
  _theta[0] += kalmanGain[0] * measureRes;
  _theta[1] += kalmanGain[1] * measureRes;

  if (_theta[0] < _thetaLow) {
    _theta[0] = _thetaLow;
  }

  // M = (I - K*h)*M
  t00 = _thetaCov[0][0];
  t01 = _thetaCov[0][1];
  _thetaCov[0][0] = (1 - kalmanGain[0] * deltaFSBytes) * t00 -
                    kalmanGain[0] * _thetaCov[1][0];
  _thetaCov[0][1] = (1 - kalmanGain[0] * deltaFSBytes) * t01 -
                    kalmanGain[0] * _thetaCov[1][1];
  _thetaCov[1][0] = _thetaCov[1][0] * (1 - kalmanGain[1]) -
                    kalmanGain[1] * deltaFSBytes * t00;
  _thetaCov[1][1] = _thetaCov[1][1] * (1 - kalmanGain[1]) -
                    kalmanGain[1] * deltaFSBytes * t01;

  // Covariance matrix, must be positive semi-definite
  assert(_thetaCov[0][0] + _thetaCov[1][1] >= 0 &&
         _thetaCov[0][0] * _thetaCov[1][1] -
                 _thetaCov[0][1] * _thetaCov[1][0] >=
             0 &&
         _thetaCov[0][0] >= 0);
}
// Calculate difference in delay between a sample and the
// expected delay estimated by the Kalman filter
double VCMJitterEstimator::DeviationFromExpectedDelay(
    int64_t frameDelayMS,
    int32_t deltaFSBytes) const {
  return frameDelayMS - (_theta[0] * deltaFSBytes + _theta[1]);
}

// Estimates the random jitter by calculating the variance of the
@@ -363,61 +341,45 @@ void VCMJitterEstimator::EstimateRandomJitter(double d_dT,
  }
}

double VCMJitterEstimator::NoiseThreshold() const {
  double noiseThreshold = _noiseStdDevs * sqrt(_varNoise) - _noiseStdDevOffset;
  if (noiseThreshold < 1.0) {
    noiseThreshold = 1.0;
  }
  return noiseThreshold;
}

// Calculates the current jitter estimate from the filtered estimates
double VCMJitterEstimator::CalculateEstimate() {
  double ret = _theta[0] * (_maxFrameSize - _avgFrameSize) + NoiseThreshold();

  // A very low estimate (or negative) is neglected
  if (ret < 1.0) {
    if (_prevEstimate <= 0.01) {
      ret = 1.0;
    } else {
      ret = _prevEstimate;
    }
  }
  if (ret > 10000.0) {  // Sanity
    ret = 10000.0;
  }
  _prevEstimate = ret;
  return ret;
}
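For a feel of the magnitudes: the returned estimate is slope times (largest minus average frame size) plus the noise threshold, clamped to [1.0, 10000.0] ms. With hypothetical values _theta[0] = 0.005 ms/byte, _maxFrameSize = 25000 bytes, _avgFrameSize = 5000 bytes, and a 3 ms noise threshold, this gives 0.005 * (25000 - 5000) + 3 = 103 ms, well inside the clamp.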
void VCMJitterEstimator::PostProcessEstimate() {
  _filterJitterEstimate = CalculateEstimate();
}

void VCMJitterEstimator::UpdateRtt(int64_t rttMs) {
  _rttFilter.Update(rttMs);
}

void VCMJitterEstimator::UpdateMaxFrameSize(uint32_t frameSizeBytes) {
  if (_maxFrameSize < frameSizeBytes) {
    _maxFrameSize = frameSizeBytes;
  }
}

// Returns the current filtered estimate if available,
@@ -478,5 +440,4 @@ double VCMJitterEstimator::GetFrameRate() const {
  }
  return fps;
}
}  // namespace webrtc


@@ -15,151 +15,156 @@
#include "webrtc/modules/video_coding/rtt_filter.h"
#include "webrtc/typedefs.h"

namespace webrtc {

class Clock;

class VCMJitterEstimator {
 public:
  VCMJitterEstimator(const Clock* clock,
                     int32_t vcmId = 0,
                     int32_t receiverId = 0);
  virtual ~VCMJitterEstimator();
  VCMJitterEstimator& operator=(const VCMJitterEstimator& rhs);

  // Resets the estimate to the initial state
  void Reset();
  void ResetNackCount();

  // Updates the jitter estimate with the new data.
  //
  // Input:
  //          - frameDelay      : Delay-delta calculated by UTILDelayEstimate in
  //                              milliseconds
  //          - frameSize       : Frame size of the current frame.
  //          - incompleteFrame : Flags if the frame is used to update the
  //                              estimate before it was complete.
  //                              Default is false.
  void UpdateEstimate(int64_t frameDelayMS,
                      uint32_t frameSizeBytes,
                      bool incompleteFrame = false);

  // Returns the current jitter estimate in milliseconds and adds
  // also adds an RTT dependent term in cases of retransmission.
  // Input:
  //          - rttMultiplier   : RTT param multiplier (when applicable).
  //
  // Return value               : Jitter estimate in milliseconds
  int GetJitterEstimate(double rttMultiplier);

  // Updates the nack counter.
  void FrameNacked();

  // Updates the RTT filter.
  //
  // Input:
  //          - rttMs           : RTT in ms
  void UpdateRtt(int64_t rttMs);

  void UpdateMaxFrameSize(uint32_t frameSizeBytes);

  // A constant describing the delay from the jitter buffer
  // to the delay on the receiving side which is not accounted
  // for by the jitter buffer nor the decoding delay estimate.
  static const uint32_t OPERATING_SYSTEM_JITTER = 10;

 protected:
  // These are protected for better testing possibilities
  double _theta[2];  // Estimated line parameters (slope, offset)
  double _varNoise;  // Variance of the time-deviation from the line

  virtual bool LowRateExperimentEnabled();

 private:
  // Updates the Kalman filter for the line describing
  // the frame size dependent jitter.
  //
  // Input:
  //          - frameDelayMS    : Delay-delta calculated by UTILDelayEstimate in
  //                              milliseconds
  //          - deltaFSBytes    : Frame size delta, i.e.
  //                              frame size at time T minus frame size at time
  //                              T-1
  void KalmanEstimateChannel(int64_t frameDelayMS, int32_t deltaFSBytes);

  // Updates the random jitter estimate, i.e. the variance
  // of the time deviations from the line given by the Kalman filter.
  //
  // Input:
  //          - d_dT            : The deviation from the kalman estimate
  //          - incompleteFrame : True if the frame used to update the
  //                              estimate with was incomplete
  void EstimateRandomJitter(double d_dT, bool incompleteFrame);

  double NoiseThreshold() const;

  // Calculates the current jitter estimate.
  //
  // Return value               : The current jitter estimate in milliseconds
  double CalculateEstimate();

  // Post process the calculated estimate
  void PostProcessEstimate();

  // Calculates the difference in delay between a sample and the
  // expected delay estimated by the Kalman filter.
  //
  // Input:
  //          - frameDelayMS    : Delay-delta calculated by UTILDelayEstimate in
  //                              milliseconds
  //          - deltaFS         : Frame size delta, i.e. frame size at time
  //                              T minus frame size at time T-1
  //
  // Return value               : The difference in milliseconds
  double DeviationFromExpectedDelay(int64_t frameDelayMS,
                                    int32_t deltaFSBytes) const;

  double GetFrameRate() const;

  // Constants, filter parameters
  int32_t _vcmId;
  int32_t _receiverId;
  const double _phi;
  const double _psi;
  const uint32_t _alphaCountMax;
  const double _thetaLow;
  const uint32_t _nackLimit;
  const int32_t _numStdDevDelayOutlier;
  const int32_t _numStdDevFrameSizeOutlier;
  const double _noiseStdDevs;
  const double _noiseStdDevOffset;

  double _thetaCov[2][2];  // Estimate covariance
  double _Qcov[2][2];      // Process noise covariance
  double _avgFrameSize;    // Average frame size
  double _varFrameSize;    // Frame size variance
  double _maxFrameSize;    // Largest frame size received (descending
                           // with a factor _psi)
  uint32_t _fsSum;
  uint32_t _fsCount;

  int64_t _lastUpdateT;
  double _prevEstimate;     // The previously returned jitter estimate
  uint32_t _prevFrameSize;  // Frame size of the previous frame
  double _avgNoise;         // Average of the random jitter
  uint32_t _alphaCount;
  double _filterJitterEstimate;  // The filtered sum of jitter estimates

  uint32_t _startupCount;

  int64_t
      _latestNackTimestamp;  // Timestamp in ms when the latest nack was seen
  uint32_t _nackCount;       // Keeps track of the number of nacks received,
                             // but never goes above _nackLimit
  VCMRttFilter _rttFilter;

  rtc::RollingAccumulator<uint64_t> fps_counter_;
  enum ExperimentFlag { kInit, kEnabled, kDisabled };
  ExperimentFlag low_rate_experiment_;
  const Clock* clock_;
};

}  // namespace webrtc

#endif  // WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_

File diff suppressed because it is too large.


@@ -25,7 +25,7 @@ namespace webrtc {
namespace media_optimization {

// Number of time periods used for (max) window filter for packet loss
// TODO(marpan): set reasonable window size for filtered packet loss,
// adjustment should be based on logged/real data of loss stats/correlation.
enum { kLossPrHistorySize = 10 };
@@ -34,331 +34,328 @@ enum { kLossPrShortFilterWinMs = 1000 };
// The type of filter used on the received packet loss reports.
enum FilterPacketLossMode {
  kNoFilter,   // No filtering on received loss.
  kAvgFilter,  // Recursive average filter.
  kMaxFilter   // Max-window filter, over the time interval of:
               // (kLossPrHistorySize * kLossPrShortFilterWinMs) ms.
};
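A minimal sketch of what a max-window filter over the last kLossPrHistorySize reports could look like; the helper below is a hypothetical illustration of the kMaxFilter idea, not the class defined in this header:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <deque>

// Hypothetical: keep the last |window_size| loss reports and return the max.
uint8_t MaxWindowLoss(std::deque<uint8_t>* history, uint8_t new_loss_pr255,
                      size_t window_size) {
  history->push_back(new_loss_pr255);
  if (history->size() > window_size) history->pop_front();
  return *std::max_element(history->begin(), history->end());
}

int main() {
  std::deque<uint8_t> history;
  MaxWindowLoss(&history, 10, 10);
  MaxWindowLoss(&history, 40, 10);  // A loss spike.
  const uint8_t filtered = MaxWindowLoss(&history, 5, 10);
  assert(filtered == 40);  // The spike dominates until it ages out.
  return 0;
}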
// Thresholds for hybrid NACK/FEC
// common to media optimization and the jitter buffer.
const int64_t kLowRttNackMs = 20;

struct VCMProtectionParameters {
  VCMProtectionParameters()
      : rtt(0),
        lossPr(0.0f),
        bitRate(0.0f),
        packetsPerFrame(0.0f),
        packetsPerFrameKey(0.0f),
        frameRate(0.0f),
        keyFrameSize(0.0f),
        fecRateDelta(0),
        fecRateKey(0),
        codecWidth(0),
        codecHeight(0),
        numLayers(1) {}

  int64_t rtt;
  float lossPr;
  float bitRate;
  float packetsPerFrame;
  float packetsPerFrameKey;
  float frameRate;
  float keyFrameSize;
  uint8_t fecRateDelta;
  uint8_t fecRateKey;
  uint16_t codecWidth;
  uint16_t codecHeight;
  int numLayers;
};

/******************************/
/* VCMProtectionMethod class  */
/******************************/

enum VCMProtectionMethodEnum { kNack, kFec, kNackFec, kNone };

class VCMLossProbabilitySample {
 public:
  VCMLossProbabilitySample() : lossPr255(0), timeMs(-1) {}

  uint8_t lossPr255;
  int64_t timeMs;
};

class VCMProtectionMethod {
 public:
  VCMProtectionMethod();
  virtual ~VCMProtectionMethod();

  // Updates the efficiency of the method using the parameters provided
  //
  // Input:
  //         - parameters : Parameters used to calculate efficiency
  //
  // Return value         : True if this method is recommended in
  //                        the given conditions.
  virtual bool UpdateParameters(const VCMProtectionParameters* parameters) = 0;

  // Returns the protection type
  //
  // Return value         : The protection type
  enum VCMProtectionMethodEnum Type() const { return _type; }

  // Returns the effective packet loss for ER, required by this protection
  // method
  //
  // Return value         : Required effective packet loss
  virtual uint8_t RequiredPacketLossER() { return _effectivePacketLoss; }

  // Extracts the FEC protection factor for Key frame, required by this
  // protection method
  //
  // Return value         : Required protectionFactor for Key frame
  virtual uint8_t RequiredProtectionFactorK() { return _protectionFactorK; }

  // Extracts the FEC protection factor for Delta frame, required by this
  // protection method
  //
  // Return value         : Required protectionFactor for delta frame
  virtual uint8_t RequiredProtectionFactorD() { return _protectionFactorD; }

  // Extracts whether the FEC Unequal protection (UEP) is used for Key frame.
  //
  // Return value         : Required Unequal protection on/off state.
  virtual bool RequiredUepProtectionK() { return _useUepProtectionK; }

  // Extracts whether the the FEC Unequal protection (UEP) is used for Delta
  // frame.
  //
  // Return value         : Required Unequal protection on/off state.
  virtual bool RequiredUepProtectionD() { return _useUepProtectionD; }

  virtual int MaxFramesFec() const { return 1; }

  // Updates content metrics
  void UpdateContentMetrics(const VideoContentMetrics* contentMetrics);

 protected:
  uint8_t _effectivePacketLoss;
  uint8_t _protectionFactorK;
  uint8_t _protectionFactorD;
  // Estimation of residual loss after the FEC
  float _scaleProtKey;
  int32_t _maxPayloadSize;

  VCMQmRobustness* _qmRobustness;
  bool _useUepProtectionK;
  bool _useUepProtectionD;
  float _corrFecCost;
  enum VCMProtectionMethodEnum _type;
};

class VCMNackMethod : public VCMProtectionMethod {
 public:
  VCMNackMethod();
  virtual ~VCMNackMethod();
  virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
  // Get the effective packet loss
  bool EffectivePacketLoss(const VCMProtectionParameters* parameter);
};

class VCMFecMethod : public VCMProtectionMethod {
 public:
  VCMFecMethod();
  virtual ~VCMFecMethod();
  virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
  // Get the effective packet loss for ER
  bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
  // Get the FEC protection factors
  bool ProtectionFactor(const VCMProtectionParameters* parameters);
  // Get the boost for key frame protection
  uint8_t BoostCodeRateKey(uint8_t packetFrameDelta,
                           uint8_t packetFrameKey) const;
  // Convert the rates: defined relative to total# packets or source# packets
  uint8_t ConvertFECRate(uint8_t codeRate) const;
  // Get the average effective recovery from FEC: for random loss model
  float AvgRecoveryFEC(const VCMProtectionParameters* parameters) const;
  // Update FEC with protectionFactorD
  void UpdateProtectionFactorD(uint8_t protectionFactorD);
  // Update FEC with protectionFactorK
  void UpdateProtectionFactorK(uint8_t protectionFactorK);
  // Compute the bits per frame. Account for temporal layers when applicable.
  int BitsPerFrame(const VCMProtectionParameters* parameters);

 protected:
  enum { kUpperLimitFramesFec = 6 };
  // Thresholds values for the bytes/frame and round trip time, below which we
  // may turn off FEC, depending on |_numLayers| and |_maxFramesFec|.
  // Max bytes/frame for VGA, corresponds to ~140k at 25fps.
  enum { kMaxBytesPerFrameForFec = 700 };
  // Max bytes/frame for CIF and lower: corresponds to ~80k at 25fps.
  enum { kMaxBytesPerFrameForFecLow = 400 };
  // Max bytes/frame for frame size larger than VGA, ~200k at 25fps.
  enum { kMaxBytesPerFrameForFecHigh = 1000 };
};

class VCMNackFecMethod : public VCMFecMethod {
 public:
  VCMNackFecMethod(int64_t lowRttNackThresholdMs,
                   int64_t highRttNackThresholdMs);
  virtual ~VCMNackFecMethod();
  virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
  // Get the effective packet loss for ER
  bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
  // Get the protection factors
  bool ProtectionFactor(const VCMProtectionParameters* parameters);
  // Get the max number of frames the FEC is allowed to be based on.
  int MaxFramesFec() const;
  // Turn off the FEC based on low bitrate and other factors.
  bool BitRateTooLowForFec(const VCMProtectionParameters* parameters);

 private:
  int ComputeMaxFramesFec(const VCMProtectionParameters* parameters);

  int64_t _lowRttNackMs;
  int64_t _highRttNackMs;
  int _maxFramesFec;
};

class VCMLossProtectionLogic {
 public:
  explicit VCMLossProtectionLogic(int64_t nowMs);
  ~VCMLossProtectionLogic();

  // Set the protection method to be used
  //
  // Input:
  //        - newMethodType : New requested protection method type. If one
int64_t highRttNackThresholdMs); // is already set, it will be deleted and replaced
virtual ~VCMNackFecMethod(); void SetMethod(VCMProtectionMethodEnum newMethodType);
virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
// Get the effective packet loss for ER
bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
// Get the protection factors
bool ProtectionFactor(const VCMProtectionParameters* parameters);
// Get the max number of frames the FEC is allowed to be based on.
int MaxFramesFec() const;
// Turn off the FEC based on low bitrate and other factors.
bool BitRateTooLowForFec(const VCMProtectionParameters* parameters);
private:
int ComputeMaxFramesFec(const VCMProtectionParameters* parameters);
int64_t _lowRttNackMs; // Update the round-trip time
int64_t _highRttNackMs; //
int _maxFramesFec; // Input:
}; // - rtt : Round-trip time in seconds.
void UpdateRtt(int64_t rtt);
class VCMLossProtectionLogic // Update the filtered packet loss.
{ //
public: // Input:
VCMLossProtectionLogic(int64_t nowMs); // - packetLossEnc : The reported packet loss filtered
~VCMLossProtectionLogic(); // (max window or average)
void UpdateFilteredLossPr(uint8_t packetLossEnc);
// Set the protection method to be used // Update the current target bit rate.
// //
// Input: // Input:
// - newMethodType : New requested protection method type. If one // - bitRate : The current target bit rate in kbits/s
// is already set, it will be deleted and replaced void UpdateBitRate(float bitRate);
void SetMethod(VCMProtectionMethodEnum newMethodType);
// Update the round-trip time // Update the number of packets per frame estimate, for delta frames
// //
// Input: // Input:
// - rtt : Round-trip time in seconds. // - nPackets : Number of packets in the latest sent frame.
void UpdateRtt(int64_t rtt); void UpdatePacketsPerFrame(float nPackets, int64_t nowMs);
// Update the filtered packet loss. // Update the number of packets per frame estimate, for key frames
// //
// Input: // Input:
// - packetLossEnc : The reported packet loss filtered // - nPackets : umber of packets in the latest sent frame.
// (max window or average) void UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs);
void UpdateFilteredLossPr(uint8_t packetLossEnc);
// Update the current target bit rate. // Update the keyFrameSize estimate
// //
// Input: // Input:
// - bitRate : The current target bit rate in kbits/s // - keyFrameSize : The size of the latest sent key frame.
void UpdateBitRate(float bitRate); void UpdateKeyFrameSize(float keyFrameSize);
// Update the number of packets per frame estimate, for delta frames // Update the frame rate
// //
// Input: // Input:
// - nPackets : Number of packets in the latest sent frame. // - frameRate : The current target frame rate.
void UpdatePacketsPerFrame(float nPackets, int64_t nowMs); void UpdateFrameRate(float frameRate) { _frameRate = frameRate; }
// Update the number of packets per frame estimate, for key frames // Update the frame size
// //
// Input: // Input:
// - nPackets : umber of packets in the latest sent frame. // - width : The codec frame width.
void UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs); // - height : The codec frame height.
void UpdateFrameSize(uint16_t width, uint16_t height);
// Update the keyFrameSize estimate // Update the number of active layers
// //
// Input: // Input:
// - keyFrameSize : The size of the latest sent key frame. // - numLayers : Number of layers used.
void UpdateKeyFrameSize(float keyFrameSize); void UpdateNumLayers(int numLayers);
// Update the frame rate // The amount of packet loss to cover for with FEC.
// //
// Input: // Input:
// - frameRate : The current target frame rate. // - fecRateKey : Packet loss to cover for with FEC when
void UpdateFrameRate(float frameRate) { _frameRate = frameRate; } // sending key frames.
// - fecRateDelta : Packet loss to cover for with FEC when
// sending delta frames.
void UpdateFECRates(uint8_t fecRateKey, uint8_t fecRateDelta) {
_fecRateKey = fecRateKey;
_fecRateDelta = fecRateDelta;
}
// Update the frame size // Update the protection methods with the current VCMProtectionParameters
// // and set the requested protection settings.
// Input: // Return value : Returns true on update
// - width : The codec frame width. bool UpdateMethod();
// - height : The codec frame height.
void UpdateFrameSize(uint16_t width, uint16_t height);
// Update the number of active layers // Returns the method currently selected.
// //
// Input: // Return value : The protection method currently selected.
// - numLayers : Number of layers used. VCMProtectionMethod* SelectedMethod() const;
void UpdateNumLayers(int numLayers);
// The amount of packet loss to cover for with FEC. // Return the protection type of the currently selected method
// VCMProtectionMethodEnum SelectedType() const;
// Input:
// - fecRateKey : Packet loss to cover for with FEC when
// sending key frames.
// - fecRateDelta : Packet loss to cover for with FEC when
// sending delta frames.
void UpdateFECRates(uint8_t fecRateKey, uint8_t fecRateDelta)
{ _fecRateKey = fecRateKey;
_fecRateDelta = fecRateDelta; }
// Update the protection methods with the current VCMProtectionParameters // Updates the filtered loss for the average and max window packet loss,
// and set the requested protection settings. // and returns the filtered loss probability in the interval [0, 255].
// Return value : Returns true on update // The returned filtered loss value depends on the parameter |filter_mode|.
bool UpdateMethod(); // The input parameter |lossPr255| is the received packet loss.
// Returns the method currently selected. // Return value : The filtered loss probability
// uint8_t FilteredLoss(int64_t nowMs,
// Return value : The protection method currently selected. FilterPacketLossMode filter_mode,
VCMProtectionMethod* SelectedMethod() const; uint8_t lossPr255);
// Return the protection type of the currently selected method void Reset(int64_t nowMs);
VCMProtectionMethodEnum SelectedType() const;
// Updates the filtered loss for the average and max window packet loss, void Release();
// and returns the filtered loss probability in the interval [0, 255].
// The returned filtered loss value depends on the parameter |filter_mode|.
// The input parameter |lossPr255| is the received packet loss.
// Return value : The filtered loss probability private:
uint8_t FilteredLoss(int64_t nowMs, FilterPacketLossMode filter_mode, // Sets the available loss protection methods.
uint8_t lossPr255); void UpdateMaxLossHistory(uint8_t lossPr255, int64_t now);
uint8_t MaxFilteredLossPr(int64_t nowMs) const;
void Reset(int64_t nowMs); rtc::scoped_ptr<VCMProtectionMethod> _selectedMethod;
VCMProtectionParameters _currentParameters;
void Release(); int64_t _rtt;
float _lossPr;
private: float _bitRate;
// Sets the available loss protection methods. float _frameRate;
void UpdateMaxLossHistory(uint8_t lossPr255, int64_t now); float _keyFrameSize;
uint8_t MaxFilteredLossPr(int64_t nowMs) const; uint8_t _fecRateKey;
rtc::scoped_ptr<VCMProtectionMethod> _selectedMethod; uint8_t _fecRateDelta;
VCMProtectionParameters _currentParameters; int64_t _lastPrUpdateT;
int64_t _rtt; int64_t _lastPacketPerFrameUpdateT;
float _lossPr; int64_t _lastPacketPerFrameUpdateTKey;
float _bitRate; rtc::ExpFilter _lossPr255;
float _frameRate; VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
float _keyFrameSize; uint8_t _shortMaxLossPr255;
uint8_t _fecRateKey; rtc::ExpFilter _packetsPerFrame;
uint8_t _fecRateDelta; rtc::ExpFilter _packetsPerFrameKey;
int64_t _lastPrUpdateT; uint16_t _codecWidth;
int64_t _lastPacketPerFrameUpdateT; uint16_t _codecHeight;
int64_t _lastPacketPerFrameUpdateTKey; int _numLayers;
rtc::ExpFilter _lossPr255;
VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
uint8_t _shortMaxLossPr255;
rtc::ExpFilter _packetsPerFrame;
rtc::ExpFilter _packetsPerFrameKey;
uint16_t _codecWidth;
uint16_t _codecHeight;
int _numLayers;
}; };
} // namespace media_optimization } // namespace media_optimization
} // namespace webrtc } // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_ #endif // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
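For orientation, a minimal driver sketch for the loss-protection API declared above. It is not part of this CL: the kNackFec enum value and all numeric inputs are assumptions for illustration; the calls themselves are the declared interface.

#include "webrtc/modules/video_coding/media_opt_util.h"

void ExampleSelectProtection(int64_t now_ms) {
  using namespace webrtc::media_optimization;
  VCMLossProtectionLogic logic(now_ms);
  logic.SetMethod(kNackFec);        // assumed enum value for hybrid NACK/FEC
  // Feed the current channel and encoder state (illustrative values).
  logic.UpdateRtt(120);             // round-trip time in ms
  logic.UpdateBitRate(500.0f);      // target bit rate in kbits/s
  logic.UpdateFrameRate(30.0f);     // target frame rate
  logic.UpdateFrameSize(640, 480);  // codec frame size
  logic.UpdateFilteredLossPr(26);   // ~10% loss on the [0, 255] scale
  // Recompute the selected method's settings and read back the FEC factors.
  logic.UpdateMethod();
  VCMProtectionMethod* method = logic.SelectedMethod();
  uint8_t fec_key = method->RequiredProtectionFactorK();
  uint8_t fec_delta = method->RequiredProtectionFactorD();
  (void)fec_key;
  (void)fec_delta;
}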
View File
@ -53,11 +53,9 @@ void UpdateProtectionCallback(
  key_fec_params.fec_mask_type = kFecMaskRandom;

  // TODO(Marco): Pass FEC protection values per layer.
  video_protection_callback->ProtectionRequest(
      &delta_fec_params, &key_fec_params, video_rate_bps,
      nack_overhead_rate_bps, fec_overhead_rate_bps);
}
}  // namespace
@ -115,8 +113,8 @@ MediaOptimization::~MediaOptimization(void) {
void MediaOptimization::Reset() {
  CriticalSectionScoped lock(crit_sect_.get());
  SetEncodingDataInternal(kVideoCodecUnknown, 0, 0, 0, 0, 0, 0,
                          max_payload_size_);
  memset(incoming_frame_times_, -1, sizeof(incoming_frame_times_));
  incoming_frame_rate_ = 0.0;
  frame_dropper_->Reset();
@ -149,14 +147,8 @@ void MediaOptimization::SetEncodingData(VideoCodecType send_codec_type,
                                        int num_layers,
                                        int32_t mtu) {
  CriticalSectionScoped lock(crit_sect_.get());
  SetEncodingDataInternal(send_codec_type, max_bit_rate, frame_rate,
                          target_bitrate, width, height, num_layers, mtu);
}

void MediaOptimization::SetEncodingDataInternal(VideoCodecType send_codec_type,
@ -190,11 +182,8 @@ void MediaOptimization::SetEncodingDataInternal(VideoCodecType send_codec_type,
  codec_height_ = height;
  num_layers_ = (num_layers <= 1) ? 1 : num_layers;  // Can also be zero.
  max_payload_size_ = mtu;
  qm_resolution_->Initialize(target_bitrate_kbps, user_frame_rate_,
                             codec_width_, codec_height_, num_layers_);
}

uint32_t MediaOptimization::SetTargetRates(
@ -256,10 +245,8 @@ uint32_t MediaOptimization::SetTargetRates(
  // overhead data actually transmitted (including headers) the last
  // second.
  if (protection_callback) {
    UpdateProtectionCallback(selected_method, &sent_video_rate_bps,
                             &sent_nack_rate_bps, &sent_fec_rate_bps,
                             protection_callback);
  }
  uint32_t sent_total_rate_bps =
@ -296,10 +283,8 @@ uint32_t MediaOptimization::SetTargetRates(
  if (enable_qm_ && qmsettings_callback) {
    // Update QM with rates.
    qm_resolution_->UpdateRates(target_video_bitrate_kbps, sent_video_rate_kbps,
                                incoming_frame_rate_, fraction_lost_);
    // Check for QM selection.
    bool select_qm = CheckStatusForQMchange();
    if (select_qm) {
@ -514,8 +499,7 @@ void MediaOptimization::UpdateSentBitrate(int64_t now_ms) {
  }
  size_t framesize_sum = 0;
  for (FrameSampleList::iterator it = encoded_frame_samples_.begin();
       it != encoded_frame_samples_.end(); ++it) {
    framesize_sum += it->size_bytes;
  }
  float denom = static_cast<float>(
@ -565,7 +549,8 @@ bool MediaOptimization::QMUpdate(
  }
  LOG(LS_INFO) << "Media optimizer requests the video resolution to be changed "
                  "to "
               << qm->codec_width << "x" << qm->codec_height << "@"
               << qm->frame_rate;

  // Update VPM with new target frame rate and frame size.
@ -574,11 +559,11 @@ bool MediaOptimization::QMUpdate(
  // will vary/fluctuate, and since we don't want to change the state of the
  // VPM frame dropper, unless a temporal action was selected, we use the
  // quantity |qm->frame_rate| for updating.
  video_qmsettings_callback->SetVideoQMSettings(qm->frame_rate, codec_width_,
                                                codec_height_);
  content_->UpdateFrameRate(qm->frame_rate);
  qm_resolution_->UpdateCodecParameters(qm->frame_rate, codec_width_,
                                        codec_height_);
  return true;
}
View File
@ -85,15 +85,9 @@ class MediaOptimization {
  uint32_t SentBitRate();

 private:
  enum { kFrameCountHistorySize = 90 };
  enum { kFrameHistoryWinMs = 2000 };
  enum { kBitrateAverageWinMs = 1000 };

  struct EncodedFrameSample;
  typedef std::list<EncodedFrameSample> FrameSampleList;
View File
@ -51,7 +51,6 @@ class TestMediaOptimization : public ::testing::Test {
  uint32_t next_timestamp_;
};

TEST_F(TestMediaOptimization, VerifyMuting) {
  // Enable video suspension with these limits.
  // Suspend the video when the rate is below 50 kbps and resume when it gets
View File
@ -11,116 +11,21 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_NACK_FEC_TABLES_H_
#define WEBRTC_MODULES_VIDEO_CODING_NACK_FEC_TABLES_H_

namespace webrtc {

// Table for adjusting FEC rate for NACK/FEC protection method
// Table values are built as a sigmoid function, ranging from 0 to 100, based on
// the HybridNackTH values defined in media_opt_util.h.
const uint16_t VCMNackFecTable[100] = {
    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   1,   1,   1,   1,
    1,   2,   2,   2,   3,   3,   4,   5,   6,   7,   9,   10,  12,  15,  18,
    21,  24,  28,  32,  37,  41,  46,  51,  56,  61,  66,  70,  74,  78,  81,
    84,  86,  89,  90,  92,  93,  95,  95,  96,  97,  97,  98,  98,  99,  99,
    99,  99,  99,  99,  100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
    100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
    100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
};

}  // namespace webrtc

#endif  // WEBRTC_MODULES_VIDEO_CODING_NACK_FEC_TABLES_H_
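Since the comment above only names the shape of the curve, a hypothetical generator for a table like this might look as follows. The midpoint and slope constants are illustrative assumptions, not the actual HybridNackTH-derived values used for the committed table.

#include <cmath>
#include <cstdio>

int main() {
  // Assumed sigmoid parameters; the committed table was generated from the
  // HybridNackTH thresholds, which are not reproduced here.
  const double midpoint = 30.0;  // index where the curve crosses 50
  const double slope = 0.15;     // steepness of the transition
  for (int i = 0; i < 100; ++i) {
    // Logistic function scaled to [0, 100], rounded to the nearest integer.
    double s = 100.0 / (1.0 + std::exp(-slope * (i - midpoint)));
    std::printf("%d,%c", static_cast<int>(s + 0.5),
                (i % 15 == 14) ? '\n' : ' ');
  }
  return 0;
}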
View File
@ -8,11 +8,12 @@
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/video_coding/packet.h"

#include <assert.h>

#include "webrtc/modules/include/module_common_types.h"

namespace webrtc {

VCMPacket::VCMPacket()
@ -34,49 +35,47 @@ VCMPacket::VCMPacket()
VCMPacket::VCMPacket(const uint8_t* ptr,
                     const size_t size,
                     const WebRtcRTPHeader& rtpHeader)
    : payloadType(rtpHeader.header.payloadType),
      timestamp(rtpHeader.header.timestamp),
      ntp_time_ms_(rtpHeader.ntp_time_ms),
      seqNum(rtpHeader.header.sequenceNumber),
      dataPtr(ptr),
      sizeBytes(size),
      markerBit(rtpHeader.header.markerBit),
      frameType(rtpHeader.frameType),
      codec(kVideoCodecUnknown),
      isFirstPacket(rtpHeader.type.Video.isFirstPacket),
      completeNALU(kNaluComplete),
      insertStartCode(false),
      width(rtpHeader.type.Video.width),
      height(rtpHeader.type.Video.height),
      codecSpecificHeader(rtpHeader.type.Video) {
  CopyCodecSpecifics(rtpHeader.type.Video);
}

VCMPacket::VCMPacket(const uint8_t* ptr,
                     size_t size,
                     uint16_t seq,
                     uint32_t ts,
                     bool mBit)
    : payloadType(0),
      timestamp(ts),
      ntp_time_ms_(0),
      seqNum(seq),
      dataPtr(ptr),
      sizeBytes(size),
      markerBit(mBit),
      frameType(kVideoFrameDelta),
      codec(kVideoCodecUnknown),
      isFirstPacket(false),
      completeNALU(kNaluComplete),
      insertStartCode(false),
      width(0),
      height(0),
      codecSpecificHeader() {}

void VCMPacket::Reset() {
  payloadType = 0;
View File
@ -18,42 +18,42 @@
namespace webrtc {

class VCMPacket {
 public:
  VCMPacket();
  VCMPacket(const uint8_t* ptr,
            const size_t size,
            const WebRtcRTPHeader& rtpHeader);
  VCMPacket(const uint8_t* ptr,
            size_t size,
            uint16_t seqNum,
            uint32_t timestamp,
            bool markerBit);

  void Reset();

  uint8_t payloadType;
  uint32_t timestamp;
  // NTP time of the capture time in local timebase in milliseconds.
  int64_t ntp_time_ms_;
  uint16_t seqNum;
  const uint8_t* dataPtr;
  size_t sizeBytes;
  bool markerBit;

  FrameType frameType;
  VideoCodecType codec;

  bool isFirstPacket;                // Is this first packet in a frame.
  VCMNaluCompleteness completeNALU;  // Default is kNaluIncomplete.
  bool insertStartCode;  // True if a start code should be inserted before this
                         // packet.
  int width;
  int height;
  RTPVideoHeader codecSpecificHeader;

 protected:
  void CopyCodecSpecifics(const RTPVideoHeader& videoHeader);
};

}  // namespace webrtc

#endif  // WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
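A minimal construction sketch for the raw-buffer constructor declared above; the payload bytes and RTP values are placeholders, not values from this CL.

#include "webrtc/modules/video_coding/packet.h"

void ExampleMakePacket() {
  // Placeholder payload; real callers pass the RTP payload buffer.
  static const uint8_t payload[3] = {0x01, 0x02, 0x03};
  // seqNum = 100, timestamp = 9000, marker bit set (end of a frame).
  webrtc::VCMPacket packet(payload, sizeof(payload), 100, 9000, true);
  // Per the constructor in packet.cc above, this leaves frameType at
  // kVideoFrameDelta, codec at kVideoCodecUnknown and isFirstPacket false.
  (void)packet;
}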
View File
@ -36,8 +36,7 @@ VCMQmMethod::VCMQmMethod()
  ResetQM();
}

VCMQmMethod::~VCMQmMethod() {}

void VCMQmMethod::ResetQM() {
  aspect_ratio_ = 1.0f;
@ -52,7 +51,7 @@ uint8_t VCMQmMethod::ComputeContentClass() {
  return content_class_ = 3 * motion_.level + spatial_.level;
}

void VCMQmMethod::UpdateContent(const VideoContentMetrics* contentMetrics) {
  content_metrics_ = contentMetrics;
}
@ -64,7 +63,7 @@ void VCMQmMethod::ComputeMotionNFD() {
  if (motion_.value < kLowMotionNfd) {
    motion_.level = kLow;
  } else if (motion_.value > kHighMotionNfd) {
    motion_.level = kHigh;
  } else {
    motion_.level = kDefault;
  }
@ -75,7 +74,7 @@ void VCMQmMethod::ComputeSpatial() {
  float spatial_err_h = 0.0;
  float spatial_err_v = 0.0;
  if (content_metrics_) {
    spatial_err = content_metrics_->spatial_pred_err;
    spatial_err_h = content_metrics_->spatial_pred_err_h;
    spatial_err_v = content_metrics_->spatial_pred_err_v;
  }
@ -94,8 +93,7 @@ void VCMQmMethod::ComputeSpatial() {
  }
}

ImageType VCMQmMethod::GetImageType(uint16_t width, uint16_t height) {
  // Get the image type for the encoder frame size.
  uint32_t image_size = width * height;
  if (image_size == kSizeOfImageType[kQCIF]) {
@ -142,7 +140,7 @@ FrameRateLevelClass VCMQmMethod::FrameRateLevel(float avg_framerate) {
  } else if (avg_framerate <= kMiddleFrameRate) {
    return kFrameRateMiddle1;
  } else if (avg_framerate <= kHighFrameRate) {
    return kFrameRateMiddle2;
  } else {
    return kFrameRateHigh;
  }
@ -150,8 +148,7 @@ FrameRateLevelClass VCMQmMethod::FrameRateLevel(float avg_framerate) {

// RESOLUTION CLASS

VCMQmResolution::VCMQmResolution() : qm_(new VCMResolutionScale()) {
  Reset();
}
@ -174,7 +171,7 @@ void VCMQmResolution::ResetRates() {
void VCMQmResolution::ResetDownSamplingState() {
  state_dec_factor_spatial_ = 1.0;
  state_dec_factor_temporal_ = 1.0;
  for (int i = 0; i < kDownActionHistorySize; i++) {
    down_action_history_[i].spatial = kNoChangeSpatial;
    down_action_history_[i].temporal = kNoChangeTemporal;
@ -225,11 +222,12 @@ int VCMQmResolution::Initialize(float bitrate,
  buffer_level_ = kInitBufferLevel * target_bitrate_;
  // Per-frame bandwidth.
  per_frame_bandwidth_ = target_bitrate_ / user_framerate;
  init_ = true;
  return VCM_OK;
}

void VCMQmResolution::UpdateCodecParameters(float frame_rate,
                                            uint16_t width,
                                            uint16_t height) {
  width_ = width;
  height_ = height;
@ -283,12 +281,12 @@ void VCMQmResolution::UpdateRates(float target_bitrate,
  // Update with the current new target and frame rate:
  // these values are ones the encoder will use for the current/next ~1sec.
  target_bitrate_ = target_bitrate;
  incoming_framerate_ = incoming_framerate;
  sum_incoming_framerate_ += incoming_framerate_;
  // Update the per_frame_bandwidth:
  // this is the per_frame_bw for the current/next ~1sec.
  per_frame_bandwidth_ = 0.0f;
  if (incoming_framerate_ > 0.0f) {
    per_frame_bandwidth_ = target_bitrate_ / incoming_framerate_;
  }
@ -313,7 +311,7 @@ int VCMQmResolution::SelectResolution(VCMResolutionScale** qm) {
  }
  if (content_metrics_ == NULL) {
    Reset();
    *qm = qm_;
    return VCM_OK;
  }
@ -376,31 +374,31 @@ void VCMQmResolution::ComputeRatesForSelection() {
  avg_rate_mismatch_sgn_ = 0.0f;
  avg_packet_loss_ = 0.0f;
  if (frame_cnt_ > 0) {
    avg_ratio_buffer_low_ =
        static_cast<float>(low_buffer_cnt_) / static_cast<float>(frame_cnt_);
  }
  if (update_rate_cnt_ > 0) {
    avg_rate_mismatch_ =
        static_cast<float>(sum_rate_MM_) / static_cast<float>(update_rate_cnt_);
    avg_rate_mismatch_sgn_ = static_cast<float>(sum_rate_MM_sgn_) /
                             static_cast<float>(update_rate_cnt_);
    avg_target_rate_ = static_cast<float>(sum_target_rate_) /
                       static_cast<float>(update_rate_cnt_);
    avg_incoming_framerate_ = static_cast<float>(sum_incoming_framerate_) /
                              static_cast<float>(update_rate_cnt_);
    avg_packet_loss_ = static_cast<float>(sum_packet_loss_) /
                       static_cast<float>(update_rate_cnt_);
  }
  // For selection we may want to weight some quantities more heavily
  // with the current (i.e., next ~1sec) rate values.
  avg_target_rate_ =
      kWeightRate * avg_target_rate_ + (1.0 - kWeightRate) * target_bitrate_;
  avg_incoming_framerate_ = kWeightRate * avg_incoming_framerate_ +
                            (1.0 - kWeightRate) * incoming_framerate_;
  // Use base layer frame rate for temporal layers: this will favor spatial.
  assert(num_layers_ > 0);
  framerate_level_ = FrameRateLevel(avg_incoming_framerate_ /
                                    static_cast<float>(1 << (num_layers_ - 1)));
}

void VCMQmResolution::ComputeEncoderState() {
@ -412,7 +410,7 @@ void VCMQmResolution::ComputeEncoderState() {
  // 2) rate mis-match is high, and consistent over-shooting by encoder.
  if ((avg_ratio_buffer_low_ > kMaxBufferLow) ||
      ((avg_rate_mismatch_ > kMaxRateMisMatch) &&
       (avg_rate_mismatch_sgn_ < -kRateOverShoot))) {
    encoder_state_ = kStressedEncoding;
  }
  // Assign easy state if:
@ -435,9 +433,9 @@ bool VCMQmResolution::GoingUpResolution() {
  // Modify the fac_width/height for this case.
  if (down_action_history_[0].spatial == kOneQuarterSpatialUniform) {
    fac_width = kFactorWidthSpatial[kOneQuarterSpatialUniform] /
                kFactorWidthSpatial[kOneHalfSpatialUniform];
    fac_height = kFactorHeightSpatial[kOneQuarterSpatialUniform] /
                 kFactorHeightSpatial[kOneHalfSpatialUniform];
  }

  // Check if we should go up both spatially and temporally.
@ -459,8 +457,8 @@ bool VCMQmResolution::GoingUpResolution() {
                                            kTransRateScaleUpSpatial);
  }
  if (down_action_history_[0].temporal != kNoChangeTemporal) {
    selected_up_temporal =
        ConditionForGoingUp(1.0f, 1.0f, fac_temp, kTransRateScaleUpTemp);
  }
  if (selected_up_spatial && !selected_up_temporal) {
    action_.spatial = down_action_history_[0].spatial;
@ -484,13 +482,13 @@ bool VCMQmResolution::ConditionForGoingUp(float fac_width,
                                          float fac_height,
                                          float fac_temp,
                                          float scale_fac) {
  float estimated_transition_rate_up =
      GetTransitionRate(fac_width, fac_height, fac_temp, scale_fac);
  // Go back up if:
  // 1) target rate is above threshold and current encoder state is stable, or
  // 2) encoder state is easy (encoder is significantly under-shooting target).
  if (((avg_target_rate_ > estimated_transition_rate_up) &&
       (encoder_state_ == kStableEncoding)) ||
      (encoder_state_ == kEasyEncoding)) {
    return true;
  } else {
@ -505,7 +503,7 @@ bool VCMQmResolution::GoingDownResolution() {
  // Resolution reduction if:
  // (1) target rate is below transition rate, or
  // (2) encoder is in stressed state and target rate below a max threshold.
  if ((avg_target_rate_ < estimated_transition_rate_down) ||
      (encoder_state_ == kStressedEncoding && avg_target_rate_ < max_rate)) {
    // Get the down-sampling action: based on content class, and how low
    // average target rate is relative to transition rate.
@ -529,9 +527,7 @@ bool VCMQmResolution::GoingDownResolution() {
        action_.spatial = kNoChangeSpatial;
        break;
      }
      default: { assert(false); }
    }
    switch (temp_fact) {
      case 3: {
@ -546,9 +542,7 @@ bool VCMQmResolution::GoingDownResolution() {
        action_.temporal = kNoChangeTemporal;
        break;
      }
      default: { assert(false); }
    }
    // Only allow for one action (spatial or temporal) at a given time.
    assert(action_.temporal == kNoChangeTemporal ||
@ -572,9 +566,9 @@ float VCMQmResolution::GetTransitionRate(float fac_width,
                                         float fac_height,
                                         float fac_temp,
                                         float scale_fac) {
  ImageType image_type =
      GetImageType(static_cast<uint16_t>(fac_width * width_),
                   static_cast<uint16_t>(fac_height * height_));

  FrameRateLevelClass framerate_level =
      FrameRateLevel(fac_temp * avg_incoming_framerate_);
@ -589,13 +583,13 @@ float VCMQmResolution::GetTransitionRate(float fac_width,
  // Nominal values based on image format (frame size and frame rate).
  float max_rate = kFrameRateFac[framerate_level] * kMaxRateQm[image_type];

  uint8_t image_class = image_type > kVGA ? 1 : 0;
  uint8_t table_index = image_class * 9 + content_class_;
  // Scale factor for down-sampling transition threshold:
  // factor based on the content class and the image size.
  float scaleTransRate = kScaleTransRateQm[table_index];
  // Threshold bitrate for resolution action.
  return static_cast<float>(scale_fac * scaleTransRate * max_rate);
}

void VCMQmResolution::UpdateDownsamplingState(UpDownAction up_down) {
@ -605,9 +599,9 @@ void VCMQmResolution::UpdateDownsamplingState(UpDownAction up_down) {
  // If last spatial action was 1/2x1/2, we undo it in two steps, so the
  // spatial scale factor in this first step is modified as (4.0/3.0 / 2.0).
  if (action_.spatial == kOneQuarterSpatialUniform) {
    qm_->spatial_width_fact = 1.0f *
                              kFactorWidthSpatial[kOneHalfSpatialUniform] /
                              kFactorWidthSpatial[kOneQuarterSpatialUniform];
    qm_->spatial_height_fact =
        1.0f * kFactorHeightSpatial[kOneHalfSpatialUniform] /
        kFactorHeightSpatial[kOneQuarterSpatialUniform];
@ -628,17 +622,18 @@ void VCMQmResolution::UpdateDownsamplingState(UpDownAction up_down) {
  }
  UpdateCodecResolution();
  state_dec_factor_spatial_ = state_dec_factor_spatial_ *
                              qm_->spatial_width_fact *
                              qm_->spatial_height_fact;
  state_dec_factor_temporal_ = state_dec_factor_temporal_ * qm_->temporal_fact;
}

void VCMQmResolution::UpdateCodecResolution() {
  if (action_.spatial != kNoChangeSpatial) {
    qm_->change_resolution_spatial = true;
    qm_->codec_width =
        static_cast<uint16_t>(width_ / qm_->spatial_width_fact + 0.5f);
    qm_->codec_height =
        static_cast<uint16_t>(height_ / qm_->spatial_height_fact + 0.5f);
    // Size should not exceed native sizes.
    assert(qm_->codec_width <= native_width_);
    assert(qm_->codec_height <= native_height_);
@ -662,8 +657,9 @@ void VCMQmResolution::UpdateCodecResolution() {
}

uint8_t VCMQmResolution::RateClass(float transition_rate) {
  return avg_target_rate_ < (kFacLowRate * transition_rate)
             ? 0
             : (avg_target_rate_ >= transition_rate ? 2 : 1);
}

// TODO(marpan): Would be better to capture these frame rate adjustments by
@ -698,15 +694,14 @@ void VCMQmResolution::AdjustAction() {
  }
  // Never use temporal action if number of temporal layers is above 2.
  if (num_layers_ > 2) {
    if (action_.temporal != kNoChangeTemporal) {
      action_.spatial = kOneHalfSpatialUniform;
    }
    action_.temporal = kNoChangeTemporal;
  }
  // If spatial action was selected, we need to make sure the frame sizes
  // are multiples of two. Otherwise switch to 2/3 temporal.
  if (action_.spatial != kNoChangeSpatial && !EvenFrameSize()) {
    action_.spatial = kNoChangeSpatial;
    // Only one action (spatial or temporal) is allowed at a given time, so need
    // to check whether temporal action is currently selected.
@ -722,35 +717,36 @@ void VCMQmResolution::ConvertSpatialFractionalToWhole() {
  bool found = false;
  int isel = kDownActionHistorySize;
  for (int i = 0; i < kDownActionHistorySize; ++i) {
    if (down_action_history_[i].spatial == kOneHalfSpatialUniform) {
      isel = i;
      found = true;
      break;
    }
  }
  if (found) {
    action_.spatial = kOneQuarterSpatialUniform;
    state_dec_factor_spatial_ =
        state_dec_factor_spatial_ /
        (kFactorWidthSpatial[kOneHalfSpatialUniform] *
         kFactorHeightSpatial[kOneHalfSpatialUniform]);
    // Check if switching to 1/2x1/2 (=1/4) spatial is allowed.
    ConstrainAmountOfDownSampling();
    if (action_.spatial == kNoChangeSpatial) {
      // Not allowed. Go back to 3/4x3/4 spatial.
      action_.spatial = kOneHalfSpatialUniform;
      state_dec_factor_spatial_ =
          state_dec_factor_spatial_ *
          kFactorWidthSpatial[kOneHalfSpatialUniform] *
          kFactorHeightSpatial[kOneHalfSpatialUniform];
    } else {
      // Switching is allowed. Remove 3/4x3/4 from the history, and update
      // the frame size.
      for (int i = isel; i < kDownActionHistorySize - 1; ++i) {
        down_action_history_[i].spatial = down_action_history_[i + 1].spatial;
      }
      width_ = width_ * kFactorWidthSpatial[kOneHalfSpatialUniform];
      height_ = height_ * kFactorHeightSpatial[kOneHalfSpatialUniform];
    }
  }
}
@ -815,8 +811,8 @@ void VCMQmResolution::ConstrainAmountOfDownSampling() {
  float spatial_width_fact = kFactorWidthSpatial[action_.spatial];
  float spatial_height_fact = kFactorHeightSpatial[action_.spatial];
  float temporal_fact = kFactorTemporal[action_.temporal];
  float new_dec_factor_spatial =
      state_dec_factor_spatial_ * spatial_width_fact * spatial_height_fact;
  float new_dec_factor_temp = state_dec_factor_temporal_ * temporal_fact;

  // No spatial sampling if current frame size is too small, or if the
@ -908,8 +904,7 @@ VCMQmRobustness::VCMQmRobustness() {
  Reset();
}

VCMQmRobustness::~VCMQmRobustness() {}

void VCMQmRobustness::Reset() {
  prev_total_rate_ = 0.0f;
@ -928,7 +923,7 @@ float VCMQmRobustness::AdjustFecFactor(uint8_t code_rate_delta,
                                       int64_t rtt_time,
                                       uint8_t packet_loss) {
  // Default: no adjustment
  float adjust_fec = 1.0f;
  if (content_metrics_ == NULL) {
    return adjust_fec;
  }
@ -955,4 +950,4 @@ bool VCMQmRobustness::SetUepProtection(uint8_t code_rate_delta,
  // Default.
  return false;
}
}  // namespace webrtc
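To make the GetTransitionRate() arithmetic above concrete, here is a worked example; the kMaxRateQm entry is an assumed value, since the table contents are not shown in this hunk, while the rest follows the code directly.

// Worked example (assumed kMaxRateQm[kVGA] = 1000 kbps):
//   image_type      = kVGA           -> image_class = 0
//   content_class_  = 3              (motion = kHigh, spatial = kLow)
//   table_index     = 0 * 9 + 3 = 3  -> kScaleTransRateQm[3] = 0.60f
//   framerate_level = kFrameRateHigh -> kFrameRateFac[3] = 1.0f
//   max_rate        = 1.0f * 1000 = 1000 kbps
//   transition rate = scale_fac * 0.60f * 1000 = 600 kbps for scale_fac = 1.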
View File
@ -30,8 +30,7 @@ struct VCMResolutionScale {
        spatial_height_fact(1.0f),
        temporal_fact(1.0f),
        change_resolution_spatial(false),
        change_resolution_temporal(false) {}
  uint16_t codec_width;
  uint16_t codec_height;
  float frame_rate;
@ -43,20 +42,20 @@
};

enum ImageType {
  kQCIF = 0,  // 176x144
  kHCIF,      // 264x216 = half(~3/4x3/4) CIF.
  kQVGA,      // 320x240 = quarter VGA.
  kCIF,       // 352x288
  kHVGA,      // 480x360 = half(~3/4x3/4) VGA.
  kVGA,       // 640x480
  kQFULLHD,   // 960x540 = quarter FULLHD, and half(~3/4x3/4) WHD.
  kWHD,       // 1280x720
  kFULLHD,    // 1920x1080
  kNumImageTypes
};

const uint32_t kSizeOfImageType[kNumImageTypes] = {
    25344, 57024, 76800, 101376, 172800, 307200, 518400, 921600, 2073600};

enum FrameRateLevelClass {
  kFrameRateLow,
@ -65,17 +64,10 @@ enum FrameRateLevelClass {
  kFrameRateHigh
};

enum ContentLevelClass { kLow, kHigh, kDefault };

struct VCMContFeature {
  VCMContFeature() : value(0.0f), level(kDefault) {}
  void Reset() {
    value = 0.0f;
    level = kDefault;
@ -84,43 +76,34 @@ struct VCMContFeature {
  ContentLevelClass level;
};

enum UpDownAction { kUpResolution, kDownResolution };

enum SpatialAction {
  kNoChangeSpatial,
  kOneHalfSpatialUniform,     // 3/4 x 3/4: 9/16 ~1/2 pixel reduction.
  kOneQuarterSpatialUniform,  // 1/2 x 1/2: 1/4 pixel reduction.
  kNumModesSpatial
};

enum TemporalAction {
  kNoChangeTemporal,
  kTwoThirdsTemporal,  // 2/3 frame rate reduction
  kOneHalfTemporal,    // 1/2 frame rate reduction
  kNumModesTemporal
};

struct ResolutionAction {
  ResolutionAction() : spatial(kNoChangeSpatial), temporal(kNoChangeTemporal) {}
  SpatialAction spatial;
  TemporalAction temporal;
};

// Down-sampling factors for spatial (width and height), and temporal.
const float kFactorWidthSpatial[kNumModesSpatial] = {1.0f, 4.0f / 3.0f, 2.0f};
const float kFactorHeightSpatial[kNumModesSpatial] = {1.0f, 4.0f / 3.0f, 2.0f};
const float kFactorTemporal[kNumModesTemporal] = {1.0f, 1.5f, 2.0f};

enum EncoderState {
  kStableEncoding,  // Low rate mis-match, stable buffer levels.
@ -297,7 +280,7 @@ class VCMQmResolution : public VCMQmMethod {
  // Select the directional (1x2 or 2x1) spatial down-sampling action.
  void SelectSpatialDirectionMode(float transition_rate);

  enum { kDownActionHistorySize = 10 };

  VCMResolutionScale* qm_;
  // Encoder rate control parameters.
View File
@ -69,36 +69,36 @@ const uint16_t kMaxRateQm[9] = {

// Frame rate scale for maximum transition rate.
const float kFrameRateFac[4] = {
    0.5f,   // Low
    0.7f,   // Middle level 1
    0.85f,  // Middle level 2
    1.0f,   // High
};

// Scale for transitional rate: based on content class
// motion=L/H/D, spatial=L/H/D: for low, high, middle levels
const float kScaleTransRateQm[18] = {
    // VGA and lower
    0.40f,  // L, L
    0.50f,  // L, H
    0.40f,  // L, D
    0.60f,  // H, L
    0.60f,  // H, H
    0.60f,  // H, D
    0.50f,  // D, L
    0.50f,  // D, D
    0.50f,  // D, H

    // over VGA
    0.40f,  // L, L
    0.50f,  // L, H
    0.40f,  // L, D
    0.60f,  // H, L
    0.60f,  // H, H
    0.60f,  // H, D
    0.50f,  // D, L
    0.50f,  // D, D
    0.50f,  // D, H
};

// Threshold on the target rate relative to transitional rate.
@ -108,73 +108,73 @@ const float kFacLowRate = 0.5f;
// motion=L/H/D, spatial=L/H/D, for low, high, middle levels;
// rate = 0/1/2, for target rate state relative to transition rate.
const uint8_t kSpatialAction[27] = {
    // rateClass = 0:
    1,  // L, L
    1,  // L, H
    1,  // L, D
    4,  // H, L
    1,  // H, H
    4,  // H, D
    4,  // D, L
    1,  // D, H
    2,  // D, D

    // rateClass = 1:
    1,  // L, L
    1,  // L, H
    1,  // L, D
    2,  // H, L
    1,  // H, H
    2,  // H, D
    2,  // D, L
    1,  // D, H
    2,  // D, D

    // rateClass = 2:
    1,  // L, L
    1,  // L, H
    1,  // L, D
    2,  // H, L
    1,  // H, H
    2,  // H, D
    2,  // D, L
    1,  // D, H
    2,  // D, D
};

const uint8_t kTemporalAction[27] = {
    // rateClass = 0:
    3,  // L, L
    2,  // L, H
    2,  // L, D
    1,  // H, L
    3,  // H, H
    1,  // H, D
    1,  // D, L
    2,  // D, H
    1,  // D, D

    // rateClass = 1:
    3,  // L, L
    3,  // L, H
    3,  // L, D
    1,  // H, L
    3,  // H, H
    1,  // H, D
    1,  // D, L
    3,  // D, H
    1,  // D, D

    // rateClass = 2:
    1,  // L, L
    3,  // L, H
    3,  // L, D
    1,  // H, L
    3,  // H, H
    1,  // H, D
    1,  // D, L
    3,  // D, H
    1,  // D, D
};

// Control the total amount of down-sampling allowed.
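A hypothetical lookup helper (not in the source) showing how these 27-entry tables are indexed: 9-entry blocks per rate class from RateClass(), with the content class (3 * motion level + spatial level, see ComputeContentClass() in qm_select.cc) selecting within a block.

#include <cstdint>

inline uint8_t LookupAction(const uint8_t table[27],
                            uint8_t rate_class,       // 0, 1 or 2
                            uint8_t content_class) {  // 0..8
  // Each rate class owns a contiguous block of nine content-class entries.
  return table[9 * rate_class + content_class];
}
// e.g. LookupAction(kSpatialAction, 0, 3) == 4: at the lowest rate class,
// high-motion/low-spatial content picks the strongest spatial action.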
View File
@ -32,10 +32,9 @@ const float kTemporalHigh = 0.1f;
class QmSelectTest : public ::testing::Test {
 protected:
  QmSelectTest()
      : qm_resolution_(new VCMQmResolution()),
        content_metrics_(new VideoContentMetrics()),
        qm_scale_(NULL) {}
  VCMQmResolution* qm_resolution_;
  VideoContentMetrics* content_metrics_;
  VCMResolutionScale* qm_scale_;
@ -87,8 +86,8 @@ TEST_F(QmSelectTest, HandleInputs) {
  qm_resolution_->UpdateContent(content_metrics);
  // Content metrics are NULL: Expect success and no down-sampling action.
  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
  EXPECT_TRUE(
      IsSelectedActionCorrect(qm_scale_, 1.0, 1.0, 1.0, 640, 480, 30.0f));
}

// TODO(marpan): Add a test for number of temporal layers > 1.
@ -118,8 +117,8 @@ TEST_F(QmSelectTest, NoActionHighRate) {
  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
  EXPECT_EQ(0, qm_resolution_->ComputeContentClass());
  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
  EXPECT_TRUE(
      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
}

// Rate is well below transition, down-sampling action is taken,
@ -149,40 +148,40 @@ TEST_F(QmSelectTest, DownActionLowRate) {
  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
  EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
  EXPECT_TRUE(
      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));

  qm_resolution_->ResetDownSamplingState();
  // Low motion, low spatial: 2/3 temporal is expected.
  UpdateQmContentData(kTemporalLow, kSpatialLow, kSpatialLow, kSpatialLow);
  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
  EXPECT_EQ(0, qm_resolution_->ComputeContentClass());
  EXPECT_TRUE(
      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));

  qm_resolution_->ResetDownSamplingState();
  // Medium motion, low spatial: 2x2 spatial expected.
  UpdateQmContentData(kTemporalMedium, kSpatialLow, kSpatialLow, kSpatialLow);
  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
  EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
  EXPECT_TRUE(
      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));

  qm_resolution_->ResetDownSamplingState();
  // High motion, high spatial: 2/3 temporal expected.
  UpdateQmContentData(kTemporalHigh, kSpatialHigh, kSpatialHigh, kSpatialHigh);
  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
  EXPECT_EQ(4, qm_resolution_->ComputeContentClass());
  EXPECT_TRUE(
      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));

  qm_resolution_->ResetDownSamplingState();
  // Low motion, high spatial: 1/2 temporal expected.
  UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass()); EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, EXPECT_TRUE(
15.5f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
qm_resolution_->ResetDownSamplingState(); qm_resolution_->ResetDownSamplingState();
// Medium motion, high spatial: 1/2 temporal expected. // Medium motion, high spatial: 1/2 temporal expected.
@ -190,8 +189,8 @@ TEST_F(QmSelectTest, DownActionLowRate) {
kSpatialHigh); kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(7, qm_resolution_->ComputeContentClass()); EXPECT_EQ(7, qm_resolution_->ComputeContentClass());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, EXPECT_TRUE(
15.5f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
qm_resolution_->ResetDownSamplingState(); qm_resolution_->ResetDownSamplingState();
// High motion, medium spatial: 2x2 spatial expected. // High motion, medium spatial: 2x2 spatial expected.
@ -200,8 +199,8 @@ TEST_F(QmSelectTest, DownActionLowRate) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(5, qm_resolution_->ComputeContentClass()); EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
// Target frame rate for frame dropper should be the same as previous == 15. // Target frame rate for frame dropper should be the same as previous == 15.
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
qm_resolution_->ResetDownSamplingState(); qm_resolution_->ResetDownSamplingState();
// Low motion, medium spatial: high frame rate, so 1/2 temporal expected. // Low motion, medium spatial: high frame rate, so 1/2 temporal expected.
@ -209,8 +208,8 @@ TEST_F(QmSelectTest, DownActionLowRate) {
kSpatialMedium); kSpatialMedium);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(2, qm_resolution_->ComputeContentClass()); EXPECT_EQ(2, qm_resolution_->ComputeContentClass());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, EXPECT_TRUE(
15.5f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
qm_resolution_->ResetDownSamplingState(); qm_resolution_->ResetDownSamplingState();
// Medium motion, medium spatial: high frame rate, so 2/3 temporal expected. // Medium motion, medium spatial: high frame rate, so 2/3 temporal expected.
@ -218,8 +217,8 @@ TEST_F(QmSelectTest, DownActionLowRate) {
kSpatialMedium); kSpatialMedium);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(8, qm_resolution_->ComputeContentClass()); EXPECT_EQ(8, qm_resolution_->ComputeContentClass());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, EXPECT_TRUE(
20.5f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
} }
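The ComputeContentClass() values in this test are consistent with a 3x3 grid, class = 3 * motion + spatial, with levels ordered low = 0, high = 1, medium = 2: high/low gives 3, medium/low gives 6, low/high gives 1, medium/medium gives 8. This mapping is inferred from the expectations above, not a claim about the implementation:

// Inferred mapping only; the real enum ordering may differ.
int ContentClass(int motion_class, int spatial_class) {
  return 3 * motion_class + spatial_class;
}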
// Rate mis-match is high, and we have over-shooting. // Rate mis-match is high, and we have over-shooting.
@ -249,16 +248,16 @@ TEST_F(QmSelectTest, DownActionHighRateMMOvershoot) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass()); EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
1.0f, 480, 360, 30.0f)); 480, 360, 30.0f));
qm_resolution_->ResetDownSamplingState(); qm_resolution_->ResetDownSamplingState();
// Low motion, high spatial // Low motion, high spatial
UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh); UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass()); EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, EXPECT_TRUE(
20.5f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
} }
// Rate mis-match is high, target rate is below max for down-sampling, // Rate mis-match is high, target rate is below max for down-sampling,
@ -288,16 +287,16 @@ TEST_F(QmSelectTest, NoActionHighRateMMUndershoot) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass()); EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
qm_resolution_->ResetDownSamplingState(); qm_resolution_->ResetDownSamplingState();
// Low motion, high spatial // Low motion, high spatial
UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh); UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass()); EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
} }
// Buffer is underflowing, and target rate is below max for down-sampling, // Buffer is underflowing, and target rate is below max for down-sampling,
@ -332,16 +331,16 @@ TEST_F(QmSelectTest, DownActionBufferUnderflow) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass()); EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
1.0f, 480, 360, 30.0f)); 480, 360, 30.0f));
qm_resolution_->ResetDownSamplingState(); qm_resolution_->ResetDownSamplingState();
// Low motion, high spatial // Low motion, high spatial
UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh); UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass()); EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, EXPECT_TRUE(
20.5f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
} }
// Target rate is below max for down-sampling, but buffer level is stable, // Target rate is below max for down-sampling, but buffer level is stable,
@ -376,16 +375,16 @@ TEST_F(QmSelectTest, NoActionBufferStable) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass()); EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
qm_resolution_->ResetDownSamplingState(); qm_resolution_->ResetDownSamplingState();
// Low motion, high spatial // Low motion, high spatial
UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh); UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass()); EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
} }
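Across these tests the encoder state gates the decision: kStressedEncoding (rate over-shoot or buffer underflow) permits down-sampling, kEasyEncoding (under-shoot) favors going back up, and kStableEncoding lets the content class drive the choice. This reading is inferred from the test names and expectations, not stated in the implementation here.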
// Very low rate, but no spatial down-sampling below some size (QCIF). // Very low rate, but no spatial down-sampling below some size (QCIF).
@ -414,8 +413,8 @@ TEST_F(QmSelectTest, LimitDownSpatialAction) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass()); EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 176, 144, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 176, 144, 30.0f));
} }
// Very low rate, but no frame reduction below some frame_rate (8fps). // Very low rate, but no frame reduction below some frame_rate (8fps).
@ -445,8 +444,8 @@ TEST_F(QmSelectTest, LimitDownTemporalAction) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(2, qm_resolution_->ComputeContentClass()); EXPECT_EQ(2, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, EXPECT_TRUE(
8.0f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 8.0f));
} }
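Both limit tests pin floors on down-sampling: no spatial action below QCIF and no frame-rate reduction below 8 fps. A sketch of the guards those floors imply — the constant and function names are illustrative, only the 176x144 and 8 fps values come from the expectations:

const uint16_t kMinDownSampledWidth = 176;   // QCIF.
const uint16_t kMinDownSampledHeight = 144;  // QCIF.
const float kMinFrameRateFps = 8.0f;

bool MaySpatiallyDownSample(uint16_t width, uint16_t height) {
  return width > kMinDownSampledWidth && height > kMinDownSampledHeight;
}

bool MayTemporallyDownSample(float frame_rate_fps) {
  return frame_rate_fps > kMinFrameRateFps;
}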
// Two stages: spatial down-sample and then back up spatially, // Two stages: spatial down-sample and then back up spatially,
@ -468,7 +467,7 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatial) {
int incoming_frame_rate[] = {30, 30, 30}; int incoming_frame_rate[] = {30, 30, 30};
uint8_t fraction_lost[] = {10, 10, 10}; uint8_t fraction_lost[] = {10, 10, 10};
UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate, UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
fraction_lost, 3); fraction_lost, 3);
// Update content: motion level, and 3 spatial prediction errors. // Update content: motion level, and 3 spatial prediction errors.
// High motion, low spatial. // High motion, low spatial.
@ -476,8 +475,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatial) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass()); EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
// Reset and go up in rate: expected to go back up, in 2 stages of 3/4. // Reset and go up in rate: expected to go back up, in 2 stages of 3/4.
qm_resolution_->ResetRates(); qm_resolution_->ResetRates();
@ -493,8 +492,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatial) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
float scale = (4.0f / 3.0f) / 2.0f; float scale = (4.0f / 3.0f) / 2.0f;
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360, 30.0f));
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360); qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360)); EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@ -522,7 +521,7 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatialUndershoot) {
int incoming_frame_rate[] = {30, 30, 30}; int incoming_frame_rate[] = {30, 30, 30};
uint8_t fraction_lost[] = {10, 10, 10}; uint8_t fraction_lost[] = {10, 10, 10};
UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate, UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
fraction_lost, 3); fraction_lost, 3);
// Update content: motion level, and 3 spatial prediction errors. // Update content: motion level, and 3 spatial prediction errors.
// High motion, low spatial. // High motion, low spatial.
@ -530,8 +529,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatialUndershoot) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass()); EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
// Reset rates and simulate an under-shooting scenario: expect to go back up. // Reset rates and simulate an under-shooting scenario: expect to go back up.
// Goes up spatially in two stages for 1/2x1/2 down-sampling. // Goes up spatially in two stages for 1/2x1/2 down-sampling.
@ -548,8 +547,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatialUndershoot) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
float scale = (4.0f / 3.0f) / 2.0f; float scale = (4.0f / 3.0f) / 2.0f;
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360, 30.0f));
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360); qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360)); EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@ -577,7 +576,7 @@ TEST_F(QmSelectTest, 2StageDownSpatialNoActionUp) {
int incoming_frame_rate[] = {30, 30, 30}; int incoming_frame_rate[] = {30, 30, 30};
uint8_t fraction_lost[] = {10, 10, 10}; uint8_t fraction_lost[] = {10, 10, 10};
UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate, UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
fraction_lost, 3); fraction_lost, 3);
// Update content: motion level, and 3 spatial prediction errors. // Update content: motion level, and 3 spatial prediction errors.
// High motion, low spatial. // High motion, low spatial.
@ -585,8 +584,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialNoActionUp) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass()); EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
// Reset and simulate large rate mis-match: expect no action to go back up. // Reset and simulate large rate mis-match: expect no action to go back up.
qm_resolution_->ResetRates(); qm_resolution_->ResetRates();
@ -601,8 +600,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialNoActionUp) {
fraction_lost2, 5); fraction_lost2, 5);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 320, 240, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 320, 240, 30.0f));
} }
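Note the asymmetry this test checks: even though the target rate has gone up, the large rate mis-match (encoder sent rate far above target) moves the state to kStressedEncoding, and the selector refuses to undo the earlier 2x2 down-sample — the factors stay at 1.0f and the stream remains 320x240.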
// Two stages: temporally down-sample and then back up temporally, // Two stages: temporally down-sample and then back up temporally,
@ -632,8 +631,8 @@ TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporal) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass()); EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, EXPECT_TRUE(
15.5f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
// Reset rates and go up in rate: expect to go back up. // Reset rates and go up in rate: expect to go back up.
qm_resolution_->ResetRates(); qm_resolution_->ResetRates();
@ -646,8 +645,8 @@ TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporal) {
fraction_lost2, 5); fraction_lost2, 5);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480, 30.0f));
} }
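The temporal factor reads as a divisor of the current setting: the 2.0f action took 30 fps down to the ~15 fps dropper target, and the 0.5f here doubles it back to 30. That the 15.5f and 20.5f targets sit half a frame above the exact quotients (30/2 and 30/1.5) looks like a frame-dropper margin — an inference from the numbers, not from the code.

// Divisor reading of the temporal factor (illustrative helper).
float NewFrameRate(float current_fps, float temporal_fact) {
  return current_fps / temporal_fact;  // e.g. 15.0f / 0.5f == 30.0f.
}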
// Two stages: temporal down-sample and then back up temporally, since encoder // Two stages: temporal down-sample and then back up temporally, since encoder
@ -669,7 +668,7 @@ TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporalUndershoot) {
int incoming_frame_rate[] = {30, 30, 30}; int incoming_frame_rate[] = {30, 30, 30};
uint8_t fraction_lost[] = {10, 10, 10}; uint8_t fraction_lost[] = {10, 10, 10};
UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate, UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
fraction_lost, 3); fraction_lost, 3);
// Update content: motion level, and 3 spatial prediction errors. // Update content: motion level, and 3 spatial prediction errors.
// Low motion, high spatial. // Low motion, high spatial.
@ -677,8 +676,8 @@ TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporalUndershoot) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass()); EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, EXPECT_TRUE(
15.5f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
// Reset rates and simulate an under-shooting scenario: expect to go back up. // Reset rates and simulate an under-shooting scenario: expect to go back up.
qm_resolution_->ResetRates(); qm_resolution_->ResetRates();
@ -691,8 +690,8 @@ TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporalUndershoot) {
fraction_lost2, 5); fraction_lost2, 5);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480, 30.0f));
} }
// Two stages: temporal down-sample and then no action to go up, // Two stages: temporal down-sample and then no action to go up,
@ -736,8 +735,8 @@ TEST_F(QmSelectTest, 2StageDownTemporalNoActionUp) {
fraction_lost2, 5); fraction_lost2, 5);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, EXPECT_TRUE(
15.0f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 15.0f));
} }
// 3 stages: spatial down-sample, followed by temporal down-sample, // 3 stages: spatial down-sample, followed by temporal down-sample,
// and then go up to full state, as encoding rate has increased. // and then go up to full state, as encoding rate has increased.
@ -766,8 +765,8 @@ TEST_F(QmSelectTest, 3StageDownSpatialTemporlaUpSpatialTemporal) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass()); EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
// Change content data: expect temporal down-sample. // Change content data: expect temporal down-sample.
qm_resolution_->UpdateCodecParameters(30.0f, 320, 240); qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
@ -780,7 +779,7 @@ TEST_F(QmSelectTest, 3StageDownSpatialTemporlaUpSpatialTemporal) {
int incoming_frame_rate2[] = {30, 30, 30, 30, 30}; int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
uint8_t fraction_lost2[] = {10, 10, 10, 10, 10}; uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2, UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
fraction_lost2, 5); fraction_lost2, 5);
// Update content: motion level, and 3 spatial prediction errors. // Update content: motion level, and 3 spatial prediction errors.
// Low motion, high spatial. // Low motion, high spatial.
@ -788,8 +787,8 @@ TEST_F(QmSelectTest, 3StageDownSpatialTemporlaUpSpatialTemporal) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass()); EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, EXPECT_TRUE(
20.5f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, 20.5f));
// Reset rates and go high up in rate: expect to go back up both spatially // Reset rates and go high up in rate: expect to go back up both spatially
// and temporally. The 1/2x1/2 spatial is undone in two stages. // and temporally. The 1/2x1/2 spatial is undone in two stages.
@ -806,8 +805,8 @@ TEST_F(QmSelectTest, 3StageDownSpatialTemporlaUpSpatialTemporal) {
EXPECT_EQ(1, qm_resolution_->ComputeContentClass()); EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
float scale = (4.0f / 3.0f) / 2.0f; float scale = (4.0f / 3.0f) / 2.0f;
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f, EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f, 480,
480, 360, 30.0f)); 360, 30.0f));
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360); qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360)); EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@ -842,8 +841,8 @@ TEST_F(QmSelectTest, NoActionTooMuchDownSampling) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass()); EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 640, 360, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 640, 360, 30.0f));
// Reset and lower rates to get another spatial action (3/4x3/4). // Reset and lower rates to get another spatial action (3/4x3/4).
// Lower the frame rate for spatial to be selected again. // Lower the frame rate for spatial to be selected again.
@ -865,8 +864,8 @@ TEST_F(QmSelectTest, NoActionTooMuchDownSampling) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(5, qm_resolution_->ComputeContentClass()); EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
1.0f, 480, 270, 10.0f)); 480, 270, 10.0f));
// Reset and go to very low rate: no action should be taken, // Reset and go to very low rate: no action should be taken,
// we went down too much already. // we went down too much already.
@ -883,8 +882,8 @@ TEST_F(QmSelectTest, NoActionTooMuchDownSampling) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(5, qm_resolution_->ComputeContentClass()); EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 480, 270, EXPECT_TRUE(
10.0f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 480, 270, 10.0f));
} }
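Worked through, the two stages compound to 2.0f * (4.0f / 3.0f) = 8/3, about 2.67 per dimension (1280x720 -> 640x360 -> 480x270, assuming the 2x2 first stage started from 1280x720, which the 640x360 result implies). That exceeds the selector's cumulative down-sampling budget, so the final request leaves 480x270 at 10 fps untouched with factors of 1.0f.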
// Multiple down-sampling stages and then undo all of them. // Multiple down-sampling stages and then undo all of them.
@ -917,8 +916,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory1) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass()); EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
1.0f, 480, 360, 30.0f)); 480, 360, 30.0f));
// Go down 2/3 temporal. // Go down 2/3 temporal.
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360); qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360)); EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@ -936,8 +935,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory1) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass()); EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360, EXPECT_TRUE(
20.5f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360, 20.5f));
// Go down 3/4x3/4 spatial: // Go down 3/4x3/4 spatial:
qm_resolution_->UpdateCodecParameters(20.0f, 480, 360); qm_resolution_->UpdateCodecParameters(20.0f, 480, 360);
@ -947,7 +946,7 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory1) {
int incoming_frame_rate3[] = {20, 20, 20, 20, 20}; int incoming_frame_rate3[] = {20, 20, 20, 20, 20};
uint8_t fraction_lost3[] = {10, 10, 10, 10, 10}; uint8_t fraction_lost3[] = {10, 10, 10, 10, 10};
UpdateQmRateData(target_rate3, encoder_sent_rate3, incoming_frame_rate3, UpdateQmRateData(target_rate3, encoder_sent_rate3, incoming_frame_rate3,
fraction_lost3, 5); fraction_lost3, 5);
// Update content: motion level, and 3 spatial prediction errors. // Update content: motion level, and 3 spatial prediction errors.
// High motion, low spatial. // High motion, low spatial.
@ -957,8 +956,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory1) {
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
// The two spatial actions of 3/4x3/4 are converted to 1/2x1/2, // The two spatial actions of 3/4x3/4 are converted to 1/2x1/2,
// so scale factor is 2.0. // so scale factor is 2.0.
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, EXPECT_TRUE(
20.0f)); IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 20.0f));
// Reset rates and go high up in rate: expect to go up: // Reset rates and go high up in rate: expect to go up:
// 1/2x1/2 spatial and 1/2 temporally. // 1/2x1/2 spatial and 1/2 temporally.
@ -1018,8 +1017,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory2) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass()); EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
// Go down 2/3 temporal. // Go down 2/3 temporal.
qm_resolution_->UpdateCodecParameters(30.0f, 320, 240); qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
@ -1039,8 +1038,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory2) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(7, qm_resolution_->ComputeContentClass()); EXPECT_EQ(7, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, EXPECT_TRUE(
20.5f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, 20.5f));
// Go up 2/3 temporally. // Go up 2/3 temporally.
qm_resolution_->UpdateCodecParameters(20.0f, 320, 240); qm_resolution_->UpdateCodecParameters(20.0f, 320, 240);
@ -1076,8 +1075,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory2) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass()); EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, EXPECT_TRUE(
20.5f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, 20.5f));
// Go up spatial and temporal. Spatial undoing is done in 2 stages. // Go up spatial and temporal. Spatial undoing is done in 2 stages.
qm_resolution_->UpdateCodecParameters(20.5f, 320, 240); qm_resolution_->UpdateCodecParameters(20.5f, 320, 240);
@ -1092,8 +1091,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory2) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
float scale = (4.0f / 3.0f) / 2.0f; float scale = (4.0f / 3.0f) / 2.0f;
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f, EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f, 480,
480, 360, 30.0f)); 360, 30.0f));
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360); qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360)); EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@ -1131,8 +1130,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory3) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass()); EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
1.0f, 480, 360, 30.0f)); 480, 360, 30.0f));
// Go down 2/3 temporal. // Go down 2/3 temporal.
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360); qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
@ -1151,8 +1150,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory3) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass()); EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360, EXPECT_TRUE(
20.5f)); IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360, 20.5f));
// Go up 2/3 temporal. // Go up 2/3 temporal.
qm_resolution_->UpdateCodecParameters(20.5f, 480, 360); qm_resolution_->UpdateCodecParameters(20.5f, 480, 360);
@ -1184,8 +1183,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory3) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f, EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f, 1.0f,
1.0f, 640, 480, 30.0f)); 640, 480, 30.0f));
} }
// Two stages of 3/4x3/4 converted to one stage of 1/2x1/2. // Two stages of 3/4x3/4 converted to one stage of 1/2x1/2.
@ -1215,8 +1214,8 @@ TEST_F(QmSelectTest, ConvertThreeQuartersToOneHalf) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass()); EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
1.0f, 480, 360, 30.0f)); 480, 360, 30.0f));
// Set rates to go down another 3/4 spatial. Should be converted to 1/2. // Set rates to go down another 3/4 spatial. Should be converted to 1/2.
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360); qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
@ -1235,8 +1234,8 @@ TEST_F(QmSelectTest, ConvertThreeQuartersToOneHalf) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_)); EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass()); EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState()); EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, EXPECT_TRUE(
30.0f)); IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
} }
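The conversion is a snap-to-grid: two 3/4 down-steps compound to (4.0f / 3.0f)^2 = 16/9, about 1.78 in scale-factor terms, and the selector rounds that up to a single 2.0f (1/2x1/2) action — hence the 320x240 expectation rather than the exact 360x270 a second 3/4 step would give.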
void QmSelectTest::InitQmNativeData(float initial_bit_rate, void QmSelectTest::InitQmNativeData(float initial_bit_rate,
@ -1244,11 +1243,9 @@ void QmSelectTest::InitQmNativeData(float initial_bit_rate,
int native_width, int native_width,
int native_height, int native_height,
int num_layers) { int num_layers) {
EXPECT_EQ(0, qm_resolution_->Initialize(initial_bit_rate, EXPECT_EQ(
user_frame_rate, 0, qm_resolution_->Initialize(initial_bit_rate, user_frame_rate,
native_width, native_width, native_height, num_layers));
native_height,
num_layers));
} }
void QmSelectTest::UpdateQmContentData(float motion_metric, void QmSelectTest::UpdateQmContentData(float motion_metric,
@ -1281,8 +1278,7 @@ void QmSelectTest::UpdateQmRateData(int* target_rate,
float encoder_sent_rate_update = encoder_sent_rate[i]; float encoder_sent_rate_update = encoder_sent_rate[i];
float incoming_frame_rate_update = incoming_frame_rate[i]; float incoming_frame_rate_update = incoming_frame_rate[i];
uint8_t fraction_lost_update = fraction_lost[i]; uint8_t fraction_lost_update = fraction_lost[i];
qm_resolution_->UpdateRates(target_rate_update, qm_resolution_->UpdateRates(target_rate_update, encoder_sent_rate_update,
encoder_sent_rate_update,
incoming_frame_rate_update, incoming_frame_rate_update,
fraction_lost_update); fraction_lost_update);
} }


@ -14,6 +14,7 @@
#include <cstdlib> #include <cstdlib>
#include <utility> #include <utility>
#include <vector>
#include "webrtc/base/logging.h" #include "webrtc/base/logging.h"
#include "webrtc/base/trace_event.h" #include "webrtc/base/trace_event.h"
@ -72,8 +73,8 @@ int32_t VCMReceiver::InsertPacket(const VCMPacket& packet,
// Insert the packet into the jitter buffer. The packet can either be empty or // Insert the packet into the jitter buffer. The packet can either be empty or
// contain media at this point. // contain media at this point.
bool retransmitted = false; bool retransmitted = false;
const VCMFrameBufferEnum ret = jitter_buffer_.InsertPacket(packet, const VCMFrameBufferEnum ret =
&retransmitted); jitter_buffer_.InsertPacket(packet, &retransmitted);
if (ret == kOldPacket) { if (ret == kOldPacket) {
return VCM_OK; return VCM_OK;
} else if (ret == kFlushIndicator) { } else if (ret == kFlushIndicator) {
@ -96,13 +97,13 @@ void VCMReceiver::TriggerDecoderShutdown() {
} }
VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms, VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
int64_t& next_render_time_ms, int64_t* next_render_time_ms,
bool prefer_late_decoding) { bool prefer_late_decoding) {
const int64_t start_time_ms = clock_->TimeInMilliseconds(); const int64_t start_time_ms = clock_->TimeInMilliseconds();
uint32_t frame_timestamp = 0; uint32_t frame_timestamp = 0;
// Exhaust wait time to get a complete frame for decoding. // Exhaust wait time to get a complete frame for decoding.
bool found_frame = jitter_buffer_.NextCompleteTimestamp( bool found_frame =
max_wait_time_ms, &frame_timestamp); jitter_buffer_.NextCompleteTimestamp(max_wait_time_ms, &frame_timestamp);
if (!found_frame) if (!found_frame)
found_frame = jitter_buffer_.NextMaybeIncompleteTimestamp(&frame_timestamp); found_frame = jitter_buffer_.NextMaybeIncompleteTimestamp(&frame_timestamp);
@ -114,14 +115,14 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
timing_->SetJitterDelay(jitter_buffer_.EstimatedJitterMs()); timing_->SetJitterDelay(jitter_buffer_.EstimatedJitterMs());
const int64_t now_ms = clock_->TimeInMilliseconds(); const int64_t now_ms = clock_->TimeInMilliseconds();
timing_->UpdateCurrentDelay(frame_timestamp); timing_->UpdateCurrentDelay(frame_timestamp);
next_render_time_ms = timing_->RenderTimeMs(frame_timestamp, now_ms); *next_render_time_ms = timing_->RenderTimeMs(frame_timestamp, now_ms);
// Check render timing. // Check render timing.
bool timing_error = false; bool timing_error = false;
// Assume that render timing errors are due to changes in the video stream. // Assume that render timing errors are due to changes in the video stream.
if (next_render_time_ms < 0) { if (*next_render_time_ms < 0) {
timing_error = true; timing_error = true;
} else if (std::abs(next_render_time_ms - now_ms) > max_video_delay_ms_) { } else if (std::abs(*next_render_time_ms - now_ms) > max_video_delay_ms_) {
int frame_delay = static_cast<int>(std::abs(next_render_time_ms - now_ms)); int frame_delay = static_cast<int>(std::abs(*next_render_time_ms - now_ms));
LOG(LS_WARNING) << "A frame about to be decoded is out of the configured " LOG(LS_WARNING) << "A frame about to be decoded is out of the configured "
<< "delay bounds (" << frame_delay << " > " << "delay bounds (" << frame_delay << " > "
<< max_video_delay_ms_ << max_video_delay_ms_
@ -143,12 +144,13 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
if (prefer_late_decoding) { if (prefer_late_decoding) {
// Decode frame as close as possible to the render timestamp. // Decode frame as close as possible to the render timestamp.
const int32_t available_wait_time = max_wait_time_ms - const int32_t available_wait_time =
max_wait_time_ms -
static_cast<int32_t>(clock_->TimeInMilliseconds() - start_time_ms); static_cast<int32_t>(clock_->TimeInMilliseconds() - start_time_ms);
uint16_t new_max_wait_time = static_cast<uint16_t>( uint16_t new_max_wait_time =
VCM_MAX(available_wait_time, 0)); static_cast<uint16_t>(VCM_MAX(available_wait_time, 0));
uint32_t wait_time_ms = timing_->MaxWaitingTime( uint32_t wait_time_ms = timing_->MaxWaitingTime(
next_render_time_ms, clock_->TimeInMilliseconds()); *next_render_time_ms, clock_->TimeInMilliseconds());
if (new_max_wait_time < wait_time_ms) { if (new_max_wait_time < wait_time_ms) {
// We're not allowed to wait until the frame is supposed to be rendered, // We're not allowed to wait until the frame is supposed to be rendered,
// waiting as long as we're allowed to avoid busy looping, and then return // waiting as long as we're allowed to avoid busy looping, and then return
@ -165,9 +167,9 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
if (frame == NULL) { if (frame == NULL) {
return NULL; return NULL;
} }
frame->SetRenderTime(next_render_time_ms); frame->SetRenderTime(*next_render_time_ms);
TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(), TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(), "SetRenderTS",
"SetRenderTS", "render_time", next_render_time_ms); "render_time", *next_render_time_ms);
if (!frame->Complete()) { if (!frame->Complete()) {
// Update stats for incomplete frames. // Update stats for incomplete frames.
bool retransmitted = false; bool retransmitted = false;
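The prefer_late_decoding branch above is a wait-budget computation. With illustrative numbers: if the caller allowed max_wait_time_ms = 100 and 30 ms were already spent finding the timestamp, available_wait_time is 70 ms; if timing_->MaxWaitingTime() says the frame could ideally wait 120 ms more, new_max_wait_time (70) is the smaller value, so the receiver sleeps only its 70 ms budget and returns NULL rather than decode early.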
@ -187,8 +189,7 @@ void VCMReceiver::ReleaseFrame(VCMEncodedFrame* frame) {
jitter_buffer_.ReleaseFrame(frame); jitter_buffer_.ReleaseFrame(frame);
} }
void VCMReceiver::ReceiveStatistics(uint32_t* bitrate, void VCMReceiver::ReceiveStatistics(uint32_t* bitrate, uint32_t* framerate) {
uint32_t* framerate) {
assert(bitrate); assert(bitrate);
assert(framerate); assert(framerate);
jitter_buffer_.IncomingRateStatistics(framerate, bitrate); jitter_buffer_.IncomingRateStatistics(framerate, bitrate);
@ -210,8 +211,7 @@ void VCMReceiver::SetNackMode(VCMNackMode nackMode,
void VCMReceiver::SetNackSettings(size_t max_nack_list_size, void VCMReceiver::SetNackSettings(size_t max_nack_list_size,
int max_packet_age_to_nack, int max_packet_age_to_nack,
int max_incomplete_time_ms) { int max_incomplete_time_ms) {
jitter_buffer_.SetNackSettings(max_nack_list_size, jitter_buffer_.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
max_packet_age_to_nack,
max_incomplete_time_ms); max_incomplete_time_ms);
} }


@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_ #ifndef WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
#define WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_ #define WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
#include <vector>
#include "webrtc/modules/video_coding/jitter_buffer.h" #include "webrtc/modules/video_coding/jitter_buffer.h"
#include "webrtc/modules/video_coding/packet.h" #include "webrtc/modules/video_coding/packet.h"
#include "webrtc/modules/video_coding/timing.h" #include "webrtc/modules/video_coding/timing.h"
@ -25,9 +27,7 @@ class VCMEncodedFrame;
class VCMReceiver { class VCMReceiver {
public: public:
VCMReceiver(VCMTiming* timing, VCMReceiver(VCMTiming* timing, Clock* clock, EventFactory* event_factory);
Clock* clock,
EventFactory* event_factory);
// Using this constructor, you can specify a different event factory for the // Using this constructor, you can specify a different event factory for the
// jitter buffer. Useful for unit tests when you want to simulate incoming // jitter buffer. Useful for unit tests when you want to simulate incoming
@ -46,7 +46,7 @@ class VCMReceiver {
uint16_t frame_width, uint16_t frame_width,
uint16_t frame_height); uint16_t frame_height);
VCMEncodedFrame* FrameForDecoding(uint16_t max_wait_time_ms, VCMEncodedFrame* FrameForDecoding(uint16_t max_wait_time_ms,
int64_t& next_render_time_ms, int64_t* next_render_time_ms,
bool prefer_late_decoding); bool prefer_late_decoding);
void ReleaseFrame(VCMEncodedFrame* frame); void ReleaseFrame(VCMEncodedFrame* frame);
void ReceiveStatistics(uint32_t* bitrate, uint32_t* framerate); void ReceiveStatistics(uint32_t* bitrate, uint32_t* framerate);
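The int64_t& to int64_t* change follows the C++ style rule that output parameters are pointers, which makes the mutation visible at the call site:

// Call-site effect of the signature change (illustrative values):
int64_t next_render_time_ms = 0;
VCMEncodedFrame* frame =
    receiver.FrameForDecoding(10, &next_render_time_ms, false);
// Before this CL the second argument was passed without the '&'.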


@ -11,6 +11,7 @@
#include <list> #include <list>
#include <queue> #include <queue>
#include <vector>
#include "testing/gtest/include/gtest/gtest.h" #include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/checks.h" #include "webrtc/base/checks.h"
@ -34,14 +35,11 @@ class TestVCMReceiver : public ::testing::Test {
: clock_(new SimulatedClock(0)), : clock_(new SimulatedClock(0)),
timing_(clock_.get()), timing_(clock_.get()),
receiver_(&timing_, clock_.get(), &event_factory_) { receiver_(&timing_, clock_.get(), &event_factory_) {
stream_generator_.reset(
stream_generator_.reset(new new StreamGenerator(0, clock_->TimeInMilliseconds()));
StreamGenerator(0, clock_->TimeInMilliseconds()));
} }
virtual void SetUp() { virtual void SetUp() { receiver_.Reset(); }
receiver_.Reset();
}
int32_t InsertPacket(int index) { int32_t InsertPacket(int index) {
VCMPacket packet; VCMPacket packet;
@ -79,7 +77,7 @@ class TestVCMReceiver : public ::testing::Test {
bool DecodeNextFrame() { bool DecodeNextFrame() {
int64_t render_time_ms = 0; int64_t render_time_ms = 0;
VCMEncodedFrame* frame = VCMEncodedFrame* frame =
receiver_.FrameForDecoding(0, render_time_ms, false); receiver_.FrameForDecoding(0, &render_time_ms, false);
if (!frame) if (!frame)
return false; return false;
receiver_.ReleaseFrame(frame); receiver_.ReleaseFrame(frame);
@ -116,7 +114,7 @@ TEST_F(TestVCMReceiver, RenderBufferSize_SkipToKeyFrame) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError); EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
} }
EXPECT_EQ((kNumOfFrames - 1) * kDefaultFramePeriodMs, EXPECT_EQ((kNumOfFrames - 1) * kDefaultFramePeriodMs,
receiver_.RenderBufferSizeMs()); receiver_.RenderBufferSizeMs());
} }
TEST_F(TestVCMReceiver, RenderBufferSize_NotAllComplete) { TEST_F(TestVCMReceiver, RenderBufferSize_NotAllComplete) {
@ -132,7 +130,7 @@ TEST_F(TestVCMReceiver, RenderBufferSize_NotAllComplete) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError); EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
} }
EXPECT_EQ((num_of_frames - 1) * kDefaultFramePeriodMs, EXPECT_EQ((num_of_frames - 1) * kDefaultFramePeriodMs,
receiver_.RenderBufferSizeMs()); receiver_.RenderBufferSizeMs());
} }
TEST_F(TestVCMReceiver, RenderBufferSize_NoKeyFrame) { TEST_F(TestVCMReceiver, RenderBufferSize_NoKeyFrame) {
@ -143,7 +141,7 @@ TEST_F(TestVCMReceiver, RenderBufferSize_NoKeyFrame) {
} }
int64_t next_render_time_ms = 0; int64_t next_render_time_ms = 0;
VCMEncodedFrame* frame = VCMEncodedFrame* frame =
receiver_.FrameForDecoding(10, next_render_time_ms, false); receiver_.FrameForDecoding(10, &next_render_time_ms, false);
EXPECT_TRUE(frame == NULL); EXPECT_TRUE(frame == NULL);
receiver_.ReleaseFrame(frame); receiver_.ReleaseFrame(frame);
EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError); EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
@ -161,7 +159,7 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_Empty) {
const int kMaxNonDecodableDuration = 500; const int kMaxNonDecodableDuration = 500;
const int kMinDelayMs = 500; const int kMinDelayMs = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
kMaxNonDecodableDuration); kMaxNonDecodableDuration);
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError); EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
// Advance time until it's time to decode the key frame. // Advance time until it's time to decode the key frame.
clock_->AdvanceTimeMilliseconds(kMinDelayMs); clock_->AdvanceTimeMilliseconds(kMinDelayMs);
@ -178,7 +176,7 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoKeyFrame) {
const int kMaxPacketAgeToNack = 1000; const int kMaxPacketAgeToNack = 1000;
const int kMaxNonDecodableDuration = 500; const int kMaxNonDecodableDuration = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
kMaxNonDecodableDuration); kMaxNonDecodableDuration);
const int kNumFrames = kDefaultFrameRate * kMaxNonDecodableDuration / 1000; const int kNumFrames = kDefaultFrameRate * kMaxNonDecodableDuration / 1000;
for (int i = 0; i < kNumFrames; ++i) { for (int i = 0; i < kNumFrames; ++i) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError); EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
@ -194,24 +192,23 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_OneIncomplete) {
const size_t kMaxNackListSize = 1000; const size_t kMaxNackListSize = 1000;
const int kMaxPacketAgeToNack = 1000; const int kMaxPacketAgeToNack = 1000;
const int kMaxNonDecodableDuration = 500; const int kMaxNonDecodableDuration = 500;
const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate * const int kMaxNonDecodableDurationFrames =
kMaxNonDecodableDuration + 500) / 1000; (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
const int kMinDelayMs = 500; const int kMinDelayMs = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
kMaxNonDecodableDuration); kMaxNonDecodableDuration);
receiver_.SetMinReceiverDelay(kMinDelayMs); receiver_.SetMinReceiverDelay(kMinDelayMs);
int64_t key_frame_inserted = clock_->TimeInMilliseconds(); int64_t key_frame_inserted = clock_->TimeInMilliseconds();
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError); EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
// Insert an incomplete frame. // Insert an incomplete frame.
EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError); EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
// Insert enough frames to make the non-decodable sequence too long. // Insert enough frames to make the non-decodable sequence too long.
for (int i = 0; i < kMaxNonDecodableDurationFrames; for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
++i) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError); EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
} }
// Advance time until it's time to decode the key frame. // Advance time until it's time to decode the key frame.
clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() - clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
key_frame_inserted); key_frame_inserted);
EXPECT_TRUE(DecodeNextFrame()); EXPECT_TRUE(DecodeNextFrame());
// Make sure we get a key frame request. // Make sure we get a key frame request.
bool request_key_frame = false; bool request_key_frame = false;
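The rounding in kMaxNonDecodableDurationFrames is worth spelling out: (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000 is integer division rounded to the nearest frame. Assuming kDefaultFrameRate is 30 fps, as elsewhere in these tests, that is (30 * 500 + 500) / 1000 = 15500 / 1000 = 15 frames for the 500 ms window.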
@ -225,11 +222,11 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger) {
const size_t kMaxNackListSize = 1000; const size_t kMaxNackListSize = 1000;
const int kMaxPacketAgeToNack = 1000; const int kMaxPacketAgeToNack = 1000;
const int kMaxNonDecodableDuration = 500; const int kMaxNonDecodableDuration = 500;
const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate * const int kMaxNonDecodableDurationFrames =
kMaxNonDecodableDuration + 500) / 1000; (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
const int kMinDelayMs = 500; const int kMinDelayMs = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
kMaxNonDecodableDuration); kMaxNonDecodableDuration);
receiver_.SetMinReceiverDelay(kMinDelayMs); receiver_.SetMinReceiverDelay(kMinDelayMs);
int64_t key_frame_inserted = clock_->TimeInMilliseconds(); int64_t key_frame_inserted = clock_->TimeInMilliseconds();
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError); EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
@ -237,13 +234,12 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError); EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
// Insert all but one frame to not trigger a key frame request due to // Insert all but one frame to not trigger a key frame request due to
// too long duration of non-decodable frames. // too long duration of non-decodable frames.
for (int i = 0; i < kMaxNonDecodableDurationFrames - 1; for (int i = 0; i < kMaxNonDecodableDurationFrames - 1; ++i) {
++i) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError); EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
} }
// Advance time until it's time to decode the key frame. // Advance time until it's time to decode the key frame.
clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() - clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
key_frame_inserted); key_frame_inserted);
EXPECT_TRUE(DecodeNextFrame()); EXPECT_TRUE(DecodeNextFrame());
// Make sure we don't get a key frame request since we haven't generated // Make sure we don't get a key frame request since we haven't generated
// enough frames. // enough frames.
@ -258,25 +254,24 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger2) {
const size_t kMaxNackListSize = 1000; const size_t kMaxNackListSize = 1000;
const int kMaxPacketAgeToNack = 1000; const int kMaxPacketAgeToNack = 1000;
const int kMaxNonDecodableDuration = 500; const int kMaxNonDecodableDuration = 500;
const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate * const int kMaxNonDecodableDurationFrames =
kMaxNonDecodableDuration + 500) / 1000; (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
const int kMinDelayMs = 500; const int kMinDelayMs = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
kMaxNonDecodableDuration); kMaxNonDecodableDuration);
receiver_.SetMinReceiverDelay(kMinDelayMs); receiver_.SetMinReceiverDelay(kMinDelayMs);
int64_t key_frame_inserted = clock_->TimeInMilliseconds(); int64_t key_frame_inserted = clock_->TimeInMilliseconds();
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError); EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
// Insert enough frames to have too long non-decodable sequence, except that // Insert enough frames to have too long non-decodable sequence, except that
// we don't have any losses. // we don't have any losses.
for (int i = 0; i < kMaxNonDecodableDurationFrames; for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
++i) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError); EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
} }
// Insert an incomplete frame. // Insert an incomplete frame.
EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError); EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
// Advance time until it's time to decode the key frame. // Advance time until it's time to decode the key frame.
clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() - clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
key_frame_inserted); key_frame_inserted);
EXPECT_TRUE(DecodeNextFrame()); EXPECT_TRUE(DecodeNextFrame());
// Make sure we don't get a key frame request since the non-decodable duration // Make sure we don't get a key frame request since the non-decodable duration
// is only one frame. // is only one frame.
@ -291,25 +286,24 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_KeyFrameAfterIncompleteFrames) {
const size_t kMaxNackListSize = 1000; const size_t kMaxNackListSize = 1000;
const int kMaxPacketAgeToNack = 1000; const int kMaxPacketAgeToNack = 1000;
const int kMaxNonDecodableDuration = 500; const int kMaxNonDecodableDuration = 500;
const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate * const int kMaxNonDecodableDurationFrames =
kMaxNonDecodableDuration + 500) / 1000; (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
const int kMinDelayMs = 500; const int kMinDelayMs = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
kMaxNonDecodableDuration); kMaxNonDecodableDuration);
receiver_.SetMinReceiverDelay(kMinDelayMs); receiver_.SetMinReceiverDelay(kMinDelayMs);
int64_t key_frame_inserted = clock_->TimeInMilliseconds(); int64_t key_frame_inserted = clock_->TimeInMilliseconds();
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError); EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
// Insert an incomplete frame. // Insert an incomplete frame.
EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError); EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
// Insert enough frames to have too long non-decodable sequence. // Insert enough frames to have too long non-decodable sequence.
for (int i = 0; i < kMaxNonDecodableDurationFrames; for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
++i) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError); EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
} }
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError); EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
// Advance time until it's time to decode the key frame. // Advance time until it's time to decode the key frame.
clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() - clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
key_frame_inserted); key_frame_inserted);
EXPECT_TRUE(DecodeNextFrame()); EXPECT_TRUE(DecodeNextFrame());
// Make sure we don't get a key frame request since we have a key frame // Make sure we don't get a key frame request since we have a key frame
// in the list. // in the list.
@ -340,7 +334,7 @@ class SimulatedClockWithFrames : public SimulatedClock {
// Return true if some frame arrives between now and now+|milliseconds|. // Return true if some frame arrives between now and now+|milliseconds|.
bool AdvanceTimeMilliseconds(int64_t milliseconds, bool stop_on_frame) { bool AdvanceTimeMilliseconds(int64_t milliseconds, bool stop_on_frame) {
return AdvanceTimeMicroseconds(milliseconds * 1000, stop_on_frame); return AdvanceTimeMicroseconds(milliseconds * 1000, stop_on_frame);
}; }
bool AdvanceTimeMicroseconds(int64_t microseconds, bool stop_on_frame) { bool AdvanceTimeMicroseconds(int64_t microseconds, bool stop_on_frame) {
int64_t start_time = TimeInMicroseconds(); int64_t start_time = TimeInMicroseconds();
@ -364,7 +358,7 @@ class SimulatedClockWithFrames : public SimulatedClock {
SimulatedClock::AdvanceTimeMicroseconds(end_time - TimeInMicroseconds()); SimulatedClock::AdvanceTimeMicroseconds(end_time - TimeInMicroseconds());
} }
return frame_injected; return frame_injected;
}; }
// Input timestamps are in unit Milliseconds. // Input timestamps are in unit Milliseconds.
// And |arrive_timestamps| must be positive and in increasing order. // And |arrive_timestamps| must be positive and in increasing order.
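SimulatedClockWithFrames steps simulated time forward and can stop early at the next scheduled frame arrival. A self-contained sketch of that mechanism, with an invented TinySimulatedClock standing in for the real fixture:

#include <cstdint>
#include <queue>
#include <utility>

class TinySimulatedClock {
 public:
  explicit TinySimulatedClock(std::queue<int64_t> arrivals_us)
      : now_us_(0), arrivals_us_(std::move(arrivals_us)) {}

  // Returns true if a frame arrives in (now, now + us]. With stop_on_frame,
  // time halts at the first arrival instead of running on to now + us.
  bool AdvanceTimeMicroseconds(int64_t us, bool stop_on_frame) {
    const int64_t end_us = now_us_ + us;
    bool frame_injected = false;
    while (!arrivals_us_.empty() && arrivals_us_.front() <= end_us) {
      now_us_ = arrivals_us_.front();
      arrivals_us_.pop();
      frame_injected = true;
      if (stop_on_frame)
        return true;
    }
    now_us_ = end_us;
    return frame_injected;
  }

 private:
  int64_t now_us_;
  std::queue<int64_t> arrivals_us_;  // must be positive and increasing
};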
@ -431,7 +425,7 @@ class FrameInjectEvent : public EventWrapper {
bool Set() override { return true; } bool Set() override { return true; }
EventTypeWrapper Wait(unsigned long max_time) override { EventTypeWrapper Wait(unsigned long max_time) override { // NOLINT
if (clock_->AdvanceTimeMilliseconds(max_time, stop_on_frame_) && if (clock_->AdvanceTimeMilliseconds(max_time, stop_on_frame_) &&
stop_on_frame_) { stop_on_frame_) {
return EventTypeWrapper::kEventSignaled; return EventTypeWrapper::kEventSignaled;
@ -447,7 +441,6 @@ class FrameInjectEvent : public EventWrapper {
class VCMReceiverTimingTest : public ::testing::Test { class VCMReceiverTimingTest : public ::testing::Test {
protected: protected:
VCMReceiverTimingTest() VCMReceiverTimingTest()
: clock_(&stream_generator_, &receiver_), : clock_(&stream_generator_, &receiver_),
@ -460,7 +453,6 @@ class VCMReceiverTimingTest : public ::testing::Test {
rtc::scoped_ptr<EventWrapper>( rtc::scoped_ptr<EventWrapper>(
new FrameInjectEvent(&clock_, true))) {} new FrameInjectEvent(&clock_, true))) {}
virtual void SetUp() { receiver_.Reset(); } virtual void SetUp() { receiver_.Reset(); }
SimulatedClockWithFrames clock_; SimulatedClockWithFrames clock_;
@ -506,7 +498,7 @@ TEST_F(VCMReceiverTimingTest, FrameForDecoding) {
while (num_frames_return < kNumFrames) { while (num_frames_return < kNumFrames) {
int64_t start_time = clock_.TimeInMilliseconds(); int64_t start_time = clock_.TimeInMilliseconds();
VCMEncodedFrame* frame = VCMEncodedFrame* frame =
receiver_.FrameForDecoding(kMaxWaitTime, next_render_time, false); receiver_.FrameForDecoding(kMaxWaitTime, &next_render_time, false);
int64_t end_time = clock_.TimeInMilliseconds(); int64_t end_time = clock_.TimeInMilliseconds();
// In any case the FrameForDecoding should not wait longer than // In any case the FrameForDecoding should not wait longer than
@ -566,9 +558,8 @@ TEST_F(VCMReceiverTimingTest, FrameForDecodingPreferLateDecoding) {
while (num_frames_return < kNumFrames) { while (num_frames_return < kNumFrames) {
int64_t start_time = clock_.TimeInMilliseconds(); int64_t start_time = clock_.TimeInMilliseconds();
VCMEncodedFrame* frame = VCMEncodedFrame* frame = receiver_.FrameForDecoding(
receiver_.FrameForDecoding(kMaxWaitTime, next_render_time, kMaxWaitTime, &next_render_time, prefer_late_decoding);
prefer_late_decoding);
int64_t end_time = clock_.TimeInMilliseconds(); int64_t end_time = clock_.TimeInMilliseconds();
if (frame) { if (frame) {
EXPECT_EQ(frame->RenderTimeMs() - max_decode_ms - render_delay_ms, EXPECT_EQ(frame->RenderTimeMs() - max_decode_ms - render_delay_ms,
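The assertion above encodes the late-decoding schedule: with prefer_late_decoding, a frame is released at its render time minus the maximum decode time and the render delay. A worked instance with invented numbers:

// With render_time_ms = 200, max_decode_ms = 25 and render_delay_ms = 10,
// the frame should be handed to the decoder at 200 - 25 - 10 = 165 ms.
int64_t DecodeDeadlineMs(int64_t render_time_ms, int64_t max_decode_ms,
                         int64_t render_delay_ms) {
  return render_time_ms - max_decode_ms - render_delay_ms;
}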

View File

@ -8,13 +8,14 @@
* be found in the AUTHORS file in the root of the source tree. * be found in the AUTHORS file in the root of the source tree.
*/ */
#include "webrtc/modules/video_coding/internal_defines.h"
#include "webrtc/modules/video_coding/rtt_filter.h" #include "webrtc/modules/video_coding/rtt_filter.h"
#include <math.h> #include <math.h>
#include <stdlib.h> #include <stdlib.h>
#include <string.h> #include <string.h>
#include "webrtc/modules/video_coding/internal_defines.h"
namespace webrtc { namespace webrtc {
VCMRttFilter::VCMRttFilter() VCMRttFilter::VCMRttFilter()
@ -22,181 +23,143 @@ VCMRttFilter::VCMRttFilter()
_jumpStdDevs(2.5), _jumpStdDevs(2.5),
_driftStdDevs(3.5), _driftStdDevs(3.5),
_detectThreshold(kMaxDriftJumpCount) { _detectThreshold(kMaxDriftJumpCount) {
Reset(); Reset();
} }
VCMRttFilter&
VCMRttFilter::operator=(const VCMRttFilter& rhs)
{
    if (this != &rhs)
    {
        _gotNonZeroUpdate = rhs._gotNonZeroUpdate;
        _avgRtt = rhs._avgRtt;
        _varRtt = rhs._varRtt;
        _maxRtt = rhs._maxRtt;
        _filtFactCount = rhs._filtFactCount;
        _jumpCount = rhs._jumpCount;
        _driftCount = rhs._driftCount;
        memcpy(_jumpBuf, rhs._jumpBuf, sizeof(_jumpBuf));
        memcpy(_driftBuf, rhs._driftBuf, sizeof(_driftBuf));
    }
    return *this;
}

void
VCMRttFilter::Reset()
{
    _gotNonZeroUpdate = false;
    _avgRtt = 0;
    _varRtt = 0;
    _maxRtt = 0;
    _filtFactCount = 1;
    _jumpCount = 0;
    _driftCount = 0;
    memset(_jumpBuf, 0, kMaxDriftJumpCount);
    memset(_driftBuf, 0, kMaxDriftJumpCount);
}

void
VCMRttFilter::Update(int64_t rttMs)
{
    if (!_gotNonZeroUpdate)
    {
        if (rttMs == 0)
        {
            return;
        }
        _gotNonZeroUpdate = true;
    }

    // Sanity check
    if (rttMs > 3000)
    {
        rttMs = 3000;
    }

    double filtFactor = 0;
    if (_filtFactCount > 1)
    {
        filtFactor = static_cast<double>(_filtFactCount - 1) / _filtFactCount;
    }
    _filtFactCount++;
    if (_filtFactCount > _filtFactMax)
    {
        // This prevents filtFactor from going above
        // (_filtFactMax - 1) / _filtFactMax,
        // e.g., _filtFactMax = 50 => filtFactor = 49/50 = 0.98
        _filtFactCount = _filtFactMax;
    }
    double oldAvg = _avgRtt;
    double oldVar = _varRtt;
    _avgRtt = filtFactor * _avgRtt + (1 - filtFactor) * rttMs;
    _varRtt = filtFactor * _varRtt + (1 - filtFactor) *
              (rttMs - _avgRtt) * (rttMs - _avgRtt);
    _maxRtt = VCM_MAX(rttMs, _maxRtt);
    if (!JumpDetection(rttMs) || !DriftDetection(rttMs))
    {
        // In some cases we don't want to update the statistics
        _avgRtt = oldAvg;
        _varRtt = oldVar;
    }
}

bool
VCMRttFilter::JumpDetection(int64_t rttMs)
{
    double diffFromAvg = _avgRtt - rttMs;
    if (fabs(diffFromAvg) > _jumpStdDevs * sqrt(_varRtt))
    {
        int diffSign = (diffFromAvg >= 0) ? 1 : -1;
        int jumpCountSign = (_jumpCount >= 0) ? 1 : -1;
        if (diffSign != jumpCountSign)
        {
            // Since the signs differ the samples currently
            // in the buffer is useless as they represent a
            // jump in a different direction.
            _jumpCount = 0;
        }
        if (abs(_jumpCount) < kMaxDriftJumpCount)
        {
            // Update the buffer used for the short time
            // statistics.
            // The sign of the diff is used for updating the counter since
            // we want to use the same buffer for keeping track of when
            // the RTT jumps down and up.
            _jumpBuf[abs(_jumpCount)] = rttMs;
            _jumpCount += diffSign;
        }
        if (abs(_jumpCount) >= _detectThreshold)
        {
            // Detected an RTT jump
            ShortRttFilter(_jumpBuf, abs(_jumpCount));
            _filtFactCount = _detectThreshold + 1;
            _jumpCount = 0;
        }
        else
        {
            return false;
        }
    }
    else
    {
        _jumpCount = 0;
    }
    return true;
}

bool
VCMRttFilter::DriftDetection(int64_t rttMs)
{
    if (_maxRtt - _avgRtt > _driftStdDevs * sqrt(_varRtt))
    {
        if (_driftCount < kMaxDriftJumpCount)
        {
            // Update the buffer used for the short time
            // statistics.
            _driftBuf[_driftCount] = rttMs;
            _driftCount++;
        }
        if (_driftCount >= _detectThreshold)
        {
            // Detected an RTT drift
            ShortRttFilter(_driftBuf, _driftCount);
            _filtFactCount = _detectThreshold + 1;
            _driftCount = 0;
        }
    }
    else
    {
        _driftCount = 0;
    }
    return true;
}

void
VCMRttFilter::ShortRttFilter(int64_t* buf, uint32_t length)
{
    if (length == 0)
    {
        return;
    }
    _maxRtt = 0;
    _avgRtt = 0;
    for (uint32_t i=0; i < length; i++)
    {
        if (buf[i] > _maxRtt)
        {
            _maxRtt = buf[i];
        }
        _avgRtt += buf[i];
    }
    _avgRtt = _avgRtt / static_cast<double>(length);
}

int64_t
VCMRttFilter::RttMs() const
{
    return static_cast<int64_t>(_maxRtt + 0.5);
}
}

VCMRttFilter& VCMRttFilter::operator=(const VCMRttFilter& rhs) {
  if (this != &rhs) {
    _gotNonZeroUpdate = rhs._gotNonZeroUpdate;
    _avgRtt = rhs._avgRtt;
    _varRtt = rhs._varRtt;
    _maxRtt = rhs._maxRtt;
    _filtFactCount = rhs._filtFactCount;
    _jumpCount = rhs._jumpCount;
    _driftCount = rhs._driftCount;
    memcpy(_jumpBuf, rhs._jumpBuf, sizeof(_jumpBuf));
    memcpy(_driftBuf, rhs._driftBuf, sizeof(_driftBuf));
  }
  return *this;
}

void VCMRttFilter::Reset() {
  _gotNonZeroUpdate = false;
  _avgRtt = 0;
  _varRtt = 0;
  _maxRtt = 0;
  _filtFactCount = 1;
  _jumpCount = 0;
  _driftCount = 0;
  memset(_jumpBuf, 0, kMaxDriftJumpCount);
  memset(_driftBuf, 0, kMaxDriftJumpCount);
}

void VCMRttFilter::Update(int64_t rttMs) {
  if (!_gotNonZeroUpdate) {
    if (rttMs == 0) {
      return;
    }
    _gotNonZeroUpdate = true;
  }

  // Sanity check
  if (rttMs > 3000) {
    rttMs = 3000;
  }

  double filtFactor = 0;
  if (_filtFactCount > 1) {
    filtFactor = static_cast<double>(_filtFactCount - 1) / _filtFactCount;
  }
  _filtFactCount++;
  if (_filtFactCount > _filtFactMax) {
    // This prevents filtFactor from going above
    // (_filtFactMax - 1) / _filtFactMax,
    // e.g., _filtFactMax = 50 => filtFactor = 49/50 = 0.98
    _filtFactCount = _filtFactMax;
  }
  double oldAvg = _avgRtt;
  double oldVar = _varRtt;
  _avgRtt = filtFactor * _avgRtt + (1 - filtFactor) * rttMs;
  _varRtt = filtFactor * _varRtt +
            (1 - filtFactor) * (rttMs - _avgRtt) * (rttMs - _avgRtt);
  _maxRtt = VCM_MAX(rttMs, _maxRtt);
  if (!JumpDetection(rttMs) || !DriftDetection(rttMs)) {
    // In some cases we don't want to update the statistics
    _avgRtt = oldAvg;
    _varRtt = oldVar;
  }
}

bool VCMRttFilter::JumpDetection(int64_t rttMs) {
  double diffFromAvg = _avgRtt - rttMs;
  if (fabs(diffFromAvg) > _jumpStdDevs * sqrt(_varRtt)) {
    int diffSign = (diffFromAvg >= 0) ? 1 : -1;
    int jumpCountSign = (_jumpCount >= 0) ? 1 : -1;
    if (diffSign != jumpCountSign) {
      // Since the signs differ the samples currently
      // in the buffer is useless as they represent a
      // jump in a different direction.
      _jumpCount = 0;
    }
    if (abs(_jumpCount) < kMaxDriftJumpCount) {
      // Update the buffer used for the short time
      // statistics.
      // The sign of the diff is used for updating the counter since
      // we want to use the same buffer for keeping track of when
      // the RTT jumps down and up.
      _jumpBuf[abs(_jumpCount)] = rttMs;
      _jumpCount += diffSign;
    }
    if (abs(_jumpCount) >= _detectThreshold) {
      // Detected an RTT jump
      ShortRttFilter(_jumpBuf, abs(_jumpCount));
      _filtFactCount = _detectThreshold + 1;
      _jumpCount = 0;
    } else {
      return false;
    }
  } else {
    _jumpCount = 0;
  }
  return true;
}

bool VCMRttFilter::DriftDetection(int64_t rttMs) {
  if (_maxRtt - _avgRtt > _driftStdDevs * sqrt(_varRtt)) {
    if (_driftCount < kMaxDriftJumpCount) {
      // Update the buffer used for the short time
      // statistics.
      _driftBuf[_driftCount] = rttMs;
      _driftCount++;
    }
    if (_driftCount >= _detectThreshold) {
      // Detected an RTT drift
      ShortRttFilter(_driftBuf, _driftCount);
      _filtFactCount = _detectThreshold + 1;
      _driftCount = 0;
    }
  } else {
    _driftCount = 0;
  }
  return true;
}

void VCMRttFilter::ShortRttFilter(int64_t* buf, uint32_t length) {
  if (length == 0) {
    return;
  }
  _maxRtt = 0;
  _avgRtt = 0;
  for (uint32_t i = 0; i < length; i++) {
    if (buf[i] > _maxRtt) {
      _maxRtt = buf[i];
    }
    _avgRtt += buf[i];
  }
  _avgRtt = _avgRtt / static_cast<double>(length);
}

int64_t VCMRttFilter::RttMs() const {
  return static_cast<int64_t>(_maxRtt + 0.5);
}

}  // namespace webrtc
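For intuition, the long-term filter above is a recursive average whose weight (count - 1) / count grows with the sample count and is capped at 49/50. A standalone toy of just that update (sample values invented; the jump/drift reversion logic is deliberately left out):

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  double avg = 0, var = 0;
  int count = 1;
  const int kMaxCount = 50;  // mirrors _filtFactMax
  const int64_t samples[] = {100, 110, 105, 2000, 2050, 2020, 1990, 2010};
  for (int64_t rtt : samples) {
    double f = (count > 1) ? static_cast<double>(count - 1) / count : 0;
    if (++count > kMaxCount)
      count = kMaxCount;
    avg = f * avg + (1 - f) * rtt;
    var = f * var + (1 - f) * (rtt - avg) * (rtt - avg);
    printf("rtt=%lld avg=%.1f stddev=%.1f\n", static_cast<long long>(rtt),
           avg, std::sqrt(var));
  }
  return 0;
}

The printed stddev makes it easy to see why a 100 ms to 2000 ms step trips the _jumpStdDevs test long before the slowly-moving average catches up.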

View File

@ -13,56 +13,54 @@
#include "webrtc/typedefs.h" #include "webrtc/typedefs.h"
namespace webrtc namespace webrtc {
{
class VCMRttFilter class VCMRttFilter {
{
public: public:
VCMRttFilter(); VCMRttFilter();
VCMRttFilter& operator=(const VCMRttFilter& rhs); VCMRttFilter& operator=(const VCMRttFilter& rhs);
// Resets the filter. // Resets the filter.
void Reset(); void Reset();
// Updates the filter with a new sample. // Updates the filter with a new sample.
void Update(int64_t rttMs); void Update(int64_t rttMs);
// A getter function for the current RTT level in ms. // A getter function for the current RTT level in ms.
int64_t RttMs() const; int64_t RttMs() const;
private: private:
// The size of the drift and jump memory buffers // The size of the drift and jump memory buffers
// and thus also the detection threshold for these // and thus also the detection threshold for these
// detectors in number of samples. // detectors in number of samples.
enum { kMaxDriftJumpCount = 5 }; enum { kMaxDriftJumpCount = 5 };
// Detects RTT jumps by comparing the difference between // Detects RTT jumps by comparing the difference between
// samples and average to the standard deviation. // samples and average to the standard deviation.
// Returns true if the long time statistics should be updated // Returns true if the long time statistics should be updated
// and false otherwise // and false otherwise
bool JumpDetection(int64_t rttMs); bool JumpDetection(int64_t rttMs);
// Detects RTT drifts by comparing the difference between // Detects RTT drifts by comparing the difference between
// max and average to the standard deviation. // max and average to the standard deviation.
// Returns true if the long time statistics should be updated // Returns true if the long time statistics should be updated
// and false otherwise // and false otherwise
bool DriftDetection(int64_t rttMs); bool DriftDetection(int64_t rttMs);
// Computes the short time average and maximum of the vector buf. // Computes the short time average and maximum of the vector buf.
void ShortRttFilter(int64_t* buf, uint32_t length); void ShortRttFilter(int64_t* buf, uint32_t length);
bool _gotNonZeroUpdate; bool _gotNonZeroUpdate;
double _avgRtt; double _avgRtt;
double _varRtt; double _varRtt;
int64_t _maxRtt; int64_t _maxRtt;
uint32_t _filtFactCount; uint32_t _filtFactCount;
const uint32_t _filtFactMax; const uint32_t _filtFactMax;
const double _jumpStdDevs; const double _jumpStdDevs;
const double _driftStdDevs; const double _driftStdDevs;
int32_t _jumpCount; int32_t _jumpCount;
int32_t _driftCount; int32_t _driftCount;
const int32_t _detectThreshold; const int32_t _detectThreshold;
int64_t _jumpBuf[kMaxDriftJumpCount]; int64_t _jumpBuf[kMaxDriftJumpCount];
int64_t _driftBuf[kMaxDriftJumpCount]; int64_t _driftBuf[kMaxDriftJumpCount];
}; };
} // namespace webrtc } // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_ #endif // WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
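A minimal usage sketch of the interface declared above; Reset(), Update() and RttMs() are the whole public surface:

#include <cstdint>

#include "webrtc/modules/video_coding/rtt_filter.h"

void ExampleUsage() {
  webrtc::VCMRttFilter filter;
  filter.Reset();
  for (int64_t rtt_ms : {110, 120, 115, 130}) {
    filter.Update(rtt_ms);  // feed round-trip-time samples in milliseconds
  }
  int64_t estimate_ms = filter.RttMs();  // filtered, max-tracking estimate
  (void)estimate_ms;
}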

View File

@ -32,8 +32,7 @@ VCMSessionInfo::VCMSessionInfo()
empty_seq_num_low_(-1), empty_seq_num_low_(-1),
empty_seq_num_high_(-1), empty_seq_num_high_(-1),
first_packet_seq_num_(-1), first_packet_seq_num_(-1),
last_packet_seq_num_(-1) { last_packet_seq_num_(-1) {}
}
void VCMSessionInfo::UpdateDataPointers(const uint8_t* old_base_ptr, void VCMSessionInfo::UpdateDataPointers(const uint8_t* old_base_ptr,
const uint8_t* new_base_ptr) { const uint8_t* new_base_ptr) {
@ -88,8 +87,8 @@ bool VCMSessionInfo::LayerSync() const {
if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) { if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
return packets_.front().codecSpecificHeader.codecHeader.VP8.layerSync; return packets_.front().codecSpecificHeader.codecHeader.VP8.layerSync;
} else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) { } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
return return packets_.front()
packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_up_switch; .codecSpecificHeader.codecHeader.VP9.temporal_up_switch;
} else { } else {
return false; return false;
} }
@ -193,9 +192,7 @@ size_t VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
while (nalu_ptr < packet_buffer + packet.sizeBytes) { while (nalu_ptr < packet_buffer + packet.sizeBytes) {
size_t length = BufferToUWord16(nalu_ptr); size_t length = BufferToUWord16(nalu_ptr);
nalu_ptr += kLengthFieldLength; nalu_ptr += kLengthFieldLength;
frame_buffer_ptr += Insert(nalu_ptr, frame_buffer_ptr += Insert(nalu_ptr, length, packet.insertStartCode,
length,
packet.insertStartCode,
const_cast<uint8_t*>(frame_buffer_ptr)); const_cast<uint8_t*>(frame_buffer_ptr));
nalu_ptr += length; nalu_ptr += length;
} }
@ -203,14 +200,12 @@ size_t VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
return packet.sizeBytes; return packet.sizeBytes;
} }
ShiftSubsequentPackets( ShiftSubsequentPackets(
packet_it, packet_it, packet.sizeBytes +
packet.sizeBytes + (packet.insertStartCode ? kH264StartCodeLengthBytes : 0));
(packet.insertStartCode ? kH264StartCodeLengthBytes : 0));
packet.sizeBytes = Insert(packet_buffer, packet.sizeBytes =
packet.sizeBytes, Insert(packet_buffer, packet.sizeBytes, packet.insertStartCode,
packet.insertStartCode, const_cast<uint8_t*>(packet.dataPtr));
const_cast<uint8_t*>(packet.dataPtr));
return packet.sizeBytes; return packet.sizeBytes;
} }
@ -223,8 +218,7 @@ size_t VCMSessionInfo::Insert(const uint8_t* buffer,
memcpy(frame_buffer, startCode, kH264StartCodeLengthBytes); memcpy(frame_buffer, startCode, kH264StartCodeLengthBytes);
} }
memcpy(frame_buffer + (insert_start_code ? kH264StartCodeLengthBytes : 0), memcpy(frame_buffer + (insert_start_code ? kH264StartCodeLengthBytes : 0),
buffer, buffer, length);
length);
length += (insert_start_code ? kH264StartCodeLengthBytes : 0); length += (insert_start_code ? kH264StartCodeLengthBytes : 0);
return length; return length;
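Insert() above optionally writes an Annex B start code ahead of the payload. A small sketch of that copy (the four-byte 00 00 00 01 pattern matches kH264StartCodeLengthBytes used in this file):

#include <cstddef>
#include <cstdint>
#include <cstring>

size_t WriteWithStartCode(const uint8_t* nalu, size_t length,
                          bool insert_start_code, uint8_t* out) {
  static const uint8_t kStartCode[] = {0, 0, 0, 1};
  size_t offset = 0;
  if (insert_start_code) {
    memcpy(out, kStartCode, sizeof(kStartCode));
    offset = sizeof(kStartCode);
  }
  memcpy(out + offset, nalu, length);  // payload follows the start code
  return offset + length;              // total bytes written
}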
@ -276,13 +270,12 @@ void VCMSessionInfo::UpdateDecodableSession(const FrameData& frame_data) {
// thresholds. // thresholds.
const float kLowPacketPercentageThreshold = 0.2f; const float kLowPacketPercentageThreshold = 0.2f;
const float kHighPacketPercentageThreshold = 0.8f; const float kHighPacketPercentageThreshold = 0.8f;
if (frame_data.rtt_ms < kRttThreshold if (frame_data.rtt_ms < kRttThreshold || frame_type_ == kVideoFrameKey ||
|| frame_type_ == kVideoFrameKey !HaveFirstPacket() ||
|| !HaveFirstPacket() (NumPackets() <= kHighPacketPercentageThreshold *
|| (NumPackets() <= kHighPacketPercentageThreshold frame_data.rolling_average_packets_per_frame &&
* frame_data.rolling_average_packets_per_frame NumPackets() > kLowPacketPercentageThreshold *
&& NumPackets() > kLowPacketPercentageThreshold frame_data.rolling_average_packets_per_frame))
* frame_data.rolling_average_packets_per_frame))
return; return;
decodable_ = true; decodable_ = true;
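Spelled out, the early return above means a session with errors allowed is flagged decodable only when the RTT is at least kRttThreshold, the frame is a delta frame whose first packet has arrived, and its packet count lies outside the middle band between 20% and 80% of the rolling per-frame average. A boolean restatement of that gate (names are illustrative):

#include <cstdint>

bool MarksDecodable(int num_packets, float avg_packets_per_frame,
                    int64_t rtt_ms, bool is_key_frame, bool have_first_packet,
                    int64_t rtt_threshold_ms) {
  const float kLow = 0.2f;   // kLowPacketPercentageThreshold
  const float kHigh = 0.8f;  // kHighPacketPercentageThreshold
  const bool in_middle_band =
      num_packets <= kHigh * avg_packets_per_frame &&
      num_packets > kLow * avg_packets_per_frame;
  return rtt_ms >= rtt_threshold_ms && !is_key_frame && have_first_packet &&
         !in_middle_band;
}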
@ -308,7 +301,7 @@ VCMSessionInfo::PacketIterator VCMSessionInfo::FindNaluEnd(
// Find the end of the NAL unit. // Find the end of the NAL unit.
for (; packet_it != packets_.end(); ++packet_it) { for (; packet_it != packets_.end(); ++packet_it) {
if (((*packet_it).completeNALU == kNaluComplete && if (((*packet_it).completeNALU == kNaluComplete &&
(*packet_it).sizeBytes > 0) || (*packet_it).sizeBytes > 0) ||
// Found next NALU. // Found next NALU.
(*packet_it).completeNALU == kNaluStart) (*packet_it).completeNALU == kNaluStart)
return --packet_it; return --packet_it;
@ -348,7 +341,7 @@ size_t VCMSessionInfo::BuildVP8FragmentationHeader(
memset(fragmentation->fragmentationLength, 0, memset(fragmentation->fragmentationLength, 0,
kMaxVP8Partitions * sizeof(size_t)); kMaxVP8Partitions * sizeof(size_t));
if (packets_.empty()) if (packets_.empty())
return new_length; return new_length;
PacketIterator it = FindNextPartitionBeginning(packets_.begin()); PacketIterator it = FindNextPartitionBeginning(packets_.begin());
while (it != packets_.end()) { while (it != packets_.end()) {
const int partition_id = const int partition_id =
@ -371,7 +364,7 @@ size_t VCMSessionInfo::BuildVP8FragmentationHeader(
// Set all empty fragments to start where the previous fragment ends, // Set all empty fragments to start where the previous fragment ends,
// and have zero length. // and have zero length.
if (fragmentation->fragmentationLength[0] == 0) if (fragmentation->fragmentationLength[0] == 0)
fragmentation->fragmentationOffset[0] = 0; fragmentation->fragmentationOffset[0] = 0;
for (int i = 1; i < fragmentation->fragmentationVectorSize; ++i) { for (int i = 1; i < fragmentation->fragmentationVectorSize; ++i) {
if (fragmentation->fragmentationLength[i] == 0) if (fragmentation->fragmentationLength[i] == 0)
fragmentation->fragmentationOffset[i] = fragmentation->fragmentationOffset[i] =
@ -379,7 +372,7 @@ size_t VCMSessionInfo::BuildVP8FragmentationHeader(
fragmentation->fragmentationLength[i - 1]; fragmentation->fragmentationLength[i - 1];
assert(i == 0 || assert(i == 0 ||
fragmentation->fragmentationOffset[i] >= fragmentation->fragmentationOffset[i] >=
fragmentation->fragmentationOffset[i - 1]); fragmentation->fragmentationOffset[i - 1]);
} }
assert(new_length <= frame_buffer_length); assert(new_length <= frame_buffer_length);
return new_length; return new_length;
@ -424,8 +417,8 @@ bool VCMSessionInfo::InSequence(const PacketIterator& packet_it,
// If the two iterators are pointing to the same packet they are considered // If the two iterators are pointing to the same packet they are considered
// to be in sequence. // to be in sequence.
return (packet_it == prev_packet_it || return (packet_it == prev_packet_it ||
(static_cast<uint16_t>((*prev_packet_it).seqNum + 1) == (static_cast<uint16_t>((*prev_packet_it).seqNum + 1) ==
(*packet_it).seqNum)); (*packet_it).seqNum));
} }
size_t VCMSessionInfo::MakeDecodable() { size_t VCMSessionInfo::MakeDecodable() {
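InSequence above leans on unsigned 16-bit wraparound, so the packet after 0xFFFF is 0x0000. A tiny demonstration:

#include <cstdint>

bool FollowsDirectly(uint16_t prev_seq, uint16_t seq) {
  return static_cast<uint16_t>(prev_seq + 1) == seq;
}
// FollowsDirectly(0xFFFF, 0x0000) -> true (wrap);
// FollowsDirectly(10, 12) -> false (gap).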
@ -435,8 +428,7 @@ size_t VCMSessionInfo::MakeDecodable() {
} }
PacketIterator it = packets_.begin(); PacketIterator it = packets_.begin();
// Make sure we remove the first NAL unit if it's not decodable. // Make sure we remove the first NAL unit if it's not decodable.
if ((*it).completeNALU == kNaluIncomplete || if ((*it).completeNALU == kNaluIncomplete || (*it).completeNALU == kNaluEnd) {
(*it).completeNALU == kNaluEnd) {
PacketIterator nalu_end = FindNaluEnd(it); PacketIterator nalu_end = FindNaluEnd(it);
return_length += DeletePacketData(it, nalu_end); return_length += DeletePacketData(it, nalu_end);
it = nalu_end; it = nalu_end;
@ -445,7 +437,7 @@ size_t VCMSessionInfo::MakeDecodable() {
// Take care of the rest of the NAL units. // Take care of the rest of the NAL units.
for (; it != packets_.end(); ++it) { for (; it != packets_.end(); ++it) {
bool start_of_nalu = ((*it).completeNALU == kNaluStart || bool start_of_nalu = ((*it).completeNALU == kNaluStart ||
(*it).completeNALU == kNaluComplete); (*it).completeNALU == kNaluComplete);
if (!start_of_nalu && !InSequence(it, prev_it)) { if (!start_of_nalu && !InSequence(it, prev_it)) {
// Found a sequence number gap due to packet loss. // Found a sequence number gap due to packet loss.
PacketIterator nalu_end = FindNaluEnd(it); PacketIterator nalu_end = FindNaluEnd(it);
@ -463,18 +455,15 @@ void VCMSessionInfo::SetNotDecodableIfIncomplete() {
decodable_ = false; decodable_ = false;
} }
bool bool VCMSessionInfo::HaveFirstPacket() const {
VCMSessionInfo::HaveFirstPacket() const {
return !packets_.empty() && (first_packet_seq_num_ != -1); return !packets_.empty() && (first_packet_seq_num_ != -1);
} }
bool bool VCMSessionInfo::HaveLastPacket() const {
VCMSessionInfo::HaveLastPacket() const {
return !packets_.empty() && (last_packet_seq_num_ != -1); return !packets_.empty() && (last_packet_seq_num_ != -1);
} }
bool bool VCMSessionInfo::session_nack() const {
VCMSessionInfo::session_nack() const {
return session_nack_; return session_nack_;
} }
@ -502,8 +491,8 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
break; break;
// Check for duplicate packets. // Check for duplicate packets.
if (rit != packets_.rend() && if (rit != packets_.rend() && (*rit).seqNum == packet.seqNum &&
(*rit).seqNum == packet.seqNum && (*rit).sizeBytes > 0) (*rit).sizeBytes > 0)
return -2; return -2;
if (packet.codec == kVideoCodecH264) { if (packet.codec == kVideoCodecH264) {
@ -572,8 +561,8 @@ void VCMSessionInfo::InformOfEmptyPacket(uint16_t seq_num) {
empty_seq_num_high_ = seq_num; empty_seq_num_high_ = seq_num;
else else
empty_seq_num_high_ = LatestSequenceNumber(seq_num, empty_seq_num_high_); empty_seq_num_high_ = LatestSequenceNumber(seq_num, empty_seq_num_high_);
if (empty_seq_num_low_ == -1 || IsNewerSequenceNumber(empty_seq_num_low_, if (empty_seq_num_low_ == -1 ||
seq_num)) IsNewerSequenceNumber(empty_seq_num_low_, seq_num))
empty_seq_num_low_ = seq_num; empty_seq_num_low_ = seq_num;
} }
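InformOfEmptyPacket above keeps the lowest and highest empty sequence numbers seen so far, with wrap-aware comparisons. A self-contained sketch; IsNewer reimplements the usual half-range test, and TrackEmptyRange is invented for illustration:

#include <cstdint>

// Wrap-aware "a is newer than b" for 16-bit RTP sequence numbers.
bool IsNewer(uint16_t a, uint16_t b) {
  return a != b && static_cast<uint16_t>(a - b) < 0x8000;
}

// low/high are ints so that -1 can mean "unset", as in the session code.
void TrackEmptyRange(uint16_t seq, int* low, int* high) {
  if (*high == -1 || IsNewer(seq, static_cast<uint16_t>(*high)))
    *high = seq;
  if (*low == -1 || IsNewer(static_cast<uint16_t>(*low), seq))
    *low = seq;
}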

View File

@ -116,8 +116,7 @@ class VCMSessionInfo {
PacketIterator FindPartitionEnd(PacketIterator it) const; PacketIterator FindPartitionEnd(PacketIterator it) const;
static bool InSequence(const PacketIterator& it, static bool InSequence(const PacketIterator& it,
const PacketIterator& prev_it); const PacketIterator& prev_it);
size_t InsertBuffer(uint8_t* frame_buffer, size_t InsertBuffer(uint8_t* frame_buffer, PacketIterator packetIterator);
PacketIterator packetIterator);
size_t Insert(const uint8_t* buffer, size_t Insert(const uint8_t* buffer,
size_t length, size_t length,
bool insert_start_code, bool insert_start_code,
@ -126,8 +125,7 @@ class VCMSessionInfo {
PacketIterator FindNaluEnd(PacketIterator packet_iter) const; PacketIterator FindNaluEnd(PacketIterator packet_iter) const;
// Deletes the data of all packets between |start| and |end|, inclusively. // Deletes the data of all packets between |start| and |end|, inclusively.
// Note that this function doesn't delete the actual packets. // Note that this function doesn't delete the actual packets.
size_t DeletePacketData(PacketIterator start, size_t DeletePacketData(PacketIterator start, PacketIterator end);
PacketIterator end);
void UpdateCompleteSession(); void UpdateCompleteSession();
// When enabled, determine if session is decodable, i.e. incomplete but // When enabled, determine if session is decodable, i.e. incomplete but

View File

@ -81,7 +81,7 @@ class TestVP8Partitions : public TestSessionInfo {
fragmentation_.fragmentationLength[partition_id]); fragmentation_.fragmentationLength[partition_id]);
for (int i = 0; i < packets_expected; ++i) { for (int i = 0; i < packets_expected; ++i) {
size_t packet_index = fragmentation_.fragmentationOffset[partition_id] + size_t packet_index = fragmentation_.fragmentationOffset[partition_id] +
i * packet_buffer_size(); i * packet_buffer_size();
if (packet_index + packet_buffer_size() > frame_buffer_size()) if (packet_index + packet_buffer_size() > frame_buffer_size())
return false; return false;
VerifyPacket(frame_buffer_ + packet_index, start_value + i); VerifyPacket(frame_buffer_ + packet_index, start_value + i);
@ -122,8 +122,7 @@ class TestNackList : public TestSessionInfo {
memset(seq_num_list_, 0, sizeof(seq_num_list_)); memset(seq_num_list_, 0, sizeof(seq_num_list_));
} }
void BuildSeqNumList(uint16_t low, void BuildSeqNumList(uint16_t low, uint16_t high) {
uint16_t high) {
size_t i = 0; size_t i = 0;
while (low != high + 1) { while (low != high + 1) {
EXPECT_LT(i, kMaxSeqNumListLength); EXPECT_LT(i, kMaxSeqNumListLength);
@ -173,14 +172,11 @@ TEST_F(TestSessionInfo, TestSimpleAPIs) {
// To make things more difficult we will make sure to have a wrap here. // To make things more difficult we will make sure to have a wrap here.
packet_.isFirstPacket = false; packet_.isFirstPacket = false;
packet_.markerBit = true; packet_.markerBit = true;
packet_.seqNum = 2; packet_.seqNum = 2;
packet_.sizeBytes = 0; packet_.sizeBytes = 0;
packet_.frameType = kEmptyFrame; packet_.frameType = kEmptyFrame;
EXPECT_EQ(0, EXPECT_EQ(
session_.InsertPacket(packet_, 0, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
frame_buffer_,
kNoErrors,
frame_data));
EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber()); EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
} }
@ -198,9 +194,8 @@ TEST_F(TestSessionInfo, NormalOperation) {
packet_.seqNum += 1; packet_.seqNum += 1;
FillPacket(i); FillPacket(i);
ASSERT_EQ(packet_buffer_size(), ASSERT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_, static_cast<size_t>(session_.InsertPacket(
kNoErrors, packet_, frame_buffer_, kNoErrors, frame_data)));
frame_data)));
} }
packet_.seqNum += 1; packet_.seqNum += 1;
@ -223,9 +218,8 @@ TEST_F(TestSessionInfo, ErrorsEqualDecodableState) {
packet_.markerBit = false; packet_.markerBit = false;
FillPacket(3); FillPacket(3);
EXPECT_EQ(packet_buffer_size(), EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_, static_cast<size_t>(session_.InsertPacket(
kWithErrors, packet_, frame_buffer_, kWithErrors, frame_data)));
frame_data)));
EXPECT_TRUE(session_.decodable()); EXPECT_TRUE(session_.decodable());
} }
@ -237,18 +231,16 @@ TEST_F(TestSessionInfo, SelectiveDecodableState) {
frame_data.rolling_average_packets_per_frame = 11; frame_data.rolling_average_packets_per_frame = 11;
frame_data.rtt_ms = 150; frame_data.rtt_ms = 150;
EXPECT_EQ(packet_buffer_size(), EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_, static_cast<size_t>(session_.InsertPacket(
kSelectiveErrors, packet_, frame_buffer_, kSelectiveErrors, frame_data)));
frame_data)));
EXPECT_FALSE(session_.decodable()); EXPECT_FALSE(session_.decodable());
packet_.seqNum -= 1; packet_.seqNum -= 1;
FillPacket(0); FillPacket(0);
packet_.isFirstPacket = true; packet_.isFirstPacket = true;
EXPECT_EQ(packet_buffer_size(), EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_, static_cast<size_t>(session_.InsertPacket(
kSelectiveErrors, packet_, frame_buffer_, kSelectiveErrors, frame_data)));
frame_data)));
EXPECT_TRUE(session_.decodable()); EXPECT_TRUE(session_.decodable());
packet_.isFirstPacket = false; packet_.isFirstPacket = false;
@ -256,19 +248,17 @@ TEST_F(TestSessionInfo, SelectiveDecodableState) {
for (int i = 2; i < 8; ++i) { for (int i = 2; i < 8; ++i) {
packet_.seqNum += 1; packet_.seqNum += 1;
FillPacket(i); FillPacket(i);
EXPECT_EQ(packet_buffer_size(), EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_, static_cast<size_t>(session_.InsertPacket(
kSelectiveErrors, packet_, frame_buffer_, kSelectiveErrors, frame_data)));
frame_data)));
EXPECT_TRUE(session_.decodable()); EXPECT_TRUE(session_.decodable());
} }
packet_.seqNum += 1; packet_.seqNum += 1;
FillPacket(8); FillPacket(8);
EXPECT_EQ(packet_buffer_size(), EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_, static_cast<size_t>(session_.InsertPacket(
kSelectiveErrors, packet_, frame_buffer_, kSelectiveErrors, frame_data)));
frame_data)));
EXPECT_TRUE(session_.decodable()); EXPECT_TRUE(session_.decodable());
} }
@ -285,18 +275,14 @@ TEST_F(TestSessionInfo, OutOfBoundsPackets1PacketFrame) {
packet_.isFirstPacket = true; packet_.isFirstPacket = true;
packet_.markerBit = true; packet_.markerBit = true;
FillPacket(1); FillPacket(1);
EXPECT_EQ(-3, session_.InsertPacket(packet_, EXPECT_EQ(
frame_buffer_, -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
kNoErrors,
frame_data));
packet_.seqNum = 0x0000; packet_.seqNum = 0x0000;
packet_.isFirstPacket = false; packet_.isFirstPacket = false;
packet_.markerBit = false; packet_.markerBit = false;
FillPacket(1); FillPacket(1);
EXPECT_EQ(-3, session_.InsertPacket(packet_, EXPECT_EQ(
frame_buffer_, -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
kNoErrors,
frame_data));
} }
TEST_F(TestSessionInfo, SetMarkerBitOnce) { TEST_F(TestSessionInfo, SetMarkerBitOnce) {
@ -311,10 +297,8 @@ TEST_F(TestSessionInfo, SetMarkerBitOnce) {
packet_.isFirstPacket = true; packet_.isFirstPacket = true;
packet_.markerBit = true; packet_.markerBit = true;
FillPacket(1); FillPacket(1);
EXPECT_EQ(-3, session_.InsertPacket(packet_, EXPECT_EQ(
frame_buffer_, -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
kNoErrors,
frame_data));
} }
TEST_F(TestSessionInfo, OutOfBoundsPacketsBase) { TEST_F(TestSessionInfo, OutOfBoundsPacketsBase) {
@ -331,10 +315,8 @@ TEST_F(TestSessionInfo, OutOfBoundsPacketsBase) {
packet_.isFirstPacket = true; packet_.isFirstPacket = true;
packet_.markerBit = true; packet_.markerBit = true;
FillPacket(1); FillPacket(1);
EXPECT_EQ(-3, session_.InsertPacket(packet_, EXPECT_EQ(
frame_buffer_, -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
kNoErrors,
frame_data));
packet_.seqNum = 0x0006; packet_.seqNum = 0x0006;
packet_.isFirstPacket = true; packet_.isFirstPacket = true;
packet_.markerBit = true; packet_.markerBit = true;
@ -346,10 +328,8 @@ TEST_F(TestSessionInfo, OutOfBoundsPacketsBase) {
packet_.isFirstPacket = false; packet_.isFirstPacket = false;
packet_.markerBit = true; packet_.markerBit = true;
FillPacket(1); FillPacket(1);
EXPECT_EQ(-3, session_.InsertPacket(packet_, EXPECT_EQ(
frame_buffer_, -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
kNoErrors,
frame_data));
} }
TEST_F(TestSessionInfo, OutOfBoundsPacketsWrap) { TEST_F(TestSessionInfo, OutOfBoundsPacketsWrap) {
@ -379,20 +359,14 @@ TEST_F(TestSessionInfo, OutOfBoundsPacketsWrap) {
packet_.isFirstPacket = false; packet_.isFirstPacket = false;
packet_.markerBit = false; packet_.markerBit = false;
FillPacket(1); FillPacket(1);
EXPECT_EQ(-3, EXPECT_EQ(
session_.InsertPacket(packet_, -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
frame_buffer_,
kNoErrors,
frame_data));
packet_.seqNum = 0x0006; packet_.seqNum = 0x0006;
packet_.isFirstPacket = false; packet_.isFirstPacket = false;
packet_.markerBit = false; packet_.markerBit = false;
FillPacket(1); FillPacket(1);
EXPECT_EQ(-3, EXPECT_EQ(
session_.InsertPacket(packet_, -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
frame_buffer_,
kNoErrors,
frame_data));
} }
TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) { TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
@ -417,10 +391,8 @@ TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
packet_.isFirstPacket = false; packet_.isFirstPacket = false;
packet_.markerBit = false; packet_.markerBit = false;
FillPacket(1); FillPacket(1);
EXPECT_EQ(-3, session_.InsertPacket(packet_, EXPECT_EQ(
frame_buffer_, -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
kNoErrors,
frame_data));
packet_.seqNum = 0x0010; packet_.seqNum = 0x0010;
packet_.isFirstPacket = false; packet_.isFirstPacket = false;
packet_.markerBit = false; packet_.markerBit = false;
@ -440,10 +412,8 @@ TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
packet_.isFirstPacket = false; packet_.isFirstPacket = false;
packet_.markerBit = false; packet_.markerBit = false;
FillPacket(1); FillPacket(1);
EXPECT_EQ(-3, session_.InsertPacket(packet_, EXPECT_EQ(
frame_buffer_, -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
kNoErrors,
frame_data));
} }
TEST_F(TestVP8Partitions, TwoPartitionsOneLoss) { TEST_F(TestVP8Partitions, TwoPartitionsOneLoss) {
@ -455,8 +425,8 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss) {
packet_header_.header.markerBit = false; packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 0; packet_header_.header.sequenceNumber = 0;
FillPacket(0); FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(), VCMPacket* packet =
packet_header_); new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(), EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_, static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data))); kNoErrors, frame_data)));
@ -505,8 +475,8 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss2) {
packet_header_.header.markerBit = false; packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 1; packet_header_.header.sequenceNumber = 1;
FillPacket(1); FillPacket(1);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(), VCMPacket* packet =
packet_header_); new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(), EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_, static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data))); kNoErrors, frame_data)));
@ -567,8 +537,8 @@ TEST_F(TestVP8Partitions, TwoPartitionsNoLossWrap) {
packet_header_.header.markerBit = false; packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 0xfffd; packet_header_.header.sequenceNumber = 0xfffd;
FillPacket(0); FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(), VCMPacket* packet =
packet_header_); new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(), EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_, static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data))); kNoErrors, frame_data)));
@ -629,8 +599,8 @@ TEST_F(TestVP8Partitions, TwoPartitionsLossWrap) {
packet_header_.header.markerBit = false; packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 0xfffd; packet_header_.header.sequenceNumber = 0xfffd;
FillPacket(0); FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(), VCMPacket* packet =
packet_header_); new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(), EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_, static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data))); kNoErrors, frame_data)));
@ -682,7 +652,6 @@ TEST_F(TestVP8Partitions, TwoPartitionsLossWrap) {
EXPECT_TRUE(VerifyPartition(1, 1, 2)); EXPECT_TRUE(VerifyPartition(1, 1, 2));
} }
TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) { TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
// Partition 1 |Partition 2 | Partition 3 // Partition 1 |Partition 2 | Partition 3
// [ 1 ] [ 2 ] | | [ 5 ] | [ 6 ] // [ 1 ] [ 2 ] | | [ 5 ] | [ 6 ]
@ -692,8 +661,8 @@ TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
packet_header_.header.markerBit = false; packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 1; packet_header_.header.sequenceNumber = 1;
FillPacket(1); FillPacket(1);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(), VCMPacket* packet =
packet_header_); new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(), EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_, static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data))); kNoErrors, frame_data)));
@ -754,8 +723,8 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
packet_header_.header.markerBit = false; packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 1; packet_header_.header.sequenceNumber = 1;
FillPacket(1); FillPacket(1);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(), VCMPacket* packet =
packet_header_); new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(), EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_, static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data))); kNoErrors, frame_data)));
@ -767,8 +736,7 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
packet_header_.header.markerBit = false; packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber += 1; packet_header_.header.sequenceNumber += 1;
FillPacket(2); FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
packet_header_);
EXPECT_EQ(packet_buffer_size(), EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_, static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data))); kNoErrors, frame_data)));
@ -841,8 +809,8 @@ TEST_F(TestVP8Partitions, AggregationOverTwoPackets) {
packet_header_.header.markerBit = false; packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 0; packet_header_.header.sequenceNumber = 0;
FillPacket(0); FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(), VCMPacket* packet =
packet_header_); new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(), EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_, static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data))); kNoErrors, frame_data)));
@ -892,10 +860,8 @@ TEST_F(TestNalUnits, OnlyReceivedEmptyPacket) {
packet_.sizeBytes = 0; packet_.sizeBytes = 0;
packet_.seqNum = 0; packet_.seqNum = 0;
packet_.markerBit = false; packet_.markerBit = false;
EXPECT_EQ(0, session_.InsertPacket(packet_, EXPECT_EQ(
frame_buffer_, 0, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
kNoErrors,
frame_data));
EXPECT_EQ(0U, session_.MakeDecodable()); EXPECT_EQ(0U, session_.MakeDecodable());
EXPECT_EQ(0U, session_.SessionLength()); EXPECT_EQ(0U, session_.SessionLength());

View File

@ -31,7 +31,7 @@ VideoReceiver::VideoReceiver(Clock* clock, EventFactory* event_factory)
_receiveCritSect(CriticalSectionWrapper::CreateCriticalSection()), _receiveCritSect(CriticalSectionWrapper::CreateCriticalSection()),
_timing(clock_), _timing(clock_),
_receiver(&_timing, clock_, event_factory), _receiver(&_timing, clock_, event_factory),
_decodedFrameCallback(_timing, clock_), _decodedFrameCallback(&_timing, clock_),
_frameTypeCallback(NULL), _frameTypeCallback(NULL),
_receiveStatsCallback(NULL), _receiveStatsCallback(NULL),
_decoderTimingCallback(NULL), _decoderTimingCallback(NULL),
@ -84,20 +84,12 @@ int32_t VideoReceiver::Process() {
int jitter_buffer_ms; int jitter_buffer_ms;
int min_playout_delay_ms; int min_playout_delay_ms;
int render_delay_ms; int render_delay_ms;
_timing.GetTimings(&decode_ms, _timing.GetTimings(&decode_ms, &max_decode_ms, &current_delay_ms,
&max_decode_ms, &target_delay_ms, &jitter_buffer_ms,
&current_delay_ms, &min_playout_delay_ms, &render_delay_ms);
&target_delay_ms, _decoderTimingCallback->OnDecoderTiming(
&jitter_buffer_ms, decode_ms, max_decode_ms, current_delay_ms, target_delay_ms,
&min_playout_delay_ms, jitter_buffer_ms, min_playout_delay_ms, render_delay_ms);
&render_delay_ms);
_decoderTimingCallback->OnDecoderTiming(decode_ms,
max_decode_ms,
current_delay_ms,
target_delay_ms,
jitter_buffer_ms,
min_playout_delay_ms,
render_delay_ms);
} }
// Size of render buffer. // Size of render buffer.
@ -285,7 +277,7 @@ int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
} }
VCMEncodedFrame* frame = _receiver.FrameForDecoding( VCMEncodedFrame* frame = _receiver.FrameForDecoding(
maxWaitTimeMs, nextRenderTimeMs, prefer_late_decoding); maxWaitTimeMs, &nextRenderTimeMs, prefer_late_decoding);
if (!frame) if (!frame)
return VCM_FRAME_NOT_READY; return VCM_FRAME_NOT_READY;
@ -353,12 +345,8 @@ int32_t VideoReceiver::RequestKeyFrame() {
// Must be called from inside the receive side critical section. // Must be called from inside the receive side critical section.
int32_t VideoReceiver::Decode(const VCMEncodedFrame& frame) { int32_t VideoReceiver::Decode(const VCMEncodedFrame& frame) {
TRACE_EVENT_ASYNC_STEP1("webrtc", TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame.TimeStamp(), "Decode",
"Video", "type", frame.FrameType());
frame.TimeStamp(),
"Decode",
"type",
frame.FrameType());
// Change decoder if payload type has changed // Change decoder if payload type has changed
_decoder = _codecDataBase.GetDecoder(frame, &_decodedFrameCallback); _decoder = _codecDataBase.GetDecoder(frame, &_decodedFrameCallback);
if (_decoder == NULL) { if (_decoder == NULL) {
@ -419,8 +407,8 @@ int32_t VideoReceiver::RegisterReceiveCodec(const VideoCodec* receiveCodec,
if (receiveCodec == NULL) { if (receiveCodec == NULL) {
return VCM_PARAMETER_ERROR; return VCM_PARAMETER_ERROR;
} }
if (!_codecDataBase.RegisterReceiveCodec( if (!_codecDataBase.RegisterReceiveCodec(receiveCodec, numberOfCores,
receiveCodec, numberOfCores, requireKeyFrame)) { requireKeyFrame)) {
return -1; return -1;
} }
return 0; return 0;
@ -446,9 +434,7 @@ int32_t VideoReceiver::IncomingPacket(const uint8_t* incomingPayload,
size_t payloadLength, size_t payloadLength,
const WebRtcRTPHeader& rtpInfo) { const WebRtcRTPHeader& rtpInfo) {
if (rtpInfo.frameType == kVideoFrameKey) { if (rtpInfo.frameType == kVideoFrameKey) {
TRACE_EVENT1("webrtc", TRACE_EVENT1("webrtc", "VCM::PacketKeyFrame", "seqnum",
"VCM::PacketKeyFrame",
"seqnum",
rtpInfo.header.sequenceNumber); rtpInfo.header.sequenceNumber);
} }
if (incomingPayload == NULL) { if (incomingPayload == NULL) {
@ -487,7 +473,9 @@ int32_t VideoReceiver::SetRenderDelay(uint32_t timeMS) {
} }
// Current video delay // Current video delay
int32_t VideoReceiver::Delay() const { return _timing.TargetVideoDelay(); } int32_t VideoReceiver::Delay() const {
return _timing.TargetVideoDelay();
}
uint32_t VideoReceiver::DiscardedPackets() const { uint32_t VideoReceiver::DiscardedPackets() const {
return _receiver.DiscardedPackets(); return _receiver.DiscardedPackets();
@ -543,8 +531,8 @@ void VideoReceiver::SetNackSettings(size_t max_nack_list_size,
CriticalSectionScoped process_cs(process_crit_sect_.get()); CriticalSectionScoped process_cs(process_crit_sect_.get());
max_nack_list_size_ = max_nack_list_size; max_nack_list_size_ = max_nack_list_size;
} }
_receiver.SetNackSettings( _receiver.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
max_nack_list_size, max_packet_age_to_nack, max_incomplete_time_ms); max_incomplete_time_ms);
} }
int VideoReceiver::SetMinReceiverDelay(int desired_delay_ms) { int VideoReceiver::SetMinReceiverDelay(int desired_delay_ms) {