Move src/ -> webrtc/
TBR=niklas.enbom@webrtc.org Review URL: https://webrtc-codereview.appspot.com/915006 git-svn-id: http://webrtc.googlecode.com/svn/trunk@2963 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
70
webrtc/modules/video_coding/main/source/Android.mk
Normal file
70
webrtc/modules/video_coding/main/source/Android.mk
Normal file
@ -0,0 +1,70 @@
|
||||
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.

# Android build file for the video coding module (libwebrtc_video_coding).

LOCAL_PATH := $(call my-dir)

include $(CLEAR_VARS)

# Shared WebRTC build settings (common defines, toolchain flags).
include $(LOCAL_PATH)/../../../../../android-webrtc.mk

LOCAL_ARM_MODE := arm
LOCAL_MODULE_CLASS := STATIC_LIBRARIES
LOCAL_MODULE := libwebrtc_video_coding
LOCAL_MODULE_TAGS := optional
LOCAL_CPP_EXTENSION := .cc
LOCAL_SRC_FILES := \
    codec_database.cc \
    codec_timer.cc \
    content_metrics_processing.cc \
    decoding_state.cc \
    encoded_frame.cc \
    exp_filter.cc \
    frame_buffer.cc \
    frame_dropper.cc \
    generic_decoder.cc \
    generic_encoder.cc \
    inter_frame_delay.cc \
    jitter_buffer.cc \
    jitter_buffer_common.cc \
    jitter_estimator.cc \
    media_opt_util.cc \
    media_optimization.cc \
    packet.cc \
    qm_select.cc \
    receiver.cc \
    rtt_filter.cc \
    session_info.cc \
    timestamp_extrapolator.cc \
    timestamp_map.cc \
    timing.cc \
    video_coding_impl.cc

# Flags passed to both C and C++ files.
LOCAL_CFLAGS := \
    $(MY_WEBRTC_COMMON_DEFS)

LOCAL_C_INCLUDES := \
    $(LOCAL_PATH)/../interface \
    $(LOCAL_PATH)/../../codecs/interface \
    $(LOCAL_PATH)/../../codecs/i420/main/interface \
    $(LOCAL_PATH)/../../codecs/vp8/main/interface \
    $(LOCAL_PATH)/../../../interface \
    $(LOCAL_PATH)/../../../.. \
    $(LOCAL_PATH)/../../../../common_video/vplib/main/interface \
    $(LOCAL_PATH)/../../../../common_video/interface \
    $(LOCAL_PATH)/../../../../system_wrappers/interface

LOCAL_SHARED_LIBRARIES := \
    libcutils \
    libdl \
    libstlport

# When building inside the Android tree (no NDK), pull in stlport config.
ifndef NDK_ROOT
include external/stlport/libstlport.mk
endif
include $(BUILD_STATIC_LIBRARY)
|
||||
575
webrtc/modules/video_coding/main/source/codec_database.cc
Normal file
575
webrtc/modules/video_coding/main/source/codec_database.cc
Normal file
@ -0,0 +1,575 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "modules/video_coding/main/source/codec_database.h"
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
#include "engine_configurations.h"
|
||||
#ifdef VIDEOCODEC_I420
|
||||
#include "modules/video_coding/codecs/i420/main/interface/i420.h"
|
||||
#endif
|
||||
#ifdef VIDEOCODEC_VP8
|
||||
#include "modules/video_coding/codecs/vp8/include/vp8.h"
|
||||
#endif
|
||||
#include "modules/video_coding/main/source/internal_defines.h"
|
||||
#include "system_wrappers/interface/trace.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
VCMDecoderMapItem::VCMDecoderMapItem(VideoCodec* settings,
|
||||
int number_of_cores,
|
||||
bool require_key_frame)
|
||||
: settings(settings),
|
||||
number_of_cores(number_of_cores),
|
||||
require_key_frame(require_key_frame) {
|
||||
assert(number_of_cores >= 0);
|
||||
}
|
||||
|
||||
VCMExtDecoderMapItem::VCMExtDecoderMapItem(
|
||||
VideoDecoder* external_decoder_instance,
|
||||
uint8_t payload_type,
|
||||
bool internal_render_timing)
|
||||
: payload_type(payload_type),
|
||||
external_decoder_instance(external_decoder_instance),
|
||||
internal_render_timing(internal_render_timing) {
|
||||
}
|
||||
|
||||
// Constructs an empty database: no send codec, no receive codec, and no
// registered external encoders/decoders. |id| is only used for tracing.
VCMCodecDataBase::VCMCodecDataBase(int id)
    : id_(id),
      number_of_cores_(0),
      max_payload_size_(kDefaultPayloadSize),
      periodic_key_frames_(false),
      current_enc_is_external_(false),
      send_codec_(),
      receive_codec_(),
      external_payload_type_(0),
      external_encoder_(NULL),
      internal_source_(false),
      ptr_encoder_(NULL),
      ptr_decoder_(NULL),
      current_dec_is_external_(false),
      dec_map_(),
      dec_external_map_() {
}

// Tears down both directions; frees any internally owned encoder/decoder
// instances and all registered map entries.
VCMCodecDataBase::~VCMCodecDataBase() {
  ResetSender();
  ResetReceiver();
}
|
||||
|
||||
int VCMCodecDataBase::NumberOfCodecs() {
|
||||
return VCM_NUM_VIDEO_CODECS_AVAILABLE;
|
||||
}
|
||||
|
||||
bool VCMCodecDataBase::Codec(int list_id,
|
||||
VideoCodec* settings) {
|
||||
if (!settings) {
|
||||
return false;
|
||||
}
|
||||
if (list_id >= VCM_NUM_VIDEO_CODECS_AVAILABLE) {
|
||||
return false;
|
||||
}
|
||||
memset(settings, 0, sizeof(VideoCodec));
|
||||
switch (list_id) {
|
||||
#ifdef VIDEOCODEC_VP8
|
||||
case VCM_VP8_IDX: {
|
||||
strncpy(settings->plName, "VP8", 4);
|
||||
settings->codecType = kVideoCodecVP8;
|
||||
// 96 to 127 dynamic payload types for video codecs.
|
||||
settings->plType = VCM_VP8_PAYLOAD_TYPE;
|
||||
settings->startBitrate = 100;
|
||||
settings->minBitrate = VCM_MIN_BITRATE;
|
||||
settings->maxBitrate = 0;
|
||||
settings->maxFramerate = VCM_DEFAULT_FRAME_RATE;
|
||||
settings->width = VCM_DEFAULT_CODEC_WIDTH;
|
||||
settings->height = VCM_DEFAULT_CODEC_HEIGHT;
|
||||
settings->numberOfSimulcastStreams = 0;
|
||||
settings->codecSpecific.VP8.resilience = kResilientStream;
|
||||
settings->codecSpecific.VP8.numberOfTemporalLayers = 1;
|
||||
settings->codecSpecific.VP8.denoisingOn = true;
|
||||
settings->codecSpecific.VP8.errorConcealmentOn = false;
|
||||
settings->codecSpecific.VP8.automaticResizeOn = false;
|
||||
settings->codecSpecific.VP8.frameDroppingOn = true;
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
#ifdef VIDEOCODEC_I420
|
||||
case VCM_I420_IDX: {
|
||||
strncpy(settings->plName, "I420", 5);
|
||||
settings->codecType = kVideoCodecI420;
|
||||
// 96 to 127 dynamic payload types for video codecs.
|
||||
settings->plType = VCM_I420_PAYLOAD_TYPE;
|
||||
// Bitrate needed for this size and framerate.
|
||||
settings->startBitrate = 3 * VCM_DEFAULT_CODEC_WIDTH *
|
||||
VCM_DEFAULT_CODEC_HEIGHT * 8 *
|
||||
VCM_DEFAULT_FRAME_RATE / 1000 / 2;
|
||||
settings->maxBitrate = settings->startBitrate;
|
||||
settings->maxFramerate = VCM_DEFAULT_FRAME_RATE;
|
||||
settings->width = VCM_DEFAULT_CODEC_WIDTH;
|
||||
settings->height = VCM_DEFAULT_CODEC_HEIGHT;
|
||||
settings->minBitrate = VCM_MIN_BITRATE;
|
||||
settings->numberOfSimulcastStreams = 0;
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
default: {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool VCMCodecDataBase::Codec(VideoCodecType codec_type,
|
||||
VideoCodec* settings) {
|
||||
for (int i = 0; i < VCMCodecDataBase::NumberOfCodecs(); i++) {
|
||||
const bool ret = VCMCodecDataBase::Codec(i, settings);
|
||||
if (!ret) {
|
||||
return false;
|
||||
}
|
||||
if (codec_type == settings->codecType) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void VCMCodecDataBase::ResetSender() {
|
||||
DeleteEncoder();
|
||||
periodic_key_frames_ = false;
|
||||
}
|
||||
|
||||
// Assuming only one registered encoder - since only one used, no need for more.
|
||||
bool VCMCodecDataBase::RegisterSendCodec(
|
||||
const VideoCodec* send_codec,
|
||||
int number_of_cores,
|
||||
int max_payload_size) {
|
||||
if (!send_codec) {
|
||||
return false;
|
||||
}
|
||||
if (max_payload_size <= 0) {
|
||||
max_payload_size = kDefaultPayloadSize;
|
||||
}
|
||||
if (number_of_cores < 0 || number_of_cores > 32) {
|
||||
return false;
|
||||
}
|
||||
if (send_codec->plType <= 0) {
|
||||
return false;
|
||||
}
|
||||
// Make sure the start bit rate is sane...
|
||||
if (send_codec->startBitrate > 1000000) {
|
||||
return false;
|
||||
}
|
||||
if (send_codec->codecType == kVideoCodecUnknown) {
|
||||
return false;
|
||||
}
|
||||
number_of_cores_ = number_of_cores;
|
||||
max_payload_size_ = max_payload_size;
|
||||
|
||||
memcpy(&send_codec_, send_codec, sizeof(VideoCodec));
|
||||
|
||||
if (send_codec_.maxBitrate == 0) {
|
||||
// max is one bit per pixel
|
||||
send_codec_.maxBitrate = (static_cast<int>(send_codec_.height) *
|
||||
static_cast<int>(send_codec_.width) *
|
||||
static_cast<int>(send_codec_.maxFramerate)) / 1000;
|
||||
if (send_codec_.startBitrate > send_codec_.maxBitrate) {
|
||||
// But if the user tries to set a higher start bit rate we will
|
||||
// increase the max accordingly.
|
||||
send_codec_.maxBitrate = send_codec_.startBitrate;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool VCMCodecDataBase::SendCodec(VideoCodec* current_send_codec) const {
|
||||
WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, VCMId(id_),
|
||||
"SendCodec");
|
||||
if (!ptr_encoder_) {
|
||||
return false;
|
||||
}
|
||||
memcpy(current_send_codec, &send_codec_, sizeof(VideoCodec));
|
||||
return true;
|
||||
}
|
||||
|
||||
VideoCodecType VCMCodecDataBase::SendCodec() const {
|
||||
WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, VCMId(id_),
|
||||
"SendCodec type");
|
||||
if (!ptr_encoder_) {
|
||||
return kVideoCodecUnknown;
|
||||
}
|
||||
return send_codec_.codecType;
|
||||
}
|
||||
|
||||
bool VCMCodecDataBase::DeregisterExternalEncoder(
|
||||
uint8_t payload_type, bool* was_send_codec) {
|
||||
assert(was_send_codec);
|
||||
*was_send_codec = false;
|
||||
if (external_payload_type_ != payload_type) {
|
||||
return false;
|
||||
}
|
||||
if (send_codec_.plType == payload_type) {
|
||||
// De-register as send codec if needed.
|
||||
DeleteEncoder();
|
||||
memset(&send_codec_, 0, sizeof(VideoCodec));
|
||||
current_enc_is_external_ = false;
|
||||
*was_send_codec = true;
|
||||
}
|
||||
external_payload_type_ = 0;
|
||||
external_encoder_ = NULL;
|
||||
internal_source_ = false;
|
||||
return true;
|
||||
}
|
||||
|
||||
void VCMCodecDataBase::RegisterExternalEncoder(
|
||||
VideoEncoder* external_encoder,
|
||||
uint8_t payload_type,
|
||||
bool internal_source) {
|
||||
// Since only one encoder can be used at a given time, only one external
|
||||
// encoder can be registered/used.
|
||||
external_encoder_ = external_encoder;
|
||||
external_payload_type_ = payload_type;
|
||||
internal_source_ = internal_source;
|
||||
}
|
||||
|
||||
// Creates and initializes an encoder for |settings|, wiring its output to
// |encoded_frame_callback|. Any previously held encoder is destroyed first.
// Uses the registered external encoder when the payload types match,
// otherwise an internal one. Returns NULL on creation or init failure.
VCMGenericEncoder* VCMCodecDataBase::GetEncoder(
    const VideoCodec* settings,
    VCMEncodedFrameCallback* encoded_frame_callback) {
  // If encoder exists, will destroy it and create new one.
  DeleteEncoder();
  const bool use_external = (settings->plType == external_payload_type_);
  if (use_external) {
    // External encoder.
    ptr_encoder_ = new VCMGenericEncoder(*external_encoder_, internal_source_);
  } else {
    ptr_encoder_ = CreateEncoder(settings->codecType);
  }
  current_enc_is_external_ = use_external;
  encoded_frame_callback->SetPayloadType(settings->plType);
  if (!ptr_encoder_) {
    WEBRTC_TRACE(webrtc::kTraceError,
                 webrtc::kTraceVideoCoding,
                 VCMId(id_),
                 "Failed to create encoder: %s.",
                 settings->plName);
    return NULL;
  }
  if (ptr_encoder_->InitEncode(settings, number_of_cores_, max_payload_size_) <
      0) {
    WEBRTC_TRACE(webrtc::kTraceError,
                 webrtc::kTraceVideoCoding,
                 VCMId(id_),
                 "Failed to initialize encoder: %s.",
                 settings->plName);
    DeleteEncoder();
    return NULL;
  } else if (ptr_encoder_->RegisterEncodeCallback(encoded_frame_callback) <
             0) {
    DeleteEncoder();
    return NULL;
  }
  // Intentionally don't check return value since the encoder registration
  // shouldn't fail because the codec doesn't support changing the periodic key
  // frame setting.
  ptr_encoder_->SetPeriodicKeyFrames(periodic_key_frames_);
  return ptr_encoder_;
}
|
||||
|
||||
bool VCMCodecDataBase::SetPeriodicKeyFrames(bool enable) {
|
||||
periodic_key_frames_ = enable;
|
||||
if (ptr_encoder_) {
|
||||
return (ptr_encoder_->SetPeriodicKeyFrames(periodic_key_frames_) == 0);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void VCMCodecDataBase::ResetReceiver() {
|
||||
ReleaseDecoder(ptr_decoder_);
|
||||
ptr_decoder_ = NULL;
|
||||
memset(&receive_codec_, 0, sizeof(VideoCodec));
|
||||
while (!dec_map_.empty()) {
|
||||
DecoderMap::iterator it = dec_map_.begin();
|
||||
delete (*it).second;
|
||||
dec_map_.erase(it);
|
||||
}
|
||||
while (!dec_external_map_.empty()) {
|
||||
ExternalDecoderMap::iterator external_it = dec_external_map_.begin();
|
||||
delete (*external_it).second;
|
||||
dec_external_map_.erase(external_it);
|
||||
}
|
||||
current_dec_is_external_ = false;
|
||||
}
|
||||
|
||||
// Removes the external decoder registered for |payload_type|, releasing the
// active decoder first when it is the one in use. Also deregisters the
// matching receive codec. Returns false when no such registration exists.
bool VCMCodecDataBase::DeregisterExternalDecoder(uint8_t payload_type) {
  ExternalDecoderMap::iterator it = dec_external_map_.find(payload_type);
  if (it == dec_external_map_.end()) {
    // Not found
    return false;
  }
  if (receive_codec_.plType == payload_type) {
    // Release it if it was registered and in use.
    ReleaseDecoder(ptr_decoder_);
    ptr_decoder_ = NULL;
  }
  DeregisterReceiveCodec(payload_type);
  delete it->second;
  dec_external_map_.erase(it);
  return true;
}
|
||||
|
||||
// Add the external encoder object to the list of external decoders.
|
||||
// Won't be registered as a receive codec until RegisterReceiveCodec is called.
|
||||
bool VCMCodecDataBase::RegisterExternalDecoder(
|
||||
VideoDecoder* external_decoder,
|
||||
uint8_t payload_type,
|
||||
bool internal_render_timing) {
|
||||
// Check if payload value already exists, if so - erase old and insert new.
|
||||
VCMExtDecoderMapItem* ext_decoder = new VCMExtDecoderMapItem(
|
||||
external_decoder, payload_type, internal_render_timing);
|
||||
if (!ext_decoder) {
|
||||
return false;
|
||||
}
|
||||
DeregisterExternalDecoder(payload_type);
|
||||
dec_external_map_[payload_type] = ext_decoder;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool VCMCodecDataBase::DecoderRegistered() const {
|
||||
return !dec_map_.empty();
|
||||
}
|
||||
|
||||
bool VCMCodecDataBase::RegisterReceiveCodec(
|
||||
const VideoCodec* receive_codec,
|
||||
int number_of_cores,
|
||||
bool require_key_frame) {
|
||||
if (number_of_cores < 0) {
|
||||
return false;
|
||||
}
|
||||
WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCoding, VCMId(id_),
|
||||
"Codec: %s, Payload type %d, Height %d, Width %d, Bitrate %d,"
|
||||
"Framerate %d.",
|
||||
receive_codec->plName, receive_codec->plType,
|
||||
receive_codec->height, receive_codec->width,
|
||||
receive_codec->startBitrate, receive_codec->maxFramerate);
|
||||
// Check if payload value already exists, if so - erase old and insert new.
|
||||
DeregisterReceiveCodec(receive_codec->plType);
|
||||
if (receive_codec->codecType == kVideoCodecUnknown) {
|
||||
return false;
|
||||
}
|
||||
VideoCodec* new_receive_codec = new VideoCodec(*receive_codec);
|
||||
dec_map_[receive_codec->plType] = new VCMDecoderMapItem(new_receive_codec,
|
||||
number_of_cores,
|
||||
require_key_frame);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Removes the receive-codec registration for |payload_type| and, when that
// codec is the one currently in use, clears the active receive state.
// Returns false if no registration exists for the payload type.
bool VCMCodecDataBase::DeregisterReceiveCodec(
    uint8_t payload_type) {
  DecoderMap::iterator it = dec_map_.find(payload_type);
  if (it == dec_map_.end()) {
    return false;
  }
  delete it->second;
  dec_map_.erase(it);
  if (receive_codec_.plType == payload_type) {
    // This codec is currently in use.
    memset(&receive_codec_, 0, sizeof(VideoCodec));
    current_dec_is_external_ = false;
  }
  return true;
}
|
||||
|
||||
bool VCMCodecDataBase::ReceiveCodec(VideoCodec* current_receive_codec) const {
|
||||
assert(current_receive_codec);
|
||||
if (!ptr_decoder_) {
|
||||
return false;
|
||||
}
|
||||
memcpy(current_receive_codec, &receive_codec_, sizeof(VideoCodec));
|
||||
return true;
|
||||
}
|
||||
|
||||
VideoCodecType VCMCodecDataBase::ReceiveCodec() const {
|
||||
if (!ptr_decoder_) {
|
||||
return kVideoCodecUnknown;
|
||||
}
|
||||
return receive_codec_.codecType;
|
||||
}
|
||||
|
||||
// Returns the decoder for |payload_type|, creating and initializing one when
// the payload type differs from the currently active receive codec. The new
// decoder's output is wired to |decoded_frame_callback|. Returns NULL when
// no registration exists for the payload type or initialization fails.
// Fix: the address-of operator on current_dec_is_external_ had been corrupted
// into the mojibake sequence "¤t_dec_is_external_" (an HTML-entity
// mangling of "&current_..."), which does not compile.
VCMGenericDecoder* VCMCodecDataBase::GetDecoder(
    uint8_t payload_type, VCMDecodedFrameCallback* decoded_frame_callback) {
  if (payload_type == receive_codec_.plType || payload_type == 0) {
    return ptr_decoder_;
  }
  // Check for existing decoder, if exists - delete.
  if (ptr_decoder_) {
    ReleaseDecoder(ptr_decoder_);
    ptr_decoder_ = NULL;
    memset(&receive_codec_, 0, sizeof(VideoCodec));
  }
  ptr_decoder_ = CreateAndInitDecoder(payload_type, &receive_codec_,
                                      &current_dec_is_external_);
  if (!ptr_decoder_) {
    return NULL;
  }
  if (ptr_decoder_->RegisterDecodeCompleteCallback(decoded_frame_callback)
      < 0) {
    // Roll back to a clean "no receive codec" state on failure.
    ReleaseDecoder(ptr_decoder_);
    ptr_decoder_ = NULL;
    memset(&receive_codec_, 0, sizeof(VideoCodec));
    return NULL;
  }
  return ptr_decoder_;
}
|
||||
|
||||
VCMGenericDecoder* VCMCodecDataBase::CreateDecoderCopy() const {
|
||||
if (!ptr_decoder_) {
|
||||
return NULL;
|
||||
}
|
||||
VideoDecoder* decoder_copy = ptr_decoder_->_decoder.Copy();
|
||||
if (!decoder_copy) {
|
||||
return NULL;
|
||||
}
|
||||
return new VCMGenericDecoder(*decoder_copy, id_, ptr_decoder_->External());
|
||||
}
|
||||
|
||||
void VCMCodecDataBase::ReleaseDecoder(VCMGenericDecoder* decoder) const {
|
||||
if (decoder) {
|
||||
assert(&decoder->_decoder);
|
||||
decoder->Release();
|
||||
if (!decoder->External()) {
|
||||
delete &decoder->_decoder;
|
||||
}
|
||||
delete decoder;
|
||||
}
|
||||
}
|
||||
|
||||
void VCMCodecDataBase::CopyDecoder(const VCMGenericDecoder& decoder) {
|
||||
VideoDecoder* decoder_copy = decoder._decoder.Copy();
|
||||
if (decoder_copy) {
|
||||
VCMDecodedFrameCallback* cb = ptr_decoder_->_callback;
|
||||
ReleaseDecoder(ptr_decoder_);
|
||||
ptr_decoder_ = new VCMGenericDecoder(*decoder_copy, id_,
|
||||
decoder.External());
|
||||
if (cb && ptr_decoder_->RegisterDecodeCompleteCallback(cb)) {
|
||||
assert(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool VCMCodecDataBase::SupportsRenderScheduling() const {
|
||||
bool render_timing = true;
|
||||
if (current_dec_is_external_) {
|
||||
const VCMExtDecoderMapItem* ext_item = FindExternalDecoderItem(
|
||||
receive_codec_.plType);
|
||||
render_timing = ext_item->internal_render_timing;
|
||||
}
|
||||
return render_timing;
|
||||
}
|
||||
|
||||
// Creates and initializes a decoder for |payload_type| from its registered
// settings, preferring a registered external decoder over an internal one.
// On success copies the settings into |new_codec| and sets |*external|;
// returns NULL when the payload type is unknown or initialization fails.
VCMGenericDecoder* VCMCodecDataBase::CreateAndInitDecoder(
    uint8_t payload_type,
    VideoCodec* new_codec,
    bool* external) const {
  assert(external);
  assert(new_codec);
  const VCMDecoderMapItem* decoder_item = FindDecoderItem(payload_type);
  if (!decoder_item) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, VCMId(id_),
                 "Unknown payload type: %u", payload_type);
    return NULL;
  }
  VCMGenericDecoder* ptr_decoder = NULL;
  const VCMExtDecoderMapItem* external_dec_item = FindExternalDecoderItem(
      payload_type);
  if (external_dec_item) {
    // External codec.
    ptr_decoder = new VCMGenericDecoder(
        *external_dec_item->external_decoder_instance, id_, true);
    *external = true;
  } else {
    // Create decoder.
    ptr_decoder = CreateDecoder(decoder_item->settings->codecType);
    *external = false;
  }
  if (!ptr_decoder) {
    return NULL;
  }

  if (ptr_decoder->InitDecode(decoder_item->settings.get(),
                              decoder_item->number_of_cores,
                              decoder_item->require_key_frame) < 0) {
    ReleaseDecoder(ptr_decoder);
    return NULL;
  }
  memcpy(new_codec, decoder_item->settings.get(), sizeof(VideoCodec));
  return ptr_decoder;
}
|
||||
|
||||
VCMGenericEncoder* VCMCodecDataBase::CreateEncoder(
|
||||
const VideoCodecType type) const {
|
||||
switch (type) {
|
||||
#ifdef VIDEOCODEC_VP8
|
||||
case kVideoCodecVP8:
|
||||
return new VCMGenericEncoder(*(VP8Encoder::Create()));
|
||||
#endif
|
||||
#ifdef VIDEOCODEC_I420
|
||||
case kVideoCodecI420:
|
||||
return new VCMGenericEncoder(*(new I420Encoder));
|
||||
#endif
|
||||
default:
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void VCMCodecDataBase::DeleteEncoder() {
|
||||
if (ptr_encoder_) {
|
||||
ptr_encoder_->Release();
|
||||
if (!current_enc_is_external_) {
|
||||
delete &ptr_encoder_->_encoder;
|
||||
}
|
||||
delete ptr_encoder_;
|
||||
ptr_encoder_ = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
VCMGenericDecoder* VCMCodecDataBase::CreateDecoder(VideoCodecType type) const {
|
||||
switch (type) {
|
||||
#ifdef VIDEOCODEC_VP8
|
||||
case kVideoCodecVP8:
|
||||
return new VCMGenericDecoder(*(VP8Decoder::Create()), id_);
|
||||
#endif
|
||||
#ifdef VIDEOCODEC_I420
|
||||
case kVideoCodecI420:
|
||||
return new VCMGenericDecoder(*(new I420Decoder), id_);
|
||||
#endif
|
||||
default:
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
const VCMDecoderMapItem* VCMCodecDataBase::FindDecoderItem(
|
||||
uint8_t payload_type) const {
|
||||
DecoderMap::const_iterator it = dec_map_.find(payload_type);
|
||||
if (it != dec_map_.end()) {
|
||||
return (*it).second;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
const VCMExtDecoderMapItem* VCMCodecDataBase::FindExternalDecoderItem(
|
||||
uint8_t payload_type) const {
|
||||
ExternalDecoderMap::const_iterator it = dec_external_map_.find(payload_type);
|
||||
if (it != dec_external_map_.end()) {
|
||||
return (*it).second;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
} // namespace webrtc
|
||||
200
webrtc/modules/video_coding/main/source/codec_database.h
Normal file
200
webrtc/modules/video_coding/main/source/codec_database.h
Normal file
@ -0,0 +1,200 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_CODEC_DATABASE_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_CODEC_DATABASE_H_
|
||||
|
||||
#include <map>
|
||||
|
||||
#include "modules/video_coding/codecs/interface/video_codec_interface.h"
|
||||
#include "modules/video_coding/main/interface/video_coding.h"
|
||||
#include "modules/video_coding/main/source/generic_decoder.h"
|
||||
#include "modules/video_coding/main/source/generic_encoder.h"
|
||||
#include "system_wrappers/interface/scoped_ptr.h"
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
enum VCMCodecDBProperties {
|
||||
kDefaultPayloadSize = 1440
|
||||
};
|
||||
|
||||
struct VCMDecoderMapItem {
|
||||
public:
|
||||
VCMDecoderMapItem(VideoCodec* settings,
|
||||
int number_of_cores,
|
||||
bool require_key_frame);
|
||||
|
||||
scoped_ptr<VideoCodec> settings;
|
||||
int number_of_cores;
|
||||
bool require_key_frame;
|
||||
};
|
||||
|
||||
struct VCMExtDecoderMapItem {
|
||||
public:
|
||||
VCMExtDecoderMapItem(VideoDecoder* external_decoder_instance,
|
||||
uint8_t payload_type,
|
||||
bool internal_render_timing);
|
||||
|
||||
uint8_t payload_type;
|
||||
VideoDecoder* external_decoder_instance;
|
||||
bool internal_render_timing;
|
||||
};
|
||||
|
||||
class VCMCodecDataBase {
|
||||
public:
|
||||
explicit VCMCodecDataBase(int id);
|
||||
~VCMCodecDataBase();
|
||||
|
||||
// Sender Side
|
||||
// Returns the number of supported codecs (or -1 in case of error).
|
||||
static int NumberOfCodecs();
|
||||
|
||||
// Returns the default settings for the codec with id |list_id|.
|
||||
static bool Codec(int list_id, VideoCodec* settings);
|
||||
|
||||
// Returns the default settings for the codec with type |codec_type|.
|
||||
static bool Codec(VideoCodecType codec_type, VideoCodec* settings);
|
||||
|
||||
void ResetSender();
|
||||
|
||||
// Sets the sender side codec and initiates the desired codec given the
|
||||
// VideoCodec struct.
|
||||
// Returns true if the codec was successfully registered, false otherwise.
|
||||
bool RegisterSendCodec(const VideoCodec* send_codec,
|
||||
int number_of_cores,
|
||||
int max_payload_size);
|
||||
|
||||
// Gets the current send codec. Relevant for internal codecs only.
|
||||
// Returns true if there is a send codec, false otherwise.
|
||||
bool SendCodec(VideoCodec* current_send_codec) const;
|
||||
|
||||
// Gets current send side codec type. Relevant for internal codecs only.
|
||||
// Returns kVideoCodecUnknown if there is no send codec.
|
||||
VideoCodecType SendCodec() const;
|
||||
|
||||
// Registers and initializes an external encoder object.
|
||||
// |internal_source| should be set to true if the codec has an internal
|
||||
// video source and doesn't need the user to provide it with frames via
|
||||
// the Encode() method.
|
||||
void RegisterExternalEncoder(VideoEncoder* external_encoder,
|
||||
uint8_t payload_type,
|
||||
bool internal_source);
|
||||
|
||||
// Deregisters an external encoder. Returns true if the encoder was
|
||||
// found and deregistered, false otherwise. |was_send_codec| is set to true
|
||||
// if the external encoder was the send codec before being deregistered.
|
||||
bool DeregisterExternalEncoder(uint8_t payload_type, bool* was_send_codec);
|
||||
|
||||
// Returns an encoder specified by the payload type in |settings|. The
|
||||
// encoded frame callback of the encoder is set to |encoded_frame_callback|.
|
||||
// If no such encoder already exists an instance will be created and
|
||||
// initialized using |settings|.
|
||||
// NULL is returned if no encoder with the specified payload type was found
|
||||
// and the function failed to create one.
|
||||
VCMGenericEncoder* GetEncoder(
|
||||
const VideoCodec* settings,
|
||||
VCMEncodedFrameCallback* encoded_frame_callback);
|
||||
|
||||
bool SetPeriodicKeyFrames(bool enable);
|
||||
|
||||
// Receiver Side
|
||||
void ResetReceiver();
|
||||
|
||||
// Deregisters an external decoder object specified by |payload_type|.
|
||||
bool DeregisterExternalDecoder(uint8_t payload_type);
|
||||
|
||||
// Registers an external decoder object to the payload type |payload_type|.
|
||||
// |internal_render_timing| is set to true if the |external_decoder| has
|
||||
// built in rendering which is able to obey the render timestamps of the
|
||||
// encoded frames.
|
||||
bool RegisterExternalDecoder(VideoDecoder* external_decoder,
|
||||
uint8_t payload_type,
|
||||
bool internal_render_timing);
|
||||
|
||||
bool DecoderRegistered() const;
|
||||
|
||||
bool RegisterReceiveCodec(const VideoCodec* receive_codec,
|
||||
int number_of_cores,
|
||||
bool require_key_frame);
|
||||
|
||||
bool DeregisterReceiveCodec(uint8_t payload_type);
|
||||
|
||||
// Get current receive side codec. Relevant for internal codecs only.
|
||||
bool ReceiveCodec(VideoCodec* current_receive_codec) const;
|
||||
|
||||
// Get current receive side codec type. Relevant for internal codecs only.
|
||||
VideoCodecType ReceiveCodec() const;
|
||||
|
||||
// Returns a decoder specified by |payload_type|. The decoded frame callback
|
||||
// of the encoder is set to |decoded_frame_callback|. If no such decoder
|
||||
// already exists an instance will be created and initialized.
|
||||
// NULL is returned if no encoder with the specified payload type was found
|
||||
// and the function failed to create one.
|
||||
VCMGenericDecoder* GetDecoder(
|
||||
uint8_t payload_type, VCMDecodedFrameCallback* decoded_frame_callback);
|
||||
|
||||
// Returns a deep copy of the currently active decoder.
|
||||
VCMGenericDecoder* CreateDecoderCopy() const;
|
||||
|
||||
// Deletes the memory of the decoder instance |decoder|. Used to delete
|
||||
// deep copies returned by CreateDecoderCopy().
|
||||
void ReleaseDecoder(VCMGenericDecoder* decoder) const;
|
||||
|
||||
// Creates a deep copy of |decoder| and replaces the currently used decoder
|
||||
// with it.
|
||||
void CopyDecoder(const VCMGenericDecoder& decoder);
|
||||
|
||||
// Returns true if the currently active decoder supports render scheduling,
|
||||
// that is, it is able to render frames according to the render timestamp of
|
||||
// the encoded frames.
|
||||
bool SupportsRenderScheduling() const;
|
||||
|
||||
private:
|
||||
typedef std::map<uint8_t, VCMDecoderMapItem*> DecoderMap;
|
||||
typedef std::map<uint8_t, VCMExtDecoderMapItem*> ExternalDecoderMap;
|
||||
|
||||
VCMGenericDecoder* CreateAndInitDecoder(uint8_t payload_type,
|
||||
VideoCodec* new_codec,
|
||||
bool* external) const;
|
||||
|
||||
// Create an internal encoder given a codec type.
|
||||
VCMGenericEncoder* CreateEncoder(const VideoCodecType type) const;
|
||||
|
||||
void DeleteEncoder();
|
||||
|
||||
// Create an internal Decoder given a codec type
|
||||
VCMGenericDecoder* CreateDecoder(VideoCodecType type) const;
|
||||
|
||||
const VCMDecoderMapItem* FindDecoderItem(uint8_t payload_type) const;
|
||||
|
||||
const VCMExtDecoderMapItem* FindExternalDecoderItem(
|
||||
uint8_t payload_type) const;
|
||||
|
||||
int id_;
|
||||
int number_of_cores_;
|
||||
int max_payload_size_;
|
||||
bool periodic_key_frames_;
|
||||
bool current_enc_is_external_;
|
||||
VideoCodec send_codec_;
|
||||
VideoCodec receive_codec_;
|
||||
uint8_t external_payload_type_;
|
||||
VideoEncoder* external_encoder_;
|
||||
bool internal_source_;
|
||||
VCMGenericEncoder* ptr_encoder_;
|
||||
VCMGenericDecoder* ptr_decoder_;
|
||||
bool current_dec_is_external_;
|
||||
DecoderMap dec_map_;
|
||||
ExternalDecoderMap dec_external_map_;
|
||||
}; // VCMCodecDataBase
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_CODEC_DATABASE_H_
|
||||
133
webrtc/modules/video_coding/main/source/codec_timer.cc
Normal file
133
webrtc/modules/video_coding/main/source/codec_timer.cc
Normal file
@ -0,0 +1,133 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "codec_timer.h"
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
VCMCodecTimer::VCMCodecTimer()
|
||||
:
|
||||
_filteredMax(0),
|
||||
_firstDecodeTime(true),
|
||||
_shortMax(0),
|
||||
_history()
|
||||
{
|
||||
Reset();
|
||||
}
|
||||
|
||||
// Computes the elapsed decode time, feeds it into the max filter, and
// returns the elapsed time in milliseconds.
WebRtc_Word32 VCMCodecTimer::StopTimer(WebRtc_Word64 startTimeMs,
                                       WebRtc_Word64 nowMs)
{
    const WebRtc_Word32 elapsedMs =
        static_cast<WebRtc_Word32>(nowMs - startTimeMs);
    MaxFilter(elapsedMs, nowMs);
    return elapsedMs;
}
|
||||
|
||||
void VCMCodecTimer::Reset()
|
||||
{
|
||||
_filteredMax = 0;
|
||||
_firstDecodeTime = true;
|
||||
_shortMax = 0;
|
||||
for (int i=0; i < MAX_HISTORY_SIZE; i++)
|
||||
{
|
||||
_history[i].shortMax = 0;
|
||||
_history[i].timeMs = -1;
|
||||
}
|
||||
}
|
||||
|
||||
// Update the max-value filter
|
||||
void VCMCodecTimer::MaxFilter(WebRtc_Word32 decodeTime, WebRtc_Word64 nowMs)
|
||||
{
|
||||
if (!_firstDecodeTime)
|
||||
{
|
||||
UpdateMaxHistory(decodeTime, nowMs);
|
||||
ProcessHistory(nowMs);
|
||||
}
|
||||
else
|
||||
{
|
||||
_firstDecodeTime = false;
|
||||
}
|
||||
}
|
||||
|
||||
// Folds one decode-time sample into the history of per-second maxima.
// Samples within the current SHORT_FILTER_MS bucket only raise _shortMax;
// once the bucket expires, the history is shifted and a new entry is
// started at slot 0.
void
VCMCodecTimer::UpdateMaxHistory(WebRtc_Word32 decodeTime, WebRtc_Word64 now)
{
    if (_history[0].timeMs >= 0 &&
        now - _history[0].timeMs < SHORT_FILTER_MS)
    {
        // Still inside the current bucket: just track its maximum.
        if (decodeTime > _shortMax)
        {
            _shortMax = decodeTime;
        }
    }
    else
    {
        // Only add a new value to the history once a second
        if(_history[0].timeMs == -1)
        {
            // First, no shift
            _shortMax = decodeTime;
        }
        else
        {
            // Shift
            for(int i = (MAX_HISTORY_SIZE - 2); i >= 0 ; i--)
            {
                _history[i+1].shortMax = _history[i].shortMax;
                _history[i+1].timeMs = _history[i].timeMs;
            }
        }
        // A zero bucket max means no sample raised it; seed it with the
        // current sample so the stored entry is never empty.
        if (_shortMax == 0)
        {
            _shortMax = decodeTime;
        }

        // Commit the finished bucket at slot 0 and start a fresh bucket.
        _history[0].shortMax = _shortMax;
        _history[0].timeMs = now;
        _shortMax = 0;
    }
}
|
||||
|
||||
// Recomputes _filteredMax as the largest bucket maximum among history
// entries that are still inside the MAX_HISTORY_SIZE * SHORT_FILTER_MS
// window, starting from the in-progress bucket (_shortMax).
void
VCMCodecTimer::ProcessHistory(WebRtc_Word64 nowMs)
{
    _filteredMax = _shortMax;
    if (_history[0].timeMs == -1)
    {
        // History is empty; the current bucket is all we have.
        return;
    }
    for (int i=0; i < MAX_HISTORY_SIZE; i++)
    {
        if (_history[i].timeMs == -1)
        {
            // Unused slot; entries are ordered, so the rest are unused too.
            break;
        }
        if (nowMs - _history[i].timeMs > MAX_HISTORY_SIZE * SHORT_FILTER_MS)
        {
            // This sample (and all samples after this) is too old
            break;
        }
        if (_history[i].shortMax > _filteredMax)
        {
            // This sample is the largest one this far into the history
            _filteredMax = _history[i].shortMax;
        }
    }
}
|
||||
|
||||
// Get the maximum observed time within a time window
|
||||
WebRtc_Word32 VCMCodecTimer::RequiredDecodeTimeMs(FrameType /*frameType*/) const
|
||||
{
|
||||
return _filteredMax;
|
||||
}
|
||||
|
||||
}
|
||||
61
webrtc/modules/video_coding/main/source/codec_timer.h
Normal file
61
webrtc/modules/video_coding/main/source/codec_timer.h
Normal file
@ -0,0 +1,61 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
#include "module_common_types.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// MAX_HISTORY_SIZE * SHORT_FILTER_MS defines the window size in milliseconds
|
||||
#define MAX_HISTORY_SIZE 20
|
||||
#define SHORT_FILTER_MS 1000
|
||||
|
||||
class VCMShortMaxSample
|
||||
{
|
||||
public:
|
||||
VCMShortMaxSample() : shortMax(0), timeMs(-1) {};
|
||||
|
||||
WebRtc_Word32 shortMax;
|
||||
WebRtc_Word64 timeMs;
|
||||
};
|
||||
|
||||
// Tracks how long frame decodes take and exposes a filtered maximum decode
// time over a sliding window of MAX_HISTORY_SIZE * SHORT_FILTER_MS ms.
class VCMCodecTimer
{
public:
    VCMCodecTimer();

    // Feeds the elapsed decode time (nowMs - startTimeMs) into the max
    // filter and returns the elapsed time in ms (not the filtered max).
    WebRtc_Word32 StopTimer(WebRtc_Word64 startTimeMs, WebRtc_Word64 nowMs);

    // Empty the list of timers.
    void Reset();

    // Get the required decode time in ms (the filtered maximum).
    WebRtc_Word32 RequiredDecodeTimeMs(FrameType frameType) const;

private:
    // Fold a new decode-time sample into the per-second history.
    void UpdateMaxHistory(WebRtc_Word32 decodeTime, WebRtc_Word64 now);
    // Filter entry point; the very first sample only arms the filter.
    void MaxFilter(WebRtc_Word32 newTime, WebRtc_Word64 nowMs);
    // Recompute _filteredMax from the non-expired history entries.
    void ProcessHistory(WebRtc_Word64 nowMs);

    WebRtc_Word32 _filteredMax;    // Max decode time over the window (ms).
    bool _firstDecodeTime;         // True until the first sample arrives.
    WebRtc_Word32 _shortMax;       // Max within the current 1 s bucket.
    VCMShortMaxSample _history[MAX_HISTORY_SIZE];  // Per-second maxima.

};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
|
||||
@ -0,0 +1,125 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "modules/video_coding/main/source/content_metrics_processing.h"
|
||||
|
||||
#include <math.h>
|
||||
|
||||
#include "modules/interface/module_common_types.h"
|
||||
#include "modules/video_coding/main/interface/video_coding_defines.h"
|
||||
|
||||
namespace webrtc {
|
||||
//////////////////////////////////
|
||||
/// VCMContentMetricsProcessing //
|
||||
//////////////////////////////////
|
||||
|
||||
// Constructs the processor with an averaging factor matched to 30 fps and
// freshly allocated (zeroed) metric accumulators. The raw allocations are
// paired with deletes in the destructor.
VCMContentMetricsProcessing::VCMContentMetricsProcessing()
    : recursive_avg_factor_(1 / 150.0f),  // matched to 30fps.
      frame_cnt_uniform_avg_(0),
      avg_motion_level_(0.0f),
      avg_spatial_level_(0.0f) {
  recursive_avg_ = new VideoContentMetrics();
  uniform_avg_ = new VideoContentMetrics();
}
|
||||
|
||||
// Releases the metric objects allocated in the constructor.
VCMContentMetricsProcessing::~VCMContentMetricsProcessing() {
  delete recursive_avg_;
  delete uniform_avg_;
}
|
||||
|
||||
int VCMContentMetricsProcessing::Reset() {
|
||||
recursive_avg_->Reset();
|
||||
uniform_avg_->Reset();
|
||||
frame_cnt_uniform_avg_ = 0;
|
||||
avg_motion_level_ = 0.0f;
|
||||
avg_spatial_level_ = 0.0f;
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
// Informs the processor of the current frame rate so the recursive
// averaging factor spans one kQmMinIntervalMs window.
void VCMContentMetricsProcessing::UpdateFrameRate(uint32_t frameRate) {
  // Guard: a zero frame rate would make the factor infinite and poison the
  // recursive averages with NaN. Keep the previous factor instead.
  if (frameRate == 0) {
    return;
  }
  // Update factor for recursive averaging: the window contains
  // frameRate * kQmMinIntervalMs / 1000 frames.
  recursive_avg_factor_ = static_cast<float>(1000.0f) /
      static_cast<float>(frameRate * kQmMinIntervalMs);
}
|
||||
|
||||
// Accessor for the long-term (recursively averaged) content metrics.
VideoContentMetrics* VCMContentMetricsProcessing::LongTermAvgData() {
  return recursive_avg_;
}
|
||||
|
||||
// Returns the short-term uniform average, or NULL when no frames have been
// accumulated since the last reset. The stored sums are divided by the
// frame count on read-out.
VideoContentMetrics* VCMContentMetricsProcessing::ShortTermAvgData() {
  if (frame_cnt_uniform_avg_ == 0) {
    return NULL;
  }
  // Two metrics are used: motion and spatial level.
  uniform_avg_->motion_magnitude = avg_motion_level_ /
      static_cast<float>(frame_cnt_uniform_avg_);
  uniform_avg_->spatial_pred_err = avg_spatial_level_ /
      static_cast<float>(frame_cnt_uniform_avg_);
  return uniform_avg_;
}
|
||||
|
||||
// Drops the accumulated sums and frame count used by the uniform
// (short-term) average.
void VCMContentMetricsProcessing::ResetShortTermAvgData() {
  frame_cnt_uniform_avg_ = 0;
  avg_motion_level_ = 0.0f;
  avg_spatial_level_ = 0.0f;
}
|
||||
|
||||
int VCMContentMetricsProcessing::UpdateContentData(
|
||||
const VideoContentMetrics *contentMetrics) {
|
||||
if (contentMetrics == NULL) {
|
||||
return VCM_OK;
|
||||
}
|
||||
return ProcessContent(contentMetrics);
|
||||
}
|
||||
|
||||
int VCMContentMetricsProcessing::ProcessContent(
|
||||
const VideoContentMetrics *contentMetrics) {
|
||||
// Update the recursive averaged metrics: average is over longer window
|
||||
// of time: over QmMinIntervalMs ms.
|
||||
UpdateRecursiveAvg(contentMetrics);
|
||||
// Update the uniform averaged metrics: average is over shorter window
|
||||
// of time: based on ~RTCP reports.
|
||||
UpdateUniformAvg(contentMetrics);
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
void VCMContentMetricsProcessing::UpdateUniformAvg(
|
||||
const VideoContentMetrics *contentMetrics) {
|
||||
// Update frame counter.
|
||||
frame_cnt_uniform_avg_ += 1;
|
||||
// Update averaged metrics: motion and spatial level are used.
|
||||
avg_motion_level_ += contentMetrics->motion_magnitude;
|
||||
avg_spatial_level_ += contentMetrics->spatial_pred_err;
|
||||
return;
|
||||
}
|
||||
|
||||
void VCMContentMetricsProcessing::UpdateRecursiveAvg(
|
||||
const VideoContentMetrics *contentMetrics) {
|
||||
|
||||
// Spatial metrics: 2x2, 1x2(H), 2x1(V).
|
||||
recursive_avg_->spatial_pred_err = (1 - recursive_avg_factor_) *
|
||||
recursive_avg_->spatial_pred_err +
|
||||
recursive_avg_factor_ * contentMetrics->spatial_pred_err;
|
||||
|
||||
recursive_avg_->spatial_pred_err_h = (1 - recursive_avg_factor_) *
|
||||
recursive_avg_->spatial_pred_err_h +
|
||||
recursive_avg_factor_ * contentMetrics->spatial_pred_err_h;
|
||||
|
||||
recursive_avg_->spatial_pred_err_v = (1 - recursive_avg_factor_) *
|
||||
recursive_avg_->spatial_pred_err_v +
|
||||
recursive_avg_factor_ * contentMetrics->spatial_pred_err_v;
|
||||
|
||||
// Motion metric: Derived from NFD (normalized frame difference).
|
||||
recursive_avg_->motion_magnitude = (1 - recursive_avg_factor_) *
|
||||
recursive_avg_->motion_magnitude +
|
||||
recursive_avg_factor_ * contentMetrics->motion_magnitude;
|
||||
}
|
||||
} // end of namespace
|
||||
@ -0,0 +1,76 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
struct VideoContentMetrics;
|
||||
|
||||
// QM interval time (in ms)
|
||||
enum {
|
||||
kQmMinIntervalMs = 10000
|
||||
};
|
||||
|
||||
// Flag for NFD metric vs motion metric
|
||||
enum {
|
||||
kNfdMetric = 1
|
||||
};
|
||||
|
||||
/**********************************/
|
||||
/* Content Metrics Processing */
|
||||
/**********************************/
|
||||
// Maintains two running averages of per-frame content metrics: a long-term
// recursive (EWMA) average and a short-term uniform average.
class VCMContentMetricsProcessing {
 public:
  VCMContentMetricsProcessing();
  ~VCMContentMetricsProcessing();

  // Update class with latest metrics. NULL input is accepted (no-op).
  int UpdateContentData(const VideoContentMetrics *contentMetrics);

  // Reset the short-term averaged content data.
  void ResetShortTermAvgData();

  // Initialize. Returns VCM_OK.
  int Reset();

  // Inform class of current frame rate (adjusts the recursive factor).
  void UpdateFrameRate(uint32_t frameRate);

  // Returns the long-term averaged content data: recursive average over
  // longer time scale.
  VideoContentMetrics* LongTermAvgData();

  // Returns the short-term averaged content data: uniform average over
  // shorter time scale. NULL if no frames were accumulated.
  VideoContentMetrics* ShortTermAvgData();

 private:
  // Compute working average.
  int ProcessContent(const VideoContentMetrics *contentMetrics);

  // Update the recursive averaged metrics: longer time average (~5/10 secs).
  void UpdateRecursiveAvg(const VideoContentMetrics *contentMetrics);

  // Update the uniform averaged metrics: shorter time average (~RTCP report).
  void UpdateUniformAvg(const VideoContentMetrics *contentMetrics);

  VideoContentMetrics* recursive_avg_;  // Owned; long-term EWMA output.
  VideoContentMetrics* uniform_avg_;    // Owned; short-term average output.
  float recursive_avg_factor_;          // EWMA weight of a new sample.
  uint32_t frame_cnt_uniform_avg_;      // Frames in the uniform average.
  float avg_motion_level_;              // Uniform-average motion sum.
  float avg_spatial_level_;             // Uniform-average spatial sum.
};
|
||||
} // namespace webrtc
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_
|
||||
206
webrtc/modules/video_coding/main/source/decoding_state.cc
Normal file
206
webrtc/modules/video_coding/main/source/decoding_state.cc
Normal file
@ -0,0 +1,206 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "modules/video_coding/main/source/decoding_state.h"
|
||||
|
||||
#include "modules/video_coding/main/source/frame_buffer.h"
|
||||
#include "modules/video_coding/main/source/jitter_buffer_common.h"
|
||||
#include "modules/video_coding/main/source/packet.h"
|
||||
#include "modules/interface/module_common_types.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Starts in the initial state (nothing decoded yet) with temporal-layer
// sync assumed.
VCMDecodingState::VCMDecodingState()
    : sequence_num_(0),
      time_stamp_(0),
      picture_id_(kNoPictureId),
      temporal_id_(kNoTemporalIdx),
      tl0_pic_id_(kNoTl0PicIdx),
      full_sync_(true),
      init_(true) {}
|
||||
|
||||
VCMDecodingState::~VCMDecodingState() {}  // No owned resources.
|
||||
|
||||
// Returns the state to its initial values (identical to construction).
void VCMDecodingState::Reset() {
  // TODO(mikhal): Verify - not always would want to reset the sync
  sequence_num_ = 0;
  time_stamp_ = 0;
  picture_id_ = kNoPictureId;
  temporal_id_ = kNoTemporalIdx;
  tl0_pic_id_ = kNoTl0PicIdx;
  full_sync_ = true;
  init_ = true;
}
|
||||
|
||||
// Timestamp of the last decoded frame.
uint32_t VCMDecodingState::time_stamp() const { return time_stamp_; }
|
||||
|
||||
// Highest sequence number of the last decoded frame.
uint16_t VCMDecodingState::sequence_num() const { return sequence_num_; }
|
||||
|
||||
// A frame is "old" if its timestamp is not newer than the last decoded
// frame's (LatestTimestamp picks the later of the two, wrap-aware).
bool VCMDecodingState::IsOldFrame(const VCMFrameBuffer* frame) const {
  assert(frame != NULL);
  if (init_)
    return false;  // Nothing decoded yet, so nothing can be old.
  return (LatestTimestamp(time_stamp_, frame->TimeStamp(), NULL)
      == time_stamp_);
}
|
||||
|
||||
// Packet analogue of IsOldFrame(): old if its timestamp is not newer than
// the last decoded frame's timestamp.
bool VCMDecodingState::IsOldPacket(const VCMPacket* packet) const {
  assert(packet != NULL);
  if (init_)
    return false;  // Nothing decoded yet, so nothing can be old.
  return (LatestTimestamp(time_stamp_, packet->timestamp, NULL)
      == time_stamp_);
}
|
||||
|
||||
void VCMDecodingState::SetState(const VCMFrameBuffer* frame) {
|
||||
assert(frame != NULL && frame->GetHighSeqNum() >= 0);
|
||||
UpdateSyncState(frame);
|
||||
sequence_num_ = static_cast<uint16_t>(frame->GetHighSeqNum());
|
||||
time_stamp_ = frame->TimeStamp();
|
||||
picture_id_ = frame->PictureId();
|
||||
temporal_id_ = frame->TemporalId();
|
||||
tl0_pic_id_ = frame->Tl0PicId();
|
||||
init_ = false;
|
||||
}
|
||||
|
||||
// Sets the decoding state to the frame *preceding* |frame|, so that |frame|
// itself will test as continuous. Wrap-around values: 0x7FFF is the max
// 15-bit picture id, 0x00FF the max 8-bit tl0 index.
void VCMDecodingState::SetStateOneBack(const VCMFrameBuffer* frame) {
  assert(frame != NULL && frame->GetHighSeqNum() >= 0);
  sequence_num_ = static_cast<uint16_t>(frame->GetHighSeqNum()) - 1u;
  time_stamp_ = frame->TimeStamp() - 1u;
  temporal_id_ = frame->TemporalId();
  if (frame->PictureId() != kNoPictureId) {
    if (frame->PictureId() == 0)
      picture_id_ = 0x7FFF;  // Wrap back to the max picture id.
    else
      picture_id_ = frame->PictureId() - 1;
  }
  if (frame->Tl0PicId() != kNoTl0PicIdx) {
    if (frame->Tl0PicId() == 0)
      tl0_pic_id_ = 0x00FF;  // Wrap back to the max tl0 index.
    else
      tl0_pic_id_ = frame->Tl0PicId() - 1;
  }
  init_ = false;
}
|
||||
|
||||
// Accounts for packets of the last decoded frame that arrive late: if the
// timestamp matches, the stored sequence number is advanced to the later
// of the two (wrap-aware via LatestSequenceNumber).
void VCMDecodingState::UpdateOldPacket(const VCMPacket* packet) {
  assert(packet != NULL);
  if (packet->timestamp == time_stamp_) {
    // Late packet belonging to the last decoded frame - make sure we update the
    // last decoded sequence number.
    sequence_num_ = LatestSequenceNumber(packet->seqNum, sequence_num_, NULL);
  }
}
|
||||
|
||||
// Overwrites the stored last-decoded sequence number.
void VCMDecodingState::SetSeqNum(uint16_t new_seq_num) {
  sequence_num_ = new_seq_num;
}
|
||||
|
||||
bool VCMDecodingState::init() const {
|
||||
return init_;
|
||||
}
|
||||
|
||||
bool VCMDecodingState::full_sync() const {
|
||||
return full_sync_;
|
||||
}
|
||||
|
||||
// Re-evaluates full_sync_ for an incoming frame. Sync is regained on key
// frames, layer-sync frames, or when temporal layers are not in use;
// otherwise an existing sync survives only if the frame is continuous by
// picture id or sequence number.
void VCMDecodingState::UpdateSyncState(const VCMFrameBuffer* frame) {
  if (init_)
    return;
  if (frame->TemporalId() == kNoTemporalIdx ||
      frame->Tl0PicId() == kNoTl0PicIdx) {
    full_sync_ = true;
  } else if (frame->FrameType() == kVideoFrameKey || frame->LayerSync()) {
    full_sync_ = true;
  } else if (full_sync_) {
    // Verify that we are still in sync.
    // Sync will be broken if continuity is true for layers but not for the
    // other methods (PictureId and SeqNum).
    if (UsingPictureId(frame)) {
      full_sync_ = ContinuousPictureId(frame->PictureId());
    } else {
      full_sync_ = ContinuousSeqNum(static_cast<uint16_t>(
          frame->GetLowSeqNum()));
    }
  }
}
|
||||
|
||||
bool VCMDecodingState::ContinuousFrame(const VCMFrameBuffer* frame) const {
  // Check continuity based on the following hierarchy:
  // - Temporal layers (stop here if out of sync).
  // - Picture Id when available.
  // - Sequence numbers.
  // Return true when in initial state.
  // Note that when a method is not applicable it will return false.
  assert(frame != NULL);
  if (init_)
    return true;  // Initial state accepts any frame.

  if (!ContinuousLayer(frame->TemporalId(), frame->Tl0PicId())) {
    // Base layers are not continuous or temporal layers are inactive.
    // In the presence of temporal layers, check for Picture ID/sequence number
    // continuity if sync can be restored by this frame.
    if (!full_sync_ && !frame->LayerSync())
      return false;  // Out of sync and this frame cannot restore it.
    else if (UsingPictureId(frame)) {
      return ContinuousPictureId(frame->PictureId());
    } else {
      return ContinuousSeqNum(static_cast<uint16_t>(frame->GetLowSeqNum()));
    }
  }
  return true;
}
|
||||
|
||||
// True when |picture_id| directly follows the stored picture id, handling
// wrap-around for both the 7-bit and 15-bit VP8 picture-id encodings.
bool VCMDecodingState::ContinuousPictureId(int picture_id) const {
  int next_picture_id = picture_id_ + 1;
  if (picture_id < picture_id_) {
    // Wrap
    if (picture_id_ >= 0x80) {
      // 15 bits used for picture id
      return ((next_picture_id & 0x7FFF) == picture_id);
    } else {
      // 7 bits used for picture id
      return ((next_picture_id & 0x7F) == picture_id);
    }
  }
  // No wrap
  return (next_picture_id == picture_id);
}
|
||||
|
||||
// A frame is continuous if its low sequence number directly follows the
// stored one; uint16_t arithmetic makes the comparison wrap-aware.
bool VCMDecodingState::ContinuousSeqNum(uint16_t seq_num) const {
  return (static_cast<uint16_t>(sequence_num_ + 1) == seq_num);
}
|
||||
|
||||
// Layer-based continuity: only base-layer (temporal_id == 0) frames whose
// tl0 index directly follows the stored one are continuous.
bool VCMDecodingState::ContinuousLayer(int temporal_id,
                                       int tl0_pic_id) const {
  // First, check if applicable.
  if (temporal_id == kNoTemporalIdx || tl0_pic_id == kNoTl0PicIdx)
    return false;
  // If this is the first frame to use temporal layers, make sure we start
  // from base.
  else if (tl0_pic_id_ == kNoTl0PicIdx && temporal_id_ == kNoTemporalIdx &&
           temporal_id == 0)
    return true;

  // Current implementation: Look for base layer continuity.
  if (temporal_id != 0)
    return false;
  // uint8_t cast makes the +1 comparison wrap at the 8-bit tl0 range.
  return (static_cast<uint8_t>(tl0_pic_id_ + 1) == tl0_pic_id);
}
|
||||
|
||||
bool VCMDecodingState::UsingPictureId(const VCMFrameBuffer* frame) const {
|
||||
return (frame->PictureId() != kNoPictureId && picture_id_ != kNoPictureId);
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
70
webrtc/modules/video_coding/main/source/decoding_state.h
Normal file
70
webrtc/modules/video_coding/main/source/decoding_state.h
Normal file
@ -0,0 +1,70 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_DECODING_STATE_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_DECODING_STATE_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Forward declarations
|
||||
class VCMFrameBuffer;
|
||||
class VCMPacket;
|
||||
|
||||
// Tracks the state of the last decoded frame (sequence number, timestamp,
// VP8 picture id / temporal-layer ids) so the jitter buffer can decide
// whether an incoming frame or packet is old, continuous, or breaks
// temporal-layer sync.
class VCMDecodingState {
 public:
  VCMDecodingState();
  ~VCMDecodingState();
  // Check for old frame
  bool IsOldFrame(const VCMFrameBuffer* frame) const;
  // Check for old packet
  bool IsOldPacket(const VCMPacket* packet) const;
  // Check for frame continuity based on current decoded state. Use best method
  // possible, i.e. temporal info, picture ID or sequence number.
  bool ContinuousFrame(const VCMFrameBuffer* frame) const;
  // Record |frame| as the last decoded frame.
  void SetState(const VCMFrameBuffer* frame);
  // Set the decoding state one frame back.
  void SetStateOneBack(const VCMFrameBuffer* frame);
  // Update the sequence number if the timestamp matches current state and the
  // sequence number is higher than the current one. This accounts for packets
  // arriving late.
  void UpdateOldPacket(const VCMPacket* packet);
  void SetSeqNum(uint16_t new_seq_num);
  void Reset();
  uint32_t time_stamp() const;
  uint16_t sequence_num() const;
  // Return true if at initial state.
  bool init() const;
  // Return true when sync is on - decode all layers.
  bool full_sync() const;

 private:
  void UpdateSyncState(const VCMFrameBuffer* frame);
  // Designated continuity functions
  bool ContinuousPictureId(int picture_id) const;
  bool ContinuousSeqNum(uint16_t seq_num) const;
  bool ContinuousLayer(int temporal_id, int tl0_pic_id) const;
  bool UsingPictureId(const VCMFrameBuffer* frame) const;

  // Keep state of last decoded frame.
  // TODO(mikhal/stefan): create designated classes to handle these types.
  uint16_t sequence_num_;
  uint32_t time_stamp_;
  int picture_id_;
  int temporal_id_;
  int tl0_pic_id_;
  bool full_sync_;  // Sync flag when temporal layers are used.
  bool init_;       // True until the first frame is recorded.
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_DECODING_STATE_H_
|
||||
@ -0,0 +1,462 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include "modules/video_coding/main/source/decoding_state.h"
|
||||
#include "modules/video_coding/main/source/frame_buffer.h"
|
||||
#include "gtest/gtest.h"
|
||||
#include "modules/video_coding/main/source/jitter_buffer_common.h"
|
||||
#include "modules/interface/module_common_types.h"
|
||||
#include "modules/video_coding/main/source/packet.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
|
||||
// Reset() must leave the state flagged as initial and fully synced.
TEST(TestDecodingState, Sanity) {
  VCMDecodingState dec_state;
  dec_state.Reset();
  EXPECT_TRUE(dec_state.init());
  EXPECT_TRUE(dec_state.full_sync());
}
|
||||
|
||||
// Exercises ContinuousFrame() through the full decision hierarchy:
// picture id, sequence numbers (incl. wrap), and temporal-layer ids
// (incl. tl0PicIdx wrap and sync loss/recovery).
TEST(TestDecodingState, FrameContinuity) {
  VCMDecodingState dec_state;
  // Check that makes decision based on correct method.
  VCMFrameBuffer frame;
  frame.SetState(kStateEmpty);
  VCMPacket* packet = new VCMPacket();
  packet->isFirstPacket = 1;
  packet->timestamp = 1;
  packet->seqNum = 0xffff;
  packet->frameType = kVideoFrameDelta;
  packet->codecSpecificHeader.codec = kRTPVideoVP8;
  packet->codecSpecificHeader.codecHeader.VP8.pictureId = 0x007F;
  frame.InsertPacket(*packet, 0, false, 0);
  // Should return true on init.
  dec_state.Reset();
  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
  dec_state.SetState(&frame);
  frame.Reset();
  // Use pictureId
  // NOTE(review): unlike the other blocks, frame is not re-marked
  // kStateEmpty after this Reset() - confirm intended.
  packet->codecSpecificHeader.codecHeader.VP8.pictureId = 0x0002;
  frame.InsertPacket(*packet, 0, false, 0);
  EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
  frame.Reset();
  frame.SetState(kStateEmpty);
  packet->codecSpecificHeader.codecHeader.VP8.pictureId = 0;
  packet->seqNum = 10;
  frame.InsertPacket(*packet, 0, false, 0);
  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));

  // Use sequence numbers.
  packet->codecSpecificHeader.codecHeader.VP8.pictureId = kNoPictureId;
  frame.Reset();
  frame.SetState(kStateEmpty);
  packet->seqNum = dec_state.sequence_num() - 1u;
  frame.InsertPacket(*packet, 0, false, 0);
  EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
  frame.Reset();
  frame.SetState(kStateEmpty);
  packet->seqNum = dec_state.sequence_num() + 1u;
  frame.InsertPacket(*packet, 0, false, 0);
  // Insert another packet to this frame
  packet->seqNum++;
  frame.InsertPacket(*packet, 0, false, 0);
  // Verify wrap.
  EXPECT_EQ(dec_state.sequence_num(), 0xffff);
  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
  dec_state.SetState(&frame);

  // Insert packet with temporal info.
  dec_state.Reset();
  frame.Reset();
  frame.SetState(kStateEmpty);
  packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
  packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
  packet->codecSpecificHeader.codecHeader.VP8.pictureId = 0;
  packet->seqNum = 1;
  packet->timestamp = 1;
  EXPECT_TRUE(dec_state.full_sync());
  frame.InsertPacket(*packet, 0, false, 0);
  dec_state.SetState(&frame);
  EXPECT_TRUE(dec_state.full_sync());
  frame.Reset();
  frame.SetState(kStateEmpty);
  // 1 layer up - still good.
  packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
  packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
  packet->codecSpecificHeader.codecHeader.VP8.pictureId = 1;
  packet->seqNum = 2;
  packet->timestamp = 2;
  frame.InsertPacket(*packet, 0, false, 0);
  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
  dec_state.SetState(&frame);
  EXPECT_TRUE(dec_state.full_sync());
  frame.Reset();
  frame.SetState(kStateEmpty);
  // Lost non-base layer packet => should update sync parameter.
  packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
  packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 3;
  packet->codecSpecificHeader.codecHeader.VP8.pictureId = 3;
  packet->seqNum = 4;
  packet->timestamp = 4;
  frame.InsertPacket(*packet, 0, false, 0);
  EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
  // Now insert the next non-base layer (belonging to a next tl0PicId).
  frame.Reset();
  frame.SetState(kStateEmpty);
  packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1;
  packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 2;
  packet->codecSpecificHeader.codecHeader.VP8.pictureId = 4;
  packet->seqNum = 5;
  packet->timestamp = 5;
  frame.InsertPacket(*packet, 0, false, 0);
  // Checking continuity and not updating the state - this should not trigger
  // an update of sync state.
  EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
  EXPECT_TRUE(dec_state.full_sync());
  // Next base layer (dropped interim non-base layers) - should update sync.
  frame.Reset();
  frame.SetState(kStateEmpty);
  packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1;
  packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
  packet->codecSpecificHeader.codecHeader.VP8.pictureId = 5;
  packet->seqNum = 6;
  packet->timestamp = 6;
  frame.InsertPacket(*packet, 0, false, 0);
  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
  dec_state.SetState(&frame);
  EXPECT_FALSE(dec_state.full_sync());

  // Check wrap for temporal layers.
  frame.Reset();
  frame.SetState(kStateEmpty);
  packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0x00FF;
  packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
  packet->codecSpecificHeader.codecHeader.VP8.pictureId = 6;
  packet->seqNum = 7;
  packet->timestamp = 7;
  frame.InsertPacket(*packet, 0, false, 0);
  dec_state.SetState(&frame);
  EXPECT_FALSE(dec_state.full_sync());
  frame.Reset();
  frame.SetState(kStateEmpty);
  packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0x0000;
  packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
  packet->codecSpecificHeader.codecHeader.VP8.pictureId = 7;
  packet->seqNum = 8;
  packet->timestamp = 8;
  frame.InsertPacket(*packet, 0, false, 0);
  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
  // The current frame is not continuous
  dec_state.SetState(&frame);
  EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
  delete packet;
}
|
||||
|
||||
// SetStateOneBack() must position the state one frame before the given
// frame (seq num wraps to 0xFFFF), making that same frame continuous.
TEST(TestDecodingState, SetStateOneBack) {
  VCMDecodingState dec_state;
  VCMFrameBuffer frame;
  frame.SetState(kStateEmpty);
  VCMPacket* packet = new VCMPacket();
  // Based on PictureId.
  packet->frameType = kVideoFrameDelta;
  packet->codecSpecificHeader.codec = kRTPVideoVP8;
  packet->timestamp = 0;
  packet->seqNum = 0;
  packet->codecSpecificHeader.codecHeader.VP8.pictureId = 0;
  packet->frameType = kVideoFrameDelta;
  frame.InsertPacket(*packet, 0, false, 0);
  dec_state.SetStateOneBack(&frame);
  EXPECT_EQ(dec_state.sequence_num(), 0xFFFF);
  // Check continuity.
  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));

  // Based on Temporal layers.
  packet->timestamp = 0;
  packet->seqNum = 0;
  packet->codecSpecificHeader.codecHeader.VP8.pictureId = kNoPictureId;
  packet->frameType = kVideoFrameDelta;
  packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
  packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
  frame.InsertPacket(*packet, 0, false, 0);
  dec_state.SetStateOneBack(&frame);
  // Check continuity
  EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
  delete packet;
}
|
||||
|
||||
// UpdateOldPacket() must advance the stored sequence number only for
// packets whose timestamp matches the last decoded frame.
TEST(TestDecodingState, UpdateOldPacket) {
  VCMDecodingState dec_state;
  // Update only if zero size and newer than previous.
  // Should only update if the timeStamp match.
  VCMFrameBuffer frame;
  frame.SetState(kStateEmpty);
  VCMPacket* packet = new VCMPacket();
  packet->timestamp = 1;
  packet->seqNum = 1;
  packet->frameType = kVideoFrameDelta;
  frame.InsertPacket(*packet, 0, false, 0);
  dec_state.SetState(&frame);
  EXPECT_EQ(dec_state.sequence_num(), 1);
  // Insert an empty packet that does not belong to the same frame.
  // => Sequence num should be the same.
  packet->timestamp = 2;
  dec_state.UpdateOldPacket(packet);
  EXPECT_EQ(dec_state.sequence_num(), 1);
  // Now insert empty packet belonging to the same frame.
  packet->timestamp = 1;
  packet->seqNum = 2;
  packet->frameType = kFrameEmpty;
  packet->sizeBytes = 0;
  dec_state.UpdateOldPacket(packet);
  EXPECT_EQ(dec_state.sequence_num(), 2);
  // Now insert delta packet belonging to the same frame.
  packet->timestamp = 1;
  packet->seqNum = 3;
  packet->frameType = kVideoFrameDelta;
  packet->sizeBytes = 1400;
  dec_state.UpdateOldPacket(packet);
  EXPECT_EQ(dec_state.sequence_num(), 3);
  // Insert a packet belonging to an older timestamp - should not update the
  // sequence number.
  packet->timestamp = 0;
  packet->seqNum = 4;
  packet->frameType = kFrameEmpty;
  packet->sizeBytes = 0;
  dec_state.UpdateOldPacket(packet);
  EXPECT_EQ(dec_state.sequence_num(), 3);

  delete packet;
}
|
||||
|
||||
TEST(TestDecodingState, MultiLayerBehavior) {
|
||||
// Identify sync/non-sync when more than one layer.
|
||||
VCMDecodingState dec_state;
|
||||
// Identify packets belonging to old frames/packets.
|
||||
// Set state for current frames.
|
||||
// tl0PicIdx 0, temporal id 0.
|
||||
VCMFrameBuffer frame;
|
||||
VCMPacket* packet = new VCMPacket();
|
||||
packet->frameType = kVideoFrameDelta;
|
||||
packet->codecSpecificHeader.codec = kRTPVideoVP8;
|
||||
frame.SetState(kStateEmpty);
|
||||
packet->timestamp = 0;
|
||||
packet->seqNum = 0;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 0;
|
||||
frame.InsertPacket(*packet, 0, false, 0);
|
||||
dec_state.SetState(&frame);
|
||||
// tl0PicIdx 0, temporal id 1.
|
||||
frame.Reset();
|
||||
frame.SetState(kStateEmpty);
|
||||
packet->timestamp = 1;
|
||||
packet->seqNum = 1;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 1;
|
||||
frame.InsertPacket(*packet, 0, false, 0);
|
||||
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
|
||||
dec_state.SetState(&frame);
|
||||
EXPECT_TRUE(dec_state.full_sync());
|
||||
// Lost tl0PicIdx 0, temporal id 2.
|
||||
// Insert tl0PicIdx 0, temporal id 3.
|
||||
frame.Reset();
|
||||
frame.SetState(kStateEmpty);
|
||||
packet->timestamp = 3;
|
||||
packet->seqNum = 3;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 3;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 3;
|
||||
frame.InsertPacket(*packet, 0, false, 0);
|
||||
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
|
||||
dec_state.SetState(&frame);
|
||||
EXPECT_FALSE(dec_state.full_sync());
|
||||
// Insert next base layer
|
||||
frame.Reset();
|
||||
frame.SetState(kStateEmpty);
|
||||
packet->timestamp = 4;
|
||||
packet->seqNum = 4;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 4;
|
||||
frame.InsertPacket(*packet, 0, false, 0);
|
||||
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
|
||||
dec_state.SetState(&frame);
|
||||
EXPECT_FALSE(dec_state.full_sync());
|
||||
// Insert key frame - should update sync value.
|
||||
// A key frame is always a base layer.
|
||||
frame.Reset();
|
||||
frame.SetState(kStateEmpty);
|
||||
packet->frameType = kVideoFrameKey;
|
||||
packet->isFirstPacket = 1;
|
||||
packet->timestamp = 5;
|
||||
packet->seqNum = 5;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 2;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 5;
|
||||
frame.InsertPacket(*packet, 0, false, 0);
|
||||
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
|
||||
dec_state.SetState(&frame);
|
||||
EXPECT_TRUE(dec_state.full_sync());
|
||||
// After sync, a continuous PictureId is required
|
||||
// (continuous base layer is not enough )
|
||||
frame.Reset();
|
||||
frame.SetState(kStateEmpty);
|
||||
packet->frameType = kVideoFrameDelta;
|
||||
packet->timestamp = 6;
|
||||
packet->seqNum = 6;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 3;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 6;
|
||||
frame.InsertPacket(*packet, 0, false, 0);
|
||||
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
|
||||
EXPECT_TRUE(dec_state.full_sync());
|
||||
frame.Reset();
|
||||
frame.SetState(kStateEmpty);
|
||||
packet->frameType = kVideoFrameDelta;
|
||||
packet->isFirstPacket = 1;
|
||||
packet->timestamp = 8;
|
||||
packet->seqNum = 8;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 4;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 8;
|
||||
frame.InsertPacket(*packet, 0, false, 0);
|
||||
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
|
||||
EXPECT_TRUE(dec_state.full_sync());
|
||||
dec_state.SetState(&frame);
|
||||
EXPECT_FALSE(dec_state.full_sync());
|
||||
|
||||
// Insert a non-ref frame - should update sync value.
|
||||
frame.Reset();
|
||||
frame.SetState(kStateEmpty);
|
||||
packet->frameType = kVideoFrameDelta;
|
||||
packet->isFirstPacket = 1;
|
||||
packet->timestamp = 9;
|
||||
packet->seqNum = 9;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 4;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 2;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 9;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.layerSync = true;
|
||||
frame.InsertPacket(*packet, 0, false, 0);
|
||||
dec_state.SetState(&frame);
|
||||
EXPECT_TRUE(dec_state.full_sync());
|
||||
|
||||
// The following test will verify the sync flag behavior after a loss.
|
||||
// Create the following pattern:
|
||||
// Update base layer, lose packet 1 (sync flag on, layer 2), insert packet 3
|
||||
// (sync flag on, layer 2) check continuity and sync flag after inserting
|
||||
// packet 2 (sync flag on, layer 1).
|
||||
// Base layer.
|
||||
frame.Reset();
|
||||
dec_state.Reset();
|
||||
frame.SetState(kStateEmpty);
|
||||
packet->frameType = kVideoFrameDelta;
|
||||
packet->isFirstPacket = 1;
|
||||
packet->markerBit = 1;
|
||||
packet->timestamp = 0;
|
||||
packet->seqNum = 0;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 0;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.layerSync = false;
|
||||
frame.InsertPacket(*packet, 0, false, 0);
|
||||
dec_state.SetState(&frame);
|
||||
EXPECT_TRUE(dec_state.full_sync());
|
||||
// Layer 2 - 2 packets (insert one, lose one).
|
||||
frame.Reset();
|
||||
frame.SetState(kStateEmpty);
|
||||
packet->frameType = kVideoFrameDelta;
|
||||
packet->isFirstPacket = 1;
|
||||
packet->markerBit = 0;
|
||||
packet->timestamp = 1;
|
||||
packet->seqNum = 1;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 2;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 1;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.layerSync = true;
|
||||
frame.InsertPacket(*packet, 0, false, 0);
|
||||
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
|
||||
// Layer 1
|
||||
frame.Reset();
|
||||
frame.SetState(kStateEmpty);
|
||||
packet->frameType = kVideoFrameDelta;
|
||||
packet->isFirstPacket = 1;
|
||||
packet->markerBit = 1;
|
||||
packet->timestamp = 2;
|
||||
packet->seqNum = 3;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 2;
|
||||
packet->codecSpecificHeader.codecHeader.VP8.layerSync = true;
|
||||
frame.InsertPacket(*packet, 0, false, 0);
|
||||
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
|
||||
EXPECT_TRUE(dec_state.full_sync());
|
||||
|
||||
delete packet;
|
||||
}
|
||||
|
||||
TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) {
|
||||
VCMDecodingState dec_state;
|
||||
VCMFrameBuffer frame;
|
||||
VCMPacket packet;
|
||||
frame.Reset();
|
||||
frame.SetState(kStateEmpty);
|
||||
packet.frameType = kVideoFrameKey;
|
||||
packet.codecSpecificHeader.codec = kRTPVideoVP8;
|
||||
packet.timestamp = 0;
|
||||
packet.seqNum = 0;
|
||||
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
|
||||
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
|
||||
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
|
||||
frame.InsertPacket(packet, 0, false, 0);
|
||||
dec_state.SetState(&frame);
|
||||
EXPECT_TRUE(dec_state.full_sync());
|
||||
|
||||
// Continuous sequence number but discontinuous picture id. This implies a
|
||||
// a loss and we have to fall back to only decoding the base layer.
|
||||
frame.Reset();
|
||||
frame.SetState(kStateEmpty);
|
||||
packet.frameType = kVideoFrameDelta;
|
||||
packet.timestamp += 3000;
|
||||
++packet.seqNum;
|
||||
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
|
||||
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 2;
|
||||
frame.InsertPacket(packet, 0, false, 0);
|
||||
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
|
||||
dec_state.SetState(&frame);
|
||||
EXPECT_FALSE(dec_state.full_sync());
|
||||
}
|
||||
|
||||
TEST(TestDecodingState, OldInput) {
|
||||
VCMDecodingState dec_state;
|
||||
// Identify packets belonging to old frames/packets.
|
||||
// Set state for current frames.
|
||||
VCMFrameBuffer frame;
|
||||
frame.SetState(kStateEmpty);
|
||||
VCMPacket* packet = new VCMPacket();
|
||||
packet->timestamp = 10;
|
||||
packet->seqNum = 1;
|
||||
frame.InsertPacket(*packet, 0, false, 0);
|
||||
dec_state.SetState(&frame);
|
||||
packet->timestamp = 9;
|
||||
EXPECT_TRUE(dec_state.IsOldPacket(packet));
|
||||
// Check for old frame
|
||||
frame.Reset();
|
||||
frame.InsertPacket(*packet, 0, false, 0);
|
||||
EXPECT_TRUE(dec_state.IsOldFrame(&frame));
|
||||
|
||||
|
||||
delete packet;
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
256
webrtc/modules/video_coding/main/source/encoded_frame.cc
Normal file
256
webrtc/modules/video_coding/main/source/encoded_frame.cc
Normal file
@ -0,0 +1,256 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "encoded_frame.h"
|
||||
#include "generic_encoder.h"
|
||||
#include "jitter_buffer_common.h"
|
||||
#include "video_coding_defines.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
VCMEncodedFrame::VCMEncodedFrame()
|
||||
:
|
||||
webrtc::EncodedImage(),
|
||||
_renderTimeMs(-1),
|
||||
_payloadType(0),
|
||||
_missingFrame(false),
|
||||
_codec(kVideoCodecUnknown),
|
||||
_fragmentation()
|
||||
{
|
||||
_codecSpecificInfo.codecType = kVideoCodecUnknown;
|
||||
}
|
||||
|
||||
VCMEncodedFrame::VCMEncodedFrame(const webrtc::EncodedImage& rhs)
|
||||
:
|
||||
webrtc::EncodedImage(rhs),
|
||||
_renderTimeMs(-1),
|
||||
_payloadType(0),
|
||||
_missingFrame(false),
|
||||
_codec(kVideoCodecUnknown),
|
||||
_fragmentation()
|
||||
{
|
||||
_codecSpecificInfo.codecType = kVideoCodecUnknown;
|
||||
_buffer = NULL;
|
||||
_size = 0;
|
||||
_length = 0;
|
||||
if (rhs._buffer != NULL)
|
||||
{
|
||||
VerifyAndAllocate(rhs._length);
|
||||
memcpy(_buffer, rhs._buffer, rhs._length);
|
||||
}
|
||||
}
|
||||
|
||||
VCMEncodedFrame::VCMEncodedFrame(const VCMEncodedFrame& rhs)
|
||||
:
|
||||
webrtc::EncodedImage(rhs),
|
||||
_renderTimeMs(rhs._renderTimeMs),
|
||||
_payloadType(rhs._payloadType),
|
||||
_missingFrame(rhs._missingFrame),
|
||||
_codecSpecificInfo(rhs._codecSpecificInfo),
|
||||
_codec(rhs._codec),
|
||||
_fragmentation() {
|
||||
_buffer = NULL;
|
||||
_size = 0;
|
||||
_length = 0;
|
||||
if (rhs._buffer != NULL)
|
||||
{
|
||||
VerifyAndAllocate(rhs._length);
|
||||
memcpy(_buffer, rhs._buffer, rhs._length);
|
||||
_length = rhs._length;
|
||||
}
|
||||
// Deep operator=
|
||||
_fragmentation = rhs._fragmentation;
|
||||
}
|
||||
|
||||
VCMEncodedFrame::~VCMEncodedFrame()
|
||||
{
|
||||
Free();
|
||||
}
|
||||
|
||||
void VCMEncodedFrame::Free()
|
||||
{
|
||||
Reset();
|
||||
if (_buffer != NULL)
|
||||
{
|
||||
delete [] _buffer;
|
||||
_buffer = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void VCMEncodedFrame::Reset()
|
||||
{
|
||||
_renderTimeMs = -1;
|
||||
_timeStamp = 0;
|
||||
_payloadType = 0;
|
||||
_frameType = kDeltaFrame;
|
||||
_encodedWidth = 0;
|
||||
_encodedHeight = 0;
|
||||
_completeFrame = false;
|
||||
_missingFrame = false;
|
||||
_length = 0;
|
||||
_codecSpecificInfo.codecType = kVideoCodecUnknown;
|
||||
_codec = kVideoCodecUnknown;
|
||||
}
|
||||
|
||||
void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header)
|
||||
{
|
||||
if (header)
|
||||
{
|
||||
switch (header->codec)
|
||||
{
|
||||
case kRTPVideoVP8:
|
||||
{
|
||||
if (_codecSpecificInfo.codecType != kVideoCodecVP8)
|
||||
{
|
||||
// This is the first packet for this frame.
|
||||
_codecSpecificInfo.codecSpecific.VP8.pictureId = -1;
|
||||
_codecSpecificInfo.codecSpecific.VP8.temporalIdx = 0;
|
||||
_codecSpecificInfo.codecSpecific.VP8.layerSync = false;
|
||||
_codecSpecificInfo.codecSpecific.VP8.keyIdx = -1;
|
||||
_codecSpecificInfo.codecType = kVideoCodecVP8;
|
||||
}
|
||||
_codecSpecificInfo.codecSpecific.VP8.nonReference =
|
||||
header->codecHeader.VP8.nonReference;
|
||||
if (header->codecHeader.VP8.pictureId != kNoPictureId)
|
||||
{
|
||||
_codecSpecificInfo.codecSpecific.VP8.pictureId =
|
||||
header->codecHeader.VP8.pictureId;
|
||||
}
|
||||
if (header->codecHeader.VP8.temporalIdx != kNoTemporalIdx)
|
||||
{
|
||||
_codecSpecificInfo.codecSpecific.VP8.temporalIdx =
|
||||
header->codecHeader.VP8.temporalIdx;
|
||||
_codecSpecificInfo.codecSpecific.VP8.layerSync =
|
||||
header->codecHeader.VP8.layerSync;
|
||||
}
|
||||
if (header->codecHeader.VP8.keyIdx != kNoKeyIdx)
|
||||
{
|
||||
_codecSpecificInfo.codecSpecific.VP8.keyIdx =
|
||||
header->codecHeader.VP8.keyIdx;
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
_codecSpecificInfo.codecType = kVideoCodecUnknown;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const RTPFragmentationHeader* VCMEncodedFrame::FragmentationHeader() const {
|
||||
return &_fragmentation;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMEncodedFrame::Store(VCMFrameStorageCallback& storeCallback) const
|
||||
{
|
||||
EncodedVideoData frameToStore;
|
||||
frameToStore.codec = _codec;
|
||||
if (_buffer != NULL)
|
||||
{
|
||||
frameToStore.VerifyAndAllocate(_length);
|
||||
memcpy(frameToStore.payloadData, _buffer, _length);
|
||||
frameToStore.payloadSize = _length;
|
||||
}
|
||||
frameToStore.completeFrame = _completeFrame;
|
||||
frameToStore.encodedWidth = _encodedWidth;
|
||||
frameToStore.encodedHeight = _encodedHeight;
|
||||
frameToStore.frameType = ConvertFrameType(_frameType);
|
||||
frameToStore.missingFrame = _missingFrame;
|
||||
frameToStore.payloadType = _payloadType;
|
||||
frameToStore.renderTimeMs = _renderTimeMs;
|
||||
frameToStore.timeStamp = _timeStamp;
|
||||
storeCallback.StoreReceivedFrame(frameToStore);
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMEncodedFrame::VerifyAndAllocate(const WebRtc_UWord32 minimumSize)
|
||||
{
|
||||
if(minimumSize > _size)
|
||||
{
|
||||
// create buffer of sufficient size
|
||||
WebRtc_UWord8* newBuffer = new WebRtc_UWord8[minimumSize];
|
||||
if (newBuffer == NULL)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
if(_buffer)
|
||||
{
|
||||
// copy old data
|
||||
memcpy(newBuffer, _buffer, _size);
|
||||
delete [] _buffer;
|
||||
}
|
||||
_buffer = newBuffer;
|
||||
_size = minimumSize;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
webrtc::FrameType VCMEncodedFrame::ConvertFrameType(VideoFrameType frameType)
|
||||
{
|
||||
switch(frameType)
|
||||
{
|
||||
case kKeyFrame:
|
||||
{
|
||||
return kVideoFrameKey;
|
||||
}
|
||||
case kDeltaFrame:
|
||||
{
|
||||
return kVideoFrameDelta;
|
||||
}
|
||||
case kGoldenFrame:
|
||||
{
|
||||
return kVideoFrameGolden;
|
||||
}
|
||||
case kAltRefFrame:
|
||||
{
|
||||
return kVideoFrameAltRef;
|
||||
}
|
||||
case kSkipFrame:
|
||||
{
|
||||
return kFrameEmpty;
|
||||
}
|
||||
default:
|
||||
{
|
||||
return kVideoFrameDelta;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
VideoFrameType VCMEncodedFrame::ConvertFrameType(webrtc::FrameType frame_type) {
|
||||
switch (frame_type) {
|
||||
case kVideoFrameKey:
|
||||
return kKeyFrame;
|
||||
case kVideoFrameDelta:
|
||||
return kDeltaFrame;
|
||||
case kVideoFrameGolden:
|
||||
return kGoldenFrame;
|
||||
case kVideoFrameAltRef:
|
||||
return kAltRefFrame;
|
||||
default:
|
||||
assert(false);
|
||||
return kDeltaFrame;
|
||||
}
|
||||
}
|
||||
|
||||
void VCMEncodedFrame::ConvertFrameTypes(
|
||||
const std::vector<webrtc::FrameType>& frame_types,
|
||||
std::vector<VideoFrameType>* video_frame_types) {
|
||||
assert(video_frame_types);
|
||||
video_frame_types->reserve(frame_types.size());
|
||||
for (size_t i = 0; i < frame_types.size(); ++i) {
|
||||
(*video_frame_types)[i] = ConvertFrameType(frame_types[i]);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
125
webrtc/modules/video_coding/main/source/encoded_frame.h
Normal file
125
webrtc/modules/video_coding/main/source/encoded_frame.h
Normal file
@ -0,0 +1,125 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "common_types.h"
|
||||
#include "common_video/interface/video_image.h"
|
||||
#include "modules/interface/module_common_types.h"
|
||||
#include "modules/video_coding/codecs/interface/video_codec_interface.h"
|
||||
#include "modules/video_coding/main/interface/video_coding_defines.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
class VCMEncodedFrame : protected EncodedImage
|
||||
{
|
||||
public:
|
||||
VCMEncodedFrame();
|
||||
VCMEncodedFrame(const webrtc::EncodedImage& rhs);
|
||||
VCMEncodedFrame(const VCMEncodedFrame& rhs);
|
||||
|
||||
~VCMEncodedFrame();
|
||||
/**
|
||||
* Delete VideoFrame and resets members to zero
|
||||
*/
|
||||
void Free();
|
||||
/**
|
||||
* Set render time in milliseconds
|
||||
*/
|
||||
void SetRenderTime(const WebRtc_Word64 renderTimeMs) {_renderTimeMs = renderTimeMs;}
|
||||
|
||||
/**
|
||||
* Set the encoded frame size
|
||||
*/
|
||||
void SetEncodedSize(WebRtc_UWord32 width, WebRtc_UWord32 height)
|
||||
{ _encodedWidth = width; _encodedHeight = height; }
|
||||
/**
|
||||
* Get the encoded image
|
||||
*/
|
||||
const webrtc::EncodedImage& EncodedImage() const
|
||||
{ return static_cast<const webrtc::EncodedImage&>(*this); }
|
||||
/**
|
||||
* Get pointer to frame buffer
|
||||
*/
|
||||
const WebRtc_UWord8* Buffer() const {return _buffer;}
|
||||
/**
|
||||
* Get frame length
|
||||
*/
|
||||
WebRtc_UWord32 Length() const {return _length;}
|
||||
/**
|
||||
* Get frame timestamp (90kHz)
|
||||
*/
|
||||
WebRtc_UWord32 TimeStamp() const {return _timeStamp;}
|
||||
/**
|
||||
* Get render time in milliseconds
|
||||
*/
|
||||
WebRtc_Word64 RenderTimeMs() const {return _renderTimeMs;}
|
||||
/**
|
||||
* Get frame type
|
||||
*/
|
||||
webrtc::FrameType FrameType() const {return ConvertFrameType(_frameType);}
|
||||
/**
|
||||
* True if this frame is complete, false otherwise
|
||||
*/
|
||||
bool Complete() const { return _completeFrame; }
|
||||
/**
|
||||
* True if there's a frame missing before this frame
|
||||
*/
|
||||
bool MissingFrame() const { return _missingFrame; }
|
||||
/**
|
||||
* Payload type of the encoded payload
|
||||
*/
|
||||
WebRtc_UWord8 PayloadType() const { return _payloadType; }
|
||||
/**
|
||||
* Get codec specific info.
|
||||
* The returned pointer is only valid as long as the VCMEncodedFrame
|
||||
* is valid. Also, VCMEncodedFrame owns the pointer and will delete
|
||||
* the object.
|
||||
*/
|
||||
const CodecSpecificInfo* CodecSpecific() const {return &_codecSpecificInfo;}
|
||||
|
||||
const RTPFragmentationHeader* FragmentationHeader() const;
|
||||
|
||||
WebRtc_Word32 Store(VCMFrameStorageCallback& storeCallback) const;
|
||||
|
||||
static webrtc::FrameType ConvertFrameType(VideoFrameType frameType);
|
||||
static VideoFrameType ConvertFrameType(webrtc::FrameType frameType);
|
||||
static void ConvertFrameTypes(
|
||||
const std::vector<webrtc::FrameType>& frame_types,
|
||||
std::vector<VideoFrameType>* video_frame_types);
|
||||
|
||||
protected:
|
||||
/**
|
||||
* Verifies that current allocated buffer size is larger than or equal to the input size.
|
||||
* If the current buffer size is smaller, a new allocation is made and the old buffer data
|
||||
* is copied to the new buffer.
|
||||
* Buffer size is updated to minimumSize.
|
||||
*/
|
||||
WebRtc_Word32 VerifyAndAllocate(const WebRtc_UWord32 minimumSize);
|
||||
|
||||
void Reset();
|
||||
|
||||
void CopyCodecSpecific(const RTPVideoHeader* header);
|
||||
|
||||
WebRtc_Word64 _renderTimeMs;
|
||||
WebRtc_UWord8 _payloadType;
|
||||
bool _missingFrame;
|
||||
CodecSpecificInfo _codecSpecificInfo;
|
||||
webrtc::VideoCodecType _codec;
|
||||
RTPFragmentationHeader _fragmentation;
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
|
||||
38742
webrtc/modules/video_coding/main/source/er_tables_xor.h
Normal file
38742
webrtc/modules/video_coding/main/source/er_tables_xor.h
Normal file
File diff suppressed because it is too large
Load Diff
63
webrtc/modules/video_coding/main/source/event.h
Normal file
63
webrtc/modules/video_coding/main/source/event.h
Normal file
@ -0,0 +1,63 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_EVENT_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_EVENT_H_
|
||||
|
||||
#include "event_wrapper.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
//#define EVENT_DEBUG
|
||||
|
||||
class VCMEvent : public EventWrapper
|
||||
{
|
||||
public:
|
||||
VCMEvent() : _event(*EventWrapper::Create()) {};
|
||||
|
||||
virtual ~VCMEvent() { delete &_event; };
|
||||
|
||||
/**
|
||||
* Release waiting threads
|
||||
*/
|
||||
bool Set() { return _event.Set(); };
|
||||
|
||||
bool Reset() { return _event.Reset(); };
|
||||
|
||||
/**
|
||||
* Wait for this event
|
||||
*/
|
||||
EventTypeWrapper Wait(unsigned long maxTime)
|
||||
{
|
||||
#ifdef EVENT_DEBUG
|
||||
return kEventTimeout;
|
||||
#else
|
||||
return _event.Wait(maxTime);
|
||||
#endif
|
||||
};
|
||||
|
||||
/**
|
||||
* Start a timer
|
||||
*/
|
||||
bool StartTimer(bool periodic, unsigned long time)
|
||||
{ return _event.StartTimer(periodic, time); };
|
||||
/**
|
||||
* Stop the timer
|
||||
*/
|
||||
bool StopTimer() { return _event.StopTimer(); };
|
||||
|
||||
private:
|
||||
EventWrapper& _event;
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_EVENT_H_
|
||||
60
webrtc/modules/video_coding/main/source/exp_filter.cc
Normal file
60
webrtc/modules/video_coding/main/source/exp_filter.cc
Normal file
@ -0,0 +1,60 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "exp_filter.h"
|
||||
|
||||
#include <math.h>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
void
|
||||
VCMExpFilter::Reset(float alpha)
|
||||
{
|
||||
_alpha = alpha;
|
||||
_filtered = -1.0;
|
||||
}
|
||||
|
||||
float
|
||||
VCMExpFilter::Apply(float exp, float sample)
|
||||
{
|
||||
if (_filtered == -1.0)
|
||||
{
|
||||
// Initialize filtered bit rates
|
||||
_filtered = sample;
|
||||
}
|
||||
else if (exp == 1.0)
|
||||
{
|
||||
_filtered = _alpha * _filtered + (1 - _alpha) * sample;
|
||||
}
|
||||
else
|
||||
{
|
||||
float alpha = pow(_alpha, exp);
|
||||
_filtered = alpha * _filtered + (1 - alpha) * sample;
|
||||
}
|
||||
if (_max != -1 && _filtered > _max)
|
||||
{
|
||||
_filtered = _max;
|
||||
}
|
||||
return _filtered;
|
||||
}
|
||||
|
||||
void
|
||||
VCMExpFilter::UpdateBase(float alpha)
|
||||
{
|
||||
_alpha = alpha;
|
||||
}
|
||||
|
||||
float
|
||||
VCMExpFilter::Value() const
|
||||
{
|
||||
return _filtered;
|
||||
}
|
||||
|
||||
}
|
||||
58
webrtc/modules/video_coding/main/source/exp_filter.h
Normal file
58
webrtc/modules/video_coding/main/source/exp_filter.h
Normal file
@ -0,0 +1,58 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_EXP_FILTER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_EXP_FILTER_H_
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
/**********************/
|
||||
/* ExpFilter class */
|
||||
/**********************/
|
||||
|
||||
class VCMExpFilter
|
||||
{
|
||||
public:
|
||||
VCMExpFilter(float alpha, float max = -1.0) : _alpha(alpha), _filtered(-1.0), _max(max) {}
|
||||
|
||||
// Resets the filter to its initial state, and resets alpha to the given value
|
||||
//
|
||||
// Input:
|
||||
// - alpha : the new value of the filter factor base.
|
||||
void Reset(float alpha);
|
||||
|
||||
// Applies the filter with the given exponent on the provided sample
|
||||
//
|
||||
// Input:
|
||||
// - exp : Exponent T in y(k) = alpha^T * y(k-1) + (1 - alpha^T) * x(k)
|
||||
// - sample : x(k) in the above filter equation
|
||||
float Apply(float exp, float sample);
|
||||
|
||||
// Return current filtered value: y(k)
|
||||
//
|
||||
// Return value : The current filter output
|
||||
float Value() const;
|
||||
|
||||
// Change the filter factor base
|
||||
//
|
||||
// Input:
|
||||
// - alpha : The new filter factor base.
|
||||
void UpdateBase(float alpha);
|
||||
|
||||
private:
|
||||
float _alpha; // Filter factor base
|
||||
float _filtered; // Current filter output
|
||||
const float _max;
|
||||
}; // end of ExpFilter class
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_EXP_FILTER_H_
|
||||
6481
webrtc/modules/video_coding/main/source/fec_tables_xor.h
Normal file
6481
webrtc/modules/video_coding/main/source/fec_tables_xor.h
Normal file
File diff suppressed because it is too large
Load Diff
410
webrtc/modules/video_coding/main/source/frame_buffer.cc
Normal file
410
webrtc/modules/video_coding/main/source/frame_buffer.cc
Normal file
@ -0,0 +1,410 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "frame_buffer.h"
|
||||
#include "packet.h"
|
||||
|
||||
#include <cassert>
|
||||
#include <string.h>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
VCMFrameBuffer::VCMFrameBuffer()
|
||||
:
|
||||
_state(kStateFree),
|
||||
_frameCounted(false),
|
||||
_nackCount(0),
|
||||
_latestPacketTimeMs(-1) {
|
||||
}
|
||||
|
||||
VCMFrameBuffer::~VCMFrameBuffer() {
|
||||
}
|
||||
|
||||
VCMFrameBuffer::VCMFrameBuffer(VCMFrameBuffer& rhs)
|
||||
:
|
||||
VCMEncodedFrame(rhs),
|
||||
_state(rhs._state),
|
||||
_frameCounted(rhs._frameCounted),
|
||||
_sessionInfo(),
|
||||
_nackCount(rhs._nackCount),
|
||||
_latestPacketTimeMs(rhs._latestPacketTimeMs)
|
||||
{
|
||||
_sessionInfo = rhs._sessionInfo;
|
||||
_sessionInfo.UpdateDataPointers(rhs._buffer, _buffer);
|
||||
}
|
||||
|
||||
webrtc::FrameType
|
||||
VCMFrameBuffer::FrameType() const
|
||||
{
|
||||
return _sessionInfo.FrameType();
|
||||
}
|
||||
|
||||
void
|
||||
VCMFrameBuffer::SetPreviousFrameLoss()
|
||||
{
|
||||
_sessionInfo.SetPreviousFrameLoss();
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMFrameBuffer::GetLowSeqNum() const
|
||||
{
|
||||
return _sessionInfo.LowSequenceNumber();
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMFrameBuffer::GetHighSeqNum() const
|
||||
{
|
||||
return _sessionInfo.HighSequenceNumber();
|
||||
}
|
||||
|
||||
int VCMFrameBuffer::PictureId() const {
|
||||
return _sessionInfo.PictureId();
|
||||
}
|
||||
|
||||
int VCMFrameBuffer::TemporalId() const {
|
||||
return _sessionInfo.TemporalId();
|
||||
}
|
||||
|
||||
bool VCMFrameBuffer::LayerSync() const {
|
||||
return _sessionInfo.LayerSync();
|
||||
}
|
||||
|
||||
int VCMFrameBuffer::Tl0PicId() const {
|
||||
return _sessionInfo.Tl0PicId();
|
||||
}
|
||||
|
||||
bool VCMFrameBuffer::NonReference() const {
|
||||
return _sessionInfo.NonReference();
|
||||
}
|
||||
|
||||
bool
|
||||
VCMFrameBuffer::IsSessionComplete() const
|
||||
{
|
||||
return _sessionInfo.complete();
|
||||
}
|
||||
|
||||
// Insert a packet into this frame. Grows the payload buffer on demand and
// delegates per-packet bookkeeping to the session info. Returns the
// resulting frame state (complete/decodable/incomplete) or an error code.
VCMFrameBufferEnum
VCMFrameBuffer::InsertPacket(const VCMPacket& packet, WebRtc_Word64 timeInMs,
                             bool enableDecodableState, WebRtc_UWord32 rttMS)
{
    if (_state == kStateDecoding)
    {
        // Do not insert packet
        return kNoError;
    }

    // Sanity to check if the frame has been freed. (Too old for example)
    if (_state == kStateFree)
    {
        return kStateError;
    }

    // is this packet part of this frame
    if (TimeStamp() && (TimeStamp() != packet.timestamp))
    {
        return kTimeStampError;
    }

    // sanity checks
    if (_size + packet.sizeBytes +
        (packet.insertStartCode ? kH264StartCodeLengthBytes : 0 )
        > kMaxJBFrameSizeBytes)
    {
        return kSizeError;
    }
    if (NULL == packet.dataPtr && packet.sizeBytes > 0)
    {
        return kSizeError;
    }
    if (packet.dataPtr != NULL)
    {
        _payloadType = packet.payloadType;
    }

    if (kStateEmpty == _state)
    {
        // First packet (empty and/or media) inserted into this frame.
        // store some info and set some initial values.
        _timeStamp = packet.timestamp;
        _codec = packet.codec;
        if (packet.frameType != kFrameEmpty)
        {
            // first media packet
            SetState(kStateIncomplete);
        }
    }

    // Grow the buffer in kBufferIncStepSizeBytes increments when the packet
    // (plus an optional H.264 start code) does not fit.
    WebRtc_UWord32 requiredSizeBytes = Length() + packet.sizeBytes +
        (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
    if (requiredSizeBytes >= _size)
    {
        const WebRtc_UWord8* prevBuffer = _buffer;
        const WebRtc_UWord32 increments = requiredSizeBytes /
                                          kBufferIncStepSizeBytes +
                                          (requiredSizeBytes %
                                           kBufferIncStepSizeBytes > 0);
        const WebRtc_UWord32 newSize = _size +
                                       increments * kBufferIncStepSizeBytes;
        if (newSize > kMaxJBFrameSizeBytes)
        {
            return kSizeError;
        }
        if (VerifyAndAllocate(newSize) == -1)
        {
            return kSizeError;
        }
        // The buffer may have been reallocated; rebase the session's
        // per-packet data pointers onto the new buffer.
        _sessionInfo.UpdateDataPointers(prevBuffer, _buffer);
    }

    CopyCodecSpecific(&packet.codecSpecificHeader);

    // InsertPacket returns the number of bytes inserted, or a negative
    // error code (-1: size error, -2: duplicate packet).
    int retVal = _sessionInfo.InsertPacket(packet, _buffer,
                                           enableDecodableState,
                                           rttMS);
    if (retVal == -1)
    {
        return kSizeError;
    }
    else if (retVal == -2)
    {
        return kDuplicatePacket;
    }
    // update length
    _length = Length() + static_cast<WebRtc_UWord32>(retVal);

    _latestPacketTimeMs = timeInMs;

    if (_sessionInfo.complete()) {
        return kCompleteSession;
    } else if (_sessionInfo.decodable()) {
        SetState(kStateDecodable);
        return kDecodableSession;
    } else {
        // this layer is not complete
        if (_state == kStateComplete) {
            // we already have a complete layer
            // wait for all independent layers belonging to the same frame
            _state = kStateIncomplete;
        }
    }
    return kIncomplete;
}
|
||||
|
||||
WebRtc_Word64
|
||||
VCMFrameBuffer::LatestPacketTimeMs() const
|
||||
{
|
||||
return _latestPacketTimeMs;
|
||||
}
|
||||
|
||||
// Build hard NACK list:Zero out all entries in list up to and including the
|
||||
// (first) entry equal to _lowSeqNum.
|
||||
int VCMFrameBuffer::BuildHardNackList(int* list, int num) {
|
||||
if (_sessionInfo.BuildHardNackList(list, num) != 0) {
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Build selective NACK list: Create a soft (selective) list of entries to zero
|
||||
// out up to and including the (first) entry equal to _lowSeqNum.
|
||||
int VCMFrameBuffer::BuildSoftNackList(int* list, int num, int rttMs) {
|
||||
return _sessionInfo.BuildSoftNackList(list, num, rttMs);
|
||||
}
|
||||
|
||||
void
|
||||
VCMFrameBuffer::IncrementNackCount()
|
||||
{
|
||||
_nackCount++;
|
||||
}
|
||||
|
||||
WebRtc_Word16
|
||||
VCMFrameBuffer::GetNackCount() const
|
||||
{
|
||||
return _nackCount;
|
||||
}
|
||||
|
||||
bool
|
||||
VCMFrameBuffer::HaveLastPacket() const
|
||||
{
|
||||
return _sessionInfo.HaveLastPacket();
|
||||
}
|
||||
|
||||
void
|
||||
VCMFrameBuffer::Reset()
|
||||
{
|
||||
_length = 0;
|
||||
_timeStamp = 0;
|
||||
_sessionInfo.Reset();
|
||||
_frameCounted = false;
|
||||
_payloadType = 0;
|
||||
_nackCount = 0;
|
||||
_latestPacketTimeMs = -1;
|
||||
_state = kStateFree;
|
||||
VCMEncodedFrame::Reset();
|
||||
}
|
||||
|
||||
// Makes sure the session contains a decodable stream.
|
||||
void
|
||||
VCMFrameBuffer::MakeSessionDecodable()
|
||||
{
|
||||
WebRtc_UWord32 retVal;
|
||||
#ifdef INDEPENDENT_PARTITIONS
|
||||
if (_codec != kVideoCodecVP8) {
|
||||
retVal = _sessionInfo.MakeDecodable();
|
||||
_length -= retVal;
|
||||
}
|
||||
#else
|
||||
retVal = _sessionInfo.MakeDecodable();
|
||||
_length -= retVal;
|
||||
#endif
|
||||
}
|
||||
|
||||
// Set state of frame. Asserts that the requested transition is legal in the
// frame-buffer state machine before committing it; a no-op when the frame is
// already in the requested state.
void
VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state)
{
    if (_state == state)
    {
        return;
    }
    switch (state)
    {
    case kStateFree:
        // Reset everything
        // We can go to this state from all other states.
        // The one setting the state to free must ensure
        // that the frame is removed from the timestamp
        // ordered frame list in the jb.
        Reset();
        break;

    case kStateIncomplete:
        // we can go to this state from state kStateEmpty
        assert(_state == kStateEmpty ||
               _state == kStateDecoding);

        // Do nothing, we received a packet
        break;

    case kStateComplete:
        assert(_state == kStateEmpty ||
               _state == kStateIncomplete ||
               _state == kStateDecodable);

        break;

    case kStateEmpty:
        assert(_state == kStateFree);
        // Do nothing
        break;

    case kStateDecoding:
        // A frame might have received empty packets, or media packets might
        // have been removed when making the frame decodable. The frame can
        // still be set to decodable since it can be used to inform the
        // decoder of a frame loss.
        assert(_state == kStateComplete || _state == kStateIncomplete ||
               _state == kStateDecodable || _state == kStateEmpty);
        // Transfer frame information to EncodedFrame and create any codec
        // specific information
        RestructureFrameInformation();
        break;

    case kStateDecodable:
        assert(_state == kStateEmpty ||
               _state == kStateIncomplete);
        break;
    }
    _state = state;
}
|
||||
|
||||
void
|
||||
VCMFrameBuffer::RestructureFrameInformation()
|
||||
{
|
||||
PrepareForDecode();
|
||||
_frameType = ConvertFrameType(_sessionInfo.FrameType());
|
||||
_completeFrame = _sessionInfo.complete();
|
||||
_missingFrame = _sessionInfo.PreviousFrameLoss();
|
||||
}
|
||||
|
||||
// Populate this frame from a frame previously persisted to storage.
// Returns VCM_OK on success, or VCM_MEMORY if the payload buffer could not
// be allocated.
WebRtc_Word32 VCMFrameBuffer::ExtractFromStorage(
    const EncodedVideoData& frameFromStorage) {
    _frameType = ConvertFrameType(frameFromStorage.frameType);
    _timeStamp = frameFromStorage.timeStamp;
    _payloadType = frameFromStorage.payloadType;
    _encodedWidth = frameFromStorage.encodedWidth;
    _encodedHeight = frameFromStorage.encodedHeight;
    _missingFrame = frameFromStorage.missingFrame;
    _completeFrame = frameFromStorage.completeFrame;
    _renderTimeMs = frameFromStorage.renderTimeMs;
    _codec = frameFromStorage.codec;
    const WebRtc_UWord8* prevBuffer = _buffer;
    if (VerifyAndAllocate(frameFromStorage.payloadSize) < 0) {
        return VCM_MEMORY;
    }
    // The buffer may have moved; rebase the session's packet pointers.
    _sessionInfo.UpdateDataPointers(prevBuffer, _buffer);
    memcpy(_buffer, frameFromStorage.payloadData, frameFromStorage.payloadSize);
    _length = frameFromStorage.payloadSize;
    return VCM_OK;
}
|
||||
|
||||
int VCMFrameBuffer::NotDecodablePackets() const {
|
||||
return _sessionInfo.packets_not_decodable();
|
||||
}
|
||||
|
||||
// Set counted status (as counted by JB or not)
|
||||
void VCMFrameBuffer::SetCountedFrame(bool frameCounted)
|
||||
{
|
||||
_frameCounted = frameCounted;
|
||||
}
|
||||
|
||||
bool VCMFrameBuffer::GetCountedFrame() const
|
||||
{
|
||||
return _frameCounted;
|
||||
}
|
||||
|
||||
// Get current state of frame
|
||||
VCMFrameBufferStateEnum
|
||||
VCMFrameBuffer::GetState() const
|
||||
{
|
||||
return _state;
|
||||
}
|
||||
|
||||
// Get current state of frame
|
||||
VCMFrameBufferStateEnum
|
||||
VCMFrameBuffer::GetState(WebRtc_UWord32& timeStamp) const
|
||||
{
|
||||
timeStamp = TimeStamp();
|
||||
return GetState();
|
||||
}
|
||||
|
||||
bool
|
||||
VCMFrameBuffer::IsRetransmitted() const
|
||||
{
|
||||
return _sessionInfo.session_nack();
|
||||
}
|
||||
|
||||
void
|
||||
VCMFrameBuffer::PrepareForDecode()
|
||||
{
|
||||
#ifdef INDEPENDENT_PARTITIONS
|
||||
if (_codec == kVideoCodecVP8)
|
||||
{
|
||||
_length =
|
||||
_sessionInfo.BuildVP8FragmentationHeader(_buffer, _length,
|
||||
&_fragmentation);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
}
|
||||
102
webrtc/modules/video_coding/main/source/frame_buffer.h
Normal file
102
webrtc/modules/video_coding/main/source/frame_buffer.h
Normal file
@ -0,0 +1,102 @@
|
||||
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_
#define WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_

#include "modules/interface/module_common_types.h"
#include "modules/video_coding/main/source/encoded_frame.h"
#include "modules/video_coding/main/source/jitter_buffer_common.h"
#include "modules/video_coding/main/source/session_info.h"
#include "typedefs.h"

namespace webrtc
{

// A frame under (re)assembly in the jitter buffer. Extends VCMEncodedFrame
// with per-packet session bookkeeping (VCMSessionInfo) and a state machine
// (VCMFrameBufferStateEnum) driven by the jitter buffer.
class VCMFrameBuffer : public VCMEncodedFrame
{
public:
    VCMFrameBuffer();
    virtual ~VCMFrameBuffer();

    VCMFrameBuffer(VCMFrameBuffer& rhs);

    virtual void Reset();

    // Inserts a packet into the frame. Returns the resulting frame state
    // (e.g. kCompleteSession, kIncomplete) or an error code.
    VCMFrameBufferEnum InsertPacket(const VCMPacket& packet,
                                    WebRtc_Word64 timeInMs,
                                    bool enableDecodableState,
                                    WebRtc_UWord32 rttMs);

    // State
    // Get current state of frame
    VCMFrameBufferStateEnum GetState() const;
    // Get current state and timestamp of frame
    VCMFrameBufferStateEnum GetState(WebRtc_UWord32& timeStamp) const;
    void SetState(VCMFrameBufferStateEnum state); // Set state of frame

    bool IsRetransmitted() const;
    bool IsSessionComplete() const;
    bool HaveLastPacket() const;
    // Makes sure the session contain a decodable stream.
    void MakeSessionDecodable();

    // Sequence numbers
    // Get lowest packet sequence number in frame
    WebRtc_Word32 GetLowSeqNum() const;
    // Get highest packet sequence number in frame
    WebRtc_Word32 GetHighSeqNum() const;

    int PictureId() const;
    int TemporalId() const;
    bool LayerSync() const;
    int Tl0PicId() const;
    bool NonReference() const;

    // Set counted status (as counted by JB or not)
    void SetCountedFrame(bool frameCounted);
    bool GetCountedFrame() const;

    // NACK - Building the NACK lists.
    // Build hard NACK list: Zero out all entries in list up to and including
    // _lowSeqNum.
    int BuildHardNackList(int* list, int num);
    // Build soft NACK list: Zero out only a subset of the packets, discard
    // empty packets.
    int BuildSoftNackList(int* list, int num, int rttMs);
    void IncrementNackCount();
    WebRtc_Word16 GetNackCount() const;

    // Local-clock time (ms) when the most recent packet was inserted.
    WebRtc_Word64 LatestPacketTimeMs() const;

    webrtc::FrameType FrameType() const;
    void SetPreviousFrameLoss();

    // Populate this frame from a frame read back from storage.
    WebRtc_Word32 ExtractFromStorage(const EncodedVideoData& frameFromStorage);

    // The number of packets discarded because the decoder can't make use of
    // them.
    int NotDecodablePackets() const;

protected:
    void RestructureFrameInformation();
    void PrepareForDecode();

private:
    VCMFrameBufferStateEnum    _state;         // Current state of the frame
    bool                       _frameCounted;  // Was this frame counted by JB?
    VCMSessionInfo             _sessionInfo;
    WebRtc_UWord16             _nackCount;
    WebRtc_Word64              _latestPacketTimeMs;
};

} // namespace webrtc

#endif // WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_
|
||||
350
webrtc/modules/video_coding/main/source/frame_dropper.cc
Normal file
350
webrtc/modules/video_coding/main/source/frame_dropper.cc
Normal file
@ -0,0 +1,350 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "frame_dropper.h"
|
||||
#include "internal_defines.h"
|
||||
#include "trace.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
VCMFrameDropper::VCMFrameDropper(WebRtc_Word32 vcmId)
    : _vcmId(vcmId),
      _keyFrameSizeAvgKbits(0.9f),
      _keyFrameRatio(0.99f),
      _dropRatio(0.9f, 0.96f) {
    // All remaining members are initialized by Reset().
    Reset();
}
|
||||
|
||||
// Restore the dropper to its initial state, including the exponential
// filter weights.
void
VCMFrameDropper::Reset()
{
    _keyFrameRatio.Reset(0.99f);
    _keyFrameRatio.Apply(1.0f, 1.0f/300.0f); // 1 key frame every 10th second in 30 fps
    _keyFrameSizeAvgKbits.Reset(0.9f);
    _keyFrameCount = 0;
    _accumulator = 0.0f;
    _accumulatorMax = 150.0f; // assume 300 kb/s and 0.5 s window
    _targetBitRate = 300.0f;
    _incoming_frame_rate = 30;
    _keyFrameSpreadFrames = 0.5f * _incoming_frame_rate;
    _dropNext = false;
    _dropRatio.Reset(0.9f);
    _dropRatio.Apply(0.0f, 0.0f); // Initialize to 0
    _dropCount = 0;
    _windowSize = 0.5f; // leaky-bucket window, in seconds
    _wasBelowMax = true;
    _enabled = true;
    _fastMode = false; // start with normal (non-aggressive) mode
    // Cap for the encoder buffer level/accumulator, in secs.
    _cap_buffer_size = 3.0f;
    // Cap on maximum amount of dropped frames between kept frames, in secs.
    _max_time_drops = 4.0f;
}
|
||||
|
||||
void
|
||||
VCMFrameDropper::Enable(bool enable)
|
||||
{
|
||||
_enabled = enable;
|
||||
}
|
||||
|
||||
// Feed the dropper with the size of the latest encoded frame. In normal
// (non-fast) mode, key frames are not charged at once: their average size
// is amortized over the following frames via _keyFrameSizeAvgKbits,
// _keyFrameRatio and _keyFrameCount (consumed in Leak()).
void
VCMFrameDropper::Fill(WebRtc_UWord32 frameSizeBytes, bool deltaFrame)
{
    if (!_enabled)
    {
        return;
    }
    float frameSizeKbits = 8.0f * static_cast<float>(frameSizeBytes) / 1000.0f;
    if (!deltaFrame && !_fastMode) // fast mode does not treat key-frames any different
    {
        _keyFrameSizeAvgKbits.Apply(1, frameSizeKbits);
        _keyFrameRatio.Apply(1.0, 1.0);
        if (frameSizeKbits > _keyFrameSizeAvgKbits.Value())
        {
            // Remove the average key frame size since we
            // compensate for key frames when adding delta
            // frames.
            frameSizeKbits -= _keyFrameSizeAvgKbits.Value();
        }
        else
        {
            // Shouldn't be negative, so zero is the lower bound.
            frameSizeKbits = 0;
        }
        if (_keyFrameRatio.Value() > 1e-5 && 1 / _keyFrameRatio.Value() < _keyFrameSpreadFrames)
        {
            // We are sending key frames more often than our upper bound for
            // how much we allow the key frame compensation to be spread
            // out in time. Therefor we must use the key frame ratio rather
            // than keyFrameSpreadFrames.
            _keyFrameCount = static_cast<WebRtc_Word32>(1 / _keyFrameRatio.Value() + 0.5);
        }
        else
        {
            // Compensate for the key frame the following frames
            _keyFrameCount = static_cast<WebRtc_Word32>(_keyFrameSpreadFrames + 0.5);
        }
    }
    else
    {
        // Decrease the keyFrameRatio
        _keyFrameRatio.Apply(1.0, 0.0);
    }
    // Change the level of the accumulator (bucket)
    _accumulator += frameSizeKbits;
    CapAccumulator();
}
|
||||
|
||||
// Drain the leaky bucket by the per-frame bit budget, applying any pending
// (spread-out) key frame compensation, then recompute the drop ratio.
// Should be called once per expected frame interval.
void
VCMFrameDropper::Leak(WebRtc_UWord32 inputFrameRate)
{
    if (!_enabled)
    {
        return;
    }
    if (inputFrameRate < 1)
    {
        return;
    }
    if (_targetBitRate < 0.0f)
    {
        return;
    }
    _keyFrameSpreadFrames = 0.5f * inputFrameRate;
    // T is the expected bits per frame (target). If all frames were the same size,
    // we would get T bits per frame. Notice that T is also weighted to be able to
    // force a lower frame rate if wanted.
    float T = _targetBitRate / inputFrameRate;
    if (_keyFrameCount > 0)
    {
        // Perform the key frame compensation
        if (_keyFrameRatio.Value() > 0 && 1 / _keyFrameRatio.Value() < _keyFrameSpreadFrames)
        {
            T -= _keyFrameSizeAvgKbits.Value() * _keyFrameRatio.Value();
        }
        else
        {
            T -= _keyFrameSizeAvgKbits.Value() / _keyFrameSpreadFrames;
        }
        _keyFrameCount--;
    }
    _accumulator -= T;
    UpdateRatio();
}
|
||||
|
||||
void
|
||||
VCMFrameDropper::UpdateNack(WebRtc_UWord32 nackBytes)
|
||||
{
|
||||
if (!_enabled)
|
||||
{
|
||||
return;
|
||||
}
|
||||
_accumulator += static_cast<float>(nackBytes) * 8.0f / 1000.0f;
|
||||
}
|
||||
|
||||
void
|
||||
VCMFrameDropper::FillBucket(float inKbits, float outKbits)
|
||||
{
|
||||
_accumulator += (inKbits - outKbits);
|
||||
}
|
||||
|
||||
// Recompute the drop ratio from the current bucket fill level and decide
// whether the next frame should be dropped.
void
VCMFrameDropper::UpdateRatio()
{
    if (_accumulator > 1.3f * _accumulatorMax)
    {
        // Too far above accumulator max, react faster
        _dropRatio.UpdateBase(0.8f);
    }
    else
    {
        // Go back to normal reaction
        _dropRatio.UpdateBase(0.9f);
    }
    if (_accumulator > _accumulatorMax)
    {
        // We are above accumulator max, and should ideally
        // drop a frame. Increase the dropRatio and drop
        // the frame later.
        if (_wasBelowMax)
        {
            _dropNext = true;
        }
        if (_fastMode)
        {
            // always drop in aggressive mode
            _dropNext = true;
        }

        _dropRatio.Apply(1.0f, 1.0f);
        _dropRatio.UpdateBase(0.9f);
    }
    else
    {
        _dropRatio.Apply(1.0f, 0.0f);
    }
    if (_accumulator < 0.0f)
    {
        _accumulator = 0.0f;
    }
    _wasBelowMax = _accumulator < _accumulatorMax;
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId), "FrameDropper: dropRatio = %f accumulator = %f, accumulatorMax = %f", _dropRatio.Value(), _accumulator, _accumulatorMax);
}
|
||||
|
||||
// This function signals when to drop frames to the caller. It makes use of the dropRatio
// to smooth out the drops over time.
// _dropCount's sign tracks which regime we are in: positive while dropping
// several frames per kept frame (ratio >= 0.5), negative while keeping
// several frames per dropped frame (0 < ratio < 0.5).
bool
VCMFrameDropper::DropFrame()
{
    if (!_enabled)
    {
        return false;
    }
    if (_dropNext)
    {
        _dropNext = false;
        _dropCount = 0;
    }

    if (_dropRatio.Value() >= 0.5f) // Drops per keep
    {
        // limit is the number of frames we should drop between each kept frame
        // to keep our drop ratio. limit is positive in this case.
        float denom = 1.0f - _dropRatio.Value();
        if (denom < 1e-5)
        {
            denom = (float)1e-5;
        }
        WebRtc_Word32 limit = static_cast<WebRtc_Word32>(1.0f / denom - 1.0f + 0.5f);
        // Put a bound on the max amount of dropped frames between each kept
        // frame, in terms of frame rate and window size (secs).
        int max_limit = static_cast<int>(_incoming_frame_rate *
                                         _max_time_drops);
        if (limit > max_limit) {
            limit = max_limit;
        }
        if (_dropCount < 0)
        {
            // Reset the _dropCount since it was negative and should be positive.
            if (_dropRatio.Value() > 0.4f)
            {
                _dropCount = -_dropCount;
            }
            else
            {
                _dropCount = 0;
            }
        }
        if (_dropCount < limit)
        {
            // As long we are below the limit we should drop frames.
            _dropCount++;
            return true;
        }
        else
        {
            // Only when we reset _dropCount a frame should be kept.
            _dropCount = 0;
            return false;
        }
    }
    else if (_dropRatio.Value() > 0.0f && _dropRatio.Value() < 0.5f) // Keeps per drop
    {
        // limit is the number of frames we should keep between each drop
        // in order to keep the drop ratio. limit is negative in this case,
        // and the _dropCount is also negative.
        float denom = _dropRatio.Value();
        if (denom < 1e-5)
        {
            denom = (float)1e-5;
        }
        WebRtc_Word32 limit = -static_cast<WebRtc_Word32>(1.0f / denom - 1.0f + 0.5f);
        if (_dropCount > 0)
        {
            // Reset the _dropCount since we have a positive
            // _dropCount, and it should be negative.
            if (_dropRatio.Value() < 0.6f)
            {
                _dropCount = -_dropCount;
            }
            else
            {
                _dropCount = 0;
            }
        }
        if (_dropCount > limit)
        {
            if (_dropCount == 0)
            {
                // Drop frames when we reset _dropCount.
                _dropCount--;
                return true;
            }
            else
            {
                // Keep frames as long as we haven't reached limit.
                _dropCount--;
                return false;
            }
        }
        else
        {
            _dropCount = 0;
            return false;
        }
    }
    // Drop ratio is zero (or out of range): never drop.
    _dropCount = 0;
    return false;

    // A simpler version, unfiltered and quicker
    //bool dropNext = _dropNext;
    //_dropNext = false;
    //return dropNext;
}
|
||||
|
||||
// Set the target bit rate (kbit/s) and the camera frame rate. Rescales the
// bucket level when the bucket's maximum shrinks so the fill ratio is kept.
void
VCMFrameDropper::SetRates(float bitRate, float incoming_frame_rate)
{
    // Bit rate of -1 means infinite bandwidth.
    _accumulatorMax = bitRate * _windowSize; // bitRate * windowSize (in seconds)
    if (_targetBitRate > 0.0f && bitRate < _targetBitRate && _accumulator > _accumulatorMax)
    {
        // Rescale the accumulator level if the accumulator max decreases
        _accumulator = bitRate / _targetBitRate * _accumulator;
    }
    _targetBitRate = bitRate;
    CapAccumulator();
    _incoming_frame_rate = incoming_frame_rate;
}
|
||||
|
||||
float
|
||||
VCMFrameDropper::ActualFrameRate(WebRtc_UWord32 inputFrameRate) const
|
||||
{
|
||||
if (!_enabled)
|
||||
{
|
||||
return static_cast<float>(inputFrameRate);
|
||||
}
|
||||
return inputFrameRate * (1.0f - _dropRatio.Value());
|
||||
}
|
||||
|
||||
// Put a cap on the accumulator, i.e., don't let it grow beyond some level.
|
||||
// This is a temporary fix for screencasting where very large frames from
|
||||
// encoder will cause very slow response (too many frame drops).
|
||||
void VCMFrameDropper::CapAccumulator() {
|
||||
float max_accumulator = _targetBitRate * _cap_buffer_size;
|
||||
if (_accumulator > max_accumulator) {
|
||||
_accumulator = max_accumulator;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
98
webrtc/modules/video_coding/main/source/frame_dropper.h
Normal file
98
webrtc/modules/video_coding/main/source/frame_dropper.h
Normal file
@ -0,0 +1,98 @@
|
||||
/*
 *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_VIDEO_CODING_FRAME_DROPPER_H_
#define WEBRTC_MODULES_VIDEO_CODING_FRAME_DROPPER_H_

#include "exp_filter.h"
#include "typedefs.h"

namespace webrtc
{

/******************************/
/* VCMFrameDropper class      */
/******************************/
// The Frame Dropper implements a variant of the leaky bucket algorithm
// for keeping track of when to drop frames to avoid bit rate
// over use when the encoder can't keep its bit rate.
class VCMFrameDropper
{
public:
    VCMFrameDropper(WebRtc_Word32 vcmId = 0);
    // Resets the FrameDropper to its initial state.
    // This means that the frameRateWeight is set to its
    // default value as well.
    void Reset();

    void Enable(bool enable);
    // Answers the question if it's time to drop a frame
    // if we want to reach a given frame rate. Must be
    // called for every frame.
    //
    // Return value     : True if we should drop the current frame
    bool DropFrame();
    // Updates the FrameDropper with the size of the latest encoded
    // frame. The FrameDropper calculates a new drop ratio (can be
    // seen as the probability to drop a frame) and updates its
    // internal statistics.
    //
    // Input:
    //          - frameSizeBytes    : The size of the latest frame
    //                                returned from the encoder.
    //          - deltaFrame        : True if the encoder returned
    //                                a delta frame, false if it
    //                                returned a key frame.
    void Fill(WebRtc_UWord32 frameSizeBytes, bool deltaFrame);

    // Leaks the bucket at the target rate; call once per frame interval.
    void Leak(WebRtc_UWord32 inputFrameRate);

    // Charges retransmitted (NACK) bytes to the bucket.
    void UpdateNack(WebRtc_UWord32 nackBytes);

    // Sets the target bit rate and the frame rate produced by
    // the camera.
    //
    // Input:
    //          - bitRate       : The target bit rate
    void SetRates(float bitRate, float incoming_frame_rate);

    // Return value     : The current average frame rate produced
    //                    if the DropFrame() function is used as
    //                    instruction of when to drop frames.
    float ActualFrameRate(WebRtc_UWord32 inputFrameRate) const;

private:
    void FillBucket(float inKbits, float outKbits);
    void UpdateRatio();
    void CapAccumulator();

    WebRtc_Word32     _vcmId;                 // Owner id used in trace output
    VCMExpFilter      _keyFrameSizeAvgKbits;  // Filtered average key frame size
    VCMExpFilter      _keyFrameRatio;         // Filtered key frame frequency
    float             _keyFrameSpreadFrames;  // Frames over which to spread key frame cost
    WebRtc_Word32     _keyFrameCount;         // Frames left of pending key frame compensation
    float             _accumulator;           // Leaky bucket level (kbits)
    float             _accumulatorMax;        // Bucket capacity (kbits)
    float             _targetBitRate;         // Target bit rate (kbit/s)
    bool              _dropNext;              // Force-drop the next frame
    VCMExpFilter      _dropRatio;             // Filtered drop probability
    WebRtc_Word32     _dropCount;             // Signed drop/keep counter (see DropFrame())
    float             _windowSize;            // Bucket window, in seconds
    float             _incoming_frame_rate;   // Camera frame rate
    bool              _wasBelowMax;           // Bucket was below max on previous update
    bool              _enabled;
    bool              _fastMode;              // Aggressive mode: no key frame amortization
    float             _cap_buffer_size;       // Accumulator cap, in seconds
    float             _max_time_drops;        // Max consecutive-drop span, in seconds
}; // end of VCMFrameDropper class

} // namespace webrtc

#endif // WEBRTC_MODULES_VIDEO_CODING_FRAME_DROPPER_H_
|
||||
221
webrtc/modules/video_coding/main/source/generic_decoder.cc
Normal file
221
webrtc/modules/video_coding/main/source/generic_decoder.cc
Normal file
@ -0,0 +1,221 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "video_coding.h"
|
||||
#include "trace.h"
|
||||
#include "generic_decoder.h"
|
||||
#include "internal_defines.h"
|
||||
#include "tick_time_base.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
VCMDecodedFrameCallback::VCMDecodedFrameCallback(VCMTiming& timing,
                                                TickTimeBase* clock)
    : _critSect(CriticalSectionWrapper::CreateCriticalSection()),
      _clock(clock),
      _receiveCallback(NULL),
      _timing(timing),
      _timestampMap(kDecoderFrameMemoryLength),
      _lastReceivedPictureID(0) {
}
|
||||
|
||||
VCMDecodedFrameCallback::~VCMDecodedFrameCallback()
|
||||
{
|
||||
delete _critSect;
|
||||
}
|
||||
|
||||
void VCMDecodedFrameCallback::SetUserReceiveCallback(
|
||||
VCMReceiveCallback* receiveCallback)
|
||||
{
|
||||
CriticalSectionScoped cs(_critSect);
|
||||
_receiveCallback = receiveCallback;
|
||||
}
|
||||
|
||||
// Called by the decoder when a frame has been decoded. Looks up the frame's
// bookkeeping by timestamp, records the decode time and hands the frame to
// the registered receive callback (if any).
WebRtc_Word32 VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage)
{
    // TODO(holmer): We should improve this so that we can handle multiple
    // callbacks from one call to Decode().
    CriticalSectionScoped cs(_critSect);
    VCMFrameInformation* frameInfo = static_cast<VCMFrameInformation*>(
        _timestampMap.Pop(decodedImage.TimeStamp()));
    if (frameInfo == NULL)
    {
        // The map should never be empty or full if this callback is called.
        return WEBRTC_VIDEO_CODEC_ERROR;
    }

    _timing.StopDecodeTimer(
        decodedImage.TimeStamp(),
        frameInfo->decodeStartTimeMs,
        _clock->MillisecondTimestamp());

    if (_receiveCallback != NULL)
    {
        // Swap instead of copy to avoid duplicating the frame data.
        _frame.SwapFrame(decodedImage);
        _frame.SetRenderTime(frameInfo->renderTimeMs);
        WebRtc_Word32 callbackReturn = _receiveCallback->FrameToRender(_frame);
        if (callbackReturn < 0)
        {
            // A render failure is logged but not propagated to the decoder.
            WEBRTC_TRACE(webrtc::kTraceDebug,
                         webrtc::kTraceVideoCoding,
                         -1,
                         "Render callback returned error: %d", callbackReturn);
        }
    }
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMDecodedFrameCallback::ReceivedDecodedReferenceFrame(
|
||||
const WebRtc_UWord64 pictureId)
|
||||
{
|
||||
CriticalSectionScoped cs(_critSect);
|
||||
if (_receiveCallback != NULL)
|
||||
{
|
||||
return _receiveCallback->ReceivedDecodedReferenceFrame(pictureId);
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMDecodedFrameCallback::ReceivedDecodedFrame(const WebRtc_UWord64 pictureId)
|
||||
{
|
||||
_lastReceivedPictureID = pictureId;
|
||||
return 0;
|
||||
}
|
||||
|
||||
WebRtc_UWord64 VCMDecodedFrameCallback::LastReceivedPictureID() const
|
||||
{
|
||||
return _lastReceivedPictureID;
|
||||
}
|
||||
|
||||
// Associate |frameInfo| with |timestamp| so Decoded() can find it later.
WebRtc_Word32 VCMDecodedFrameCallback::Map(WebRtc_UWord32 timestamp,
                                           VCMFrameInformation* frameInfo) {
    CriticalSectionScoped cs(_critSect);
    return _timestampMap.Add(timestamp, frameInfo);
}

// Remove the mapping for |timestamp|; fails if no such entry exists.
WebRtc_Word32 VCMDecodedFrameCallback::Pop(WebRtc_UWord32 timestamp) {
    CriticalSectionScoped cs(_critSect);
    return (_timestampMap.Pop(timestamp) == NULL) ? VCM_GENERAL_ERROR : VCM_OK;
}
|
||||
|
||||
VCMGenericDecoder::VCMGenericDecoder(VideoDecoder& decoder, WebRtc_Word32 id,
                                     bool isExternal)
    : _id(id),
      _callback(NULL),
      _frameInfos(),
      _nextFrameInfoIdx(0),
      _decoder(decoder),
      _codecType(kVideoCodecUnknown),
      _isExternal(isExternal),
      _requireKeyFrame(false),
      _keyFrameDecoded(false) {
}
|
||||
|
||||
VCMGenericDecoder::~VCMGenericDecoder()
|
||||
{
|
||||
}
|
||||
|
||||
WebRtc_Word32 VCMGenericDecoder::InitDecode(const VideoCodec* settings,
|
||||
WebRtc_Word32 numberOfCores,
|
||||
bool requireKeyFrame)
|
||||
{
|
||||
_requireKeyFrame = requireKeyFrame;
|
||||
_keyFrameDecoded = false;
|
||||
_codecType = settings->codecType;
|
||||
|
||||
return _decoder.InitDecode(settings, numberOfCores);
|
||||
}
|
||||
|
||||
// Decode one encoded frame. Registers per-frame timing information (keyed by
// timestamp) before calling the wrapped decoder, so the decode-complete
// callback can look it up; unregisters it again on error / no-output paths.
WebRtc_Word32 VCMGenericDecoder::Decode(const VCMEncodedFrame& frame,
                                        int64_t nowMs)
{
    if (_requireKeyFrame &&
        !_keyFrameDecoded &&
        frame.FrameType() != kVideoFrameKey &&
        frame.FrameType() != kVideoFrameGolden)
    {
        // Require key frame is enabled, meaning that one key frame must be decoded
        // before we can decode delta frames.
        return VCM_CODEC_ERROR;
    }
    _frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
    _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
    _callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);

    WEBRTC_TRACE(webrtc::kTraceDebug,
                 webrtc::kTraceVideoCoding,
                 VCMId(_id),
                 "Decoding timestamp %u", frame.TimeStamp());

    // _frameInfos is used as a ring buffer of kDecoderFrameMemoryLength slots.
    _nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;

    WebRtc_Word32 ret = _decoder.Decode(frame.EncodedImage(),
                                        frame.MissingFrame(),
                                        frame.FragmentationHeader(),
                                        frame.CodecSpecific(),
                                        frame.RenderTimeMs());

    if (ret < WEBRTC_VIDEO_CODEC_OK)
    {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, VCMId(_id), "Decoder error: %d\n", ret);
        // No decoded-frame callback will arrive for this frame; unregister it.
        _callback->Pop(frame.TimeStamp());
        return ret;
    }
    else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
             ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI)
    {
        // No output
        _callback->Pop(frame.TimeStamp());
    }
    // Update the key frame decoded variable so that we know whether or not we've decoded a key frame since reset.
    _keyFrameDecoded = (frame.FrameType() == kVideoFrameKey || frame.FrameType() == kVideoFrameGolden);
    return ret;
}
|
||||
|
||||
// Frees decoder resources. Also clears the key-frame gate so that, when
// key-frame gating is enabled, a fresh key frame is required after re-init.
WebRtc_Word32
VCMGenericDecoder::Release()
{
    _keyFrameDecoded = false;
    return _decoder.Release();
}
|
||||
|
||||
// Resets decoder state for a new call; the key-frame gate is re-armed.
WebRtc_Word32 VCMGenericDecoder::Reset()
{
    _keyFrameDecoded = false;
    return _decoder.Reset();
}
|
||||
|
||||
// Forwards out-of-band codec configuration data (e.g. from call setup)
// straight to the wrapped decoder.
WebRtc_Word32 VCMGenericDecoder::SetCodecConfigParameters(const WebRtc_UWord8* buffer, WebRtc_Word32 size)
{
    return _decoder.SetCodecConfigParameters(buffer, size);
}
|
||||
|
||||
// Stores |callback| locally (used by Decode() for Map/Pop bookkeeping) and
// registers it with the wrapped decoder for decoded-image delivery.
WebRtc_Word32 VCMGenericDecoder::RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback)
{
    _callback = callback;
    return _decoder.RegisterDecodeCompleteCallback(callback);
}
|
||||
|
||||
// True if this wraps an externally registered (application-provided) decoder.
bool VCMGenericDecoder::External() const
{
    return _isExternal;
}
|
||||
|
||||
} // namespace
|
||||
121
webrtc/modules/video_coding/main/source/generic_decoder.h
Normal file
121
webrtc/modules/video_coding/main/source/generic_decoder.h
Normal file
@ -0,0 +1,121 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
|
||||
|
||||
#include "timing.h"
|
||||
#include "timestamp_map.h"
|
||||
#include "video_codec_interface.h"
|
||||
#include "encoded_frame.h"
|
||||
#include "module_common_types.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
class VCMReceiveCallback;
|
||||
|
||||
enum { kDecoderFrameMemoryLength = 10 };
|
||||
|
||||
struct VCMFrameInformation
|
||||
{
|
||||
WebRtc_Word64 renderTimeMs;
|
||||
WebRtc_Word64 decodeStartTimeMs;
|
||||
void* userData;
|
||||
};
|
||||
|
||||
// Receives decoded images from the decoder, looks up the per-frame timing
// info registered by VCMGenericDecoder::Decode() (via Map/Pop), and forwards
// frames to the user's VCMReceiveCallback. Map/Pop are guarded by _critSect.
class VCMDecodedFrameCallback : public DecodedImageCallback
{
public:
    VCMDecodedFrameCallback(VCMTiming& timing, TickTimeBase* clock);
    virtual ~VCMDecodedFrameCallback();
    // Sets the sink that receives fully decoded frames (may be NULL).
    void SetUserReceiveCallback(VCMReceiveCallback* receiveCallback);

    // DecodedImageCallback implementation.
    virtual WebRtc_Word32 Decoded(VideoFrame& decodedImage);
    virtual WebRtc_Word32 ReceivedDecodedReferenceFrame(const WebRtc_UWord64 pictureId);
    virtual WebRtc_Word32 ReceivedDecodedFrame(const WebRtc_UWord64 pictureId);

    // Picture ID last reported via ReceivedDecodedFrame().
    WebRtc_UWord64 LastReceivedPictureID() const;

    // Register / unregister frame bookkeeping by RTP timestamp.
    WebRtc_Word32 Map(WebRtc_UWord32 timestamp, VCMFrameInformation* frameInfo);
    WebRtc_Word32 Pop(WebRtc_UWord32 timestamp);

private:
    CriticalSectionWrapper* _critSect;   // protects _timestampMap
    TickTimeBase* _clock;                // not owned
    VideoFrame _frame;                   // scratch frame for delivery
    VCMReceiveCallback* _receiveCallback;  // user sink, not owned
    VCMTiming& _timing;
    VCMTimestampMap _timestampMap;       // timestamp -> VCMFrameInformation*
    WebRtc_UWord64 _lastReceivedPictureID;
};
|
||||
|
||||
|
||||
// Thin wrapper around a VideoDecoder implementation. Adds per-frame timing
// bookkeeping (via VCMDecodedFrameCallback), optional key-frame gating, and
// tags whether the decoder was externally registered.
class VCMGenericDecoder
{
    friend class VCMCodecDataBase;
public:
    // |decoder| is not owned. |isExternal| marks application-registered codecs.
    VCMGenericDecoder(VideoDecoder& decoder, WebRtc_Word32 id = 0, bool isExternal = false);
    ~VCMGenericDecoder();

    /**
    * Initialize the decoder with the information from the VideoCodec
    */
    WebRtc_Word32 InitDecode(const VideoCodec* settings,
                             WebRtc_Word32 numberOfCores,
                             bool requireKeyFrame);

    /**
    * Decode to a raw I420 frame,
    *
    * inputVideoBuffer reference to encoded video frame
    */
    WebRtc_Word32 Decode(const VCMEncodedFrame& inputFrame, int64_t nowMs);

    /**
    * Free the decoder memory
    */
    WebRtc_Word32 Release();

    /**
    * Reset the decoder state, prepare for a new call
    */
    WebRtc_Word32 Reset();

    /**
    * Codec configuration data sent out-of-band, i.e. in SIP call setup
    *
    * buffer pointer to the configuration data
    * size the size of the configuration data in bytes
    */
    WebRtc_Word32 SetCodecConfigParameters(const WebRtc_UWord8* /*buffer*/,
                                           WebRtc_Word32 /*size*/);

    // Registers the sink used for decoded frames and timing bookkeeping.
    WebRtc_Word32 RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback);

    // True if the wrapped decoder was externally registered.
    bool External() const;

protected:

    WebRtc_Word32 _id;
    VCMDecodedFrameCallback* _callback;   // not owned
    // Circular buffer of per-frame timing info, indexed by _nextFrameInfoIdx.
    VCMFrameInformation _frameInfos[kDecoderFrameMemoryLength];
    WebRtc_UWord32 _nextFrameInfoIdx;
    VideoDecoder& _decoder;               // not owned
    VideoCodecType _codecType;
    bool _isExternal;
    bool _requireKeyFrame;   // refuse delta frames until a key frame is decoded
    bool _keyFrameDecoded;

};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
|
||||
278
webrtc/modules/video_coding/main/source/generic_encoder.cc
Normal file
278
webrtc/modules/video_coding/main/source/generic_encoder.cc
Normal file
@ -0,0 +1,278 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "encoded_frame.h"
|
||||
#include "generic_encoder.h"
|
||||
#include "media_optimization.h"
|
||||
#include "../../../../engine_configurations.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
//#define DEBUG_ENCODER_BIT_STREAM
|
||||
|
||||
// Wraps |encoder| (owned elsewhere). |internalSource| marks encoders that
// produce their own frames (e.g. hardware capture pipelines); it is forwarded
// to the encoded-frame callback in RegisterEncodeCallback() and exposed via
// InternalSource().
VCMGenericEncoder::VCMGenericEncoder(VideoEncoder& encoder, bool internalSource /*= false*/)
:
_encoder(encoder),
_codecType(kVideoCodecUnknown),
_VCMencodedFrameCallback(NULL),
_bitRate(0),
_frameRate(0),
// BUG FIX: was hard-coded to false, silently ignoring the |internalSource|
// constructor argument (InternalSource() and RegisterEncodeCallback() then
// always saw false).
_internalSource(internalSource)
{
}
|
||||
|
||||
|
||||
// The wrapped encoder is not owned here and is therefore not released.
VCMGenericEncoder::~VCMGenericEncoder()
{
}
|
||||
|
||||
// Frees encoder resources and clears the cached rate targets and callback
// pointer (the callback object itself is owned elsewhere).
WebRtc_Word32 VCMGenericEncoder::Release()
{
    _VCMencodedFrameCallback = NULL;
    _frameRate = 0;
    _bitRate = 0;
    return _encoder.Release();
}
|
||||
|
||||
// Initializes the wrapped encoder from |settings| and caches the configured
// start bitrate, max frame rate and codec type locally.
WebRtc_Word32
VCMGenericEncoder::InitEncode(const VideoCodec* settings,
                              WebRtc_Word32 numberOfCores,
                              WebRtc_UWord32 maxPayloadSize)
{
    _bitRate = settings->startBitrate;
    _frameRate = settings->maxFramerate;
    _codecType = settings->codecType;
    // Keep an already-registered callback in sync with the new codec type.
    if (_VCMencodedFrameCallback != NULL)
    {
        _VCMencodedFrameCallback->SetCodecType(_codecType);
    }
    return _encoder.InitEncode(settings, numberOfCores, maxPayloadSize);
}
|
||||
|
||||
// Encodes |inputFrame| with the wrapped encoder.
// |frameTypes| may be NULL; in that case a single delta frame is requested.
WebRtc_Word32
VCMGenericEncoder::Encode(const VideoFrame& inputFrame,
                          const CodecSpecificInfo* codecSpecificInfo,
                          const std::vector<FrameType>* frameTypes) {
  // BUG FIX: the original called frameTypes->size() before the NULL check
  // below, dereferencing a possibly-NULL pointer.
  size_t num_frame_types = (frameTypes != NULL) ? frameTypes->size() : 1;
  std::vector<VideoFrameType> video_frame_types(num_frame_types,
                                                kDeltaFrame);
  if (frameTypes) {
    VCMEncodedFrame::ConvertFrameTypes(*frameTypes, &video_frame_types);
  }
  return _encoder.Encode(inputFrame, codecSpecificInfo, &video_frame_types);
}
|
||||
|
||||
// Forwards the current packet-loss estimate and round-trip time (ms) to the
// wrapped encoder.
WebRtc_Word32
VCMGenericEncoder::SetChannelParameters(WebRtc_Word32 packetLoss, int rtt)
{
    return _encoder.SetChannelParameters(packetLoss, rtt);
}
|
||||
|
||||
// Applies new bitrate/framerate targets to the wrapped encoder. The cached
// targets are updated only after the encoder has accepted them.
// Returns VCM_OK on success, otherwise the encoder's error code.
WebRtc_Word32
VCMGenericEncoder::SetRates(WebRtc_UWord32 newBitRate, WebRtc_UWord32 frameRate)
{
    const WebRtc_Word32 result = _encoder.SetRates(newBitRate, frameRate);
    if (result >= 0)
    {
        _bitRate = newBitRate;
        _frameRate = frameRate;
        return VCM_OK;
    }
    return result;
}
|
||||
|
||||
// Forwards out-of-band codec configuration data to the wrapped encoder.
// Returns the encoder's result (< 0 on error).
WebRtc_Word32
VCMGenericEncoder::CodecConfigParameters(WebRtc_UWord8* buffer, WebRtc_Word32 size)
{
    // The original branched on (ret < 0) only to return ret in both arms;
    // forward the result directly instead.
    return _encoder.CodecConfigParameters(buffer, size);
}
|
||||
|
||||
// Last bitrate target accepted by SetRates()/InitEncode(), in the units
// those callers supplied.
WebRtc_UWord32 VCMGenericEncoder::BitRate() const
{
    return _bitRate;
}
|
||||
|
||||
// Last frame-rate target accepted by SetRates()/InitEncode().
WebRtc_UWord32 VCMGenericEncoder::FrameRate() const
{
    return _frameRate;
}
|
||||
|
||||
// Enables/disables periodic key-frame generation in the wrapped encoder.
WebRtc_Word32
VCMGenericEncoder::SetPeriodicKeyFrames(bool enable)
{
    return _encoder.SetPeriodicKeyFrames(enable);
}
|
||||
|
||||
// Asks an internal-source encoder to produce frames of the requested types;
// the encoder supplies the image itself, so an empty VideoFrame is passed.
// A NULL |frame_types| is a no-op returning 0.
WebRtc_Word32 VCMGenericEncoder::RequestFrame(
    const std::vector<FrameType>* frame_types) {
  if (!frame_types) {
    return 0;
  }
  VideoFrame image;
  // BUG FIX: the original constructed the vector as
  //   std::vector<VideoFrameType> video_frame_types(kVideoFrameDelta);
  // which uses the enum VALUE as the element count (with default-initialized
  // elements) instead of one kDeltaFrame entry per requested frame type —
  // compare the correct construction in Encode(). The redundant second NULL
  // check (frame_types is known non-NULL here) is also dropped.
  std::vector<VideoFrameType> video_frame_types(frame_types->size(),
                                                kDeltaFrame);
  VCMEncodedFrame::ConvertFrameTypes(*frame_types, &video_frame_types);
  return _encoder.Encode(image, NULL, &video_frame_types);
}
|
||||
|
||||
// Registers the sink for encoded frames. The callback is configured with the
// current codec type and internal-source flag before being handed to the
// wrapped encoder.
WebRtc_Word32
VCMGenericEncoder::RegisterEncodeCallback(VCMEncodedFrameCallback* VCMencodedFrameCallback)
{
    _VCMencodedFrameCallback = VCMencodedFrameCallback;

    _VCMencodedFrameCallback->SetCodecType(_codecType);
    _VCMencodedFrameCallback->SetInternalSource(_internalSource);
    return _encoder.RegisterEncodeCompleteCallback(_VCMencodedFrameCallback);
}
|
||||
|
||||
// True if this encoder produces its own source frames (see constructor).
bool
VCMGenericEncoder::InternalSource() const
{
    return _internalSource;
}
|
||||
|
||||
/***************************
|
||||
* Callback Implementation
|
||||
***************************/
|
||||
// Starts with no transport sink and no media-optimization module; both are
// injected later via SetTransportCallback()/SetMediaOpt(). With
// DEBUG_ENCODER_BIT_STREAM defined, the raw encoder output is additionally
// dumped to a file for offline inspection.
VCMEncodedFrameCallback::VCMEncodedFrameCallback():
_sendCallback(),
_mediaOpt(NULL),
_encodedBytes(0),
_payloadType(0),
_codecType(kVideoCodecUnknown),
_internalSource(false)
#ifdef DEBUG_ENCODER_BIT_STREAM
, _bitStreamAfterEncoder(NULL)
#endif
{
#ifdef DEBUG_ENCODER_BIT_STREAM
    // Debug-only dump file; fopen may fail and leave the handle NULL.
    _bitStreamAfterEncoder = fopen("encoderBitStream.bit", "wb");
#endif
}
|
||||
|
||||
// Closes the debug bitstream dump file, if one was successfully opened.
VCMEncodedFrameCallback::~VCMEncodedFrameCallback()
{
#ifdef DEBUG_ENCODER_BIT_STREAM
    // BUG FIX: the fopen() in the constructor may fail and leave the handle
    // NULL; fclose(NULL) is undefined behavior, so guard it (the write path
    // in Encoded() already performs the same check).
    if (_bitStreamAfterEncoder != NULL)
    {
        fclose(_bitStreamAfterEncoder);
    }
#endif
}
|
||||
|
||||
// Sets the transport sink that receives packetized encoded data; not owned.
// Always returns VCM_OK.
WebRtc_Word32
VCMEncodedFrameCallback::SetTransportCallback(VCMPacketizationCallback* transport)
{
    _sendCallback = transport;
    return VCM_OK;
}
|
||||
|
||||
// Encoder-complete callback: builds the RTP video header from the codec-
// specific info and forwards the encoded image to the registered transport.
// Returns VCM_UNINITIALIZED when no transport is set, the transport's error
// code on send failure, a media-optimization drop signal for internal-source
// encoders, else VCM_OK.
// NOTE(review): |fragmentationHeader| is dereferenced unconditionally below
// despite defaulting to NULL in the declaration — confirm all encoders pass
// a non-NULL header.
WebRtc_Word32
VCMEncodedFrameCallback::Encoded(
    EncodedImage &encodedImage,
    const CodecSpecificInfo* codecSpecificInfo,
    const RTPFragmentationHeader* fragmentationHeader)
{
    FrameType frameType = VCMEncodedFrame::ConvertFrameType(encodedImage._frameType);

    WebRtc_UWord32 encodedBytes = 0;
    if (_sendCallback != NULL)
    {
        encodedBytes = encodedImage._length;

#ifdef DEBUG_ENCODER_BIT_STREAM
        // Debug-only dump of the raw bitstream (handle may be NULL on
        // fopen failure).
        if (_bitStreamAfterEncoder != NULL)
        {
            fwrite(encodedImage._buffer, 1, encodedImage._length, _bitStreamAfterEncoder);
        }
#endif

        // Translate codec-specific info into an RTP video header; when no
        // info is supplied, a NULL header pointer is sent instead.
        RTPVideoHeader rtpVideoHeader;
        RTPVideoHeader* rtpVideoHeaderPtr = &rtpVideoHeader;
        if (codecSpecificInfo)
        {
            CopyCodecSpecific(*codecSpecificInfo, &rtpVideoHeaderPtr);
        }
        else
        {
            rtpVideoHeaderPtr = NULL;
        }

        WebRtc_Word32 callbackReturn = _sendCallback->SendData(
            frameType,
            _payloadType,
            encodedImage._timeStamp,
            encodedImage.capture_time_ms_,
            encodedImage._buffer,
            encodedBytes,
            *fragmentationHeader,
            rtpVideoHeaderPtr);
        if (callbackReturn < 0)
        {
            return callbackReturn;
        }
    }
    else
    {
        return VCM_UNINITIALIZED;
    }
    _encodedBytes = encodedBytes;
    // Feed the actual frame size back into media optimization; for
    // internal-source encoders its drop decision is returned to the encoder.
    if (_mediaOpt != NULL) {
      _mediaOpt->UpdateWithEncodedData(_encodedBytes, frameType);
      if (_internalSource)
      {
          return _mediaOpt->DropFrame(); // Signal to encoder to drop next frame
      }
    }
    return VCM_OK;
}
|
||||
|
||||
// Size in bytes of the last frame successfully delivered via Encoded().
WebRtc_UWord32
VCMEncodedFrameCallback::EncodedBytes()
{
    return _encodedBytes;
}
|
||||
|
||||
// Sets the media-optimization module consulted after each encoded frame;
// not owned, may be NULL to disable.
void
VCMEncodedFrameCallback::SetMediaOpt(VCMMediaOptimization *mediaOpt)
{
    _mediaOpt = mediaOpt;
}
|
||||
|
||||
// Copies codec-specific encode info into the RTP video header pointed to by
// *rtp. Only VP8 carries codec-specific fields here; for any other codec the
// header pointer is set to NULL so the caller sends no codec header.
void VCMEncodedFrameCallback::CopyCodecSpecific(const CodecSpecificInfo& info,
                                                RTPVideoHeader** rtp) {
  switch (info.codecType) {
    case kVideoCodecVP8: {
      // Initialize all VP8 header fields to defaults, then overwrite the
      // ones the encoder reported.
      (*rtp)->codecHeader.VP8.InitRTPVideoHeaderVP8();
      (*rtp)->codecHeader.VP8.pictureId =
          info.codecSpecific.VP8.pictureId;
      (*rtp)->codecHeader.VP8.nonReference =
          info.codecSpecific.VP8.nonReference;
      (*rtp)->codecHeader.VP8.temporalIdx =
          info.codecSpecific.VP8.temporalIdx;
      (*rtp)->codecHeader.VP8.layerSync =
          info.codecSpecific.VP8.layerSync;
      (*rtp)->codecHeader.VP8.tl0PicIdx =
          info.codecSpecific.VP8.tl0PicIdx;
      (*rtp)->codecHeader.VP8.keyIdx =
          info.codecSpecific.VP8.keyIdx;
      (*rtp)->simulcastIdx = info.codecSpecific.VP8.simulcastIdx;
      return;
    }
    default: {
      // No codec specific info. Change RTP header pointer to NULL.
      *rtp = NULL;
      return;
    }
  }
}
|
||||
} // namespace webrtc
|
||||
145
webrtc/modules/video_coding/main/source/generic_encoder.h
Normal file
145
webrtc/modules/video_coding/main/source/generic_encoder.h
Normal file
@ -0,0 +1,145 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
|
||||
|
||||
#include "video_codec_interface.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
class VCMMediaOptimization;
|
||||
|
||||
/*************************************/
|
||||
/* VCMEncodeFrameCallback class */
|
||||
/***********************************/
|
||||
// Receives encoded images from the encoder, attaches RTP video-header info,
// forwards them to the registered packetization/transport callback, and
// reports frame sizes to media optimization.
class VCMEncodedFrameCallback : public EncodedImageCallback
{
public:
    VCMEncodedFrameCallback();
    virtual ~VCMEncodedFrameCallback();

    /*
    * Callback implementation - codec encode complete
    */
    WebRtc_Word32 Encoded(
        EncodedImage& encodedImage,
        const CodecSpecificInfo* codecSpecificInfo = NULL,
        const RTPFragmentationHeader* fragmentationHeader = NULL);
    /*
    * Get number of encoded bytes
    */
    WebRtc_UWord32 EncodedBytes();
    /*
    * Callback implementation - generic encoder encode complete
    */
    WebRtc_Word32 SetTransportCallback(VCMPacketizationCallback* transport);
    /**
    * Set media Optimization
    */
    void SetMediaOpt (VCMMediaOptimization* mediaOpt);

    // Configuration setters; applied to all subsequently encoded frames.
    void SetPayloadType(WebRtc_UWord8 payloadType) { _payloadType = payloadType; };
    void SetCodecType(VideoCodecType codecType) {_codecType = codecType;};
    void SetInternalSource(bool internalSource) { _internalSource = internalSource; };

private:
    /*
    * Map information from info into rtp. If no relevant information is found
    * in info, rtp is set to NULL.
    */
    static void CopyCodecSpecific(const CodecSpecificInfo& info,
                                  RTPVideoHeader** rtp);

    VCMPacketizationCallback* _sendCallback;   // transport sink, not owned
    VCMMediaOptimization* _mediaOpt;           // not owned, may be NULL
    WebRtc_UWord32 _encodedBytes;              // size of last delivered frame
    WebRtc_UWord8 _payloadType;
    VideoCodecType _codecType;
    bool _internalSource;
#ifdef DEBUG_ENCODER_BIT_STREAM
    FILE* _bitStreamAfterEncoder;              // debug-only bitstream dump
#endif
};// end of VCMEncodeFrameCallback class
|
||||
|
||||
|
||||
/******************************/
|
||||
/* VCMGenericEncoder class */
|
||||
/******************************/
|
||||
// Thin wrapper around a VideoEncoder implementation. Caches the configured
// rate targets and codec type, and wires the encoded-frame callback to the
// underlying encoder.
class VCMGenericEncoder
{
    friend class VCMCodecDataBase;
public:
    // |encoder| is not owned. |internalSource| marks encoders that supply
    // their own source frames.
    VCMGenericEncoder(VideoEncoder& encoder, bool internalSource = false);
    ~VCMGenericEncoder();
    /**
    * Free encoder memory
    */
    WebRtc_Word32 Release();
    /**
    * Initialize the encoder with the information from the VideoCodec
    */
    WebRtc_Word32 InitEncode(const VideoCodec* settings,
                             WebRtc_Word32 numberOfCores,
                             WebRtc_UWord32 maxPayloadSize);
    /**
    * Encode raw image
    * inputFrame : Frame containing raw image
    * codecSpecificInfo : Specific codec data
    * cameraFrameRate : request or information from the remote side
    * frameType : The requested frame type to encode
    */
    WebRtc_Word32 Encode(const VideoFrame& inputFrame,
                         const CodecSpecificInfo* codecSpecificInfo,
                         const std::vector<FrameType>* frameTypes);
    /**
    * Set new target bit rate and frame rate
    * Return Value: new bit rate if OK, otherwise <0s
    */
    WebRtc_Word32 SetRates(WebRtc_UWord32 newBitRate, WebRtc_UWord32 frameRate);
    /**
    * Set a new packet loss rate and a new round-trip time in milliseconds.
    */
    WebRtc_Word32 SetChannelParameters(WebRtc_Word32 packetLoss, int rtt);
    WebRtc_Word32 CodecConfigParameters(WebRtc_UWord8* buffer, WebRtc_Word32 size);
    /**
    * Register a transport callback which will be called to deliver the encoded buffers
    */
    WebRtc_Word32 RegisterEncodeCallback(VCMEncodedFrameCallback* VCMencodedFrameCallback);
    /**
    * Get encoder bit rate
    */
    WebRtc_UWord32 BitRate() const;
    /**
    * Get encoder frame rate
    */
    WebRtc_UWord32 FrameRate() const;

    // Enables/disables periodic key frames in the underlying encoder.
    WebRtc_Word32 SetPeriodicKeyFrames(bool enable);

    // Asks an internal-source encoder to produce the given frame types.
    WebRtc_Word32 RequestFrame(const std::vector<FrameType>* frame_types);

    // True if the encoder supplies its own source frames.
    bool InternalSource() const;

private:
    VideoEncoder& _encoder;                           // not owned
    VideoCodecType _codecType;
    VCMEncodedFrameCallback* _VCMencodedFrameCallback; // not owned
    WebRtc_UWord32 _bitRate;                          // last accepted target
    WebRtc_UWord32 _frameRate;                        // last accepted target
    bool _internalSource;
}; // end of VCMGenericEncoder class
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
|
||||
114
webrtc/modules/video_coding/main/source/inter_frame_delay.cc
Normal file
114
webrtc/modules/video_coding/main/source/inter_frame_delay.cc
Normal file
@ -0,0 +1,114 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "inter_frame_delay.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Initializes the estimator; equivalent to an explicit Reset() at
// |currentWallClock| (local time in ms).
VCMInterFrameDelay::VCMInterFrameDelay(int64_t currentWallClock)
{
    Reset(currentWallClock);
}
|
||||
|
||||
// Resets the delay estimate
|
||||
void
|
||||
VCMInterFrameDelay::Reset(int64_t currentWallClock)
|
||||
{
|
||||
_zeroWallClock = currentWallClock;
|
||||
_wrapArounds = 0;
|
||||
_prevWallClock = 0;
|
||||
_prevTimestamp = 0;
|
||||
_dTS = 0;
|
||||
}
|
||||
|
||||
// Calculates the delay of a frame with the given timestamp.
|
||||
// This method is called when the frame is complete.
|
||||
// Calculates the delay of a frame with the given timestamp.
// This method is called when the frame is complete.
// *delay receives the difference (ms) between the wall-clock gap and the
// RTP-timestamp gap since the previous frame. Returns false (with *delay = 0)
// for reordered timestamps, which are excluded from the estimate.
bool
VCMInterFrameDelay::CalculateDelay(WebRtc_UWord32 timestamp,
                                   WebRtc_Word64 *delay,
                                   int64_t currentWallClock)
{
    if (_prevWallClock == 0)
    {
        // First set of data, initialization, wait for next frame
        _prevWallClock = currentWallClock;
        _prevTimestamp = timestamp;
        *delay = 0;
        return true;
    }

    // Detect 32-bit RTP timestamp wrap-around before computing the diff.
    WebRtc_Word32 prevWrapArounds = _wrapArounds;
    CheckForWrapArounds(timestamp);

    // This will be -1 for backward wrap arounds and +1 for forward wrap arounds
    WebRtc_Word32 wrapAroundsSincePrev = _wrapArounds - prevWrapArounds;

    // Account for reordering in jitter variance estimate in the future?
    // Note that this also captures incomplete frames which are grabbed
    // for decoding after a later frame has been complete, i.e. real
    // packet losses.
    if ((wrapAroundsSincePrev == 0 && timestamp < _prevTimestamp) || wrapAroundsSincePrev < 0)
    {
        *delay = 0;
        return false;
    }

    // Compute the compensated timestamp difference and convert it to ms and
    // round it to closest integer. (90 kHz RTP clock -> divide by 90.)
    _dTS = static_cast<WebRtc_Word64>((timestamp + wrapAroundsSincePrev *
        (static_cast<WebRtc_Word64>(1)<<32) - _prevTimestamp) / 90.0 + 0.5);

    // frameDelay is the difference of dT and dTS -- i.e. the difference of
    // the wall clock time difference and the timestamp difference between
    // two following frames.
    *delay = static_cast<WebRtc_Word64>(currentWallClock - _prevWallClock - _dTS);

    _prevTimestamp = timestamp;
    _prevWallClock = currentWallClock;

    return true;
}
|
||||
|
||||
// Returns the current difference between incoming timestamps
|
||||
// Returns the latest wrap-around-compensated timestamp difference in ms,
// clamped at zero (a negative cached diff is reported as 0).
WebRtc_UWord32 VCMInterFrameDelay::CurrentTimeStampDiffMs() const
{
    return (_dTS < 0) ? 0 : static_cast<WebRtc_UWord32>(_dTS);
}
|
||||
|
||||
// Investigates if the timestamp clock has overflowed since the last timestamp and
|
||||
// keeps track of the number of wrap arounds since reset.
|
||||
// Investigates if the 32-bit RTP timestamp clock has wrapped since the last
// timestamp and keeps track of the number of wrap arounds since reset.
// Relies on defined unsigned overflow: the unsigned difference is cast to a
// signed 32-bit value, whose sign reveals the wrap direction.
void
VCMInterFrameDelay::CheckForWrapArounds(WebRtc_UWord32 timestamp)
{
    if (timestamp < _prevTimestamp)
    {
        // This difference will probably be less than -2^31 if we have had a wrap around
        // (e.g. timestamp = 1, _previousTimestamp = 2^32 - 1). Since it is cast to a Word32,
        // it should be positive.
        if (static_cast<WebRtc_Word32>(timestamp - _prevTimestamp) > 0)
        {
            // Forward wrap around
            _wrapArounds++;
        }
    }
    // This difference will probably be less than -2^31 if we have had a backward wrap around.
    // Since it is cast to a Word32, it should be positive.
    else if (static_cast<WebRtc_Word32>(_prevTimestamp - timestamp) > 0)
    {
        // Backward wrap around
        _wrapArounds--;
    }
}
|
||||
|
||||
}
|
||||
66
webrtc/modules/video_coding/main/source/inter_frame_delay.h
Normal file
66
webrtc/modules/video_coding/main/source/inter_frame_delay.h
Normal file
@ -0,0 +1,66 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Estimates per-frame delay as the difference between the wall-clock gap and
// the RTP-timestamp gap of consecutive complete frames, compensating for
// 32-bit RTP timestamp wrap-around. Used by the jitter estimator.
class VCMInterFrameDelay
{
public:
    VCMInterFrameDelay(int64_t currentWallClock);

    // Resets the estimate. Zeros are given as parameters.
    void Reset(int64_t currentWallClock);

    // Calculates the delay of a frame with the given timestamp.
    // This method is called when the frame is complete.
    //
    // Input:
    //          - timestamp         : RTP timestamp of a received frame
    //          - *delay            : Pointer to memory where the result should be stored
    //          - currentWallClock  : The current time in milliseconds.
    //                                Should be -1 for normal operation, only used for testing.
    // Return value                 : true if OK, false when reordered timestamps
    bool CalculateDelay(WebRtc_UWord32 timestamp,
                        WebRtc_Word64 *delay,
                        int64_t currentWallClock);

    // Returns the current difference between incoming timestamps
    //
    // Return value                 : Wrap-around compensated difference between incoming
    //                                timestamps.
    WebRtc_UWord32 CurrentTimeStampDiffMs() const;

private:
    // Controls if the RTP timestamp counter has had a wrap around
    // between the current and the previously received frame.
    //
    // Input:
    //          - timestmap         : RTP timestamp of the current frame.
    void CheckForWrapArounds(WebRtc_UWord32 timestamp);

    WebRtc_Word64 _zeroWallClock; // Local timestamp of the first video packet received
    WebRtc_Word32 _wrapArounds;   // Number of wrapArounds detected
    // The previous timestamp passed to the delay estimate
    WebRtc_UWord32 _prevTimestamp;
    // The previous wall clock timestamp used by the delay estimate
    WebRtc_Word64 _prevWallClock;
    // Wrap-around compensated difference between incoming timestamps
    WebRtc_Word64 _dTS;
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
|
||||
58
webrtc/modules/video_coding/main/source/internal_defines.h
Normal file
58
webrtc/modules/video_coding/main/source/internal_defines.h
Normal file
@ -0,0 +1,58 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc
{

// Keeps only the low 32 bits of a 64-bit value.
#define MASK_32_BITS(x) (0xFFFFFFFF & (x))

// Truncates a 64-bit value to its low 32 bits.
inline WebRtc_UWord32 MaskWord64ToUWord32(WebRtc_Word64 w64)
{
    return static_cast<WebRtc_UWord32>(MASK_32_BITS(w64));
}

// Min/max helpers (arguments are evaluated twice — avoid side effects).
#define VCM_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define VCM_MIN(a, b) (((a) < (b)) ? (a) : (b))

// Default codec configuration (CIF @ 30 fps) and rate limits.
#define VCM_DEFAULT_CODEC_WIDTH 352
#define VCM_DEFAULT_CODEC_HEIGHT 288
#define VCM_DEFAULT_FRAME_RATE 30
#define VCM_MIN_BITRATE 30
#define VCM_FLUSH_INDICATOR 4

// Helper macros for creating the static codec list: each enabled codec gets
// the next consecutive index; disabled codecs reuse the previous index so the
// count stays correct.
#define VCM_NO_CODEC_IDX -1
#ifdef VIDEOCODEC_VP8
#define VCM_VP8_IDX VCM_NO_CODEC_IDX + 1
#else
#define VCM_VP8_IDX VCM_NO_CODEC_IDX
#endif
#ifdef VIDEOCODEC_I420
#define VCM_I420_IDX VCM_VP8_IDX + 1
#else
#define VCM_I420_IDX VCM_VP8_IDX
#endif
#define VCM_NUM_VIDEO_CODECS_AVAILABLE VCM_I420_IDX + 1

#define VCM_NO_RECEIVER_ID 0

// Packs a VCM instance id and a receiver id into one 32-bit trace id
// (vcmId in the high 16 bits).
inline WebRtc_Word32 VCMId(const WebRtc_Word32 vcmId, const WebRtc_Word32 receiverId = 0)
{
    return static_cast<WebRtc_Word32>((vcmId << 16) + receiverId);
}

} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
|
||||
1432
webrtc/modules/video_coding/main/source/jitter_buffer.cc
Normal file
1432
webrtc/modules/video_coding/main/source/jitter_buffer.cc
Normal file
File diff suppressed because it is too large
Load Diff
266
webrtc/modules/video_coding/main/source/jitter_buffer.h
Normal file
266
webrtc/modules/video_coding/main/source/jitter_buffer.h
Normal file
@ -0,0 +1,266 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_
|
||||
|
||||
#include <list>
|
||||
|
||||
#include "modules/interface/module_common_types.h"
|
||||
#include "modules/video_coding/main/interface/video_coding_defines.h"
|
||||
#include "modules/video_coding/main/source/decoding_state.h"
|
||||
#include "modules/video_coding/main/source/event.h"
|
||||
#include "modules/video_coding/main/source/inter_frame_delay.h"
|
||||
#include "modules/video_coding/main/source/jitter_buffer_common.h"
|
||||
#include "modules/video_coding/main/source/jitter_estimator.h"
|
||||
#include "system_wrappers/interface/constructor_magic.h"
|
||||
#include "system_wrappers/interface/critical_section_wrapper.h"
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// NACK (retransmission request) operating mode of the jitter buffer.
enum VCMNackMode {
  kNackInfinite,   // always request retransmission of missing packets
  kNackHybrid,     // combine NACK with other error-resilience mechanisms
  kNoNack          // never request retransmission
};
|
||||
|
||||
typedef std::list<VCMFrameBuffer*> FrameList;
|
||||
|
||||
// forward declarations
|
||||
class TickTimeBase;
|
||||
class VCMFrameBuffer;
|
||||
class VCMPacket;
|
||||
class VCMEncodedFrame;
|
||||
|
||||
// One jitter-estimator observation: frame timestamp, frame size and the
// arrival time of its latest packet (-1 until a packet has arrived).
struct VCMJitterSample {
  VCMJitterSample() : timestamp(0), frame_size(0), latest_packet_time(-1) {}
  uint32_t timestamp;
  uint32_t frame_size;
  int64_t latest_packet_time;
};
|
||||
|
||||
// Receiver-side jitter buffer: assembles incoming RTP packets into frames,
// orders frames for decoding, estimates network jitter, and builds NACK lists
// of missing sequence numbers.
// NOTE(review): the |crit_sect_| member and the two events suggest public
// methods are internally synchronized -- confirm against jitter_buffer.cc.
class VCMJitterBuffer {
 public:
  VCMJitterBuffer(TickTimeBase* clock, int vcm_id = -1, int receiver_id = -1,
                  bool master = true);
  virtual ~VCMJitterBuffer();

  // Makes |this| a deep copy of |rhs|.
  void CopyFrom(const VCMJitterBuffer& rhs);

  // Initializes and starts jitter buffer.
  void Start();

  // Signals all internal events and stops the jitter buffer.
  void Stop();

  // Returns true if the jitter buffer is running.
  bool Running() const;

  // Empty the jitter buffer of all its data.
  void Flush();

  // Get the number of received key and delta frames since the jitter buffer
  // was started.
  void FrameStatistics(uint32_t* received_delta_frames,
                       uint32_t* received_key_frames) const;

  // The number of packets discarded by the jitter buffer because the decoder
  // won't be able to decode them.
  int num_not_decodable_packets() const;

  // Gets number of packets discarded by the jitter buffer.
  int num_discarded_packets() const;

  // Statistics, Calculate frame and bit rates.
  void IncomingRateStatistics(unsigned int* framerate,
                              unsigned int* bitrate);

  // Waits for the first packet in the next frame to arrive and then returns
  // the timestamp of that frame. |incoming_frame_type| and |render_time_ms| are
  // set to the frame type and render time of the next frame.
  // Blocks for up to |max_wait_time_ms| ms. Returns -1 if no packet has arrived
  // after |max_wait_time_ms| ms.
  int64_t NextTimestamp(uint32_t max_wait_time_ms,
                        FrameType* incoming_frame_type,
                        int64_t* render_time_ms);

  // Checks if the packet sequence will be complete if the next frame would be
  // grabbed for decoding. That is, if a frame has been lost between the
  // last decoded frame and the next, or if the next frame is missing one
  // or more packets.
  bool CompleteSequenceWithNextFrame();

  // TODO(mikhal/stefan): Merge all GetFrameForDecoding into one.
  // Wait |max_wait_time_ms| for a complete frame to arrive. After timeout NULL
  // is returned.
  VCMEncodedFrame* GetCompleteFrameForDecoding(uint32_t max_wait_time_ms);

  // Get a frame for decoding (even an incomplete) without delay.
  VCMEncodedFrame* GetFrameForDecoding();

  // Releases a frame returned from the jitter buffer, should be called when
  // done with decoding.
  void ReleaseFrame(VCMEncodedFrame* frame);

  // Returns the frame assigned to this timestamp.
  int GetFrame(const VCMPacket& packet, VCMEncodedFrame*&);
  VCMEncodedFrame* GetFrame(const VCMPacket& packet);  // Deprecated.

  // Returns the time in ms when the latest packet was inserted into the frame.
  // Retransmitted is set to true if any of the packets belonging to the frame
  // has been retransmitted.
  int64_t LastPacketTime(VCMEncodedFrame* frame, bool* retransmitted) const;

  // Inserts a packet into a frame returned from GetFrame().
  VCMFrameBufferEnum InsertPacket(VCMEncodedFrame* frame,
                                  const VCMPacket& packet);

  // Returns the estimated jitter in milliseconds.
  uint32_t EstimatedJitterMs();

  // Updates the round-trip time estimate.
  void UpdateRtt(uint32_t rtt_ms);

  // Set the NACK mode. |highRttNackThreshold| is an RTT threshold in ms above
  // which NACK will be disabled if the NACK mode is |kNackHybrid|, -1 meaning
  // that NACK is always enabled in the hybrid mode.
  // |lowRttNackThreshold| is an RTT threshold in ms below which we expect to
  // rely on NACK only, and therefore are using larger buffers to have time to
  // wait for retransmissions.
  void SetNackMode(VCMNackMode mode, int low_rtt_nack_threshold_ms,
                   int high_rtt_nack_threshold_ms);

  // Returns the current NACK mode.
  VCMNackMode nack_mode() const;

  // Creates a list of missing sequence numbers.
  uint16_t* CreateNackList(uint16_t* nack_list_size, bool* list_extended);

  // Returns the timestamp of the last decoded frame.
  int64_t LastDecodedTimestamp() const;

 private:
  // In NACK-only mode this function doesn't return or release non-complete
  // frames unless we have a complete key frame. In hybrid mode, we may release
  // "decodable", incomplete frames.
  VCMEncodedFrame* GetFrameForDecodingNACK();

  // Returns |frame| to the free-frame pool unless it is being decoded.
  void ReleaseFrameIfNotDecoding(VCMFrameBuffer* frame);

  // Gets an empty frame, creating a new frame if necessary (i.e. increases
  // jitter buffer size).
  VCMFrameBuffer* GetEmptyFrame();

  // Recycles oldest frames until a key frame is found. Used if jitter buffer is
  // completely full. Returns true if a key frame was found.
  bool RecycleFramesUntilKeyFrame();

  // Sets the state of |frame| to complete if it's not too old to be decoded.
  // Also updates the frame statistics. Signals the |frame_event| if this is
  // the next frame to be decoded.
  VCMFrameBufferEnum UpdateFrameState(VCMFrameBuffer* frame);

  // Finds the oldest complete frame, used for getting next frame to decode.
  // Can return a decodable, incomplete frame if |enable_decodable| is true.
  FrameList::iterator FindOldestCompleteContinuousFrame(bool enable_decodable);

  // Drops frames that are too old to ever be decoded.
  void CleanUpOldFrames();

  // Sets the "decodable" and "frame loss" flags of a frame depending on which
  // packets have been received and which are missing.
  // A frame is "decodable" if enough packets of that frame has been received
  // for it to be usable by the decoder.
  // A frame has the "frame loss" flag set if packets are missing after the
  // last decoded frame and before |frame|.
  void VerifyAndSetPreviousFrameLost(VCMFrameBuffer* frame);

  // Returns true if |packet| is likely to have been retransmitted.
  bool IsPacketRetransmitted(const VCMPacket& packet) const;

  // The following three functions update the jitter estimate with the
  // payload size, receive time and RTP timestamp of a frame.
  void UpdateJitterEstimate(const VCMJitterSample& sample,
                            bool incomplete_frame);
  void UpdateJitterEstimate(const VCMFrameBuffer& frame, bool incomplete_frame);
  void UpdateJitterEstimate(int64_t latest_packet_time_ms,
                            uint32_t timestamp,
                            unsigned int frame_size,
                            bool incomplete_frame);

  // Returns the lowest and highest known sequence numbers, where the lowest is
  // the last decoded sequence number if a frame has been decoded.
  // -1 is returned if a sequence number cannot be determined.
  void GetLowHighSequenceNumbers(int32_t* low_seq_num,
                                 int32_t* high_seq_num) const;

  // Returns true if we should wait for retransmissions, false otherwise.
  bool WaitForRetransmissions();

  int vcm_id_;       // Used for trace identification only.
  int receiver_id_;  // Used for trace identification only.
  TickTimeBase* clock_;  // Not owned.
  // If we are running (have started) or not.
  bool running_;
  CriticalSectionWrapper* crit_sect_;
  bool master_;
  // Event to signal when we have a frame ready for decoder.
  VCMEvent frame_event_;
  // Event to signal when we have received a packet.
  VCMEvent packet_event_;
  // Number of allocated frames.
  int max_number_of_frames_;
  // Array of pointers to the frames in jitter buffer.
  VCMFrameBuffer* frame_buffers_[kMaxNumberOfFrames];
  FrameList frame_list_;
  VCMDecodingState last_decoded_state_;
  bool first_packet_;

  // Statistics.
  int num_not_decodable_packets_;
  // Frame counter for each type (key, delta, golden, key-delta).
  unsigned int receive_statistics_[4];
  // Latest calculated frame rates of incoming stream.
  unsigned int incoming_frame_rate_;
  unsigned int incoming_frame_count_;
  int64_t time_last_incoming_frame_count_;
  unsigned int incoming_bit_count_;
  unsigned int incoming_bit_rate_;
  unsigned int drop_count_;  // Frame drop counter.
  // Number of frames in a row that have been too old.
  int num_consecutive_old_frames_;
  // Number of packets in a row that have been too old.
  int num_consecutive_old_packets_;
  // Number of packets discarded by the jitter buffer.
  int num_discarded_packets_;

  // Jitter estimation.
  // Filter for estimating jitter.
  VCMJitterEstimator jitter_estimate_;
  // Calculates network delays used for jitter calculations.
  VCMInterFrameDelay inter_frame_delay_;
  VCMJitterSample waiting_for_completion_;
  WebRtc_UWord32 rtt_ms_;  // TODO: use uint32_t for consistency with the rest
                           // of this class.

  // NACK and retransmissions.
  VCMNackMode nack_mode_;
  int low_rtt_nack_threshold_ms_;
  int high_rtt_nack_threshold_ms_;
  // Holds the internal NACK list (the missing sequence numbers).
  int32_t nack_seq_nums_internal_[kNackHistoryLength];
  uint16_t nack_seq_nums_[kNackHistoryLength];
  unsigned int nack_seq_nums_length_;
  bool waiting_for_key_frame_;

  DISALLOW_COPY_AND_ASSIGN(VCMJitterBuffer);
};
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_
|
||||
@ -0,0 +1,60 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "jitter_buffer_common.h"

#include <cstdint>
#include <cstdlib>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Returns the latest of the two RTP timestamps, compensating for wrap-around.
// Assumes the two timestamps are close in time: a wrap is inferred only when
// one value is near 0 and the other is near the top of the 32-bit range.
// |has_wrapped| may be NULL; when non-NULL it is set to whether a wrap was
// detected. (uint32_t is identical to the legacy WebRtc_UWord32 typedef used
// in the declaration, so callers are unaffected.)
uint32_t LatestTimestamp(uint32_t timestamp1,
                         uint32_t timestamp2,
                         bool* has_wrapped) {
  const bool wrap = (timestamp2 < 0x0000ffff && timestamp1 > 0xffff0000) ||
                    (timestamp2 > 0xffff0000 && timestamp1 < 0x0000ffff);
  if (has_wrapped != NULL)
    *has_wrapped = wrap;
  // Without a wrap the numerically larger timestamp is the latest; with a
  // wrap the smaller one is, since it has already passed zero.
  const bool first_is_latest =
      wrap ? (timestamp1 < timestamp2) : (timestamp1 > timestamp2);
  return first_is_latest ? timestamp1 : timestamp2;
}
|
||||
|
||||
// Returns the latest of two 16-bit RTP sequence numbers, compensating for
// wrap-around. A negative argument means "no sequence number"; if both are
// negative -1 is returned, and |has_wrapped| is deliberately left untouched
// on these early returns (preserves the original contract).
// Assumes the two numbers are close in time: a wrap is inferred only when one
// is near 0 and the other near 0xffff. |has_wrapped| may be NULL.
// (int32_t is identical to the legacy WebRtc_Word32 typedef used in the
// declaration, so callers are unaffected.)
int32_t LatestSequenceNumber(int32_t seq_num1,
                             int32_t seq_num2,
                             bool* has_wrapped) {
  if (seq_num1 < 0 && seq_num2 < 0)
    return -1;
  if (seq_num1 < 0)
    return seq_num2;
  if (seq_num2 < 0)
    return seq_num1;

  const bool wrap = (seq_num1 < 0x00ff && seq_num2 > 0xff00) ||
                    (seq_num1 > 0xff00 && seq_num2 < 0x00ff);
  if (has_wrapped != NULL)
    *has_wrapped = wrap;

  // Without a wrap the numerically larger number is the latest; with a wrap
  // the smaller one is, since it has already passed the 16-bit boundary.
  const bool second_is_latest =
      wrap ? (seq_num2 < seq_num1) : (seq_num2 > seq_num1);
  return second_is_latest ? seq_num2 : seq_num1;
}
|
||||
|
||||
} // namespace webrtc
|
||||
@ -0,0 +1,84 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Capacity limits of the jitter buffer.
enum { kMaxNumberOfFrames = 100 };
enum { kStartNumberOfFrames = 6 }; // in packets, 6 packets are approximately 198 ms,
                                   // we need at least one more for process
enum { kMaxVideoDelayMs = 2000 }; // in ms

// Thresholds governing frame recycling and buffer growth.
enum VCMJitterBufferEnum
{
    kMaxConsecutiveOldFrames = 60,
    kMaxConsecutiveOldPackets = 300,
    kMaxPacketsInSession = 800,
    kBufferIncStepSizeBytes = 30000, // >20 packets
    kMaxJBFrameSizeBytes = 4000000   // sanity don't go above 4Mbyte
};

// Status codes returned when interacting with a frame buffer (e.g. from
// VCMJitterBuffer::InsertPacket). Negative values are errors.
enum VCMFrameBufferEnum
{
    kStateError       = -4,
    kFlushIndicator   = -3, // Indicator that a flush has occurred.
    kTimeStampError   = -2,
    kSizeError        = -1,
    kNoError          = 0,
    kIncomplete       = 1,  // Frame incomplete
    kFirstPacket      = 2,
    kCompleteSession  = 3,  // at least one layer in the frame complete.
    kDecodableSession = 4,  // Frame incomplete, but ready to be decoded
    kDuplicatePacket  = 5   // We're receiving a duplicate packet.
};

// Life-cycle states of a frame buffer inside the jitter buffer.
enum VCMFrameBufferStateEnum
{
    kStateFree,       // Unused frame in the JB
    kStateEmpty,      // frame popped by the RTP receiver
    kStateIncomplete, // frame that have one or more packet(s) stored
    kStateComplete,   // frame that have all packets
    kStateDecoding,   // frame popped by the decoding thread
    kStateDecodable   // Hybrid mode - frame can be decoded
};

enum { kH264StartCodeLengthBytes = 4};

// Used to indicate if a received packet contain a complete NALU (or equivalent)
enum VCMNaluCompleteness
{
    kNaluUnset = 0,    // Packet has not been filled.
    kNaluComplete = 1, // Packet can be decoded as is.
    kNaluStart,        // Packet contain beginning of NALU
    kNaluIncomplete,   // Packet is not beginning or end of NALU
    kNaluEnd,          // Packet is the end of a NALU
};
|
||||
|
||||
// Returns the latest of the two timestamps, compensating for wrap arounds.
// This function assumes that the two timestamps are close in time.
// |has_wrapped| may be NULL; when non-NULL it receives whether a wrap was
// detected.
WebRtc_UWord32 LatestTimestamp(WebRtc_UWord32 timestamp1,
                               WebRtc_UWord32 timestamp2,
                               bool* has_wrapped);

// Returns the latest of the two sequence numbers, compensating for wrap
// arounds. This function assumes that the two sequence numbers are close in
// time. Negative inputs are sentinels meaning "no sequence number"; -1 is
// returned when both are negative.
WebRtc_Word32 LatestSequenceNumber(WebRtc_Word32 seq_num1,
                                   WebRtc_Word32 seq_num2,
                                   bool* has_wrapped);
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
|
||||
@ -0,0 +1,377 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include <list>
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "modules/video_coding/main/source/jitter_buffer.h"
|
||||
#include "modules/video_coding/main/source/media_opt_util.h"
|
||||
#include "modules/video_coding/main/source/mock/fake_tick_time.h"
|
||||
#include "modules/video_coding/main/source/packet.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Test helper that fabricates sequences of VCMPackets forming frames, with
// controllable sequence numbers, RTP timestamps and simulated loss (by
// dropping packets via NextPacket(NULL)).
class StreamGenerator {
 public:
  StreamGenerator(uint16_t start_seq_num, uint32_t start_timestamp,
                  int64_t current_time)
      : packets_(),
        sequence_number_(start_seq_num),
        timestamp_(start_timestamp),
        start_time_(current_time) {}

  // Re-initializes the generator, discarding any packets not yet consumed.
  void Init(uint16_t start_seq_num, uint32_t start_timestamp,
            int64_t current_time) {
    packets_.clear();
    sequence_number_ = start_seq_num;
    timestamp_ = start_timestamp;
    start_time_ = current_time;
  }

  // Replaces the pending packet list with one new frame: |num_media_packets|
  // media packets followed by |num_empty_packets| empty (padding) packets.
  // NOTE(review): the RTP timestamp advances by 90 * (current_time -
  // start_time_), i.e. by the time elapsed since Init/construction rather
  // than since the previous frame -- confirm this is intended.
  void GenerateFrame(FrameType type, int num_media_packets,
                     int num_empty_packets, int64_t current_time) {
    timestamp_ += 90 * (current_time - start_time_);
    // Move the sequence number counter if all packets from the previous frame
    // wasn't collected.
    sequence_number_ += packets_.size();
    packets_.clear();
    for (int i = 0; i < num_media_packets; ++i) {
      packets_.push_back(GeneratePacket(sequence_number_,
                                        timestamp_,
                                        (i == 0),
                                        (i == num_media_packets - 1),
                                        type));
      ++sequence_number_;
    }
    for (int i = 0; i < num_empty_packets; ++i) {
      packets_.push_back(GeneratePacket(sequence_number_,
                                        timestamp_,
                                        false,
                                        false,
                                        kFrameEmpty));
      ++sequence_number_;
    }
  }

  // Builds a single packet; NALU completeness is derived from the
  // first-packet and marker-bit flags.
  static VCMPacket GeneratePacket(uint16_t sequence_number,
                                  uint32_t timestamp,
                                  bool first_packet,
                                  bool marker_bit,
                                  FrameType type) {
    VCMPacket packet;
    packet.seqNum = sequence_number;
    packet.timestamp = timestamp;
    packet.frameType = type;
    packet.isFirstPacket = first_packet;
    packet.markerBit = marker_bit;
    if (packet.isFirstPacket)
      packet.completeNALU = kNaluStart;
    else if (packet.markerBit)
      packet.completeNALU = kNaluEnd;
    else
      packet.completeNALU = kNaluIncomplete;
    return packet;
  }

  // Copies the packet at |index| into |packet| (if non-NULL) and removes it
  // from the pending list. Returns false if no such packet exists.
  bool PopPacket(VCMPacket* packet, int index) {
    std::list<VCMPacket>::iterator it = GetPacketIterator(index);
    if (it == packets_.end())
      return false;
    if (packet)
      *packet = (*it);
    packets_.erase(it);
    return true;
  }

  // Like PopPacket() but leaves the packet in the pending list.
  bool GetPacket(VCMPacket* packet, int index) {
    std::list<VCMPacket>::iterator it = GetPacketIterator(index);
    if (it == packets_.end())
      return false;
    if (packet)
      *packet = (*it);
    return true;
  }

  // Pops the front packet. Passing NULL simply drops it (simulated loss).
  bool NextPacket(VCMPacket* packet) {
    if (packets_.empty())
      return false;
    if (packet != NULL)
      *packet = packets_.front();
    packets_.pop_front();
    return true;
  }

  // Sequence number of the next packet to be delivered (or, when the list is
  // empty, the next number that will be assigned).
  uint16_t NextSequenceNumber() const {
    if (packets_.empty())
      return sequence_number_;
    return packets_.front().seqNum;
  }

  // Number of packets still pending delivery (narrowed from size_t to int).
  int PacketsRemaining() const {
    return packets_.size();
  }

 private:
  // Returns an iterator to the |index|-th pending packet, or end() when the
  // index is out of range.
  std::list<VCMPacket>::iterator GetPacketIterator(int index) {
    std::list<VCMPacket>::iterator it = packets_.begin();
    for (int i = 0; i < index; ++i) {
      ++it;
      if (it == packets_.end()) break;
    }
    return it;
  }

  std::list<VCMPacket> packets_;  // Packets pending delivery.
  uint16_t sequence_number_;      // Next sequence number to assign.
  uint32_t timestamp_;            // RTP timestamp of the current frame.
  int64_t start_time_;            // Clock value at construction/Init.

  DISALLOW_COPY_AND_ASSIGN(StreamGenerator);
};
|
||||
|
||||
// Fixture running a started VCMJitterBuffer against a StreamGenerator and a
// fake clock, with helpers for inserting generated frames and pulling frames
// for decoding.
class TestRunningJitterBuffer : public ::testing::Test {
 protected:
  enum { kDataBufferSize = 10 };
  enum { kDefaultFrameRate = 25 };
  enum { kDefaultFramePeriodMs = 1000 / kDefaultFrameRate };

  virtual void SetUp() {
    clock_ = new FakeTickTime(0);
    jitter_buffer_ = new VCMJitterBuffer(clock_);
    stream_generator = new StreamGenerator(0, 0,
                                           clock_->MillisecondTimestamp());
    jitter_buffer_->Start();
    memset(data_buffer_, 0, kDataBufferSize);
  }

  virtual void TearDown() {
    jitter_buffer_->Stop();
    delete stream_generator;
    delete jitter_buffer_;
    delete clock_;
  }

  // Pops generated packet |index| and inserts it into the jitter buffer,
  // returning the buffer's status code.
  VCMFrameBufferEnum InsertPacketAndPop(int index) {
    VCMPacket packet;
    VCMEncodedFrame* frame;

    packet.dataPtr = data_buffer_;
    bool packet_available = stream_generator->PopPacket(&packet, index);
    EXPECT_TRUE(packet_available);
    if (!packet_available)
      return kStateError;  // Return here to avoid crashes below.
    EXPECT_EQ(VCM_OK, jitter_buffer_->GetFrame(packet, frame));
    return jitter_buffer_->InsertPacket(frame, packet);
  }

  // Like InsertPacketAndPop() but leaves the packet in the generator, so the
  // same packet can be inserted again (duplicate-packet scenarios).
  VCMFrameBufferEnum InsertPacket(int index) {
    VCMPacket packet;
    VCMEncodedFrame* frame;

    packet.dataPtr = data_buffer_;
    bool packet_available = stream_generator->GetPacket(&packet, index);
    EXPECT_TRUE(packet_available);
    if (!packet_available)
      return kStateError;  // Return here to avoid crashes below.
    EXPECT_EQ(VCM_OK, jitter_buffer_->GetFrame(packet, frame));
    return jitter_buffer_->InsertPacket(frame, packet);
  }

  // Generates a single-packet frame of |frame_type| (one empty packet for
  // kFrameEmpty), inserts it, and advances the clock one frame period.
  void InsertFrame(FrameType frame_type) {
    stream_generator->GenerateFrame(frame_type,
                                    (frame_type != kFrameEmpty) ? 1 : 0,
                                    (frame_type == kFrameEmpty) ? 1 : 0,
                                    clock_->MillisecondTimestamp());
    EXPECT_EQ(kFirstPacket, InsertPacketAndPop(0));
    clock_->IncrementDebugClock(kDefaultFramePeriodMs);
  }

  // Inserts |num_frames| single-packet frames of the given type.
  void InsertFrames(int num_frames, FrameType frame_type) {
    for (int i = 0; i < num_frames; ++i) {
      InsertFrame(frame_type);
    }
  }

  // Generates a delta frame of |num_packets| packets but never inserts it,
  // simulating a completely lost frame.
  void DropFrame(int num_packets) {
    stream_generator->GenerateFrame(kVideoFrameDelta, num_packets, 0,
                                    clock_->MillisecondTimestamp());
    clock_->IncrementDebugClock(kDefaultFramePeriodMs);
  }

  // Attempts to pull a complete frame without waiting; true on success.
  // NOTE(review): ReleaseFrame() is called even when |frame| is NULL --
  // assumes it tolerates NULL, confirm in jitter_buffer.cc.
  bool DecodeCompleteFrame() {
    VCMEncodedFrame* frame = jitter_buffer_->GetCompleteFrameForDecoding(0);
    bool ret = (frame != NULL);
    jitter_buffer_->ReleaseFrame(frame);
    return ret;
  }

  // Attempts to pull any (possibly incomplete) frame; true on success.
  bool DecodeFrame() {
    VCMEncodedFrame* frame = jitter_buffer_->GetFrameForDecoding();
    bool ret = (frame != NULL);
    jitter_buffer_->ReleaseFrame(frame);
    return ret;
  }

  VCMJitterBuffer* jitter_buffer_;
  StreamGenerator* stream_generator;  // TODO: rename to stream_generator_.
  FakeTickTime* clock_;
  uint8_t data_buffer_[kDataBufferSize];
};
|
||||
|
||||
// Same fixture as TestRunningJitterBuffer but with NACK enabled
// (kNackInfinite, no RTT thresholds).
class TestJitterBufferNack : public TestRunningJitterBuffer {
 protected:
  virtual void SetUp() {
    TestRunningJitterBuffer::SetUp();
    jitter_buffer_->SetNackMode(kNackInfinite, -1, -1);
  }

  virtual void TearDown() {
    TestRunningJitterBuffer::TearDown();
  }
};
|
||||
|
||||
// Completely fills the jitter buffer with delta frames and verifies that
// recycling kicks in and decoding stalls until the next key frame.
TEST_F(TestRunningJitterBuffer, TestFull) {
  // Insert a key frame and decode it.
  InsertFrame(kVideoFrameKey);
  EXPECT_TRUE(DecodeCompleteFrame());
  DropFrame(1);
  // Fill the jitter buffer.
  InsertFrames(kMaxNumberOfFrames, kVideoFrameDelta);
  // Make sure we can't decode these frames.
  EXPECT_FALSE(DecodeCompleteFrame());
  // This frame will make the jitter buffer recycle frames until a key frame.
  // Since none is found it will have to wait until the next key frame before
  // decoding.
  InsertFrame(kVideoFrameDelta);
  EXPECT_FALSE(DecodeCompleteFrame());
}
|
||||
|
||||
// Inserts a 3-media/3-empty-packet key frame out of order and verifies the
// session completes once the media packets are all in.
TEST_F(TestRunningJitterBuffer, TestEmptyPackets) {
  // Make sure a frame can get complete even though empty packets are missing.
  stream_generator->GenerateFrame(kVideoFrameKey, 3, 3,
                                  clock_->MillisecondTimestamp());
  // Insert the two empty packets first (indices 4 and 3 after each pop),
  // then the three media packets.
  EXPECT_EQ(kFirstPacket, InsertPacketAndPop(4));
  EXPECT_EQ(kIncomplete, InsertPacketAndPop(4));
  EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
  EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
  EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
}
|
||||
|
||||
// In hybrid NACK mode, a buffer's worth of empty frames must not prevent a
// subsequent key frame from being decoded.
TEST_F(TestJitterBufferNack, TestEmptyPackets) {
  // Make sure empty packets doesn't clog the jitter buffer.
  jitter_buffer_->SetNackMode(kNackHybrid, kLowRttNackMs, -1);
  InsertFrames(kMaxNumberOfFrames, kFrameEmpty);
  InsertFrame(kVideoFrameKey);
  EXPECT_TRUE(DecodeCompleteFrame());
}
|
||||
|
||||
// Overflows the NACK history and verifies that the jitter buffer gives up on
// retransmissions and signals a key frame request instead
// (nack_list == NULL, size == 0xffff).
TEST_F(TestJitterBufferNack, TestNackListFull) {
  // Insert a key frame and decode it.
  InsertFrame(kVideoFrameKey);
  EXPECT_TRUE(DecodeCompleteFrame());

  // Generate and drop |kNackHistoryLength| packets to fill the NACK list.
  DropFrame(kNackHistoryLength);
  // Insert a frame which should trigger a recycle until the next key frame.
  InsertFrame(kVideoFrameDelta);
  EXPECT_FALSE(DecodeCompleteFrame());

  uint16_t nack_list_length = kNackHistoryLength;
  bool extended;
  uint16_t* nack_list = jitter_buffer_->CreateNackList(&nack_list_length,
                                                       &extended);
  // Verify that the jitter buffer requests a key frame.
  EXPECT_TRUE(nack_list_length == 0xffff && nack_list == NULL);

  InsertFrame(kVideoFrameDelta);
  EXPECT_FALSE(DecodeCompleteFrame());
  EXPECT_FALSE(DecodeFrame());
}
|
||||
|
||||
// Losing ten packets before anything is decoded should produce a key frame
// request rather than a NACK list.
TEST_F(TestJitterBufferNack, TestNackBeforeDecode) {
  DropFrame(10);
  // Insert a frame and try to generate a NACK list. Shouldn't get one.
  InsertFrame(kVideoFrameDelta);
  uint16_t nack_list_size = 0;
  bool extended = false;
  uint16_t* list = jitter_buffer_->CreateNackList(&nack_list_size, &extended);
  // No list generated, and a key frame request is signaled.
  EXPECT_TRUE(list == NULL);
  EXPECT_EQ(0xFFFF, nack_list_size);
}
|
||||
|
||||
// Streams a 100-packet key frame, dropping every 10th packet, and verifies
// the generated NACK list contains exactly the dropped sequence numbers.
TEST_F(TestJitterBufferNack, TestNormalOperation) {
  EXPECT_EQ(kNackInfinite, jitter_buffer_->nack_mode());

  InsertFrame(kVideoFrameKey);
  EXPECT_TRUE(DecodeFrame());

  //  ----------------------------------------------------------------
  // | 1 | 2 | .. | 8 | 9 | x | 11 | 12 | .. | 19 | x | 21 | .. | 100 |
  //  ----------------------------------------------------------------
  stream_generator->GenerateFrame(kVideoFrameKey, 100, 0,
                                  clock_->MillisecondTimestamp());
  clock_->IncrementDebugClock(kDefaultFramePeriodMs);
  EXPECT_EQ(kFirstPacket, InsertPacketAndPop(0));
  // Verify that the frame is incomplete.
  EXPECT_FALSE(DecodeCompleteFrame());
  // Insert all remaining packets except every 10th (simulated loss).
  while (stream_generator->PacketsRemaining() > 1) {
    if (stream_generator->NextSequenceNumber() % 10 != 0)
      EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
    else
      stream_generator->NextPacket(NULL);  // Drop packet
  }
  EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
  EXPECT_EQ(0, stream_generator->PacketsRemaining());
  EXPECT_FALSE(DecodeCompleteFrame());
  EXPECT_FALSE(DecodeFrame());
  uint16_t nack_list_size = 0;
  bool extended = false;
  uint16_t* list = jitter_buffer_->CreateNackList(&nack_list_size, &extended);
  // Verify the NACK list: the nine dropped sequence numbers 10, 20, ..., 90.
  const int kExpectedNackSize = 9;
  ASSERT_EQ(kExpectedNackSize, nack_list_size);
  for (int i = 0; i < nack_list_size; ++i)
    EXPECT_EQ((1 + i) * 10, list[i]);
}
|
||||
|
||||
// Same loss pattern as TestNormalOperation but starting at sequence number
// 65532, so the NACK list must be generated correctly across the 16-bit
// sequence number wrap.
TEST_F(TestJitterBufferNack, TestNormalOperationWrap) {
  //  -------   ------------------------------------------------------------
  // | 65532 | | 65533 | 65534 | 65535 | x | 1 | .. | 9 | x | 11 |.....| 96 |
  //  -------   ------------------------------------------------------------
  stream_generator->Init(65532, 0, clock_->MillisecondTimestamp());
  InsertFrame(kVideoFrameKey);
  EXPECT_TRUE(DecodeCompleteFrame());
  stream_generator->GenerateFrame(kVideoFrameDelta, 100, 0,
                                  clock_->MillisecondTimestamp());
  EXPECT_EQ(kFirstPacket, InsertPacketAndPop(0));
  // Insert all remaining packets except every 10th (simulated loss).
  while (stream_generator->PacketsRemaining() > 1) {
    if (stream_generator->NextSequenceNumber() % 10 != 0)
      EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
    else
      stream_generator->NextPacket(NULL);  // Drop packet
  }
  EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
  EXPECT_EQ(0, stream_generator->PacketsRemaining());
  EXPECT_FALSE(DecodeCompleteFrame());
  // NOTE(review): DecodeCompleteFrame() is checked twice here, while the
  // non-wrap test checks DecodeFrame() the second time -- possibly a typo.
  EXPECT_FALSE(DecodeCompleteFrame());
  uint16_t nack_list_size = 0;
  bool extended = false;
  uint16_t* list = jitter_buffer_->CreateNackList(&nack_list_size, &extended);
  // Verify the NACK list: the ten dropped sequence numbers 0, 10, ..., 90.
  const int kExpectedNackSize = 10;
  ASSERT_EQ(kExpectedNackSize, nack_list_size);
  for (int i = 0; i < nack_list_size; ++i)
    EXPECT_EQ(i * 10, list[i]);
}
|
||||
|
||||
} // namespace webrtc
|
||||
421
webrtc/modules/video_coding/main/source/jitter_estimator.cc
Normal file
421
webrtc/modules/video_coding/main/source/jitter_estimator.cc
Normal file
@ -0,0 +1,421 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "trace.h"
|
||||
#include "internal_defines.h"
|
||||
#include "jitter_estimator.h"
|
||||
#include "rtt_filter.h"
|
||||
|
||||
#include <assert.h>
|
||||
#include <math.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Constructs a jitter estimator with fixed filter constants; |vcmId| and
// |receiverId| are used for trace logging and the RTT filter. All mutable
// state is initialized via Reset().
VCMJitterEstimator::VCMJitterEstimator(WebRtc_Word32 vcmId, WebRtc_Word32 receiverId) :
    _vcmId(vcmId),
    _receiverId(receiverId),
    _phi(0.97),       // Recursive-average factor for the frame size filter.
    _psi(0.9999),     // Decay factor for the max frame size estimate.
    _alphaCountMax(400),
    _thetaLow(0.000001),
    _nackLimit(3),
    _numStdDevDelayOutlier(15),
    _numStdDevFrameSizeOutlier(3),
    _noiseStdDevs(2.33),       // ~Less than 1% chance
                               // (look up in normal distribution table)...
    _noiseStdDevOffset(30.0),  // ...of getting 30 ms freezes
    _rttFilter(vcmId, receiverId)
{
    Reset();
}
|
||||
|
||||
// Deep copy of the estimator state (used e.g. when a jitter buffer is copied).
// NOTE(review): _theta and _varNoise are not copied here -- confirm whether
// leaving the Kalman slope/noise state untouched is intentional.
VCMJitterEstimator&
VCMJitterEstimator::operator=(const VCMJitterEstimator& rhs)
{
    if (this != &rhs)
    {
        memcpy(_thetaCov, rhs._thetaCov, sizeof(_thetaCov));
        memcpy(_Qcov, rhs._Qcov, sizeof(_Qcov));

        _vcmId = rhs._vcmId;
        _receiverId = rhs._receiverId;
        _avgFrameSize = rhs._avgFrameSize;
        _varFrameSize = rhs._varFrameSize;
        _maxFrameSize = rhs._maxFrameSize;
        _fsSum = rhs._fsSum;
        _fsCount = rhs._fsCount;
        _lastUpdateT = rhs._lastUpdateT;
        _prevEstimate = rhs._prevEstimate;
        _prevFrameSize = rhs._prevFrameSize;
        _avgNoise = rhs._avgNoise;
        _alphaCount = rhs._alphaCount;
        _filterJitterEstimate = rhs._filterJitterEstimate;
        _startupCount = rhs._startupCount;
        _latestNackTimestamp = rhs._latestNackTimestamp;
        _nackCount = rhs._nackCount;
        _rttFilter = rhs._rttFilter;
    }
    return *this;
}
|
||||
|
||||
// Resets the JitterEstimate: restores the Kalman filter state, the frame size
// statistics and the NACK bookkeeping to their initial values.
void
VCMJitterEstimator::Reset()
{
    _theta[0] = 1/(512e3/8);  // Initial line slope (presumably derived from a
                              // 512 kbps channel -- see UpdateEstimate()).
    _theta[1] = 0;
    _varNoise = 4.0;

    // Initial estimate covariance and process noise covariance.
    _thetaCov[0][0] = 1e-4;
    _thetaCov[1][1] = 1e2;
    _thetaCov[0][1] = _thetaCov[1][0] = 0;
    _Qcov[0][0] = 2.5e-10;
    _Qcov[1][1] = 1e-10;
    _Qcov[0][1] = _Qcov[1][0] = 0;
    _avgFrameSize = 500;
    _maxFrameSize = 500;
    _varFrameSize = 100;
    _lastUpdateT = -1;
    _prevEstimate = -1.0;
    _prevFrameSize = 0;
    _avgNoise = 0.0;
    _alphaCount = 1;
    _filterJitterEstimate = 0.0;
    _latestNackTimestamp = 0;
    _nackCount = 0;
    _fsSum = 0;
    _fsCount = 0;
    _startupCount = 0;
    _rttFilter.Reset();
}
|
||||
|
||||
// Resets the count of NACKed frames to zero.
void
VCMJitterEstimator::ResetNackCount()
{
    _nackCount = 0;
}
|
||||
|
||||
// Updates the estimates with the new measurements.
//
// Feeds one (frameDelayMS, frameSizeBytes) sample into the frame-size
// statistics, the random-jitter estimate and — when the sample is not an
// extreme outlier — the Kalman channel filter. After kStartupDelaySamples
// samples the filtered jitter estimate is refreshed via
// PostProcessEstimate().
void
VCMJitterEstimator::UpdateEstimate(WebRtc_Word64 frameDelayMS, WebRtc_UWord32 frameSizeBytes,
                                   bool incompleteFrame /* = false */)
{
    // NOTE(review): the format string uses %d for a WebRtc_Word64
    // (frameDelayMS); on platforms where int is 32-bit this mismatches the
    // vararg width — confirm against the WEBRTC_TRACE implementation.
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
                 VCMId(_vcmId, _receiverId),
                 "Jitter estimate updated with: frameSize=%d frameDelayMS=%d",
                 frameSizeBytes, frameDelayMS);
    // A zero-sized frame carries no information; ignore it.
    if (frameSizeBytes == 0)
    {
        return;
    }
    int deltaFS = frameSizeBytes - _prevFrameSize;
    // Accumulate the first kFsAccuStartupSamples frame sizes, then seed the
    // average-frame-size filter with their mean.
    if (_fsCount < kFsAccuStartupSamples)
    {
        _fsSum += frameSizeBytes;
        _fsCount++;
    }
    else if (_fsCount == kFsAccuStartupSamples)
    {
        // Give the frame size filter its starting value (the startup mean).
        _avgFrameSize = static_cast<double>(_fsSum) /
                        static_cast<double>(_fsCount);
        _fsCount++;
    }
    // Incomplete frames smaller than the average are skipped here: they
    // under-report the true frame size.
    if (!incompleteFrame || frameSizeBytes > _avgFrameSize)
    {
        // Exponential filter of the average frame size (weight _phi).
        double avgFrameSize = _phi * _avgFrameSize +
                              (1 - _phi) * frameSizeBytes;
        if (frameSizeBytes < _avgFrameSize + 2 * sqrt(_varFrameSize))
        {
            // Only update the average frame size if this sample wasn't a
            // key frame
            _avgFrameSize = avgFrameSize;
        }
        // Update the variance anyway since we want to capture cases where we only get
        // key frames.
        _varFrameSize = VCM_MAX(_phi * _varFrameSize + (1 - _phi) *
                                (frameSizeBytes - avgFrameSize) *
                                (frameSizeBytes - avgFrameSize), 1.0);
    }

    // Update max frameSize estimate (decays with factor _psi, see header).
    _maxFrameSize = VCM_MAX(_psi * _maxFrameSize, static_cast<double>(frameSizeBytes));

    // The very first frame produces no usable deltaFS; just record its size.
    if (_prevFrameSize == 0)
    {
        _prevFrameSize = frameSizeBytes;
        return;
    }
    _prevFrameSize = frameSizeBytes;

    // Only update the Kalman filter if the sample is not considered
    // an extreme outlier. Even if it is an extreme outlier from a
    // delay point of view, if the frame size also is large the
    // deviation is probably due to an incorrect line slope.
    double deviation = DeviationFromExpectedDelay(frameDelayMS, deltaFS);

    // NOTE(review): abs() here is applied to a double; without a
    // floating-point overload in scope this resolves to the integer abs()
    // and truncates the deviation — fabs() looks intended. Confirm which
    // overload the file's includes select.
    if (abs(deviation) < _numStdDevDelayOutlier * sqrt(_varNoise) ||
        frameSizeBytes > _avgFrameSize + _numStdDevFrameSizeOutlier * sqrt(_varFrameSize))
    {
        // Update the variance of the deviation from the
        // line given by the Kalman filter
        EstimateRandomJitter(deviation, incompleteFrame);
        // Prevent updating with frames which have been congested by a large
        // frame, and therefore arrives almost at the same time as that frame.
        // This can occur when we receive a large frame (key frame) which
        // has been delayed. The next frame is of normal size (delta frame),
        // and thus deltaFS will be << 0. This removes all frame samples
        // which arrives after a key frame.
        if ((!incompleteFrame || deviation >= 0.0) &&
            static_cast<double>(deltaFS) > - 0.25 * _maxFrameSize)
        {
            // Update the Kalman filter with the new data
            KalmanEstimateChannel(frameDelayMS, deltaFS);
        }
    }
    else
    {
        // Outlier: feed the noise filter a clipped deviation (the outlier
        // threshold with the sample's sign) instead of the raw value.
        int nStdDev = (deviation >= 0) ? _numStdDevDelayOutlier : -_numStdDevDelayOutlier;
        EstimateRandomJitter(nStdDev * sqrt(_varNoise), incompleteFrame);
    }
    // Post process the total estimated jitter once the startup phase is over.
    if (_startupCount >= kStartupDelaySamples)
    {
        PostProcessEstimate();
    }
    else
    {
        _startupCount++;
    }
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                 "Framesize statistics: max=%f average=%f", _maxFrameSize, _avgFrameSize);
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                 "The estimated slope is: theta=(%f, %f)", _theta[0], _theta[1]);
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                 "Random jitter: mean=%f variance=%f", _avgNoise, _varNoise);
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                 "Current jitter estimate: %f", _filterJitterEstimate);
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                 "Current max RTT: %u", _rttFilter.RttMs());
}
|
||||
|
||||
// Updates the nack/packet ratio
|
||||
void
|
||||
VCMJitterEstimator::FrameNacked()
|
||||
{
|
||||
// Wait until _nackLimit retransmissions has been received,
|
||||
// then always add ~1 RTT delay.
|
||||
// TODO(holmer): Should we ever remove the additional delay if the
|
||||
// the packet losses seem to have stopped? We could for instance scale
|
||||
// the number of RTTs to add with the amount of retransmissions in a given
|
||||
// time interval, or similar.
|
||||
if (_nackCount < _nackLimit)
|
||||
{
|
||||
_nackCount++;
|
||||
}
|
||||
}
|
||||
|
||||
// Updates Kalman estimate of the channel.
// The caller is expected to sanity check the inputs.
//
// One prediction/correction step of the 2-state Kalman filter estimating
// _theta = (slope, offset) of the delay-vs-frame-size-delta line. The
// measurement weight (sigma) grows for small |deltaFSBytes|, so samples
// with a large size delta influence the slope the most.
void
VCMJitterEstimator::KalmanEstimateChannel(WebRtc_Word64 frameDelayMS,
                                          WebRtc_Word32 deltaFSBytes)
{
    double Mh[2];           // M*h'
    double hMh_sigma;       // h*M*h' + sigma (innovation variance)
    double kalmanGain[2];
    double measureRes;      // measurement residual (innovation)
    double t00, t01;        // saved covariance entries for the update below

    // Kalman filtering

    // Prediction
    // M = M + Q
    _thetaCov[0][0] += _Qcov[0][0];
    _thetaCov[0][1] += _Qcov[0][1];
    _thetaCov[1][0] += _Qcov[1][0];
    _thetaCov[1][1] += _Qcov[1][1];

    // Kalman gain
    // K = M*h'/(sigma2n + h*M*h') = M*h'/(1 + h*M*h')
    // h = [dFS 1]
    // Mh = M*h'
    // hMh_sigma = h*M*h' + R
    Mh[0] = _thetaCov[0][0] * deltaFSBytes + _thetaCov[0][1];
    Mh[1] = _thetaCov[1][0] * deltaFSBytes + _thetaCov[1][1];
    // sigma weights measurements with a small deltaFS as noisy and
    // measurements with large deltaFS as good
    if (_maxFrameSize < 1.0)
    {
        return;
    }
    // NOTE(review): abs() on a double argument — without a floating-point
    // overload in scope this resolves to integer abs() and truncates;
    // fabs() looks intended. Confirm against the file's includes.
    double sigma = (300.0 * exp(-abs(static_cast<double>(deltaFSBytes)) /
                   (1e0 * _maxFrameSize)) + 1) * sqrt(_varNoise);
    if (sigma < 1.0)
    {
        sigma = 1.0;
    }
    hMh_sigma = deltaFSBytes * Mh[0] + Mh[1] + sigma;
    // Guard against division by (near) zero in the gain computation.
    if ((hMh_sigma < 1e-9 && hMh_sigma >= 0) || (hMh_sigma > -1e-9 && hMh_sigma <= 0))
    {
        assert(false);
        return;
    }
    kalmanGain[0] = Mh[0] / hMh_sigma;
    kalmanGain[1] = Mh[1] / hMh_sigma;

    // Correction
    // theta = theta + K*(dT - h*theta)
    measureRes = frameDelayMS - (deltaFSBytes * _theta[0] + _theta[1]);
    _theta[0] += kalmanGain[0] * measureRes;
    _theta[1] += kalmanGain[1] * measureRes;

    // Keep the slope above its lower bound (see _thetaLow in the header).
    if (_theta[0] < _thetaLow)
    {
        _theta[0] = _thetaLow;
    }

    // M = (I - K*h)*M
    t00 = _thetaCov[0][0];
    t01 = _thetaCov[0][1];
    _thetaCov[0][0] = (1 - kalmanGain[0] * deltaFSBytes) * t00 -
                      kalmanGain[0] * _thetaCov[1][0];
    _thetaCov[0][1] = (1 - kalmanGain[0] * deltaFSBytes) * t01 -
                      kalmanGain[0] * _thetaCov[1][1];
    _thetaCov[1][0] = _thetaCov[1][0] * (1 - kalmanGain[1]) -
                      kalmanGain[1] * deltaFSBytes * t00;
    _thetaCov[1][1] = _thetaCov[1][1] * (1 - kalmanGain[1]) -
                      kalmanGain[1] * deltaFSBytes * t01;

    // Covariance matrix, must be positive semi-definite
    assert(_thetaCov[0][0] + _thetaCov[1][1] >= 0 &&
           _thetaCov[0][0] * _thetaCov[1][1] - _thetaCov[0][1] * _thetaCov[1][0] >= 0 &&
           _thetaCov[0][0] >= 0);
}
|
||||
|
||||
// Calculate difference in delay between a sample and the
|
||||
// expected delay estimated by the Kalman filter
|
||||
double
|
||||
VCMJitterEstimator::DeviationFromExpectedDelay(WebRtc_Word64 frameDelayMS,
|
||||
WebRtc_Word32 deltaFSBytes) const
|
||||
{
|
||||
return frameDelayMS - (_theta[0] * deltaFSBytes + _theta[1]);
|
||||
}
|
||||
|
||||
// Estimates the random jitter by calculating the variance of the
// sample distance from the line given by theta.
//
// d_dT is the deviation from the Kalman line; incompleteFrame indicates the
// sample came from a frame that was not yet complete, in which case the
// filters are only updated if the new variance would grow (conservative).
void
VCMJitterEstimator::EstimateRandomJitter(double d_dT, bool incompleteFrame)
{
    double alpha;
    // _alphaCount is initialized to 1 in Reset(); zero would mean a
    // programming error (and a division by zero below).
    if (_alphaCount == 0)
    {
        assert(_alphaCount > 0);
        return;
    }
    // Growing filter weight: alpha approaches 1 as more samples arrive,
    // capped once _alphaCount reaches _alphaCountMax.
    alpha = static_cast<double>(_alphaCount - 1) / static_cast<double>(_alphaCount);
    _alphaCount++;
    if (_alphaCount > _alphaCountMax)
    {
        _alphaCount = _alphaCountMax;
    }
    // Exponentially filtered mean and variance of the deviation.
    double avgNoise = alpha * _avgNoise + (1 - alpha) * d_dT;
    double varNoise = alpha * _varNoise +
                      (1 - alpha) * (d_dT - _avgNoise) * (d_dT - _avgNoise);
    if (!incompleteFrame || varNoise > _varNoise)
    {
        _avgNoise = avgNoise;
        _varNoise = varNoise;
    }
    if (_varNoise < 1.0)
    {
        // The variance should never be zero, since we might get
        // stuck and consider all samples as outliers.
        _varNoise = 1.0;
    }
}
|
||||
|
||||
double
|
||||
VCMJitterEstimator::NoiseThreshold() const
|
||||
{
|
||||
double noiseThreshold = _noiseStdDevs * sqrt(_varNoise) - _noiseStdDevOffset;
|
||||
if (noiseThreshold < 1.0)
|
||||
{
|
||||
noiseThreshold = 1.0;
|
||||
}
|
||||
return noiseThreshold;
|
||||
}
|
||||
|
||||
// Calculates the current jitter estimate from the filtered estimates
|
||||
double
|
||||
VCMJitterEstimator::CalculateEstimate()
|
||||
{
|
||||
double ret = _theta[0] * (_maxFrameSize - _avgFrameSize) + NoiseThreshold();
|
||||
|
||||
// A very low estimate (or negative) is neglected
|
||||
if (ret < 1.0) {
|
||||
if (_prevEstimate <= 0.01)
|
||||
{
|
||||
ret = 1.0;
|
||||
}
|
||||
else
|
||||
{
|
||||
ret = _prevEstimate;
|
||||
}
|
||||
}
|
||||
if (ret > 10000.0) // Sanity
|
||||
{
|
||||
ret = 10000.0;
|
||||
}
|
||||
_prevEstimate = ret;
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Refreshes the filtered jitter estimate from the current filter state.
void
VCMJitterEstimator::PostProcessEstimate()
{
    _filterJitterEstimate = CalculateEstimate();
}
|
||||
|
||||
// Feeds a new RTT sample (in milliseconds) into the RTT filter.
void
VCMJitterEstimator::UpdateRtt(WebRtc_UWord32 rttMs)
{
    _rttFilter.Update(rttMs);
}
|
||||
|
||||
void
|
||||
VCMJitterEstimator::UpdateMaxFrameSize(WebRtc_UWord32 frameSizeBytes)
|
||||
{
|
||||
if (_maxFrameSize < frameSizeBytes)
|
||||
{
|
||||
_maxFrameSize = frameSizeBytes;
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the current filtered estimate if available,
|
||||
// otherwise tries to calculate an estimate.
|
||||
double
|
||||
VCMJitterEstimator::GetJitterEstimate(double rttMultiplier)
|
||||
{
|
||||
double jitterMS = CalculateEstimate();
|
||||
if (_filterJitterEstimate > jitterMS)
|
||||
{
|
||||
jitterMS = _filterJitterEstimate;
|
||||
}
|
||||
if (_nackCount >= _nackLimit)
|
||||
{
|
||||
return jitterMS + _rttFilter.RttMs() * rttMultiplier;
|
||||
}
|
||||
return jitterMS;
|
||||
}
|
||||
|
||||
}
|
||||
154
webrtc/modules/video_coding/main/source/jitter_estimator.h
Normal file
154
webrtc/modules/video_coding/main/source/jitter_estimator.h
Normal file
@ -0,0 +1,154 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
#include "rtt_filter.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Estimates the jitter (delay variation) of incoming video frames.
// A 2-state Kalman filter tracks the frame-size-dependent delay component,
// and an exponential filter tracks the random (size-independent) component;
// GetJitterEstimate() combines both, plus an RTT term under retransmission.
class VCMJitterEstimator
{
public:
    VCMJitterEstimator(WebRtc_Word32 vcmId = 0, WebRtc_Word32 receiverId = 0);

    VCMJitterEstimator& operator=(const VCMJitterEstimator& rhs);

    // Resets the estimate to the initial state
    void Reset();
    void ResetNackCount();

    // Updates the jitter estimate with the new data.
    //
    // Input:
    //          - frameDelay      : Delay-delta calculated by UTILDelayEstimate in milliseconds
    //          - frameSize       : Frame size of the current frame.
    //          - incompleteFrame : Flags if the frame is used to update the estimate before it
    //                              was complete. Default is false.
    void UpdateEstimate(WebRtc_Word64 frameDelayMS,
                        WebRtc_UWord32 frameSizeBytes,
                        bool incompleteFrame = false);

    // Returns the current jitter estimate in milliseconds and adds
    // also adds an RTT dependent term in cases of retransmission.
    //  Input:
    //          - rttMultiplier  : RTT param multiplier (when applicable).
    //
    // Return value              : Jitter estimate in milliseconds
    double GetJitterEstimate(double rttMultiplier);

    // Updates the nack counter.
    void FrameNacked();

    // Updates the RTT filter.
    //
    // Input:
    //          - rttMs          : RTT in ms
    void UpdateRtt(WebRtc_UWord32 rttMs);

    void UpdateMaxFrameSize(WebRtc_UWord32 frameSizeBytes);

    // A constant describing the delay from the jitter buffer
    // to the delay on the receiving side which is not accounted
    // for by the jitter buffer nor the decoding delay estimate.
    static const WebRtc_UWord32 OPERATING_SYSTEM_JITTER = 10;

protected:
    // These are protected for better testing possibilities
    double              _theta[2]; // Estimated line parameters (slope, offset)
    double              _varNoise; // Variance of the time-deviation from the line

private:
    // Updates the Kalman filter for the line describing
    // the frame size dependent jitter.
    //
    // Input:
    //          - frameDelayMS   : Delay-delta calculated by UTILDelayEstimate in milliseconds
    //          - deltaFSBytes   : Frame size delta, i.e.
    //                           : frame size at time T minus frame size at time T-1
    void KalmanEstimateChannel(WebRtc_Word64 frameDelayMS, WebRtc_Word32 deltaFSBytes);

    // Updates the random jitter estimate, i.e. the variance
    // of the time deviations from the line given by the Kalman filter.
    //
    // Input:
    //          - d_dT            : The deviation from the kalman estimate
    //          - incompleteFrame : True if the frame used to update the estimate
    //                              with was incomplete
    void EstimateRandomJitter(double d_dT, bool incompleteFrame);

    double NoiseThreshold() const;

    // Calculates the current jitter estimate.
    //
    // Return value             : The current jitter estimate in milliseconds
    double CalculateEstimate();

    // Post process the calculated estimate
    void PostProcessEstimate();

    // Calculates the difference in delay between a sample and the
    // expected delay estimated by the Kalman filter.
    //
    // Input:
    //          - frameDelayMS   : Delay-delta calculated by UTILDelayEstimate in milliseconds
    //          - deltaFS        : Frame size delta, i.e. frame size at time
    //                             T minus frame size at time T-1
    //
    // Return value             : The difference in milliseconds
    double DeviationFromExpectedDelay(WebRtc_Word64 frameDelayMS,
                                      WebRtc_Word32 deltaFSBytes) const;

    // Constants, filter parameters
    WebRtc_Word32         _vcmId;
    WebRtc_Word32         _receiverId;
    const double          _phi;                   // Frame-size filter weight
    const double          _psi;                   // Max-frame-size decay factor
    const WebRtc_UWord32  _alphaCountMax;         // Cap for the noise filter weight
    const double          _thetaLow;              // Lower bound for the line slope
    const WebRtc_UWord32  _nackLimit;
    const WebRtc_Word32   _numStdDevDelayOutlier;
    const WebRtc_Word32   _numStdDevFrameSizeOutlier;
    const double          _noiseStdDevs;
    const double          _noiseStdDevOffset;

    double                _thetaCov[2][2];        // Estimate covariance
    double                _Qcov[2][2];            // Process noise covariance
    double                _avgFrameSize;          // Average frame size
    double                _varFrameSize;          // Frame size variance
    double                _maxFrameSize;          // Largest frame size received (descending
                                                  // with a factor _psi)
    WebRtc_UWord32        _fsSum;
    WebRtc_UWord32        _fsCount;

    WebRtc_Word64         _lastUpdateT;
    double                _prevEstimate;          // The previously returned jitter estimate
    WebRtc_UWord32        _prevFrameSize;         // Frame size of the previous frame
    double                _avgNoise;              // Average of the random jitter
    WebRtc_UWord32        _alphaCount;
    double                _filterJitterEstimate;  // The filtered sum of jitter estimates

    WebRtc_UWord32        _startupCount;

    WebRtc_Word64         _latestNackTimestamp;   // Timestamp in ms when the latest nack was seen
    WebRtc_UWord32        _nackCount;             // Keeps track of the number of nacks received,
                                                  // but never goes above _nackLimit
    VCMRttFilter          _rttFilter;

    enum { kStartupDelaySamples = 30 };   // Samples before PostProcessEstimate() runs
    enum { kFsAccuStartupSamples = 5 };   // Samples used to seed the avg frame size
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
|
||||
953
webrtc/modules/video_coding/main/source/media_opt_util.cc
Normal file
953
webrtc/modules/video_coding/main/source/media_opt_util.cc
Normal file
@ -0,0 +1,953 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "modules/video_coding/main/source/media_opt_util.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <math.h>
|
||||
#include <float.h>
|
||||
#include <limits.h>
|
||||
|
||||
#include "modules/interface/module_common_types.h"
|
||||
#include "modules/video_coding/codecs/vp8/include/vp8_common_types.h"
|
||||
#include "modules/video_coding/main/interface/video_coding_defines.h"
|
||||
#include "modules/video_coding/main/source/er_tables_xor.h"
|
||||
#include "modules/video_coding/main/source/fec_tables_xor.h"
|
||||
#include "modules/video_coding/main/source/nack_fec_tables.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Initializes a protection method with its default parameters:
// no protection factors, a 1460-byte max payload, a 2x key-frame
// protection scale, and UEP enabled for delta frames only.
// Owns the VCMQmRobustness allocated here (released in the destructor).
VCMProtectionMethod::VCMProtectionMethod():
_effectivePacketLoss(0),
_protectionFactorK(0),
_protectionFactorD(0),
_residualPacketLossFec(0.0f),
_scaleProtKey(2.0f),
_maxPayloadSize(1460),
_qmRobustness(new VCMQmRobustness()),
_useUepProtectionK(false),
_useUepProtectionD(true),
_corrFecCost(1.0),
_type(kNone),
_efficiency(0)
{
    //
}
|
||||
|
||||
// Releases the VCMQmRobustness instance allocated in the constructor.
VCMProtectionMethod::~VCMProtectionMethod()
{
    delete _qmRobustness;
}
|
||||
// Forwards the latest video content metrics to the robustness model.
void
VCMProtectionMethod::UpdateContentMetrics(const
                                          VideoContentMetrics* contentMetrics)
{
    _qmRobustness->UpdateContent(contentMetrics);
}
|
||||
|
||||
// Constructs the hybrid NACK/FEC method.
// RTT thresholds (in ms) select the operating mode; -1 disables the
// corresponding bound. Valid combinations (enforced by the asserts):
// both thresholds >= -1, low <= high when both set, and a high threshold
// requires a low one.
VCMNackFecMethod::VCMNackFecMethod(int lowRttNackThresholdMs,
                                   int highRttNackThresholdMs)
    : VCMFecMethod(),
      _lowRttNackMs(lowRttNackThresholdMs),
      _highRttNackMs(highRttNackThresholdMs),
      _maxFramesFec(1) {
  assert(lowRttNackThresholdMs >= -1 && highRttNackThresholdMs >= -1);
  assert(highRttNackThresholdMs == -1 ||
         lowRttNackThresholdMs <= highRttNackThresholdMs);
  assert(lowRttNackThresholdMs > -1 || highRttNackThresholdMs == -1);
  _type = kNackFec;
}
|
||||
|
||||
// No resources of its own; base class handles cleanup.
VCMNackFecMethod::~VCMNackFecMethod()
{
    //
}
|
||||
// Computes the hybrid NACK/FEC protection factors for the current
// parameters. Always returns true.
bool
VCMNackFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters)
{
    // Hybrid Nack FEC has three operational modes:
    // 1. Low RTT (below kLowRttNackMs) - Nack only: Set FEC rate
    //    (_protectionFactorD) to zero. -1 means no FEC.
    // 2. High RTT (above _highRttNackMs) - FEC Only: Keep FEC factors.
    //    -1 means always allow NACK.
    // 3. Medium RTT values - Hybrid mode: We will only nack the
    //    residual following the decoding of the FEC (refer to JB logic). FEC
    //    delta protection factor will be adjusted based on the RTT.

    // Otherwise: we count on FEC; if the RTT is below a threshold, then we
    // nack the residual, based on a decision made in the JB.

    // Compute the protection factors via the FEC base implementation first.
    VCMFecMethod::ProtectionFactor(parameters);
    if (_lowRttNackMs == -1 || parameters->rtt < _lowRttNackMs)
    {
        // Mode 1: NACK only — no FEC on delta frames.
        _protectionFactorD = 0;
        VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
    }

    // When in Hybrid mode (RTT range), adjust FEC rates based on the
    // RTT (NACK effectiveness) - adjustment factor is in the range [0,1].
    else if (_highRttNackMs == -1 || parameters->rtt < _highRttNackMs)
    {
        // TODO(mikhal): Disabling adjustment temporarily.
        // WebRtc_UWord16 rttIndex = (WebRtc_UWord16) parameters->rtt;
        float adjustRtt = 1.0f;// (float)VCMNackFecTable[rttIndex] / 100.0f;

        // Adjust FEC with NACK on (for delta frame only)
        // table depends on RTT relative to rttMax (NACK Threshold)
        _protectionFactorD = static_cast<WebRtc_UWord8>
                            (adjustRtt *
                             static_cast<float>(_protectionFactorD));
        // update FEC rates after applying adjustment
        VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
    }

    return true;
}
|
||||
|
||||
int VCMNackFecMethod::ComputeMaxFramesFec(
|
||||
const VCMProtectionParameters* parameters) {
|
||||
if (parameters->numLayers > 2) {
|
||||
// For more than 2 temporal layers we will only have FEC on the base layer,
|
||||
// and the base layers will be pretty far apart. Therefore we force one
|
||||
// frame FEC.
|
||||
return 1;
|
||||
}
|
||||
// We set the max number of frames to base the FEC on so that on average
|
||||
// we will have complete frames in one RTT. Note that this is an upper
|
||||
// bound, and that the actual number of frames used for FEC is decided by the
|
||||
// RTP module based on the actual number of packets and the protection factor.
|
||||
float base_layer_framerate = parameters->frameRate /
|
||||
static_cast<float>(1 << (parameters->numLayers - 1));
|
||||
int max_frames_fec = std::max(static_cast<int>(
|
||||
2.0f * base_layer_framerate * parameters->rtt /
|
||||
1000.0f + 0.5f), 1);
|
||||
// |kUpperLimitFramesFec| is the upper limit on how many frames we
|
||||
// allow any FEC to be based on.
|
||||
if (max_frames_fec > kUpperLimitFramesFec) {
|
||||
max_frames_fec = kUpperLimitFramesFec;
|
||||
}
|
||||
return max_frames_fec;
|
||||
}
|
||||
|
||||
// Returns the most recently computed FEC frame-span bound
// (set by UpdateParameters via ComputeMaxFramesFec).
int VCMNackFecMethod::MaxFramesFec() const {
  return _maxFramesFec;
}
|
||||
|
||||
bool VCMNackFecMethod::BitRateTooLowForFec(
|
||||
const VCMProtectionParameters* parameters) {
|
||||
// Bitrate below which we turn off FEC, regardless of reported packet loss.
|
||||
// The condition should depend on resolution and content. For now, use
|
||||
// threshold on bytes per frame, with some effect for the frame size.
|
||||
// The condition for turning off FEC is also based on other factors,
|
||||
// such as |_numLayers|, |_maxFramesFec|, and |_rtt|.
|
||||
int estimate_bytes_per_frame = 1000 * BitsPerFrame(parameters) / 8;
|
||||
int max_bytes_per_frame = kMaxBytesPerFrameForFec;
|
||||
int num_pixels = parameters->codecWidth * parameters->codecHeight;
|
||||
if (num_pixels <= 352 * 288) {
|
||||
max_bytes_per_frame = kMaxBytesPerFrameForFecLow;
|
||||
} else if (num_pixels > 640 * 480) {
|
||||
max_bytes_per_frame = kMaxBytesPerFrameForFecHigh;
|
||||
}
|
||||
// TODO (marpan): add condition based on maximum frames used for FEC,
|
||||
// and expand condition based on frame size.
|
||||
if (estimate_bytes_per_frame < max_bytes_per_frame &&
|
||||
parameters->numLayers < 3 &&
|
||||
parameters->rtt < kMaxRttTurnOffFec) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
VCMNackFecMethod::EffectivePacketLoss(const VCMProtectionParameters* parameters)
|
||||
{
|
||||
// Set the effective packet loss for encoder (based on FEC code).
|
||||
// Compute the effective packet loss and residual packet loss due to FEC.
|
||||
VCMFecMethod::EffectivePacketLoss(parameters);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Recomputes protection factors, effective loss, the FEC frame span and
// the efficiency (overhead bitrate) for the hybrid method.
// Always returns true.
bool
VCMNackFecMethod::UpdateParameters(const VCMProtectionParameters* parameters)
{
    ProtectionFactor(parameters);
    EffectivePacketLoss(parameters);
    _maxFramesFec = ComputeMaxFramesFec(parameters);
    // At low bitrates FEC is disabled entirely.
    if (BitRateTooLowForFec(parameters)) {
      _protectionFactorK = 0;
      _protectionFactorD = 0;
    }

    // Efficiency computation is based on FEC and NACK

    // Add FEC cost: ignore I frames for now
    float fecRate = static_cast<float> (_protectionFactorD) / 255.0f;
    _efficiency = parameters->bitRate * fecRate * _corrFecCost;

    // Add NACK cost, when applicable
    if (_highRttNackMs == -1 || parameters->rtt < _highRttNackMs)
    {
        // nackCost = (bitRate - nackCost) * (lossPr)
        _efficiency += parameters->bitRate * _residualPacketLossFec /
                       (1.0f + _residualPacketLossFec);
    }

    // Protection/fec rates obtained above are defined relative to total number
    // of packets (total rate: source + fec) FEC in RTP module assumes
    // protection factor is defined relative to source number of packets so we
    // should convert the factor to reduce mismatch between mediaOpt's rate and
    // the actual one
    _protectionFactorK = VCMFecMethod::ConvertFECRate(_protectionFactorK);
    _protectionFactorD = VCMFecMethod::ConvertFECRate(_protectionFactorD);

    return true;
}
|
||||
|
||||
// Constructs the NACK-only protection method.
VCMNackMethod::VCMNackMethod():
VCMProtectionMethod()
{
    _type = kNack;
}
|
||||
|
||||
// No resources of its own; base class handles cleanup.
VCMNackMethod::~VCMNackMethod()
{
    //
}
|
||||
|
||||
bool
|
||||
VCMNackMethod::EffectivePacketLoss(const VCMProtectionParameters* parameter)
|
||||
{
|
||||
// Effective Packet Loss, NA in current version.
|
||||
_effectivePacketLoss = 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
VCMNackMethod::UpdateParameters(const VCMProtectionParameters* parameters)
|
||||
{
|
||||
// Compute the effective packet loss
|
||||
EffectivePacketLoss(parameters);
|
||||
|
||||
// nackCost = (bitRate - nackCost) * (lossPr)
|
||||
_efficiency = parameters->bitRate * parameters->lossPr /
|
||||
(1.0f + parameters->lossPr);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Constructs the FEC-only protection method.
VCMFecMethod::VCMFecMethod():
VCMProtectionMethod()
{
    _type = kFec;
}
|
||||
// No resources of its own; base class handles cleanup.
VCMFecMethod::~VCMFecMethod()
{
    //
}
|
||||
|
||||
// Returns the boost factor used to scale FEC protection up for key (I)
// frames relative to delta frames: the key/delta packet-count ratio,
// floored at 2.
//
// Input:
//          - packetFrameDelta : average packets per delta frame
//          - packetFrameKey   : average packets per key frame
WebRtc_UWord8
VCMFecMethod::BoostCodeRateKey(WebRtc_UWord8 packetFrameDelta,
                               WebRtc_UWord8 packetFrameKey) const
{
    // Minimum boost for key frames.
    WebRtc_UWord8 boostRateKey = 2;
    // Default: ratio scales the FEC protection up for I frames
    WebRtc_UWord8 ratio = 1;

    if (packetFrameDelta > 0)
    {
        // Fix: cast the quotient to the unsigned type. The previous cast to
        // the signed WebRtc_Word8 sent quotients above 127 through an
        // implementation-defined signed conversion before the assignment.
        ratio = static_cast<WebRtc_UWord8>(packetFrameKey / packetFrameDelta);
    }
    ratio = VCM_MAX(boostRateKey, ratio);

    return ratio;
}
|
||||
|
||||
// Converts a protection factor defined relative to the total packet count
// (source + FEC) into one relative to source packets only:
// rate/(1 - rate) in Q8, rounded and capped at 255.
WebRtc_UWord8
VCMFecMethod::ConvertFECRate(WebRtc_UWord8 codeRateRTP) const
{
    const double converted =
        0.5 + 255.0 * codeRateRTP / (float)(255 - codeRateRTP);
    return static_cast<WebRtc_UWord8> (VCM_MIN(255, converted));
}
|
||||
|
||||
// Update FEC with protectionFactorD (the delta-frame protection factor).
void
VCMFecMethod::UpdateProtectionFactorD(WebRtc_UWord8 protectionFactorD)
{
    _protectionFactorD = protectionFactorD;
}
|
||||
|
||||
// Update FEC with protectionFactorK (the key-frame protection factor).
void
VCMFecMethod::UpdateProtectionFactorK(WebRtc_UWord8 protectionFactorK)
{
    _protectionFactorK = protectionFactorK;
}
|
||||
|
||||
// AvgRecoveryFEC: computes the residual packet loss (RPL) function.
// This is the average recovery from the FEC, assuming random packet loss model.
// Computed off-line for a range of FEC code parameters and loss rates.
//
// Derives the (source, FEC) packets-per-frame pair from the current bitrate,
// frame rate and protection factor, then looks the recovery value up in the
// precomputed kAvgFECRecoveryXOR table.
float
VCMFecMethod::AvgRecoveryFEC(const VCMProtectionParameters* parameters) const
{
    // Total (avg) bits available per frame: total rate over actual/sent frame
    // rate units are kbits/frame
    const WebRtc_UWord16 bitRatePerFrame = static_cast<WebRtc_UWord16>
                        (parameters->bitRate / (parameters->frameRate));

    // Total (average) number of packets per frame (source and fec):
    const WebRtc_UWord8 avgTotPackets = 1 + static_cast<WebRtc_UWord8>
                        (static_cast<float> (bitRatePerFrame * 1000.0) /
                         static_cast<float> (8.0 * _maxPayloadSize) + 0.5);

    const float protectionFactor = static_cast<float>(_protectionFactorD) /
                                                      255.0;

    WebRtc_UWord8 fecPacketsPerFrame = static_cast<WebRtc_UWord8>
                                      (0.5 + protectionFactor * avgTotPackets);

    WebRtc_UWord8 sourcePacketsPerFrame = avgTotPackets - fecPacketsPerFrame;

    if ( (fecPacketsPerFrame == 0) || (sourcePacketsPerFrame == 0) )
    {
        // No protection, or rate too low: so average recovery from FEC == 0.
        return 0.0;
    }

    // Table defined up to kMaxNumPackets
    if (sourcePacketsPerFrame > kMaxNumPackets)
    {
        sourcePacketsPerFrame = kMaxNumPackets;
    }

    // Table defined up to kMaxNumPackets
    if (fecPacketsPerFrame > kMaxNumPackets)
    {
        fecPacketsPerFrame = kMaxNumPackets;
    }

    // Code index for tables: up to (kMaxNumPackets * kMaxNumPackets).
    // NOTE(review): this index table is rebuilt on every call; it depends
    // only on kMaxNumPackets and could be computed once — consider caching.
    WebRtc_UWord16 codeIndexTable[kMaxNumPackets * kMaxNumPackets];
    WebRtc_UWord16 k = 0;
    for (WebRtc_UWord8 i = 1; i <= kMaxNumPackets; i++)
    {
        for (WebRtc_UWord8 j = 1; j <= i; j++)
        {
            codeIndexTable[(j - 1) * kMaxNumPackets + i - 1] = k;
            k += 1;
        }
    }

    WebRtc_UWord8 lossRate = static_cast<WebRtc_UWord8> (255.0 *
                             parameters->lossPr + 0.5f);

    // Constrain lossRate to 50%: tables defined up to 50%
    if (lossRate >= kPacketLossMax)
    {
        lossRate = kPacketLossMax - 1;
    }

    const WebRtc_UWord16 codeIndex = (fecPacketsPerFrame - 1) * kMaxNumPackets +
                                     (sourcePacketsPerFrame - 1);

    const WebRtc_UWord16 indexTable = codeIndexTable[codeIndex] * kPacketLossMax +
                                      lossRate;

    // Check on table index
    assert(indexTable < kSizeAvgFECRecoveryXOR);
    float avgFecRecov = static_cast<float>(kAvgFECRecoveryXOR[indexTable]);

    return avgFecRecov;
}
|
||||
|
||||
bool
|
||||
VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters)
|
||||
{
|
||||
// FEC PROTECTION SETTINGS: varies with packet loss and bitrate
|
||||
|
||||
// No protection if (filtered) packetLoss is 0
|
||||
WebRtc_UWord8 packetLoss = (WebRtc_UWord8) (255 * parameters->lossPr);
|
||||
if (packetLoss == 0)
|
||||
{
|
||||
_protectionFactorK = 0;
|
||||
_protectionFactorD = 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Parameters for FEC setting:
|
||||
// first partition size, thresholds, table pars, spatial resoln fac.
|
||||
|
||||
// First partition protection: ~ 20%
|
||||
WebRtc_UWord8 firstPartitionProt = (WebRtc_UWord8) (255 * 0.20);
|
||||
|
||||
// Minimum protection level needed to generate one FEC packet for one
|
||||
// source packet/frame (in RTP sender)
|
||||
WebRtc_UWord8 minProtLevelFec = 85;
|
||||
|
||||
// Threshold on packetLoss and bitRrate/frameRate (=average #packets),
|
||||
// above which we allocate protection to cover at least first partition.
|
||||
WebRtc_UWord8 lossThr = 0;
|
||||
WebRtc_UWord8 packetNumThr = 1;
|
||||
|
||||
// Parameters for range of rate index of table.
|
||||
const WebRtc_UWord8 ratePar1 = 5;
|
||||
const WebRtc_UWord8 ratePar2 = 49;
|
||||
|
||||
// Spatial resolution size, relative to a reference size.
|
||||
float spatialSizeToRef = static_cast<float>
|
||||
(parameters->codecWidth * parameters->codecHeight) /
|
||||
(static_cast<float>(704 * 576));
|
||||
// resolnFac: This parameter will generally increase/decrease the FEC rate
|
||||
// (for fixed bitRate and packetLoss) based on system size.
|
||||
// Use a smaller exponent (< 1) to control/soften system size effect.
|
||||
const float resolnFac = 1.0 / powf(spatialSizeToRef, 0.3f);
|
||||
|
||||
const int bitRatePerFrame = BitsPerFrame(parameters);
|
||||
|
||||
|
||||
// Average number of packets per frame (source and fec):
|
||||
const WebRtc_UWord8 avgTotPackets = 1 + (WebRtc_UWord8)
|
||||
((float) bitRatePerFrame * 1000.0
|
||||
/ (float) (8.0 * _maxPayloadSize) + 0.5);
|
||||
|
||||
// FEC rate parameters: for P and I frame
|
||||
WebRtc_UWord8 codeRateDelta = 0;
|
||||
WebRtc_UWord8 codeRateKey = 0;
|
||||
|
||||
// Get index for table: the FEC protection depends on an effective rate.
|
||||
// The range on the rate index corresponds to rates (bps)
|
||||
// from ~200k to ~8000k, for 30fps
|
||||
const WebRtc_UWord16 effRateFecTable = static_cast<WebRtc_UWord16>
|
||||
(resolnFac * bitRatePerFrame);
|
||||
WebRtc_UWord8 rateIndexTable =
|
||||
(WebRtc_UWord8) VCM_MAX(VCM_MIN((effRateFecTable - ratePar1) /
|
||||
ratePar1, ratePar2), 0);
|
||||
|
||||
// Restrict packet loss range to 50:
|
||||
// current tables defined only up to 50%
|
||||
if (packetLoss >= kPacketLossMax)
|
||||
{
|
||||
packetLoss = kPacketLossMax - 1;
|
||||
}
|
||||
WebRtc_UWord16 indexTable = rateIndexTable * kPacketLossMax + packetLoss;
|
||||
|
||||
// Check on table index
|
||||
assert(indexTable < kSizeCodeRateXORTable);
|
||||
|
||||
// Protection factor for P frame
|
||||
codeRateDelta = kCodeRateXORTable[indexTable];
|
||||
|
||||
if (packetLoss > lossThr && avgTotPackets > packetNumThr)
|
||||
{
|
||||
// Set a minimum based on first partition size.
|
||||
if (codeRateDelta < firstPartitionProt)
|
||||
{
|
||||
codeRateDelta = firstPartitionProt;
|
||||
}
|
||||
}
|
||||
|
||||
// Check limit on amount of protection for P frame; 50% is max.
|
||||
if (codeRateDelta >= kPacketLossMax)
|
||||
{
|
||||
codeRateDelta = kPacketLossMax - 1;
|
||||
}
|
||||
|
||||
float adjustFec = 1.0f;
|
||||
// Avoid additional adjustments when layers are active.
|
||||
// TODO(mikhal/marco): Update adjusmtent based on layer info.
|
||||
if (parameters->numLayers == 1)
|
||||
{
|
||||
adjustFec = _qmRobustness->AdjustFecFactor(codeRateDelta,
|
||||
parameters->bitRate,
|
||||
parameters->frameRate,
|
||||
parameters->rtt,
|
||||
packetLoss);
|
||||
}
|
||||
|
||||
codeRateDelta = static_cast<WebRtc_UWord8>(codeRateDelta * adjustFec);
|
||||
|
||||
// For Key frame:
|
||||
// Effectively at a higher rate, so we scale/boost the rate
|
||||
// The boost factor may depend on several factors: ratio of packet
|
||||
// number of I to P frames, how much protection placed on P frames, etc.
|
||||
const WebRtc_UWord8 packetFrameDelta = (WebRtc_UWord8)
|
||||
(0.5 + parameters->packetsPerFrame);
|
||||
const WebRtc_UWord8 packetFrameKey = (WebRtc_UWord8)
|
||||
(0.5 + parameters->packetsPerFrameKey);
|
||||
const WebRtc_UWord8 boostKey = BoostCodeRateKey(packetFrameDelta,
|
||||
packetFrameKey);
|
||||
|
||||
rateIndexTable = (WebRtc_UWord8) VCM_MAX(VCM_MIN(
|
||||
1 + (boostKey * effRateFecTable - ratePar1) /
|
||||
ratePar1,ratePar2),0);
|
||||
WebRtc_UWord16 indexTableKey = rateIndexTable * kPacketLossMax + packetLoss;
|
||||
|
||||
indexTableKey = VCM_MIN(indexTableKey, kSizeCodeRateXORTable);
|
||||
|
||||
// Check on table index
|
||||
assert(indexTableKey < kSizeCodeRateXORTable);
|
||||
|
||||
// Protection factor for I frame
|
||||
codeRateKey = kCodeRateXORTable[indexTableKey];
|
||||
|
||||
// Boosting for Key frame.
|
||||
int boostKeyProt = _scaleProtKey * codeRateDelta;
|
||||
if (boostKeyProt >= kPacketLossMax)
|
||||
{
|
||||
boostKeyProt = kPacketLossMax - 1;
|
||||
}
|
||||
|
||||
// Make sure I frame protection is at least larger than P frame protection,
|
||||
// and at least as high as filtered packet loss.
|
||||
codeRateKey = static_cast<WebRtc_UWord8> (VCM_MAX(packetLoss,
|
||||
VCM_MAX(boostKeyProt, codeRateKey)));
|
||||
|
||||
// Check limit on amount of protection for I frame: 50% is max.
|
||||
if (codeRateKey >= kPacketLossMax)
|
||||
{
|
||||
codeRateKey = kPacketLossMax - 1;
|
||||
}
|
||||
|
||||
_protectionFactorK = codeRateKey;
|
||||
_protectionFactorD = codeRateDelta;
|
||||
|
||||
// Generally there is a rate mis-match between the FEC cost estimated
|
||||
// in mediaOpt and the actual FEC cost sent out in RTP module.
|
||||
// This is more significant at low rates (small # of source packets), where
|
||||
// the granularity of the FEC decreases. In this case, non-zero protection
|
||||
// in mediaOpt may generate 0 FEC packets in RTP sender (since actual #FEC
|
||||
// is based on rounding off protectionFactor on actual source packet number).
|
||||
// The correction factor (_corrFecCost) attempts to corrects this, at least
|
||||
// for cases of low rates (small #packets) and low protection levels.
|
||||
|
||||
float numPacketsFl = 1.0f + ((float) bitRatePerFrame * 1000.0
|
||||
/ (float) (8.0 * _maxPayloadSize) + 0.5);
|
||||
|
||||
const float estNumFecGen = 0.5f + static_cast<float> (_protectionFactorD *
|
||||
numPacketsFl / 255.0f);
|
||||
|
||||
|
||||
// We reduce cost factor (which will reduce overhead for FEC and
|
||||
// hybrid method) and not the protectionFactor.
|
||||
_corrFecCost = 1.0f;
|
||||
if (estNumFecGen < 1.1f && _protectionFactorD < minProtLevelFec)
|
||||
{
|
||||
_corrFecCost = 0.5f;
|
||||
}
|
||||
if (estNumFecGen < 0.9f && _protectionFactorD < minProtLevelFec)
|
||||
{
|
||||
_corrFecCost = 0.0f;
|
||||
}
|
||||
|
||||
// TODO (marpan): Set the UEP protection on/off for Key and Delta frames
|
||||
_useUepProtectionK = _qmRobustness->SetUepProtection(codeRateKey,
|
||||
parameters->bitRate,
|
||||
packetLoss,
|
||||
0);
|
||||
|
||||
_useUepProtectionD = _qmRobustness->SetUepProtection(codeRateDelta,
|
||||
parameters->bitRate,
|
||||
packetLoss,
|
||||
1);
|
||||
|
||||
// DONE WITH FEC PROTECTION SETTINGS
|
||||
return true;
|
||||
}
|
||||
|
||||
int VCMFecMethod::BitsPerFrame(const VCMProtectionParameters* parameters) {
|
||||
// When temporal layers are available FEC will only be applied on the base
|
||||
// layer.
|
||||
const float bitRateRatio =
|
||||
kVp8LayerRateAlloction[parameters->numLayers - 1][0];
|
||||
float frameRateRatio = powf(1 / 2.0, parameters->numLayers - 1);
|
||||
float bitRate = parameters->bitRate * bitRateRatio;
|
||||
float frameRate = parameters->frameRate * frameRateRatio;
|
||||
|
||||
// TODO(mikhal): Update factor following testing.
|
||||
float adjustmentFactor = 1;
|
||||
|
||||
// Average bits per frame (units of kbits)
|
||||
return static_cast<int>(adjustmentFactor * bitRate / frameRate);
|
||||
}
|
||||
|
||||
bool
|
||||
VCMFecMethod::EffectivePacketLoss(const VCMProtectionParameters* parameters)
|
||||
{
|
||||
// Effective packet loss to encoder is based on RPL (residual packet loss)
|
||||
// this is a soft setting based on degree of FEC protection
|
||||
// RPL = received/input packet loss - average_FEC_recovery
|
||||
// note: received/input packet loss may be filtered based on FilteredLoss
|
||||
|
||||
// The packet loss:
|
||||
WebRtc_UWord8 packetLoss = (WebRtc_UWord8) (255 * parameters->lossPr);
|
||||
|
||||
float avgFecRecov = AvgRecoveryFEC(parameters);
|
||||
|
||||
// Residual Packet Loss:
|
||||
_residualPacketLossFec = (float) (packetLoss - avgFecRecov) / 255.0f;
|
||||
|
||||
// Effective Packet Loss, NA in current version.
|
||||
_effectivePacketLoss = 0;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
VCMFecMethod::UpdateParameters(const VCMProtectionParameters* parameters)
|
||||
{
|
||||
// Compute the protection factor
|
||||
ProtectionFactor(parameters);
|
||||
|
||||
// Compute the effective packet loss
|
||||
EffectivePacketLoss(parameters);
|
||||
|
||||
// Compute the bit cost
|
||||
// Ignore key frames for now.
|
||||
float fecRate = static_cast<float> (_protectionFactorD) / 255.0f;
|
||||
if (fecRate >= 0.0f)
|
||||
{
|
||||
// use this formula if the fecRate (protection factor) is defined
|
||||
// relative to number of source packets
|
||||
// this is the case for the previous tables:
|
||||
// _efficiency = parameters->bitRate * ( 1.0 - 1.0 / (1.0 + fecRate));
|
||||
|
||||
// in the new tables, the fecRate is defined relative to total number of
|
||||
// packets (total rate), so overhead cost is:
|
||||
_efficiency = parameters->bitRate * fecRate * _corrFecCost;
|
||||
}
|
||||
else
|
||||
{
|
||||
_efficiency = 0.0f;
|
||||
}
|
||||
|
||||
// Protection/fec rates obtained above is defined relative to total number
|
||||
// of packets (total rate: source+fec) FEC in RTP module assumes protection
|
||||
// factor is defined relative to source number of packets so we should
|
||||
// convert the factor to reduce mismatch between mediaOpt suggested rate and
|
||||
// the actual rate
|
||||
_protectionFactorK = ConvertFECRate(_protectionFactorK);
|
||||
_protectionFactorD = ConvertFECRate(_protectionFactorD);
|
||||
|
||||
return true;
|
||||
}
|
||||
// Constructor: the exponential filters (_lossPr255, _packetsPerFrame,
// _packetsPerFrameKey) get their smoothing factor (0.9999) here; Reset(nowMs)
// then aligns all time references to |nowMs| and clears the loss history.
VCMLossProtectionLogic::VCMLossProtectionLogic(int64_t nowMs):
_selectedMethod(NULL),
_currentParameters(),
_rtt(0),
_lossPr(0.0f),
_bitRate(0.0f),
_frameRate(0.0f),
_keyFrameSize(0.0f),
_fecRateKey(0),
_fecRateDelta(0),
_lastPrUpdateT(0),
_lossPr255(0.9999f),
_lossPrHistory(),
_shortMaxLossPr255(0),
_packetsPerFrame(0.9999f),
_packetsPerFrameKey(0.9999f),
_residualPacketLossFec(0),
_codecWidth(0),
_codecHeight(0),
_numLayers(1)
{
    Reset(nowMs);
}
|
||||
|
||||
// Destructor: releases the currently selected protection method, if any.
VCMLossProtectionLogic::~VCMLossProtectionLogic()
{
    Release();
}
|
||||
|
||||
bool
|
||||
VCMLossProtectionLogic::SetMethod(enum VCMProtectionMethodEnum newMethodType)
|
||||
{
|
||||
if (_selectedMethod != NULL)
|
||||
{
|
||||
if (_selectedMethod->Type() == newMethodType)
|
||||
{
|
||||
// Nothing to update
|
||||
return false;
|
||||
}
|
||||
// New method - delete existing one
|
||||
delete _selectedMethod;
|
||||
}
|
||||
VCMProtectionMethod *newMethod = NULL;
|
||||
switch (newMethodType)
|
||||
{
|
||||
case kNack:
|
||||
{
|
||||
newMethod = new VCMNackMethod();
|
||||
break;
|
||||
}
|
||||
case kFec:
|
||||
{
|
||||
newMethod = new VCMFecMethod();
|
||||
break;
|
||||
}
|
||||
case kNackFec:
|
||||
{
|
||||
// Default to always having NACK enabled for the hybrid mode.
|
||||
newMethod = new VCMNackFecMethod(kLowRttNackMs, -1);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
return false;
|
||||
break;
|
||||
}
|
||||
|
||||
}
|
||||
_selectedMethod = newMethod;
|
||||
return true;
|
||||
}
|
||||
bool
|
||||
VCMLossProtectionLogic::RemoveMethod(enum VCMProtectionMethodEnum method)
|
||||
{
|
||||
if (_selectedMethod == NULL)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
else if (_selectedMethod->Type() == method)
|
||||
{
|
||||
delete _selectedMethod;
|
||||
_selectedMethod = NULL;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
float
|
||||
VCMLossProtectionLogic::RequiredBitRate() const
|
||||
{
|
||||
float RequiredBitRate = 0.0f;
|
||||
if (_selectedMethod != NULL)
|
||||
{
|
||||
RequiredBitRate = _selectedMethod->RequiredBitRate();
|
||||
}
|
||||
return RequiredBitRate;
|
||||
}
|
||||
|
||||
// Stores the latest round-trip time estimate.
// NOTE(review): the header comment says "seconds", but the NACK thresholds
// this is compared against (kLowRttNackMs/kHighRttNackMs) are in ms —
// confirm the unit with callers.
void
VCMLossProtectionLogic::UpdateRtt(WebRtc_UWord32 rtt)
{
    _rtt = rtt;
}
|
||||
|
||||
// Stores the residual packet loss (effective loss remaining after FEC
// recovery), later copied into VCMProtectionParameters by UpdateMethod().
void
VCMLossProtectionLogic::UpdateResidualPacketLoss(float residualPacketLoss)
{
    _residualPacketLossFec = residualPacketLoss;
}
|
||||
|
||||
// Maintains the max-window packet-loss history. Each history slot covers one
// window of kLossPrShortFilterWinMs; _lossPrHistory[0] holds the most recent
// completed window, and _shortMaxLossPr255 accumulates the running max of the
// window currently being filled.
void
VCMLossProtectionLogic::UpdateMaxLossHistory(WebRtc_UWord8 lossPr255,
                                             WebRtc_Word64 now)
{
    if (_lossPrHistory[0].timeMs >= 0 &&
        now - _lossPrHistory[0].timeMs < kLossPrShortFilterWinMs)
    {
        // Still inside the current window: just track the running max.
        if (lossPr255 > _shortMaxLossPr255)
        {
            _shortMaxLossPr255 = lossPr255;
        }
    }
    else
    {
        // Only add a new value to the history once a second
        if (_lossPrHistory[0].timeMs == -1)
        {
            // First, no shift
            _shortMaxLossPr255 = lossPr255;
        }
        else
        {
            // Shift the history one slot; the oldest entry falls off.
            for (WebRtc_Word32 i = (kLossPrHistorySize - 2); i >= 0; i--)
            {
                _lossPrHistory[i + 1].lossPr255 = _lossPrHistory[i].lossPr255;
                _lossPrHistory[i + 1].timeMs = _lossPrHistory[i].timeMs;
            }
        }
        if (_shortMaxLossPr255 == 0)
        {
            // Window closed without a recorded max: use the current sample.
            _shortMaxLossPr255 = lossPr255;
        }

        // Commit the completed window and start a new one at |now|.
        _lossPrHistory[0].lossPr255 = _shortMaxLossPr255;
        _lossPrHistory[0].timeMs = now;
        _shortMaxLossPr255 = 0;
    }
}
|
||||
|
||||
// Returns the maximum packet loss (in [0, 255]) observed over the current
// partial window plus all non-stale entries of the loss history.
WebRtc_UWord8
VCMLossProtectionLogic::MaxFilteredLossPr(WebRtc_Word64 nowMs) const
{
    // Start from the max of the (still open) current window.
    WebRtc_UWord8 maxFound = _shortMaxLossPr255;
    if (_lossPrHistory[0].timeMs == -1)
    {
        // History is empty.
        return maxFound;
    }
    for (WebRtc_Word32 i = 0; i < kLossPrHistorySize; i++)
    {
        // Entries are ordered newest-first: stop at the first unused slot
        // or the first sample older than the full filter length.
        if (_lossPrHistory[i].timeMs == -1 ||
            nowMs - _lossPrHistory[i].timeMs >
                kLossPrHistorySize * kLossPrShortFilterWinMs)
        {
            break;
        }
        if (_lossPrHistory[i].lossPr255 > maxFound)
        {
            maxFound = _lossPrHistory[i].lossPr255;
        }
    }
    return maxFound;
}
|
||||
|
||||
// Feeds the received loss |lossPr255| into both loss filters (max-window and
// recursive average) and returns the loss selected by |filter_mode|.
// Return value: filtered loss probability in [0, 255].
WebRtc_UWord8 VCMLossProtectionLogic::FilteredLoss(
    int64_t nowMs,
    FilterPacketLossMode filter_mode,
    WebRtc_UWord8 lossPr255) {
  // Both filters are updated regardless of which one is read out.
  UpdateMaxLossHistory(lossPr255, nowMs);
  _lossPr255.Apply(static_cast<float> (nowMs - _lastPrUpdateT),
                   static_cast<float> (lossPr255));
  _lastPrUpdateT = nowMs;

  switch (filter_mode) {
    case kAvgFilter:
      // Recursive average, rounded to nearest.
      return static_cast<WebRtc_UWord8> (_lossPr255.Value() + 0.5);
    case kMaxFilter:
      // Max over the short history window.
      return MaxFilteredLossPr(nowMs);
    case kNoFilter:
      break;
  }
  // Default: the raw received loss, unfiltered.
  return lossPr255;
}
|
||||
|
||||
void
|
||||
VCMLossProtectionLogic::UpdateFilteredLossPr(WebRtc_UWord8 packetLossEnc)
|
||||
{
|
||||
_lossPr = (float) packetLossEnc / (float) 255.0;
|
||||
}
|
||||
|
||||
// Stores the current target bit rate (kbit/s, per the header comment).
void
VCMLossProtectionLogic::UpdateBitRate(float bitRate)
{
    _bitRate = bitRate;
}
|
||||
|
||||
void
|
||||
VCMLossProtectionLogic::UpdatePacketsPerFrame(float nPackets, int64_t nowMs)
|
||||
{
|
||||
_packetsPerFrame.Apply(static_cast<float>(nowMs - _lastPacketPerFrameUpdateT),
|
||||
nPackets);
|
||||
_lastPacketPerFrameUpdateT = nowMs;
|
||||
}
|
||||
|
||||
void
|
||||
VCMLossProtectionLogic::UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs)
|
||||
{
|
||||
_packetsPerFrameKey.Apply(static_cast<float>(nowMs -
|
||||
_lastPacketPerFrameUpdateTKey), nPackets);
|
||||
_lastPacketPerFrameUpdateTKey = nowMs;
|
||||
}
|
||||
|
||||
// Stores the size of the latest sent key frame.
void
VCMLossProtectionLogic::UpdateKeyFrameSize(float keyFrameSize)
{
    _keyFrameSize = keyFrameSize;
}
|
||||
|
||||
// Stores the codec frame dimensions; the FEC method uses width * height
// for the spatial-resolution scaling of the protection tables.
void
VCMLossProtectionLogic::UpdateFrameSize(WebRtc_UWord16 width,
                                        WebRtc_UWord16 height)
{
    _codecWidth = width;
    _codecHeight = height;
}
|
||||
|
||||
// Stores the number of active temporal layers; 0 (unset) is treated as 1.
void VCMLossProtectionLogic::UpdateNumLayers(int numLayers) {
  if (numLayers == 0) {
    _numLayers = 1;
  } else {
    _numLayers = numLayers;
  }
}
|
||||
|
||||
// Snapshots the current state into _currentParameters and pushes it to the
// selected protection method.
// Return value: false when no method is selected, otherwise the result of
// the method's UpdateParameters().
bool
VCMLossProtectionLogic::UpdateMethod()
{
    if (_selectedMethod == NULL)
    {
        return false;
    }
    _currentParameters.rtt = _rtt;
    _currentParameters.lossPr = _lossPr;
    _currentParameters.bitRate = _bitRate;
    _currentParameters.frameRate = _frameRate; // rename actual frame rate?
    _currentParameters.keyFrameSize = _keyFrameSize;
    _currentParameters.fecRateDelta = _fecRateDelta;
    _currentParameters.fecRateKey = _fecRateKey;
    _currentParameters.packetsPerFrame = _packetsPerFrame.Value();
    _currentParameters.packetsPerFrameKey = _packetsPerFrameKey.Value();
    _currentParameters.residualPacketLossFec = _residualPacketLossFec;
    _currentParameters.codecWidth = _codecWidth;
    _currentParameters.codecHeight = _codecHeight;
    _currentParameters.numLayers = _numLayers;
    return _selectedMethod->UpdateParameters(&_currentParameters);
}
|
||||
|
||||
// Returns the currently selected protection method (NULL if none is set).
VCMProtectionMethod*
VCMLossProtectionLogic::SelectedMethod() const
{
    return _selectedMethod;
}
|
||||
|
||||
// Returns the protection type of the currently selected method.
// NOTE(review): dereferences _selectedMethod without a NULL check — confirm
// callers guarantee SetMethod() succeeded before calling this.
VCMProtectionMethodEnum
VCMLossProtectionLogic::SelectedType() const
{
    return _selectedMethod->Type();
}
|
||||
|
||||
void
|
||||
VCMLossProtectionLogic::Reset(int64_t nowMs)
|
||||
{
|
||||
_lastPrUpdateT = nowMs;
|
||||
_lastPacketPerFrameUpdateT = nowMs;
|
||||
_lastPacketPerFrameUpdateTKey = nowMs;
|
||||
_lossPr255.Reset(0.9999f);
|
||||
_packetsPerFrame.Reset(0.9999f);
|
||||
_fecRateDelta = _fecRateKey = 0;
|
||||
for (WebRtc_Word32 i = 0; i < kLossPrHistorySize; i++)
|
||||
{
|
||||
_lossPrHistory[i].lossPr255 = 0;
|
||||
_lossPrHistory[i].timeMs = -1;
|
||||
}
|
||||
_shortMaxLossPr255 = 0;
|
||||
Release();
|
||||
}
|
||||
|
||||
// Deletes the selected protection method (delete on NULL is a no-op) and
// clears the pointer so it is not left dangling.
void
VCMLossProtectionLogic::Release()
{
    delete _selectedMethod;
    _selectedMethod = NULL;
}
|
||||
|
||||
}
|
||||
395
webrtc/modules/video_coding/main/source/media_opt_util.h
Normal file
395
webrtc/modules/video_coding/main/source/media_opt_util.h
Normal file
@ -0,0 +1,395 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
#include "trace.h"
|
||||
#include "exp_filter.h"
|
||||
#include "internal_defines.h"
|
||||
#include "qm_select.h"
|
||||
|
||||
#include <cmath>
|
||||
#include <cstdlib>
|
||||
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
// Number of time periods used for (max) window filter for packet loss
// TODO (marpan): set reasonable window size for filtered packet loss,
// adjustment should be based on logged/real data of loss stats/correlation.
enum { kLossPrHistorySize = 10 };

// 1000 ms, total filter length is (kLossPrHistorySize * 1000) ms
enum { kLossPrShortFilterWinMs = 1000 };

// The type of filter used on the received packet loss reports.
enum FilterPacketLossMode {
    kNoFilter,    // No filtering on received loss.
    kAvgFilter,   // Recursive average filter.
    kMaxFilter    // Max-window filter, over the time interval of:
                  // (kLossPrHistorySize * kLossPrShortFilterWinMs) ms.
};

// RTT thresholds (ms) for the hybrid NACK/FEC mode,
// common to media optimization and the jitter buffer.
enum HybridNackTH {
    kHighRttNackMs = 100,
    kLowRttNackMs = 20
};
|
||||
|
||||
// Aggregated input state handed to the protection methods on each update:
// network conditions plus rate/frame statistics and codec configuration.
struct VCMProtectionParameters
{
    VCMProtectionParameters() : rtt(0), lossPr(0.0f), bitRate(0.0f),
        packetsPerFrame(0.0f), packetsPerFrameKey(0.0f), frameRate(0.0f),
        keyFrameSize(0.0f), fecRateDelta(0), fecRateKey(0),
        residualPacketLossFec(0.0f), codecWidth(0), codecHeight(0),
        numLayers(1)
    {}

    int rtt;                      // Round-trip time (unit per UpdateRtt).
    float lossPr;                 // Filtered packet loss, normalized [0, 1].
    float bitRate;                // Target bit rate (kbit/s).
    float packetsPerFrame;        // Filtered packets per delta frame.
    float packetsPerFrameKey;     // Filtered packets per key frame.
    float frameRate;              // Target frame rate.
    float keyFrameSize;           // Size of the latest sent key frame.
    WebRtc_UWord8 fecRateDelta;   // FEC rate for delta frames, in [0, 255].
    WebRtc_UWord8 fecRateKey;     // FEC rate for key frames, in [0, 255].
    float residualPacketLossFec;  // Estimated loss remaining after FEC.
    WebRtc_UWord16 codecWidth;    // Encoded frame width.
    WebRtc_UWord16 codecHeight;   // Encoded frame height.
    int numLayers;                // Number of active temporal layers.
};
|
||||
|
||||
|
||||
/******************************/
/* VCMProtectionMethod class */
/****************************/

// Identifies the loss-protection strategy implemented by a
// VCMProtectionMethod subclass.
enum VCMProtectionMethodEnum
{
    kNack,
    kFec,
    kNackFec,
    kNone
};
|
||||
|
||||
class VCMLossProbabilitySample
|
||||
{
|
||||
public:
|
||||
VCMLossProbabilitySample() : lossPr255(0), timeMs(-1) {};
|
||||
|
||||
WebRtc_UWord8 lossPr255;
|
||||
WebRtc_Word64 timeMs;
|
||||
};
|
||||
|
||||
|
||||
// Abstract base class for a loss-protection method (NACK, FEC or the hybrid
// NACK/FEC). Subclasses compute protection settings from
// VCMProtectionParameters and expose them through the accessors below.
class VCMProtectionMethod
{
public:
    VCMProtectionMethod();
    virtual ~VCMProtectionMethod();

    // Updates the efficiency of the method using the parameters provided
    //
    // Input:
    //         - parameters : Parameters used to calculate efficiency
    //
    // Return value         : True if this method is recommended in
    //                        the given conditions.
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters) = 0;

    // Returns the protection type
    //
    // Return value         : The protection type
    enum VCMProtectionMethodEnum Type() const { return _type; }

    // Returns the bit rate required by this protection method
    // during these conditions.
    //
    // Return value         : Required bit rate
    virtual float RequiredBitRate() { return _efficiency; }

    // Returns the effective packet loss for ER, required by this protection
    // method
    //
    // Return value         : Required effective packet loss
    virtual WebRtc_UWord8 RequiredPacketLossER() { return _effectivePacketLoss; }

    // Extracts the FEC protection factor for Key frame, required by this
    // protection method
    //
    // Return value         : Required protectionFactor for Key frame
    virtual WebRtc_UWord8 RequiredProtectionFactorK() { return _protectionFactorK; }

    // Extracts the FEC protection factor for Delta frame, required by this
    // protection method
    //
    // Return value         : Required protectionFactor for delta frame
    virtual WebRtc_UWord8 RequiredProtectionFactorD() { return _protectionFactorD; }

    // Extracts whether the FEC Unequal protection (UEP) is used for Key frame.
    //
    // Return value         : Required Unequal protection on/off state.
    virtual bool RequiredUepProtectionK() { return _useUepProtectionK; }

    // Extracts whether the FEC Unequal protection (UEP) is used for Delta
    // frame.
    //
    // Return value         : Required Unequal protection on/off state.
    virtual bool RequiredUepProtectionD() { return _useUepProtectionD; }

    // Max number of frames the FEC may span; 1 unless overridden.
    virtual int MaxFramesFec() const { return 1; }

    // Updates content metrics
    void UpdateContentMetrics(const VideoContentMetrics* contentMetrics);

protected:

    WebRtc_UWord8 _effectivePacketLoss;
    WebRtc_UWord8 _protectionFactorK;
    WebRtc_UWord8 _protectionFactorD;
    // Estimation of residual loss after the FEC
    float _residualPacketLossFec;
    float _scaleProtKey;
    WebRtc_Word32 _maxPayloadSize;

    VCMQmRobustness* _qmRobustness;
    bool _useUepProtectionK;
    bool _useUepProtectionD;
    float _corrFecCost;
    enum VCMProtectionMethodEnum _type;
    float _efficiency;
};
|
||||
|
||||
// NACK-only protection: relies on retransmission, no FEC overhead.
class VCMNackMethod : public VCMProtectionMethod
{
public:
    VCMNackMethod();
    virtual ~VCMNackMethod();
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
    // Get the effective packet loss
    bool EffectivePacketLoss(const VCMProtectionParameters* parameter);
};
|
||||
|
||||
// XOR-FEC based protection: derives protection factors for key/delta frames
// from table lookups driven by loss, rate and resolution.
class VCMFecMethod : public VCMProtectionMethod
{
public:
    VCMFecMethod();
    virtual ~VCMFecMethod();
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
    // Get the effective packet loss for ER
    bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
    // Get the FEC protection factors
    bool ProtectionFactor(const VCMProtectionParameters* parameters);
    // Get the boost for key frame protection
    WebRtc_UWord8 BoostCodeRateKey(WebRtc_UWord8 packetFrameDelta,
                                   WebRtc_UWord8 packetFrameKey) const;
    // Convert the rates: defined relative to total# packets or source# packets
    WebRtc_UWord8 ConvertFECRate(WebRtc_UWord8 codeRate) const;
    // Get the average effective recovery from FEC: for random loss model
    float AvgRecoveryFEC(const VCMProtectionParameters* parameters) const;
    // Update FEC with protectionFactorD
    void UpdateProtectionFactorD(WebRtc_UWord8 protectionFactorD);
    // Update FEC with protectionFactorK
    void UpdateProtectionFactorK(WebRtc_UWord8 protectionFactorK);
    // Compute the bits per frame. Account for temporal layers when applicable.
    int BitsPerFrame(const VCMProtectionParameters* parameters);

protected:
    enum { kUpperLimitFramesFec = 6 };
    // Thresholds values for the bytes/frame and round trip time, below which we
    // may turn off FEC, depending on |_numLayers| and |_maxFramesFec|.
    // Max bytes/frame for VGA, corresponds to ~140k at 25fps.
    enum { kMaxBytesPerFrameForFec = 700 };
    // Max bytes/frame for CIF and lower: corresponds to ~80k at 25fps.
    enum { kMaxBytesPerFrameForFecLow = 400 };
    // Max bytes/frame for frame size larger than VGA, ~200k at 25fps.
    enum { kMaxBytesPerFrameForFecHigh = 1000 };
    // Max round trip time threshold in ms.
    enum { kMaxRttTurnOffFec = 200 };
};
|
||||
|
||||
|
||||
// Hybrid NACK/FEC protection: NACK below the low-RTT threshold, FEC above,
// with FEC optionally disabled at low bit rates.
class VCMNackFecMethod : public VCMFecMethod
{
public:
    VCMNackFecMethod(int lowRttNackThresholdMs,
                     int highRttNackThresholdMs);
    virtual ~VCMNackFecMethod();
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
    // Get the effective packet loss for ER
    bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
    // Get the protection factors
    bool ProtectionFactor(const VCMProtectionParameters* parameters);
    // Get the max number of frames the FEC is allowed to be based on.
    int MaxFramesFec() const;
    // Turn off the FEC based on low bitrate and other factors.
    bool BitRateTooLowForFec(const VCMProtectionParameters* parameters);
private:
    int ComputeMaxFramesFec(const VCMProtectionParameters* parameters);

    int _lowRttNackMs;   // RTT below this: rely on NACK.
    int _highRttNackMs;  // RTT above this: NACK disabled (-1 = unused).
    int _maxFramesFec;
};
|
||||
|
||||
// Owns the selected loss-protection method and the filtered network/rate
// state (loss filters, packets-per-frame filters, RTT, rates) that is pushed
// to the method on each UpdateMethod() call.
class VCMLossProtectionLogic
{
public:
    VCMLossProtectionLogic(int64_t nowMs);
    ~VCMLossProtectionLogic();

    // Set the protection method to be used
    //
    // Input:
    //        - newMethodType : New requested protection method type. If one
    //                          is already set, it will be deleted and replaced
    // Return value:     Returns true on update
    bool SetMethod(enum VCMProtectionMethodEnum newMethodType);

    // Remove requested protection method
    // Input:
    //        - method : method to be removed (if currently selected)
    //
    // Return value:     Returns true on update
    bool RemoveMethod(enum VCMProtectionMethodEnum method);

    // Return required bit rate per selected protection method
    float RequiredBitRate() const;

    // Update the round-trip time
    //
    // Input:
    //          - rtt           : Round-trip time in seconds.
    //                            NOTE(review): confirm unit — the NACK RTT
    //                            thresholds (HybridNackTH) are in ms.
    void UpdateRtt(WebRtc_UWord32 rtt);

    // Update residual packet loss
    //
    // Input:
    //          - residualPacketLoss  : residual packet loss:
    //                                  effective loss after FEC recovery
    void UpdateResidualPacketLoss(float _residualPacketLoss);

    // Update the filtered packet loss.
    //
    // Input:
    //          - packetLossEnc :  The reported packet loss filtered
    //                             (max window or average)
    void UpdateFilteredLossPr(WebRtc_UWord8 packetLossEnc);

    // Update the current target bit rate.
    //
    // Input:
    //          - bitRate          : The current target bit rate in kbits/s
    void UpdateBitRate(float bitRate);

    // Update the number of packets per frame estimate, for delta frames
    //
    // Input:
    //          - nPackets         : Number of packets in the latest sent frame.
    void UpdatePacketsPerFrame(float nPackets, int64_t nowMs);

    // Update the number of packets per frame estimate, for key frames
    //
    // Input:
    //          - nPackets         : Number of packets in the latest sent frame.
    void UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs);

    // Update the keyFrameSize estimate
    //
    // Input:
    //          - keyFrameSize     : The size of the latest sent key frame.
    void UpdateKeyFrameSize(float keyFrameSize);

    // Update the frame rate
    //
    // Input:
    //          - frameRate        : The current target frame rate.
    void UpdateFrameRate(float frameRate) { _frameRate = frameRate; }

    // Update the frame size
    //
    // Input:
    //          - width        : The codec frame width.
    //          - height       : The codec frame height.
    void UpdateFrameSize(WebRtc_UWord16 width, WebRtc_UWord16 height);

    // Update the number of active layers
    //
    // Input:
    //          - numLayers : Number of layers used.
    void UpdateNumLayers(int numLayers);

    // The amount of packet loss to cover for with FEC.
    //
    // Input:
    //          - fecRateKey      : Packet loss to cover for with FEC when
    //                              sending key frames.
    //          - fecRateDelta    : Packet loss to cover for with FEC when
    //                              sending delta frames.
    void UpdateFECRates(WebRtc_UWord8 fecRateKey, WebRtc_UWord8 fecRateDelta)
                       { _fecRateKey = fecRateKey;
                         _fecRateDelta = fecRateDelta; }

    // Update the protection methods with the current VCMProtectionParameters
    // and set the requested protection settings.
    // Return value     : Returns true on update
    bool UpdateMethod();

    // Returns the method currently selected.
    //
    // Return value                 : The protection method currently selected.
    VCMProtectionMethod* SelectedMethod() const;

    // Return the protection type of the currently selected method
    VCMProtectionMethodEnum SelectedType() const;

    // Updates the filtered loss for the average and max window packet loss,
    // and returns the filtered loss probability in the interval [0, 255].
    // The returned filtered loss value depends on the parameter |filter_mode|.
    // The input parameter |lossPr255| is the received packet loss.

    // Return value                 : The filtered loss probability
    WebRtc_UWord8 FilteredLoss(int64_t nowMs, FilterPacketLossMode filter_mode,
                               WebRtc_UWord8 lossPr255);

    void Reset(int64_t nowMs);

    void Release();

private:
    // Updates the max-window loss history with a new sample.
    void UpdateMaxLossHistory(WebRtc_UWord8 lossPr255, WebRtc_Word64 now);
    // Max loss over the current window plus the non-stale history entries.
    WebRtc_UWord8 MaxFilteredLossPr(WebRtc_Word64 nowMs) const;
    VCMProtectionMethod* _selectedMethod;
    VCMProtectionParameters _currentParameters;
    WebRtc_UWord32 _rtt;
    float _lossPr;
    float _bitRate;
    float _frameRate;
    float _keyFrameSize;
    WebRtc_UWord8 _fecRateKey;
    WebRtc_UWord8 _fecRateDelta;
    WebRtc_Word64 _lastPrUpdateT;
    WebRtc_Word64 _lastPacketPerFrameUpdateT;
    WebRtc_Word64 _lastPacketPerFrameUpdateTKey;
    VCMExpFilter _lossPr255;
    VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
    WebRtc_UWord8 _shortMaxLossPr255;
    VCMExpFilter _packetsPerFrame;
    VCMExpFilter _packetsPerFrameKey;
    float _residualPacketLossFec;
    WebRtc_UWord16 _codecWidth;
    WebRtc_UWord16 _codecHeight;
    int _numLayers;
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
|
||||
673
webrtc/modules/video_coding/main/source/media_optimization.cc
Normal file
673
webrtc/modules/video_coding/main/source/media_optimization.cc
Normal file
@ -0,0 +1,673 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "media_optimization.h"
|
||||
|
||||
#include "content_metrics_processing.h"
|
||||
#include "frame_dropper.h"
|
||||
#include "qm_select.h"
|
||||
#include "modules/video_coding/main/source/tick_time_base.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Constructs the media optimization module with default codec settings and
// allocates its helper objects (frame dropper, loss-protection logic,
// content-metrics processing and QM resolution selector).
//
// Input:
//          - id    : Owner id, used for tracing.
//          - clock : Time source; not owned, must outlive this object.
VCMMediaOptimization::VCMMediaOptimization(WebRtc_Word32 id,
                                           TickTimeBase* clock):
_id(id),
_clock(clock),
_maxBitRate(0),
_sendCodecType(kVideoCodecUnknown),
_codecWidth(0),
_codecHeight(0),
_userFrameRate(0),
_fractionLost(0),
_sendStatisticsZeroEncode(0),
// Default max payload size (bytes); overridable via SetMtu().
_maxPayloadSize(1460),
_targetBitRate(0),
_incomingFrameRate(0),
_enableQm(false),
_videoProtectionCallback(NULL),
_videoQMSettingsCallback(NULL),
_encodedFrameSamples(),
_avgSentBitRateBps(0.0f),
_keyFrameCnt(0),
_deltaFrameCnt(0),
_lastQMUpdateTime(0),
_lastChangeTime(0),
_numLayers(0)
{
    memset(_sendStatistics, 0, sizeof(_sendStatistics));
    // All-0xFF bytes == -1 per entry: marks unused frame-time slots.
    memset(_incomingFrameTimes, -1, sizeof(_incomingFrameTimes));

    _frameDropper = new VCMFrameDropper(_id);
    _lossProtLogic = new VCMLossProtectionLogic(_clock->MillisecondTimestamp());
    _content = new VCMContentMetricsProcessing();
    _qmResolution = new VCMQmResolution();
}
|
||||
|
||||
// Releases the loss-protection logic and frees all owned helper objects.
// |_clock| is not owned and is left untouched.
VCMMediaOptimization::~VCMMediaOptimization(void)
{
    _lossProtLogic->Release();
    delete _lossProtLogic;
    delete _frameDropper;
    delete _content;
    delete _qmResolution;
}
|
||||
|
||||
// Re-initializes all rate, QM and protection state to startup defaults.
// Return value : VCM_OK.
WebRtc_Word32
VCMMediaOptimization::Reset()
{
    memset(_incomingFrameTimes, -1, sizeof(_incomingFrameTimes));
    _incomingFrameRate = 0.0;
    _frameDropper->Reset();
    _lossProtLogic->Reset(_clock->MillisecondTimestamp());
    _frameDropper->SetRates(0, 0);
    _content->Reset();
    _qmResolution->Reset();
    _lossProtLogic->UpdateFrameRate(_incomingFrameRate);
    // NOTE(review): this is the second _lossProtLogic->Reset() call in this
    // function (see above); it appears redundant and also follows the
    // UpdateFrameRate call -- confirm whether one of the two can be removed.
    _lossProtLogic->Reset(_clock->MillisecondTimestamp());
    _sendStatisticsZeroEncode = 0;
    _targetBitRate = 0;
    _codecWidth = 0;
    _codecHeight = 0;
    _userFrameRate = 0;
    _keyFrameCnt = 0;
    _deltaFrameCnt = 0;
    _lastQMUpdateTime = 0;
    _lastChangeTime = 0;
    // Invalidate the encoded-frame history used for the sent-bitrate estimate
    // (-1 marks an unused slot).
    for (WebRtc_Word32 i = 0; i < kBitrateMaxFrameSamples; i++)
    {
        _encodedFrameSamples[i]._sizeBytes = -1;
        _encodedFrameSamples[i]._timeCompleteMs = -1;
    }
    _avgSentBitRateBps = 0.0f;
    _numLayers = 1;
    return VCM_OK;
}
|
||||
|
||||
// Splits the total allocated bit rate between source coding and loss
// protection (FEC/NACK), pushes the resulting settings to the frame dropper
// and (when enabled) the QM resolution logic.
//
// Input:
//          - bitRate         : Total allocated rate [kbps].
//          - fractionLost    : Received packet loss [0..255] (in/out ref).
//          - roundTripTimeMs : Round-trip time estimate [ms].
// Return value              : Target bit rate for the encoder [kbps].
WebRtc_UWord32
VCMMediaOptimization::SetTargetRates(WebRtc_UWord32 bitRate,
                                     WebRtc_UWord8 &fractionLost,
                                     WebRtc_UWord32 roundTripTimeMs)
{
    VCMProtectionMethod *selectedMethod = _lossProtLogic->SelectedMethod();
    _lossProtLogic->UpdateBitRate(static_cast<float>(bitRate));
    _lossProtLogic->UpdateRtt(roundTripTimeMs);
    _lossProtLogic->UpdateResidualPacketLoss(static_cast<float>(fractionLost));

    // Get frame rate for encoder: this is the actual/sent frame rate.
    float actualFrameRate = SentFrameRate();

    // Sanity: never feed the protection logic a rate below 1 fps.
    if (actualFrameRate < 1.0)
    {
        actualFrameRate = 1.0;
    }

    // Update frame rate for the loss protection logic class: frame rate
    // should be the actual/sent rate.
    _lossProtLogic->UpdateFrameRate(actualFrameRate);

    _fractionLost = fractionLost;

    // Returns the filtered packet loss, used for the protection setting.
    // The filtered loss may be the received loss (no filter), or some
    // filtered value (average or max window filter).
    // Use max window filter for now.
    FilterPacketLossMode filter_mode = kMaxFilter;
    WebRtc_UWord8 packetLossEnc = _lossProtLogic->FilteredLoss(
        _clock->MillisecondTimestamp(), filter_mode, fractionLost);

    // For now use the filtered loss for computing the robustness settings.
    _lossProtLogic->UpdateFilteredLossPr(packetLossEnc);

    // Rate cost of the protection methods.
    uint32_t protection_overhead_kbps = 0;

    // Update protection settings, when applicable.
    float sent_video_rate = 0.0f;
    if (selectedMethod)
    {
        // Update protection method with content metrics.
        selectedMethod->UpdateContentMetrics(_content->ShortTermAvgData());

        // UpdateMethod() computes the robustness settings for the selected
        // protection method and its overhead cost; the method itself is set
        // by the user via SetVideoProtection.
        _lossProtLogic->UpdateMethod();

        // Update protection callback with protection settings.
        uint32_t sent_video_rate_bps = 0;
        uint32_t sent_nack_rate_bps = 0;
        uint32_t sent_fec_rate_bps = 0;
        // Get the bit cost of protection method, based on the amount of
        // overhead data actually transmitted (including headers) the last
        // second.
        UpdateProtectionCallback(selectedMethod,
                                 &sent_video_rate_bps,
                                 &sent_nack_rate_bps,
                                 &sent_fec_rate_bps);
        uint32_t sent_total_rate_bps = sent_video_rate_bps +
            sent_nack_rate_bps + sent_fec_rate_bps;
        // Estimate the overhead costs of the next second as staying the same
        // wrt the source bitrate (scaled, rounded to nearest).
        if (sent_total_rate_bps > 0) {
            protection_overhead_kbps = static_cast<uint32_t>(bitRate *
                static_cast<double>(sent_nack_rate_bps + sent_fec_rate_bps) /
                sent_total_rate_bps + 0.5);
        }
        // Cap the overhead estimate to 50%.
        if (protection_overhead_kbps > bitRate / 2)
            protection_overhead_kbps = bitRate / 2;

        // Get the effective packet loss for encoder ER; when applicable,
        // should be passed to encoder via fractionLost.
        packetLossEnc = selectedMethod->RequiredPacketLossER();
        sent_video_rate = static_cast<float>(sent_video_rate_bps / 1000.0);
    }

    // Source coding rate: total rate - protection overhead.
    _targetBitRate = bitRate - protection_overhead_kbps;

    // Update encoding rates following protection settings.
    _frameDropper->SetRates(static_cast<float>(_targetBitRate),
                            _incomingFrameRate);

    if (_enableQm)
    {
        // Update QM with rates.
        _qmResolution->UpdateRates((float)_targetBitRate, sent_video_rate,
                                   _incomingFrameRate, _fractionLost);
        // Check for QM selection (rate-limited by checkStatusForQMchange).
        bool selectQM = checkStatusForQMchange();
        if (selectQM)
        {
            SelectQuality();
        }
        // Reset the short-term averaged content data.
        _content->ResetShortTermAvgData();
    }

    return _targetBitRate;
}
|
||||
|
||||
// Pushes the FEC settings of |selected_method| to the registered protection
// callback and retrieves the rates actually sent during the last interval.
//
// Input:
//          - selected_method        : Currently selected protection method.
// Output:
//          - video_rate_bps         : Sent video rate [bps].
//          - nack_overhead_rate_bps : Sent NACK overhead rate [bps].
//          - fec_overhead_rate_bps  : Sent FEC overhead rate [bps].
// Return value : VCM_OK when no callback is registered; otherwise the
//                callback's ProtectionRequest() return value.
int VCMMediaOptimization::UpdateProtectionCallback(
    VCMProtectionMethod *selected_method,
    uint32_t* video_rate_bps,
    uint32_t* nack_overhead_rate_bps,
    uint32_t* fec_overhead_rate_bps)
{
    if (!_videoProtectionCallback)
    {
        return VCM_OK;
    }
    FecProtectionParams delta_fec_params;
    FecProtectionParams key_fec_params;
    // Get the FEC code rate for Key frames (set to 0 when NA).
    key_fec_params.fec_rate = selected_method->RequiredProtectionFactorK();

    // Get the FEC code rate for Delta frames (set to 0 when NA).
    delta_fec_params.fec_rate =
        selected_method->RequiredProtectionFactorD();

    // Get the FEC-UEP protection status for Key frames: UEP on/off.
    key_fec_params.use_uep_protection =
        selected_method->RequiredUepProtectionK();

    // Get the FEC-UEP protection status for Delta frames: UEP on/off.
    delta_fec_params.use_uep_protection =
        selected_method->RequiredUepProtectionD();

    // The RTP module currently requires the same |max_fec_frames| for both
    // key and delta frames.
    delta_fec_params.max_fec_frames = selected_method->MaxFramesFec();
    key_fec_params.max_fec_frames = selected_method->MaxFramesFec();

    // Set the FEC packet mask type. |kFecMaskBursty| is more effective for
    // consecutive losses and little/no packet re-ordering. As we currently
    // do not have feedback data on the degree of correlated losses and packet
    // re-ordering, we keep default setting to |kFecMaskRandom| for now.
    delta_fec_params.fec_mask_type = kFecMaskRandom;
    key_fec_params.fec_mask_type = kFecMaskRandom;

    // TODO(Marco): Pass FEC protection values per layer.
    return _videoProtectionCallback->ProtectionRequest(&delta_fec_params,
                                                       &key_fec_params,
                                                       video_rate_bps,
                                                       nack_overhead_rate_bps,
                                                       fec_overhead_rate_bps);
}
|
||||
|
||||
bool
|
||||
VCMMediaOptimization::DropFrame()
|
||||
{
|
||||
// leak appropriate number of bytes
|
||||
_frameDropper->Leak((WebRtc_UWord32)(InputFrameRate() + 0.5f));
|
||||
return _frameDropper->DropFrame();
|
||||
}
|
||||
|
||||
// Reports how many key and delta frames have been encoded so far.
//
// Output:
//          - frameCount : Filled with the key/delta frame counters.
// Return value         : VCM_OK.
WebRtc_Word32
VCMMediaOptimization::SentFrameCount(VCMFrameCount &frameCount) const
{
    frameCount.numKeyFrames = _keyFrameCnt;
    frameCount.numDeltaFrames = _deltaFrameCnt;
    return VCM_OK;
}
|
||||
|
||||
// Informs media optimization of the (new) encoder configuration and resets
// all codec-specific state accordingly.
//
// Input:
//          - sendCodecType : Codec in use.
//          - maxBitRate    : Maximum allowed rate [kbps].
//          - frameRate     : Target frame rate [fps].
//          - bitRate       : Start/target rate [kbps].
//          - width/height  : Codec frame dimensions.
//          - numLayers     : Number of temporal layers.
// Return value : Result of VCMQmResolution::Initialize().
WebRtc_Word32
VCMMediaOptimization::SetEncodingData(VideoCodecType sendCodecType,
                                      WebRtc_Word32 maxBitRate,
                                      WebRtc_UWord32 frameRate,
                                      WebRtc_UWord32 bitRate,
                                      WebRtc_UWord16 width,
                                      WebRtc_UWord16 height,
                                      int numLayers)
{
    // Everything codec specific should be reset here since this means the codec
    // has changed. If native dimension values have changed, then either user
    // initiated change, or QM initiated change. Will be able to determine only
    // after the processing of the first frame.
    _lastChangeTime = _clock->MillisecondTimestamp();
    _content->Reset();
    _content->UpdateFrameRate(frameRate);

    _maxBitRate = maxBitRate;
    _sendCodecType = sendCodecType;
    _targetBitRate = bitRate;
    _lossProtLogic->UpdateBitRate(static_cast<float>(bitRate));
    _lossProtLogic->UpdateFrameRate(static_cast<float>(frameRate));
    _lossProtLogic->UpdateFrameSize(width, height);
    _lossProtLogic->UpdateNumLayers(numLayers);
    _frameDropper->Reset();
    _frameDropper->SetRates(static_cast<float>(bitRate),
                            static_cast<float>(frameRate));
    _userFrameRate = static_cast<float>(frameRate);
    _codecWidth = width;
    _codecHeight = height;
    // Clamp to at least one layer. Can also be zero on input.
    _numLayers = (numLayers <= 1) ? 1 : numLayers;
    WebRtc_Word32 ret = VCM_OK;
    ret = _qmResolution->Initialize((float)_targetBitRate, _userFrameRate,
                                    _codecWidth, _codecHeight, _numLayers);
    return ret;
}
|
||||
|
||||
// Stores the callback used to push protection (FEC/NACK) settings to the
// owner. A NULL callback disables protection reporting.
// Return value : VCM_OK.
WebRtc_Word32
VCMMediaOptimization::RegisterProtectionCallback(VCMProtectionCallback*
                                                protectionCallback)
{
    _videoProtectionCallback = protectionCallback;
    return VCM_OK;
}
|
||||
|
||||
void
|
||||
VCMMediaOptimization::EnableFrameDropper(bool enable)
|
||||
{
|
||||
_frameDropper->Enable(enable);
|
||||
}
|
||||
|
||||
void
|
||||
VCMMediaOptimization::EnableProtectionMethod(bool enable,
|
||||
VCMProtectionMethodEnum method)
|
||||
{
|
||||
bool updated = false;
|
||||
if (enable)
|
||||
{
|
||||
updated = _lossProtLogic->SetMethod(method);
|
||||
}
|
||||
else
|
||||
{
|
||||
_lossProtLogic->RemoveMethod(method);
|
||||
}
|
||||
if (updated)
|
||||
{
|
||||
_lossProtLogic->UpdateMethod();
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
VCMMediaOptimization::IsProtectionMethodEnabled(VCMProtectionMethodEnum method)
|
||||
{
|
||||
return (_lossProtLogic->SelectedType() == method);
|
||||
}
|
||||
|
||||
void
|
||||
VCMMediaOptimization::SetMtu(WebRtc_Word32 mtu)
|
||||
{
|
||||
_maxPayloadSize = mtu;
|
||||
}
|
||||
|
||||
float
|
||||
VCMMediaOptimization::SentFrameRate()
|
||||
{
|
||||
if (_frameDropper)
|
||||
{
|
||||
return _frameDropper->ActualFrameRate((WebRtc_UWord32)(InputFrameRate()
|
||||
+ 0.5f));
|
||||
}
|
||||
|
||||
return VCM_CODEC_ERROR;
|
||||
}
|
||||
|
||||
float
|
||||
VCMMediaOptimization::SentBitRate()
|
||||
{
|
||||
UpdateBitRateEstimate(-1, _clock->MillisecondTimestamp());
|
||||
return _avgSentBitRateBps / 1000.0f;
|
||||
}
|
||||
|
||||
// Maximum allowed bit rate, as configured via SetEncodingData().
WebRtc_Word32
VCMMediaOptimization::MaxBitRate()
{
    return _maxBitRate;
}
|
||||
|
||||
// Feeds the size and type of a newly encoded frame into the bitrate
// estimator, the frame dropper, the loss-protection logic and (when
// enabled) the QM logic, and updates the key/delta frame counters.
//
// Input:
//          - encodedLength    : Size of the encoded frame [bytes].
//          - encodedFrameType : Frame type as reported by the encoder.
// Return value : VCM_OK.
WebRtc_Word32
VCMMediaOptimization::UpdateWithEncodedData(WebRtc_Word32 encodedLength,
                                            FrameType encodedFrameType)
{
    // look into the ViE version - debug mode - needs also number of layers.
    UpdateBitRateEstimate(encodedLength, _clock->MillisecondTimestamp());
    if(encodedLength > 0)
    {
        // Anything that is not a key/golden frame counts as a delta frame.
        const bool deltaFrame = (encodedFrameType != kVideoFrameKey &&
                                 encodedFrameType != kVideoFrameGolden);

        _frameDropper->Fill(encodedLength, deltaFrame);
        // NOTE(review): the |encodedLength > 0| tests below are redundant --
        // we are already inside the encodedLength > 0 branch.
        if (_maxPayloadSize > 0 && encodedLength > 0)
        {
            // Lower bound on the number of packets this frame needs.
            const float minPacketsPerFrame = encodedLength /
                static_cast<float>(_maxPayloadSize);
            if (deltaFrame)
            {
                _lossProtLogic->UpdatePacketsPerFrame(
                    minPacketsPerFrame, _clock->MillisecondTimestamp());
            }
            else
            {
                _lossProtLogic->UpdatePacketsPerFrameKey(
                    minPacketsPerFrame, _clock->MillisecondTimestamp());
            }

            if (_enableQm)
            {
                // Update quality select with encoded length.
                _qmResolution->UpdateEncodedSize(encodedLength,
                                                 encodedFrameType);
            }
        }
        if (!deltaFrame && encodedLength > 0)
        {
            _lossProtLogic->UpdateKeyFrameSize(static_cast<float>(encodedLength));
        }

        // Updating counters.
        if (deltaFrame)
        {
            _deltaFrameCnt++;
        }
        else
        {
            _keyFrameCnt++;
        }

    }

    return VCM_OK;

}
|
||||
|
||||
// Maintains a sliding-window estimate of the sent bit rate.
//
// Input:
//          - encodedLength : Size of a newly completed frame [bytes], or a
//                            non-positive value to only refresh the average.
//          - nowMs         : Current time [ms].
// Side effect : updates _encodedFrameSamples and _avgSentBitRateBps.
void VCMMediaOptimization::UpdateBitRateEstimate(WebRtc_Word64 encodedLength,
                                                 WebRtc_Word64 nowMs)
{
    int i = kBitrateMaxFrameSamples - 1;
    WebRtc_UWord32 frameSizeSum = 0;
    WebRtc_Word64 timeOldest = -1;
    // Find an empty slot for storing the new sample and at the same time
    // accumulate the history (only samples within kBitrateAverageWinMs).
    for (; i >= 0; i--)
    {
        if (_encodedFrameSamples[i]._sizeBytes == -1)
        {
            // Found empty slot.
            break;
        }
        if (nowMs - _encodedFrameSamples[i]._timeCompleteMs <
            kBitrateAverageWinMs)
        {
            frameSizeSum += static_cast<WebRtc_UWord32>
                            (_encodedFrameSamples[i]._sizeBytes);
            // Samples are scanned newest-slot-last; remember the first
            // (oldest in-window) completion time seen.
            if (timeOldest == -1)
            {
                timeOldest = _encodedFrameSamples[i]._timeCompleteMs;
            }
        }
    }
    if (encodedLength > 0)
    {
        if (i < 0)
        {
            // No empty slot: shift the history and reuse slot 0.
            for (i = kBitrateMaxFrameSamples - 2; i >= 0; i--)
            {
                _encodedFrameSamples[i + 1] = _encodedFrameSamples[i];
            }
            i++;
        }
        // Insert new sample.
        _encodedFrameSamples[i]._sizeBytes = encodedLength;
        _encodedFrameSamples[i]._timeCompleteMs = nowMs;
    }
    if (timeOldest > -1)
    {
        // Update average bit rate: bytes * 8 over the elapsed window (ms
        // converted to seconds via the * 1000 factor).
        float denom = static_cast<float>(nowMs - timeOldest);
        if (denom < 1.0)
        {
            denom = 1.0;
        }
        _avgSentBitRateBps = (frameSizeSum + encodedLength) * 8 * 1000 / denom;
    }
    else if (encodedLength > 0)
    {
        // First in-window sample: no elapsed time yet, use raw bits.
        _avgSentBitRateBps = static_cast<float>(encodedLength * 8);
    }
    else
    {
        _avgSentBitRateBps = 0;
    }
}
|
||||
|
||||
|
||||
// Registers the callback used to push quality-mode (resolution / frame-rate)
// decisions to the VPM.
// Return value : VCM_OK.
WebRtc_Word32
VCMMediaOptimization::RegisterVideoQMCallback(VCMQMSettingsCallback*
                                              videoQMSettings)
{
    _videoQMSettingsCallback = videoQMSettings;
    // QM is active exactly when a callback is registered.
    _enableQm = (_videoQMSettingsCallback != NULL);
    return VCM_OK;
}
|
||||
|
||||
void
|
||||
VCMMediaOptimization::updateContentData(const VideoContentMetrics*
|
||||
contentMetrics)
|
||||
{
|
||||
// Updating content metrics
|
||||
if (contentMetrics == NULL)
|
||||
{
|
||||
// Disable QM if metrics are NULL
|
||||
_enableQm = false;
|
||||
_qmResolution->Reset();
|
||||
}
|
||||
else
|
||||
{
|
||||
_content->UpdateContentData(contentMetrics);
|
||||
}
|
||||
}
|
||||
|
||||
// Runs one quality-mode (QM) selection pass: feeds long-term content
// metrics into the QM resolution logic, applies any resulting
// spatial/temporal change via QMUpdate(), then resets the QM rate state and
// content metrics.
// Return value : VCM_OK, or a negative error from SelectResolution().
WebRtc_Word32
VCMMediaOptimization::SelectQuality()
{
    // Reset quantities for QM select.
    _qmResolution->ResetQM();

    // Update QM with long-term averaged content metrics.
    _qmResolution->UpdateContent(_content->LongTermAvgData());

    // Select quality mode.
    VCMResolutionScale* qm = NULL;
    WebRtc_Word32 ret = _qmResolution->SelectResolution(&qm);
    if (ret < 0)
    {
        return ret;
    }

    // Check for updates to spatial/temporal modes.
    QMUpdate(qm);

    // Reset all the rate and related frame counters quantities.
    _qmResolution->ResetRates();

    // Reset counters.
    _lastQMUpdateTime = _clock->MillisecondTimestamp();

    // Reset content metrics.
    _content->Reset();

    return VCM_OK;
}
|
||||
|
||||
|
||||
// Check timing constraints and look for significant change in:
|
||||
// (1) scene content
|
||||
// (2) target bit rate
|
||||
|
||||
bool
|
||||
VCMMediaOptimization::checkStatusForQMchange()
|
||||
{
|
||||
|
||||
bool status = true;
|
||||
|
||||
// Check that we do not call QMSelect too often, and that we waited some time
|
||||
// (to sample the metrics) from the event lastChangeTime
|
||||
// lastChangeTime is the time where user changed the size/rate/frame rate
|
||||
// (via SetEncodingData)
|
||||
WebRtc_Word64 now = _clock->MillisecondTimestamp();
|
||||
if ((now - _lastQMUpdateTime) < kQmMinIntervalMs ||
|
||||
(now - _lastChangeTime) < kQmMinIntervalMs)
|
||||
{
|
||||
status = false;
|
||||
}
|
||||
|
||||
return status;
|
||||
|
||||
}
|
||||
|
||||
// Applies a QM resolution decision: updates internal frame rate / frame
// size state and notifies the VPM via the QM settings callback.
// NOTE(review): _videoQMSettingsCallback is dereferenced unchecked; this is
// only safe because QM paths run when _enableQm is true, which requires a
// registered callback -- confirm no other caller reaches here.
// Return value : true when a change was applied, false for a no-op.
bool VCMMediaOptimization::QMUpdate(VCMResolutionScale* qm) {
  // Check for no change.
  if (!qm->change_resolution_spatial && !qm->change_resolution_temporal) {
    return false;
  }

  // Check for change in frame rate.
  if (qm->change_resolution_temporal) {
    _incomingFrameRate = qm->frame_rate;
    // Reset frame rate estimate.
    memset(_incomingFrameTimes, -1, sizeof(_incomingFrameTimes));
  }

  // Check for change in frame size.
  if (qm->change_resolution_spatial) {
    _codecWidth = qm->codec_width;
    _codecHeight = qm->codec_height;
  }

  WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, _id,
               "Resolution change from QM select: W = %d, H = %d, FR = %f",
               qm->codec_width, qm->codec_height, qm->frame_rate);

  // Update VPM with new target frame rate and frame size.
  // Note: use |qm->frame_rate| instead of |_incomingFrameRate| for updating
  // target frame rate in VPM frame dropper. The quantity |_incomingFrameRate|
  // will vary/fluctuate, and since we don't want to change the state of the
  // VPM frame dropper, unless a temporal action was selected, we use the
  // quantity |qm->frame_rate| for updating.
  _videoQMSettingsCallback->SetVideoQMSettings(qm->frame_rate,
                                               _codecWidth,
                                               _codecHeight);
  _content->UpdateFrameRate(qm->frame_rate);
  _qmResolution->UpdateCodecParameters(qm->frame_rate, _codecWidth,
                                       _codecHeight);
  return true;
}
|
||||
|
||||
// Records the arrival time of an incoming frame and refreshes the incoming
// frame-rate estimate.
void
VCMMediaOptimization::UpdateIncomingFrameRate()
{
    WebRtc_Word64 now = _clock->MillisecondTimestamp();
    // NOTE(review): the history is initialized to -1 (memset in ctor/Reset),
    // so this == 0 "first frame" check normally never fires and the shift
    // happens on the first frame too -- harmless, but confirm intent.
    if (_incomingFrameTimes[0] == 0)
    {
        // First frame: no shift needed.
    } else
    {
        // Shift the history to make room at index 0.
        for(WebRtc_Word32 i = (kFrameCountHistorySize - 2); i >= 0 ; i--)
        {
            _incomingFrameTimes[i+1] = _incomingFrameTimes[i];
        }
    }
    _incomingFrameTimes[0] = now;
    ProcessIncomingFrameRate(now);
}
|
||||
|
||||
// allowing VCM to keep track of incoming frame rate
|
||||
// Recomputes _incomingFrameRate from the frame-arrival history, using only
// entries newer than kFrameHistoryWinMs (2 s). The rate is the number of
// in-window frames divided by the time span they cover; it is left
// unchanged when fewer than two history entries qualify.
void
VCMMediaOptimization::ProcessIncomingFrameRate(WebRtc_Word64 now)
{
    WebRtc_Word32 num = 0;
    WebRtc_Word32 nrOfFrames = 0;
    for (num = 1; num < (kFrameCountHistorySize - 1); num++)
    {
        if (_incomingFrameTimes[num] <= 0 ||
            // Don't use data older than 2 s.
            now - _incomingFrameTimes[num] > kFrameHistoryWinMs)
        {
            break;
        } else
        {
            nrOfFrames++;
        }
    }
    if (num > 1)
    {
        // Span from the oldest in-window frame to now.
        const WebRtc_Word64 diff = now - _incomingFrameTimes[num-1];
        _incomingFrameRate = 1.0;
        if(diff >0)
        {
            _incomingFrameRate = nrOfFrames * 1000.0f / static_cast<float>(diff);
        }
    }
}
|
||||
|
||||
// Current incoming frame rate, rounded to the nearest integer [fps].
WebRtc_UWord32
VCMMediaOptimization::InputFrameRate()
{
    // Refresh the estimate against the current time first.
    ProcessIncomingFrameRate(_clock->MillisecondTimestamp());
    return static_cast<WebRtc_UWord32>(_incomingFrameRate + 0.5f);
}
|
||||
|
||||
}
|
||||
209
webrtc/modules/video_coding/main/source/media_optimization.h
Normal file
209
webrtc/modules/video_coding/main/source/media_optimization.h
Normal file
@ -0,0 +1,209 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPTIMIZATION_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPTIMIZATION_H_
|
||||
|
||||
#include "module_common_types.h"
|
||||
#include "video_coding.h"
|
||||
#include "trace.h"
|
||||
#include "media_opt_util.h"
|
||||
#include "qm_select.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
enum { kBitrateMaxFrameSamples = 60 };
|
||||
enum { kBitrateAverageWinMs = 1000 };
|
||||
|
||||
class TickTimeBase;
|
||||
class VCMContentMetricsProcessing;
|
||||
class VCMFrameDropper;
|
||||
|
||||
// One encoded-frame sample in the sliding-window sent-bitrate estimate.
// _sizeBytes == -1 marks an unused slot.
struct VCMEncodedFrameSample
{
    VCMEncodedFrameSample() : _sizeBytes(-1), _timeCompleteMs(-1) {}

    WebRtc_Word64     _sizeBytes;       // Encoded frame size [bytes].
    WebRtc_Word64     _timeCompleteMs;  // Frame completion time [ms].
};
|
||||
|
||||
// Media optimization: decides the encoder's target rate, frame dropping,
// loss protection (FEC/NACK) settings and quality-mode (QM) resolution
// changes based on channel feedback and content metrics.
class VCMMediaOptimization
{
public:
    VCMMediaOptimization(WebRtc_Word32 id, TickTimeBase* clock);
    ~VCMMediaOptimization(void);
    /*
    * Reset the Media Optimization module
    */
    WebRtc_Word32 Reset();
    /**
    * Set target Rates for the encoder given the channel parameters
    * Inputs: bitRate - target bitRate, in the conference case this is the rate
    *                   between the sending client and the server
    *         fractionLost - packet loss in % in the network
    *         roundTripTimeMs - round trip time in milliseconds
    *         minBitRate - the bit rate of the end-point with lowest rate
    *         maxBitRate - the bit rate of the end-point with highest rate
    */
    WebRtc_UWord32 SetTargetRates(WebRtc_UWord32 bitRate,
                                  WebRtc_UWord8 &fractionLost,
                                  WebRtc_UWord32 roundTripTimeMs);

    /**
    * Inform media optimization of initial encoding state
    */
    WebRtc_Word32 SetEncodingData(VideoCodecType sendCodecType,
                                  WebRtc_Word32 maxBitRate,
                                  WebRtc_UWord32 frameRate,
                                  WebRtc_UWord32 bitRate,
                                  WebRtc_UWord16 width,
                                  WebRtc_UWord16 height,
                                  int numTemporalLayers);
    /**
    * Enable protection method
    */
    void EnableProtectionMethod(bool enable, VCMProtectionMethodEnum method);
    /**
    * Returns whether or not the protection method is enabled
    */
    bool IsProtectionMethodEnabled(VCMProtectionMethodEnum method);
    /**
    * Updates the max payload size
    */
    void SetMtu(WebRtc_Word32 mtu);
    /*
    * Get actual input frame rate
    */
    WebRtc_UWord32 InputFrameRate();

    /*
    * Get actual sent frame rate
    */
    float SentFrameRate();
    /*
    * Get actual sent bit rate
    */
    float SentBitRate();
    /*
    * Get maximum allowed bit rate
    */
    WebRtc_Word32 MaxBitRate();
    /*
    * Inform Media Optimization of encoding output: Length and frame type
    */
    WebRtc_Word32 UpdateWithEncodedData(WebRtc_Word32 encodedLength,
                                        FrameType encodedFrameType);
    /*
    * Register a protection callback to be used to inform the user about the
    * protection methods used
    */
    WebRtc_Word32 RegisterProtectionCallback(VCMProtectionCallback*
                                             protectionCallback);
    /*
    * Register a quality settings callback to be used to inform VPM/user about
    * QM (resolution/frame-rate) decisions
    */
    WebRtc_Word32 RegisterVideoQMCallback(VCMQMSettingsCallback* videoQMSettings);
    void EnableFrameDropper(bool enable);

    bool DropFrame();

    /*
    * Get number of key/delta frames encoded
    */
    WebRtc_Word32 SentFrameCount(VCMFrameCount &frameCount) const;

    /*
    * Update incoming frame rate value
    */
    void UpdateIncomingFrameRate();

    /**
    * Update content metric data
    */
    void updateContentData(const VideoContentMetrics* contentMetrics);

    /**
    * Compute new Quality Mode
    */
    WebRtc_Word32 SelectQuality();

private:

    /*
    * Update protection callback with protection settings
    */
    int UpdateProtectionCallback(VCMProtectionMethod *selected_method,
                                 uint32_t* total_video_rate_bps,
                                 uint32_t* nack_overhead_rate_bps,
                                 uint32_t* fec_overhead_rate_bps);

    // Maintains the sliding-window sent-bitrate estimate.
    void UpdateBitRateEstimate(WebRtc_Word64 encodedLength, WebRtc_Word64 nowMs);
    /*
    * Verify if QM settings differ from default, i.e. if an update is required.
    * Compute actual values, as will be sent to the encoder.
    */
    bool QMUpdate(VCMResolutionScale* qm);
    /**
    * Check if we should make a QM change; returns true if so.
    */
    bool checkStatusForQMchange();

    // Recompute _incomingFrameRate from the arrival-time history.
    void ProcessIncomingFrameRate(WebRtc_Word64 now);

    // Frame-arrival history length and age window [ms].
    enum { kFrameCountHistorySize = 90};
    enum { kFrameHistoryWinMs = 2000};

    WebRtc_Word32                _id;
    TickTimeBase*                _clock;  // Not owned.
    WebRtc_Word32                _maxBitRate;
    VideoCodecType               _sendCodecType;
    WebRtc_UWord16               _codecWidth;
    WebRtc_UWord16               _codecHeight;
    float                        _userFrameRate;

    VCMFrameDropper*             _frameDropper;   // Owned.
    VCMLossProtectionLogic*      _lossProtLogic;  // Owned.
    WebRtc_UWord8                _fractionLost;


    WebRtc_UWord32               _sendStatistics[4];
    WebRtc_UWord32               _sendStatisticsZeroEncode;
    WebRtc_Word32                _maxPayloadSize;
    WebRtc_UWord32               _targetBitRate;

    float                        _incomingFrameRate;
    WebRtc_Word64                _incomingFrameTimes[kFrameCountHistorySize];

    bool                         _enableQm;

    VCMProtectionCallback*       _videoProtectionCallback;   // Not owned.
    VCMQMSettingsCallback*       _videoQMSettingsCallback;   // Not owned.

    VCMEncodedFrameSample        _encodedFrameSamples[kBitrateMaxFrameSamples];
    float                        _avgSentBitRateBps;

    WebRtc_UWord32               _keyFrameCnt;
    WebRtc_UWord32               _deltaFrameCnt;

    VCMContentMetricsProcessing* _content;       // Owned.
    VCMQmResolution*             _qmResolution;  // Owned.

    WebRtc_Word64                _lastQMUpdateTime;
    WebRtc_Word64                _lastChangeTime; // content/user triggered


    int                          _numLayers;
}; // end of VCMMediaOptimization class definition
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPTIMIZATION_H_
|
||||
@ -0,0 +1,47 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_MOCK_FAKE_TICK_TIME_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_MOCK_FAKE_TICK_TIME_H_
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
#include <limits>
|
||||
|
||||
#include "modules/video_coding/main/source/tick_time_base.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Provides a fake implementation of TickTimeBase, intended for offline
|
||||
// testing. This implementation does not query the system clock, but returns a
|
||||
// time value set by the user when creating the object, and incremented with
|
||||
// the method IncrementDebugClock.
|
||||
// Fake clock for offline testing: never queries the system clock. Returns a
// user-set start time that only advances through IncrementDebugClock().
class FakeTickTime : public TickTimeBase {
 public:
  explicit FakeTickTime(int64_t start_time_ms) : fake_now_ms_(start_time_ms) {}
  virtual ~FakeTickTime() {}
  // Current fake time in milliseconds.
  virtual int64_t MillisecondTimestamp() const { return fake_now_ms_; }
  // Current fake time in microseconds, derived from the ms value.
  virtual int64_t MicrosecondTimestamp() const { return 1000 * fake_now_ms_; }
  // Advances the fake clock; overflowing int64_t is a programming error.
  virtual void IncrementDebugClock(int64_t increase_ms) {
    assert(increase_ms <= std::numeric_limits<int64_t>::max() - fake_now_ms_);
    fake_now_ms_ += increase_ms;
  }

 private:
  int64_t fake_now_ms_;
};
|
||||
|
||||
} // namespace
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_MOCK_FAKE_TICK_TIME_H_
|
||||
127
webrtc/modules/video_coding/main/source/nack_fec_tables.h
Normal file
127
webrtc/modules/video_coding/main/source/nack_fec_tables.h
Normal file
@ -0,0 +1,127 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Table for adjusting FEC rate for NACK/FEC protection method
// Table values are built as a sigmoid function, ranging from 0 to
// kHighRttNackMs (100), based on the HybridNackTH values defined in
// media_opt_util.h.
// NOTE(review): presumably indexed by RTT in milliseconds (0..99) by the
// caller in media_opt_util — confirm against usage site.
const WebRtc_UWord16 VCMNackFecTable[100] = {
    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
    0,   1,   1,   1,   1,   1,   2,   2,   2,   3,
    3,   4,   5,   6,   7,   9,   10,  12,  15,  18,
    21,  24,  28,  32,  37,  41,  46,  51,  56,  61,
    66,  70,  74,  78,  81,  84,  86,  89,  90,  92,
    93,  95,  95,  96,  97,  97,  98,  98,  99,  99,
    99,  99,  99,  99,  100, 100, 100, 100, 100, 100,
    100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
    100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
    100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
|
||||
119
webrtc/modules/video_coding/main/source/packet.cc
Normal file
119
webrtc/modules/video_coding/main/source/packet.cc
Normal file
@ -0,0 +1,119 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "packet.h"
|
||||
#include "module_common_types.h"
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Constructs an empty packet: no payload, all RTP fields zeroed, frame type
// "empty" and NALU completeness unset.
VCMPacket::VCMPacket()
    :
    payloadType(0),
    timestamp(0),
    seqNum(0),
    dataPtr(NULL),
    sizeBytes(0),
    markerBit(false),
    frameType(kFrameEmpty),
    codec(kVideoCodecUnknown),
    isFirstPacket(false),
    completeNALU(kNaluUnset),
    insertStartCode(false),
    codecSpecificHeader() {
}
|
||||
|
||||
// Constructs a packet from a received RTP packet. |ptr| points at the payload
// (not copied; the caller retains ownership) and |rtpHeader| supplies the
// parsed RTP and codec-specific video header fields. The codec type and NALU
// completeness are derived in CopyCodecSpecifics().
VCMPacket::VCMPacket(const WebRtc_UWord8* ptr,
                     const WebRtc_UWord32 size,
                     const WebRtcRTPHeader& rtpHeader) :
    payloadType(rtpHeader.header.payloadType),
    timestamp(rtpHeader.header.timestamp),
    seqNum(rtpHeader.header.sequenceNumber),
    dataPtr(ptr),
    sizeBytes(size),
    markerBit(rtpHeader.header.markerBit),

    frameType(rtpHeader.frameType),
    codec(kVideoCodecUnknown),
    isFirstPacket(rtpHeader.type.Video.isFirstPacket),
    completeNALU(kNaluComplete),
    insertStartCode(false),
    codecSpecificHeader(rtpHeader.type.Video)
{
    CopyCodecSpecifics(rtpHeader.type.Video);
}
|
||||
|
||||
// Constructs a bare packet around an existing buffer with explicit sequence
// number, timestamp and marker bit; used when no full RTP header is
// available. Frame type defaults to delta and the NALU is assumed complete.
VCMPacket::VCMPacket(const WebRtc_UWord8* ptr, WebRtc_UWord32 size, WebRtc_UWord16 seq, WebRtc_UWord32 ts, bool mBit) :
    payloadType(0),
    timestamp(ts),
    seqNum(seq),
    dataPtr(ptr),
    sizeBytes(size),
    markerBit(mBit),

    frameType(kVideoFrameDelta),
    codec(kVideoCodecUnknown),
    isFirstPacket(false),
    completeNALU(kNaluComplete),
    insertStartCode(false),
    codecSpecificHeader()
{}
|
||||
|
||||
// Restores this packet to the state produced by the default constructor so
// the object can be reused without reallocation.
void VCMPacket::Reset() {
  payloadType = 0;
  timestamp = 0;
  seqNum = 0;
  dataPtr = NULL;  // Buffer is not owned, so no deallocation here.
  sizeBytes = 0;
  markerBit = false;
  frameType = kFrameEmpty;
  codec = kVideoCodecUnknown;
  isFirstPacket = false;
  completeNALU = kNaluUnset;
  insertStartCode = false;
  memset(&codecSpecificHeader, 0, sizeof(RTPVideoHeader));
}
|
||||
|
||||
void VCMPacket::CopyCodecSpecifics(const RTPVideoHeader& videoHeader)
|
||||
{
|
||||
switch(videoHeader.codec)
|
||||
{
|
||||
case kRTPVideoVP8:
|
||||
{
|
||||
// Handle all packets within a frame as depending on the previous packet
|
||||
// TODO(holmer): This should be changed to make fragments independent
|
||||
// when the VP8 RTP receiver supports fragments.
|
||||
if (isFirstPacket && markerBit)
|
||||
completeNALU = kNaluComplete;
|
||||
else if (isFirstPacket)
|
||||
completeNALU = kNaluStart;
|
||||
else if (markerBit)
|
||||
completeNALU = kNaluEnd;
|
||||
else
|
||||
completeNALU = kNaluIncomplete;
|
||||
|
||||
codec = kVideoCodecVP8;
|
||||
break;
|
||||
}
|
||||
case kRTPVideoI420:
|
||||
{
|
||||
codec = kVideoCodecI420;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
codec = kVideoCodecUnknown;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
57
webrtc/modules/video_coding/main/source/packet.h
Normal file
57
webrtc/modules/video_coding/main/source/packet.h
Normal file
@ -0,0 +1,57 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
#include "module_common_types.h"
|
||||
#include "jitter_buffer_common.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Holds one packet of encoded video payload plus the RTP fields the jitter
// buffer needs (sequence number, timestamp, marker bit) and the
// codec-specific video header. The payload buffer is referenced, not owned.
class VCMPacket
{
public:
    // Creates an empty packet; see packet.cc for the defaulted values.
    VCMPacket();
    // Creates a packet from a received RTP packet; derives codec-specific
    // state from |rtpHeader|.
    VCMPacket(const WebRtc_UWord8* ptr,
              const WebRtc_UWord32 size,
              const WebRtcRTPHeader& rtpHeader);
    // Creates a bare packet with explicit sequence number, timestamp and
    // marker bit when no full RTP header is available.
    VCMPacket(const WebRtc_UWord8* ptr,
              WebRtc_UWord32 size,
              WebRtc_UWord16 seqNum,
              WebRtc_UWord32 timestamp,
              bool markerBit);

    // Resets all members to their default (empty-packet) values.
    void Reset();

    WebRtc_UWord8 payloadType;
    WebRtc_UWord32 timestamp;
    WebRtc_UWord16 seqNum;
    const WebRtc_UWord8* dataPtr;  // Payload; not owned by this class.
    WebRtc_UWord32 sizeBytes;
    bool markerBit;

    FrameType frameType;
    webrtc::VideoCodecType codec;

    bool isFirstPacket; // Is this first packet in a frame.
    VCMNaluCompleteness completeNALU; // Default is kNaluIncomplete.
    bool insertStartCode; // True if a start code should be inserted before this
                          // packet.
    RTPVideoHeader codecSpecificHeader;

protected:
    // Derives |codec| and |completeNALU| from the codec-specific RTP video
    // header.
    void CopyCodecSpecifics(const RTPVideoHeader& videoHeader);
};
|
||||
|
||||
} // namespace webrtc
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
|
||||
959
webrtc/modules/video_coding/main/source/qm_select.cc
Normal file
959
webrtc/modules/video_coding/main/source/qm_select.cc
Normal file
@ -0,0 +1,959 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "modules/video_coding/main/source/qm_select.h"
|
||||
|
||||
#include <math.h>
|
||||
|
||||
#include "modules/interface/module_common_types.h"
|
||||
#include "modules/video_coding/main/source/internal_defines.h"
|
||||
#include "modules/video_coding/main/source/qm_select_data.h"
|
||||
#include "modules/video_coding/main/interface/video_coding_defines.h"
|
||||
#include "system_wrappers/interface/trace.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// QM-METHOD class
|
||||
|
||||
// Base class for quality-mode selection. Starts with no content metrics,
// zeroed frame geometry/rate and a reset QM state; callers must Initialize()
// (in the derived class) before selection is meaningful.
VCMQmMethod::VCMQmMethod()
    : content_metrics_(NULL),
      width_(0),
      height_(0),
      user_frame_rate_(0.0f),
      native_width_(0),
      native_height_(0),
      native_frame_rate_(0.0f),
      image_type_(kVGA),
      framerate_level_(kFrameRateHigh),
      init_(false) {
  ResetQM();
}
|
||||
|
||||
// Nothing owned at this level; content_metrics_ is a non-owning pointer.
VCMQmMethod::~VCMQmMethod() {
}
|
||||
|
||||
// Resets the content-analysis state (aspect ratio, motion/spatial measures
// and the derived content class) to neutral defaults.
void VCMQmMethod::ResetQM() {
  aspect_ratio_ = 1.0f;
  motion_.Reset();
  spatial_.Reset();
  content_class_ = 0;
}
|
||||
|
||||
// Recomputes the motion and spatial levels and combines them into a single
// content class in [0, 8]: 3 * motion level + spatial level. The result is
// cached in content_class_ and also returned.
uint8_t VCMQmMethod::ComputeContentClass() {
  ComputeMotionNFD();
  ComputeSpatial();
  content_class_ = 3 * motion_.level + spatial_.level;
  return content_class_;
}
|
||||
|
||||
// Stores a (non-owning) pointer to the latest content metrics; may be NULL,
// in which case selection falls back to defaults.
void VCMQmMethod::UpdateContent(const VideoContentMetrics* contentMetrics) {
  content_metrics_ = contentMetrics;
}
|
||||
|
||||
// Updates the motion measure from the content metrics (NFD = normalized
// frame difference) and buckets it into low/default/high against the
// kLowMotionNfd/kHighMotionNfd thresholds. If no metrics are available the
// previous motion_.value is reused.
void VCMQmMethod::ComputeMotionNFD() {
  if (content_metrics_) {
    motion_.value = content_metrics_->motion_magnitude;
  }
  // Determine motion level.
  if (motion_.value < kLowMotionNfd) {
    motion_.level = kLow;
  } else if (motion_.value > kHighMotionNfd) {
    motion_.level = kHigh;
  } else {
    motion_.level = kDefault;
  }
}
|
||||
|
||||
// Updates the spatial (texture) measure as the mean of the three spatial
// prediction errors from the content metrics, then buckets it into
// low/default/high. Thresholds are scaled down for frames larger than VGA,
// where pixel correlation is higher. Missing metrics yield a zero measure.
void VCMQmMethod::ComputeSpatial() {
  float spatial_err = 0.0;
  float spatial_err_h = 0.0;
  float spatial_err_v = 0.0;
  if (content_metrics_) {
    spatial_err = content_metrics_->spatial_pred_err;
    spatial_err_h = content_metrics_->spatial_pred_err_h;
    spatial_err_v = content_metrics_->spatial_pred_err_v;
  }
  // Spatial measure: take average of 3 prediction errors.
  spatial_.value = (spatial_err + spatial_err_h + spatial_err_v) / 3.0f;

  // Reduce thresholds for large scenes/higher pixel correlation.
  float scale2 = image_type_ > kVGA ? kScaleTexture : 1.0;

  if (spatial_.value > scale2 * kHighTexture) {
    spatial_.level = kHigh;
  } else if (spatial_.value < scale2 * kLowTexture) {
    spatial_.level = kLow;
  } else {
    spatial_.level = kDefault;
  }
}
|
||||
|
||||
// Maps a frame size onto one of the known image-type buckets by exact pixel
// count, falling back to the nearest bucket when there is no exact match.
ImageType VCMQmMethod::GetImageType(uint16_t width,
                                    uint16_t height) {
  // Get the image type for the encoder frame size.
  uint32_t image_size = width * height;
  if (image_size == kSizeOfImageType[kQCIF]) {
    return kQCIF;
  } else if (image_size == kSizeOfImageType[kHCIF]) {
    return kHCIF;
  } else if (image_size == kSizeOfImageType[kQVGA]) {
    return kQVGA;
  } else if (image_size == kSizeOfImageType[kCIF]) {
    return kCIF;
  } else if (image_size == kSizeOfImageType[kHVGA]) {
    return kHVGA;
  } else if (image_size == kSizeOfImageType[kVGA]) {
    return kVGA;
  } else if (image_size == kSizeOfImageType[kQFULLHD]) {
    return kQFULLHD;
  } else if (image_size == kSizeOfImageType[kWHD]) {
    return kWHD;
  } else if (image_size == kSizeOfImageType[kFULLHD]) {
    return kFULLHD;
  } else {
    // No exact match, find closest one.
    return FindClosestImageType(width, height);
  }
}
|
||||
|
||||
// Linear scan over the known image sizes, returning the type whose pixel
// count is closest (by absolute difference) to width * height. Note the
// search minimum is seeded with the image size itself, so a candidate only
// wins if its distance is strictly below that.
ImageType VCMQmMethod::FindClosestImageType(uint16_t width, uint16_t height) {
  float size = static_cast<float>(width * height);
  float min = size;
  int isel = 0;
  for (int i = 0; i < kNumImageTypes; ++i) {
    float dist = fabs(size - kSizeOfImageType[i]);
    if (dist < min) {
      min = dist;
      isel = i;
    }
  }
  return static_cast<ImageType>(isel);
}
|
||||
|
||||
// Buckets an average frame rate into one of the discrete frame-rate levels
// used by the QM tables. Thresholds are inclusive on the low side:
// <= kLowFrameRate, <= kMiddleFrameRate, <= kHighFrameRate, else high.
FrameRateLevelClass VCMQmMethod::FrameRateLevel(float avg_framerate) {
  if (avg_framerate <= kLowFrameRate)
    return kFrameRateLow;
  if (avg_framerate <= kMiddleFrameRate)
    return kFrameRateMiddle1;
  if (avg_framerate <= kHighFrameRate)
    return kFrameRateMiddle2;
  return kFrameRateHigh;
}
|
||||
|
||||
// RESOLUTION CLASS
|
||||
|
||||
// Allocates the resolution-scale result object (owned; freed in the
// destructor) and resets all selection state.
VCMQmResolution::VCMQmResolution()
    :  qm_(new VCMResolutionScale()) {
  Reset();
}
|
||||
|
||||
// Releases the owned result object allocated in the constructor.
VCMQmResolution::~VCMQmResolution() {
  delete qm_;
}
|
||||
|
||||
// Clears the per-interval rate accumulators and counters, and re-seeds the
// (virtual) buffer level from the current target bitrate. Called after each
// SelectResolution() and from Reset().
void VCMQmResolution::ResetRates() {
  sum_target_rate_ = 0.0f;
  sum_incoming_framerate_ = 0.0f;
  sum_rate_MM_ = 0.0f;
  sum_rate_MM_sgn_ = 0.0f;
  sum_packet_loss_ = 0.0f;
  // Initial buffer level, proportional to the current target bitrate.
  buffer_level_ = kInitBufferLevel * target_bitrate_;
  frame_cnt_ = 0;
  frame_cnt_delta_ = 0;
  low_buffer_cnt_ = 0;
  update_rate_cnt_ = 0;
}
|
||||
|
||||
// Clears the cumulative spatial/temporal down-sampling factors (1.0 = no
// down-sampling) and the history of down-sampling actions.
void VCMQmResolution::ResetDownSamplingState() {
  state_dec_factor_spatial_ = 1.0;
  state_dec_factor_temporal_  = 1.0;
  for (int i = 0; i < kDownActionHistorySize; i++) {
    down_action_history_[i].spatial = kNoChangeSpatial;
    down_action_history_[i].temporal = kNoChangeTemporal;
  }
}
|
||||
|
||||
// Full reset: zeroes rate state and averages, marks the encoder state as
// stable, and clears the down-sampling and QM (content) state.
void VCMQmResolution::Reset() {
  target_bitrate_ = 0.0f;
  incoming_framerate_ = 0.0f;
  buffer_level_ = 0.0f;
  per_frame_bandwidth_ = 0.0f;
  avg_target_rate_ = 0.0f;
  avg_incoming_framerate_ = 0.0f;
  avg_ratio_buffer_low_ = 0.0f;
  avg_rate_mismatch_ = 0.0f;
  avg_rate_mismatch_sgn_ = 0.0f;
  avg_packet_loss_ = 0.0f;
  encoder_state_ = kStableEncoding;
  num_layers_ = 1;
  ResetRates();
  ResetDownSamplingState();
  ResetQM();
}
|
||||
|
||||
// Returns the most recently computed encoder state (stable/stressed/easy);
// see ComputeEncoderState() for how it is derived.
EncoderState VCMQmResolution::GetEncoderState() {
  return encoder_state_;
}
|
||||
|
||||
// Initialize state after re-initializing the encoder,
|
||||
// i.e., after SetEncodingData() in mediaOpt.
|
||||
// Initialize state after re-initializing the encoder,
// i.e., after SetEncodingData() in mediaOpt.
// |width|, |height| and |user_framerate| become the "native" resolution that
// up-sampling may later return to. Returns VCM_PARAMETER_ERROR on a zero
// frame rate or frame dimension, otherwise VCM_OK.
int VCMQmResolution::Initialize(float bitrate,
                                float user_framerate,
                                uint16_t width,
                                uint16_t height,
                                int num_layers) {
  if (user_framerate == 0.0f || width == 0 || height == 0) {
    return VCM_PARAMETER_ERROR;
  }
  Reset();
  target_bitrate_ = bitrate;
  incoming_framerate_ = user_framerate;
  UpdateCodecParameters(user_framerate, width, height);
  native_width_ = width;
  native_height_ = height;
  native_frame_rate_ = user_framerate;
  num_layers_ = num_layers;
  // Initial buffer level.
  buffer_level_ = kInitBufferLevel * target_bitrate_;
  // Per-frame bandwidth.
  per_frame_bandwidth_ = target_bitrate_ / user_framerate;
  init_  = true;
  return VCM_OK;
}
|
||||
|
||||
// Records the encoder's current frame size and target frame rate, and
// refreshes the derived image-type bucket.
void VCMQmResolution::UpdateCodecParameters(float frame_rate, uint16_t width,
                                            uint16_t height) {
  width_ = width;
  height_ = height;
  // |user_frame_rate| is the target frame rate for VPM frame dropper.
  user_frame_rate_ = frame_rate;
  image_type_ = GetImageType(width, height);
}
|
||||
|
||||
// Update rate data after every encoded frame.
|
||||
// Update rate data after every encoded frame.
// Feeds the virtual buffer model: adds the per-frame budget and subtracts
// the actual encoded size, counting occurrences of a low buffer level.
void VCMQmResolution::UpdateEncodedSize(int encoded_size,
                                        FrameType encoded_frame_type) {
  frame_cnt_++;
  // Convert to Kbps.
  float encoded_size_kbits = static_cast<float>((encoded_size * 8.0) / 1000.0);

  // Update the buffer level:
  // Note this is not the actual encoder buffer level.
  // |buffer_level_| is reset to an initial value after SelectResolution is
  // called, and does not account for frame dropping by encoder or VCM.
  buffer_level_ += per_frame_bandwidth_ - encoded_size_kbits;

  // Counter for occurrences of low buffer level:
  // low/negative values means encoder is likely dropping frames.
  if (buffer_level_ <= kPercBufferThr * kInitBufferLevel * target_bitrate_) {
    low_buffer_cnt_++;
  }
}
|
||||
|
||||
// Update various quantities after SetTargetRates in MediaOpt.
|
||||
// Update various quantities after SetTargetRates in MediaOpt.
// Accumulates statistics over the previous ~1s interval (target rate, packet
// loss, rate mismatch vs. what the encoder actually produced), then installs
// the new target bitrate/frame rate and the derived per-frame bandwidth.
void VCMQmResolution::UpdateRates(float target_bitrate,
                                  float encoder_sent_rate,
                                  float incoming_framerate,
                                  uint8_t packet_loss) {
  // Sum the target bitrate: this is the encoder rate from previous update
  // (~1sec), i.e, before the update for next ~1sec.
  sum_target_rate_ += target_bitrate_;
  update_rate_cnt_++;

  // Sum the received (from RTCP reports) packet loss rates.
  sum_packet_loss_ += static_cast<float>(packet_loss / 255.0);

  // Sum the sequence rate mismatch:
  // Mismatch here is based on the difference between the target rate
  // used (in previous ~1sec) and the average actual encoding rate measured
  // at previous ~1sec.
  float diff = target_bitrate_ - encoder_sent_rate;
  if (target_bitrate_ > 0.0)
    sum_rate_MM_ += fabs(diff) / target_bitrate_;
  int sgnDiff = diff > 0 ? 1 : (diff < 0 ? -1 : 0);
  // To check for consistent under(+)/over_shooting(-) of target rate.
  sum_rate_MM_sgn_ += sgnDiff;

  // Update with the current new target and frame rate:
  // these values are ones the encoder will use for the current/next ~1sec.
  target_bitrate_ =  target_bitrate;
  incoming_framerate_ = incoming_framerate;
  sum_incoming_framerate_ += incoming_framerate_;
  // Update the per_frame_bandwidth:
  // this is the per_frame_bw for the current/next ~1sec.
  per_frame_bandwidth_  = 0.0f;
  if (incoming_framerate_ > 0.0f) {
    per_frame_bandwidth_ = target_bitrate_ / incoming_framerate_;
  }
}
|
||||
|
||||
// Select the resolution factors: frame size and frame rate change (qm scales).
|
||||
// Selection is for going down in resolution, or for going back up
|
||||
// (if a previous down-sampling action was taken).
|
||||
|
||||
// In the current version the following constraints are imposed:
|
||||
// 1) We only allow for one action, either down or up, at a given time.
|
||||
// 2) The possible down-sampling actions are: spatial by 1/2x1/2, 3/4x3/4;
|
||||
// temporal/frame rate reduction by 1/2 and 2/3.
|
||||
// 3) The action for going back up is the reverse of last (spatial or temporal)
|
||||
// down-sampling action. The list of down-sampling actions from the
|
||||
// Initialize() state are kept in |down_action_history_|.
|
||||
// 4) The total amount of down-sampling (spatial and/or temporal) from the
|
||||
// Initialize() state (native resolution) is limited by various factors.
|
||||
// Entry point for resolution selection; see the constraints documented
// above. On return *qm points at the owned result object. With no content
// metrics the state is reset and no action is taken. Up-sampling (undoing a
// previous action) is checked before down-sampling. Always returns VCM_OK
// once initialized.
int VCMQmResolution::SelectResolution(VCMResolutionScale** qm) {
  if (!init_) {
    return VCM_UNINITIALIZED;
  }
  if (content_metrics_ == NULL) {
    Reset();
    *qm =  qm_;
    return VCM_OK;
  }

  // Check conditions on down-sampling state.
  assert(state_dec_factor_spatial_ >= 1.0f);
  assert(state_dec_factor_temporal_ >= 1.0f);
  assert(state_dec_factor_spatial_ <= kMaxSpatialDown);
  assert(state_dec_factor_temporal_ <= kMaxTempDown);
  assert(state_dec_factor_temporal_ * state_dec_factor_spatial_ <=
         kMaxTotalDown);

  // Compute content class for selection.
  content_class_ = ComputeContentClass();
  // Compute various rate quantities for selection.
  ComputeRatesForSelection();

  // Get the encoder state.
  ComputeEncoderState();

  // Default settings: no action.
  SetDefaultAction();
  *qm = qm_;

  // Check for going back up in resolution, if we have had some down-sampling
  // relative to native state in Initialize().
  if (down_action_history_[0].spatial != kNoChangeSpatial ||
      down_action_history_[0].temporal != kNoChangeTemporal) {
    if (GoingUpResolution()) {
      *qm = qm_;
      return VCM_OK;
    }
  }

  // Check for going down in resolution.
  if (GoingDownResolution()) {
    *qm = qm_;
    return VCM_OK;
  }
  return VCM_OK;
}
|
||||
|
||||
// Fills the result object with the "no change" outcome (current size and
// frame rate, unit scale factors) and clears the pending action.
void VCMQmResolution::SetDefaultAction() {
  qm_->codec_width = width_;
  qm_->codec_height = height_;
  qm_->frame_rate = user_frame_rate_;
  qm_->change_resolution_spatial = false;
  qm_->change_resolution_temporal = false;
  qm_->spatial_width_fact = 1.0f;
  qm_->spatial_height_fact = 1.0f;
  qm_->temporal_fact = 1.0f;
  action_.spatial = kNoChangeSpatial;
  action_.temporal = kNoChangeTemporal;
}
|
||||
|
||||
// Turns the accumulated sums into averages, blends them with the most recent
// (next-interval) values via kWeightRate, and derives the frame-rate level
// from the base-layer rate when temporal layers are in use.
void VCMQmResolution::ComputeRatesForSelection() {
  avg_target_rate_ = 0.0f;
  avg_incoming_framerate_ = 0.0f;
  avg_ratio_buffer_low_ = 0.0f;
  avg_rate_mismatch_ = 0.0f;
  avg_rate_mismatch_sgn_ = 0.0f;
  avg_packet_loss_ = 0.0f;
  if (frame_cnt_ > 0) {
    avg_ratio_buffer_low_ = static_cast<float>(low_buffer_cnt_) /
        static_cast<float>(frame_cnt_);
  }
  if (update_rate_cnt_ > 0) {
    avg_rate_mismatch_ = static_cast<float>(sum_rate_MM_) /
        static_cast<float>(update_rate_cnt_);
    avg_rate_mismatch_sgn_ = static_cast<float>(sum_rate_MM_sgn_) /
        static_cast<float>(update_rate_cnt_);
    avg_target_rate_ = static_cast<float>(sum_target_rate_) /
        static_cast<float>(update_rate_cnt_);
    avg_incoming_framerate_ = static_cast<float>(sum_incoming_framerate_) /
        static_cast<float>(update_rate_cnt_);
    avg_packet_loss_ =  static_cast<float>(sum_packet_loss_) /
        static_cast<float>(update_rate_cnt_);
  }
  // For selection we may want to weight some quantities more heavily
  // with the current (i.e., next ~1sec) rate values.
  avg_target_rate_ = kWeightRate * avg_target_rate_ +
      (1.0 - kWeightRate) * target_bitrate_;
  avg_incoming_framerate_ = kWeightRate * avg_incoming_framerate_ +
      (1.0 - kWeightRate) * incoming_framerate_;
  // Use base layer frame rate for temporal layers: this will favor spatial.
  assert(num_layers_ > 0);
  framerate_level_ = FrameRateLevel(
      avg_incoming_framerate_ / static_cast<float>(1 << (num_layers_ - 1)));
}
|
||||
|
||||
// Classifies the encoder as stable (default), stressed (frequent low buffer
// levels or consistent over-shooting of the target rate), or easy
// (consistent under-shooting). The easy check runs last, so it overrides
// stressed when both rate-mismatch conditions would match.
void VCMQmResolution::ComputeEncoderState() {
  // Default.
  encoder_state_ = kStableEncoding;

  // Assign stressed state if:
  // 1) occurrences of low buffer levels is high, or
  // 2) rate mis-match is high, and consistent over-shooting by encoder.
  if ((avg_ratio_buffer_low_ > kMaxBufferLow) ||
      ((avg_rate_mismatch_ > kMaxRateMisMatch) &&
          (avg_rate_mismatch_sgn_ < -kRateOverShoot))) {
    encoder_state_ = kStressedEncoding;
  }
  // Assign easy state if:
  // 1) rate mis-match is high, and
  // 2) consistent under-shooting by encoder.
  if ((avg_rate_mismatch_ > kMaxRateMisMatch) &&
      (avg_rate_mismatch_sgn_ > kRateUnderShoot)) {
    encoder_state_ = kEasyEncoding;
  }
}
|
||||
|
||||
// Decides whether to undo the most recent down-sampling action. Tries the
// combined spatial+temporal undo first, then each dimension separately; when
// both qualify, PickSpatialOrTemporal() breaks the tie. Returns true (and
// updates the down-sampling state) if any up action was taken.
bool VCMQmResolution::GoingUpResolution() {
  // For going up, we check for undoing the previous down-sampling action.

  float fac_width = kFactorWidthSpatial[down_action_history_[0].spatial];
  float fac_height = kFactorHeightSpatial[down_action_history_[0].spatial];
  float fac_temp = kFactorTemporal[down_action_history_[0].temporal];
  // For going up spatially, we allow for going up by 3/4x3/4 at each stage.
  // So if the last spatial action was 1/2x1/2 it would be undone in 2 stages.
  // Modify the fac_width/height for this case.
  if (down_action_history_[0].spatial == kOneQuarterSpatialUniform) {
    fac_width = kFactorWidthSpatial[kOneQuarterSpatialUniform] /
        kFactorWidthSpatial[kOneHalfSpatialUniform];
    fac_height = kFactorHeightSpatial[kOneQuarterSpatialUniform] /
        kFactorHeightSpatial[kOneHalfSpatialUniform];
  }

  // Check if we should go up both spatially and temporally.
  if (down_action_history_[0].spatial != kNoChangeSpatial &&
      down_action_history_[0].temporal != kNoChangeTemporal) {
    if (ConditionForGoingUp(fac_width, fac_height, fac_temp,
                            kTransRateScaleUpSpatialTemp)) {
      action_.spatial = down_action_history_[0].spatial;
      action_.temporal = down_action_history_[0].temporal;
      UpdateDownsamplingState(kUpResolution);
      return true;
    }
  }
  // Check if we should go up either spatially or temporally.
  bool selected_up_spatial = false;
  bool selected_up_temporal = false;
  if (down_action_history_[0].spatial != kNoChangeSpatial) {
    selected_up_spatial = ConditionForGoingUp(fac_width, fac_height, 1.0f,
                                              kTransRateScaleUpSpatial);
  }
  if (down_action_history_[0].temporal != kNoChangeTemporal) {
    selected_up_temporal = ConditionForGoingUp(1.0f, 1.0f, fac_temp,
                                               kTransRateScaleUpTemp);
  }
  if (selected_up_spatial && !selected_up_temporal) {
    action_.spatial = down_action_history_[0].spatial;
    action_.temporal = kNoChangeTemporal;
    UpdateDownsamplingState(kUpResolution);
    return true;
  } else if (!selected_up_spatial && selected_up_temporal) {
    action_.spatial = kNoChangeSpatial;
    action_.temporal = down_action_history_[0].temporal;
    UpdateDownsamplingState(kUpResolution);
    return true;
  } else if (selected_up_spatial && selected_up_temporal) {
    PickSpatialOrTemporal();
    UpdateDownsamplingState(kUpResolution);
    return true;
  }
  return false;
}
|
||||
|
||||
// Returns true if the average target rate supports undoing a down-sampling
// action of the given scale factors (and the encoder is stable), or
// unconditionally when the encoder is significantly under-shooting.
bool VCMQmResolution::ConditionForGoingUp(float fac_width,
                                          float fac_height,
                                          float fac_temp,
                                          float scale_fac) {
  float estimated_transition_rate_up = GetTransitionRate(fac_width, fac_height,
                                                         fac_temp, scale_fac);
  // Go back up if:
  // 1) target rate is above threshold and current encoder state is stable, or
  // 2) encoder state is easy (encoder is significantly under-shooting target).
  if (((avg_target_rate_ > estimated_transition_rate_up) &&
      (encoder_state_ == kStableEncoding)) ||
      (encoder_state_ == kEasyEncoding)) {
    return true;
  } else {
    return false;
  }
}
|
||||
|
||||
// Decides whether to reduce resolution, and if so which single action
// (spatial OR temporal, never both) to take. The action is read from the
// kSpatialAction/kTemporalAction tables, indexed by content class and by how
// far the average target rate sits below the transition rate, then adjusted
// and constrained before the state is updated. Returns true if an action
// was taken.
bool VCMQmResolution::GoingDownResolution() {
  float estimated_transition_rate_down =
      GetTransitionRate(1.0f, 1.0f, 1.0f, 1.0f);
  float max_rate = kFrameRateFac[framerate_level_] * kMaxRateQm[image_type_];
  // Resolution reduction if:
  // (1) target rate is below transition rate, or
  // (2) encoder is in stressed state and target rate below a max threshold.
  if ((avg_target_rate_ < estimated_transition_rate_down ) ||
      (encoder_state_ == kStressedEncoding && avg_target_rate_ < max_rate)) {
    // Get the down-sampling action: based on content class, and how low
    // average target rate is relative to transition rate.
    uint8_t spatial_fact =
        kSpatialAction[content_class_ +
                       9 * RateClass(estimated_transition_rate_down)];
    uint8_t temp_fact =
        kTemporalAction[content_class_ +
                        9 * RateClass(estimated_transition_rate_down)];

    switch (spatial_fact) {
      case 4: {
        action_.spatial = kOneQuarterSpatialUniform;
        break;
      }
      case 2: {
        action_.spatial = kOneHalfSpatialUniform;
        break;
      }
      case 1: {
        action_.spatial = kNoChangeSpatial;
        break;
      }
      default: {
        assert(false);
      }
    }
    switch (temp_fact) {
      case 3: {
        action_.temporal = kTwoThirdsTemporal;
        break;
      }
      case 2: {
        action_.temporal = kOneHalfTemporal;
        break;
      }
      case 1: {
        action_.temporal = kNoChangeTemporal;
        break;
      }
      default: {
        assert(false);
      }
    }
    // Only allow for one action (spatial or temporal) at a given time.
    assert(action_.temporal == kNoChangeTemporal ||
           action_.spatial == kNoChangeSpatial);

    // Adjust cases not captured in tables, mainly based on frame rate, and
    // also check for odd frame sizes.
    AdjustAction();

    // Update down-sampling state.
    if (action_.spatial != kNoChangeSpatial ||
        action_.temporal != kNoChangeTemporal) {
      UpdateDownsamplingState(kDownResolution);
      return true;
    }
  }
  return false;
}
|
||||
|
||||
// Computes the bitrate threshold that separates "keep current resolution"
// from "take the action described by the scale factors". The threshold is
// the nominal max rate for the scaled frame size/rate, scaled by a table
// factor that depends on content class and image size, and by |scale_fac|.
float VCMQmResolution::GetTransitionRate(float fac_width,
                                         float fac_height,
                                         float fac_temp,
                                         float scale_fac) {
  ImageType image_type = GetImageType(
      static_cast<uint16_t>(fac_width * width_),
      static_cast<uint16_t>(fac_height * height_));

  FrameRateLevelClass framerate_level =
      FrameRateLevel(fac_temp * avg_incoming_framerate_);
  // If we are checking for going up temporally, and this is the last
  // temporal action, then use native frame rate.
  if (down_action_history_[1].temporal == kNoChangeTemporal &&
      fac_temp > 1.0f) {
    framerate_level = FrameRateLevel(native_frame_rate_);
  }

  // The maximum allowed rate below which down-sampling is allowed:
  // Nominal values based on image format (frame size and frame rate).
  float max_rate = kFrameRateFac[framerate_level] * kMaxRateQm[image_type];

  uint8_t image_class = image_type > kVGA ? 1: 0;
  uint8_t table_index = image_class * 9 + content_class_;
  // Scale factor for down-sampling transition threshold:
  // factor based on the content class and the image size.
  float scaleTransRate = kScaleTransRateQm[table_index];
  // Threshold bitrate for resolution action.
  return static_cast<float> (scale_fac * scaleTransRate * max_rate);
}
|
||||
|
||||
// Converts the chosen action into the scale factors written to qm_, updates
// the action history (pop for up, push for down), recomputes the codec
// resolution, and folds the factors into the cumulative down-sampling state.
// Must only be called after an up or down action has been selected.
void VCMQmResolution::UpdateDownsamplingState(UpDownAction up_down) {
  if (up_down == kUpResolution) {
    qm_->spatial_width_fact = 1.0f / kFactorWidthSpatial[action_.spatial];
    qm_->spatial_height_fact = 1.0f / kFactorHeightSpatial[action_.spatial];
    // If last spatial action was 1/2x1/2, we undo it in two steps, so the
    // spatial scale factor in this first step is modified as (4.0/3.0 / 2.0).
    if (action_.spatial == kOneQuarterSpatialUniform) {
      qm_->spatial_width_fact =
          1.0f * kFactorWidthSpatial[kOneHalfSpatialUniform] /
          kFactorWidthSpatial[kOneQuarterSpatialUniform];
      qm_->spatial_height_fact =
          1.0f * kFactorHeightSpatial[kOneHalfSpatialUniform] /
          kFactorHeightSpatial[kOneQuarterSpatialUniform];
    }
    qm_->temporal_fact = 1.0f / kFactorTemporal[action_.temporal];
    RemoveLastDownAction();
  } else if (up_down == kDownResolution) {
    ConstrainAmountOfDownSampling();
    ConvertSpatialFractionalToWhole();
    qm_->spatial_width_fact = kFactorWidthSpatial[action_.spatial];
    qm_->spatial_height_fact = kFactorHeightSpatial[action_.spatial];
    qm_->temporal_fact = kFactorTemporal[action_.temporal];
    InsertLatestDownAction();
  } else {
    // This function should only be called if either the Up or Down action
    // has been selected.
    assert(false);
  }
  UpdateCodecResolution();
  state_dec_factor_spatial_ = state_dec_factor_spatial_ *
      qm_->spatial_width_fact * qm_->spatial_height_fact;
  state_dec_factor_temporal_ = state_dec_factor_temporal_ * qm_->temporal_fact;
}
|
||||
|
||||
// Writes the new codec frame size and frame rate into |qm_|, based on the
// spatial/temporal scale factors set in UpdateDownsamplingState().
void VCMQmResolution::UpdateCodecResolution() {
  if (action_.spatial != kNoChangeSpatial) {
    qm_->change_resolution_spatial = true;
    // Round to the nearest integer size (+0.5f before truncation).
    qm_->codec_width = static_cast<uint16_t>(width_ /
        qm_->spatial_width_fact + 0.5f);
    qm_->codec_height = static_cast<uint16_t>(height_ /
        qm_->spatial_height_fact + 0.5f);
    // Size should not exceed native sizes.
    assert(qm_->codec_width <= native_width_);
    assert(qm_->codec_height <= native_height_);
    // New sizes should be multiple of 2, otherwise spatial should not have
    // been selected.
    assert(qm_->codec_width % 2 == 0);
    assert(qm_->codec_height % 2 == 0);
  }
  if (action_.temporal != kNoChangeTemporal) {
    qm_->change_resolution_temporal = true;
    // Update the frame rate based on the average incoming frame rate.
    qm_->frame_rate = avg_incoming_framerate_ / qm_->temporal_fact + 0.5f;
    if (down_action_history_[0].temporal == 0) {
      // When we undo the last temporal-down action, make sure we go back up
      // to the native frame rate. Since the incoming frame rate may
      // fluctuate over time, |avg_incoming_framerate_| scaled back up may
      // be smaller than |native_frame_rate_|.
      qm_->frame_rate = native_frame_rate_;
    }
  }
}
|
||||
|
||||
uint8_t VCMQmResolution::RateClass(float transition_rate) {
|
||||
return avg_target_rate_ < (kFacLowRate * transition_rate) ? 0:
|
||||
(avg_target_rate_ >= transition_rate ? 2 : 1);
|
||||
}
|
||||
|
||||
// TODO(marpan): Would be better to capture these frame rate adjustments by
// extending the table data (qm_select_data.h).
// Post-processes the table-selected |action_| with heuristic overrides.
// The rules are applied in order, so a later rule can override the result
// of an earlier one.
void VCMQmResolution::AdjustAction() {
  // If the spatial level is default state (neither low or high), motion level
  // is not high, and spatial action was selected, switch to 2/3 frame rate
  // reduction if the average incoming frame rate is high.
  if (spatial_.level == kDefault && motion_.level != kHigh &&
      action_.spatial != kNoChangeSpatial &&
      framerate_level_ == kFrameRateHigh) {
    action_.spatial = kNoChangeSpatial;
    action_.temporal = kTwoThirdsTemporal;
  }
  // If both motion and spatial level are low, and temporal down action was
  // selected, switch to spatial 3/4x3/4 if the frame rate is not above the
  // lower middle level (|kFrameRateMiddle1|).
  if (motion_.level == kLow && spatial_.level == kLow &&
      framerate_level_ <= kFrameRateMiddle1 &&
      action_.temporal != kNoChangeTemporal) {
    action_.spatial = kOneHalfSpatialUniform;
    action_.temporal = kNoChangeTemporal;
  }
  // If spatial action is selected, and there has been too much spatial
  // reduction already (i.e., 1/4), then switch to temporal action if the
  // average frame rate is not low.
  if (action_.spatial != kNoChangeSpatial &&
      down_action_history_[0].spatial == kOneQuarterSpatialUniform &&
      framerate_level_ != kFrameRateLow) {
    action_.spatial = kNoChangeSpatial;
    action_.temporal = kTwoThirdsTemporal;
  }
  // Never use temporal action if number of temporal layers is above 2.
  if (num_layers_ > 2) {
    if (action_.temporal != kNoChangeTemporal) {
      // Replace the temporal request with a spatial one before clearing it.
      action_.spatial = kOneHalfSpatialUniform;
    }
    action_.temporal = kNoChangeTemporal;
  }
  // If spatial action was selected, we need to make sure the frame sizes
  // are multiples of two. Otherwise switch to 2/3 temporal.
  if (action_.spatial != kNoChangeSpatial &&
      !EvenFrameSize()) {
    action_.spatial = kNoChangeSpatial;
    // Only one action (spatial or temporal) is allowed at a given time, so need
    // to check whether temporal action is currently selected.
    action_.temporal = kTwoThirdsTemporal;
  }
}
|
||||
|
||||
// Folds two fractional (3/4x3/4) spatial steps into one whole (1/2x1/2)
// step: if 3/4x3/4 is selected now and a 3/4x3/4 step exists in the down
// history, upgrade the action to 1/2x1/2 (provided the constraints allow
// it), remove the historical 3/4x3/4 entry, and rescale the working frame
// size and spatial state factor accordingly.
void VCMQmResolution::ConvertSpatialFractionalToWhole() {
  // If 3/4 spatial is selected, check if there has been another 3/4,
  // and if so, combine them into 1/2. 1/2 scaling is more efficient than 9/16.
  // Note we define 3/4x3/4 spatial as kOneHalfSpatialUniform.
  if (action_.spatial == kOneHalfSpatialUniform) {
    bool found = false;
    // Index of the historical 3/4x3/4 entry, if any.
    int isel = kDownActionHistorySize;
    for (int i = 0; i < kDownActionHistorySize; ++i) {
      if (down_action_history_[i].spatial == kOneHalfSpatialUniform) {
        isel = i;
        found = true;
        break;
      }
    }
    if (found) {
      action_.spatial = kOneQuarterSpatialUniform;
      // Tentatively remove the old 3/4x3/4 contribution from the state
      // factor so the constraint check below sees the combined 1/2x1/2.
      state_dec_factor_spatial_ = state_dec_factor_spatial_ /
          (kFactorWidthSpatial[kOneHalfSpatialUniform] *
           kFactorHeightSpatial[kOneHalfSpatialUniform]);
      // Check if switching to 1/2x1/2 (=1/4) spatial is allowed.
      ConstrainAmountOfDownSampling();
      if (action_.spatial == kNoChangeSpatial) {
        // Not allowed. Go back to 3/4x3/4 spatial.
        action_.spatial = kOneHalfSpatialUniform;
        // Restore the state factor removed above.
        state_dec_factor_spatial_ = state_dec_factor_spatial_ *
            kFactorWidthSpatial[kOneHalfSpatialUniform] *
            kFactorHeightSpatial[kOneHalfSpatialUniform];
      } else {
        // Switching is allowed. Remove 3/4x3/4 from the history, and update
        // the frame size.
        for (int i = isel; i < kDownActionHistorySize - 1; ++i) {
          down_action_history_[i].spatial =
              down_action_history_[i + 1].spatial;
        }
        width_ = width_ * kFactorWidthSpatial[kOneHalfSpatialUniform];
        height_ = height_ * kFactorHeightSpatial[kOneHalfSpatialUniform];
      }
    }
  }
}
|
||||
|
||||
// Returns false if the new frame sizes, under the current spatial action,
|
||||
// are not multiples of two.
|
||||
bool VCMQmResolution::EvenFrameSize() {
|
||||
if (action_.spatial == kOneHalfSpatialUniform) {
|
||||
if ((width_ * 3 / 4) % 2 != 0 || (height_ * 3 / 4) % 2 != 0) {
|
||||
return false;
|
||||
}
|
||||
} else if (action_.spatial == kOneQuarterSpatialUniform) {
|
||||
if ((width_ * 1 / 2) % 2 != 0 || (height_ * 1 / 2) % 2 != 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void VCMQmResolution::InsertLatestDownAction() {
|
||||
if (action_.spatial != kNoChangeSpatial) {
|
||||
for (int i = kDownActionHistorySize - 1; i > 0; --i) {
|
||||
down_action_history_[i].spatial = down_action_history_[i - 1].spatial;
|
||||
}
|
||||
down_action_history_[0].spatial = action_.spatial;
|
||||
}
|
||||
if (action_.temporal != kNoChangeTemporal) {
|
||||
for (int i = kDownActionHistorySize - 1; i > 0; --i) {
|
||||
down_action_history_[i].temporal = down_action_history_[i - 1].temporal;
|
||||
}
|
||||
down_action_history_[0].temporal = action_.temporal;
|
||||
}
|
||||
}
|
||||
|
||||
void VCMQmResolution::RemoveLastDownAction() {
|
||||
if (action_.spatial != kNoChangeSpatial) {
|
||||
// If the last spatial action was 1/2x1/2 we replace it with 3/4x3/4.
|
||||
if (action_.spatial == kOneQuarterSpatialUniform) {
|
||||
down_action_history_[0].spatial = kOneHalfSpatialUniform;
|
||||
} else {
|
||||
for (int i = 0; i < kDownActionHistorySize - 1; ++i) {
|
||||
down_action_history_[i].spatial = down_action_history_[i + 1].spatial;
|
||||
}
|
||||
down_action_history_[kDownActionHistorySize - 1].spatial =
|
||||
kNoChangeSpatial;
|
||||
}
|
||||
}
|
||||
if (action_.temporal != kNoChangeTemporal) {
|
||||
for (int i = 0; i < kDownActionHistorySize - 1; ++i) {
|
||||
down_action_history_[i].temporal = down_action_history_[i + 1].temporal;
|
||||
}
|
||||
down_action_history_[kDownActionHistorySize - 1].temporal =
|
||||
kNoChangeTemporal;
|
||||
}
|
||||
}
|
||||
|
||||
// Vetoes the currently selected down-sampling action (resets it to
// "no change") when it would violate the minimum-size/minimum-frame-rate
// limits or the maximum cumulative down-sampling limits.
void VCMQmResolution::ConstrainAmountOfDownSampling() {
  // Sanity checks on down-sampling selection:
  // override the settings for too small image size and/or frame rate.
  // Also check the limit on current down-sampling states.

  float spatial_width_fact = kFactorWidthSpatial[action_.spatial];
  float spatial_height_fact = kFactorHeightSpatial[action_.spatial];
  float temporal_fact = kFactorTemporal[action_.temporal];
  // Cumulative factors if the selected action were applied.
  float new_dec_factor_spatial = state_dec_factor_spatial_ *
      spatial_width_fact * spatial_height_fact;
  float new_dec_factor_temp = state_dec_factor_temporal_ * temporal_fact;

  // No spatial sampling if current frame size is too small, or if the
  // amount of spatial down-sampling is above maximum spatial down-action.
  if ((width_ * height_) <= kMinImageSize ||
      new_dec_factor_spatial > kMaxSpatialDown) {
    action_.spatial = kNoChangeSpatial;
    new_dec_factor_spatial = state_dec_factor_spatial_;
  }
  // No frame rate reduction if average frame rate is below some point, or if
  // the amount of temporal down-sampling is above maximum temporal down-action.
  if (avg_incoming_framerate_ <= kMinFrameRate ||
      new_dec_factor_temp > kMaxTempDown) {
    action_.temporal = kNoChangeTemporal;
    new_dec_factor_temp = state_dec_factor_temporal_;
  }
  // Check if the total (spatial-temporal) down-action is above maximum allowed,
  // if so, disallow the current selected down-action.
  if (new_dec_factor_spatial * new_dec_factor_temp > kMaxTotalDown) {
    if (action_.spatial != kNoChangeSpatial) {
      action_.spatial = kNoChangeSpatial;
    } else if (action_.temporal != kNoChangeTemporal) {
      action_.temporal = kNoChangeTemporal;
    } else {
      // We only allow for one action (spatial or temporal) at a given time, so
      // either spatial or temporal action is selected when this function is
      // called. If the selected action is disallowed from one of the above
      // 2 prior conditions (on spatial & temporal max down-action), then this
      // condition "total down-action > |kMaxTotalDown|" would not be entered.
      assert(false);
    }
  }
}
|
||||
|
||||
void VCMQmResolution::PickSpatialOrTemporal() {
|
||||
// Pick the one that has had the most down-sampling thus far.
|
||||
if (state_dec_factor_spatial_ > state_dec_factor_temporal_) {
|
||||
action_.spatial = down_action_history_[0].spatial;
|
||||
action_.temporal = kNoChangeTemporal;
|
||||
} else {
|
||||
action_.spatial = kNoChangeSpatial;
|
||||
action_.temporal = down_action_history_[0].temporal;
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(marpan): Update when we allow for directional spatial down-sampling.
// Chooses the directional spatial scale factors (2x2, 1x2, 2x1, or 4/3x4/3)
// for |qm_| based on the target rate and the directional prediction errors.
// NOTE(review): the branches below are not mutually exclusive and there is
// no early return, so a later matching condition (4/3x4/3 or 2x1) can
// overwrite the 2x2 or 1x2 selection made earlier — confirm this ordering
// is intended.
void VCMQmResolution::SelectSpatialDirectionMode(float transition_rate) {
  // Default is 4/3x4/3
  // For bit rates well below transitional rate, we select 2x2.
  if (avg_target_rate_ < transition_rate * kRateRedSpatial2X2) {
    qm_->spatial_width_fact = 2.0f;
    qm_->spatial_height_fact = 2.0f;
  }
  // Otherwise check prediction errors and aspect ratio.
  float spatial_err = 0.0f;
  float spatial_err_h = 0.0f;
  float spatial_err_v = 0.0f;
  if (content_metrics_) {
    spatial_err = content_metrics_->spatial_pred_err;
    spatial_err_h = content_metrics_->spatial_pred_err_h;
    spatial_err_v = content_metrics_->spatial_pred_err_v;
  }

  // Favor 1x2 if aspect_ratio is 16:9.
  if (aspect_ratio_ >= 16.0f / 9.0f) {
    // Check if 1x2 has lowest prediction error.
    if (spatial_err_h < spatial_err && spatial_err_h < spatial_err_v) {
      qm_->spatial_width_fact = 2.0f;
      qm_->spatial_height_fact = 1.0f;
    }
  }
  // Check for 4/3x4/3 selection: favor 2x2 over 1x2 and 2x1.
  if (spatial_err < spatial_err_h * (1.0f + kSpatialErr2x2VsHoriz) &&
      spatial_err < spatial_err_v * (1.0f + kSpatialErr2X2VsVert)) {
    qm_->spatial_width_fact = 4.0f / 3.0f;
    qm_->spatial_height_fact = 4.0f / 3.0f;
  }
  // Check for 2x1 selection.
  if (spatial_err_v < spatial_err_h * (1.0f - kSpatialErrVertVsHoriz) &&
      spatial_err_v < spatial_err * (1.0f - kSpatialErr2X2VsVert)) {
    qm_->spatial_width_fact = 1.0f;
    qm_->spatial_height_fact = 2.0f;
  }
}
|
||||
|
||||
// ROBUSTNESS CLASS
|
||||
|
||||
// Robustness settings: all state is initialized via Reset().
VCMQmRobustness::VCMQmRobustness() {
  Reset();
}
|
||||
|
||||
// Nothing to release: all members are plain scalar values.
VCMQmRobustness::~VCMQmRobustness() {
}
|
||||
|
||||
// Clears the remembered network state, then resets the base-class content
// state via ResetQM().
void VCMQmRobustness::Reset() {
  prev_packet_loss_ = 0;
  prev_code_rate_delta_ = 0;
  prev_rtt_time_ = 0;
  prev_total_rate_ = 0.0f;
  ResetQM();
}
|
||||
|
||||
// Adjust the FEC rate based on the content and the network state
// (packet loss rate, total rate/bandwidth, round trip time).
// Note that packetLoss here is the filtered loss value.
// Returns a multiplicative adjustment factor (1.0f = no adjustment).
// NOTE(review): the content class is computed but not yet used; the actual
// FEC adjustment is still a TODO, so this currently always returns 1.0f
// while recording the network state for future use.
float VCMQmRobustness::AdjustFecFactor(uint8_t code_rate_delta,
                                       float total_rate,
                                       float framerate,
                                       uint32_t rtt_time,
                                       uint8_t packet_loss) {
  // Default: no adjustment
  float adjust_fec = 1.0f;
  if (content_metrics_ == NULL) {
    return adjust_fec;
  }
  // Compute class state of the content.
  ComputeMotionNFD();
  ComputeSpatial();

  // TODO(marpan): Set FEC adjustment factor.

  // Keep track of previous values of network state:
  // adjustment may be also based on pattern of changes in network state.
  prev_total_rate_ = total_rate;
  prev_rtt_time_ = rtt_time;
  prev_packet_loss_ = packet_loss;
  prev_code_rate_delta_ = code_rate_delta;
  return adjust_fec;
}
|
||||
|
||||
// Set the UEP (unequal-protection across packets) on/off for the FEC.
// Currently a stub: all parameters are ignored and UEP is always disabled.
bool VCMQmRobustness::SetUepProtection(uint8_t code_rate_delta,
                                       float total_rate,
                                       uint8_t packet_loss,
                                       bool frame_type) {
  // Default.
  return false;
}
|
||||
}  // namespace webrtc
|
||||
375
webrtc/modules/video_coding/main/source/qm_select.h
Normal file
375
webrtc/modules/video_coding/main/source/qm_select.h
Normal file
@ -0,0 +1,375 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_
|
||||
|
||||
#include "common_types.h"
|
||||
#include "typedefs.h"
|
||||
|
||||
/******************************************************/
|
||||
/* Quality Modes: Resolution and Robustness settings */
|
||||
/******************************************************/
|
||||
|
||||
namespace webrtc {
|
||||
struct VideoContentMetrics;
|
||||
|
||||
// Output of the resolution-selection logic: the target codec frame size and
// frame rate, together with the scale factors that produced them.
struct VCMResolutionScale {
  VCMResolutionScale()
      : codec_width(640),
        codec_height(480),
        frame_rate(30.0f),
        spatial_width_fact(1.0f),
        spatial_height_fact(1.0f),
        temporal_fact(1.0f),
        change_resolution_spatial(false),
        change_resolution_temporal(false) {
  }
  // Target encoder frame size (pixels) and frame rate.
  uint16_t codec_width;
  uint16_t codec_height;
  float frame_rate;
  // Applied scale factors: > 1.0f means down-sample, < 1.0f means scale
  // back up toward the native resolution.
  float spatial_width_fact;
  float spatial_height_fact;
  float temporal_fact;
  // True when the corresponding resolution dimension should change.
  bool change_resolution_spatial;
  bool change_resolution_temporal;
};
|
||||
|
||||
// Canonical image formats, ordered by increasing pixel count.
enum ImageType {
  kQCIF = 0,  // 176x144
  kHCIF,      // 264x216 = half(~3/4x3/4) CIF.
  kQVGA,      // 320x240 = quarter VGA.
  kCIF,       // 352x288
  kHVGA,      // 480x360 = half(~3/4x3/4) VGA.
  kVGA,       // 640x480
  kQFULLHD,   // 960x540 = quarter FULLHD, and half(~3/4x3/4) WHD.
  kWHD,       // 1280x720
  kFULLHD,    // 1920x1080
  kNumImageTypes
};

// Pixel count (width * height) for each ImageType above.
const uint32_t kSizeOfImageType[kNumImageTypes] =
{ 25344, 57024, 76800, 101376, 172800, 307200, 518400, 921600, 2073600 };

// Coarse classification of the incoming frame rate.
enum FrameRateLevelClass {
  kFrameRateLow,
  kFrameRateMiddle1,
  kFrameRateMiddle2,
  kFrameRateHigh
};

// Classification of a content feature (motion/spatial) level.
enum ContentLevelClass {
  kLow,
  kHigh,
  kDefault
};
|
||||
|
||||
// A single content feature: its magnitude and its classified level.
struct VCMContFeature {
  VCMContFeature()
      : value(0.0f),
        level(kDefault) {
  }
  void Reset() {
    value = 0.0f;
    level = kDefault;
  }
  float value;
  ContentLevelClass level;
};

// Direction of a resolution change.
enum UpDownAction {
  kUpResolution,
  kDownResolution
};

// Spatial down-sampling modes.
enum SpatialAction {
  kNoChangeSpatial,
  kOneHalfSpatialUniform,     // 3/4 x 3/4: 9/16 ~1/2 pixel reduction.
  kOneQuarterSpatialUniform,  // 1/2 x 1/2: 1/4 pixel reduction.
  kNumModesSpatial
};
|
||||
|
||||
// Temporal (frame-rate) down-sampling modes.
enum TemporalAction {
  kNoChangeTemporal,
  kTwoThirdsTemporal,  // 2/3 frame rate reduction
  kOneHalfTemporal,    // 1/2 frame rate reduction
  kNumModesTemporal
};

// A combined spatial + temporal resolution action.
struct ResolutionAction {
  ResolutionAction()
      : spatial(kNoChangeSpatial),
        temporal(kNoChangeTemporal) {
  }
  SpatialAction spatial;
  TemporalAction temporal;
};

// Down-sampling factors for spatial (width and height), and temporal.
// Indexed by SpatialAction / TemporalAction respectively.
const float kFactorWidthSpatial[kNumModesSpatial] =
    { 1.0f, 4.0f / 3.0f, 2.0f };

const float kFactorHeightSpatial[kNumModesSpatial] =
    { 1.0f, 4.0f / 3.0f, 2.0f };

const float kFactorTemporal[kNumModesTemporal] =
    { 1.0f, 1.5f, 2.0f };

// Classification of the encoder relative to its target rate/buffer.
enum EncoderState {
  kStableEncoding,    // Low rate mis-match, stable buffer levels.
  kStressedEncoding,  // Significant over-shooting of target rate,
                      // Buffer under-flow, etc.
  kEasyEncoding       // Significant under-shooting of target rate.
};
|
||||
|
||||
// QmMethod class: main class for resolution and robustness settings

class VCMQmMethod {
 public:
  VCMQmMethod();
  virtual ~VCMQmMethod();

  // Reset values
  void ResetQM();
  virtual void Reset() = 0;

  // Compute content class.
  uint8_t ComputeContentClass();

  // Update with the content metrics.
  void UpdateContent(const VideoContentMetrics* content_metrics);

  // Compute spatial texture magnitude and level.
  // Spatial texture is a spatial prediction error measure.
  void ComputeSpatial();

  // Compute motion magnitude and level for NFD metric.
  // NFD is normalized frame difference (normalized by spatial variance).
  void ComputeMotionNFD();

  // Get the imageType (CIF, VGA, HD, etc) for the system width/height.
  ImageType GetImageType(uint16_t width, uint16_t height);

  // Return the closest image type.
  ImageType FindClosestImageType(uint16_t width, uint16_t height);

  // Get the frame rate level.
  FrameRateLevelClass FrameRateLevel(float frame_rate);

 protected:
  // Content Data.
  const VideoContentMetrics* content_metrics_;

  // Encoder frame sizes and native frame sizes.
  uint16_t width_;
  uint16_t height_;
  float user_frame_rate_;
  uint16_t native_width_;
  uint16_t native_height_;
  float native_frame_rate_;
  float aspect_ratio_;
  // Image type and frame rate level, for the current encoder resolution.
  ImageType image_type_;
  FrameRateLevelClass framerate_level_;
  // Content class data.
  VCMContFeature motion_;
  VCMContFeature spatial_;
  uint8_t content_class_;
  bool init_;
};
|
||||
|
||||
// Resolution settings class

class VCMQmResolution : public VCMQmMethod {
 public:
  VCMQmResolution();
  virtual ~VCMQmResolution();

  // Reset all quantities.
  virtual void Reset();

  // Reset rate quantities and counters after every SelectResolution() call.
  void ResetRates();

  // Reset down-sampling state.
  void ResetDownSamplingState();

  // Get the encoder state.
  EncoderState GetEncoderState();

  // Initialize after SetEncodingData in media_opt.
  int Initialize(float bitrate,
                 float user_framerate,
                 uint16_t width,
                 uint16_t height,
                 int num_layers);

  // Update the encoder frame size.
  void UpdateCodecParameters(float frame_rate, uint16_t width, uint16_t height);

  // Update with actual bit rate (size of the latest encoded frame)
  // and frame type, after every encoded frame.
  void UpdateEncodedSize(int encoded_size,
                         FrameType encoded_frame_type);

  // Update with new target bitrate, actual encoder sent rate, frame_rate,
  // loss rate: every ~1 sec from SetTargetRates in media_opt.
  void UpdateRates(float target_bitrate,
                   float encoder_sent_rate,
                   float incoming_framerate,
                   uint8_t packet_loss);

  // Extract ST (spatio-temporal) resolution action.
  // Inputs: qm: Reference to the quality modes pointer.
  // Output: the spatial and/or temporal scale change.
  int SelectResolution(VCMResolutionScale** qm);

 private:
  // Set the default resolution action.
  void SetDefaultAction();

  // Compute rates for the selection of down-sampling action.
  void ComputeRatesForSelection();

  // Compute the encoder state.
  void ComputeEncoderState();

  // Return true if the action is to go back up in resolution.
  bool GoingUpResolution();

  // Return true if the action is to go down in resolution.
  bool GoingDownResolution();

  // Check the condition for going up in resolution by the scale factors:
  // |facWidth|, |facHeight|, |facTemp|.
  // |scaleFac| is a scale factor for the transition rate.
  bool ConditionForGoingUp(float fac_width,
                           float fac_height,
                           float fac_temp,
                           float scale_fac);

  // Get the bitrate threshold for the resolution action.
  // The case |facWidth|=|facHeight|=|facTemp|==1 is for down-sampling action.
  // |scaleFac| is a scale factor for the transition rate.
  float GetTransitionRate(float fac_width,
                          float fac_height,
                          float fac_temp,
                          float scale_fac);

  // Update the down-sampling state.
  void UpdateDownsamplingState(UpDownAction up_down);

  // Update the codec frame size and frame rate.
  void UpdateCodecResolution();

  // Return a state based on average target rate relative transition rate.
  uint8_t RateClass(float transition_rate);

  // Adjust the action selected from the table.
  void AdjustAction();

  // Convert 2 stages of 3/4 (=9/16) spatial decimation to 1/2.
  void ConvertSpatialFractionalToWhole();

  // Returns true if the new frame sizes, under the selected spatial action,
  // are of even size.
  bool EvenFrameSize();

  // Insert latest down-sampling action into the history list.
  void InsertLatestDownAction();

  // Remove the last (first element) down-sampling action from the list.
  void RemoveLastDownAction();

  // Check constraints on the amount of down-sampling allowed.
  void ConstrainAmountOfDownSampling();

  // For going up in resolution: pick spatial or temporal action,
  // if both actions were separately selected.
  void PickSpatialOrTemporal();

  // Select the directional (1x2 or 2x1) spatial down-sampling action.
  void SelectSpatialDirectionMode(float transition_rate);

  enum { kDownActionHistorySize = 10};

  VCMResolutionScale* qm_;
  // Encoder rate control parameters.
  float target_bitrate_;
  float incoming_framerate_;
  float per_frame_bandwidth_;
  float buffer_level_;

  // Data accumulated every ~1sec from MediaOpt.
  float sum_target_rate_;
  float sum_incoming_framerate_;
  float sum_rate_MM_;
  float sum_rate_MM_sgn_;
  float sum_packet_loss_;
  // Counters.
  uint32_t frame_cnt_;
  uint32_t frame_cnt_delta_;
  uint32_t update_rate_cnt_;
  uint32_t low_buffer_cnt_;

  // Resolution state parameters.
  float state_dec_factor_spatial_;
  float state_dec_factor_temporal_;

  // Quantities used for selection.
  float avg_target_rate_;
  float avg_incoming_framerate_;
  float avg_ratio_buffer_low_;
  float avg_rate_mismatch_;
  float avg_rate_mismatch_sgn_;
  float avg_packet_loss_;
  EncoderState encoder_state_;
  ResolutionAction action_;
  // Short history of the down-sampling actions from the Initialize() state.
  // This is needed for going up in resolution. Since the total amount of
  // down-sampling actions are constrained, the length of the list need not be
  // large: i.e., (4/3) ^{kDownActionHistorySize} <= kMaxDownSample.
  ResolutionAction down_action_history_[kDownActionHistorySize];
  int num_layers_;
};
|
||||
|
||||
// Robustness settings class.

class VCMQmRobustness : public VCMQmMethod {
 public:
  VCMQmRobustness();
  ~VCMQmRobustness();

  virtual void Reset();

  // Adjust FEC rate based on content: every ~1 sec from SetTargetRates.
  // Returns an adjustment factor.
  float AdjustFecFactor(uint8_t code_rate_delta,
                        float total_rate,
                        float framerate,
                        uint32_t rtt_time,
                        uint8_t packet_loss);

  // Set the UEP protection on/off.
  bool SetUepProtection(uint8_t code_rate_delta,
                        float total_rate,
                        uint8_t packet_loss,
                        bool frame_type);

 private:
  // Previous state of network parameters.
  float prev_total_rate_;
  uint32_t prev_rtt_time_;
  uint8_t prev_packet_loss_;
  uint8_t prev_code_rate_delta_;
};
}  // namespace webrtc
#endif  // WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_
|
||||
|
||||
228
webrtc/modules/video_coding/main/source/qm_select_data.h
Normal file
228
webrtc/modules/video_coding/main/source/qm_select_data.h
Normal file
@ -0,0 +1,228 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
|
||||
|
||||
/***************************************************************
|
||||
*QMSelectData.h
|
||||
* This file includes parameters for content-aware media optimization
|
||||
****************************************************************/
|
||||
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc {
|
||||
//
|
||||
// PARAMETERS FOR RESOLUTION ADAPTATION
|
||||
//
|
||||
|
||||
// Initial level of buffer in secs.
const float kInitBufferLevel = 0.5f;

// Threshold of (max) buffer size below which we consider too low (underflow).
const float kPercBufferThr = 0.10f;

// Threshold on the occurrences of low buffer levels.
const float kMaxBufferLow = 0.30f;

// Threshold on rate mismatch.
const float kMaxRateMisMatch = 0.5f;

// Threshold on amount of under/over encoder shooting.
const float kRateOverShoot = 0.75f;
const float kRateUnderShoot = 0.75f;

// Factor to favor weighting the average rates with the current/last data.
const float kWeightRate = 0.70f;

// Factor for transitional rate for going back up in resolution.
const float kTransRateScaleUpSpatial = 1.25f;
const float kTransRateScaleUpTemp = 1.25f;
const float kTransRateScaleUpSpatialTemp = 1.25f;

// Threshold on packet loss rate, above which favor resolution reduction.
const float kPacketLossThr = 0.1f;

// Factor for reducing transitional bitrate under packet loss.
const float kPacketLossRateFac = 1.0f;
|
||||
|
||||
// Maximum possible transitional rate for down-sampling:
// (units in kbps), for 30fps. Indexed by ImageType.
const uint16_t kMaxRateQm[9] = {
    0,     // QCIF
    50,    // kHCIF
    125,   // kQVGA
    200,   // CIF
    280,   // HVGA
    400,   // VGA
    700,   // QFULLHD
    1000,  // WHD
    1500   // FULLHD
};

// Frame rate scale for maximum transition rate.
// Indexed by FrameRateLevelClass.
const float kFrameRateFac[4] = {
    0.5f,   // Low
    0.7f,   // Middle level 1
    0.85f,  // Middle level 2
    1.0f,   // High
};

// Scale for transitional rate: based on content class
// motion=L/H/D,spatial==L/H/D: for low, high, middle levels
// NOTE(review): labels for the last (motion=D) group have been reordered to
// the L/H/D spatial order used by kSpatialAction/kTemporalAction below
// (values are unchanged) — confirm against ComputeContentClass().
const float kScaleTransRateQm[18] = {
    // VGA and lower
    0.40f,  // L, L
    0.50f,  // L, H
    0.40f,  // L, D
    0.60f,  // H, L
    0.60f,  // H, H
    0.60f,  // H, D
    0.50f,  // D, L
    0.50f,  // D, H
    0.50f,  // D, D

    // over VGA
    0.40f,  // L, L
    0.50f,  // L, H
    0.40f,  // L, D
    0.60f,  // H, L
    0.60f,  // H, H
    0.60f,  // H, D
    0.50f,  // D, L
    0.50f,  // D, H
    0.50f,  // D, D
};

// Threshold on the target rate relative to transitional rate.
const float kFacLowRate = 0.5f;
|
||||
|
||||
// Action for down-sampling:
// motion=L/H/D,spatial==L/H/D, for low, high, middle levels;
// rate = 0/1/2, for target rate state relative to transition rate.
const uint8_t kSpatialAction[27] = {
    // rateClass = 0:
    1,  // L, L
    1,  // L, H
    1,  // L, D
    4,  // H, L
    1,  // H, H
    4,  // H, D
    4,  // D, L
    1,  // D, H
    2,  // D, D

    // rateClass = 1:
    1,  // L, L
    1,  // L, H
    1,  // L, D
    2,  // H, L
    1,  // H, H
    2,  // H, D
    2,  // D, L
    1,  // D, H
    2,  // D, D

    // rateClass = 2:
    1,  // L, L
    1,  // L, H
    1,  // L, D
    2,  // H, L
    1,  // H, H
    2,  // H, D
    2,  // D, L
    1,  // D, H
    2,  // D, D
};

const uint8_t kTemporalAction[27] = {
    // rateClass = 0:
    3,  // L, L
    2,  // L, H
    2,  // L, D
    1,  // H, L
    3,  // H, H
    1,  // H, D
    1,  // D, L
    2,  // D, H
    1,  // D, D

    // rateClass = 1:
    3,  // L, L
    3,  // L, H
    3,  // L, D
    1,  // H, L
    3,  // H, H
    1,  // H, D
    1,  // D, L
    3,  // D, H
    1,  // D, D

    // rateClass = 2:
    1,  // L, L
    3,  // L, H
    3,  // L, D
    1,  // H, L
    3,  // H, H
    1,  // H, D
    1,  // D, L
    3,  // D, H
    1,  // D, D
};
|
||||
|
||||
// Control the total amount of down-sampling allowed.
|
||||
const float kMaxSpatialDown = 8.0f;
|
||||
const float kMaxTempDown = 3.0f;
|
||||
const float kMaxTotalDown = 9.0f;
|
||||
|
||||
// Minimum image size for a spatial down-sampling.
|
||||
const int kMinImageSize = 176 * 144;
|
||||
|
||||
// Minimum frame rate for temporal down-sampling:
|
||||
// no frame rate reduction if incomingFrameRate <= MIN_FRAME_RATE.
|
||||
const int kMinFrameRate = 8;
|
||||
|
||||
//
|
||||
// PARAMETERS FOR FEC ADJUSTMENT: TODO (marpan)
|
||||
//
|
||||
|
||||
//
|
||||
// PARAMETETS FOR SETTING LOW/HIGH STATES OF CONTENT METRICS:
|
||||
//
|
||||
|
||||
// Thresholds for frame rate:
|
||||
const int kLowFrameRate = 10;
|
||||
const int kMiddleFrameRate = 15;
|
||||
const int kHighFrameRate = 25;
|
||||
|
||||
// Thresholds for motion: motion level is from NFD.
|
||||
const float kHighMotionNfd = 0.075f;
|
||||
const float kLowMotionNfd = 0.03f;
|
||||
|
||||
// Thresholds for spatial prediction error:
|
||||
// this is applied on the average of (2x2,1x2,2x1).
|
||||
const float kHighTexture = 0.035f;
|
||||
const float kLowTexture = 0.020f;
|
||||
|
||||
// Used to reduce thresholds for larger/HD scenes: correction factor since
|
||||
// higher correlation in HD scenes means lower spatial prediction error.
|
||||
const float kScaleTexture = 0.9f;
|
||||
|
||||
// Percentage reduction in transitional bitrate for 2x2 selected over 1x2/2x1.
|
||||
const float kRateRedSpatial2X2 = 0.6f;
|
||||
|
||||
const float kSpatialErr2x2VsHoriz = 0.1f; // percentage to favor 2x2 over H
|
||||
const float kSpatialErr2X2VsVert = 0.1f; // percentage to favor 2x2 over V
|
||||
const float kSpatialErrVertVsHoriz = 0.1f; // percentage to favor H over V
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
|
||||
|
||||
1311
webrtc/modules/video_coding/main/source/qm_select_unittest.cc
Normal file
1311
webrtc/modules/video_coding/main/source/qm_select_unittest.cc
Normal file
File diff suppressed because it is too large
Load Diff
495
webrtc/modules/video_coding/main/source/receiver.cc
Normal file
495
webrtc/modules/video_coding/main/source/receiver.cc
Normal file
@ -0,0 +1,495 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "modules/video_coding/main/source/receiver.h"
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
#include "modules/video_coding/main/interface/video_coding.h"
|
||||
#include "modules/video_coding/main/source/encoded_frame.h"
|
||||
#include "modules/video_coding/main/source/internal_defines.h"
|
||||
#include "modules/video_coding/main/source/media_opt_util.h"
|
||||
#include "modules/video_coding/main/source/tick_time_base.h"
|
||||
#include "system_wrappers/interface/trace.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
VCMReceiver::VCMReceiver(VCMTiming& timing,
                         TickTimeBase* clock,
                         WebRtc_Word32 vcmId,
                         WebRtc_Word32 receiverId,
                         bool master)
    : _critSect(CriticalSectionWrapper::CreateCriticalSection()),
      _vcmId(vcmId),
      _clock(clock),
      _receiverId(receiverId),
      _master(master),
      _jitterBuffer(_clock, vcmId, receiverId, master),
      _timing(timing),
      _renderWaitEvent(*new VCMEvent()),
      _state(kPassive) {}

VCMReceiver::~VCMReceiver() {
    // Wake up any thread blocked on the render-wait event before tearing
    // the object down.
    _renderWaitEvent.Set();
    delete &_renderWaitEvent;
    delete _critSect;
}
|
||||
|
||||
void
|
||||
VCMReceiver::Reset()
|
||||
{
|
||||
CriticalSectionScoped cs(_critSect);
|
||||
if (!_jitterBuffer.Running())
|
||||
{
|
||||
_jitterBuffer.Start();
|
||||
}
|
||||
else
|
||||
{
|
||||
_jitterBuffer.Flush();
|
||||
}
|
||||
_renderWaitEvent.Reset();
|
||||
if (_master)
|
||||
{
|
||||
_state = kReceiving;
|
||||
}
|
||||
else
|
||||
{
|
||||
_state = kPassive;
|
||||
}
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMReceiver::Initialize()
|
||||
{
|
||||
CriticalSectionScoped cs(_critSect);
|
||||
Reset();
|
||||
if (!_master)
|
||||
{
|
||||
SetNackMode(kNoNack);
|
||||
}
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
void VCMReceiver::UpdateRtt(WebRtc_UWord32 rtt)
|
||||
{
|
||||
_jitterBuffer.UpdateRtt(rtt);
|
||||
}
|
||||
|
||||
// Inserts one RTP packet into the jitter buffer, updating frame size,
// render time and flush state as needed.
//
// Returns VCM_OK on success (old packets are silently accepted),
// VCM_FLUSH_INDICATOR when the jitter buffer and timing were reset because
// the render time was inconsistent, or a negative error code.
WebRtc_Word32
VCMReceiver::InsertPacket(const VCMPacket& packet,
                          WebRtc_UWord16 frameWidth,
                          WebRtc_UWord16 frameHeight)
{
    // Find an empty frame for this packet.
    VCMEncodedFrame *buffer = NULL;
    const WebRtc_Word32 error = _jitterBuffer.GetFrame(packet, buffer);
    if (error == VCM_OLD_PACKET_ERROR)
    {
        // Packets older than the current decode point are not an error.
        return VCM_OK;
    }
    else if (error != VCM_OK)
    {
        return error;
    }
    assert(buffer);
    {
        CriticalSectionScoped cs(_critSect);

        if (frameWidth && frameHeight)
        {
            buffer->SetEncodedSize(static_cast<WebRtc_UWord32>(frameWidth),
                                   static_cast<WebRtc_UWord32>(frameHeight));
        }

        if (_master)
        {
            // Only trace the primary receiver to make it possible
            // to parse and plot the trace file.
            WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
                         VCMId(_vcmId, _receiverId),
                         "Packet seqNo %u of frame %u at %u",
                         packet.seqNum, packet.timestamp,
                         MaskWord64ToUWord32(_clock->MillisecondTimestamp()));
        }

        const WebRtc_Word64 nowMs = _clock->MillisecondTimestamp();

        WebRtc_Word64 renderTimeMs = _timing.RenderTimeMs(packet.timestamp, nowMs);

        if (renderTimeMs < 0)
        {
            // Render time error. Assume that this is due to some change in
            // the incoming video stream and reset the JB and the timing.
            _jitterBuffer.Flush();
            _timing.Reset(_clock->MillisecondTimestamp());
            return VCM_FLUSH_INDICATOR;
        }
        else if (renderTimeMs < nowMs - kMaxVideoDelayMs)
        {
            WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
                         VCMId(_vcmId, _receiverId),
                         "This frame should have been rendered more than %u ms ago."
                         "Flushing jitter buffer and resetting timing.", kMaxVideoDelayMs);
            _jitterBuffer.Flush();
            _timing.Reset(_clock->MillisecondTimestamp());
            return VCM_FLUSH_INDICATOR;
        }
        else if (_timing.TargetVideoDelay() > kMaxVideoDelayMs)
        {
            WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
                         VCMId(_vcmId, _receiverId),
                         "More than %u ms target delay. Flushing jitter buffer and resetting timing.",
                         kMaxVideoDelayMs);
            _jitterBuffer.Flush();
            _timing.Reset(_clock->MillisecondTimestamp());
            return VCM_FLUSH_INDICATOR;
        }

        // First packet received belonging to this frame.
        if (buffer->Length() == 0)
        {
            // Re-read the clock instead of shadowing the outer |nowMs|
            // (the original declared a second const nowMs in this scope).
            const WebRtc_Word64 firstPacketNowMs = _clock->MillisecondTimestamp();
            if (_master)
            {
                // Only trace the primary receiver to make it possible to parse and plot the trace file.
                WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
                             VCMId(_vcmId, _receiverId),
                             "First packet of frame %u at %u", packet.timestamp,
                             MaskWord64ToUWord32(firstPacketNowMs));
            }
            renderTimeMs = _timing.RenderTimeMs(packet.timestamp, firstPacketNowMs);
            if (renderTimeMs >= 0)
            {
                buffer->SetRenderTime(renderTimeMs);
            }
            else
            {
                buffer->SetRenderTime(firstPacketNowMs);
            }
        }

        // Insert packet into the jitter buffer
        // both media and empty packets
        const VCMFrameBufferEnum
        ret = _jitterBuffer.InsertPacket(buffer, packet);
        if (ret == kFlushIndicator) {
            return VCM_FLUSH_INDICATOR;
        } else if (ret < 0) {
            WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding,
                         VCMId(_vcmId, _receiverId),
                         "Error inserting packet seqNo=%u, timeStamp=%u",
                         packet.seqNum, packet.timestamp);
            return VCM_JITTER_BUFFER_ERROR;
        }
    }
    return VCM_OK;
}
|
||||
|
||||
// Retrieves the next frame for the decoder, waiting at most maxWaitTimeMs.
// Updates the timing module with the jitter estimate and current delay, and
// feeds non-retransmitted packet times back into the timing filter.
// Returns NULL when no frame became available in time.
VCMEncodedFrame* VCMReceiver::FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs,
                                               WebRtc_Word64& nextRenderTimeMs,
                                               bool renderTiming,
                                               VCMReceiver* dualReceiver) {
    // The jitter buffer is thread-safe; no lock is needed in this function.
    FrameType incomingFrameType = kVideoFrameDelta;
    nextRenderTimeMs = -1;
    const WebRtc_Word64 startTimeMs = _clock->MillisecondTimestamp();
    WebRtc_Word64 ret = _jitterBuffer.NextTimestamp(maxWaitTimeMs,
                                                    &incomingFrameType,
                                                    &nextRenderTimeMs);
    if (ret < 0) {
        // No timestamp in the jitter buffer at the moment.
        return NULL;
    }
    const WebRtc_UWord32 timeStamp = static_cast<WebRtc_UWord32>(ret);

    // Push the latest jitter estimate and delay into the timing module.
    _timing.SetRequiredDelay(_jitterBuffer.EstimatedJitterMs());
    _timing.UpdateCurrentDelay(timeStamp);

    // Shrink the remaining wait budget by the time already spent above.
    const WebRtc_Word32 tempWaitTime = maxWaitTimeMs -
        static_cast<WebRtc_Word32>(_clock->MillisecondTimestamp() - startTimeMs);
    WebRtc_UWord16 newMaxWaitTime =
        static_cast<WebRtc_UWord16>(VCM_MAX(tempWaitTime, 0));

    VCMEncodedFrame* frame = NULL;
    if (renderTiming) {
        frame = FrameForDecoding(newMaxWaitTime, nextRenderTimeMs, dualReceiver);
    } else {
        frame = FrameForRendering(newMaxWaitTime, nextRenderTimeMs, dualReceiver);
    }

    if (frame != NULL) {
        bool retransmitted = false;
        const WebRtc_Word64 lastPacketTimeMs =
            _jitterBuffer.LastPacketTime(frame, &retransmitted);
        if (lastPacketTimeMs >= 0 && !retransmitted) {
            // Retransmitted timestamps are excluded here since the jitter
            // estimate already compensates with extra retransmission delay.
            _timing.IncomingTimestamp(timeStamp, lastPacketTimeMs);
        }
        if (dualReceiver != NULL) {
            dualReceiver->UpdateState(*frame);
        }
    }
    return frame;
}
|
||||
|
||||
// Internal helper: fetches a frame for immediate decoding, preferring a
// complete frame but falling back to an incomplete one when the render
// deadline leaves no time to wait. May hand the jitter buffer state over to
// a passive dual receiver before decoding a potentially corrupting frame.
VCMEncodedFrame*
VCMReceiver::FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs,
                              WebRtc_Word64 nextRenderTimeMs,
                              VCMReceiver* dualReceiver)
{
    // How long we may still wait before the next frame must be decoded.
    WebRtc_UWord32 waitTimeMs =
        _timing.MaxWaitingTime(nextRenderTimeMs, _clock->MillisecondTimestamp());

    // First try to pull an already complete frame without blocking.
    VCMEncodedFrame* frame = _jitterBuffer.GetCompleteFrameForDecoding(0);

    if (frame == NULL && maxWaitTimeMs == 0 && waitTimeMs > 0)
    {
        // Not allowed to block and it is not yet decode time; come back later.
        return NULL;
    }

    if (frame == NULL && VCM_MIN(waitTimeMs, maxWaitTimeMs) == 0)
    {
        // No time to wait for a complete frame; check for an incomplete one.
        const bool dualReceiverEnabledAndPassive =
            (dualReceiver != NULL &&
             dualReceiver->State() == kPassive &&
             dualReceiver->NackMode() == kNackInfinite);
        if (dualReceiverEnabledAndPassive &&
            !_jitterBuffer.CompleteSequenceWithNextFrame())
        {
            // Decoding this frame may corrupt the jitter buffer state, so
            // snapshot the state into the dual receiver first.
            dualReceiver->CopyJitterBufferStateFromReceiver(*this);
            frame = _jitterBuffer.GetFrameForDecoding();
            assert(frame);
        } else {
            frame = _jitterBuffer.GetFrameForDecoding();
        }
    }
    if (frame == NULL)
    {
        // Block until a complete frame arrives or the wait budget runs out.
        frame = _jitterBuffer.GetCompleteFrameForDecoding(maxWaitTimeMs);
    }
    if (frame == NULL)
    {
        // Still nothing complete; decide whether an incomplete frame must go.
        if (_timing.MaxWaitingTime(nextRenderTimeMs,
                                   _clock->MillisecondTimestamp()) > 0)
        {
            // Still time to wait for a complete frame.
            return NULL;
        }

        // No time left to wait; this frame must be decoded now.
        const bool dualReceiverEnabledAndPassive =
            (dualReceiver != NULL &&
             dualReceiver->State() == kPassive &&
             dualReceiver->NackMode() == kNackInfinite);
        if (dualReceiverEnabledAndPassive &&
            !_jitterBuffer.CompleteSequenceWithNextFrame())
        {
            // Jitter buffer state might get corrupted by this frame.
            dualReceiver->CopyJitterBufferStateFromReceiver(*this);
        }

        frame = _jitterBuffer.GetFrameForDecoding();
    }
    return frame;
}
|
||||
|
||||
// Internal helper for renderers that schedule frames themselves: waits as
// long as possible (until the frame's render time) before handing the frame
// to the decoder, which renders immediately after decoding.
VCMEncodedFrame*
VCMReceiver::FrameForRendering(WebRtc_UWord16 maxWaitTimeMs,
                               WebRtc_Word64 nextRenderTimeMs,
                               VCMReceiver* dualReceiver)
{
    // How long we MUST wait before decoding. Unlike FrameForDecoding() we
    // hold the frame back until just before its render time.
    WebRtc_UWord32 waitTimeMs =
        _timing.MaxWaitingTime(nextRenderTimeMs, _clock->MillisecondTimestamp());
    if (maxWaitTimeMs < waitTimeMs)
    {
        // Not allowed to wait until the frame's render time; give up for now.
        return NULL;
    }
    // Block until it is time to render.
    _renderWaitEvent.Wait(waitTimeMs);

    // Prefer a complete frame.
    VCMEncodedFrame* frame = _jitterBuffer.GetCompleteFrameForDecoding(0);

    if (frame == NULL)
    {
        // Fall back to an incomplete frame.
        const bool dualReceiverEnabledAndPassive =
            dualReceiver != NULL &&
            dualReceiver->State() == kPassive &&
            dualReceiver->NackMode() == kNackInfinite;
        if (dualReceiverEnabledAndPassive &&
            !_jitterBuffer.CompleteSequenceWithNextFrame())
        {
            // Jitter buffer state might get corrupted by this frame; snapshot
            // it into the dual receiver first.
            dualReceiver->CopyJitterBufferStateFromReceiver(*this);
        }

        frame = _jitterBuffer.GetFrameForDecoding();
    }
    return frame;
}
|
||||
|
||||
void
|
||||
VCMReceiver::ReleaseFrame(VCMEncodedFrame* frame)
|
||||
{
|
||||
_jitterBuffer.ReleaseFrame(frame);
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMReceiver::ReceiveStatistics(WebRtc_UWord32& bitRate, WebRtc_UWord32& frameRate)
|
||||
{
|
||||
_jitterBuffer.IncomingRateStatistics(&frameRate, &bitRate);
|
||||
bitRate /= 1000; // Should be in kbps
|
||||
return 0;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMReceiver::ReceivedFrameCount(VCMFrameCount& frameCount) const
|
||||
{
|
||||
_jitterBuffer.FrameStatistics(&frameCount.numDeltaFrames,
|
||||
&frameCount.numKeyFrames);
|
||||
return 0;
|
||||
}
|
||||
|
||||
WebRtc_UWord32 VCMReceiver::DiscardedPackets() const {
|
||||
return _jitterBuffer.num_discarded_packets();
|
||||
}
|
||||
|
||||
void
|
||||
VCMReceiver::SetNackMode(VCMNackMode nackMode)
|
||||
{
|
||||
CriticalSectionScoped cs(_critSect);
|
||||
// Default to always having NACK enabled in hybrid mode.
|
||||
_jitterBuffer.SetNackMode(nackMode, kLowRttNackMs, -1);
|
||||
if (!_master)
|
||||
{
|
||||
_state = kPassive; // The dual decoder defaults to passive
|
||||
}
|
||||
}
|
||||
|
||||
VCMNackMode
|
||||
VCMReceiver::NackMode() const
|
||||
{
|
||||
CriticalSectionScoped cs(_critSect);
|
||||
return _jitterBuffer.nack_mode();
|
||||
}
|
||||
|
||||
// Copies the jitter buffer's current NACK list into nackList (capacity given
// by size; size is updated to the actual list length). Returns
// kNackKeyFrameRequest when the jitter buffer asks for a key frame instead,
// kNackNeedMoreMemory when the caller's buffer is too small, else kNackOk.
VCMNackStatus VCMReceiver::NackList(WebRtc_UWord16* nackList,
                                    WebRtc_UWord16& size) {
    bool extended = false;
    WebRtc_UWord16 nackListSize = 0;
    WebRtc_UWord16* internalNackList =
        _jitterBuffer.CreateNackList(&nackListSize, &extended);
    if (internalNackList == NULL && nackListSize == 0xffff) {
        // A NULL list with size 0xffff is the jitter buffer's signal to
        // trigger a key frame request.
        size = 0;
        return kNackKeyFrameRequest;
    }
    if (nackListSize > size) {
        // Caller's buffer is too small; report the size needed.
        size = nackListSize;
        return kNackNeedMoreMemory;
    }
    if (internalNackList != NULL && nackListSize > 0) {
        memcpy(nackList, internalNackList, nackListSize * sizeof(WebRtc_UWord16));
    }
    size = nackListSize;
    return kNackOk;
}
|
||||
|
||||
// Decide whether we should change decoder state. This should be done if the dual decoder
|
||||
// has caught up with the decoder decoding with packet losses.
|
||||
bool
|
||||
VCMReceiver::DualDecoderCaughtUp(VCMEncodedFrame* dualFrame, VCMReceiver& dualReceiver) const
|
||||
{
|
||||
if (dualFrame == NULL)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
if (_jitterBuffer.LastDecodedTimestamp() == dualFrame->TimeStamp())
|
||||
{
|
||||
dualReceiver.UpdateState(kWaitForPrimaryDecode);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void
|
||||
VCMReceiver::CopyJitterBufferStateFromReceiver(const VCMReceiver& receiver)
|
||||
{
|
||||
_jitterBuffer.CopyFrom(receiver._jitterBuffer);
|
||||
}
|
||||
|
||||
VCMReceiverState
|
||||
VCMReceiver::State() const
|
||||
{
|
||||
CriticalSectionScoped cs(_critSect);
|
||||
return _state;
|
||||
}
|
||||
|
||||
void
|
||||
VCMReceiver::UpdateState(VCMReceiverState newState)
|
||||
{
|
||||
CriticalSectionScoped cs(_critSect);
|
||||
assert(!(_state == kPassive && newState == kWaitForPrimaryDecode));
|
||||
// assert(!(_state == kReceiving && newState == kPassive));
|
||||
_state = newState;
|
||||
}
|
||||
|
||||
void
|
||||
VCMReceiver::UpdateState(VCMEncodedFrame& frame)
|
||||
{
|
||||
if (_jitterBuffer.nack_mode() == kNoNack)
|
||||
{
|
||||
// Dual decoder mode has not been enabled.
|
||||
return;
|
||||
}
|
||||
// Update the dual receiver state
|
||||
if (frame.Complete() && frame.FrameType() == kVideoFrameKey)
|
||||
{
|
||||
UpdateState(kPassive);
|
||||
}
|
||||
if (State() == kWaitForPrimaryDecode &&
|
||||
frame.Complete() && !frame.MissingFrame())
|
||||
{
|
||||
UpdateState(kPassive);
|
||||
}
|
||||
if (frame.MissingFrame() || !frame.Complete())
|
||||
{
|
||||
// State was corrupted, enable dual receiver.
|
||||
UpdateState(kReceiving);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
101
webrtc/modules/video_coding/main/source/receiver.h
Normal file
101
webrtc/modules/video_coding/main/source/receiver.h
Normal file
@ -0,0 +1,101 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
|
||||
|
||||
#include "critical_section_wrapper.h"
|
||||
#include "jitter_buffer.h"
|
||||
#include "modules/video_coding/main/source/tick_time_base.h"
|
||||
#include "timing.h"
|
||||
#include "packet.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
class VCMEncodedFrame;
|
||||
|
||||
enum VCMNackStatus
|
||||
{
|
||||
kNackOk,
|
||||
kNackNeedMoreMemory,
|
||||
kNackKeyFrameRequest
|
||||
};
|
||||
|
||||
|
||||
enum VCMReceiverState
|
||||
{
|
||||
kReceiving,
|
||||
kPassive,
|
||||
kWaitForPrimaryDecode
|
||||
};
|
||||
|
||||
class VCMReceiver
|
||||
{
|
||||
public:
|
||||
VCMReceiver(VCMTiming& timing,
|
||||
TickTimeBase* clock,
|
||||
WebRtc_Word32 vcmId = -1,
|
||||
WebRtc_Word32 receiverId = -1,
|
||||
bool master = true);
|
||||
~VCMReceiver();
|
||||
|
||||
void Reset();
|
||||
WebRtc_Word32 Initialize();
|
||||
void UpdateRtt(WebRtc_UWord32 rtt);
|
||||
WebRtc_Word32 InsertPacket(const VCMPacket& packet,
|
||||
WebRtc_UWord16 frameWidth,
|
||||
WebRtc_UWord16 frameHeight);
|
||||
VCMEncodedFrame* FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs,
|
||||
WebRtc_Word64& nextRenderTimeMs,
|
||||
bool renderTiming = true,
|
||||
VCMReceiver* dualReceiver = NULL);
|
||||
void ReleaseFrame(VCMEncodedFrame* frame);
|
||||
WebRtc_Word32 ReceiveStatistics(WebRtc_UWord32& bitRate, WebRtc_UWord32& frameRate);
|
||||
WebRtc_Word32 ReceivedFrameCount(VCMFrameCount& frameCount) const;
|
||||
WebRtc_UWord32 DiscardedPackets() const;
|
||||
|
||||
// NACK
|
||||
void SetNackMode(VCMNackMode nackMode);
|
||||
VCMNackMode NackMode() const;
|
||||
VCMNackStatus NackList(WebRtc_UWord16* nackList, WebRtc_UWord16& size);
|
||||
|
||||
// Dual decoder
|
||||
bool DualDecoderCaughtUp(VCMEncodedFrame* dualFrame, VCMReceiver& dualReceiver) const;
|
||||
VCMReceiverState State() const;
|
||||
|
||||
private:
|
||||
VCMEncodedFrame* FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs,
|
||||
WebRtc_Word64 nextrenderTimeMs,
|
||||
VCMReceiver* dualReceiver);
|
||||
VCMEncodedFrame* FrameForRendering(WebRtc_UWord16 maxWaitTimeMs,
|
||||
WebRtc_Word64 nextrenderTimeMs,
|
||||
VCMReceiver* dualReceiver);
|
||||
void CopyJitterBufferStateFromReceiver(const VCMReceiver& receiver);
|
||||
void UpdateState(VCMReceiverState newState);
|
||||
void UpdateState(VCMEncodedFrame& frame);
|
||||
static WebRtc_Word32 GenerateReceiverId();
|
||||
|
||||
CriticalSectionWrapper* _critSect;
|
||||
WebRtc_Word32 _vcmId;
|
||||
TickTimeBase* _clock;
|
||||
WebRtc_Word32 _receiverId;
|
||||
bool _master;
|
||||
VCMJitterBuffer _jitterBuffer;
|
||||
VCMTiming& _timing;
|
||||
VCMEvent& _renderWaitEvent;
|
||||
VCMReceiverState _state;
|
||||
|
||||
static WebRtc_Word32 _receiverIdCounter;
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
|
||||
214
webrtc/modules/video_coding/main/source/rtt_filter.cc
Normal file
214
webrtc/modules/video_coding/main/source/rtt_filter.cc
Normal file
@ -0,0 +1,214 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "trace.h"
|
||||
#include "internal_defines.h"
|
||||
#include "rtt_filter.h"
|
||||
|
||||
#include <cmath>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
VCMRttFilter::VCMRttFilter(WebRtc_Word32 vcmId, WebRtc_Word32 receiverId)
    : _vcmId(vcmId),
      _receiverId(receiverId),
      _filtFactMax(35),
      _jumpStdDevs(2.5),
      _driftStdDevs(3.5),
      _detectThreshold(kMaxDriftJumpCount) {
    Reset();
}

// Copies all mutable filter state; the tuning constants are identical in
// every instance and are left untouched.
VCMRttFilter& VCMRttFilter::operator=(const VCMRttFilter& rhs) {
    if (this != &rhs) {
        _gotNonZeroUpdate = rhs._gotNonZeroUpdate;
        _avgRtt = rhs._avgRtt;
        _varRtt = rhs._varRtt;
        _maxRtt = rhs._maxRtt;
        _filtFactCount = rhs._filtFactCount;
        _jumpCount = rhs._jumpCount;
        _driftCount = rhs._driftCount;
        memcpy(_jumpBuf, rhs._jumpBuf, sizeof(_jumpBuf));
        memcpy(_driftBuf, rhs._driftBuf, sizeof(_driftBuf));
    }
    return *this;
}
|
||||
|
||||
void
|
||||
VCMRttFilter::Reset()
|
||||
{
|
||||
_gotNonZeroUpdate = false;
|
||||
_avgRtt = 0;
|
||||
_varRtt = 0;
|
||||
_maxRtt = 0;
|
||||
_filtFactCount = 1;
|
||||
_jumpCount = 0;
|
||||
_driftCount = 0;
|
||||
memset(_jumpBuf, 0, kMaxDriftJumpCount);
|
||||
memset(_driftBuf, 0, kMaxDriftJumpCount);
|
||||
}
|
||||
|
||||
void
|
||||
VCMRttFilter::Update(WebRtc_UWord32 rttMs)
|
||||
{
|
||||
if (!_gotNonZeroUpdate)
|
||||
{
|
||||
if (rttMs == 0)
|
||||
{
|
||||
return;
|
||||
}
|
||||
_gotNonZeroUpdate = true;
|
||||
}
|
||||
|
||||
// Sanity check
|
||||
if (rttMs > 3000)
|
||||
{
|
||||
rttMs = 3000;
|
||||
}
|
||||
|
||||
double filtFactor = 0;
|
||||
if (_filtFactCount > 1)
|
||||
{
|
||||
filtFactor = static_cast<double>(_filtFactCount - 1) / _filtFactCount;
|
||||
}
|
||||
_filtFactCount++;
|
||||
if (_filtFactCount > _filtFactMax)
|
||||
{
|
||||
// This prevents filtFactor from going above
|
||||
// (_filtFactMax - 1) / _filtFactMax,
|
||||
// e.g., _filtFactMax = 50 => filtFactor = 49/50 = 0.98
|
||||
_filtFactCount = _filtFactMax;
|
||||
}
|
||||
double oldAvg = _avgRtt;
|
||||
double oldVar = _varRtt;
|
||||
_avgRtt = filtFactor * _avgRtt + (1 - filtFactor) * rttMs;
|
||||
_varRtt = filtFactor * _varRtt + (1 - filtFactor) *
|
||||
(rttMs - _avgRtt) * (rttMs - _avgRtt);
|
||||
_maxRtt = VCM_MAX(rttMs, _maxRtt);
|
||||
if (!JumpDetection(rttMs) || !DriftDetection(rttMs))
|
||||
{
|
||||
// In some cases we don't want to update the statistics
|
||||
_avgRtt = oldAvg;
|
||||
_varRtt = oldVar;
|
||||
}
|
||||
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
|
||||
"RttFilter Update: sample=%u avgRtt=%f varRtt=%f maxRtt=%u",
|
||||
rttMs, _avgRtt, _varRtt, _maxRtt);
|
||||
}
|
||||
|
||||
bool
|
||||
VCMRttFilter::JumpDetection(WebRtc_UWord32 rttMs)
|
||||
{
|
||||
double diffFromAvg = _avgRtt - rttMs;
|
||||
if (abs(diffFromAvg) > _jumpStdDevs * sqrt(_varRtt))
|
||||
{
|
||||
int diffSign = (diffFromAvg >= 0) ? 1 : -1;
|
||||
int jumpCountSign = (_jumpCount >= 0) ? 1 : -1;
|
||||
if (diffSign != jumpCountSign)
|
||||
{
|
||||
// Since the signs differ the samples currently
|
||||
// in the buffer is useless as they represent a
|
||||
// jump in a different direction.
|
||||
_jumpCount = 0;
|
||||
}
|
||||
if (abs(_jumpCount) < kMaxDriftJumpCount)
|
||||
{
|
||||
// Update the buffer used for the short time
|
||||
// statistics.
|
||||
// The sign of the diff is used for updating the counter since
|
||||
// we want to use the same buffer for keeping track of when
|
||||
// the RTT jumps down and up.
|
||||
_jumpBuf[abs(_jumpCount)] = rttMs;
|
||||
_jumpCount += diffSign;
|
||||
}
|
||||
if (abs(_jumpCount) >= _detectThreshold)
|
||||
{
|
||||
// Detected an RTT jump
|
||||
ShortRttFilter(_jumpBuf, abs(_jumpCount));
|
||||
_filtFactCount = _detectThreshold + 1;
|
||||
_jumpCount = 0;
|
||||
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
|
||||
"Detected an RTT jump");
|
||||
}
|
||||
else
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
_jumpCount = 0;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
VCMRttFilter::DriftDetection(WebRtc_UWord32 rttMs)
|
||||
{
|
||||
if (_maxRtt - _avgRtt > _driftStdDevs * sqrt(_varRtt))
|
||||
{
|
||||
if (_driftCount < kMaxDriftJumpCount)
|
||||
{
|
||||
// Update the buffer used for the short time
|
||||
// statistics.
|
||||
_driftBuf[_driftCount] = rttMs;
|
||||
_driftCount++;
|
||||
}
|
||||
if (_driftCount >= _detectThreshold)
|
||||
{
|
||||
// Detected an RTT drift
|
||||
ShortRttFilter(_driftBuf, _driftCount);
|
||||
_filtFactCount = _detectThreshold + 1;
|
||||
_driftCount = 0;
|
||||
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
|
||||
"Detected an RTT drift");
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
_driftCount = 0;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void
|
||||
VCMRttFilter::ShortRttFilter(WebRtc_UWord32* buf, WebRtc_UWord32 length)
|
||||
{
|
||||
if (length == 0)
|
||||
{
|
||||
return;
|
||||
}
|
||||
_maxRtt = 0;
|
||||
_avgRtt = 0;
|
||||
for (WebRtc_UWord32 i=0; i < length; i++)
|
||||
{
|
||||
if (buf[i] > _maxRtt)
|
||||
{
|
||||
_maxRtt = buf[i];
|
||||
}
|
||||
_avgRtt += buf[i];
|
||||
}
|
||||
_avgRtt = _avgRtt / static_cast<double>(length);
|
||||
}
|
||||
|
||||
WebRtc_UWord32
|
||||
VCMRttFilter::RttMs() const
|
||||
{
|
||||
return static_cast<WebRtc_UWord32>(_maxRtt + 0.5);
|
||||
}
|
||||
|
||||
}
|
||||
70
webrtc/modules/video_coding/main/source/rtt_filter.h
Normal file
70
webrtc/modules/video_coding/main/source/rtt_filter.h
Normal file
@ -0,0 +1,70 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
class VCMRttFilter
|
||||
{
|
||||
public:
|
||||
VCMRttFilter(WebRtc_Word32 vcmId = 0, WebRtc_Word32 receiverId = 0);
|
||||
|
||||
VCMRttFilter& operator=(const VCMRttFilter& rhs);
|
||||
|
||||
// Resets the filter.
|
||||
void Reset();
|
||||
// Updates the filter with a new sample.
|
||||
void Update(WebRtc_UWord32 rttMs);
|
||||
// A getter function for the current RTT level in ms.
|
||||
WebRtc_UWord32 RttMs() const;
|
||||
|
||||
private:
|
||||
// The size of the drift and jump memory buffers
|
||||
// and thus also the detection threshold for these
|
||||
// detectors in number of samples.
|
||||
enum { kMaxDriftJumpCount = 5 };
|
||||
// Detects RTT jumps by comparing the difference between
|
||||
// samples and average to the standard deviation.
|
||||
// Returns true if the long time statistics should be updated
|
||||
// and false otherwise
|
||||
bool JumpDetection(WebRtc_UWord32 rttMs);
|
||||
// Detects RTT drifts by comparing the difference between
|
||||
// max and average to the standard deviation.
|
||||
// Returns true if the long time statistics should be updated
|
||||
// and false otherwise
|
||||
bool DriftDetection(WebRtc_UWord32 rttMs);
|
||||
// Computes the short time average and maximum of the vector buf.
|
||||
void ShortRttFilter(WebRtc_UWord32* buf, WebRtc_UWord32 length);
|
||||
|
||||
WebRtc_Word32 _vcmId;
|
||||
WebRtc_Word32 _receiverId;
|
||||
bool _gotNonZeroUpdate;
|
||||
double _avgRtt;
|
||||
double _varRtt;
|
||||
WebRtc_UWord32 _maxRtt;
|
||||
WebRtc_UWord32 _filtFactCount;
|
||||
const WebRtc_UWord32 _filtFactMax;
|
||||
const double _jumpStdDevs;
|
||||
const double _driftStdDevs;
|
||||
WebRtc_Word32 _jumpCount;
|
||||
WebRtc_Word32 _driftCount;
|
||||
const WebRtc_Word32 _detectThreshold;
|
||||
WebRtc_UWord32 _jumpBuf[kMaxDriftJumpCount];
|
||||
WebRtc_UWord32 _driftBuf[kMaxDriftJumpCount];
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
|
||||
595
webrtc/modules/video_coding/main/source/session_info.cc
Normal file
595
webrtc/modules/video_coding/main/source/session_info.cc
Normal file
@ -0,0 +1,595 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "modules/video_coding/main/source/session_info.h"
|
||||
|
||||
#include "modules/video_coding/main/source/packet.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
VCMSessionInfo::VCMSessionInfo()
|
||||
: session_nack_(false),
|
||||
complete_(false),
|
||||
decodable_(false),
|
||||
frame_type_(kVideoFrameDelta),
|
||||
previous_frame_loss_(false),
|
||||
packets_(),
|
||||
empty_seq_num_low_(-1),
|
||||
empty_seq_num_high_(-1),
|
||||
packets_not_decodable_(0) {
|
||||
}
|
||||
|
||||
void VCMSessionInfo::UpdateDataPointers(const uint8_t* old_base_ptr,
|
||||
const uint8_t* new_base_ptr) {
|
||||
for (PacketIterator it = packets_.begin(); it != packets_.end(); ++it)
|
||||
if ((*it).dataPtr != NULL) {
|
||||
assert(old_base_ptr != NULL && new_base_ptr != NULL);
|
||||
(*it).dataPtr = new_base_ptr + ((*it).dataPtr - old_base_ptr);
|
||||
}
|
||||
}
|
||||
|
||||
int VCMSessionInfo::LowSequenceNumber() const {
|
||||
if (packets_.empty())
|
||||
return empty_seq_num_low_;
|
||||
return packets_.front().seqNum;
|
||||
}
|
||||
|
||||
int VCMSessionInfo::HighSequenceNumber() const {
|
||||
if (packets_.empty())
|
||||
return empty_seq_num_high_;
|
||||
return LatestSequenceNumber(packets_.back().seqNum, empty_seq_num_high_,
|
||||
NULL);
|
||||
}
|
||||
|
||||
int VCMSessionInfo::PictureId() const {
|
||||
if (packets_.empty() ||
|
||||
packets_.front().codecSpecificHeader.codec != kRTPVideoVP8)
|
||||
return kNoPictureId;
|
||||
return packets_.front().codecSpecificHeader.codecHeader.VP8.pictureId;
|
||||
}
|
||||
|
||||
int VCMSessionInfo::TemporalId() const {
|
||||
if (packets_.empty() ||
|
||||
packets_.front().codecSpecificHeader.codec != kRTPVideoVP8)
|
||||
return kNoTemporalIdx;
|
||||
return packets_.front().codecSpecificHeader.codecHeader.VP8.temporalIdx;
|
||||
}
|
||||
|
||||
bool VCMSessionInfo::LayerSync() const {
|
||||
if (packets_.empty() ||
|
||||
packets_.front().codecSpecificHeader.codec != kRTPVideoVP8)
|
||||
return false;
|
||||
return packets_.front().codecSpecificHeader.codecHeader.VP8.layerSync;
|
||||
}
|
||||
|
||||
int VCMSessionInfo::Tl0PicId() const {
|
||||
if (packets_.empty() ||
|
||||
packets_.front().codecSpecificHeader.codec != kRTPVideoVP8)
|
||||
return kNoTl0PicIdx;
|
||||
return packets_.front().codecSpecificHeader.codecHeader.VP8.tl0PicIdx;
|
||||
}
|
||||
|
||||
bool VCMSessionInfo::NonReference() const {
|
||||
if (packets_.empty() ||
|
||||
packets_.front().codecSpecificHeader.codec != kRTPVideoVP8)
|
||||
return false;
|
||||
return packets_.front().codecSpecificHeader.codecHeader.VP8.nonReference;
|
||||
}
|
||||
|
||||
void VCMSessionInfo::Reset() {
|
||||
session_nack_ = false;
|
||||
complete_ = false;
|
||||
decodable_ = false;
|
||||
frame_type_ = kVideoFrameDelta;
|
||||
previous_frame_loss_ = false;
|
||||
packets_.clear();
|
||||
empty_seq_num_low_ = -1;
|
||||
empty_seq_num_high_ = -1;
|
||||
packets_not_decodable_ = 0;
|
||||
}
|
||||
|
||||
int VCMSessionInfo::SessionLength() const {
|
||||
int length = 0;
|
||||
for (PacketIteratorConst it = packets_.begin(); it != packets_.end(); ++it)
|
||||
length += (*it).sizeBytes;
|
||||
return length;
|
||||
}
|
||||
|
||||
int VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
|
||||
PacketIterator packet_it) {
|
||||
VCMPacket& packet = *packet_it;
|
||||
PacketIterator it;
|
||||
|
||||
int packet_size = packet.sizeBytes;
|
||||
packet_size += (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
|
||||
|
||||
// Calculate the offset into the frame buffer for this packet.
|
||||
int offset = 0;
|
||||
for (it = packets_.begin(); it != packet_it; ++it)
|
||||
offset += (*it).sizeBytes;
|
||||
|
||||
// Set the data pointer to pointing to the start of this packet in the
|
||||
// frame buffer.
|
||||
const uint8_t* data = packet.dataPtr;
|
||||
packet.dataPtr = frame_buffer + offset;
|
||||
packet.sizeBytes = packet_size;
|
||||
|
||||
ShiftSubsequentPackets(packet_it, packet_size);
|
||||
|
||||
const unsigned char startCode[] = {0, 0, 0, 1};
|
||||
if (packet.insertStartCode) {
|
||||
memcpy(const_cast<uint8_t*>(packet.dataPtr), startCode,
|
||||
kH264StartCodeLengthBytes);
|
||||
}
|
||||
memcpy(const_cast<uint8_t*>(packet.dataPtr
|
||||
+ (packet.insertStartCode ? kH264StartCodeLengthBytes : 0)),
|
||||
data,
|
||||
packet.sizeBytes);
|
||||
|
||||
return packet_size;
|
||||
}
|
||||
|
||||
void VCMSessionInfo::ShiftSubsequentPackets(PacketIterator it,
|
||||
int steps_to_shift) {
|
||||
++it;
|
||||
if (it == packets_.end())
|
||||
return;
|
||||
uint8_t* first_packet_ptr = const_cast<WebRtc_UWord8*>((*it).dataPtr);
|
||||
int shift_length = 0;
|
||||
// Calculate the total move length and move the data pointers in advance.
|
||||
for (; it != packets_.end(); ++it) {
|
||||
shift_length += (*it).sizeBytes;
|
||||
if ((*it).dataPtr != NULL)
|
||||
(*it).dataPtr += steps_to_shift;
|
||||
}
|
||||
memmove(first_packet_ptr + steps_to_shift, first_packet_ptr, shift_length);
|
||||
}
|
||||
|
||||
void VCMSessionInfo::UpdateCompleteSession() {
|
||||
if (packets_.front().isFirstPacket && packets_.back().markerBit) {
|
||||
// Do we have all the packets in this session?
|
||||
bool complete_session = true;
|
||||
PacketIterator it = packets_.begin();
|
||||
PacketIterator prev_it = it;
|
||||
++it;
|
||||
for (; it != packets_.end(); ++it) {
|
||||
if (!InSequence(it, prev_it)) {
|
||||
complete_session = false;
|
||||
break;
|
||||
}
|
||||
prev_it = it;
|
||||
}
|
||||
complete_ = complete_session;
|
||||
}
|
||||
}
|
||||
|
||||
void VCMSessionInfo::UpdateDecodableSession(int rttMs) {
|
||||
// Irrelevant if session is already complete or decodable
|
||||
if (complete_ || decodable_)
|
||||
return;
|
||||
// First iteration - do nothing
|
||||
}
|
||||
|
||||
bool VCMSessionInfo::complete() const {
|
||||
return complete_;
|
||||
}
|
||||
|
||||
bool VCMSessionInfo::decodable() const {
|
||||
return decodable_;
|
||||
}
|
||||
|
||||
// Find the end of the NAL unit which the packet pointed to by |packet_it|
|
||||
// belongs to. Returns an iterator to the last packet of the frame if the end
|
||||
// of the NAL unit wasn't found.
|
||||
VCMSessionInfo::PacketIterator VCMSessionInfo::FindNaluEnd(
|
||||
PacketIterator packet_it) const {
|
||||
if ((*packet_it).completeNALU == kNaluEnd ||
|
||||
(*packet_it).completeNALU == kNaluComplete) {
|
||||
return packet_it;
|
||||
}
|
||||
// Find the end of the NAL unit.
|
||||
for (; packet_it != packets_.end(); ++packet_it) {
|
||||
if (((*packet_it).completeNALU == kNaluComplete &&
|
||||
(*packet_it).sizeBytes > 0) ||
|
||||
// Found next NALU.
|
||||
(*packet_it).completeNALU == kNaluStart)
|
||||
return --packet_it;
|
||||
if ((*packet_it).completeNALU == kNaluEnd)
|
||||
return packet_it;
|
||||
}
|
||||
// The end wasn't found.
|
||||
return --packet_it;
|
||||
}
|
||||
|
||||
int VCMSessionInfo::DeletePacketData(PacketIterator start,
|
||||
PacketIterator end) {
|
||||
int bytes_to_delete = 0; // The number of bytes to delete.
|
||||
PacketIterator packet_after_end = end;
|
||||
++packet_after_end;
|
||||
|
||||
// Get the number of bytes to delete.
|
||||
// Clear the size of these packets.
|
||||
for (PacketIterator it = start; it != packet_after_end; ++it) {
|
||||
bytes_to_delete += (*it).sizeBytes;
|
||||
(*it).sizeBytes = 0;
|
||||
(*it).dataPtr = NULL;
|
||||
++packets_not_decodable_;
|
||||
}
|
||||
if (bytes_to_delete > 0)
|
||||
ShiftSubsequentPackets(end, -bytes_to_delete);
|
||||
return bytes_to_delete;
|
||||
}
|
||||
|
||||
int VCMSessionInfo::BuildVP8FragmentationHeader(
|
||||
uint8_t* frame_buffer,
|
||||
int frame_buffer_length,
|
||||
RTPFragmentationHeader* fragmentation) {
|
||||
int new_length = 0;
|
||||
// Allocate space for max number of partitions
|
||||
fragmentation->VerifyAndAllocateFragmentationHeader(kMaxVP8Partitions);
|
||||
fragmentation->fragmentationVectorSize = 0;
|
||||
memset(fragmentation->fragmentationLength, 0,
|
||||
kMaxVP8Partitions * sizeof(WebRtc_UWord32));
|
||||
if (packets_.empty())
|
||||
return new_length;
|
||||
PacketIterator it = FindNextPartitionBeginning(packets_.begin(),
|
||||
&packets_not_decodable_);
|
||||
while (it != packets_.end()) {
|
||||
const int partition_id =
|
||||
(*it).codecSpecificHeader.codecHeader.VP8.partitionId;
|
||||
PacketIterator partition_end = FindPartitionEnd(it);
|
||||
fragmentation->fragmentationOffset[partition_id] =
|
||||
(*it).dataPtr - frame_buffer;
|
||||
assert(fragmentation->fragmentationOffset[partition_id] <
|
||||
static_cast<WebRtc_UWord32>(frame_buffer_length));
|
||||
fragmentation->fragmentationLength[partition_id] =
|
||||
(*partition_end).dataPtr + (*partition_end).sizeBytes - (*it).dataPtr;
|
||||
assert(fragmentation->fragmentationLength[partition_id] <=
|
||||
static_cast<WebRtc_UWord32>(frame_buffer_length));
|
||||
new_length += fragmentation->fragmentationLength[partition_id];
|
||||
++partition_end;
|
||||
it = FindNextPartitionBeginning(partition_end, &packets_not_decodable_);
|
||||
if (partition_id + 1 > fragmentation->fragmentationVectorSize)
|
||||
fragmentation->fragmentationVectorSize = partition_id + 1;
|
||||
}
|
||||
// Set all empty fragments to start where the previous fragment ends,
|
||||
// and have zero length.
|
||||
if (fragmentation->fragmentationLength[0] == 0)
|
||||
fragmentation->fragmentationOffset[0] = 0;
|
||||
for (int i = 1; i < fragmentation->fragmentationVectorSize; ++i) {
|
||||
if (fragmentation->fragmentationLength[i] == 0)
|
||||
fragmentation->fragmentationOffset[i] =
|
||||
fragmentation->fragmentationOffset[i - 1] +
|
||||
fragmentation->fragmentationLength[i - 1];
|
||||
assert(i == 0 ||
|
||||
fragmentation->fragmentationOffset[i] >=
|
||||
fragmentation->fragmentationOffset[i - 1]);
|
||||
}
|
||||
assert(new_length <= frame_buffer_length);
|
||||
return new_length;
|
||||
}
|
||||
|
||||
VCMSessionInfo::PacketIterator VCMSessionInfo::FindNextPartitionBeginning(
|
||||
PacketIterator it, int* packets_skipped) const {
|
||||
while (it != packets_.end()) {
|
||||
if ((*it).codecSpecificHeader.codecHeader.VP8.beginningOfPartition) {
|
||||
return it;
|
||||
} else if (packets_skipped != NULL) {
|
||||
// This packet belongs to a partition with a previous loss and can't
|
||||
// be decoded.
|
||||
++(*packets_skipped);
|
||||
}
|
||||
++it;
|
||||
}
|
||||
return it;
|
||||
}
|
||||
|
||||
VCMSessionInfo::PacketIterator VCMSessionInfo::FindPartitionEnd(
|
||||
PacketIterator it) const {
|
||||
assert((*it).codec == kVideoCodecVP8);
|
||||
PacketIterator prev_it = it;
|
||||
const int partition_id =
|
||||
(*it).codecSpecificHeader.codecHeader.VP8.partitionId;
|
||||
while (it != packets_.end()) {
|
||||
bool beginning =
|
||||
(*it).codecSpecificHeader.codecHeader.VP8.beginningOfPartition;
|
||||
int current_partition_id =
|
||||
(*it).codecSpecificHeader.codecHeader.VP8.partitionId;
|
||||
bool packet_loss_found = (!beginning && !InSequence(it, prev_it));
|
||||
if (packet_loss_found ||
|
||||
(beginning && current_partition_id != partition_id)) {
|
||||
// Missing packet, the previous packet was the last in sequence.
|
||||
return prev_it;
|
||||
}
|
||||
prev_it = it;
|
||||
++it;
|
||||
}
|
||||
return prev_it;
|
||||
}
|
||||
|
||||
bool VCMSessionInfo::InSequence(const PacketIterator& packet_it,
|
||||
const PacketIterator& prev_packet_it) {
|
||||
// If the two iterators are pointing to the same packet they are considered
|
||||
// to be in sequence.
|
||||
return (packet_it == prev_packet_it ||
|
||||
(static_cast<WebRtc_UWord16>((*prev_packet_it).seqNum + 1) ==
|
||||
(*packet_it).seqNum));
|
||||
}
|
||||
|
||||
int VCMSessionInfo::MakeDecodable() {
|
||||
int return_length = 0;
|
||||
if (packets_.empty()) {
|
||||
return 0;
|
||||
}
|
||||
PacketIterator it = packets_.begin();
|
||||
// Make sure we remove the first NAL unit if it's not decodable.
|
||||
if ((*it).completeNALU == kNaluIncomplete ||
|
||||
(*it).completeNALU == kNaluEnd) {
|
||||
PacketIterator nalu_end = FindNaluEnd(it);
|
||||
return_length += DeletePacketData(it, nalu_end);
|
||||
it = nalu_end;
|
||||
}
|
||||
PacketIterator prev_it = it;
|
||||
// Take care of the rest of the NAL units.
|
||||
for (; it != packets_.end(); ++it) {
|
||||
bool start_of_nalu = ((*it).completeNALU == kNaluStart ||
|
||||
(*it).completeNALU == kNaluComplete);
|
||||
if (!start_of_nalu && !InSequence(it, prev_it)) {
|
||||
// Found a sequence number gap due to packet loss.
|
||||
PacketIterator nalu_end = FindNaluEnd(it);
|
||||
return_length += DeletePacketData(it, nalu_end);
|
||||
it = nalu_end;
|
||||
}
|
||||
prev_it = it;
|
||||
}
|
||||
return return_length;
|
||||
}
|
||||
|
||||
int VCMSessionInfo::BuildHardNackList(int* seq_num_list,
|
||||
int seq_num_list_length) {
|
||||
if (NULL == seq_num_list || seq_num_list_length < 1) {
|
||||
return -1;
|
||||
}
|
||||
if (packets_.empty()) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Find end point (index of entry equals the sequence number of the first
|
||||
// packet).
|
||||
int index = 0;
|
||||
for (; index < seq_num_list_length; ++index) {
|
||||
if (seq_num_list[index] == packets_.front().seqNum) {
|
||||
seq_num_list[index] = -1;
|
||||
++index;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Zero out between the first entry and the end point.
|
||||
PacketIterator it = packets_.begin();
|
||||
PacketIterator prev_it = it;
|
||||
++it;
|
||||
while (it != packets_.end() && index < seq_num_list_length) {
|
||||
if (!InSequence(it, prev_it)) {
|
||||
// Found a sequence number gap due to packet loss.
|
||||
index += PacketsMissing(it, prev_it);
|
||||
session_nack_ = true;
|
||||
}
|
||||
seq_num_list[index] = -1;
|
||||
++index;
|
||||
prev_it = it;
|
||||
++it;
|
||||
}
|
||||
if (!packets_.front().isFirstPacket)
|
||||
session_nack_ = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int VCMSessionInfo::BuildSoftNackList(int* seq_num_list,
|
||||
int seq_num_list_length,
|
||||
int rtt_ms) {
|
||||
if (NULL == seq_num_list || seq_num_list_length < 1) {
|
||||
return -1;
|
||||
}
|
||||
if (packets_.empty() && empty_seq_num_low_ == -1) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int index = 0;
|
||||
int low_seq_num = (packets_.empty()) ? empty_seq_num_low_:
|
||||
packets_.front().seqNum;
|
||||
// Find entrance point (index of entry equals the sequence number of the
|
||||
// first packet).
|
||||
for (; index < seq_num_list_length; ++index) {
|
||||
if (seq_num_list[index] == low_seq_num) {
|
||||
seq_num_list[index] = -1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(mikhal): 1. Update score based on RTT value 2. Add partition data.
|
||||
// Use the previous available.
|
||||
bool base_available = false;
|
||||
if ((index > 0) && (seq_num_list[index] == -1)) {
|
||||
// Found first packet, for now let's go only one back.
|
||||
if ((seq_num_list[index - 1] == -1) || (seq_num_list[index - 1] == -2)) {
|
||||
// This is indeed the first packet, as previous packet was populated.
|
||||
base_available = true;
|
||||
}
|
||||
}
|
||||
bool allow_nack = ((packets_.size() > 0 && !packets_.front().isFirstPacket)
|
||||
|| !base_available);
|
||||
|
||||
// Zero out between first entry and end point.
|
||||
|
||||
int media_high_seq_num;
|
||||
if (HaveLastPacket()) {
|
||||
media_high_seq_num = packets_.back().seqNum;
|
||||
} else {
|
||||
// Estimation.
|
||||
if (empty_seq_num_low_ >= 0) {
|
||||
// Assuming empty packets have later sequence numbers than media packets.
|
||||
media_high_seq_num = empty_seq_num_low_ - 1;
|
||||
} else {
|
||||
// Since this frame doesn't have the marker bit we can assume it should
|
||||
// contain at least one more packet.
|
||||
media_high_seq_num = static_cast<uint16_t>(packets_.back().seqNum + 1);
|
||||
}
|
||||
}
|
||||
|
||||
// Compute session/packet scores and thresholds:
|
||||
// based on RTT and layer info (when available).
|
||||
float nack_score_threshold = 0.25f;
|
||||
float layer_score = TemporalId() > 0 ? 0.0f : 1.0f;
|
||||
float rtt_score = 1.0f;
|
||||
float score_multiplier = rtt_score * layer_score;
|
||||
// Zero out between first entry and end point.
|
||||
if (!packets_.empty()) {
|
||||
PacketIterator it = packets_.begin();
|
||||
PacketIterator prev_it = it;
|
||||
++index;
|
||||
++it;
|
||||
// TODO(holmer): Rewrite this in a way which better makes use of the list.
|
||||
while (it != packets_.end() && index < seq_num_list_length) {
|
||||
// Only process media packet sequence numbers.
|
||||
if (LatestSequenceNumber((*it).seqNum, media_high_seq_num, NULL) ==
|
||||
(*it).seqNum && (*it).seqNum != media_high_seq_num)
|
||||
break;
|
||||
if (!InSequence(it, prev_it)) {
|
||||
// Found a sequence number gap due to packet loss.
|
||||
int num_lost = PacketsMissing(it, prev_it);
|
||||
for (int i = 0 ; i < num_lost; ++i) {
|
||||
// Compute score of the packet.
|
||||
float score = 1.0f;
|
||||
// Multiply internal score (packet) by score multiplier.
|
||||
score *= score_multiplier;
|
||||
if (score > nack_score_threshold) {
|
||||
allow_nack = true;
|
||||
} else {
|
||||
seq_num_list[index] = -1;
|
||||
}
|
||||
++index;
|
||||
}
|
||||
}
|
||||
seq_num_list[index] = -1;
|
||||
++index;
|
||||
prev_it = it;
|
||||
++it;
|
||||
}
|
||||
}
|
||||
|
||||
// Empty packets follow the data packets, and therefore have a higher
|
||||
// sequence number. We do not want to NACK empty packets.
|
||||
if ((empty_seq_num_low_ != -1) && (empty_seq_num_high_ != -1) &&
|
||||
(index < seq_num_list_length)) {
|
||||
// First make sure that we are at least at the minimum value (if not we are
|
||||
// missing last packet(s)).
|
||||
while (seq_num_list[index] < empty_seq_num_low_ &&
|
||||
index < seq_num_list_length) {
|
||||
++index;
|
||||
}
|
||||
|
||||
// Mark empty packets.
|
||||
while (seq_num_list[index] <= empty_seq_num_high_ &&
|
||||
index < seq_num_list_length) {
|
||||
seq_num_list[index] = -2;
|
||||
++index;
|
||||
}
|
||||
}
|
||||
|
||||
session_nack_ = allow_nack;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int VCMSessionInfo::PacketsMissing(const PacketIterator& packet_it,
|
||||
const PacketIterator& prev_packet_it) {
|
||||
if (packet_it == prev_packet_it)
|
||||
return 0;
|
||||
if ((*prev_packet_it).seqNum > (*packet_it).seqNum) // Wrap.
|
||||
return static_cast<WebRtc_UWord16>(
|
||||
static_cast<WebRtc_UWord32>((*packet_it).seqNum + 0x10000) -
|
||||
(*prev_packet_it).seqNum) - 1;
|
||||
else
|
||||
return (*packet_it).seqNum - (*prev_packet_it).seqNum - 1;
|
||||
}
|
||||
|
||||
bool
|
||||
VCMSessionInfo::HaveLastPacket() const {
|
||||
return (!packets_.empty() && packets_.back().markerBit);
|
||||
}
|
||||
|
||||
bool
|
||||
VCMSessionInfo::session_nack() const {
|
||||
return session_nack_;
|
||||
}
|
||||
|
||||
int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
|
||||
uint8_t* frame_buffer,
|
||||
bool enable_decodable_state,
|
||||
int rtt_ms) {
|
||||
// Check if this is first packet (only valid for some codecs)
|
||||
if (packet.isFirstPacket) {
|
||||
// The first packet in a frame signals the frame type.
|
||||
frame_type_ = packet.frameType;
|
||||
} else if (frame_type_ == kFrameEmpty && packet.frameType != kFrameEmpty) {
|
||||
// Update the frame type with the first media packet.
|
||||
frame_type_ = packet.frameType;
|
||||
}
|
||||
if (packet.frameType == kFrameEmpty) {
|
||||
// Update sequence number of an empty packet.
|
||||
// Only media packets are inserted into the packet list.
|
||||
InformOfEmptyPacket(packet.seqNum);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (packets_.size() == kMaxPacketsInSession)
|
||||
return -1;
|
||||
|
||||
// Find the position of this packet in the packet list in sequence number
|
||||
// order and insert it. Loop over the list in reverse order.
|
||||
ReversePacketIterator rit = packets_.rbegin();
|
||||
for (; rit != packets_.rend(); ++rit)
|
||||
if (LatestSequenceNumber((*rit).seqNum, packet.seqNum, NULL) ==
|
||||
packet.seqNum)
|
||||
break;
|
||||
|
||||
// Check for duplicate packets.
|
||||
if (rit != packets_.rend() &&
|
||||
(*rit).seqNum == packet.seqNum && (*rit).sizeBytes > 0)
|
||||
return -2;
|
||||
|
||||
// The insert operation invalidates the iterator |rit|.
|
||||
PacketIterator packet_list_it = packets_.insert(rit.base(), packet);
|
||||
|
||||
int returnLength = InsertBuffer(frame_buffer, packet_list_it);
|
||||
UpdateCompleteSession();
|
||||
if (enable_decodable_state)
|
||||
UpdateDecodableSession(rtt_ms);
|
||||
return returnLength;
|
||||
}
|
||||
|
||||
void VCMSessionInfo::InformOfEmptyPacket(uint16_t seq_num) {
|
||||
// Empty packets may be FEC or filler packets. They are sequential and
|
||||
// follow the data packets, therefore, we should only keep track of the high
|
||||
// and low sequence numbers and may assume that the packets in between are
|
||||
// empty packets belonging to the same frame (timestamp).
|
||||
empty_seq_num_high_ = LatestSequenceNumber(seq_num, empty_seq_num_high_,
|
||||
NULL);
|
||||
if (empty_seq_num_low_ == -1 ||
|
||||
LatestSequenceNumber(seq_num, empty_seq_num_low_, NULL) ==
|
||||
empty_seq_num_low_)
|
||||
empty_seq_num_low_ = seq_num;
|
||||
}
|
||||
|
||||
int VCMSessionInfo::packets_not_decodable() const {
|
||||
return packets_not_decodable_;
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
134
webrtc/modules/video_coding/main/source/session_info.h
Normal file
134
webrtc/modules/video_coding/main/source/session_info.h
Normal file
@ -0,0 +1,134 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
|
||||
|
||||
#include <list>
|
||||
|
||||
#include "modules/interface/module_common_types.h"
|
||||
#include "modules/video_coding/main/source/packet.h"
|
||||
#include "typedefs.h" // NOLINT(build/include)
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
class VCMSessionInfo {
|
||||
public:
|
||||
VCMSessionInfo();
|
||||
|
||||
void UpdateDataPointers(const uint8_t* old_base_ptr,
|
||||
const uint8_t* new_base_ptr);
|
||||
// NACK - Building the NACK lists.
|
||||
// Build hard NACK list: Zero out all entries in list up to and including
|
||||
// _lowSeqNum.
|
||||
int BuildHardNackList(int* seq_num_list,
|
||||
int seq_num_list_length);
|
||||
|
||||
// Build soft NACK list: Zero out only a subset of the packets, discard
|
||||
// empty packets.
|
||||
int BuildSoftNackList(int* seq_num_list,
|
||||
int seq_num_list_length,
|
||||
int rtt_ms);
|
||||
void Reset();
|
||||
int InsertPacket(const VCMPacket& packet,
|
||||
uint8_t* frame_buffer,
|
||||
bool enable_decodable_state,
|
||||
int rtt_ms);
|
||||
bool complete() const;
|
||||
bool decodable() const;
|
||||
|
||||
// Builds fragmentation headers for VP8, each fragment being a decodable
|
||||
// VP8 partition. Returns the total number of bytes which are decodable. Is
|
||||
// used instead of MakeDecodable for VP8.
|
||||
int BuildVP8FragmentationHeader(uint8_t* frame_buffer,
|
||||
int frame_buffer_length,
|
||||
RTPFragmentationHeader* fragmentation);
|
||||
|
||||
// Makes the frame decodable. I.e., only contain decodable NALUs. All
|
||||
// non-decodable NALUs will be deleted and packets will be moved to in
|
||||
// memory to remove any empty space.
|
||||
// Returns the number of bytes deleted from the session.
|
||||
int MakeDecodable();
|
||||
int SessionLength() const;
|
||||
bool HaveLastPacket() const;
|
||||
bool session_nack() const;
|
||||
webrtc::FrameType FrameType() const { return frame_type_; }
|
||||
int LowSequenceNumber() const;
|
||||
|
||||
// Returns highest sequence number, media or empty.
|
||||
int HighSequenceNumber() const;
|
||||
int PictureId() const;
|
||||
int TemporalId() const;
|
||||
bool LayerSync() const;
|
||||
int Tl0PicId() const;
|
||||
bool NonReference() const;
|
||||
void SetPreviousFrameLoss() { previous_frame_loss_ = true; }
|
||||
bool PreviousFrameLoss() const { return previous_frame_loss_; }
|
||||
|
||||
// The number of packets discarded because the decoder can't make use of
|
||||
// them.
|
||||
int packets_not_decodable() const;
|
||||
|
||||
private:
|
||||
enum { kMaxVP8Partitions = 9 };
|
||||
|
||||
typedef std::list<VCMPacket> PacketList;
|
||||
typedef PacketList::iterator PacketIterator;
|
||||
typedef PacketList::const_iterator PacketIteratorConst;
|
||||
typedef PacketList::reverse_iterator ReversePacketIterator;
|
||||
|
||||
void InformOfEmptyPacket(uint16_t seq_num);
|
||||
|
||||
// Finds the packet of the beginning of the next VP8 partition. If
|
||||
// none is found the returned iterator points to |packets_.end()|.
|
||||
// |it| is expected to point to the last packet of the previous partition,
|
||||
// or to the first packet of the frame. |packets_skipped| is incremented
|
||||
// for each packet found which doesn't have the beginning bit set.
|
||||
PacketIterator FindNextPartitionBeginning(PacketIterator it,
|
||||
int* packets_skipped) const;
|
||||
|
||||
// Returns an iterator pointing to the last packet of the partition pointed to
|
||||
// by |it|.
|
||||
PacketIterator FindPartitionEnd(PacketIterator it) const;
|
||||
static bool InSequence(const PacketIterator& it,
|
||||
const PacketIterator& prev_it);
|
||||
static int PacketsMissing(const PacketIterator& packet_it,
|
||||
const PacketIterator& prev_packet_it);
|
||||
int InsertBuffer(uint8_t* frame_buffer,
|
||||
PacketIterator packetIterator);
|
||||
void ShiftSubsequentPackets(PacketIterator it, int steps_to_shift);
|
||||
PacketIterator FindNaluEnd(PacketIterator packet_iter) const;
|
||||
// Deletes the data of all packets between |start| and |end|, inclusively.
|
||||
// Note that this function doesn't delete the actual packets.
|
||||
int DeletePacketData(PacketIterator start,
|
||||
PacketIterator end);
|
||||
void UpdateCompleteSession();
|
||||
|
||||
// When enabled, determine if session is decodable, i.e. incomplete but
|
||||
// would be sent to the decoder.
|
||||
void UpdateDecodableSession(int rtt_ms);
|
||||
|
||||
// If this session has been NACKed by the jitter buffer.
|
||||
bool session_nack_;
|
||||
bool complete_;
|
||||
bool decodable_;
|
||||
webrtc::FrameType frame_type_;
|
||||
bool previous_frame_loss_;
|
||||
// Packets in this frame.
|
||||
PacketList packets_;
|
||||
int empty_seq_num_low_;
|
||||
int empty_seq_num_high_;
|
||||
// Number of packets discarded because the decoder can't use them.
|
||||
int packets_not_decodable_;
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
|
||||
931
webrtc/modules/video_coding/main/source/session_info_unittest.cc
Normal file
931
webrtc/modules/video_coding/main/source/session_info_unittest.cc
Normal file
@ -0,0 +1,931 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "modules/interface/module_common_types.h"
|
||||
#include "modules/video_coding/main/source/packet.h"
|
||||
#include "modules/video_coding/main/source/session_info.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
class TestSessionInfo : public ::testing::Test {
|
||||
protected:
|
||||
enum { kPacketBufferSize = 10 };
|
||||
enum { kFrameBufferSize = 10 * kPacketBufferSize };
|
||||
|
||||
virtual void SetUp() {
|
||||
memset(packet_buffer_, 0, kPacketBufferSize);
|
||||
memset(frame_buffer_, 0, kFrameBufferSize);
|
||||
session_.Reset();
|
||||
packet_.Reset();
|
||||
packet_.frameType = kVideoFrameDelta;
|
||||
packet_.sizeBytes = kPacketBufferSize;
|
||||
packet_.dataPtr = packet_buffer_;
|
||||
packet_.seqNum = 0;
|
||||
packet_.timestamp = 0;
|
||||
}
|
||||
|
||||
void FillPacket(uint8_t start_value) {
|
||||
for (int i = 0; i < kPacketBufferSize; ++i)
|
||||
packet_buffer_[i] = start_value + i;
|
||||
}
|
||||
|
||||
void VerifyPacket(uint8_t* start_ptr, uint8_t start_value) {
|
||||
for (int j = 0; j < kPacketBufferSize; ++j) {
|
||||
ASSERT_EQ(start_value + j, start_ptr[j]);
|
||||
}
|
||||
}
|
||||
|
||||
uint8_t packet_buffer_[kPacketBufferSize];
|
||||
uint8_t frame_buffer_[kFrameBufferSize];
|
||||
VCMSessionInfo session_;
|
||||
VCMPacket packet_;
|
||||
};
|
||||
|
||||
class TestVP8Partitions : public TestSessionInfo {
|
||||
protected:
|
||||
enum { kMaxVP8Partitions = 9 };
|
||||
|
||||
virtual void SetUp() {
|
||||
TestSessionInfo::SetUp();
|
||||
vp8_header_ = &packet_header_.type.Video.codecHeader.VP8;
|
||||
packet_header_.frameType = kVideoFrameDelta;
|
||||
packet_header_.type.Video.codec = kRTPVideoVP8;
|
||||
vp8_header_->InitRTPVideoHeaderVP8();
|
||||
fragmentation_.VerifyAndAllocateFragmentationHeader(kMaxVP8Partitions);
|
||||
}
|
||||
|
||||
bool VerifyPartition(int partition_id,
|
||||
int packets_expected,
|
||||
int start_value) {
|
||||
EXPECT_EQ(static_cast<uint32_t>(packets_expected * kPacketBufferSize),
|
||||
fragmentation_.fragmentationLength[partition_id]);
|
||||
for (int i = 0; i < packets_expected; ++i) {
|
||||
int packet_index = fragmentation_.fragmentationOffset[partition_id] +
|
||||
i * kPacketBufferSize;
|
||||
if (packet_index + kPacketBufferSize > kFrameBufferSize)
|
||||
return false;
|
||||
VerifyPacket(frame_buffer_ + packet_index, start_value + i);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
WebRtcRTPHeader packet_header_;
|
||||
RTPVideoHeaderVP8* vp8_header_;
|
||||
RTPFragmentationHeader fragmentation_;
|
||||
};
|
||||
|
||||
class TestNalUnits : public TestSessionInfo {
|
||||
protected:
|
||||
virtual void SetUp() {
|
||||
TestSessionInfo::SetUp();
|
||||
packet_.codec = kVideoCodecVP8;
|
||||
}
|
||||
|
||||
bool VerifyNalu(int offset, int packets_expected, int start_value) {
|
||||
EXPECT_GE(session_.SessionLength(),
|
||||
packets_expected * kPacketBufferSize);
|
||||
for (int i = 0; i < packets_expected; ++i) {
|
||||
int packet_index = offset * kPacketBufferSize + i * kPacketBufferSize;
|
||||
VerifyPacket(frame_buffer_ + packet_index, start_value + i);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
class TestNackList : public TestSessionInfo {
|
||||
protected:
|
||||
enum { kMaxSeqNumListLength = 30 };
|
||||
|
||||
virtual void SetUp() {
|
||||
TestSessionInfo::SetUp();
|
||||
seq_num_list_length_ = 0;
|
||||
memset(seq_num_list_, 0, sizeof(seq_num_list_));
|
||||
}
|
||||
|
||||
void BuildSeqNumList(uint16_t low,
|
||||
uint16_t high) {
|
||||
int i = 0;
|
||||
while (low != high + 1) {
|
||||
EXPECT_LT(i, kMaxSeqNumListLength);
|
||||
if (i >= kMaxSeqNumListLength) {
|
||||
seq_num_list_length_ = kMaxSeqNumListLength;
|
||||
return;
|
||||
}
|
||||
seq_num_list_[i] = low;
|
||||
low++;
|
||||
i++;
|
||||
}
|
||||
seq_num_list_length_ = i;
|
||||
}
|
||||
|
||||
void VerifyAll(int value) {
|
||||
for (int i = 0; i < seq_num_list_length_; ++i)
|
||||
EXPECT_EQ(seq_num_list_[i], value);
|
||||
}
|
||||
|
||||
int seq_num_list_[kMaxSeqNumListLength];
|
||||
int seq_num_list_length_;
|
||||
};
|
||||
|
||||
TEST_F(TestSessionInfo, TestSimpleAPIs) {
|
||||
packet_.isFirstPacket = true;
|
||||
packet_.seqNum = 0xFFFE;
|
||||
packet_.sizeBytes = kPacketBufferSize;
|
||||
packet_.frameType = kVideoFrameKey;
|
||||
FillPacket(0);
|
||||
ASSERT_EQ(kPacketBufferSize,
|
||||
session_.InsertPacket(packet_, frame_buffer_, false, 0));
|
||||
EXPECT_FALSE(session_.HaveLastPacket());
|
||||
EXPECT_EQ(kVideoFrameKey, session_.FrameType());
|
||||
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.markerBit = true;
|
||||
packet_.seqNum += 1;
|
||||
ASSERT_EQ(kPacketBufferSize,
|
||||
session_.InsertPacket(packet_, frame_buffer_, false, 0));
|
||||
EXPECT_TRUE(session_.HaveLastPacket());
|
||||
EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
|
||||
EXPECT_EQ(0xFFFE, session_.LowSequenceNumber());
|
||||
|
||||
// Insert empty packet which will be the new high sequence number.
|
||||
// To make things more difficult we will make sure to have a wrap here.
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.markerBit = true;
|
||||
packet_.seqNum = 2;
|
||||
packet_.sizeBytes = 0;
|
||||
packet_.frameType = kFrameEmpty;
|
||||
ASSERT_EQ(0,
|
||||
session_.InsertPacket(packet_, frame_buffer_, false, 0));
|
||||
EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
|
||||
}
|
||||
|
||||
TEST_F(TestSessionInfo, NormalOperation) {
|
||||
packet_.seqNum = 0xFFFF;
|
||||
packet_.isFirstPacket = true;
|
||||
packet_.markerBit = false;
|
||||
FillPacket(0);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
packet_.isFirstPacket = false;
|
||||
for (int i = 1; i < 9; ++i) {
|
||||
packet_.seqNum += 1;
|
||||
FillPacket(i);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
}
|
||||
|
||||
packet_.seqNum += 1;
|
||||
packet_.markerBit = true;
|
||||
FillPacket(9);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
EXPECT_EQ(0, session_.packets_not_decodable());
|
||||
EXPECT_EQ(10 * kPacketBufferSize, session_.SessionLength());
|
||||
for (int i = 0; i < 10; ++i) {
|
||||
SCOPED_TRACE("Calling VerifyPacket");
|
||||
VerifyPacket(frame_buffer_ + i * kPacketBufferSize, i);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(TestVP8Partitions, TwoPartitionsOneLoss) {
|
||||
// Partition 0 | Partition 1
|
||||
// [ 0 ] [ 2 ] | [ 3 ]
|
||||
packet_header_.type.Video.isFirstPacket = true;
|
||||
vp8_header_->beginningOfPartition = true;
|
||||
vp8_header_->partitionId = 0;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber = 0;
|
||||
FillPacket(0);
|
||||
VCMPacket* packet = new VCMPacket(packet_buffer_, kPacketBufferSize,
|
||||
packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 0;
|
||||
vp8_header_->beginningOfPartition = false;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber += 2;
|
||||
FillPacket(2);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 1;
|
||||
vp8_header_->beginningOfPartition = true;
|
||||
packet_header_.header.markerBit = true;
|
||||
packet_header_.header.sequenceNumber += 1;
|
||||
FillPacket(3);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
// One packet should be removed (end of partition 0).
|
||||
EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
|
||||
kFrameBufferSize,
|
||||
&fragmentation_),
|
||||
2*kPacketBufferSize);
|
||||
SCOPED_TRACE("Calling VerifyPartition");
|
||||
EXPECT_TRUE(VerifyPartition(0, 1, 0));
|
||||
SCOPED_TRACE("Calling VerifyPartition");
|
||||
EXPECT_TRUE(VerifyPartition(1, 1, 3));
|
||||
}
|
||||
|
||||
TEST_F(TestVP8Partitions, TwoPartitionsOneLoss2) {
|
||||
// Partition 0 | Partition 1
|
||||
// [ 1 ] [ 2 ] | [ 3 ] [ 5 ]
|
||||
packet_header_.type.Video.isFirstPacket = true;
|
||||
vp8_header_->beginningOfPartition = true;
|
||||
vp8_header_->partitionId = 0;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber = 1;
|
||||
FillPacket(1);
|
||||
VCMPacket* packet = new VCMPacket(packet_buffer_, kPacketBufferSize,
|
||||
packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0)
|
||||
, kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 0;
|
||||
vp8_header_->beginningOfPartition = false;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber += 1;
|
||||
FillPacket(2);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 1;
|
||||
vp8_header_->beginningOfPartition = true;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber += 1;
|
||||
FillPacket(3);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 1;
|
||||
vp8_header_->beginningOfPartition = false;
|
||||
packet_header_.header.markerBit = true;
|
||||
packet_header_.header.sequenceNumber += 2;
|
||||
FillPacket(5);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
// One packet should be removed (end of partition 2), 3 left.
|
||||
EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
|
||||
kFrameBufferSize,
|
||||
&fragmentation_),
|
||||
3*kPacketBufferSize);
|
||||
SCOPED_TRACE("Calling VerifyPartition");
|
||||
EXPECT_TRUE(VerifyPartition(0, 2, 1));
|
||||
SCOPED_TRACE("Calling VerifyPartition");
|
||||
EXPECT_TRUE(VerifyPartition(1, 1, 3));
|
||||
EXPECT_EQ(1, session_.packets_not_decodable());
|
||||
}
|
||||
|
||||
TEST_F(TestVP8Partitions, TwoPartitionsNoLossWrap) {
|
||||
// Partition 0 | Partition 1
|
||||
// [ fffd ] [ fffe ] | [ ffff ] [ 0 ]
|
||||
packet_header_.type.Video.isFirstPacket = true;
|
||||
vp8_header_->beginningOfPartition = true;
|
||||
vp8_header_->partitionId = 0;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber = 0xfffd;
|
||||
FillPacket(0);
|
||||
VCMPacket* packet = new VCMPacket(packet_buffer_, kPacketBufferSize,
|
||||
packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 0;
|
||||
vp8_header_->beginningOfPartition = false;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber += 1;
|
||||
FillPacket(1);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 1;
|
||||
vp8_header_->beginningOfPartition = true;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber += 1;
|
||||
FillPacket(2);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 1;
|
||||
vp8_header_->beginningOfPartition = false;
|
||||
packet_header_.header.markerBit = true;
|
||||
packet_header_.header.sequenceNumber += 1;
|
||||
FillPacket(3);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
// No packet should be removed.
|
||||
EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
|
||||
kFrameBufferSize,
|
||||
&fragmentation_),
|
||||
4*kPacketBufferSize);
|
||||
SCOPED_TRACE("Calling VerifyPartition");
|
||||
EXPECT_TRUE(VerifyPartition(0, 2, 0));
|
||||
SCOPED_TRACE("Calling VerifyPartition");
|
||||
EXPECT_TRUE(VerifyPartition(1, 2, 2));
|
||||
EXPECT_EQ(0, session_.packets_not_decodable());
|
||||
}
|
||||
|
||||
TEST_F(TestVP8Partitions, TwoPartitionsLossWrap) {
|
||||
// Partition 0 | Partition 1
|
||||
// [ fffd ] [ fffe ] | [ ffff ] [ 1 ]
|
||||
packet_header_.type.Video.isFirstPacket = true;
|
||||
vp8_header_->beginningOfPartition = true;
|
||||
vp8_header_->partitionId = 0;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber = 0xfffd;
|
||||
FillPacket(0);
|
||||
VCMPacket* packet = new VCMPacket(packet_buffer_, kPacketBufferSize,
|
||||
packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 0;
|
||||
vp8_header_->beginningOfPartition = false;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber += 1;
|
||||
FillPacket(1);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 1;
|
||||
vp8_header_->beginningOfPartition = true;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber += 1;
|
||||
FillPacket(2);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 1;
|
||||
vp8_header_->beginningOfPartition = false;
|
||||
packet_header_.header.markerBit = true;
|
||||
packet_header_.header.sequenceNumber += 2;
|
||||
FillPacket(3);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
// One packet should be removed from the last partition
|
||||
EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
|
||||
kFrameBufferSize,
|
||||
&fragmentation_),
|
||||
3*kPacketBufferSize);
|
||||
SCOPED_TRACE("Calling VerifyPartition");
|
||||
EXPECT_TRUE(VerifyPartition(0, 2, 0));
|
||||
SCOPED_TRACE("Calling VerifyPartition");
|
||||
EXPECT_TRUE(VerifyPartition(1, 1, 2));
|
||||
EXPECT_EQ(1, session_.packets_not_decodable());
|
||||
}
|
||||
|
||||
|
||||
TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
|
||||
// Partition 1 |Partition 2 | Partition 3
|
||||
// [ 1 ] [ 2 ] | | [ 5 ] | [ 6 ]
|
||||
packet_header_.type.Video.isFirstPacket = true;
|
||||
vp8_header_->beginningOfPartition = true;
|
||||
vp8_header_->partitionId = 0;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber = 1;
|
||||
FillPacket(1);
|
||||
VCMPacket* packet = new VCMPacket(packet_buffer_, kPacketBufferSize,
|
||||
packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 0;
|
||||
vp8_header_->beginningOfPartition = false;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber += 1;
|
||||
FillPacket(2);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 2;
|
||||
vp8_header_->beginningOfPartition = true;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber += 3;
|
||||
FillPacket(5);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 2;
|
||||
vp8_header_->beginningOfPartition = false;
|
||||
packet_header_.header.markerBit = true;
|
||||
packet_header_.header.sequenceNumber += 1;
|
||||
FillPacket(6);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
// No packet should be removed.
|
||||
EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
|
||||
kFrameBufferSize,
|
||||
&fragmentation_),
|
||||
4*kPacketBufferSize);
|
||||
SCOPED_TRACE("Calling VerifyPartition");
|
||||
EXPECT_TRUE(VerifyPartition(0, 2, 1));
|
||||
SCOPED_TRACE("Calling VerifyPartition");
|
||||
EXPECT_TRUE(VerifyPartition(2, 2, 5));
|
||||
EXPECT_EQ(0, session_.packets_not_decodable());
|
||||
}
|
||||
|
||||
TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
|
||||
// Partition 0 |Partition 1 | Partition 2
|
||||
// [ 1 ] [ 2 ] | [ 4 ] [ 5 ] | [ 6 ] [ 7 ]
|
||||
packet_header_.type.Video.isFirstPacket = true;
|
||||
vp8_header_->beginningOfPartition = true;
|
||||
vp8_header_->partitionId = 0;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber = 1;
|
||||
FillPacket(1);
|
||||
VCMPacket* packet = new VCMPacket(packet_buffer_, kPacketBufferSize,
|
||||
packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 0;
|
||||
vp8_header_->beginningOfPartition = false;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber += 1;
|
||||
FillPacket(2);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 1;
|
||||
vp8_header_->beginningOfPartition = false;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber += 2;
|
||||
FillPacket(4);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 1;
|
||||
vp8_header_->beginningOfPartition = false;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber += 1;
|
||||
FillPacket(5);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 2;
|
||||
vp8_header_->beginningOfPartition = true;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber += 1;
|
||||
FillPacket(6);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 2;
|
||||
vp8_header_->beginningOfPartition = false;
|
||||
packet_header_.header.markerBit = true;
|
||||
packet_header_.header.sequenceNumber += 1;
|
||||
FillPacket(7);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
// 2 partitions left. 2 packets removed from second partition
|
||||
EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
|
||||
kFrameBufferSize,
|
||||
&fragmentation_),
|
||||
4*kPacketBufferSize);
|
||||
SCOPED_TRACE("Calling VerifyPartition");
|
||||
EXPECT_TRUE(VerifyPartition(0, 2, 1));
|
||||
SCOPED_TRACE("Calling VerifyPartition");
|
||||
EXPECT_TRUE(VerifyPartition(2, 2, 6));
|
||||
EXPECT_EQ(2, session_.packets_not_decodable());
|
||||
}
|
||||
|
||||
TEST_F(TestVP8Partitions, AggregationOverTwoPackets) {
|
||||
// Partition 0 | Partition 1 | Partition 2
|
||||
// [ 0 | ] [ 1 ] | [ 2 ]
|
||||
packet_header_.type.Video.isFirstPacket = true;
|
||||
vp8_header_->beginningOfPartition = true;
|
||||
vp8_header_->partitionId = 0;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber = 0;
|
||||
FillPacket(0);
|
||||
VCMPacket* packet = new VCMPacket(packet_buffer_, kPacketBufferSize,
|
||||
packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 1;
|
||||
vp8_header_->beginningOfPartition = false;
|
||||
packet_header_.header.markerBit = false;
|
||||
packet_header_.header.sequenceNumber += 1;
|
||||
FillPacket(1);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
packet_header_.type.Video.isFirstPacket = false;
|
||||
vp8_header_->partitionId = 2;
|
||||
vp8_header_->beginningOfPartition = true;
|
||||
packet_header_.header.markerBit = true;
|
||||
packet_header_.header.sequenceNumber += 1;
|
||||
FillPacket(2);
|
||||
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
|
||||
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
delete packet;
|
||||
|
||||
// No packets removed.
|
||||
EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
|
||||
kFrameBufferSize,
|
||||
&fragmentation_),
|
||||
3*kPacketBufferSize);
|
||||
EXPECT_EQ(0, session_.packets_not_decodable());
|
||||
SCOPED_TRACE("Calling VerifyPartition");
|
||||
EXPECT_TRUE(VerifyPartition(0, 2, 0));
|
||||
// This partition is aggregated in partition 0
|
||||
SCOPED_TRACE("Calling VerifyPartition");
|
||||
EXPECT_TRUE(VerifyPartition(1, 0, 0));
|
||||
SCOPED_TRACE("Calling VerifyPartition");
|
||||
EXPECT_TRUE(VerifyPartition(2, 1, 2));
|
||||
}
|
||||
|
||||
TEST_F(TestNalUnits, OnlyReceivedEmptyPacket) {
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.completeNALU = kNaluComplete;
|
||||
packet_.frameType = kFrameEmpty;
|
||||
packet_.sizeBytes = 0;
|
||||
packet_.seqNum = 0;
|
||||
packet_.markerBit = false;
|
||||
ASSERT_EQ(0, session_.InsertPacket(packet_, frame_buffer_, false, 0));
|
||||
|
||||
EXPECT_EQ(0, session_.MakeDecodable());
|
||||
EXPECT_EQ(0, session_.SessionLength());
|
||||
EXPECT_EQ(0, session_.packets_not_decodable());
|
||||
}
|
||||
|
||||
TEST_F(TestNalUnits, OneIsolatedNaluLoss) {
|
||||
packet_.isFirstPacket = true;
|
||||
packet_.completeNALU = kNaluComplete;
|
||||
packet_.seqNum = 0;
|
||||
packet_.markerBit = false;
|
||||
FillPacket(0);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.completeNALU = kNaluComplete;
|
||||
packet_.seqNum += 2;
|
||||
packet_.markerBit = true;
|
||||
FillPacket(2);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
EXPECT_EQ(0, session_.MakeDecodable());
|
||||
EXPECT_EQ(2 * kPacketBufferSize, session_.SessionLength());
|
||||
EXPECT_EQ(0, session_.packets_not_decodable());
|
||||
SCOPED_TRACE("Calling VerifyNalu");
|
||||
EXPECT_TRUE(VerifyNalu(0, 1, 0));
|
||||
SCOPED_TRACE("Calling VerifyNalu");
|
||||
EXPECT_TRUE(VerifyNalu(1, 1, 2));
|
||||
}
|
||||
|
||||
TEST_F(TestNalUnits, LossInMiddleOfNalu) {
|
||||
packet_.isFirstPacket = true;
|
||||
packet_.completeNALU = kNaluComplete;
|
||||
packet_.seqNum = 0;
|
||||
packet_.markerBit = false;
|
||||
FillPacket(0);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.completeNALU = kNaluEnd;
|
||||
packet_.seqNum += 2;
|
||||
packet_.markerBit = true;
|
||||
FillPacket(2);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
EXPECT_EQ(kPacketBufferSize, session_.MakeDecodable());
|
||||
EXPECT_EQ(kPacketBufferSize, session_.SessionLength());
|
||||
EXPECT_EQ(1, session_.packets_not_decodable());
|
||||
SCOPED_TRACE("Calling VerifyNalu");
|
||||
EXPECT_TRUE(VerifyNalu(0, 1, 0));
|
||||
}
|
||||
|
||||
TEST_F(TestNalUnits, StartAndEndOfLastNalUnitLost) {
|
||||
packet_.isFirstPacket = true;
|
||||
packet_.completeNALU = kNaluComplete;
|
||||
packet_.seqNum = 0;
|
||||
packet_.markerBit = false;
|
||||
FillPacket(0);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.completeNALU = kNaluIncomplete;
|
||||
packet_.seqNum += 2;
|
||||
packet_.markerBit = false;
|
||||
FillPacket(1);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
EXPECT_EQ(kPacketBufferSize, session_.MakeDecodable());
|
||||
EXPECT_EQ(kPacketBufferSize, session_.SessionLength());
|
||||
EXPECT_EQ(1, session_.packets_not_decodable());
|
||||
SCOPED_TRACE("Calling VerifyNalu");
|
||||
EXPECT_TRUE(VerifyNalu(0, 1, 0));
|
||||
}
|
||||
|
||||
TEST_F(TestNalUnits, ReorderWrapNoLoss) {
|
||||
packet_.seqNum = 0xFFFF;
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.completeNALU = kNaluIncomplete;
|
||||
packet_.seqNum += 1;
|
||||
packet_.markerBit = false;
|
||||
FillPacket(1);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
packet_.isFirstPacket = true;
|
||||
packet_.completeNALU = kNaluComplete;
|
||||
packet_.seqNum -= 1;
|
||||
packet_.markerBit = false;
|
||||
FillPacket(0);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.completeNALU = kNaluEnd;
|
||||
packet_.seqNum += 2;
|
||||
packet_.markerBit = true;
|
||||
FillPacket(2);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
EXPECT_EQ(0, session_.MakeDecodable());
|
||||
EXPECT_EQ(0, session_.packets_not_decodable());
|
||||
EXPECT_EQ(3*kPacketBufferSize, session_.SessionLength());
|
||||
SCOPED_TRACE("Calling VerifyNalu");
|
||||
EXPECT_TRUE(VerifyNalu(0, 1, 0));
|
||||
}
|
||||
|
||||
TEST_F(TestNalUnits, WrapLosses) {
|
||||
packet_.seqNum = 0xFFFF;
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.completeNALU = kNaluIncomplete;
|
||||
packet_.markerBit = false;
|
||||
FillPacket(1);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.completeNALU = kNaluEnd;
|
||||
packet_.seqNum += 2;
|
||||
packet_.markerBit = true;
|
||||
FillPacket(2);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
EXPECT_EQ(2 * kPacketBufferSize, session_.MakeDecodable());
|
||||
EXPECT_EQ(0, session_.SessionLength());
|
||||
EXPECT_EQ(2, session_.packets_not_decodable());
|
||||
}
|
||||
|
||||
TEST_F(TestNalUnits, ReorderWrapLosses) {
|
||||
packet_.seqNum = 0xFFFF;
|
||||
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.completeNALU = kNaluEnd;
|
||||
packet_.seqNum += 2;
|
||||
packet_.markerBit = true;
|
||||
FillPacket(2);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
packet_.seqNum -= 2;
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.completeNALU = kNaluIncomplete;
|
||||
packet_.markerBit = false;
|
||||
FillPacket(1);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
EXPECT_EQ(2 * kPacketBufferSize, session_.MakeDecodable());
|
||||
EXPECT_EQ(0, session_.SessionLength());
|
||||
EXPECT_EQ(2, session_.packets_not_decodable());
|
||||
}
|
||||
|
||||
TEST_F(TestNackList, NoLosses) {
|
||||
uint16_t low = 0xFFFF - 5;
|
||||
|
||||
packet_.seqNum = low;
|
||||
packet_.isFirstPacket = true;
|
||||
packet_.markerBit = false;
|
||||
FillPacket(0);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
for (int i = 1; i < 9; ++i) {
|
||||
packet_.seqNum += 1;
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.markerBit = false;
|
||||
FillPacket(i + 1);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
}
|
||||
|
||||
packet_.seqNum += 1;
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.markerBit = true;
|
||||
FillPacket(10);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
EXPECT_EQ(10 * kPacketBufferSize, session_.SessionLength());
|
||||
BuildSeqNumList(low, packet_.seqNum);
|
||||
EXPECT_EQ(0, session_.BuildHardNackList(seq_num_list_, seq_num_list_length_));
|
||||
EXPECT_FALSE(session_.session_nack());
|
||||
SCOPED_TRACE("Calling VerifyAll");
|
||||
VerifyAll(-1);
|
||||
|
||||
BuildSeqNumList(low, packet_.seqNum);
|
||||
EXPECT_EQ(0, session_.BuildSoftNackList(seq_num_list_, seq_num_list_length_,
|
||||
60));
|
||||
SCOPED_TRACE("Calling VerifyAll");
|
||||
VerifyAll(-1);
|
||||
}
|
||||
|
||||
TEST_F(TestNackList, FiveLossesSpreadOut) {
|
||||
uint16_t low = 0xFFFF - 5;
|
||||
|
||||
packet_.seqNum = low;
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.markerBit = true;
|
||||
FillPacket(0);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
for (int i = 1; i < 9; ++i) {
|
||||
packet_.seqNum += 1;
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.markerBit = false;
|
||||
FillPacket(i);
|
||||
if ((i + 1) % 2)
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
}
|
||||
|
||||
packet_.seqNum++; // Simulate loss of last packet.
|
||||
|
||||
EXPECT_EQ(5 * kPacketBufferSize, session_.SessionLength());
|
||||
BuildSeqNumList(low, packet_.seqNum);
|
||||
EXPECT_EQ(0, session_.BuildHardNackList(seq_num_list_, seq_num_list_length_));
|
||||
for (int i = 0; i < seq_num_list_length_; ++i) {
|
||||
if (i % 2)
|
||||
EXPECT_EQ(static_cast<uint16_t>(low + i), seq_num_list_[i]);
|
||||
else
|
||||
EXPECT_EQ(-1, seq_num_list_[i]);
|
||||
}
|
||||
|
||||
BuildSeqNumList(low, packet_.seqNum);
|
||||
EXPECT_EQ(0, session_.BuildSoftNackList(seq_num_list_, seq_num_list_length_,
|
||||
60));
|
||||
EXPECT_EQ(true, session_.session_nack());
|
||||
for (int i = 0; i < seq_num_list_length_; ++i) {
|
||||
if (i % 2)
|
||||
EXPECT_EQ(static_cast<uint16_t>(low + i), seq_num_list_[i]);
|
||||
else
|
||||
EXPECT_EQ(-1, seq_num_list_[i]);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(TestNackList, FirstAndLastLost) {
|
||||
uint16_t low = 0xFFFF;
|
||||
|
||||
packet_.seqNum = low + 1;
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.markerBit = false;
|
||||
FillPacket(0);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
|
||||
kPacketBufferSize);
|
||||
|
||||
EXPECT_EQ(kPacketBufferSize, session_.SessionLength());
|
||||
BuildSeqNumList(low, packet_.seqNum + 1);
|
||||
EXPECT_EQ(0, session_.BuildHardNackList(seq_num_list_, seq_num_list_length_));
|
||||
EXPECT_EQ(0xFFFF, seq_num_list_[0]);
|
||||
EXPECT_EQ(-1, seq_num_list_[1]);
|
||||
EXPECT_EQ(1, seq_num_list_[2]);
|
||||
|
||||
BuildSeqNumList(low, packet_.seqNum + 1);
|
||||
EXPECT_EQ(0, session_.BuildSoftNackList(seq_num_list_,seq_num_list_length_,
|
||||
60));
|
||||
EXPECT_EQ(true, session_.session_nack());
|
||||
EXPECT_EQ(0xFFFF, seq_num_list_[0]);
|
||||
EXPECT_EQ(-1, seq_num_list_[1]);
|
||||
EXPECT_EQ(1, seq_num_list_[2]);
|
||||
}
|
||||
|
||||
TEST_F(TestNackList, LostAllButEmptyPackets) {
|
||||
uint16_t low = 0;
|
||||
packet_.seqNum = low + 1;
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.markerBit = false;
|
||||
packet_.frameType = kFrameEmpty;
|
||||
packet_.sizeBytes = 0;
|
||||
FillPacket(0);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0), 0);
|
||||
|
||||
packet_.seqNum = low + 3;
|
||||
packet_.isFirstPacket = false;
|
||||
packet_.markerBit = false;
|
||||
packet_.frameType = kFrameEmpty;
|
||||
packet_.sizeBytes = 0;
|
||||
FillPacket(0);
|
||||
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0), 0);
|
||||
|
||||
EXPECT_EQ(0, session_.SessionLength());
|
||||
BuildSeqNumList(low, packet_.seqNum + 1);
|
||||
EXPECT_EQ(0, session_.BuildSoftNackList(seq_num_list_, seq_num_list_length_,
|
||||
60));
|
||||
EXPECT_EQ(true, session_.session_nack());
|
||||
EXPECT_EQ(0, seq_num_list_[0]);
|
||||
EXPECT_EQ(-1, seq_num_list_[1]);
|
||||
EXPECT_EQ(-2, seq_num_list_[2]);
|
||||
EXPECT_EQ(-2, seq_num_list_[3]);
|
||||
EXPECT_EQ(4, seq_num_list_[4]);
|
||||
}
|
||||
} // namespace webrtc
|
||||
36
webrtc/modules/video_coding/main/source/tick_time_base.h
Normal file
36
webrtc/modules/video_coding/main/source/tick_time_base.h
Normal file
@ -0,0 +1,36 @@
|
||||
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TICK_TIME_BASE_H_
#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TICK_TIME_BASE_H_

#include "system_wrappers/interface/tick_util.h"

namespace webrtc {

// This class provides a mockable wrapper to TickTime.
// The default implementation forwards to the static TickTime clock; tests
// can subclass and override the virtual methods to supply a fake clock.
class TickTimeBase {
 public:
  virtual ~TickTimeBase() {}

  // "Now" in milliseconds.
  virtual int64_t MillisecondTimestamp() const {
    return TickTime::MillisecondTimestamp();
  }

  // "Now" in microseconds.
  virtual int64_t MicrosecondTimestamp() const {
    return TickTime::MicrosecondTimestamp();
  }
};

}  // namespace webrtc

#endif  // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TICK_TIME_BASE_H_
|
||||
@ -0,0 +1,262 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "internal_defines.h"
|
||||
#include "modules/video_coding/main/source/tick_time_base.h"
|
||||
#include "timestamp_extrapolator.h"
|
||||
#include "trace.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Constructs the extrapolator and immediately resets the filter state,
// anchoring the start time at the clock's current millisecond timestamp.
// |clock| is borrowed, not owned; it must outlive this object.
// |vcmId| and |id| are only used to tag trace output.
VCMTimestampExtrapolator::VCMTimestampExtrapolator(TickTimeBase* clock,
                                                   WebRtc_Word32 vcmId,
                                                   WebRtc_Word32 id)
:
_rwLock(RWLockWrapper::CreateRWLock()),
_vcmId(vcmId),
_id(id),
_clock(clock),
_startMs(0),
_firstTimestamp(0),
_wrapArounds(0),
_prevTs90khz(0),
_lambda(1),                        // forgetting factor of the RLS filter (1 = no forgetting)
_firstAfterReset(true),
_packetCount(0),
_startUpFilterDelayInPackets(2),   // number of samples before the filter estimate is trusted
_detectorAccumulatorPos(0),
_detectorAccumulatorNeg(0),
_alarmThreshold(60e3),             // CUSUM alarm threshold, in 90 kHz timestamp ticks
_accDrift(6600), // in timestamp ticks, i.e. 15 ms
_accMaxError(7000),                // per-sample error clamp fed into the CUSUM accumulators
_P11(1e10)                         // initial (and post-alarm) offset-uncertainty in P
{
    Reset(_clock->MillisecondTimestamp());
}

// Releases the lock created in the constructor. _clock is not owned and is
// deliberately not deleted here.
VCMTimestampExtrapolator::~VCMTimestampExtrapolator()
{
    delete _rwLock;
}
|
||||
|
||||
// Re-initializes the whole filter state. |nowMs| becomes the new time
// origin; pass -1 (the default) to use the clock's current time instead.
// Takes the write lock, so it is safe to call concurrently with the
// other public methods.
void
VCMTimestampExtrapolator::Reset(const WebRtc_Word64 nowMs /* = -1 */)
{
    WriteLockScoped wl(*_rwLock);
    if (nowMs > -1)
    {
        _startMs = nowMs;
    }
    else
    {
        _startMs = _clock->MillisecondTimestamp();
    }
    _prevMs = _startMs;
    _firstTimestamp = 0;
    // Initial model: slope 90 ticks/ms (the nominal 90 kHz RTP video clock)
    // with zero offset.
    _w[0] = 90.0;
    _w[1] = 0;
    // Covariance: the slope is assumed nearly correct (small variance),
    // while the offset is completely unknown (_P11 is huge).
    _P[0][0] = 1;
    _P[1][1] = _P11;
    _P[0][1] = _P[1][0] = 0;
    _firstAfterReset = true;
    _prevTs90khz = 0;
    _wrapArounds = 0;
    _packetCount = 0;
    _detectorAccumulatorPos = 0;
    _detectorAccumulatorNeg = 0;
}
|
||||
|
||||
// Feeds one (receive time, RTP timestamp) observation into the recursive
// least-squares filter that models ts90khz ~= _w[0] * tMs + _w[1].
// |tMs| is local receive time in ms, |ts90khz| the frame's 90 kHz RTP
// timestamp. |trace| enables debug trace output.
void
VCMTimestampExtrapolator::Update(WebRtc_Word64 tMs, WebRtc_UWord32 ts90khz, bool trace)
{

    _rwLock->AcquireLockExclusive();
    if (tMs - _prevMs > 10e3)
    {
        // Ten seconds without a complete frame.
        // Reset the extrapolator. The lock must be dropped because Reset()
        // acquires the write lock itself.
        _rwLock->ReleaseLockExclusive();
        Reset();
        _rwLock->AcquireLockExclusive();
    }
    else
    {
        _prevMs = tMs;
    }

    // Remove offset to prevent badly scaled matrices
    tMs -= _startMs;

    WebRtc_Word32 prevWrapArounds = _wrapArounds;
    CheckForWrapArounds(ts90khz);
    WebRtc_Word32 wrapAroundsSincePrev = _wrapArounds - prevWrapArounds;

    // A timestamp going backwards without a detected wraparound means the
    // frame is reordered/late; skip it rather than corrupting the filter.
    if (wrapAroundsSincePrev == 0 && ts90khz < _prevTs90khz)
    {
        _rwLock->ReleaseLockExclusive();
        return;
    }

    if (_firstAfterReset)
    {
        // Make an initial guess of the offset,
        // should be almost correct since tMs - _startMs
        // should about zero at this time.
        _w[1] = -_w[0] * tMs;
        _firstTimestamp = ts90khz;
        _firstAfterReset = false;
    }

    // Compensate for wraparounds by changing the line offset
    _w[1] = _w[1] - wrapAroundsSincePrev * ((static_cast<WebRtc_Word64>(1)<<32) - 1);

    // Prediction error of the current model for this observation.
    double residual = (static_cast<double>(ts90khz) - _firstTimestamp) - static_cast<double>(tMs) * _w[0] - _w[1];
    if (DelayChangeDetection(residual, trace) &&
        _packetCount >= _startUpFilterDelayInPackets)
    {
        // A sudden change of average network delay has been detected.
        // Force the filter to adjust its offset parameter by changing
        // the offset uncertainty. Don't do this during startup.
        _P[1][1] = _P11;
    }
    // Standard RLS update with regressor T = [t(k) 1]':
    //T = [t(k) 1]';
    //that = T'*w;
    //K = P*T/(lambda + T'*P*T);
    double K[2];
    K[0] = _P[0][0] * tMs + _P[0][1];
    K[1] = _P[1][0] * tMs + _P[1][1];
    double TPT = _lambda + tMs * K[0] + K[1];
    K[0] /= TPT;
    K[1] /= TPT;
    //w = w + K*(ts(k) - that);
    _w[0] = _w[0] + K[0] * residual;
    _w[1] = _w[1] + K[1] * residual;
    //P = 1/lambda*(P - K*T'*P);
    // p00/p01 are staged in temporaries because the P[1][*] updates below
    // still read the old P[0][*] values.
    double p00 = 1 / _lambda * (_P[0][0] - (K[0] * tMs * _P[0][0] + K[0] * _P[1][0]));
    double p01 = 1 / _lambda * (_P[0][1] - (K[0] * tMs * _P[0][1] + K[0] * _P[1][1]));
    _P[1][0] = 1 / _lambda * (_P[1][0] - (K[1] * tMs * _P[0][0] + K[1] * _P[1][0]));
    _P[1][1] = 1 / _lambda * (_P[1][1] - (K[1] * tMs * _P[0][1] + K[1] * _P[1][1]));
    _P[0][0] = p00;
    _P[0][1] = p01;
    if (_packetCount < _startUpFilterDelayInPackets)
    {
        _packetCount++;
    }
    if (trace)
    {
        // NOTE(review): tMs is 64-bit here but is formatted with %u —
        // looks like a varargs/format mismatch on LP64 targets; confirm.
        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _id), "w[0]=%f w[1]=%f ts=%u tMs=%u", _w[0], _w[1], ts90khz, tMs);
    }
    _rwLock->ReleaseLockExclusive();
}
|
||||
|
||||
// Predicts the 90 kHz RTP timestamp corresponding to local time |tMs|.
// Returns 0 before any observation; uses the nominal 90 ticks/ms slope
// until the filter has seen _startUpFilterDelayInPackets samples, and the
// fitted model thereafter. The +0.5 terms round to nearest.
WebRtc_UWord32
VCMTimestampExtrapolator::ExtrapolateTimestamp(WebRtc_Word64 tMs) const
{
    ReadLockScoped rl(*_rwLock);
    WebRtc_UWord32 timestamp = 0;
    if (_packetCount == 0)
    {
        timestamp = 0;
    }
    else if (_packetCount < _startUpFilterDelayInPackets)
    {
        timestamp = static_cast<WebRtc_UWord32>(90.0 * (tMs - _prevMs) + _prevTs90khz + 0.5);
    }
    else
    {
        timestamp = static_cast<WebRtc_UWord32>(_w[0] * (tMs - _startMs) + _w[1] + _firstTimestamp + 0.5);
    }
    return timestamp;
}

// Inverse of ExtrapolateTimestamp: predicts the local wall-clock time (ms)
// at which a frame with RTP timestamp |timestamp90khz| is expected.
// Returns -1 before any observation has been filtered.
WebRtc_Word64
VCMTimestampExtrapolator::ExtrapolateLocalTime(WebRtc_UWord32 timestamp90khz) const
{
    ReadLockScoped rl(*_rwLock);
    WebRtc_Word64 localTimeMs = 0;
    if (_packetCount == 0)
    {
        localTimeMs = -1;
    }
    else if (_packetCount < _startUpFilterDelayInPackets)
    {
        // Startup: assume the nominal 90 kHz clock rate.
        localTimeMs = _prevMs + static_cast<WebRtc_Word64>(static_cast<double>(timestamp90khz - _prevTs90khz) / 90.0 + 0.5);
    }
    else
    {
        if (_w[0] < 1e-3)
        {
            // Degenerate slope; inverting the model would divide by ~0.
            localTimeMs = _startMs;
        }
        else
        {
            double timestampDiff = static_cast<double>(timestamp90khz) - static_cast<double>(_firstTimestamp);
            localTimeMs = static_cast<WebRtc_Word64>(static_cast<double>(_startMs) + (timestampDiff - _w[1]) / _w[0] + 0.5);
        }
    }
    return localTimeMs;
}
|
||||
|
||||
// Investigates if the timestamp clock has overflowed since the last timestamp and
// keeps track of the number of wrap arounds since reset.
// Must be called with the write lock held (callers in Update() hold it).
void
VCMTimestampExtrapolator::CheckForWrapArounds(WebRtc_UWord32 ts90khz)
{
    if (_prevTs90khz == 0)
    {
        // First sample after reset; nothing to compare against.
        _prevTs90khz = ts90khz;
        return;
    }
    if (ts90khz < _prevTs90khz)
    {
        // This difference will probably be less than -2^31 if we have had a wrap around
        // (e.g. timestamp = 1, _previousTimestamp = 2^32 - 1). Since it is casted to a Word32,
        // it should be positive.
        if (static_cast<WebRtc_Word32>(ts90khz - _prevTs90khz) > 0)
        {
            // Forward wrap around
            _wrapArounds++;
        }
    }
    // This difference will probably be less than -2^31 if we have had a backward wrap around.
    // Since it is casted to a Word32, it should be positive.
    else if (static_cast<WebRtc_Word32>(_prevTs90khz - ts90khz) > 0)
    {
        // Backward wrap around
        _wrapArounds--;
    }
    _prevTs90khz = ts90khz;
}

// CUSUM detector for sudden network-delay changes. |error| is the filter
// residual in 90 kHz ticks. Returns true (and clears both accumulators)
// when either accumulator crosses _alarmThreshold.
bool
VCMTimestampExtrapolator::DelayChangeDetection(double error, bool trace)
{
    // CUSUM detection of sudden delay changes
    // Clamp the per-sample contribution so a single outlier cannot trip
    // the alarm by itself.
    error = (error > 0) ? VCM_MIN(error, _accMaxError) : VCM_MAX(error, -_accMaxError);
    _detectorAccumulatorPos = VCM_MAX(_detectorAccumulatorPos + error - _accDrift, (double)0);
    _detectorAccumulatorNeg = VCM_MIN(_detectorAccumulatorNeg + error + _accDrift, (double)0);
    if (_detectorAccumulatorPos > _alarmThreshold || _detectorAccumulatorNeg < -_alarmThreshold)
    {
        // Alarm
        if (trace)
        {
            WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _id), "g1=%f g2=%f alarm=1", _detectorAccumulatorPos, _detectorAccumulatorNeg);
        }
        _detectorAccumulatorPos = _detectorAccumulatorNeg = 0;
        return true;
    }
    if (trace)
    {
        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _id), "g1=%f g2=%f alarm=0", _detectorAccumulatorPos, _detectorAccumulatorNeg);
    }
    return false;
}
|
||||
|
||||
}
|
||||
@ -0,0 +1,63 @@
|
||||
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_EXTRAPOLATOR_H_
#define WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_EXTRAPOLATOR_H_

#include "typedefs.h"
#include "rw_lock_wrapper.h"

namespace webrtc
{

class TickTimeBase;

// Maps between 90 kHz RTP timestamps and local receive time (ms) by
// fitting a line ts ~= w[0] * t + w[1] with a recursive least-squares
// filter, handling 32-bit timestamp wraparound and detecting sudden
// network-delay changes. Thread-safe: all public methods take _rwLock.
class VCMTimestampExtrapolator
{
public:
    // |clock| is borrowed and must outlive this object; |vcmId| and
    // |receiverId| only tag trace output.
    VCMTimestampExtrapolator(TickTimeBase* clock,
                             WebRtc_Word32 vcmId = 0,
                             WebRtc_Word32 receiverId = 0);
    ~VCMTimestampExtrapolator();
    // Feed one (receive time, RTP timestamp) observation into the filter.
    void Update(WebRtc_Word64 tMs, WebRtc_UWord32 ts90khz, bool trace = true);
    // Predict the RTP timestamp for local time |tMs| (0 before any data).
    WebRtc_UWord32 ExtrapolateTimestamp(WebRtc_Word64 tMs) const;
    // Predict the local time (ms) for RTP timestamp |timestamp90khz|
    // (-1 before any data).
    WebRtc_Word64 ExtrapolateLocalTime(WebRtc_UWord32 timestamp90khz) const;
    // Re-initialize; |nowMs| = -1 means "use the clock's current time".
    void Reset(WebRtc_Word64 nowMs = -1);

private:
    // Updates _wrapArounds based on the jump from _prevTs90khz.
    void CheckForWrapArounds(WebRtc_UWord32 ts90khz);
    // CUSUM detector on the filter residual; true on alarm.
    bool DelayChangeDetection(double error, bool trace = true);
    RWLockWrapper*        _rwLock;      // owned; guards all mutable state
    WebRtc_Word32         _vcmId;
    WebRtc_Word32         _id;
    TickTimeBase*         _clock;       // not owned
    double                _w[2];        // model: slope (ticks/ms) and offset
    double                _P[2][2];     // parameter covariance
    WebRtc_Word64         _startMs;     // time origin of the model
    WebRtc_Word64         _prevMs;
    WebRtc_UWord32        _firstTimestamp;
    WebRtc_Word32         _wrapArounds; // net wraparounds since reset
    WebRtc_UWord32        _prevTs90khz;
    const double          _lambda;      // RLS forgetting factor
    bool                  _firstAfterReset;
    WebRtc_UWord32        _packetCount;
    const WebRtc_UWord32  _startUpFilterDelayInPackets;

    // CUSUM delay-change detector state and constants.
    double                _detectorAccumulatorPos;
    double                _detectorAccumulatorNeg;
    const double          _alarmThreshold;
    const double          _accDrift;
    const double          _accMaxError;
    const double          _P11;         // initial offset uncertainty
};

} // namespace webrtc

#endif // WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_EXTRAPOLATOR_H_
|
||||
99
webrtc/modules/video_coding/main/source/timestamp_map.cc
Normal file
99
webrtc/modules/video_coding/main/source/timestamp_map.cc
Normal file
@ -0,0 +1,99 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "timestamp_map.h"
|
||||
#include <stdlib.h>
|
||||
#include <assert.h>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Constructor. Optional parameter specifies maximum number of
// coexisting timers. Non-positive lengths fall back to a default
// capacity of 10. The backing array is heap-allocated and owned.
VCMTimestampMap::VCMTimestampMap(WebRtc_Word32 length):
    _nextAddIx(0),
    _nextPopIx(0)
{
    if (length <= 0)
    {
        // default
        length = 10;
    }

    _map = new VCMTimestampDataTuple[length];
    _length = length;
}

// Destructor. Frees the ring buffer; the void* payloads stored via Add()
// are NOT owned and are not freed here.
VCMTimestampMap::~VCMTimestampMap()
{
    delete [] _map;
}
|
||||
|
||||
// Empty the list of timers.
// Resets both ring-buffer cursors; no entries are freed.
void
VCMTimestampMap::Reset()
{
    _nextAddIx = 0;
    _nextPopIx = 0;
}

// Appends a (timestamp, data) tuple at the write cursor of the circular
// buffer. Returns 0 on success; returns -1 when the buffer was full, in
// which case the oldest entry is silently dropped to make room.
WebRtc_Word32
VCMTimestampMap::Add(WebRtc_UWord32 timestamp, void* data)
{
    _map[_nextAddIx].timestamp = timestamp;
    _map[_nextAddIx].data = data;
    _nextAddIx = (_nextAddIx + 1) % _length;

    if (_nextAddIx == _nextPopIx)
    {
        // Circular list full; forget oldest entry
        _nextPopIx = (_nextPopIx + 1) % _length;
        return -1;
    }
    return 0;
}
|
||||
|
||||
// Scans forward from the read cursor for an entry matching |timestamp|,
// discarding every older entry passed over. Returns the stored data
// pointer on a match (the entry is consumed), or NULL if the timestamp
// is absent.
// NOTE(review): the early-out on `timestamp > timestamp` below assumes
// timestamps are monotonically increasing and do not wrap around 2^32 —
// confirm against the callers' timestamp domain.
void*
VCMTimestampMap::Pop(WebRtc_UWord32 timestamp)
{
    while (!IsEmpty())
    {
        if (_map[_nextPopIx].timestamp == timestamp)
        {
            // found start time for this timestamp
            void* data = _map[_nextPopIx].data;
            _map[_nextPopIx].data = NULL;
            _nextPopIx = (_nextPopIx + 1) % _length;
            return data;
        }
        else if (_map[_nextPopIx].timestamp > timestamp)
        {
            // the timestamp we are looking for is not in the list
            assert(_nextPopIx < _length && _nextPopIx >= 0);
            return NULL;
        }

        // not in this position, check next (and forget this position)
        _nextPopIx = (_nextPopIx + 1) % _length;
    }

    // could not find matching timestamp in list
    assert(_nextPopIx < _length && _nextPopIx >= 0);
    return NULL;
}

// Check if no timers are currently running
// (true when the read and write cursors coincide).
bool
VCMTimestampMap::IsEmpty() const
{
    return (_nextAddIx == _nextPopIx);
}
|
||||
|
||||
}
|
||||
52
webrtc/modules/video_coding/main/source/timestamp_map.h
Normal file
52
webrtc/modules/video_coding/main/source/timestamp_map.h
Normal file
@ -0,0 +1,52 @@
|
||||
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
#define WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_

#include "typedefs.h"

namespace webrtc
{

// One entry of the timestamp map: an RTP timestamp paired with an opaque,
// caller-owned payload pointer.
struct VCMTimestampDataTuple
{
    WebRtc_UWord32    timestamp;
    void*             data;
};

// Fixed-capacity circular buffer mapping RTP timestamps to opaque data
// pointers, consumed in FIFO order via Pop(). Not thread-safe; callers
// must provide their own synchronization.
class VCMTimestampMap
{
public:
    // Constructor. Optional parameter specifies maximum number of
    // timestamps in map.
    VCMTimestampMap(const WebRtc_Word32 length = 10);

    // Destructor.
    ~VCMTimestampMap();

    // Empty the map
    void Reset();

    // Add a tuple; returns -1 if the buffer was full (oldest entry dropped).
    WebRtc_Word32 Add(WebRtc_UWord32 timestamp, void* data);
    // Retrieve and remove the entry for |timestamp|; NULL if not found.
    void* Pop(WebRtc_UWord32 timestamp);

private:
    bool IsEmpty() const;

    VCMTimestampDataTuple* _map;       // owned ring buffer of _length entries
    WebRtc_Word32          _nextAddIx; // write cursor
    WebRtc_Word32          _nextPopIx; // read cursor
    WebRtc_Word32          _length;    // ring-buffer capacity
};

} // namespace webrtc

#endif // WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
|
||||
337
webrtc/modules/video_coding/main/source/timing.cc
Normal file
337
webrtc/modules/video_coding/main/source/timing.cc
Normal file
@ -0,0 +1,337 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "trace.h"
|
||||
#include "internal_defines.h"
|
||||
#include "jitter_buffer_common.h"
|
||||
#include "timing.h"
|
||||
#include "timestamp_extrapolator.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Constructs the timing component. When |masterTiming| is NULL this
// instance is the "master": it allocates and owns the timestamp
// extrapolator. Otherwise it shares the master's extrapolator (not owned)
// — the master must outlive this instance.
VCMTiming::VCMTiming(TickTimeBase* clock,
                     WebRtc_Word32 vcmId,
                     WebRtc_Word32 timingId,
                     VCMTiming* masterTiming)
:
_critSect(CriticalSectionWrapper::CreateCriticalSection()),
_vcmId(vcmId),
_clock(clock),
_timingId(timingId),
_master(false),
_tsExtrapolator(),
_codecTimer(),
_renderDelayMs(kDefaultRenderDelayMs),
_minTotalDelayMs(0),
_requiredDelayMs(0),
_currentDelayMs(0),
_prevFrameTimestamp(0)
{
    if (masterTiming == NULL)
    {
        _master = true;
        _tsExtrapolator = new VCMTimestampExtrapolator(_clock, vcmId, timingId);
    }
    else
    {
        _tsExtrapolator = masterTiming->_tsExtrapolator;
    }
}

// Destructor: the extrapolator is deleted only by the master instance
// that allocated it; the critical section is always owned and deleted.
VCMTiming::~VCMTiming()
{
    if (_master)
    {
        delete _tsExtrapolator;
    }
    delete _critSect;
}
|
||||
|
||||
// Resets the timing to its initial state: the shared extrapolator, the
// codec timer and all delay bookkeeping. |nowMs| = -1 (the default) lets
// the extrapolator read the clock itself.
void
VCMTiming::Reset(WebRtc_Word64 nowMs /* = -1 */)
{
    CriticalSectionScoped cs(_critSect);
    if (nowMs > -1)
    {
        _tsExtrapolator->Reset(nowMs);
    }
    else
    {
        _tsExtrapolator->Reset();
    }
    _codecTimer.Reset();
    _renderDelayMs = kDefaultRenderDelayMs;
    _minTotalDelayMs = 0;
    _requiredDelayMs = 0;
    _currentDelayMs = 0;
    _prevFrameTimestamp = 0;
}

// Resets only the decode-time statistics, leaving delay state intact.
// NOTE(review): unlike the other mutators this does not take _critSect —
// confirm callers guarantee exclusion.
void VCMTiming::ResetDecodeTime()
{
    _codecTimer.Reset();
}
|
||||
|
||||
// Sets the time (ms) assumed to be needed to render an image.
void
VCMTiming::SetRenderDelay(WebRtc_UWord32 renderDelayMs)
{
    CriticalSectionScoped cs(_critSect);
    _renderDelayMs = renderDelayMs;
}

// Sets the minimum total delay (ms) required to keep video in sync with
// audio; acts as a floor on the target delay computed elsewhere.
void
VCMTiming::SetMinimumTotalDelay(WebRtc_UWord32 minTotalDelayMs)
{
    CriticalSectionScoped cs(_critSect);
    _minTotalDelayMs = minTotalDelayMs;
}

// Sets the delay (ms) the jitter buffer requires; traced (master only)
// when the value actually changes.
void
VCMTiming::SetRequiredDelay(WebRtc_UWord32 requiredDelayMs)
{
    CriticalSectionScoped cs(_critSect);
    if (requiredDelayMs != _requiredDelayMs)
    {
        if (_master)
        {
            WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
                    "Desired jitter buffer level: %u ms", requiredDelayMs);
        }
        _requiredDelayMs = requiredDelayMs;
    }
}
|
||||
|
||||
// Moves _currentDelayMs toward the target delay, rate-limited to
// kDelayMaxChangeMsPerS per second of media time. The elapsed media time
// is derived from the 90 kHz timestamp difference between this frame and
// the previous one, with explicit handling of 32-bit wraparound.
void VCMTiming::UpdateCurrentDelay(WebRtc_UWord32 frameTimestamp)
{
    CriticalSectionScoped cs(_critSect);
    WebRtc_UWord32 targetDelayMs = TargetDelayInternal();

    // Make sure we try to sync with audio
    if (targetDelayMs < _minTotalDelayMs)
    {
        targetDelayMs = _minTotalDelayMs;
    }

    if (_currentDelayMs == 0)
    {
        // Not initialized, set current delay to target.
        _currentDelayMs = targetDelayMs;
    }
    else if (targetDelayMs != _currentDelayMs)
    {
        WebRtc_Word64 delayDiffMs = static_cast<WebRtc_Word64>(targetDelayMs) -
                                    _currentDelayMs;
        // Never change the delay with more than 100 ms every second. If we're changing the
        // delay in too large steps we will get noticable freezes. By limiting the change we
        // can increase the delay in smaller steps, which will be experienced as the video is
        // played in slow motion. When lowering the delay the video will be played at a faster
        // pace.
        WebRtc_Word64 maxChangeMs = 0;
        if (frameTimestamp < 0x0000ffff && _prevFrameTimestamp > 0xffff0000)
        {
            // wrap: un-wrap the new timestamp by adding 2^32 before
            // computing the elapsed 90 kHz ticks.
            maxChangeMs = kDelayMaxChangeMsPerS * (frameTimestamp +
                         (static_cast<WebRtc_Word64>(1)<<32) - _prevFrameTimestamp) / 90000;
        }
        else
        {
            maxChangeMs = kDelayMaxChangeMsPerS *
                          (frameTimestamp - _prevFrameTimestamp) / 90000;
        }
        if (maxChangeMs <= 0)
        {
            // Any changes less than 1 ms are truncated and
            // will be postponed. Negative change will be due
            // to reordering and should be ignored.
            return;
        }
        else if (delayDiffMs < -maxChangeMs)
        {
            delayDiffMs = -maxChangeMs;
        }
        else if (delayDiffMs > maxChangeMs)
        {
            delayDiffMs = maxChangeMs;
        }
        _currentDelayMs = _currentDelayMs + static_cast<WebRtc_Word32>(delayDiffMs);
    }
    _prevFrameTimestamp = frameTimestamp;
}
|
||||
|
||||
// Increases _currentDelayMs when a frame was decoded later than its
// deadline (render time minus decode and render delays). Early frames
// (delayedMs < 0) are ignored; the delay never grows past the target.
void VCMTiming::UpdateCurrentDelay(WebRtc_Word64 renderTimeMs,
                                   WebRtc_Word64 actualDecodeTimeMs)
{
    CriticalSectionScoped cs(_critSect);
    WebRtc_UWord32 targetDelayMs = TargetDelayInternal();
    // Make sure we try to sync with audio
    if (targetDelayMs < _minTotalDelayMs)
    {
        targetDelayMs = _minTotalDelayMs;
    }
    // How late the decode started relative to its deadline.
    WebRtc_Word64 delayedMs = actualDecodeTimeMs -
                              (renderTimeMs - MaxDecodeTimeMs() - _renderDelayMs);
    if (delayedMs < 0)
    {
        return;
    }
    else if (_currentDelayMs + delayedMs <= targetDelayMs)
    {
        _currentDelayMs += static_cast<WebRtc_UWord32>(delayedMs);
    }
    else
    {
        _currentDelayMs = targetDelayMs;
    }
}
|
||||
|
||||
// Stops the per-frame decode timer and feeds the measured decode time
// into _codecTimer. A negative elapsed time is a clock error and asserts.
// Always returns 0.
WebRtc_Word32
VCMTiming::StopDecodeTimer(WebRtc_UWord32 timeStamp,
                           WebRtc_Word64 startTimeMs,
                           WebRtc_Word64 nowMs)
{
    CriticalSectionScoped cs(_critSect);
    const WebRtc_Word32 maxDecTime = MaxDecodeTimeMs();
    WebRtc_Word32 timeDiffMs = _codecTimer.StopTimer(startTimeMs, nowMs);
    if (timeDiffMs < 0)
    {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
            "Codec timer error: %d", timeDiffMs);
        assert(false);
    }

    if (_master)
    {
        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
                "Frame decoded: timeStamp=%u decTime=%d maxDecTime=%u, at %u",
                timeStamp, timeDiffMs, maxDecTime, MaskWord64ToUWord32(nowMs));
    }
    return 0;
}

// Reports that a frame has been handed to decoding; updates the shared
// timestamp extrapolator (traced only from the master instance).
void
VCMTiming::IncomingTimestamp(WebRtc_UWord32 timeStamp, WebRtc_Word64 nowMs)
{
    CriticalSectionScoped cs(_critSect);
    _tsExtrapolator->Update(nowMs, timeStamp, _master);
}
|
||||
|
||||
// Public wrapper around RenderTimeMsInternal(): computes when the frame
// with |frameTimestamp| should be rendered given the current time |nowMs|.
// Propagates the internal -1 error value; traces the result (master only).
WebRtc_Word64
VCMTiming::RenderTimeMs(WebRtc_UWord32 frameTimestamp, WebRtc_Word64 nowMs) const
{
    CriticalSectionScoped cs(_critSect);
    const WebRtc_Word64 renderTimeMs = RenderTimeMsInternal(frameTimestamp, nowMs);
    if (renderTimeMs < 0)
    {
        return renderTimeMs;
    }
    if (_master)
    {
        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
            "Render frame %u at %u. Render delay %u, required delay %u,"
                " max decode time %u, min total delay %u",
            frameTimestamp, MaskWord64ToUWord32(renderTimeMs), _renderDelayMs,
            _requiredDelayMs, MaxDecodeTimeMs(),_minTotalDelayMs);
    }
    return renderTimeMs;
}
|
||||
|
||||
// Computes the render time for |frameTimestamp|: the extrapolated local
// arrival time plus the current delay. Returns -1 when the extrapolated
// time lies more than kMaxVideoDelayMs in the future (a corrupt/early
// timestamp), signalling the caller to reset statistics. If the
// extrapolator has no estimate yet (-1), |nowMs| is used instead.
// Must be called with _critSect held (callers RenderTimeMs()/receiver
// paths hold it) since it reads _currentDelayMs and calls MaxDecodeTimeMs().
WebRtc_Word64
VCMTiming::RenderTimeMsInternal(WebRtc_UWord32 frameTimestamp, WebRtc_Word64 nowMs) const
{
    WebRtc_Word64 estimatedCompleteTimeMs =
            _tsExtrapolator->ExtrapolateLocalTime(frameTimestamp);
    if (estimatedCompleteTimeMs - nowMs > kMaxVideoDelayMs)
    {
        if (_master)
        {
            // Fix: the original format string had no conversion specifiers
            // for the two trailing arguments (undefined varargs behavior);
            // also mask the 64-bit time to match %u, as other call sites do.
            WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
                "Timestamp %u arrived 2 seconds early (estimated complete time %u), reset statistics",
                frameTimestamp, MaskWord64ToUWord32(estimatedCompleteTimeMs));
        }
        return -1;
    }
    if (_master)
    {
        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
            "ExtrapolateLocalTime(%u)=%u ms",
            frameTimestamp, MaskWord64ToUWord32(estimatedCompleteTimeMs));
    }
    if (estimatedCompleteTimeMs == -1)
    {
        // No extrapolation available yet; fall back to "now".
        estimatedCompleteTimeMs = nowMs;
    }

    return estimatedCompleteTimeMs + _currentDelayMs;
}
|
||||
|
||||
// Must be called from inside a critical section.
// Returns the required decode time (ms) for |frameType| as reported by
// the codec timer, or -1 (with an error trace) if the timer reports a
// negative value.
WebRtc_Word32
VCMTiming::MaxDecodeTimeMs(FrameType frameType /*= kVideoFrameDelta*/) const
{
    const WebRtc_Word32 decodeTimeMs = _codecTimer.RequiredDecodeTimeMs(frameType);

    if (decodeTimeMs < 0)
    {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
            "Negative maximum decode time: %d", decodeTimeMs);
        return -1;
    }
    return decodeTimeMs;
}

// Returns how long (ms) we may wait for a frame to become complete before
// it must be handed to the decoder to make its render deadline; clamped
// to 0 when the deadline has already passed.
WebRtc_UWord32
VCMTiming::MaxWaitingTime(WebRtc_Word64 renderTimeMs, WebRtc_Word64 nowMs) const
{
    CriticalSectionScoped cs(_critSect);

    const WebRtc_Word64 maxWaitTimeMs = renderTimeMs - nowMs -
                                        MaxDecodeTimeMs() - _renderDelayMs;

    if (maxWaitTimeMs < 0)
    {
        return 0;
    }
    return static_cast<WebRtc_UWord32>(maxWaitTimeMs);
}
|
||||
|
||||
// Returns true if a frame can be decoded within
// |availableProcessingTimeMs| given the current decode-time estimate.
// With no estimate yet (negative), optimistically returns true so one
// frame gets decoded to seed the estimate; a zero estimate is treated
// as 1 ms (current measurement granularity).
bool
VCMTiming::EnoughTimeToDecode(WebRtc_UWord32 availableProcessingTimeMs) const
{
    CriticalSectionScoped cs(_critSect);
    WebRtc_Word32 maxDecodeTimeMs = MaxDecodeTimeMs();
    if (maxDecodeTimeMs < 0)
    {
        // Haven't decoded any frames yet, try decoding one to get an estimate
        // of the decode time.
        return true;
    }
    else if (maxDecodeTimeMs == 0)
    {
        // Decode time is less than 1, set to 1 for now since
        // we don't have any better precision. Count ticks later?
        maxDecodeTimeMs = 1;
    }
    return static_cast<WebRtc_Word32>(availableProcessingTimeMs) - maxDecodeTimeMs > 0;
}

// Thread-safe accessor for the current target delay.
WebRtc_UWord32
VCMTiming::TargetVideoDelay() const
{
    CriticalSectionScoped cs(_critSect);
    return TargetDelayInternal();
}

// Target delay = required (jitter) delay + decode time + render delay.
// Must be called with _critSect held.
WebRtc_UWord32
VCMTiming::TargetDelayInternal() const
{
    return _requiredDelayMs + MaxDecodeTimeMs() + _renderDelayMs;
}
|
||||
|
||||
}
|
||||
111
webrtc/modules/video_coding/main/source/timing.h
Normal file
111
webrtc/modules/video_coding/main/source/timing.h
Normal file
@ -0,0 +1,111 @@
|
||||
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_VIDEO_CODING_TIMING_H_
#define WEBRTC_MODULES_VIDEO_CODING_TIMING_H_

#include "typedefs.h"
#include "critical_section_wrapper.h"
#include "codec_timer.h"

namespace webrtc
{

class TickTimeBase;
class VCMTimestampExtrapolator;

// Tracks the receive-side video timing: maps RTP timestamps to local
// render times via a (possibly shared) timestamp extrapolator, and
// manages the current/target playout delay. Thread-safe: public methods
// take _critSect.
class VCMTiming
{
public:
    // The primary timing component should be passed
    // if this is the dual timing component. A non-NULL |masterTiming|
    // makes this instance share (not own) the master's extrapolator.
    VCMTiming(TickTimeBase* clock,
              WebRtc_Word32 vcmId = 0,
              WebRtc_Word32 timingId = 0,
              VCMTiming* masterTiming = NULL);
    ~VCMTiming();

    // Resets the timing to the initial state.
    void Reset(WebRtc_Word64 nowMs = -1);
    // Resets only the decode-time statistics.
    void ResetDecodeTime();

    // The amount of time needed to render an image. Defaults to 10 ms.
    void SetRenderDelay(WebRtc_UWord32 renderDelayMs);

    // The minimum time the video must be delayed on the receiver to
    // get the desired jitter buffer level.
    void SetRequiredDelay(WebRtc_UWord32 requiredDelayMs);

    // Minimum total delay required to sync video with audio.
    void SetMinimumTotalDelay(WebRtc_UWord32 minTotalDelayMs);

    // Increases or decreases the current delay to get closer to the target delay.
    // Calculates how long it has been since the previous call to this function,
    // and increases/decreases the delay in proportion to the time difference.
    void UpdateCurrentDelay(WebRtc_UWord32 frameTimestamp);

    // Increases or decreases the current delay to get closer to the target delay.
    // Given the actual decode time in ms and the render time in ms for a frame, this
    // function calculates how late the frame is and increases the delay accordingly.
    void UpdateCurrentDelay(WebRtc_Word64 renderTimeMs, WebRtc_Word64 actualDecodeTimeMs);

    // Stops the decoder timer, should be called when the decoder returns a frame
    // or when the decoded frame callback is called.
    WebRtc_Word32 StopDecodeTimer(WebRtc_UWord32 timeStamp,
                                  WebRtc_Word64 startTimeMs,
                                  WebRtc_Word64 nowMs);

    // Used to report that a frame is passed to decoding. Updates the timestamp filter
    // which is used to map between timestamps and receiver system time.
    void IncomingTimestamp(WebRtc_UWord32 timeStamp, WebRtc_Word64 lastPacketTimeMs);

    // Returns the receiver system time when the frame with timestamp frameTimestamp
    // should be rendered, assuming that the system time currently is nowMs.
    // Returns -1 when the timestamp is judged invalid (too far in the future).
    WebRtc_Word64 RenderTimeMs(WebRtc_UWord32 frameTimestamp, WebRtc_Word64 nowMs) const;

    // Returns the maximum time in ms that we can wait for a frame to become complete
    // before we must pass it to the decoder.
    WebRtc_UWord32 MaxWaitingTime(WebRtc_Word64 renderTimeMs, WebRtc_Word64 nowMs) const;

    // Returns the current target delay which is required delay + decode time + render
    // delay.
    WebRtc_UWord32 TargetVideoDelay() const;

    // Calculates whether or not there is enough time to decode a frame given a
    // certain amount of processing time.
    bool EnoughTimeToDecode(WebRtc_UWord32 availableProcessingTimeMs) const;

    enum { kDefaultRenderDelayMs = 10 };
    enum { kDelayMaxChangeMsPerS = 100 };   // rate limit for delay changes

protected:
    // All protected helpers must be called with _critSect held.
    WebRtc_Word32 MaxDecodeTimeMs(FrameType frameType = kVideoFrameDelta) const;
    WebRtc_Word64 RenderTimeMsInternal(WebRtc_UWord32 frameTimestamp,
                                       WebRtc_Word64 nowMs) const;
    WebRtc_UWord32 TargetDelayInternal() const;

private:
    CriticalSectionWrapper*   _critSect;        // owned
    WebRtc_Word32             _vcmId;
    TickTimeBase*             _clock;           // not owned
    WebRtc_Word32             _timingId;
    bool                      _master;          // true when _tsExtrapolator is owned
    VCMTimestampExtrapolator* _tsExtrapolator;  // owned iff _master
    VCMCodecTimer             _codecTimer;
    WebRtc_UWord32            _renderDelayMs;
    WebRtc_UWord32            _minTotalDelayMs;
    WebRtc_UWord32            _requiredDelayMs;
    WebRtc_UWord32            _currentDelayMs;
    WebRtc_UWord32            _prevFrameTimestamp;
};

} // namespace webrtc

#endif // WEBRTC_MODULES_VIDEO_CODING_TIMING_H_
|
||||
100
webrtc/modules/video_coding/main/source/video_coding.gypi
Normal file
100
webrtc/modules/video_coding/main/source/video_coding.gypi
Normal file
@ -0,0 +1,100 @@
|
||||
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
#
|
||||
# Use of this source code is governed by a BSD-style license
|
||||
# that can be found in the LICENSE file in the root of the source
|
||||
# tree. An additional intellectual property rights grant can be found
|
||||
# in the file PATENTS. All contributing project authors may
|
||||
# be found in the AUTHORS file in the root of the source tree.
|
||||
|
||||
{
|
||||
'targets': [
|
||||
{
|
||||
'target_name': 'webrtc_video_coding',
|
||||
'type': '<(library)',
|
||||
'dependencies': [
|
||||
'webrtc_i420',
|
||||
'<(webrtc_root)/common_video/common_video.gyp:common_video',
|
||||
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
|
||||
'<(webrtc_vp8_dir)/vp8.gyp:webrtc_vp8',
|
||||
],
|
||||
'include_dirs': [
|
||||
'../interface',
|
||||
'../../../interface',
|
||||
'../../codecs/interface',
|
||||
'../../../../common_video/interface',
|
||||
],
|
||||
'direct_dependent_settings': {
|
||||
'include_dirs': [
|
||||
'../interface',
|
||||
'../../codecs/interface',
|
||||
],
|
||||
},
|
||||
'sources': [
|
||||
# interfaces
|
||||
'../interface/video_coding.h',
|
||||
'../interface/video_coding_defines.h',
|
||||
|
||||
# headers
|
||||
'codec_database.h',
|
||||
'codec_timer.h',
|
||||
'content_metrics_processing.h',
|
||||
'decoding_state.h',
|
||||
'encoded_frame.h',
|
||||
'er_tables_xor.h',
|
||||
'event.h',
|
||||
'exp_filter.h',
|
||||
'fec_tables_xor.h',
|
||||
'frame_buffer.h',
|
||||
'frame_dropper.h',
|
||||
'generic_decoder.h',
|
||||
'generic_encoder.h',
|
||||
'inter_frame_delay.h',
|
||||
'internal_defines.h',
|
||||
'jitter_buffer.h',
|
||||
'jitter_buffer_common.h',
|
||||
'jitter_estimator.h',
|
||||
'media_opt_util.h',
|
||||
'media_optimization.h',
|
||||
'nack_fec_tables.h',
|
||||
'packet.h',
|
||||
'qm_select_data.h',
|
||||
'qm_select.h',
|
||||
'receiver.h',
|
||||
'rtt_filter.h',
|
||||
'session_info.h',
|
||||
'tick_time_base.h',
|
||||
'timestamp_extrapolator.h',
|
||||
'timestamp_map.h',
|
||||
'timing.h',
|
||||
'video_coding_impl.h',
|
||||
|
||||
# sources
|
||||
'codec_database.cc',
|
||||
'codec_timer.cc',
|
||||
'content_metrics_processing.cc',
|
||||
'decoding_state.cc',
|
||||
'encoded_frame.cc',
|
||||
'exp_filter.cc',
|
||||
'frame_buffer.cc',
|
||||
'frame_dropper.cc',
|
||||
'generic_decoder.cc',
|
||||
'generic_encoder.cc',
|
||||
'inter_frame_delay.cc',
|
||||
'jitter_buffer.cc',
|
||||
'jitter_buffer_common.cc',
|
||||
'jitter_estimator.cc',
|
||||
'media_opt_util.cc',
|
||||
'media_optimization.cc',
|
||||
'packet.cc',
|
||||
'qm_select.cc',
|
||||
'receiver.cc',
|
||||
'rtt_filter.cc',
|
||||
'session_info.cc',
|
||||
'timestamp_extrapolator.cc',
|
||||
'timestamp_map.cc',
|
||||
'timing.cc',
|
||||
'video_coding_impl.cc',
|
||||
], # source
|
||||
},
|
||||
],
|
||||
}
|
||||
1398
webrtc/modules/video_coding/main/source/video_coding_impl.cc
Normal file
1398
webrtc/modules/video_coding/main/source/video_coding_impl.cc
Normal file
File diff suppressed because it is too large
Load Diff
316
webrtc/modules/video_coding/main/source/video_coding_impl.h
Normal file
316
webrtc/modules/video_coding/main/source/video_coding_impl.h
Normal file
@ -0,0 +1,316 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
|
||||
|
||||
#include "modules/video_coding/main/interface/video_coding.h"
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "modules/video_coding/main/source/codec_database.h"
|
||||
#include "modules/video_coding/main/source/frame_buffer.h"
|
||||
#include "modules/video_coding/main/source/generic_decoder.h"
|
||||
#include "modules/video_coding/main/source/generic_encoder.h"
|
||||
#include "modules/video_coding/main/source/jitter_buffer.h"
|
||||
#include "modules/video_coding/main/source/media_optimization.h"
|
||||
#include "modules/video_coding/main/source/receiver.h"
|
||||
#include "modules/video_coding/main/source/tick_time_base.h"
|
||||
#include "modules/video_coding/main/source/timing.h"
|
||||
#include "system_wrappers/interface/critical_section_wrapper.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
class VCMProcessTimer
|
||||
{
|
||||
public:
|
||||
VCMProcessTimer(WebRtc_UWord32 periodMs, TickTimeBase* clock)
|
||||
: _clock(clock),
|
||||
_periodMs(periodMs),
|
||||
_latestMs(_clock->MillisecondTimestamp()) {}
|
||||
WebRtc_UWord32 Period() const;
|
||||
WebRtc_UWord32 TimeUntilProcess() const;
|
||||
void Processed();
|
||||
|
||||
private:
|
||||
TickTimeBase* _clock;
|
||||
WebRtc_UWord32 _periodMs;
|
||||
WebRtc_Word64 _latestMs;
|
||||
};
|
||||
|
||||
enum VCMKeyRequestMode
|
||||
{
|
||||
kKeyOnError, // Normal mode, request key frames on decoder error
|
||||
kKeyOnKeyLoss, // Request key frames on decoder error and on packet loss
|
||||
// in key frames.
|
||||
kKeyOnLoss, // Request key frames on decoder error and on packet loss
|
||||
// in any frame
|
||||
};
|
||||
|
||||
class VideoCodingModuleImpl : public VideoCodingModule
|
||||
{
|
||||
public:
|
||||
VideoCodingModuleImpl(const WebRtc_Word32 id,
|
||||
TickTimeBase* clock,
|
||||
bool delete_clock_on_destroy);
|
||||
|
||||
virtual ~VideoCodingModuleImpl();
|
||||
|
||||
WebRtc_Word32 Id() const;
|
||||
|
||||
// Change the unique identifier of this object
|
||||
virtual WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);
|
||||
|
||||
// Returns the number of milliseconds until the module want a worker thread
|
||||
// to call Process
|
||||
virtual WebRtc_Word32 TimeUntilNextProcess();
|
||||
|
||||
virtual WebRtc_Word32 Process();
|
||||
|
||||
/*
|
||||
* Sender
|
||||
*/
|
||||
|
||||
// Initialize send codec
|
||||
virtual WebRtc_Word32 InitializeSender();
|
||||
|
||||
// Register the send codec to be used.
|
||||
virtual WebRtc_Word32 RegisterSendCodec(const VideoCodec* sendCodec,
|
||||
WebRtc_UWord32 numberOfCores,
|
||||
WebRtc_UWord32 maxPayloadSize);
|
||||
|
||||
// Get current send codec
|
||||
virtual WebRtc_Word32 SendCodec(VideoCodec* currentSendCodec) const;
|
||||
|
||||
// Get current send codec type
|
||||
virtual VideoCodecType SendCodec() const;
|
||||
|
||||
// Register an external encoder object.
|
||||
virtual WebRtc_Word32 RegisterExternalEncoder(VideoEncoder* externalEncoder,
|
||||
WebRtc_UWord8 payloadType,
|
||||
bool internalSource = false);
|
||||
|
||||
// Get codec config parameters
|
||||
virtual WebRtc_Word32 CodecConfigParameters(WebRtc_UWord8* buffer,
|
||||
WebRtc_Word32 size);
|
||||
|
||||
// Get encode bitrate
|
||||
virtual int Bitrate(unsigned int* bitrate) const;
|
||||
|
||||
// Get encode frame rate
|
||||
virtual int FrameRate(unsigned int* framerate) const;
|
||||
|
||||
// Set channel parameters
|
||||
virtual WebRtc_Word32 SetChannelParameters(
|
||||
WebRtc_UWord32 availableBandWidth,
|
||||
WebRtc_UWord8 lossRate,
|
||||
WebRtc_UWord32 rtt);
|
||||
|
||||
// Set recieve channel parameters
|
||||
virtual WebRtc_Word32 SetReceiveChannelParameters(WebRtc_UWord32 rtt);
|
||||
|
||||
// Register a transport callback which will be called to deliver the
|
||||
// encoded buffers
|
||||
virtual WebRtc_Word32 RegisterTransportCallback(
|
||||
VCMPacketizationCallback* transport);
|
||||
|
||||
// Register a send statistics callback which will be called to deliver
|
||||
// information about the video stream produced by the encoder,
|
||||
// for instance the average frame rate and bit rate.
|
||||
virtual WebRtc_Word32 RegisterSendStatisticsCallback(
|
||||
VCMSendStatisticsCallback* sendStats);
|
||||
|
||||
// Register a video quality settings callback which will be called when
|
||||
// frame rate/dimensions need to be updated for video quality optimization
|
||||
virtual WebRtc_Word32 RegisterVideoQMCallback(
|
||||
VCMQMSettingsCallback* videoQMSettings);
|
||||
|
||||
// Register a video protection callback which will be called to deliver
|
||||
// the requested FEC rate and NACK status (on/off).
|
||||
virtual WebRtc_Word32 RegisterProtectionCallback(
|
||||
VCMProtectionCallback* protection);
|
||||
|
||||
// Enable or disable a video protection method.
|
||||
virtual WebRtc_Word32 SetVideoProtection(VCMVideoProtection videoProtection,
|
||||
bool enable);
|
||||
|
||||
// Add one raw video frame to the encoder, blocking.
|
||||
virtual WebRtc_Word32 AddVideoFrame(
|
||||
const VideoFrame& videoFrame,
|
||||
const VideoContentMetrics* _contentMetrics = NULL,
|
||||
const CodecSpecificInfo* codecSpecificInfo = NULL);
|
||||
|
||||
virtual WebRtc_Word32 IntraFrameRequest(int stream_index);
|
||||
|
||||
//Enable frame dropper
|
||||
virtual WebRtc_Word32 EnableFrameDropper(bool enable);
|
||||
|
||||
// Sent frame counters
|
||||
virtual WebRtc_Word32 SentFrameCount(VCMFrameCount& frameCount) const;
|
||||
|
||||
/*
|
||||
* Receiver
|
||||
*/
|
||||
|
||||
// Initialize receiver, resets codec database etc
|
||||
virtual WebRtc_Word32 InitializeReceiver();
|
||||
|
||||
// Register possible reveive codecs, can be called multiple times
|
||||
virtual WebRtc_Word32 RegisterReceiveCodec(const VideoCodec* receiveCodec,
|
||||
WebRtc_Word32 numberOfCores,
|
||||
bool requireKeyFrame = false);
|
||||
|
||||
// Register an externally defined decoder/render object.
|
||||
// Can be a decoder only or a decoder coupled with a renderer.
|
||||
virtual WebRtc_Word32 RegisterExternalDecoder(VideoDecoder* externalDecoder,
|
||||
WebRtc_UWord8 payloadType,
|
||||
bool internalRenderTiming);
|
||||
|
||||
// Register a receive callback. Will be called whenever there are a new
|
||||
// frame ready for rendering.
|
||||
virtual WebRtc_Word32 RegisterReceiveCallback(
|
||||
VCMReceiveCallback* receiveCallback);
|
||||
|
||||
// Register a receive statistics callback which will be called to deliver
|
||||
// information about the video stream received by the receiving side of the
|
||||
// VCM, for instance the average frame rate and bit rate.
|
||||
virtual WebRtc_Word32 RegisterReceiveStatisticsCallback(
|
||||
VCMReceiveStatisticsCallback* receiveStats);
|
||||
|
||||
// Register a frame type request callback.
|
||||
virtual WebRtc_Word32 RegisterFrameTypeCallback(
|
||||
VCMFrameTypeCallback* frameTypeCallback);
|
||||
|
||||
// Register a frame storage callback.
|
||||
virtual WebRtc_Word32 RegisterFrameStorageCallback(
|
||||
VCMFrameStorageCallback* frameStorageCallback);
|
||||
|
||||
// Nack callback
|
||||
virtual WebRtc_Word32 RegisterPacketRequestCallback(
|
||||
VCMPacketRequestCallback* callback);
|
||||
|
||||
// Decode next frame, blocks for a maximum of maxWaitTimeMs milliseconds.
|
||||
// Should be called as often as possible to get the most out of the decoder.
|
||||
virtual WebRtc_Word32 Decode(WebRtc_UWord16 maxWaitTimeMs = 200);
|
||||
|
||||
// Decode next dual frame, blocks for a maximum of maxWaitTimeMs
|
||||
// milliseconds.
|
||||
virtual WebRtc_Word32 DecodeDualFrame(WebRtc_UWord16 maxWaitTimeMs = 200);
|
||||
|
||||
// Reset the decoder state
|
||||
virtual WebRtc_Word32 ResetDecoder();
|
||||
|
||||
// Get current received codec
|
||||
virtual WebRtc_Word32 ReceiveCodec(VideoCodec* currentReceiveCodec) const;
|
||||
|
||||
// Get current received codec type
|
||||
virtual VideoCodecType ReceiveCodec() const;
|
||||
|
||||
// Incoming packet from network parsed and ready for decode, non blocking.
|
||||
virtual WebRtc_Word32 IncomingPacket(const WebRtc_UWord8* incomingPayload,
|
||||
WebRtc_UWord32 payloadLength,
|
||||
const WebRtcRTPHeader& rtpInfo);
|
||||
|
||||
// A part of an encoded frame to be decoded.
|
||||
// Used in conjunction with VCMFrameStorageCallback.
|
||||
virtual WebRtc_Word32 DecodeFromStorage(
|
||||
const EncodedVideoData& frameFromStorage);
|
||||
|
||||
// Minimum playout delay (Used for lip-sync). This is the minimum delay
|
||||
// required to sync with audio. Not included in VideoCodingModule::Delay()
|
||||
// Defaults to 0 ms.
|
||||
virtual WebRtc_Word32 SetMinimumPlayoutDelay(
|
||||
WebRtc_UWord32 minPlayoutDelayMs);
|
||||
|
||||
// The estimated delay caused by rendering
|
||||
virtual WebRtc_Word32 SetRenderDelay(WebRtc_UWord32 timeMS);
|
||||
|
||||
// Current delay
|
||||
virtual WebRtc_Word32 Delay() const;
|
||||
|
||||
// Received frame counters
|
||||
virtual WebRtc_Word32 ReceivedFrameCount(VCMFrameCount& frameCount) const;
|
||||
|
||||
// Returns the number of packets discarded by the jitter buffer.
|
||||
virtual WebRtc_UWord32 DiscardedPackets() const;
|
||||
|
||||
|
||||
// Robustness APIs
|
||||
|
||||
// Set the sender RTX/NACK mode.
|
||||
virtual int SetSenderNackMode(SenderNackMode mode);
|
||||
|
||||
// Set the sender reference picture selection (RPS) mode.
|
||||
virtual int SetSenderReferenceSelection(bool enable);
|
||||
|
||||
// Set the sender forward error correction (FEC) mode.
|
||||
virtual int SetSenderFEC(bool enable);
|
||||
|
||||
// Set the key frame period, or disable periodic key frames (I-frames).
|
||||
virtual int SetSenderKeyFramePeriod(int periodMs);
|
||||
|
||||
// Set the receiver robustness mode.
|
||||
virtual int SetReceiverRobustnessMode(ReceiverRobustness robustnessMode,
|
||||
DecodeErrors errorMode);
|
||||
// Enables recording of debugging information.
|
||||
virtual int StartDebugRecording(const char* file_name_utf8);
|
||||
|
||||
// Disables recording of debugging information.
|
||||
virtual int StopDebugRecording();
|
||||
|
||||
protected:
|
||||
WebRtc_Word32 Decode(const webrtc::VCMEncodedFrame& frame);
|
||||
WebRtc_Word32 RequestKeyFrame();
|
||||
WebRtc_Word32 RequestSliceLossIndication(
|
||||
const WebRtc_UWord64 pictureID) const;
|
||||
WebRtc_Word32 NackList(WebRtc_UWord16* nackList, WebRtc_UWord16& size);
|
||||
|
||||
private:
|
||||
WebRtc_Word32 _id;
|
||||
TickTimeBase* clock_;
|
||||
bool delete_clock_on_destroy_;
|
||||
CriticalSectionWrapper* _receiveCritSect;
|
||||
bool _receiverInited;
|
||||
VCMTiming _timing;
|
||||
VCMTiming _dualTiming;
|
||||
VCMReceiver _receiver;
|
||||
VCMReceiver _dualReceiver;
|
||||
VCMDecodedFrameCallback _decodedFrameCallback;
|
||||
VCMDecodedFrameCallback _dualDecodedFrameCallback;
|
||||
VCMFrameTypeCallback* _frameTypeCallback;
|
||||
VCMFrameStorageCallback* _frameStorageCallback;
|
||||
VCMReceiveStatisticsCallback* _receiveStatsCallback;
|
||||
VCMPacketRequestCallback* _packetRequestCallback;
|
||||
VCMGenericDecoder* _decoder;
|
||||
VCMGenericDecoder* _dualDecoder;
|
||||
#ifdef DEBUG_DECODER_BIT_STREAM
|
||||
FILE* _bitStreamBeforeDecoder;
|
||||
#endif
|
||||
VCMFrameBuffer _frameFromFile;
|
||||
VCMKeyRequestMode _keyRequestMode;
|
||||
bool _scheduleKeyRequest;
|
||||
|
||||
CriticalSectionWrapper* _sendCritSect; // Critical section for send side
|
||||
VCMGenericEncoder* _encoder;
|
||||
VCMEncodedFrameCallback _encodedFrameCallback;
|
||||
std::vector<FrameType> _nextFrameTypes;
|
||||
VCMMediaOptimization _mediaOpt;
|
||||
VideoCodecType _sendCodecType;
|
||||
VCMSendStatisticsCallback* _sendStatsCallback;
|
||||
FILE* _encoderInputFile;
|
||||
VCMCodecDataBase _codecDataBase;
|
||||
VCMProcessTimer _receiveStatsTimer;
|
||||
VCMProcessTimer _sendStatsTimer;
|
||||
VCMProcessTimer _retransmissionTimer;
|
||||
VCMProcessTimer _keyRequestTimer;
|
||||
};
|
||||
} // namespace webrtc
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
|
||||
@ -0,0 +1,115 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
|
||||
#include "modules/video_coding/main/interface/video_coding.h"
|
||||
#include "system_wrappers/interface/scoped_ptr.h"
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
|
||||
using ::testing::_;
|
||||
using ::testing::AllOf;
|
||||
using ::testing::ElementsAre;
|
||||
using ::testing::ElementsAreArray;
|
||||
using ::testing::Field;
|
||||
using ::testing::NiceMock;
|
||||
using ::testing::Pointee;
|
||||
using ::testing::Return;
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
class TestVideoCodingModule : public ::testing::Test {
|
||||
protected:
|
||||
static const int kDefaultWidth = 1280;
|
||||
static const int kDefaultHeight = 720;
|
||||
static const int kNumberOfStreams = 3;
|
||||
static const int kNumberOfLayers = 3;
|
||||
static const int kUnusedPayloadType = 10;
|
||||
|
||||
virtual void SetUp() {
|
||||
vcm_ = VideoCodingModule::Create(0);
|
||||
EXPECT_EQ(0, vcm_->RegisterExternalEncoder(&encoder_, kUnusedPayloadType,
|
||||
false));
|
||||
memset(&settings_, 0, sizeof(settings_));
|
||||
EXPECT_EQ(0, vcm_->Codec(kVideoCodecVP8, &settings_));
|
||||
settings_.numberOfSimulcastStreams = kNumberOfStreams;
|
||||
ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, 100,
|
||||
&settings_.simulcastStream[0]);
|
||||
ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, 500,
|
||||
&settings_.simulcastStream[1]);
|
||||
ConfigureStream(kDefaultWidth, kDefaultHeight, 1200,
|
||||
&settings_.simulcastStream[2]);
|
||||
settings_.plType = kUnusedPayloadType; // Use the mocked encoder.
|
||||
EXPECT_EQ(0, vcm_->RegisterSendCodec(&settings_, 1, 1200));
|
||||
}
|
||||
|
||||
virtual void TearDown() {
|
||||
VideoCodingModule::Destroy(vcm_);
|
||||
input_frame_.Free();
|
||||
}
|
||||
|
||||
void ExpectIntraRequest(int stream) {
|
||||
if (stream == -1) {
|
||||
// No intra request expected.
|
||||
EXPECT_CALL(encoder_, Encode(
|
||||
_, _, Pointee(ElementsAre(kDeltaFrame, kDeltaFrame, kDeltaFrame))))
|
||||
.Times(1)
|
||||
.WillRepeatedly(Return(0));
|
||||
return;
|
||||
}
|
||||
assert(stream >= 0);
|
||||
assert(stream < kNumberOfStreams);
|
||||
std::vector<VideoFrameType> frame_types(kNumberOfStreams, kDeltaFrame);
|
||||
frame_types[stream] = kKeyFrame;
|
||||
EXPECT_CALL(encoder_, Encode(
|
||||
_, _, Pointee(ElementsAreArray(&frame_types[0], frame_types.size()))))
|
||||
.Times(1)
|
||||
.WillRepeatedly(Return(0));
|
||||
}
|
||||
|
||||
static void ConfigureStream(int width, int height, int max_bitrate,
|
||||
SimulcastStream* stream) {
|
||||
assert(stream);
|
||||
stream->width = width;
|
||||
stream->height = height;
|
||||
stream->maxBitrate = max_bitrate;
|
||||
stream->numberOfTemporalLayers = kNumberOfLayers;
|
||||
stream->qpMax = 45;
|
||||
}
|
||||
|
||||
VideoCodingModule* vcm_;
|
||||
NiceMock<MockVideoEncoder> encoder_;
|
||||
VideoFrame input_frame_;
|
||||
VideoCodec settings_;
|
||||
};
|
||||
|
||||
TEST_F(TestVideoCodingModule, TestIntraRequests) {
|
||||
EXPECT_EQ(0, vcm_->IntraFrameRequest(0));
|
||||
ExpectIntraRequest(0);
|
||||
EXPECT_EQ(0, vcm_->AddVideoFrame(input_frame_, NULL, NULL));
|
||||
ExpectIntraRequest(-1);
|
||||
EXPECT_EQ(0, vcm_->AddVideoFrame(input_frame_, NULL, NULL));
|
||||
|
||||
EXPECT_EQ(0, vcm_->IntraFrameRequest(1));
|
||||
ExpectIntraRequest(1);
|
||||
EXPECT_EQ(0, vcm_->AddVideoFrame(input_frame_, NULL, NULL));
|
||||
ExpectIntraRequest(-1);
|
||||
EXPECT_EQ(0, vcm_->AddVideoFrame(input_frame_, NULL, NULL));
|
||||
|
||||
EXPECT_EQ(0, vcm_->IntraFrameRequest(2));
|
||||
ExpectIntraRequest(2);
|
||||
EXPECT_EQ(0, vcm_->AddVideoFrame(input_frame_, NULL, NULL));
|
||||
ExpectIntraRequest(-1);
|
||||
EXPECT_EQ(0, vcm_->AddVideoFrame(input_frame_, NULL, NULL));
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
@ -0,0 +1,396 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "gmock/gmock.h"
|
||||
#include "gtest/gtest.h"
|
||||
#include "modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
|
||||
#include "modules/video_coding/main/interface/video_coding.h"
|
||||
#include "modules/video_coding/main/interface/mock/mock_vcm_callbacks.h"
|
||||
#include "modules/video_coding/main/source/mock/fake_tick_time.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
using ::testing::Return;
|
||||
using ::testing::_;
|
||||
using ::testing::ElementsAre;
|
||||
using ::testing::AllOf;
|
||||
using ::testing::Args;
|
||||
using ::testing::Field;
|
||||
using ::testing::Pointee;
|
||||
using ::testing::NiceMock;
|
||||
using ::testing::Sequence;
|
||||
|
||||
class VCMRobustnessTest : public ::testing::Test {
|
||||
protected:
|
||||
static const size_t kPayloadLen = 10;
|
||||
|
||||
virtual void SetUp() {
|
||||
clock_ = new FakeTickTime(0);
|
||||
ASSERT_TRUE(clock_ != NULL);
|
||||
vcm_ = VideoCodingModule::Create(0, clock_);
|
||||
ASSERT_TRUE(vcm_ != NULL);
|
||||
ASSERT_EQ(0, vcm_->InitializeReceiver());
|
||||
ASSERT_EQ(0, vcm_->RegisterFrameTypeCallback(&frame_type_callback_));
|
||||
ASSERT_EQ(0, vcm_->RegisterPacketRequestCallback(&request_callback_));
|
||||
ASSERT_EQ(VCM_OK, vcm_->Codec(kVideoCodecVP8, &video_codec_));
|
||||
ASSERT_EQ(VCM_OK, vcm_->RegisterReceiveCodec(&video_codec_, 1));
|
||||
ASSERT_EQ(VCM_OK, vcm_->RegisterExternalDecoder(&decoder_,
|
||||
video_codec_.plType,
|
||||
true));
|
||||
}
|
||||
|
||||
virtual void TearDown() {
|
||||
VideoCodingModule::Destroy(vcm_);
|
||||
delete clock_;
|
||||
}
|
||||
|
||||
void InsertPacket(uint32_t timestamp,
|
||||
uint16_t seq_no,
|
||||
bool first,
|
||||
bool marker_bit,
|
||||
FrameType frame_type) {
|
||||
const uint8_t payload[kPayloadLen] = {0};
|
||||
WebRtcRTPHeader rtp_info;
|
||||
memset(&rtp_info, 0, sizeof(rtp_info));
|
||||
rtp_info.frameType = frame_type;
|
||||
rtp_info.header.timestamp = timestamp;
|
||||
rtp_info.header.sequenceNumber = seq_no;
|
||||
rtp_info.header.markerBit = marker_bit;
|
||||
rtp_info.header.payloadType = video_codec_.plType;
|
||||
rtp_info.type.Video.codec = kRTPVideoVP8;
|
||||
rtp_info.type.Video.codecHeader.VP8.InitRTPVideoHeaderVP8();
|
||||
rtp_info.type.Video.isFirstPacket = first;
|
||||
|
||||
ASSERT_EQ(VCM_OK, vcm_->IncomingPacket(payload, kPayloadLen, rtp_info));
|
||||
}
|
||||
|
||||
VideoCodingModule* vcm_;
|
||||
VideoCodec video_codec_;
|
||||
MockVCMFrameTypeCallback frame_type_callback_;
|
||||
MockPacketRequestCallback request_callback_;
|
||||
NiceMock<MockVideoDecoder> decoder_;
|
||||
NiceMock<MockVideoDecoder> decoderCopy_;
|
||||
FakeTickTime* clock_;
|
||||
};
|
||||
|
||||
TEST_F(VCMRobustnessTest, TestHardNack) {
|
||||
Sequence s;
|
||||
EXPECT_CALL(request_callback_, ResendPackets(_, 2))
|
||||
.With(Args<0, 1>(ElementsAre(6, 7)))
|
||||
.Times(1);
|
||||
for (int ts = 0; ts <= 6000; ts += 3000) {
|
||||
EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, ts),
|
||||
Field(&EncodedImage::_length,
|
||||
kPayloadLen * 3),
|
||||
Field(&EncodedImage::_completeFrame,
|
||||
true)),
|
||||
false, _, _, _))
|
||||
.Times(1)
|
||||
.InSequence(s);
|
||||
}
|
||||
|
||||
ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
|
||||
VideoCodingModule::kHardNack,
|
||||
VideoCodingModule::kNoDecodeErrors));
|
||||
|
||||
InsertPacket(0, 0, true, false, kVideoFrameKey);
|
||||
InsertPacket(0, 1, false, false, kVideoFrameKey);
|
||||
InsertPacket(0, 2, false, true, kVideoFrameKey);
|
||||
|
||||
InsertPacket(3000, 3, true, false, kVideoFrameDelta);
|
||||
InsertPacket(3000, 4, false, false, kVideoFrameDelta);
|
||||
InsertPacket(3000, 5, false, true, kVideoFrameDelta);
|
||||
|
||||
ASSERT_EQ(VCM_OK, vcm_->Decode(0));
|
||||
ASSERT_EQ(VCM_OK, vcm_->Decode(0));
|
||||
ASSERT_EQ(VCM_FRAME_NOT_READY, vcm_->Decode(0));
|
||||
|
||||
clock_->IncrementDebugClock(10);
|
||||
|
||||
ASSERT_EQ(VCM_OK, vcm_->Process());
|
||||
|
||||
ASSERT_EQ(VCM_FRAME_NOT_READY, vcm_->Decode(0));
|
||||
|
||||
InsertPacket(6000, 8, false, true, kVideoFrameDelta);
|
||||
clock_->IncrementDebugClock(10);
|
||||
ASSERT_EQ(VCM_OK, vcm_->Process());
|
||||
|
||||
ASSERT_EQ(VCM_FRAME_NOT_READY, vcm_->Decode(0));
|
||||
|
||||
InsertPacket(6000, 6, true, false, kVideoFrameDelta);
|
||||
InsertPacket(6000, 7, false, false, kVideoFrameDelta);
|
||||
clock_->IncrementDebugClock(10);
|
||||
ASSERT_EQ(VCM_OK, vcm_->Process());
|
||||
|
||||
ASSERT_EQ(VCM_OK, vcm_->Decode(0));
|
||||
}
|
||||
|
||||
TEST_F(VCMRobustnessTest, TestHardNackNoneDecoded) {
|
||||
EXPECT_CALL(request_callback_, ResendPackets(_, _))
|
||||
.Times(0);
|
||||
EXPECT_CALL(frame_type_callback_, RequestKeyFrame())
|
||||
.Times(1);
|
||||
|
||||
ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
|
||||
VideoCodingModule::kHardNack,
|
||||
VideoCodingModule::kNoDecodeErrors));
|
||||
|
||||
InsertPacket(3000, 3, true, false, kVideoFrameDelta);
|
||||
InsertPacket(3000, 4, false, false, kVideoFrameDelta);
|
||||
InsertPacket(3000, 5, false, true, kVideoFrameDelta);
|
||||
|
||||
EXPECT_EQ(VCM_FRAME_NOT_READY, vcm_->Decode(0));
|
||||
ASSERT_EQ(VCM_OK, vcm_->Process());
|
||||
|
||||
clock_->IncrementDebugClock(10);
|
||||
|
||||
EXPECT_EQ(VCM_FRAME_NOT_READY, vcm_->Decode(0));
|
||||
ASSERT_EQ(VCM_OK, vcm_->Process());
|
||||
}
|
||||
|
||||
TEST_F(VCMRobustnessTest, TestDualDecoder) {
|
||||
Sequence s1, s2;
|
||||
EXPECT_CALL(request_callback_, ResendPackets(_, 1))
|
||||
.With(Args<0, 1>(ElementsAre(4)))
|
||||
.Times(1);
|
||||
|
||||
EXPECT_CALL(decoder_, Copy())
|
||||
.Times(1)
|
||||
.WillOnce(Return(&decoderCopy_));
|
||||
EXPECT_CALL(decoderCopy_, Copy())
|
||||
.Times(1)
|
||||
.WillOnce(Return(&decoder_));
|
||||
|
||||
// Decode operations
|
||||
EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 0),
|
||||
Field(&EncodedImage::_completeFrame,
|
||||
true)),
|
||||
false, _, _, _))
|
||||
.Times(1)
|
||||
.InSequence(s1);
|
||||
EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 3000),
|
||||
Field(&EncodedImage::_completeFrame,
|
||||
false)),
|
||||
false, _, _, _))
|
||||
.Times(1)
|
||||
.InSequence(s1);
|
||||
EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 6000),
|
||||
Field(&EncodedImage::_completeFrame,
|
||||
true)),
|
||||
false, _, _, _))
|
||||
.Times(1)
|
||||
.InSequence(s1);
|
||||
EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 9000),
|
||||
Field(&EncodedImage::_completeFrame,
|
||||
true)),
|
||||
false, _, _, _))
|
||||
.Times(1)
|
||||
.InSequence(s1);
|
||||
|
||||
EXPECT_CALL(decoderCopy_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 3000),
|
||||
Field(&EncodedImage::_completeFrame,
|
||||
true)),
|
||||
false, _, _, _))
|
||||
.Times(1)
|
||||
.InSequence(s2);
|
||||
EXPECT_CALL(decoderCopy_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 6000),
|
||||
Field(&EncodedImage::_completeFrame,
|
||||
true)),
|
||||
false, _, _, _))
|
||||
.Times(1)
|
||||
.InSequence(s2);
|
||||
|
||||
|
||||
ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
|
||||
VideoCodingModule::kDualDecoder,
|
||||
VideoCodingModule::kAllowDecodeErrors));
|
||||
|
||||
InsertPacket(0, 0, true, false, kVideoFrameKey);
|
||||
InsertPacket(0, 1, false, false, kVideoFrameKey);
|
||||
InsertPacket(0, 2, false, true, kVideoFrameKey);
|
||||
EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 0.
|
||||
|
||||
clock_->IncrementDebugClock(33);
|
||||
InsertPacket(3000, 3, true, false, kVideoFrameDelta);
|
||||
// Packet 4 missing
|
||||
InsertPacket(3000, 5, false, true, kVideoFrameDelta);
|
||||
EXPECT_EQ(VCM_FRAME_NOT_READY, vcm_->Decode(0));
|
||||
|
||||
clock_->IncrementDebugClock(33);
|
||||
InsertPacket(6000, 6, true, false, kVideoFrameDelta);
|
||||
InsertPacket(6000, 7, false, false, kVideoFrameDelta);
|
||||
InsertPacket(6000, 8, false, true, kVideoFrameDelta);
|
||||
|
||||
EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 3000 incomplete.
|
||||
// Spawn a decoder copy.
|
||||
EXPECT_EQ(0, vcm_->DecodeDualFrame(0)); // Expect no dual decoder action.
|
||||
|
||||
clock_->IncrementDebugClock(10);
|
||||
EXPECT_EQ(VCM_OK, vcm_->Process()); // Generate NACK list.
|
||||
|
||||
EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 6000 complete.
|
||||
EXPECT_EQ(0, vcm_->DecodeDualFrame(0)); // Expect no dual decoder action.
|
||||
|
||||
InsertPacket(3000, 4, false, false, kVideoFrameDelta);
|
||||
EXPECT_EQ(1, vcm_->DecodeDualFrame(0)); // Dual decode of timestamp 3000.
|
||||
EXPECT_EQ(1, vcm_->DecodeDualFrame(0)); // Dual decode of timestamp 6000.
|
||||
EXPECT_EQ(0, vcm_->DecodeDualFrame(0)); // No more frames.
|
||||
|
||||
InsertPacket(9000, 9, true, false, kVideoFrameDelta);
|
||||
InsertPacket(9000, 10, false, false, kVideoFrameDelta);
|
||||
InsertPacket(9000, 11, false, true, kVideoFrameDelta);
|
||||
EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 9000 complete.
|
||||
EXPECT_EQ(0, vcm_->DecodeDualFrame(0)); // Expect no dual decoder action.
|
||||
}
|
||||
|
||||
TEST_F(VCMRobustnessTest, TestModeNoneWithErrors) {
|
||||
EXPECT_CALL(decoder_, InitDecode(_, _)).Times(1);
|
||||
EXPECT_CALL(decoder_, Release()).Times(1);
|
||||
Sequence s1;
|
||||
EXPECT_CALL(request_callback_, ResendPackets(_, 1))
|
||||
.With(Args<0, 1>(ElementsAre(4)))
|
||||
.Times(0);
|
||||
|
||||
EXPECT_CALL(decoder_, Copy())
|
||||
.Times(0);
|
||||
EXPECT_CALL(decoderCopy_, Copy())
|
||||
.Times(0);
|
||||
|
||||
// Decode operations
|
||||
EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 0),
|
||||
Field(&EncodedImage::_completeFrame,
|
||||
true)),
|
||||
false, _, _, _))
|
||||
.Times(1)
|
||||
.InSequence(s1);
|
||||
EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 3000),
|
||||
Field(&EncodedImage::_completeFrame,
|
||||
false)),
|
||||
false, _, _, _))
|
||||
.Times(1)
|
||||
.InSequence(s1);
|
||||
EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 6000),
|
||||
Field(&EncodedImage::_completeFrame,
|
||||
true)),
|
||||
false, _, _, _))
|
||||
.Times(1)
|
||||
.InSequence(s1);
|
||||
EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 9000),
|
||||
Field(&EncodedImage::_completeFrame,
|
||||
true)),
|
||||
false, _, _, _))
|
||||
.Times(1)
|
||||
.InSequence(s1);
|
||||
|
||||
ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
|
||||
VideoCodingModule::kNone,
|
||||
VideoCodingModule::kAllowDecodeErrors));
|
||||
|
||||
InsertPacket(0, 0, true, false, kVideoFrameKey);
|
||||
InsertPacket(0, 1, false, false, kVideoFrameKey);
|
||||
InsertPacket(0, 2, false, true, kVideoFrameKey);
|
||||
EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 0.
|
||||
EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
|
||||
|
||||
clock_->IncrementDebugClock(33);
|
||||
InsertPacket(3000, 3, true, false, kVideoFrameDelta);
|
||||
// Packet 4 missing
|
||||
InsertPacket(3000, 5, false, true, kVideoFrameDelta);
|
||||
EXPECT_EQ(VCM_FRAME_NOT_READY, vcm_->Decode(0));
|
||||
EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
|
||||
|
||||
clock_->IncrementDebugClock(33);
|
||||
InsertPacket(6000, 6, true, false, kVideoFrameDelta);
|
||||
InsertPacket(6000, 7, false, false, kVideoFrameDelta);
|
||||
InsertPacket(6000, 8, false, true, kVideoFrameDelta);
|
||||
EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 3000 incomplete.
|
||||
EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
|
||||
|
||||
clock_->IncrementDebugClock(10);
|
||||
EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 6000 complete.
|
||||
EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
|
||||
|
||||
clock_->IncrementDebugClock(23);
|
||||
InsertPacket(3000, 4, false, false, kVideoFrameDelta);
|
||||
|
||||
InsertPacket(9000, 9, true, false, kVideoFrameDelta);
|
||||
InsertPacket(9000, 10, false, false, kVideoFrameDelta);
|
||||
InsertPacket(9000, 11, false, true, kVideoFrameDelta);
|
||||
EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 9000 complete.
|
||||
}
|
||||
|
||||
TEST_F(VCMRobustnessTest, TestModeNoneWithoutErrors) {
|
||||
Sequence s1;
|
||||
EXPECT_CALL(decoder_, InitDecode(_, _)).Times(1);
|
||||
EXPECT_CALL(decoder_, Release()).Times(1);
|
||||
EXPECT_CALL(request_callback_, ResendPackets(_, 1))
|
||||
.With(Args<0, 1>(ElementsAre(4)))
|
||||
.Times(0);
|
||||
|
||||
EXPECT_CALL(decoder_, Copy())
|
||||
.Times(0);
|
||||
EXPECT_CALL(decoderCopy_, Copy())
|
||||
.Times(0);
|
||||
|
||||
// Decode operations
|
||||
EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 0),
|
||||
Field(&EncodedImage::_completeFrame,
|
||||
true)),
|
||||
false, _, _, _))
|
||||
.Times(1)
|
||||
.InSequence(s1);
|
||||
EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 3000),
|
||||
Field(&EncodedImage::_completeFrame,
|
||||
false)),
|
||||
false, _, _, _))
|
||||
.Times(1)
|
||||
.InSequence(s1);
|
||||
EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 6000),
|
||||
Field(&EncodedImage::_completeFrame,
|
||||
true)),
|
||||
false, _, _, _))
|
||||
.Times(1)
|
||||
.InSequence(s1);
|
||||
EXPECT_CALL(frame_type_callback_, RequestKeyFrame())
|
||||
.Times(1);
|
||||
|
||||
ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
|
||||
VideoCodingModule::kNone,
|
||||
VideoCodingModule::kNoDecodeErrors));
|
||||
|
||||
InsertPacket(0, 0, true, false, kVideoFrameKey);
|
||||
InsertPacket(0, 1, false, false, kVideoFrameKey);
|
||||
InsertPacket(0, 2, false, true, kVideoFrameKey);
|
||||
EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 0.
|
||||
EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
|
||||
|
||||
clock_->IncrementDebugClock(33);
|
||||
InsertPacket(3000, 3, true, false, kVideoFrameDelta);
|
||||
// Packet 4 missing
|
||||
InsertPacket(3000, 5, false, true, kVideoFrameDelta);
|
||||
EXPECT_EQ(VCM_FRAME_NOT_READY, vcm_->Decode(0));
|
||||
EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
|
||||
|
||||
clock_->IncrementDebugClock(33);
|
||||
InsertPacket(6000, 6, true, false, kVideoFrameDelta);
|
||||
InsertPacket(6000, 7, false, false, kVideoFrameDelta);
|
||||
InsertPacket(6000, 8, false, true, kVideoFrameDelta);
|
||||
EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 3000 incomplete.
|
||||
// Schedule key frame request.
|
||||
EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
|
||||
|
||||
clock_->IncrementDebugClock(10);
|
||||
EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 6000 complete.
|
||||
EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
|
||||
|
||||
clock_->IncrementDebugClock(500); // Wait for the key request timer to set.
|
||||
EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect key frame request.
|
||||
}
|
||||
} // namespace webrtc
|
||||
@ -0,0 +1,97 @@
|
||||
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
#
|
||||
# Use of this source code is governed by a BSD-style license
|
||||
# that can be found in the LICENSE file in the root of the source
|
||||
# tree. An additional intellectual property rights grant can be found
|
||||
# in the file PATENTS. All contributing project authors may
|
||||
# be found in the AUTHORS file in the root of the source tree.
|
||||
|
||||
{
|
||||
'targets': [{
|
||||
'target_name': 'video_coding_test',
|
||||
'type': 'executable',
|
||||
'dependencies': [
|
||||
'<(DEPTH)/testing/gtest.gyp:gtest',
|
||||
'<(webrtc_root)/test/test.gyp:test_support',
|
||||
'<(webrtc_root)/test/metrics.gyp:metrics',
|
||||
'webrtc_video_coding',
|
||||
'rtp_rtcp',
|
||||
'webrtc_utility',
|
||||
'video_processing',
|
||||
'<(webrtc_root)/common_video/common_video.gyp:common_video',
|
||||
],
|
||||
'include_dirs': [
|
||||
'../../../interface',
|
||||
'../../codecs/vp8/include',
|
||||
'../../../../system_wrappers/interface',
|
||||
'../../../../common_video/interface',
|
||||
'../source',
|
||||
],
|
||||
'sources': [
|
||||
# headers
|
||||
'../test/codec_database_test.h',
|
||||
'../test/generic_codec_test.h',
|
||||
'../test/jitter_estimate_test.h',
|
||||
'../test/media_opt_test.h',
|
||||
'../test/mt_test_common.h',
|
||||
'../test/normal_test.h',
|
||||
'../test/quality_modes_test.h',
|
||||
'../test/receiver_tests.h',
|
||||
'../test/release_test.h',
|
||||
'../test/rtp_player.h',
|
||||
'../test/test_callbacks.h',
|
||||
'../test/test_util.h',
|
||||
'../test/video_source.h',
|
||||
|
||||
# sources
|
||||
'../test/codec_database_test.cc',
|
||||
'../test/decode_from_storage_test.cc',
|
||||
'../test/generic_codec_test.cc',
|
||||
'../test/jitter_buffer_test.cc',
|
||||
'../test/media_opt_test.cc',
|
||||
'../test/mt_test_common.cc',
|
||||
'../test/mt_rx_tx_test.cc',
|
||||
'../test/normal_test.cc',
|
||||
'../test/quality_modes_test.cc',
|
||||
'../test/receiver_timing_tests.cc',
|
||||
'../test/rtp_player.cc',
|
||||
'../test/test_callbacks.cc',
|
||||
'../test/test_util.cc',
|
||||
'../test/tester_main.cc',
|
||||
'../test/video_rtp_play_mt.cc',
|
||||
'../test/video_rtp_play.cc',
|
||||
'../test/video_source.cc',
|
||||
], # source
|
||||
},
|
||||
{
|
||||
'target_name': 'video_coding_unittests',
|
||||
'type': 'executable',
|
||||
'dependencies': [
|
||||
'webrtc_video_coding',
|
||||
'<(webrtc_root)/test/test.gyp:test_support_main',
|
||||
'<(DEPTH)/testing/gtest.gyp:gtest',
|
||||
'<(DEPTH)/testing/gmock.gyp:gmock',
|
||||
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
|
||||
],
|
||||
'include_dirs': [
|
||||
'../../../interface',
|
||||
'../../codecs/interface',
|
||||
],
|
||||
'sources': [
|
||||
'../interface/mock/mock_vcm_callbacks.h',
|
||||
'decoding_state_unittest.cc',
|
||||
'jitter_buffer_unittest.cc',
|
||||
'session_info_unittest.cc',
|
||||
'video_coding_robustness_unittest.cc',
|
||||
'video_coding_impl_unittest.cc',
|
||||
'qm_select_unittest.cc',
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
# Local Variables:
|
||||
# tab-width:2
|
||||
# indent-tabs-mode:nil
|
||||
# End:
|
||||
# vim: set expandtab tabstop=2 shiftwidth=2:
|
||||
Reference in New Issue
Block a user