git-svn-id: http://webrtc.googlecode.com/svn/trunk@4 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
4
modules/video_coding/codecs/OWNERS
Normal file
4
modules/video_coding/codecs/OWNERS
Normal file
@ -0,0 +1,4 @@
|
||||
holmer@google.com
|
||||
mikhal@google.com
|
||||
marpan@google.com
|
||||
hlundin@google.com
|
||||
163
modules/video_coding/codecs/i420/main/interface/i420.h
Normal file
163
modules/video_coding/codecs/i420/main/interface/i420.h
Normal file
@ -0,0 +1,163 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_H_
|
||||
|
||||
#include "video_codec_interface.h"
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
class I420Encoder : public VideoEncoder
|
||||
{
|
||||
public:
|
||||
|
||||
I420Encoder();
|
||||
|
||||
virtual ~I420Encoder();
|
||||
|
||||
// Initialize the encoder with the information from the VideoCodec
|
||||
//
|
||||
// Input:
|
||||
// - codecSettings : Codec settings
|
||||
// - numberOfCores : Number of cores available for the encoder
|
||||
// - maxPayloadSize : The maximum size each payload is allowed
|
||||
// to have. Usually MTU - overhead.
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK
|
||||
// <0 - Error
|
||||
virtual WebRtc_Word32 InitEncode(const VideoCodec* codecSettings, WebRtc_Word32 /*numberOfCores*/, WebRtc_UWord32 /*maxPayloadSize*/);
|
||||
|
||||
// "Encode" an I420 image (as a part of a video stream). The encoded image
|
||||
// will be returned to the user via the encode complete callback.
|
||||
//
|
||||
// Input:
|
||||
// - inputImage : Image to be encoded
|
||||
// - codecSpecificInfo : Pointer to codec specific data
|
||||
// - frameType : Frame type to be sent (Key /Delta) .
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK
|
||||
// <0 - Error
|
||||
virtual WebRtc_Word32 Encode(const RawImage& inputImage, const void* /*codecSpecificInfo*/, VideoFrameType /*frameType*/);
|
||||
|
||||
// Register an encode complete callback object.
|
||||
//
|
||||
// Input:
|
||||
// - callback : Callback object which handles encoded images.
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 RegisterEncodeCompleteCallback(EncodedImageCallback* callback);
|
||||
|
||||
// Free encoder memory.
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 Release();
|
||||
|
||||
// Reset encoder state and prepare for a new call.
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
// <0 - Error
|
||||
virtual WebRtc_Word32 Reset();
|
||||
|
||||
virtual WebRtc_Word32 SetRates(WebRtc_UWord32 /*newBitRate*/, WebRtc_UWord32 /*frameRate*/) {return WEBRTC_VIDEO_CODEC_OK;}
|
||||
|
||||
virtual WebRtc_Word32 SetPacketLoss(WebRtc_UWord32 /*packetLoss*/){return WEBRTC_VIDEO_CODEC_OK;};
|
||||
|
||||
virtual WebRtc_Word32 CodecConfigParameters(WebRtc_UWord8* /*buffer*/, WebRtc_Word32 /*size*/){return WEBRTC_VIDEO_CODEC_OK;};
|
||||
|
||||
// Get version number for the codec.
|
||||
//
|
||||
// Input:
|
||||
// - version : Pointer to allocated char buffer.
|
||||
// - length : Length of provided char buffer.
|
||||
//
|
||||
// Output:
|
||||
// - version : Version number string written to char buffer.
|
||||
//
|
||||
// Return value : >0 - Length of written string.
|
||||
// <0 - Error
|
||||
static WebRtc_Word32 VersionStatic(WebRtc_Word8 *version, WebRtc_Word32 length);
|
||||
virtual WebRtc_Word32 Version(WebRtc_Word8 *version, WebRtc_Word32 length) const;
|
||||
|
||||
private:
|
||||
bool _inited;
|
||||
EncodedImage _encodedImage;
|
||||
EncodedImageCallback* _encodedCompleteCallback;
|
||||
|
||||
}; // end of WebRtcI420DEncoder class
|
||||
|
||||
class I420Decoder : public VideoDecoder
|
||||
{
|
||||
public:
|
||||
|
||||
I420Decoder();
|
||||
|
||||
virtual ~I420Decoder();
|
||||
|
||||
// Initialize the decoder.
|
||||
// The user must notify the codec of width and height values.
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK.
|
||||
// <0 - Errors
|
||||
virtual WebRtc_Word32 InitDecode(const VideoCodec* codecSettings, WebRtc_Word32 /*numberOfCores*/);
|
||||
|
||||
virtual WebRtc_Word32 SetCodecConfigParameters(const WebRtc_UWord8* /*buffer*/, WebRtc_Word32 /*size*/){return WEBRTC_VIDEO_CODEC_OK;};
|
||||
|
||||
// Decode encoded image (as a part of a video stream). The decoded image
|
||||
// will be returned to the user through the decode complete callback.
|
||||
//
|
||||
// Input:
|
||||
// - inputImage : Encoded image to be decoded
|
||||
// - missingFrames : True if one or more frames have been lost
|
||||
// since the previous decode call.
|
||||
// - codecSpecificInfo : pointer to specific codec data
|
||||
// - renderTimeMs : Render time in Ms
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK
|
||||
// <0 - Error
|
||||
virtual WebRtc_Word32 Decode(const EncodedImage& inputImage, bool missingFrames,
|
||||
const void* /*codecSpecificInfo */, WebRtc_Word64 /*renderTimeMs*/);
|
||||
|
||||
// Register a decode complete callback object.
|
||||
//
|
||||
// Input:
|
||||
// - callback : Callback object which handles decoded images.
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 RegisterDecodeCompleteCallback(DecodedImageCallback* callback);
|
||||
|
||||
// Free decoder memory.
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK
|
||||
// <0 - Error
|
||||
virtual WebRtc_Word32 Release();
|
||||
|
||||
// Reset decoder state and prepare for a new call.
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK.
|
||||
// <0 - Error
|
||||
virtual WebRtc_Word32 Reset();
|
||||
|
||||
private:
|
||||
|
||||
RawImage _decodedImage;
|
||||
WebRtc_Word32 _width;
|
||||
WebRtc_Word32 _height;
|
||||
bool _inited;
|
||||
DecodedImageCallback* _decodeCompleteCallback;
|
||||
|
||||
|
||||
}; // end of WebRtcI420Decoder class
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_H_
|
||||
279
modules/video_coding/codecs/i420/main/source/i420.cc
Normal file
279
modules/video_coding/codecs/i420/main/source/i420.cc
Normal file
@ -0,0 +1,279 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "i420.h"
|
||||
#include <string.h>
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Constructs an uninitialized encoder; InitEncode() must be called before use.
I420Encoder::I420Encoder()
    : _inited(false),
      _encodedImage(),
      _encodedCompleteCallback(NULL)
{
}
|
||||
|
||||
// Releases the internally held output buffer.
I420Encoder::~I420Encoder()
{
    _inited = false;
    delete [] _encodedImage._buffer;   // deleting NULL is a no-op
    _encodedImage._buffer = NULL;
}
|
||||
|
||||
// Writes the codec version string into the caller-provided buffer.
//
// Input:
//      - version : Destination buffer.
//      - length  : Capacity of the destination buffer in bytes.
//
// Return value   : Length of the written string (excluding the terminator),
//                  or WEBRTC_VIDEO_CODEC_ERR_PARAMETER if the buffer is too
//                  small to hold the string AND its NUL terminator.
WebRtc_Word32
I420Encoder::VersionStatic(WebRtc_Word8* version, WebRtc_Word32 length)
{
    const WebRtc_Word8* str = "I420 version 1.1.0\n";
    WebRtc_Word32 verLen = (WebRtc_Word32)strlen(str);
    // Require room for the terminating NUL as well: with the old check
    // (verLen > length), verLen == length passed and strncpy left the buffer
    // unterminated.
    if (verLen >= length)
    {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    }
    strncpy(version, str, length); // verLen < length, so output is NUL-padded
    return verLen;
}
|
||||
|
||||
// Instance wrapper around VersionStatic(); see that function for the contract.
WebRtc_Word32
I420Encoder::Version(WebRtc_Word8* version, WebRtc_Word32 length) const
{
    const WebRtc_Word32 written = VersionStatic(version, length);
    return written;
}
|
||||
|
||||
|
||||
|
||||
// Frees the internal output buffer and marks the encoder uninitialized.
//
// Return value : WEBRTC_VIDEO_CODEC_OK always.
WebRtc_Word32
I420Encoder::Release()
{
    delete [] _encodedImage._buffer;   // deleting NULL is a no-op
    _encodedImage._buffer = NULL;
    _inited = false;
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
// Resets encoder state. There is no per-call state to clear in this
// pass-through codec, so this only verifies that InitEncode() has been run.
//
// Return value : WEBRTC_VIDEO_CODEC_OK if initialized,
//                WEBRTC_VIDEO_CODEC_UNINITIALIZED otherwise.
WebRtc_Word32
I420Encoder::Reset()
{
    return _inited ? WEBRTC_VIDEO_CODEC_OK : WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
|
||||
|
||||
// Validates the settings and (re)allocates the internal output buffer to hold
// one I420 frame (3/2 bytes per pixel) of the configured dimensions.
//
// Return value : WEBRTC_VIDEO_CODEC_OK on success,
//                WEBRTC_VIDEO_CODEC_ERR_PARAMETER on bad settings,
//                WEBRTC_VIDEO_CODEC_MEMORY on allocation failure.
WebRtc_Word32
I420Encoder::InitEncode(const VideoCodec* codecSettings,
                        WebRtc_Word32 /*numberOfCores*/,
                        WebRtc_UWord32 /*maxPayloadSize*/)
{
    if (codecSettings == NULL ||
        codecSettings->width < 1 || codecSettings->height < 1)
    {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    }

    // Drop any previously allocated output buffer before sizing a new one.
    delete [] _encodedImage._buffer;
    _encodedImage._buffer = NULL;
    _encodedImage._size = 0;

    const WebRtc_UWord32 frameBytes =
        (3 * codecSettings->width * codecSettings->height) >> 1;
    WebRtc_UWord8* buffer = new WebRtc_UWord8[frameBytes];
    if (buffer == NULL)
    {
        return WEBRTC_VIDEO_CODEC_MEMORY;
    }
    _encodedImage._size = frameBytes;
    _encodedImage._buffer = buffer;

    // Only mark as initialized once the buffer exists.
    _inited = true;
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
|
||||
|
||||
// "Encodes" a frame by copying it verbatim into the internal buffer and
// handing it to the encode-complete callback as a key frame.
//
// Input:
//      - inputImage        : Raw frame to pass through.
//      - codecSpecificInfo : Unused.
//      - frameType         : Unused; every frame is delivered as a key frame.
//
// Return value             : WEBRTC_VIDEO_CODEC_OK on success,
//                            WEBRTC_VIDEO_CODEC_UNINITIALIZED if InitEncode or
//                            callback registration has not happened,
//                            WEBRTC_VIDEO_CODEC_MEMORY on allocation failure.
WebRtc_Word32
I420Encoder::Encode(const RawImage& inputImage,
                    const void* /*codecSpecificInfo*/,
                    VideoFrameType /*frameType*/)
{
    if (!_inited)
    {
        return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    }
    if (_encodedCompleteCallback == NULL)
    {
        return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    }

    _encodedImage._frameType = kKeyFrame; // no coding: every frame stands alone
    _encodedImage._timeStamp = inputImage._timeStamp;
    _encodedImage._encodedHeight = inputImage._height;
    _encodedImage._encodedWidth = inputImage._width;

    if (inputImage._length > _encodedImage._size)
    {
        // Grow the output buffer. Size it for a full I420 frame but never
        // smaller than the incoming payload: the previous code allocated
        // exactly (3*w*h)>>1 bytes and then memcpy'd inputImage._length
        // bytes, overflowing the heap whenever the payload was larger.
        delete [] _encodedImage._buffer;
        _encodedImage._buffer = NULL;
        _encodedImage._size = 0;

        WebRtc_UWord32 newSize = (3 * _encodedImage._encodedWidth *
                                  _encodedImage._encodedHeight) >> 1;
        if (newSize < inputImage._length)
        {
            newSize = inputImage._length;
        }
        WebRtc_UWord8* newBuffer = new WebRtc_UWord8[newSize];
        if (newBuffer == NULL)
        {
            return WEBRTC_VIDEO_CODEC_MEMORY;
        }
        _encodedImage._size = newSize;
        _encodedImage._buffer = newBuffer;
    }
    memcpy(_encodedImage._buffer, inputImage._buffer, inputImage._length);
    _encodedImage._length = inputImage._length;
    _encodedCompleteCallback->Encoded(_encodedImage);
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
|
||||
// Stores (does not own) the callback that receives pass-through frames.
//
// Return value : WEBRTC_VIDEO_CODEC_OK always.
WebRtc_Word32
I420Encoder::RegisterEncodeCompleteCallback(EncodedImageCallback* callback)
{
    _encodedCompleteCallback = callback;
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
|
||||
// Constructs an uninitialized decoder; InitDecode() must be called before use.
I420Decoder::I420Decoder()
    : _decodedImage(),
      _width(0),
      _height(0),
      _inited(false),
      _decodeCompleteCallback(NULL)
{
}
|
||||
|
||||
// Frees all decoder resources via Release().
I420Decoder::~I420Decoder()
{
    Release();
}
|
||||
|
||||
// Resets decoder state. No per-call state exists in this pass-through codec.
//
// Return value : WEBRTC_VIDEO_CODEC_OK always.
WebRtc_Word32
I420Decoder::Reset()
{
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
|
||||
// Validates the settings and records the frame dimensions used to size
// decoded frames.
//
// Return value : WEBRTC_VIDEO_CODEC_OK on success,
//                WEBRTC_VIDEO_CODEC_ERR_PARAMETER on bad settings.
WebRtc_Word32
I420Decoder::InitDecode(const VideoCodec* codecSettings,
                        WebRtc_Word32 /*numberOfCores*/)
{
    if (codecSettings == NULL ||
        codecSettings->width < 1 || codecSettings->height < 1)
    {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    }
    _width = codecSettings->width;
    _height = codecSettings->height;
    _inited = true;
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
// "Decodes" a frame by copying the payload verbatim into the internal
// RawImage and handing it to the decode-complete callback.
//
// Input:
//      - inputImage        : Payload to pass through.
//      - missingFrames     : Unused.
//      - codecSpecificInfo : Unused.
//      - renderTimeMs      : Unused.
//
// Return value             : WEBRTC_VIDEO_CODEC_OK on success,
//                            WEBRTC_VIDEO_CODEC_ERR_PARAMETER on bad input,
//                            WEBRTC_VIDEO_CODEC_ERR_SIZE if the payload
//                            exceeds one configured frame,
//                            WEBRTC_VIDEO_CODEC_UNINITIALIZED if setup is
//                            incomplete,
//                            WEBRTC_VIDEO_CODEC_MEMORY on allocation failure.
WebRtc_Word32
I420Decoder::Decode(const EncodedImage& inputImage, bool /*missingFrames*/,
                    const void* /*codecSpecificInfo*/,
                    WebRtc_Word64 /*renderTimeMs*/)
{
    if (inputImage._buffer == NULL)
    {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    }
    if (_decodeCompleteCallback == NULL)
    {
        return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    }
    if (inputImage._length <= 0)
    {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    }
    if (!_inited)
    {
        return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    }

    const WebRtc_UWord32 requiredSize = (3 * _width * _height) >> 1;
    // Reject payloads larger than one I420 frame: the previous code memcpy'd
    // inputImage._length bytes into a requiredSize buffer with no check,
    // overflowing the heap on oversized input.
    if (inputImage._length > requiredSize)
    {
        return WEBRTC_VIDEO_CODEC_ERR_SIZE;
    }

    // (Re)allocate only when the held buffer is missing or too small, rather
    // than freeing and reallocating on every call.
    if (_decodedImage._buffer == NULL || _decodedImage._size < requiredSize)
    {
        delete [] _decodedImage._buffer;
        _decodedImage._buffer = NULL;
        _decodedImage._size = 0;

        WebRtc_UWord8* newBuffer = new WebRtc_UWord8[requiredSize];
        if (newBuffer == NULL)
        {
            return WEBRTC_VIDEO_CODEC_MEMORY;
        }
        _decodedImage._size = requiredSize;
        _decodedImage._buffer = newBuffer;
    }

    // Set decoded image parameters and copy the payload through.
    _decodedImage._height = _height;
    _decodedImage._width = _width;
    _decodedImage._timeStamp = inputImage._timeStamp;
    memcpy(_decodedImage._buffer, inputImage._buffer, inputImage._length);
    _decodedImage._length = inputImage._length;

    _decodeCompleteCallback->Decoded(_decodedImage);
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
// Stores (does not own) the callback that receives pass-through frames.
//
// Return value : WEBRTC_VIDEO_CODEC_OK always.
WebRtc_Word32
I420Decoder::RegisterDecodeCompleteCallback(DecodedImageCallback* callback)
{
    _decodeCompleteCallback = callback;
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
// Frees the internal decoded-frame buffer and marks the decoder
// uninitialized.
//
// Return value : WEBRTC_VIDEO_CODEC_OK always.
WebRtc_Word32
I420Decoder::Release()
{
    delete [] _decodedImage._buffer;   // deleting NULL is a no-op
    _decodedImage._buffer = NULL;
    _inited = false;
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
}
|
||||
37
modules/video_coding/codecs/i420/main/source/i420.gyp
Normal file
37
modules/video_coding/codecs/i420/main/source/i420.gyp
Normal file
@ -0,0 +1,37 @@
|
||||
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
{
|
||||
'includes': [
|
||||
'../../../../../../common_settings.gypi', # Common settings
|
||||
],
|
||||
'targets': [
|
||||
{
|
||||
'target_name': 'webrtc_i420',
|
||||
'type': '<(library)',
|
||||
'dependencies': [
|
||||
'../../../../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
|
||||
],
|
||||
'include_dirs': [
|
||||
'../interface',
|
||||
'../../../interface',
|
||||
],
|
||||
'direct_dependent_settings': {
|
||||
'include_dirs': [
|
||||
'../interface',
|
||||
],
|
||||
},
|
||||
'sources': [
|
||||
'../interface/i420.h',
|
||||
'i420.cc',
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
# Local Variables:
|
||||
# tab-width:2
|
||||
# indent-tabs-mode:nil
|
||||
# End:
|
||||
# vim: set expandtab tabstop=2 shiftwidth=2:
|
||||
238
modules/video_coding/codecs/interface/video_codec_interface.h
Normal file
238
modules/video_coding/codecs/interface/video_codec_interface.h
Normal file
@ -0,0 +1,238 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef VIDEO_CODEC_INTERFACE_H
|
||||
#define VIDEO_CODEC_INTERFACE_H
|
||||
|
||||
#include "common_types.h"
|
||||
#include "typedefs.h"
|
||||
#include "video_image.h"
|
||||
#include "video_error_codes.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
class RTPFragmentationHeader; // forward declaration
|
||||
|
||||
struct CodecSpecificInfoVP8
|
||||
{
|
||||
bool hasReceivedSLI;
|
||||
WebRtc_UWord8 pictureIdSLI;
|
||||
bool hasReceivedRPSI;
|
||||
WebRtc_UWord64 pictureIdRPSI;
|
||||
};
|
||||
|
||||
union CodecSpecificInfoUnion
|
||||
{
|
||||
CodecSpecificInfoVP8 VP8;
|
||||
};
|
||||
|
||||
struct CodecSpecificInfo
|
||||
{
|
||||
VideoCodecType codecType;
|
||||
CodecSpecificInfoUnion codecSpecific;
|
||||
};
|
||||
|
||||
class EncodedImageCallback
|
||||
{
|
||||
public:
|
||||
virtual ~EncodedImageCallback() {};
|
||||
|
||||
// Callback function which is called when an image has been encoded.
|
||||
//
|
||||
// Input:
|
||||
// - encodedImage : The encoded image
|
||||
//
|
||||
// Return value : > 0, signals to the caller that one or more future frames
|
||||
// should be dropped to keep bit rate or frame rate.
|
||||
// = 0, if OK.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 Encoded(EncodedImage& encodedImage,
|
||||
const void* codecSpecificInfo = NULL,
|
||||
const RTPFragmentationHeader* fragmentation = NULL) = 0;
|
||||
};
|
||||
|
||||
class VideoEncoder
|
||||
{
|
||||
public:
|
||||
virtual ~VideoEncoder() {};
|
||||
|
||||
// Get the encoder version.
|
||||
//
|
||||
// Input:
|
||||
// - length : Length of the version buffer.
|
||||
//
|
||||
// Output:
|
||||
// - version : Buffer where the version string will be written.
|
||||
//
|
||||
// Return value : Number of bytes written to the version buffer.
|
||||
// < 0 on failure.
|
||||
virtual WebRtc_Word32 Version(WebRtc_Word8 *version, WebRtc_Word32 length) const = 0;
|
||||
|
||||
// Initialize the encoder with the information from the VideoCodec.
|
||||
//
|
||||
// Input:
|
||||
// - codecSettings : Codec settings
|
||||
// - numberOfCores : Number of cores available for the encoder
|
||||
// - maxPayloadSize : The maximum size each payload is allowed
|
||||
// to have. Usually MTU - overhead.
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 InitEncode(const VideoCodec* codecSettings, WebRtc_Word32 numberOfCores, WebRtc_UWord32 maxPayloadSize) = 0;
|
||||
|
||||
// Encode an I420 image (as a part of a video stream). The encoded image
|
||||
// will be returned to the user through the encode complete callback.
|
||||
//
|
||||
// Input:
|
||||
// - inputImage : Image to be encoded
|
||||
// - codecSpecificInfo : Pointer to codec specific data
|
||||
// - frameType : The frame type to encode
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 Encode(const RawImage& inputImage,
|
||||
const void* codecSpecificInfo = NULL,
|
||||
VideoFrameType frameType = kDeltaFrame) = 0;
|
||||
|
||||
// Register an encode complete callback object.
|
||||
//
|
||||
// Input:
|
||||
// - callback : Callback object which handles encoded images.
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 RegisterEncodeCompleteCallback(EncodedImageCallback* callback) = 0;
|
||||
|
||||
// Free encoder memory.
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 Release() = 0;
|
||||
|
||||
// Reset encoder state and prepare for a new call.
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 Reset() = 0;
|
||||
|
||||
// Inform the encoder about the packet loss and round trip time on the network
|
||||
// used to decide the best pattern and signaling.
|
||||
//
|
||||
// - packetLoss : Fraction lost
|
||||
// (loss rate in percent = 100 * packetLoss / 255)
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 SetPacketLoss(WebRtc_UWord32 packetLoss) = 0;
|
||||
|
||||
// Inform the encoder about the new target bit rate.
|
||||
//
|
||||
// - newBitRate : New target bit rate
|
||||
// - frameRate : The target frame rate
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 SetRates(WebRtc_UWord32 newBitRate, WebRtc_UWord32 frameRate) = 0;
|
||||
|
||||
// Use this function to enable or disable periodic key frames. Can be useful for codecs
|
||||
// which have other ways of stopping error propagation.
|
||||
//
|
||||
// - enable : Enable or disable periodic key frames
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 SetPeriodicKeyFrames(bool enable) { return WEBRTC_VIDEO_CODEC_ERROR; }
|
||||
|
||||
// Codec configuration data to send out-of-band, i.e. in SIP call setup
|
||||
//
|
||||
// - buffer : Buffer pointer to where the configuration data
|
||||
// should be stored
|
||||
// - size : The size of the buffer in bytes
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 CodecConfigParameters(WebRtc_UWord8* /*buffer*/, WebRtc_Word32 /*size*/) { return WEBRTC_VIDEO_CODEC_ERROR; }
|
||||
};
|
||||
|
||||
class DecodedImageCallback
|
||||
{
|
||||
public:
|
||||
virtual ~DecodedImageCallback() {};
|
||||
|
||||
// Callback function which is called when an image has been decoded.
|
||||
//
|
||||
// Input:
|
||||
// - decodedImage : The decoded image
|
||||
//
|
||||
// Return value : 0 if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 Decoded(RawImage& decodedImage) = 0;
|
||||
|
||||
virtual WebRtc_Word32 ReceivedDecodedReferenceFrame(const WebRtc_UWord64 pictureId) {return -1;}
|
||||
|
||||
virtual WebRtc_Word32 ReceivedDecodedFrame(const WebRtc_UWord64 pictureId) {return -1;}
|
||||
};
|
||||
|
||||
class VideoDecoder
|
||||
{
|
||||
public:
|
||||
virtual ~VideoDecoder() {};
|
||||
|
||||
// Initialize the decoder with the information from the VideoCodec.
|
||||
//
|
||||
// Input:
|
||||
// - inst : Codec settings
|
||||
// - numberOfCores : Number of cores available for the decoder
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 InitDecode(const VideoCodec* codecSettings, WebRtc_Word32 numberOfCores) = 0;
|
||||
|
||||
// Decode encoded image (as a part of a video stream). The decoded image
|
||||
// will be returned to the user through the decode complete callback.
|
||||
//
|
||||
// Input:
|
||||
// - inputImage : Encoded image to be decoded
|
||||
// - missingFrames : True if one or more frames have been lost
|
||||
// since the previous decode call.
|
||||
// - codecSpecificInfo : Pointer to codec specific data
|
||||
// - renderTimeMs : System time to render in milliseconds. Only
|
||||
// used by decoders with internal rendering.
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 Decode(const EncodedImage& inputImage, bool missingFrames, const void* codecSpecificInfo = NULL, WebRtc_Word64 renderTimeMs = -1) = 0;
|
||||
|
||||
// Register an decode complete callback object.
|
||||
//
|
||||
// Input:
|
||||
// - callback : Callback object which handles decoded images.
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 RegisterDecodeCompleteCallback(DecodedImageCallback* callback) = 0;
|
||||
|
||||
// Free decoder memory.
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 Release() = 0;
|
||||
|
||||
// Reset decoder state and prepare for a new call.
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 Reset() = 0;
|
||||
|
||||
// Codec configuration data sent out-of-band, i.e. in SIP call setup
|
||||
//
|
||||
// Input/Output:
|
||||
// - buffer : Buffer pointer to the configuration data
|
||||
// - size : The size of the configuration data in
|
||||
// bytes
|
||||
//
|
||||
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
|
||||
virtual WebRtc_Word32 SetCodecConfigParameters(const WebRtc_UWord8* /*buffer*/, WebRtc_Word32 /*size*/) { return WEBRTC_VIDEO_CODEC_ERROR; }
|
||||
|
||||
// Create a copy of the codec and its internal state.
|
||||
//
|
||||
// Return value : A copy of the instance if OK, NULL otherwise.
|
||||
virtual VideoDecoder* Copy() { return NULL; }
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // VIDEO_CODEC_INTERFACE_H
|
||||
29
modules/video_coding/codecs/interface/video_error_codes.h
Normal file
29
modules/video_coding/codecs/interface/video_error_codes.h
Normal file
@ -0,0 +1,29 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef VIDEO_ERROR_CODES_H
|
||||
#define VIDEO_ERROR_CODES_H
|
||||
|
||||
// NOTE: in sync with video_coding_module_defines.h

// Return values shared by all codec wrappers. Positive values are
// informational, zero is success, negative values are errors.
#define WEBRTC_VIDEO_CODEC_REQUEST_SLI      2
#define WEBRTC_VIDEO_CODEC_OK               0
#define WEBRTC_VIDEO_CODEC_ERROR           -1
#define WEBRTC_VIDEO_CODEC_LEVEL_EXCEEDED  -2
#define WEBRTC_VIDEO_CODEC_MEMORY          -3
#define WEBRTC_VIDEO_CODEC_ERR_PARAMETER   -4
#define WEBRTC_VIDEO_CODEC_ERR_SIZE        -5
#define WEBRTC_VIDEO_CODEC_TIMEOUT         -6
#define WEBRTC_VIDEO_CODEC_UNINITIALIZED   -7
#define WEBRTC_VIDEO_CODEC_ERR_REQUEST_SLI -12
|
||||
|
||||
#endif // VIDEO_ERROR_CODES_H
|
||||
75
modules/video_coding/codecs/interface/video_image.h
Normal file
75
modules/video_coding/codecs/interface/video_image.h
Normal file
@ -0,0 +1,75 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef VIDEO_IMAGE_H
|
||||
#define VIDEO_IMAGE_H
|
||||
|
||||
#include "typedefs.h"
|
||||
#include <stdlib.h>
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Frame classification used throughout the codec interfaces. Values are
// stable and may be serialized, so they are assigned explicitly.
enum VideoFrameType
{
    kKeyFrame    = 0, // self-contained frame
    kDeltaFrame  = 1, // predicted from previous frame(s)
    kGoldenFrame = 2, // VP8 golden reference
    kAltRefFrame = 3, // VP8 alternate reference
    kSkipFrame   = 4  // frame intentionally not coded
};
|
||||
|
||||
class RawImage
|
||||
{
|
||||
public:
|
||||
RawImage() : _width(0), _height(0), _timeStamp(0), _buffer(NULL),
|
||||
_length(0), _size(0) {}
|
||||
|
||||
RawImage(WebRtc_UWord8* buffer, WebRtc_UWord32 length,
|
||||
WebRtc_UWord32 size) :
|
||||
_width(0), _height(0), _timeStamp(0),
|
||||
_buffer(buffer), _length(length), _size(size) {}
|
||||
|
||||
WebRtc_UWord32 _width;
|
||||
WebRtc_UWord32 _height;
|
||||
WebRtc_UWord32 _timeStamp;
|
||||
WebRtc_UWord8* _buffer;
|
||||
WebRtc_UWord32 _length;
|
||||
WebRtc_UWord32 _size;
|
||||
};
|
||||
|
||||
class EncodedImage
|
||||
{
|
||||
public:
|
||||
EncodedImage() :
|
||||
_encodedWidth(0), _encodedHeight(0), _timeStamp(0),
|
||||
_frameType(kDeltaFrame), _buffer(NULL), _length(0), _size(0),
|
||||
_completeFrame(false) {}
|
||||
|
||||
EncodedImage(WebRtc_UWord8* buffer,
|
||||
WebRtc_UWord32 length,
|
||||
WebRtc_UWord32 size) :
|
||||
_encodedWidth(0), _encodedHeight(0), _timeStamp(0),
|
||||
_frameType(kDeltaFrame), _buffer(buffer), _length(length),
|
||||
_size(size), _completeFrame(false) {}
|
||||
|
||||
WebRtc_UWord32 _encodedWidth;
|
||||
WebRtc_UWord32 _encodedHeight;
|
||||
WebRtc_UWord32 _timeStamp;
|
||||
VideoFrameType _frameType;
|
||||
WebRtc_UWord8* _buffer;
|
||||
WebRtc_UWord32 _length;
|
||||
WebRtc_UWord32 _size;
|
||||
bool _completeFrame;
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // VIDEO_IMAGE_H
|
||||
304
modules/video_coding/codecs/test_framework/benchmark.cc
Normal file
304
modules/video_coding/codecs/test_framework/benchmark.cc
Normal file
@ -0,0 +1,304 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "benchmark.h"
|
||||
#include "video_source.h"
|
||||
#include "vplib.h"
|
||||
#include <vector>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <sstream>
|
||||
#include <cassert>
|
||||
#if defined(_WIN32)
|
||||
#include <windows.h>
|
||||
#endif
|
||||
#include "event_wrapper.h"
|
||||
#include "video_codec_interface.h"
|
||||
|
||||
#define SSIM_CALC 0 // by default, don't compute SSIM
|
||||
|
||||
using namespace webrtc;
|
||||
|
||||
// Default benchmark: standard results file and codec name. The base class is
// listed first in the initializer list to match actual initialization order
// (base before members) and avoid -Wreorder warnings.
Benchmark::Benchmark()
    : NormalAsyncTest("Benchmark", "Codec benchmark over a range of test cases", 6),
      _resultsFileName("../../../../testFiles/benchmark.txt"),
      _codecName("Default")
{
}
|
||||
|
||||
// Named benchmark with default results file and codec name. The base class is
// listed first in the initializer list to match actual initialization order
// (base before members) and avoid -Wreorder warnings.
Benchmark::Benchmark(std::string name, std::string description)
    : NormalAsyncTest(name, description, 6),
      _resultsFileName("../../../../testFiles/benchmark.txt"),
      _codecName("Default")
{
}
|
||||
|
||||
// Fully parameterized benchmark. The base class is listed first in the
// initializer list to match actual initialization order (base before members)
// and avoid -Wreorder warnings.
Benchmark::Benchmark(std::string name, std::string description,
                     std::string resultsFileName, std::string codecName)
    : NormalAsyncTest(name, description, 6),
      _resultsFileName(resultsFileName),
      _codecName(codecName)
{
}
|
||||
|
||||
void
|
||||
Benchmark::Perform()
|
||||
{
|
||||
std::vector<const VideoSource*> sources;
|
||||
std::vector<const VideoSource*>::iterator it;
|
||||
|
||||
// Configuration --------------------------
|
||||
sources.push_back(new const VideoSource("test/testFiles/foreman_cif.yuv", kCIF));
|
||||
sources.push_back(new const VideoSource("test/testFiles/akiyo_cif.yuv", kCIF));
|
||||
|
||||
const VideoSize size[] = {kQCIF, kCIF};
|
||||
const int frameRate[] = {10, 15, 30};
|
||||
// Specifies the framerates for which to perform a speed test.
|
||||
const bool speedTestMask[] = {false, false, false};
|
||||
const int bitRate[] = {50, 100, 200, 300, 400, 500, 600, 1000};
|
||||
// Determines the number of iterations to perform to arrive at the speed result.
|
||||
enum { kSpeedTestIterations = 10 };
|
||||
// ----------------------------------------
|
||||
|
||||
const int nFrameRates = sizeof(frameRate)/sizeof(*frameRate);
|
||||
assert(sizeof(speedTestMask)/sizeof(*speedTestMask) == nFrameRates);
|
||||
const int nBitrates = sizeof(bitRate)/sizeof(*bitRate);
|
||||
int testIterations = 10;
|
||||
|
||||
double psnr[nBitrates];
|
||||
double ssim[nBitrates];
|
||||
double fps[nBitrates];
|
||||
double totalEncodeTime[nBitrates];
|
||||
double totalDecodeTime[nBitrates];
|
||||
|
||||
_results.open(_resultsFileName.c_str(), std::fstream::out);
|
||||
_results << GetMagicStr() << std::endl;
|
||||
_results << _codecName << std::endl;
|
||||
|
||||
for (it = sources.begin() ; it < sources.end(); it++)
|
||||
{
|
||||
for (int i = 0; i < sizeof(size)/sizeof(*size); i++)
|
||||
{
|
||||
for (int j = 0; j < nFrameRates; j++)
|
||||
{
|
||||
std::stringstream ss;
|
||||
std::string strFrameRate;
|
||||
std::string outFileName;
|
||||
ss << frameRate[j];
|
||||
ss >> strFrameRate;
|
||||
outFileName = (*it)->GetFilePath() + "/" + (*it)->GetName() + "_" +
|
||||
VideoSource::GetSizeString(size[i]) + "_" + strFrameRate + ".yuv";
|
||||
|
||||
_target = new const VideoSource(outFileName, size[i], frameRate[j]);
|
||||
(*it)->Convert(*_target);
|
||||
if (VideoSource::FileExists(outFileName.c_str()))
|
||||
{
|
||||
_inname = outFileName;
|
||||
}
|
||||
else
|
||||
{
|
||||
_inname = (*it)->GetFileName();
|
||||
}
|
||||
|
||||
std::cout << (*it)->GetName() << ", " << VideoSource::GetSizeString(size[i])
|
||||
<< ", " << frameRate[j] << " fps" << std::endl << "Bitrate [kbps]:";
|
||||
_results << (*it)->GetName() << "," << VideoSource::GetSizeString(size[i])
|
||||
<< "," << frameRate[j] << " fps" << std::endl << "Bitrate [kbps]";
|
||||
|
||||
if (speedTestMask[j])
|
||||
{
|
||||
testIterations = kSpeedTestIterations;
|
||||
}
|
||||
else
|
||||
{
|
||||
testIterations = 1;
|
||||
}
|
||||
|
||||
for (int k = 0; k < nBitrates; k++)
|
||||
{
|
||||
_bitRate = (bitRate[k]);
|
||||
double avgFps = 0.0;
|
||||
totalEncodeTime[k] = 0;
|
||||
totalDecodeTime[k] = 0;
|
||||
|
||||
for (int l = 0; l < testIterations; l++)
|
||||
{
|
||||
PerformNormalTest();
|
||||
_appendNext = false;
|
||||
|
||||
avgFps += _framecnt / (_totalEncodeTime + _totalDecodeTime);
|
||||
totalEncodeTime[k] += _totalEncodeTime;
|
||||
totalDecodeTime[k] += _totalDecodeTime;
|
||||
|
||||
}
|
||||
avgFps /= testIterations;
|
||||
totalEncodeTime[k] /= testIterations;
|
||||
totalDecodeTime[k] /= testIterations;
|
||||
|
||||
double actualBitRate = ActualBitRate(_framecnt) / 1000.0;
|
||||
std::cout << " " << actualBitRate;
|
||||
_results << "," << actualBitRate;
|
||||
PSNRfromFiles(_inname.c_str(), _outname.c_str(), _inst.width,
|
||||
_inst.height, &psnr[k]);
|
||||
if (SSIM_CALC)
|
||||
{
|
||||
SSIMfromFiles(_inname.c_str(), _outname.c_str(), _inst.width,
|
||||
_inst.height, &ssim[k]);
|
||||
|
||||
}
|
||||
fps[k] = avgFps;
|
||||
}
|
||||
std::cout << std::endl << "Y-PSNR [dB]:";
|
||||
_results << std::endl << "Y-PSNR [dB]";
|
||||
for (int k = 0; k < nBitrates; k++)
|
||||
{
|
||||
std::cout << " " << psnr[k];
|
||||
_results << "," << psnr[k];
|
||||
|
||||
}
|
||||
if (SSIM_CALC)
|
||||
{
|
||||
std::cout << std::endl << "SSIM: ";
|
||||
_results << std::endl << "SSIM ";
|
||||
for (int k = 0; k < nBitrates; k++)
|
||||
{
|
||||
std::cout << " " << ssim[k];
|
||||
_results << "," << ssim[k];
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
std::cout << std::endl << "Encode Time[ms]:";
|
||||
_results << std::endl << "Encode Time[ms]";
|
||||
for (int k = 0; k < nBitrates; k++)
|
||||
{
|
||||
std::cout << " " << totalEncodeTime[k];
|
||||
_results << "," << totalEncodeTime[k];
|
||||
|
||||
}
|
||||
|
||||
std::cout << std::endl << "Decode Time[ms]:";
|
||||
_results << std::endl << "Decode Time[ms]";
|
||||
for (int k = 0; k < nBitrates; k++)
|
||||
{
|
||||
std::cout << " " << totalDecodeTime[k];
|
||||
_results << "," << totalDecodeTime[k];
|
||||
|
||||
}
|
||||
|
||||
if (speedTestMask[j])
|
||||
{
|
||||
std::cout << std::endl << "Speed [fps]:";
|
||||
_results << std::endl << "Speed [fps]";
|
||||
for (int k = 0; k < nBitrates; k++)
|
||||
{
|
||||
std::cout << " " << static_cast<int>(fps[k] + 0.5);
|
||||
_results << "," << static_cast<int>(fps[k] + 0.5);
|
||||
}
|
||||
}
|
||||
std::cout << std::endl << std::endl;
|
||||
_results << std::endl << std::endl;
|
||||
|
||||
delete _target;
|
||||
}
|
||||
}
|
||||
delete *it;
|
||||
}
|
||||
_results.close();
|
||||
}
|
||||
|
||||
void
|
||||
Benchmark::PerformNormalTest()
|
||||
{
|
||||
_encoder = GetNewEncoder();
|
||||
_decoder = GetNewDecoder();
|
||||
CodecSettings(_target->GetWidth(), _target->GetHeight(), _target->GetFrameRate(), _bitRate);
|
||||
Setup();
|
||||
EventWrapper* waitEvent = EventWrapper::Create();
|
||||
|
||||
_inputVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);
|
||||
_decodedVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);
|
||||
_encoder->InitEncode(&_inst, 4, 1440);
|
||||
CodecSpecific_InitBitrate();
|
||||
_decoder->InitDecode(&_inst,1);
|
||||
|
||||
FrameQueue frameQueue;
|
||||
VideoEncodeCompleteCallback encCallback(_encodedFile, &frameQueue, *this);
|
||||
VideoDecodeCompleteCallback decCallback(_decodedFile, *this);
|
||||
_encoder->RegisterEncodeCompleteCallback(&encCallback);
|
||||
_decoder->RegisterDecodeCompleteCallback(&decCallback);
|
||||
|
||||
SetCodecSpecificParameters();
|
||||
|
||||
_totalEncodeTime = _totalDecodeTime = 0;
|
||||
_totalEncodePipeTime = _totalDecodePipeTime = 0;
|
||||
bool complete = false;
|
||||
_framecnt = 0;
|
||||
_encFrameCnt = 0;
|
||||
_sumEncBytes = 0;
|
||||
_lengthEncFrame = 0;
|
||||
while (!complete)
|
||||
{
|
||||
complete = Encode();
|
||||
if (!frameQueue.Empty() || complete)
|
||||
{
|
||||
while (!frameQueue.Empty())
|
||||
{
|
||||
_frameToDecode = static_cast<FrameQueueTuple *>(frameQueue.PopFrame());
|
||||
DoPacketLoss();
|
||||
int ret = Decode();
|
||||
delete _frameToDecode;
|
||||
_frameToDecode = NULL;
|
||||
if (ret < 0)
|
||||
{
|
||||
fprintf(stderr,"\n\nError in decoder: %d\n\n", ret);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
else if (ret == 0)
|
||||
{
|
||||
_framecnt++;
|
||||
}
|
||||
else
|
||||
{
|
||||
fprintf(stderr, "\n\nPositive return value from decode!\n\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
waitEvent->Wait(5);
|
||||
}
|
||||
|
||||
_inputVideoBuffer.Free();
|
||||
//_encodedVideoBuffer.Reset(); ?
|
||||
_encodedVideoBuffer.Free();
|
||||
_decodedVideoBuffer.Free();
|
||||
|
||||
_encoder->Release();
|
||||
_decoder->Release();
|
||||
delete waitEvent;
|
||||
delete _encoder;
|
||||
delete _decoder;
|
||||
Teardown();
|
||||
}
|
||||
|
||||
void
|
||||
Benchmark::CodecSpecific_InitBitrate()
|
||||
{
|
||||
if (_bitRate == 0)
|
||||
{
|
||||
_encoder->SetRates(600, _inst.maxFramerate);
|
||||
}
|
||||
else
|
||||
{
|
||||
_encoder->SetRates(_bitRate, _inst.maxFramerate);
|
||||
}
|
||||
}
|
||||
|
||||
40
modules/video_coding/codecs/test_framework/benchmark.h
Normal file
40
modules/video_coding/codecs/test_framework/benchmark.h
Normal file
@ -0,0 +1,40 @@
|
||||
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_BENCHMARK_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_BENCHMARK_H_

#include <fstream>
#include <string>

#include "normal_async_test.h"

class VideoSource;

// Runs a codec benchmark over a matrix of source sequences, resolutions,
// frame rates and bit rates, writing per-case results (bit rate, PSNR,
// optionally SSIM, timing) to a results file. Codec-specific subclasses
// supply the encoder/decoder factories.
class Benchmark : public NormalAsyncTest
{
public:
    Benchmark();
    virtual void Perform();

protected:
    Benchmark(std::string name, std::string description);
    Benchmark(std::string name, std::string description, std::string resultsFileName, std::string codecName);
    // Factory methods implemented by codec-specific subclasses; the caller
    // (PerformNormalTest) takes ownership of the returned objects.
    virtual webrtc::VideoEncoder* GetNewEncoder() = 0;
    virtual webrtc::VideoDecoder* GetNewDecoder() = 0;
    virtual void PerformNormalTest();
    virtual void CodecSpecific_InitBitrate();
    // First line of the results file, identifying its format version.
    static const char* GetMagicStr() { return "#!benchmark1.0"; }

    const VideoSource* _target;          // sequence currently under test
    std::string        _resultsFileName; // where Perform() writes results
    std::ofstream      _results;         // open results stream during Perform()
    std::string        _codecName;       // label written to the results file
};

#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_BENCHMARK_H_
|
||||
|
||||
500
modules/video_coding/codecs/test_framework/exportfig.m
Normal file
500
modules/video_coding/codecs/test_framework/exportfig.m
Normal file
@ -0,0 +1,500 @@
|
||||
function exportfig(varargin)
%EXPORTFIG  Export a figure to Encapsulated Postscript.
%   EXPORTFIG(H, FILENAME) writes the figure H to FILENAME.  H is
%   a figure handle and FILENAME is a string that specifies the
%   name of the output file.
%
%   EXPORTFIG(...,PARAM1,VAL1,PARAM2,VAL2,...) specifies
%   parameters that control various characteristics of the output
%   file.
%
%   Format Parameter:
%     'Format'  one of the strings 'eps','eps2','jpeg','png','preview'
%          specifies the output format. Defaults to 'eps'.
%          The output format 'preview' does not generate an output
%          file but instead creates a new figure window with a
%          preview of the exported figure. In this case the
%          FILENAME parameter is ignored.
%
%     'Preview' one of the strings 'none', 'tiff'
%          specifies a preview for EPS files. Defaults to 'none'.
%
%   Size Parameters:
%     'Width'   a positive scalar
%          specifies the width in the figure's PaperUnits
%     'Height'  a positive scalar
%          specifies the height in the figure's PaperUnits
%
%     Specifying only one dimension sets the other dimension
%     so that the exported aspect ratio is the same as the
%     figure's current aspect ratio.
%     If neither dimension is specified the size defaults to
%     the width and height from the figure's PaperPosition.
%
%   Rendering Parameters:
%     'Color'     one of the strings 'bw', 'gray', 'cmyk'
%         'bw'    specifies that lines and text are exported in
%                 black and all other objects in grayscale
%         'gray'  specifies that all objects are exported in grayscale
%         'cmyk'  specifies that all objects are exported in color
%                 using the CMYK color space
%     'Renderer'  one of the strings 'painters', 'zbuffer', 'opengl'
%         specifies the renderer to use
%     'Resolution'  a positive scalar
%         specifies the resolution in dots-per-inch.
%
%     The default color setting is 'bw'.
%
%   Font Parameters:
%     'FontMode'     one of the strings 'scaled', 'fixed'
%     'FontSize'     a positive scalar
%          in 'scaled' mode multiplies with the font size of each
%          text object to obtain the exported font size
%          in 'fixed' mode specifies the font size of all text
%          objects in points
%     'FontEncoding' one of the strings 'latin1', 'adobe'
%          specifies the character encoding of the font
%
%     If FontMode is 'scaled' but FontSize is not specified then a
%     scaling factor is computed from the ratio of the size of the
%     exported figure to the size of the actual figure. The minimum
%     font size allowed after scaling is 5 points.
%     If FontMode is 'fixed' but FontSize is not specified then the
%     exported font sizes of all text objects is 7 points.
%
%     The default 'FontMode' setting is 'scaled'.
%
%   Line Width Parameters:
%     'LineMode'     one of the strings 'scaled', 'fixed'
%     'LineWidth'    a positive scalar
%          the semantics of LineMode and LineWidth are exactly the
%          same as FontMode and FontSize, except that they apply
%          to line widths instead of font sizes. The minimum line
%          width allowed after scaling is 0.5 points.
%          If LineMode is 'fixed' but LineWidth is not specified
%          then the exported line width of all line objects is 1
%          point.
%
%   Examples:
%     exportfig(gcf,'fig1.eps','height',3);
%       Exports the current figure to the file named 'fig1.eps' with
%       a height of 3 inches (assuming the figure's PaperUnits is
%       inches) and an aspect ratio the same as the figure's aspect
%       ratio on screen.
%
%     exportfig(gcf, 'fig2.eps', 'FontMode', 'fixed',...
%               'FontSize', 10, 'color', 'cmyk' );
%       Exports the current figure to 'fig2.eps' in color with all
%       text in 10 point fonts. The size of the exported figure is
%       the figure's PaperPosition width and height.

if (nargin < 2)
  error('Too few input arguments');
end

% exportfig(H, filename, ...)
H = varargin{1};
if ~ishandle(H) | ~strcmp(get(H,'type'), 'figure')
  error('First argument must be a handle to a figure.');
end
filename = varargin{2};
if ~ischar(filename)
  error('Second argument must be a string.');
end
paramPairs = varargin(3:end);

% Do some validity checking on param-value pairs
if (rem(length(paramPairs),2) ~= 0)
  error(['Invalid input syntax. Optional parameters and values' ...
         ' must be in pairs.']);
end

% Defaults for all options.
format = 'eps';
preview = 'none';
width = -1;
height = -1;
color = 'bw';
fontsize = -1;
fontmode='scaled';
linewidth = -1;
linemode=[];
fontencoding = 'latin1';
renderer = [];
resolution = [];

% Process param-value pairs
args = {};
for k = 1:2:length(paramPairs)
  param = lower(paramPairs{k});
  if (~ischar(param))
    error('Optional parameter names must be strings');
  end
  value = paramPairs{k+1};

  switch (param)
   case 'format'
    format = value;
    if (~strcmp(format,{'eps','eps2','jpeg','png','preview'}))
      error(['Format must be ''eps'', ''eps2'', ''jpeg'', ''png'' or' ...
             ' ''preview''.']);
    end
   case 'preview'
    preview = value;
    if (~strcmp(preview,{'none','tiff'}))
      error('Preview must be ''none'' or ''tiff''.');
    end
   case 'width'
    width = LocalToNum(value);
    if(~LocalIsPositiveScalar(width))
      error('Width must be a numeric scalar > 0');
    end
   case 'height'
    height = LocalToNum(value);
    if(~LocalIsPositiveScalar(height))
      error('Height must be a numeric scalar > 0');
    end
   case 'color'
    color = lower(value);
    if (~strcmp(color,{'bw','gray','cmyk'}))
      error('Color must be ''bw'', ''gray'' or ''cmyk''.');
    end
   case 'fontmode'
    fontmode = lower(value);
    if (~strcmp(fontmode,{'scaled','fixed'}))
      error('FontMode must be ''scaled'' or ''fixed''.');
    end
   case 'fontsize'
    fontsize = LocalToNum(value);
    if(~LocalIsPositiveScalar(fontsize))
      error('FontSize must be a numeric scalar > 0');
    end
   case 'fontencoding'
    fontencoding = lower(value);
    if (~strcmp(fontencoding,{'latin1','adobe'}))
      error('FontEncoding must be ''latin1'' or ''adobe''.');
    end
   case 'linemode'
    linemode = lower(value);
    if (~strcmp(linemode,{'scaled','fixed'}))
      error('LineMode must be ''scaled'' or ''fixed''.');
    end
   case 'linewidth'
    linewidth = LocalToNum(value);
    if(~LocalIsPositiveScalar(linewidth))
      error('LineWidth must be a numeric scalar > 0');
    end
   case 'renderer'
    renderer = lower(value);
    if (~strcmp(renderer,{'painters','zbuffer','opengl'}))
      error('Renderer must be ''painters'', ''zbuffer'' or ''opengl''.');
    end
   case 'resolution'
    resolution = LocalToNum(value);
    % Validate the converted value so numeric strings are accepted,
    % consistent with the other numeric options above.
    if ~(isnumeric(resolution) & (prod(size(resolution)) == 1) & (resolution >= 0))
      error('Resolution must be a numeric scalar >= 0');
    end
   otherwise
    error(['Unrecognized option ' param '.']);
  end
end

% Collect the graphics objects whose properties we may rewrite below.
allLines  = findall(H, 'type', 'line');
allText   = findall(H, 'type', 'text');
allAxes   = findall(H, 'type', 'axes');
allImages = findall(H, 'type', 'image');
allLights = findall(H, 'type', 'light');
allPatch  = findall(H, 'type', 'patch');
allSurf   = findall(H, 'type', 'surface');
allRect   = findall(H, 'type', 'rectangle');
allFont   = [allText; allAxes];
allColor  = [allLines; allText; allAxes; allLights];
allMarker = [allLines; allPatch; allSurf];
allEdge   = [allPatch; allSurf];
allCData  = [allImages; allPatch; allSurf];

% Restore list: every modified (object, property, value) is recorded here.
old.objs = {};
old.prop = {};
old.values = {};

% Process format and preview parameter
showPreview = strcmp(format,'preview');
if showPreview
  format = 'png';
  % tempname (builtin) generates a unique temporary file name.
  filename = [tempname '.png'];
end
if strncmp(format,'eps',3) & ~strcmp(preview,'none')
  args = {args{:}, ['-' preview]};
end

hadError = 0;
try
  % Process size parameters
  paperPos = get(H, 'PaperPosition');
  old = LocalPushOldData(old, H, 'PaperPosition', paperPos);
  figureUnits = get(H, 'Units');
  set(H, 'Units', get(H,'PaperUnits'));
  figurePos = get(H, 'Position');
  aspectRatio = figurePos(3)/figurePos(4);
  set(H, 'Units', figureUnits);
  if (width == -1) & (height == -1)
    width = paperPos(3);
    height = paperPos(4);
  elseif (width == -1)
    width = height * aspectRatio;
  elseif (height == -1)
    height = width / aspectRatio;
  end
  set(H, 'PaperPosition', [0 0 width height]);
  paperPosMode = get(H, 'PaperPositionMode');
  old = LocalPushOldData(old, H, 'PaperPositionMode', paperPosMode);
  set(H, 'PaperPositionMode', 'manual');

  % Process rendering parameters
  switch (color)
   case {'bw', 'gray'}
    if ~strcmp(color,'bw') & strncmp(format,'eps',3)
      format = [format 'c'];
    end
    args = {args{:}, ['-d' format]};

    %compute and set gray colormap
    oldcmap = get(H,'Colormap');
    newgrays = 0.30*oldcmap(:,1) + 0.59*oldcmap(:,2) + 0.11*oldcmap(:,3);
    newcmap = [newgrays newgrays newgrays];
    old = LocalPushOldData(old, H, 'Colormap', oldcmap);
    set(H, 'Colormap', newcmap);

    %compute and set ColorSpec and CData properties
    old = LocalUpdateColors(allColor, 'color', old);
    old = LocalUpdateColors(allAxes, 'xcolor', old);
    old = LocalUpdateColors(allAxes, 'ycolor', old);
    old = LocalUpdateColors(allAxes, 'zcolor', old);
    old = LocalUpdateColors(allMarker, 'MarkerEdgeColor', old);
    old = LocalUpdateColors(allMarker, 'MarkerFaceColor', old);
    old = LocalUpdateColors(allEdge, 'EdgeColor', old);
    old = LocalUpdateColors(allEdge, 'FaceColor', old);
    old = LocalUpdateColors(allCData, 'CData', old);

   case 'cmyk'
    if strncmp(format,'eps',3)
      format = [format 'c'];
      args = {args{:}, ['-d' format], '-cmyk'};
    else
      args = {args{:}, ['-d' format]};
    end
   otherwise
    error('Invalid Color parameter');
  end
  if (~isempty(renderer))
    args = {args{:}, ['-' renderer]};
  end
  if (~isempty(resolution)) | ~strncmp(format,'eps',3)
    if isempty(resolution)
      resolution = 0;
    end
    args = {args{:}, ['-r' int2str(resolution)]};
  end

  % Process font parameters
  if (~isempty(fontmode))
    oldfonts = LocalGetAsCell(allFont,'FontSize');
    switch (fontmode)
     case 'fixed'
      oldfontunits = LocalGetAsCell(allFont,'FontUnits');
      old = LocalPushOldData(old, allFont, {'FontUnits'}, oldfontunits);
      set(allFont,'FontUnits','points');
      if (fontsize == -1)
        set(allFont,'FontSize',7);
      else
        set(allFont,'FontSize',fontsize);
      end
     case 'scaled'
      if (fontsize == -1)
        wscale = width/figurePos(3);
        hscale = height/figurePos(4);
        scale = min(wscale, hscale);
      else
        scale = fontsize;
      end
      newfonts = LocalScale(oldfonts,scale,5);
      set(allFont,{'FontSize'},newfonts);
     otherwise
      error('Invalid FontMode parameter');
    end
    % make sure we push the size after the units
    old = LocalPushOldData(old, allFont, {'FontSize'}, oldfonts);
  end
  if strcmp(fontencoding,'adobe') & strncmp(format,'eps',3)
    args = {args{:}, '-adobecset'};
  end

  % Process linewidth parameters
  if (~isempty(linemode))
    oldlines = LocalGetAsCell(allMarker,'LineWidth');
    old = LocalPushOldData(old, allMarker, {'LineWidth'}, oldlines);
    switch (linemode)
     case 'fixed'
      if (linewidth == -1)
        set(allMarker,'LineWidth',1);
      else
        set(allMarker,'LineWidth',linewidth);
      end
     case 'scaled'
      if (linewidth == -1)
        wscale = width/figurePos(3);
        hscale = height/figurePos(4);
        scale = min(wscale, hscale);
      else
        scale = linewidth;
      end
      newlines = LocalScale(oldlines, scale, 0.5);
      set(allMarker,{'LineWidth'},newlines);
     otherwise
      error('Invalid LineMode parameter');
    end
  end

  % Export
  print(H, filename, args{:});

catch
  hadError = 1;
end

% Restore figure settings
for n=1:length(old.objs)
  set(old.objs{n}, old.prop{n}, old.values{n});
end

if hadError
  error(deblank(lasterr));
end

% Show preview if requested
if showPreview
  X = imread(filename,'png');
  delete(filename);
  f = figure( 'Name', 'Preview', ...
              'Menubar', 'none', ...
              'NumberTitle', 'off', ...
              'Visible', 'off');
  image(X);
  axis image;
  ax = findobj(f, 'type', 'axes');
  set(ax, 'Units', get(H,'PaperUnits'), ...
          'Position', [0 0 width height], ...
          'Visible', 'off');
  set(ax, 'Units', 'pixels');
  axesPos = get(ax,'Position');
  figPos = get(f,'Position');
  rootSize = get(0,'ScreenSize');
  figPos(3:4) = axesPos(3:4);
  if figPos(1) + figPos(3) > rootSize(3)
    figPos(1) = rootSize(3) - figPos(3) - 50;
  end
  if figPos(2) + figPos(4) > rootSize(4)
    figPos(2) = rootSize(4) - figPos(4) - 50;
  end
  set(f, 'Position',figPos, ...
         'Visible', 'on');
end
|
||||
|
||||
%
|
||||
% Local Functions
|
||||
%
|
||||
|
||||
function outData = LocalPushOldData(inData, objs, prop, values)
% Append one (objects, property, values) record to the restore list so the
% caller can undo every property change after printing.
outData.objs   = [inData.objs, {objs}];
outData.prop   = [inData.prop, {prop}];
outData.values = [inData.values, {values}];
|
||||
|
||||
function cellArray = LocalGetAsCell(fig,prop)
% get() returns a bare value for a single handle but a cell array for a
% vector of handles; normalize the result to always be a cell array.
cellArray = get(fig,prop);
if ~iscell(cellArray) & ~isempty(cellArray)
  cellArray = {cellArray};
end
|
||||
|
||||
function newArray = LocalScale(inArray, scale, minValue)
% Multiply each stored size by SCALE, clamping the result below at MINVALUE.
count = length(inArray);
newArray = cell(count,1);
for idx = 1:count
  newArray{idx} = max(minValue, scale*inArray{idx}(1));
end
|
||||
|
||||
function newArray = LocalMapToGray(inArray)
% Map a cell array of ColorSpecs to grayscale. Single-letter color names are
% first converted to RGB; unrecognized strings (e.g. 'none', 'auto') pass
% through unchanged. RGB triples collapse to an [g g g] gray triple.
count = length(inArray);
newArray = cell(count,1);
for idx = 1:count
  color = inArray{idx};
  if (~isempty(color))
    if ischar(color)
      switch color(1)
       case 'y'
        color = [1 1 0];
       case 'm'
        color = [1 0 1];
       case 'c'
        color = [0 1 1];
       case 'r'
        color = [1 0 0];
       case 'g'
        color = [0 1 0];
       case 'b'
        color = [0 0 1];
       case 'w'
        color = [1 1 1];
       case 'k'
        color = [0 0 0];
       otherwise
        % Not a color letter; keep the string as-is.
        newArray{idx} = color;
      end
    end
    if ~ischar(color)
      % NTSC luminance weights.
      color = 0.30*color(1) + 0.59*color(2) + 0.11*color(3);
    end
  end
  if isempty(color) | ischar(color)
    newArray{idx} = color;
  else
    newArray{idx} = [color color color];
  end
end
|
||||
|
||||
function newArray = LocalMapCData(inArray)
% Convert true-color (m-by-n-by-3 double) CData to grayscale; indexed CData
% and anything else is returned unchanged (the colormap handles those).
count = length(inArray);
newArray = cell(count,1);
for idx = 1:count
  color = inArray{idx};
  if (ndims(color) == 3) & isa(color,'double')
    % NTSC luminance weights, applied per pixel.
    gray = 0.30*color(:,:,1) + 0.59*color(:,:,2) + 0.11*color(:,:,3);
    color(:,:,1) = gray;
    color(:,:,2) = gray;
    color(:,:,3) = gray;
  end
  newArray{idx} = color;
end
|
||||
|
||||
function outData = LocalUpdateColors(inArray, prop, inData)
% Record the current values of PROP on every object in INARRAY on the restore
% list, then replace them with their grayscale equivalents.
value = LocalGetAsCell(inArray,prop);
outData.objs   = [inData.objs, {inArray}];
outData.prop   = [inData.prop, {{prop}}];
outData.values = [inData.values, {value}];
if (~isempty(value))
  if strcmp(prop,'CData')
    value = LocalMapCData(value);    % image/patch/surface color data
  else
    value = LocalMapToGray(value);   % plain ColorSpec properties
  end
  set(inArray,{prop},value);
end
|
||||
|
||||
function bool = LocalIsPositiveScalar(value)
% True exactly when VALUE is a numeric 1-by-1 value strictly greater than 0.
bool = isnumeric(value) & prod(size(value)) == 1 & value > 0;
|
||||
|
||||
function value = LocalToNum(value)
% Accept numbers given as strings by converting them in place; numeric
% inputs are returned unchanged.
if ischar(value)
  value = str2num(value);
end
|
||||
563
modules/video_coding/codecs/test_framework/normal_async_test.cc
Normal file
563
modules/video_coding/codecs/test_framework/normal_async_test.cc
Normal file
@ -0,0 +1,563 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "normal_async_test.h"
|
||||
#include "typedefs.h"
|
||||
#include <sstream>
|
||||
#include <assert.h>
|
||||
#include <queue>
|
||||
#include <string.h>
|
||||
#include "tick_util.h"
|
||||
|
||||
using namespace webrtc;
|
||||
|
||||
// Default construction: test number 1, framework defaults otherwise.
// NOTE: bases are constructed before members, so the test number is passed
// to the base as a literal -- the original passed _testNo, which is still
// uninitialized at that point (undefined behavior). _lengthEncFrame is also
// initialized here for consistency with the other constructors.
NormalAsyncTest::NormalAsyncTest()
:
NormalTest("Async Normal Test 1", "A test of normal execution of the codec",
           1),
_requestKeyFrame(false),
_testNo(1),
_lengthEncFrame(0),
_appendNext(false),
_decFrameCnt(0),
_encFrameCnt(0),
_missingFrames(false),
_decodeCompleteTime(0),
_encodeCompleteTime(0),
_rttFrames(0),
_hasReceivedSLI(false),
_hasReceivedPLI(false),
_waitForKey(false)
{
}
|
||||
|
||||
// Construction with an explicit bit rate; test number 1.
// The test number is passed to the base as a literal instead of the
// not-yet-initialized _testNo member (see default constructor note).
NormalAsyncTest::NormalAsyncTest(WebRtc_UWord32 bitRate)
:
NormalTest("Async Normal Test 1", "A test of normal execution of the codec",
           bitRate, 1),
_requestKeyFrame(false),
_testNo(1),
_lengthEncFrame(0),
_appendNext(false),
_decFrameCnt(0),
_encFrameCnt(0),
_missingFrames(false),
_decodeCompleteTime(0),
_encodeCompleteTime(0),
_rttFrames(0),
_hasReceivedSLI(false),
_hasReceivedPLI(false),
_waitForKey(false)
{
}
|
||||
|
||||
// Named construction with an explicit test number.
// The testNo PARAMETER is forwarded to the base; the original forwarded the
// _testNo member, which is uninitialized while the base is constructed.
NormalAsyncTest::NormalAsyncTest(std::string name, std::string description,
                                 unsigned int testNo)
:
NormalTest(name, description, testNo),
_requestKeyFrame(false),
_testNo(testNo),
_lengthEncFrame(0),
_appendNext(false),
_decFrameCnt(0),
_encFrameCnt(0),
_missingFrames(false),
_decodeCompleteTime(0),
_encodeCompleteTime(0),
_rttFrames(0),
_hasReceivedSLI(false),
_hasReceivedPLI(false),
_waitForKey(false)
{
}
|
||||
|
||||
// Named construction with bit rate and test number.
// The testNo PARAMETER is forwarded to the base (not the uninitialized
// _testNo member -- see note on the other constructors).
NormalAsyncTest::NormalAsyncTest(std::string name, std::string description,
                                 WebRtc_UWord32 bitRate, unsigned int testNo)
:
NormalTest(name, description, bitRate, testNo),
_requestKeyFrame(false),
_testNo(testNo),
_lengthEncFrame(0),
_appendNext(false),
_decFrameCnt(0),
_encFrameCnt(0),
_missingFrames(false),
_decodeCompleteTime(0),
_encodeCompleteTime(0),
_rttFrames(0),
_hasReceivedSLI(false),
_hasReceivedPLI(false),
_waitForKey(false)
{
}
|
||||
|
||||
// Named construction with bit rate, test number, and a simulated round-trip
// time expressed in frames (used for feedback-based tests).
// The testNo PARAMETER is forwarded to the base (not the uninitialized
// _testNo member -- see note on the other constructors).
NormalAsyncTest::NormalAsyncTest(std::string name, std::string description,
                                 WebRtc_UWord32 bitRate, unsigned int testNo,
                                 unsigned int rttFrames)
:
NormalTest(name, description, bitRate, testNo),
_requestKeyFrame(false),
_testNo(testNo),
_lengthEncFrame(0),
_appendNext(false),
_decFrameCnt(0),
_encFrameCnt(0),
_missingFrames(false),
_decodeCompleteTime(0),
_encodeCompleteTime(0),
_rttFrames(rttFrames),
_hasReceivedSLI(false),
_hasReceivedPLI(false),
_waitForKey(false)
{
}
|
||||
|
||||
void
|
||||
NormalAsyncTest::Setup()
|
||||
{
|
||||
Test::Setup();
|
||||
std::stringstream ss;
|
||||
std::string strTestNo;
|
||||
ss << _testNo;
|
||||
ss >> strTestNo;
|
||||
|
||||
// Check if settings exist. Otherwise use defaults.
|
||||
if (_outname == "")
|
||||
{
|
||||
_outname = "../../out_normaltest" + strTestNo + ".yuv";
|
||||
}
|
||||
|
||||
if (_encodedName == "")
|
||||
{
|
||||
_encodedName = "../../encoded_normaltest" + strTestNo + ".yuv";
|
||||
}
|
||||
|
||||
if ((_sourceFile = fopen(_inname.c_str(), "rb")) == NULL)
|
||||
{
|
||||
printf("Cannot read file %s.\n", _inname.c_str());
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if ((_encodedFile = fopen(_encodedName.c_str(), "wb")) == NULL)
|
||||
{
|
||||
printf("Cannot write encoded file.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
char mode[3] = "wb";
|
||||
if (_appendNext)
|
||||
{
|
||||
strncpy(mode, "ab", 3);
|
||||
}
|
||||
|
||||
if ((_decodedFile = fopen(_outname.c_str(), mode)) == NULL)
|
||||
{
|
||||
printf("Cannot write file %s.\n", _outname.c_str());
|
||||
exit(1);
|
||||
}
|
||||
|
||||
_appendNext = true;
|
||||
}
|
||||
|
||||
void
|
||||
NormalAsyncTest::Teardown()
|
||||
{
|
||||
Test::Teardown();
|
||||
fclose(_sourceFile);
|
||||
fclose(_decodedFile);
|
||||
}
|
||||
|
||||
// Releases the encoded frame buffer and any codec-specific info attached to
// it (deleting a null pointer is a no-op, so no explicit checks are needed).
FrameQueueTuple::~FrameQueueTuple()
{
    // TODO(holmer): implement virtual function for deleting this and
    // remove warnings
    delete _codecSpecificInfo;
    delete _frame;
}
|
||||
|
||||
// Appends an encoded frame (plus its codec-specific info) to the back of the
// queue; the queue takes ownership of both. Thread-safe (write lock).
void FrameQueue::PushFrame(TestVideoEncodedBuffer *frame,
                           void* codecSpecificInfo)
{
    WriteLockScoped cs(_queueRWLock);
    FrameQueueTuple* tuple = new FrameQueueTuple(frame, codecSpecificInfo);
    _frameBufferQueue.push(tuple);
}
|
||||
|
||||
// Removes and returns the oldest queued frame, or NULL when the queue is
// empty. Ownership transfers to the caller. Thread-safe (write lock).
FrameQueueTuple* FrameQueue::PopFrame()
{
    WriteLockScoped cs(_queueRWLock);
    if (_frameBufferQueue.empty())
    {
        return NULL;
    }
    FrameQueueTuple* front = _frameBufferQueue.front();
    _frameBufferQueue.pop();
    return front;
}
|
||||
|
||||
bool FrameQueue::Empty()
|
||||
{
|
||||
ReadLockScoped cs(_queueRWLock);
|
||||
return _frameBufferQueue.empty();
|
||||
}
|
||||
|
||||
// Total number of encoded bytes delivered through this callback so far.
WebRtc_UWord32 VideoEncodeCompleteCallback::EncodedBytes()
{
    return _encodedBytes;
}
|
||||
|
||||
// Invoked by the encoder for every finished frame. Copies the encoded image
// into a fresh buffer, optionally appends it to the encoded-output file, and
// hands it (with a copy of the codec-specific info) to the frame queue for
// later decoding.
WebRtc_Word32
VideoEncodeCompleteCallback::Encoded(EncodedImage& encodedImage,
                                     const void* codecSpecificInfo,
                                     const webrtc::RTPFragmentationHeader*
                                     fragmentation)
{
    _test.Encoded(encodedImage);
    TestVideoEncodedBuffer* frameBuffer = new TestVideoEncodedBuffer();
    //frameBuffer->VerifyAndAllocate(encodedImage._length);
    frameBuffer->VerifyAndAllocate(encodedImage._size);
    _encodedBytes += encodedImage._length;
    // If _frameQueue would have been a fixed sized buffer we could have asked
    // it for an empty frame and then just do:
    // emptyFrame->SwapBuffers(encodedBuffer);
    // This is how it should be done in Video Engine to save in on memcpys
    void* infoCopy = _test.CopyCodecSpecificInfo(codecSpecificInfo);
    _test.CopyEncodedImage(*frameBuffer, encodedImage, infoCopy);
    if (_encodedFile != NULL)
    {
        fwrite(frameBuffer->GetBuffer(), 1, frameBuffer->GetLength(),
               _encodedFile);
    }
    _frameQueue->PushFrame(frameBuffer, infoCopy);
    return 0;
}
|
||||
|
||||
WebRtc_UWord32 VideoDecodeCompleteCallback::DecodedBytes()
|
||||
{
|
||||
return _decodedBytes;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VideoDecodeCompleteCallback::Decoded(RawImage& image)
|
||||
{
|
||||
_test.Decoded(image);
|
||||
_decodedBytes += image._length;
|
||||
if (_decodedFile != NULL)
|
||||
{
|
||||
fwrite(image._buffer, 1, image._length, _decodedFile);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VideoDecodeCompleteCallback::ReceivedDecodedReferenceFrame(
|
||||
const WebRtc_UWord64 pictureId)
|
||||
{
|
||||
return _test.ReceivedDecodedReferenceFrame(pictureId);
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VideoDecodeCompleteCallback::ReceivedDecodedFrame(
|
||||
const WebRtc_UWord64 pictureId)
|
||||
{
|
||||
return _test.ReceivedDecodedFrame(pictureId);
|
||||
}
|
||||
|
||||
void
|
||||
NormalAsyncTest::Encoded(const EncodedImage& encodedImage)
|
||||
{
|
||||
_encodeCompleteTime = tGetTime();
|
||||
_encFrameCnt++;
|
||||
_totalEncodePipeTime += _encodeCompleteTime -
|
||||
_encodeTimes[encodedImage._timeStamp];
|
||||
}
|
||||
|
||||
void
|
||||
NormalAsyncTest::Decoded(const RawImage& decodedImage)
|
||||
{
|
||||
_decodeCompleteTime = tGetTime();
|
||||
_decFrameCnt++;
|
||||
_totalDecodePipeTime += _decodeCompleteTime -
|
||||
_decodeTimes[decodedImage._timeStamp];
|
||||
_decodedWidth = decodedImage._width;
|
||||
_decodedHeight = decodedImage._height;
|
||||
}
|
||||
|
||||
void
|
||||
NormalAsyncTest::Perform()
|
||||
{
|
||||
_inname = "test/testFiles/foreman_cif.yuv";
|
||||
CodecSettings(352, 288, 30, _bitRate);
|
||||
Setup();
|
||||
_inputVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);
|
||||
_decodedVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);
|
||||
if(_encoder->InitEncode(&_inst, 1, 1440) < 0)
|
||||
{
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
_decoder->InitDecode(&_inst, 1);
|
||||
FrameQueue frameQueue;
|
||||
VideoEncodeCompleteCallback encCallback(_encodedFile, &frameQueue, *this);
|
||||
VideoDecodeCompleteCallback decCallback(_decodedFile, *this);
|
||||
_encoder->RegisterEncodeCompleteCallback(&encCallback);
|
||||
_decoder->RegisterDecodeCompleteCallback(&decCallback);
|
||||
if (SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK)
|
||||
{
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
_totalEncodeTime = _totalDecodeTime = 0;
|
||||
_totalEncodePipeTime = _totalDecodePipeTime = 0;
|
||||
bool complete = false;
|
||||
_framecnt = 0;
|
||||
_encFrameCnt = 0;
|
||||
_decFrameCnt = 0;
|
||||
_sumEncBytes = 0;
|
||||
_lengthEncFrame = 0;
|
||||
double starttime = tGetTime();
|
||||
while (!complete)
|
||||
{
|
||||
CodecSpecific_InitBitrate();
|
||||
complete = Encode();
|
||||
if (!frameQueue.Empty() || complete)
|
||||
{
|
||||
while (!frameQueue.Empty())
|
||||
{
|
||||
_frameToDecode =
|
||||
static_cast<FrameQueueTuple *>(frameQueue.PopFrame());
|
||||
int lost = DoPacketLoss();
|
||||
if (lost == 2)
|
||||
{
|
||||
// Lost the whole frame, continue
|
||||
_missingFrames = true;
|
||||
delete _frameToDecode;
|
||||
_frameToDecode = NULL;
|
||||
continue;
|
||||
}
|
||||
int ret = Decode(lost);
|
||||
delete _frameToDecode;
|
||||
_frameToDecode = NULL;
|
||||
if (ret < 0)
|
||||
{
|
||||
fprintf(stderr,"\n\nError in decoder: %d\n\n", ret);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
else if (ret == 0)
|
||||
{
|
||||
_framecnt++;
|
||||
}
|
||||
else
|
||||
{
|
||||
fprintf(stderr,
|
||||
"\n\nPositive return value from decode!\n\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
double endtime = tGetTime();
|
||||
double totalExecutionTime = endtime - starttime;
|
||||
printf("Total execution time: %.1f s\n", totalExecutionTime);
|
||||
_sumEncBytes = encCallback.EncodedBytes();
|
||||
double actualBitRate = ActualBitRate(_encFrameCnt) / 1000.0;
|
||||
double avgEncTime = _totalEncodeTime / _encFrameCnt;
|
||||
double avgDecTime = _totalDecodeTime / _decFrameCnt;
|
||||
printf("Actual bitrate: %f kbps\n", actualBitRate);
|
||||
printf("Average encode time: %.1f ms\n", 1000 * avgEncTime);
|
||||
printf("Average decode time: %.1f ms\n", 1000 * avgDecTime);
|
||||
printf("Average encode pipeline time: %.1f ms\n",
|
||||
1000 * _totalEncodePipeTime / _encFrameCnt);
|
||||
printf("Average decode pipeline time: %.1f ms\n",
|
||||
1000 * _totalDecodePipeTime / _decFrameCnt);
|
||||
printf("Number of encoded frames: %u\n", _encFrameCnt);
|
||||
printf("Number of decoded frames: %u\n", _decFrameCnt);
|
||||
(*_log) << "Actual bitrate: " << actualBitRate << " kbps\tTarget: " <<
|
||||
_bitRate << " kbps" << std::endl;
|
||||
(*_log) << "Average encode time: " << avgEncTime << " s" << std::endl;
|
||||
(*_log) << "Average decode time: " << avgDecTime << " s" << std::endl;
|
||||
_encoder->Release();
|
||||
_decoder->Release();
|
||||
Teardown();
|
||||
}
|
||||
|
||||
bool
|
||||
NormalAsyncTest::Encode()
|
||||
{
|
||||
_lengthEncFrame = 0;
|
||||
fread(_sourceBuffer, 1, _lengthSourceFrame, _sourceFile);
|
||||
_inputVideoBuffer.CopyBuffer(_lengthSourceFrame, _sourceBuffer);
|
||||
_inputVideoBuffer.SetTimeStamp((unsigned int)
|
||||
(_encFrameCnt * 9e4 / _inst.maxFramerate));
|
||||
_inputVideoBuffer.SetWidth(_inst.width);
|
||||
_inputVideoBuffer.SetHeight(_inst.height);
|
||||
RawImage rawImage;
|
||||
VideoBufferToRawImage(_inputVideoBuffer, rawImage);
|
||||
if (feof(_sourceFile) != 0)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
_encodeCompleteTime = 0;
|
||||
_encodeTimes[rawImage._timeStamp] = tGetTime();
|
||||
VideoFrameType frameType = kDeltaFrame;
|
||||
|
||||
// check SLI queue
|
||||
_hasReceivedSLI = false;
|
||||
while (!_signalSLI.empty() && _signalSLI.front().delay == 0)
|
||||
{
|
||||
// SLI message has arrived at sender side
|
||||
_hasReceivedSLI = true;
|
||||
_pictureIdSLI = _signalSLI.front().id;
|
||||
_signalSLI.pop_front();
|
||||
}
|
||||
// decrement SLI queue times
|
||||
for (std::list<fbSignal>::iterator it = _signalSLI.begin();
|
||||
it !=_signalSLI.end(); it++)
|
||||
{
|
||||
(*it).delay--;
|
||||
}
|
||||
|
||||
// check PLI queue
|
||||
_hasReceivedPLI = false;
|
||||
while (!_signalPLI.empty() && _signalPLI.front().delay == 0)
|
||||
{
|
||||
// PLI message has arrived at sender side
|
||||
_hasReceivedPLI = true;
|
||||
_signalPLI.pop_front();
|
||||
}
|
||||
// decrement PLI queue times
|
||||
for (std::list<fbSignal>::iterator it = _signalPLI.begin();
|
||||
it != _signalPLI.end(); it++)
|
||||
{
|
||||
(*it).delay--;
|
||||
}
|
||||
|
||||
if (_hasReceivedPLI)
|
||||
{
|
||||
// respond to PLI by encoding a key frame
|
||||
frameType = kKeyFrame;
|
||||
_hasReceivedPLI = false;
|
||||
_hasReceivedSLI = false; // don't trigger both at once
|
||||
}
|
||||
|
||||
void* codecSpecificInfo = CreateEncoderSpecificInfo();
|
||||
int ret = _encoder->Encode(rawImage, codecSpecificInfo, frameType);
|
||||
if (codecSpecificInfo != NULL)
|
||||
{
|
||||
// TODO(holmer): implement virtual function for deleting this and
|
||||
// remove warnings
|
||||
delete codecSpecificInfo;
|
||||
codecSpecificInfo = NULL;
|
||||
}
|
||||
if (_encodeCompleteTime > 0)
|
||||
{
|
||||
_totalEncodeTime += _encodeCompleteTime -
|
||||
_encodeTimes[rawImage._timeStamp];
|
||||
}
|
||||
else
|
||||
{
|
||||
_totalEncodeTime += tGetTime() - _encodeTimes[rawImage._timeStamp];
|
||||
}
|
||||
assert(ret >= 0);
|
||||
return false;
|
||||
}
|
||||
|
||||
int
|
||||
NormalAsyncTest::Decode(int lossValue)
|
||||
{
|
||||
_sumEncBytes += _frameToDecode->_frame->GetLength();
|
||||
double starttime = 0;
|
||||
EncodedImage encodedImage;
|
||||
VideoEncodedBufferToEncodedImage(*(_frameToDecode->_frame), encodedImage);
|
||||
encodedImage._completeFrame = !lossValue;
|
||||
_decodeCompleteTime = 0;
|
||||
_decodeTimes[encodedImage._timeStamp] = tGetTime();
|
||||
int ret = WEBRTC_VIDEO_CODEC_OK;
|
||||
if (!_waitForKey || encodedImage._frameType == kKeyFrame)
|
||||
{
|
||||
_waitForKey = false;
|
||||
ret = _decoder->Decode(encodedImage, _missingFrames,
|
||||
_frameToDecode->_codecSpecificInfo);
|
||||
|
||||
if (ret >= 0)
|
||||
{
|
||||
_missingFrames = false;
|
||||
}
|
||||
}
|
||||
|
||||
// check for SLI
|
||||
if (ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI)
|
||||
{
|
||||
// add an SLI feedback to the feedback "queue"
|
||||
// to be delivered to encoder with _rttFrames delay
|
||||
_signalSLI.push_back(fbSignal(_rttFrames,
|
||||
static_cast<WebRtc_UWord8>((_lastDecPictureId) & 0x3f))); // 6 lsb
|
||||
|
||||
ret = WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
else if (ret == WEBRTC_VIDEO_CODEC_ERR_REQUEST_SLI)
|
||||
{
|
||||
// add an SLI feedback to the feedback "queue"
|
||||
// to be delivered to encoder with _rttFrames delay
|
||||
_signalSLI.push_back(fbSignal(_rttFrames,
|
||||
static_cast<WebRtc_UWord8>((_lastDecPictureId + 1) & 0x3f)));//6 lsb
|
||||
|
||||
ret = WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
else if (ret == WEBRTC_VIDEO_CODEC_ERROR)
|
||||
{
|
||||
// wait for new key frame
|
||||
// add an PLI feedback to the feedback "queue"
|
||||
// to be delivered to encoder with _rttFrames delay
|
||||
_signalPLI.push_back(fbSignal(_rttFrames, 0 /* picId not used*/));
|
||||
_waitForKey = true;
|
||||
|
||||
ret = WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
|
||||
if (_decodeCompleteTime > 0)
|
||||
{
|
||||
_totalDecodeTime += _decodeCompleteTime -
|
||||
_decodeTimes[encodedImage._timeStamp];
|
||||
}
|
||||
else
|
||||
{
|
||||
_totalDecodeTime += tGetTime() - _decodeTimes[encodedImage._timeStamp];
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
void NormalAsyncTest::CodecSpecific_InitBitrate()
|
||||
{
|
||||
if (_bitRate == 0)
|
||||
{
|
||||
_encoder->SetRates(600, _inst.maxFramerate);
|
||||
}
|
||||
else
|
||||
{
|
||||
_encoder->SetRates(_bitRate, _inst.maxFramerate);
|
||||
}
|
||||
}
|
||||
|
||||
void NormalAsyncTest::CopyEncodedImage(TestVideoEncodedBuffer& dest,
|
||||
EncodedImage& src,
|
||||
void* /*codecSpecificInfo*/) const
|
||||
{
|
||||
dest.CopyBuffer(src._length, src._buffer);
|
||||
dest.SetFrameType(src._frameType);
|
||||
dest.SetCaptureWidth((WebRtc_UWord16)src._encodedWidth);
|
||||
dest.SetCaptureHeight((WebRtc_UWord16)src._encodedHeight);
|
||||
dest.SetTimeStamp(src._timeStamp);
|
||||
}
|
||||
double
|
||||
NormalAsyncTest::tGetTime()
|
||||
{// return time in sec
|
||||
return ((double) (TickTime::MillisecondTimestamp())/1000);
|
||||
}
|
||||
184
modules/video_coding/codecs/test_framework/normal_async_test.h
Normal file
184
modules/video_coding/codecs/test_framework/normal_async_test.h
Normal file
@ -0,0 +1,184 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_NORMAL_ASYNC_TEST_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_NORMAL_ASYNC_TEST_H_
|
||||
|
||||
#include "common_types.h"
|
||||
|
||||
#include "normal_test.h"
|
||||
#include "rw_lock_wrapper.h"
|
||||
#include <list>
|
||||
#include <map>
|
||||
#include <queue>
|
||||
|
||||
class FrameQueueTuple
|
||||
{
|
||||
public:
|
||||
FrameQueueTuple(TestVideoEncodedBuffer *frame,
|
||||
const void* codecSpecificInfo = NULL)
|
||||
:
|
||||
_frame(frame),
|
||||
_codecSpecificInfo(codecSpecificInfo)
|
||||
{};
|
||||
~FrameQueueTuple();
|
||||
TestVideoEncodedBuffer* _frame;
|
||||
const void* _codecSpecificInfo;
|
||||
};
|
||||
|
||||
class FrameQueue
|
||||
{
|
||||
public:
|
||||
FrameQueue()
|
||||
:
|
||||
_queueRWLock(*webrtc::RWLockWrapper::CreateRWLock()),
|
||||
_prevTS(-1)
|
||||
{
|
||||
}
|
||||
|
||||
~FrameQueue()
|
||||
{
|
||||
delete &_queueRWLock;
|
||||
}
|
||||
|
||||
void PushFrame(TestVideoEncodedBuffer *frame,
|
||||
void* codecSpecificInfo = NULL);
|
||||
FrameQueueTuple* PopFrame();
|
||||
bool Empty();
|
||||
|
||||
private:
|
||||
webrtc::RWLockWrapper& _queueRWLock;
|
||||
std::queue<FrameQueueTuple *> _frameBufferQueue;
|
||||
WebRtc_Word64 _prevTS;
|
||||
};
|
||||
|
||||
// feedback signal to encoder
|
||||
struct fbSignal
|
||||
{
|
||||
fbSignal(int d, WebRtc_UWord8 pid) : delay(d), id(pid) {};
|
||||
int delay;
|
||||
WebRtc_UWord8 id;
|
||||
};
|
||||
|
||||
class NormalAsyncTest : public NormalTest
|
||||
{
|
||||
public:
|
||||
NormalAsyncTest();
|
||||
NormalAsyncTest(WebRtc_UWord32 bitRate);
|
||||
NormalAsyncTest(std::string name, std::string description,
|
||||
unsigned int testNo);
|
||||
NormalAsyncTest(std::string name, std::string description,
|
||||
WebRtc_UWord32 bitRate, unsigned int testNo);
|
||||
NormalAsyncTest(std::string name, std::string description,
|
||||
WebRtc_UWord32 bitRate, unsigned int testNo,
|
||||
unsigned int rttFrames);
|
||||
virtual ~NormalAsyncTest() {};
|
||||
virtual void Perform();
|
||||
virtual void Encoded(const webrtc::EncodedImage& encodedImage);
|
||||
virtual void Decoded(const webrtc::RawImage& decodedImage);
|
||||
virtual void*
|
||||
CopyCodecSpecificInfo(const void* /*codecSpecificInfo */) const
|
||||
{ return NULL; };
|
||||
virtual void CopyEncodedImage(TestVideoEncodedBuffer& dest,
|
||||
webrtc::EncodedImage& src,
|
||||
void* /*codecSpecificInfo*/) const;
|
||||
virtual void* CreateEncoderSpecificInfo() const { return NULL; };
|
||||
virtual WebRtc_Word32
|
||||
ReceivedDecodedReferenceFrame(const WebRtc_UWord64 pictureId) { return 0;};
|
||||
virtual WebRtc_Word32
|
||||
ReceivedDecodedFrame(const WebRtc_UWord64 pictureId) { return 0;};
|
||||
|
||||
protected:
|
||||
virtual void Setup();
|
||||
virtual void Teardown();
|
||||
virtual bool Encode();
|
||||
virtual int Decode(int lossValue = 0);
|
||||
virtual void CodecSpecific_InitBitrate();
|
||||
virtual int SetCodecSpecificParameters() {return 0;};
|
||||
double tGetTime();// return time in sec
|
||||
|
||||
FILE* _sourceFile;
|
||||
FILE* _decodedFile;
|
||||
WebRtc_UWord32 _decodedWidth;
|
||||
WebRtc_UWord32 _decodedHeight;
|
||||
double _totalEncodeTime;
|
||||
double _totalDecodeTime;
|
||||
double _decodeCompleteTime;
|
||||
double _encodeCompleteTime;
|
||||
double _totalEncodePipeTime;
|
||||
double _totalDecodePipeTime;
|
||||
int _framecnt;
|
||||
int _encFrameCnt;
|
||||
int _decFrameCnt;
|
||||
bool _requestKeyFrame;
|
||||
unsigned int _testNo;
|
||||
unsigned int _lengthEncFrame;
|
||||
FrameQueueTuple* _frameToDecode;
|
||||
bool _appendNext;
|
||||
std::map<WebRtc_UWord32, double> _encodeTimes;
|
||||
std::map<WebRtc_UWord32, double> _decodeTimes;
|
||||
bool _missingFrames;
|
||||
std::list<fbSignal> _signalSLI;
|
||||
int _rttFrames;
|
||||
mutable bool _hasReceivedSLI;
|
||||
WebRtc_UWord8 _pictureIdSLI;
|
||||
WebRtc_UWord64 _lastDecPictureId;
|
||||
std::list<fbSignal> _signalPLI;
|
||||
bool _hasReceivedPLI;
|
||||
bool _waitForKey;
|
||||
};
|
||||
|
||||
class VideoEncodeCompleteCallback : public webrtc::EncodedImageCallback
|
||||
{
|
||||
public:
|
||||
VideoEncodeCompleteCallback(FILE* encodedFile, FrameQueue *frameQueue,
|
||||
NormalAsyncTest& test)
|
||||
:
|
||||
_encodedFile(encodedFile),
|
||||
_frameQueue(frameQueue),
|
||||
_test(test),
|
||||
_encodedBytes(0)
|
||||
{}
|
||||
|
||||
WebRtc_Word32
|
||||
Encoded(webrtc::EncodedImage& encodedImage,
|
||||
const void* codecSpecificInfo = NULL,
|
||||
const webrtc::RTPFragmentationHeader* fragmentation = NULL);
|
||||
WebRtc_UWord32 EncodedBytes();
|
||||
private:
|
||||
FILE* _encodedFile;
|
||||
FrameQueue* _frameQueue;
|
||||
NormalAsyncTest& _test;
|
||||
WebRtc_UWord32 _encodedBytes;
|
||||
};
|
||||
|
||||
class VideoDecodeCompleteCallback : public webrtc::DecodedImageCallback
|
||||
{
|
||||
public:
|
||||
VideoDecodeCompleteCallback(FILE* decodedFile, NormalAsyncTest& test)
|
||||
:
|
||||
_decodedFile(decodedFile),
|
||||
_test(test),
|
||||
_decodedBytes(0)
|
||||
{}
|
||||
|
||||
virtual WebRtc_Word32 Decoded(webrtc::RawImage& decodedImage);
|
||||
virtual WebRtc_Word32
|
||||
ReceivedDecodedReferenceFrame(const WebRtc_UWord64 pictureId);
|
||||
virtual WebRtc_Word32 ReceivedDecodedFrame(const WebRtc_UWord64 pictureId);
|
||||
|
||||
WebRtc_UWord32 DecodedBytes();
|
||||
private:
|
||||
FILE* _decodedFile;
|
||||
NormalAsyncTest& _test;
|
||||
WebRtc_UWord32 _decodedBytes;
|
||||
};
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_NORMAL_ASYNC_TEST_H_
|
||||
246
modules/video_coding/codecs/test_framework/normal_test.cc
Normal file
246
modules/video_coding/codecs/test_framework/normal_test.cc
Normal file
@ -0,0 +1,246 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "normal_test.h"
|
||||
#include <time.h>
|
||||
#include <sstream>
|
||||
#include <string.h>
|
||||
|
||||
// Default test: fixed name/description, test number 1.
// The base class is listed first (it is initialized first regardless of
// list order; the original listed it last, triggering -Wreorder), and
// _requestKeyFrame is now initialized here too - the original default
// constructor left it uninitialized while the other two set it to false.
NormalTest::NormalTest()
:
Test("Normal Test 1", "A test of normal execution of the codec"),
_requestKeyFrame(false),
_testNo(1),
_lengthEncFrame(0),
_appendNext(false)
{
}

// Named test with an explicit test number (default bit rate).
NormalTest::NormalTest(std::string name, std::string description,
                       unsigned int testNo)
:
Test(name, description),
_requestKeyFrame(false),
_testNo(testNo),
_lengthEncFrame(0),
_appendNext(false)
{
}

// Named test with an explicit target bit rate and test number.
NormalTest::NormalTest(std::string name, std::string description,
                       WebRtc_UWord32 bitRate, unsigned int testNo)
:
Test(name, description, bitRate),
_requestKeyFrame(false),
_testNo(testNo),
_lengthEncFrame(0),
_appendNext(false)
{
}
|
||||
|
||||
void
|
||||
NormalTest::Setup()
|
||||
{
|
||||
Test::Setup();
|
||||
std::stringstream ss;
|
||||
std::string strTestNo;
|
||||
ss << _testNo;
|
||||
ss >> strTestNo;
|
||||
|
||||
// Check if settings exist. Otherwise use defaults.
|
||||
if (_outname == "")
|
||||
{
|
||||
_outname = "../../out_normaltest" + strTestNo + ".yuv";
|
||||
}
|
||||
|
||||
if (_encodedName == "")
|
||||
{
|
||||
_encodedName = "../../encoded_normaltest" + strTestNo + ".yuv";
|
||||
}
|
||||
|
||||
if ((_sourceFile = fopen(_inname.c_str(), "rb")) == NULL)
|
||||
{
|
||||
printf("Cannot read file %s.\n", _inname.c_str());
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if ((_encodedFile = fopen(_encodedName.c_str(), "wb")) == NULL)
|
||||
{
|
||||
printf("Cannot write encoded file.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
char mode[3] = "wb";
|
||||
if (_appendNext)
|
||||
{
|
||||
strncpy(mode, "ab", 3);
|
||||
}
|
||||
|
||||
if ((_decodedFile = fopen(_outname.c_str(), mode)) == NULL)
|
||||
{
|
||||
printf("Cannot write file %s.\n", _outname.c_str());
|
||||
exit(1);
|
||||
}
|
||||
|
||||
_appendNext = true;
|
||||
}
|
||||
|
||||
void
|
||||
NormalTest::Teardown()
|
||||
{
|
||||
Test::Teardown();
|
||||
fclose(_sourceFile);
|
||||
fclose(_decodedFile);
|
||||
}
|
||||
|
||||
void
|
||||
NormalTest::Perform()
|
||||
{
|
||||
_inname = "../../../../testFiles/foreman.yuv";
|
||||
CodecSettings(352, 288, 30, _bitRate);
|
||||
Setup();
|
||||
|
||||
_inputVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);
|
||||
_decodedVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);
|
||||
_encodedVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);
|
||||
|
||||
_encoder->InitEncode(&_inst, 1, 1460);
|
||||
CodecSpecific_InitBitrate();
|
||||
_decoder->InitDecode(&_inst,1);
|
||||
|
||||
_totalEncodeTime = _totalDecodeTime = 0;
|
||||
_framecnt = 0;
|
||||
_sumEncBytes = 0;
|
||||
_lengthEncFrame = 0;
|
||||
int decodeLength = 0;
|
||||
while (!Encode())
|
||||
{
|
||||
DoPacketLoss();
|
||||
_encodedVideoBuffer.UpdateLength(_encodedVideoBuffer.GetLength());
|
||||
fwrite(_encodedVideoBuffer.GetBuffer(), 1, _encodedVideoBuffer.GetLength(), _encodedFile);
|
||||
decodeLength = Decode();
|
||||
if (decodeLength < 0)
|
||||
{
|
||||
fprintf(stderr,"\n\nError in decoder: %d\n\n", decodeLength);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
fwrite(_decodedVideoBuffer.GetBuffer(), 1, decodeLength, _decodedFile);
|
||||
CodecSpecific_InitBitrate();
|
||||
_framecnt++;
|
||||
}
|
||||
|
||||
// Ensure we empty the decoding queue.
|
||||
while (decodeLength > 0)
|
||||
{
|
||||
decodeLength = Decode();
|
||||
if (decodeLength < 0)
|
||||
{
|
||||
fprintf(stderr,"\n\nError in decoder: %d\n\n", decodeLength);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
fwrite(_decodedVideoBuffer.GetBuffer(), 1, decodeLength, _decodedFile);
|
||||
}
|
||||
|
||||
double actualBitRate = ActualBitRate(_framecnt) / 1000.0;
|
||||
double avgEncTime = _totalEncodeTime / _framecnt;
|
||||
double avgDecTime = _totalDecodeTime / _framecnt;
|
||||
printf("Actual bitrate: %f kbps\n", actualBitRate);
|
||||
printf("Average encode time: %f s\n", avgEncTime);
|
||||
printf("Average decode time: %f s\n", avgDecTime);
|
||||
(*_log) << "Actual bitrate: " << actualBitRate << " kbps\tTarget: " << _bitRate << " kbps" << std::endl;
|
||||
(*_log) << "Average encode time: " << avgEncTime << " s" << std::endl;
|
||||
(*_log) << "Average decode time: " << avgDecTime << " s" << std::endl;
|
||||
|
||||
_inputVideoBuffer.Free();
|
||||
_encodedVideoBuffer.Reset();
|
||||
_decodedVideoBuffer.Free();
|
||||
|
||||
_encoder->Release();
|
||||
_decoder->Release();
|
||||
|
||||
Teardown();
|
||||
}
|
||||
|
||||
bool
|
||||
NormalTest::Encode()
|
||||
{
|
||||
_lengthEncFrame = 0;
|
||||
fread(_sourceBuffer, 1, _lengthSourceFrame, _sourceFile);
|
||||
if (feof(_sourceFile) != 0)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
_inputVideoBuffer.CopyBuffer(_lengthSourceFrame, _sourceBuffer);
|
||||
_inputVideoBuffer.SetTimeStamp(_framecnt);
|
||||
|
||||
// This multiple attempt ridiculousness is to accomodate VP7:
|
||||
// 1. The wrapper can unilaterally reduce the framerate for low bitrates.
|
||||
// 2. The codec inexplicably likes to reject some frames. Perhaps there
|
||||
// is a good reason for this...
|
||||
int encodingAttempts = 0;
|
||||
double starttime = 0;
|
||||
double endtime = 0;
|
||||
while (_lengthEncFrame == 0)
|
||||
{
|
||||
starttime = clock()/(double)CLOCKS_PER_SEC;
|
||||
|
||||
_inputVideoBuffer.SetWidth(_inst.width);
|
||||
_inputVideoBuffer.SetHeight(_inst.height);
|
||||
//_lengthEncFrame = _encoder->Encode(_inputVideoBuffer, _encodedVideoBuffer, _frameInfo,
|
||||
// _inst.frameRate, _requestKeyFrame && !(_framecnt%50));
|
||||
|
||||
endtime = clock()/(double)CLOCKS_PER_SEC;
|
||||
|
||||
_encodedVideoBuffer.SetCaptureHeight(_inst.height);
|
||||
_encodedVideoBuffer.SetCaptureWidth(_inst.width);
|
||||
if (_lengthEncFrame < 0)
|
||||
{
|
||||
(*_log) << "Error in encoder: " << _lengthEncFrame << std::endl;
|
||||
fprintf(stderr,"\n\nError in encoder: %d\n\n", _lengthEncFrame);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
_sumEncBytes += _lengthEncFrame;
|
||||
|
||||
encodingAttempts++;
|
||||
if (encodingAttempts > 50)
|
||||
{
|
||||
(*_log) << "Unable to encode frame: " << _framecnt << std::endl;
|
||||
fprintf(stderr,"\n\nUnable to encode frame: %d\n\n", _framecnt);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
}
|
||||
_totalEncodeTime += endtime - starttime;
|
||||
|
||||
if (encodingAttempts > 1)
|
||||
{
|
||||
(*_log) << encodingAttempts << " attempts required to encode frame: " <<
|
||||
_framecnt + 1 << std::endl;
|
||||
fprintf(stderr,"\n%d attempts required to encode frame: %d\n", encodingAttempts,
|
||||
_framecnt + 1);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
int
|
||||
NormalTest::Decode()
|
||||
{
|
||||
double starttime = clock()/(double)CLOCKS_PER_SEC;
|
||||
_encodedVideoBuffer.SetWidth(_inst.width);
|
||||
_encodedVideoBuffer.SetHeight(_inst.height);
|
||||
int lengthDecFrame = 0;
|
||||
//int lengthDecFrame = _decoder->Decode(_encodedVideoBuffer, _decodedVideoBuffer);
|
||||
//_totalDecodeTime += (double)((clock()/(double)CLOCKS_PER_SEC) - starttime);
|
||||
if (lengthDecFrame < 0)
|
||||
{
|
||||
return lengthDecFrame;
|
||||
}
|
||||
_encodedVideoBuffer.Reset();
|
||||
_encodedVideoBuffer.UpdateLength(0);
|
||||
return lengthDecFrame;
|
||||
}
|
||||
|
||||
46
modules/video_coding/codecs/test_framework/normal_test.h
Normal file
46
modules/video_coding/codecs/test_framework/normal_test.h
Normal file
@ -0,0 +1,46 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_NORMAL_TEST_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_NORMAL_TEST_H_
|
||||
|
||||
#include "test.h"
|
||||
|
||||
class NormalTest : public Test
|
||||
{
|
||||
public:
|
||||
NormalTest();
|
||||
NormalTest(std::string name, std::string description, unsigned int testNo);
|
||||
NormalTest(std::string name, std::string description, WebRtc_UWord32 bitRate, unsigned int testNo);
|
||||
virtual ~NormalTest() {};
|
||||
virtual void Perform();
|
||||
|
||||
protected:
|
||||
virtual void Setup();
|
||||
virtual void Teardown();
|
||||
virtual bool Encode();
|
||||
virtual int Decode();
|
||||
virtual void CodecSpecific_InitBitrate()=0;
|
||||
virtual int DoPacketLoss() {return 0;};
|
||||
|
||||
FILE* _sourceFile;
|
||||
FILE* _decodedFile;
|
||||
FILE* _encodedFile;
|
||||
double _totalEncodeTime;
|
||||
double _totalDecodeTime;
|
||||
unsigned int _framecnt;
|
||||
bool _requestKeyFrame;
|
||||
unsigned int _testNo;
|
||||
int _lengthEncFrame;
|
||||
bool _appendNext;
|
||||
};
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_NORMAL_TEST_H_
|
||||
|
||||
244
modules/video_coding/codecs/test_framework/packet_loss_test.cc
Normal file
244
modules/video_coding/codecs/test_framework/packet_loss_test.cc
Normal file
@ -0,0 +1,244 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "packet_loss_test.h"
|
||||
#include "video_source.h"
|
||||
#include <sstream>
|
||||
#include <cassert>
|
||||
#include <string.h>
|
||||
|
||||
using namespace webrtc;
|
||||
|
||||
// Default: 10% packet loss, no NACK simulation. The base class is listed
// first in each initializer list (it is initialized first regardless of
// list order; the original listed it last, triggering -Wreorder).
PacketLossTest::PacketLossTest()
:
NormalAsyncTest("PacketLossTest", "Encode, remove lost packets, decode",
                300, 5),
_lossRate(0.1),
_lossProbability(0.1),
_lastFrame(NULL),
_lastFrameLength(0)
{
}

// Named variant, same default loss parameters.
PacketLossTest::PacketLossTest(std::string name, std::string description)
:
NormalAsyncTest(name, description, 300, 5),
_lossRate(0.1),
_lossProbability(0.1),
_lastFrame(NULL),
_lastFrameLength(0)
{
}

// Full variant: with NACK simulation the channel carries retransmissions,
// so no packets are actually dropped (_lossProbability = 0) while the
// nominal loss rate still shapes the bit rate.
PacketLossTest::PacketLossTest(std::string name, std::string description,
                               double lossRate, bool useNack,
                               unsigned int rttFrames /* = 0*/)
:
NormalAsyncTest(name, description, 300, 5, rttFrames),
_lossRate(lossRate),
_lastFrame(NULL),
_lastFrameLength(0)
{
    assert(lossRate >= 0 && lossRate <= 1);
    if (useNack)
    {
        _lossProbability = 0;
    }
    else
    {
        _lossProbability = lossRate;
    }
}
|
||||
|
||||
void
|
||||
PacketLossTest::Encoded(const EncodedImage& encodedImage)
|
||||
{
|
||||
// push timestamp to queue
|
||||
_frameQueue.push_back(encodedImage._timeStamp);
|
||||
NormalAsyncTest::Encoded(encodedImage);
|
||||
}
|
||||
|
||||
void
|
||||
PacketLossTest::Decoded(const RawImage& decodedImage)
|
||||
{
|
||||
// check the frame queue if any frames have gone missing
|
||||
assert(!_frameQueue.empty()); // decoded frame is not in the queue
|
||||
while(_frameQueue.front() < decodedImage._timeStamp)
|
||||
{
|
||||
// this frame is missing
|
||||
// write previous decoded frame again (frame freeze)
|
||||
if (_decodedFile && _lastFrame)
|
||||
{
|
||||
fwrite(_lastFrame, 1, _lastFrameLength, _decodedFile);
|
||||
}
|
||||
|
||||
// remove frame from queue
|
||||
_frameQueue.pop_front();
|
||||
}
|
||||
assert(_frameQueue.front() == decodedImage._timeStamp); // decoded frame is not in the queue
|
||||
|
||||
// pop the current frame
|
||||
_frameQueue.pop_front();
|
||||
|
||||
// save image for future freeze-frame
|
||||
if (_lastFrameLength < decodedImage._length)
|
||||
{
|
||||
if (_lastFrame) delete [] _lastFrame;
|
||||
|
||||
_lastFrame = new WebRtc_UWord8[decodedImage._length];
|
||||
}
|
||||
memcpy(_lastFrame, decodedImage._buffer, decodedImage._length);
|
||||
_lastFrameLength = decodedImage._length;
|
||||
|
||||
NormalAsyncTest::Decoded(decodedImage);
|
||||
}
|
||||
|
||||
void
|
||||
PacketLossTest::Teardown()
|
||||
{
|
||||
if (_totalKept + _totalThrown > 0)
|
||||
{
|
||||
printf("Target packet loss rate: %.4f\n", _lossProbability);
|
||||
printf("Actual packet loss rate: %.4f\n", (_totalThrown * 1.0f) / (_totalKept + _totalThrown));
|
||||
printf("Channel rate: %.2f kbps\n",
|
||||
0.001 * 8.0 * _sumChannelBytes / ((_framecnt * 1.0f) / _inst.maxFramerate));
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("No packet losses inflicted\n");
|
||||
}
|
||||
|
||||
NormalAsyncTest::Teardown();
|
||||
}
|
||||
|
||||
void
|
||||
PacketLossTest::Setup()
|
||||
{
|
||||
const VideoSource source(_inname, _inst.width, _inst.height, _inst.maxFramerate);
|
||||
|
||||
std::stringstream ss;
|
||||
std::string lossRateStr;
|
||||
ss << _lossRate;
|
||||
ss >> lossRateStr;
|
||||
_encodedName = "../../" + source.GetName() + "-" + lossRateStr;
|
||||
_outname = "../../out-" + source.GetName() + "-" + lossRateStr;
|
||||
|
||||
if (_lossProbability != _lossRate)
|
||||
{
|
||||
_encodedName += "-nack";
|
||||
_outname += "-nack";
|
||||
}
|
||||
_encodedName += ".vp8";
|
||||
_outname += ".yuv";
|
||||
|
||||
_totalKept = 0;
|
||||
_totalThrown = 0;
|
||||
_sumChannelBytes = 0;
|
||||
|
||||
NormalAsyncTest::Setup();
|
||||
}
|
||||
|
||||
void
|
||||
PacketLossTest::CodecSpecific_InitBitrate()
|
||||
{
|
||||
assert(_bitRate > 0);
|
||||
WebRtc_UWord32 simulatedBitRate;
|
||||
if (_lossProbability != _lossRate)
|
||||
{
|
||||
// Simulating NACK
|
||||
simulatedBitRate = WebRtc_UWord32(_bitRate / (1 + _lossRate));
|
||||
}
|
||||
else
|
||||
{
|
||||
simulatedBitRate = _bitRate;
|
||||
}
|
||||
_encoder->SetPacketLoss((WebRtc_UWord32)(_lossProbability * 255.0));
|
||||
_encoder->SetRates(simulatedBitRate, _inst.maxFramerate);
|
||||
}
|
||||
|
||||
// Simulates channel loss on the current encoded frame: splits it into
// MTU-sized (1500 byte) packets, drops each independently with probability
// _lossProbability, and reassembles the surviving packets back into the
// frame buffer handed to the decoder.
// Returns 0 = no loss, 1 = partial loss, 2 = whole frame lost.
int PacketLossTest::DoPacketLoss()
{
    // Only packet loss for delta frames
    if (_frameToDecode->_frame->GetLength() == 0 || _frameToDecode->_frame->GetFrameType() != kDeltaFrame)
    {
        _sumChannelBytes += _frameToDecode->_frame->GetLength();
        return 0;
    }
    //printf("Encoded: %d bytes\n", _encodedVideoBuffer.GetLength());
    unsigned char *packet = NULL;
    // Scratch buffer that receives only the packets that survive the channel.
    TestVideoEncodedBuffer newEncBuf;
    newEncBuf.VerifyAndAllocate(_lengthSourceFrame);
    _inBufIdx = 0;
    _outBufIdx = 0;
    int size = 1;
    int kept = 0;
    int thrown = 0;
    int count = 0; // NOTE(review): unused — candidate for removal
    while ((size = NextPacket(1500, &packet)) > 0)
    {
        if (!PacketLoss(_lossProbability))
        {
            InsertPacket(&newEncBuf, packet, size);
            kept++;
        }
        else
        {
            // Use the ByteLoss function if you want to lose only
            // parts of a packet, and not the whole packet.

            //int size2 = ByteLoss(size, packet, 15);
            thrown++;
            //if (size2 != size)
            //{
            //    InsertPacket(&newEncBuf, packet, size2);
            //}
        }
    }
    int lossResult  = (thrown!=0);   // 0 = no loss 1 = loss(es)
    if (lossResult)
    {
        lossResult += (kept==0);     // 2 = all lost = full frame
    }
    // Replace the frame contents with the reassembled (possibly truncated) data.
    _frameToDecode->_frame->CopyBuffer(newEncBuf.GetLength(), newEncBuf.GetBuffer());
    _sumChannelBytes += newEncBuf.GetLength();
    _totalKept += kept;
    _totalThrown += thrown;
    return lossResult;
    //printf("Threw away: %d out of %d packets\n", thrown, thrown + kept);
    //printf("Encoded left: %d bytes\n", _encodedVideoBuffer.GetLength());
}
|
||||
|
||||
// Returns a pointer (via *pkg) to the next packet of at most `mtu` bytes
// from the current encoded frame, advancing the read cursor _inBufIdx.
// The last packet may be shorter than mtu. Returns the packet size, or 0
// when the frame is exhausted.
int PacketLossTest::NextPacket(int mtu, unsigned char **pkg)
{
    unsigned char *buf = _frameToDecode->_frame->GetBuffer();
    *pkg = buf + _inBufIdx;
    if (static_cast<long>(_frameToDecode->_frame->GetLength()) - _inBufIdx <= mtu)
    {
        // Final (possibly partial) packet of this frame.
        int size = _frameToDecode->_frame->GetLength() - _inBufIdx;
        _inBufIdx = _frameToDecode->_frame->GetLength();
        return size;
    }
    _inBufIdx += mtu;
    return mtu;
}
|
||||
|
||||
// Hook for simulating partial (intra-packet) byte loss. Currently a
// deliberate no-op: the payload is untouched and the original size is
// returned. Subclasses may override to corrupt/truncate `pkg`.
int PacketLossTest::ByteLoss(int size, unsigned char *pkg, int bytesToLose)
{
    return size;
}
|
||||
|
||||
// Appends a surviving packet to the reassembly buffer at the write cursor
// _outBufIdx, updating the buffer's logical length. Logs and drops the
// packet if the buffer would overflow.
void PacketLossTest::InsertPacket(TestVideoEncodedBuffer *buf, unsigned char *pkg, int size)
{
    if (static_cast<long>(buf->GetSize()) - _outBufIdx < size)
    {
        printf("InsertPacket error!\n");
        return;
    }
    memcpy(buf->GetBuffer() + _outBufIdx, pkg, size);
    buf->UpdateLength(buf->GetLength() + size);
    _outBufIdx += size;
}
|
||||
@ -0,0 +1,59 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_PACKET_LOSS_TEST_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_PACKET_LOSS_TEST_H_
|
||||
|
||||
#include <list>
|
||||
|
||||
#include "normal_async_test.h"
|
||||
|
||||
// Asynchronous codec test that simulates packet loss on the channel between
// encoder and decoder. Can also simulate NACK (lossless channel at reduced
// effective bitrate) — see _lossRate/_lossProbability below.
class PacketLossTest : public NormalAsyncTest
{
public:
    PacketLossTest();
    virtual ~PacketLossTest() {if(_lastFrame) {delete [] _lastFrame; _lastFrame = NULL;}}
    virtual void Encoded(const webrtc::EncodedImage& encodedImage);
    virtual void Decoded(const webrtc::RawImage& decodedImage);
protected:
    PacketLossTest(std::string name, std::string description);
    PacketLossTest(std::string name,
                   std::string description,
                   double lossRate,
                   bool useNack,
                   unsigned int rttFrames = 0);

    // Builds file names from clip + loss rate and resets loss counters.
    virtual void Setup();
    // Prints target vs. actual loss rates and the channel rate.
    virtual void Teardown();
    virtual void CodecSpecific_InitBitrate();
    // Drops packets of the current frame.
    // Returns 0 = no loss, 1 = partial loss, 2 = whole frame lost.
    virtual int DoPacketLoss();
    // Yields the next packet (<= size bytes) of the current frame.
    virtual int NextPacket(int size, unsigned char **pkg);
    // Hook for intra-packet byte loss; default implementation is a no-op.
    virtual int ByteLoss(int size, unsigned char *pkg, int bytesToLose);
    // Appends a surviving packet to the reassembly buffer.
    virtual void InsertPacket(TestVideoEncodedBuffer *buf, unsigned char *pkg, int size);
    int _inBufIdx;   // read cursor into the current encoded frame
    int _outBufIdx;  // write cursor into the reassembled frame

    // When NACK is being simulated _lossProbabilty is zero,
    // otherwise it is set equal to _lossRate.
    // Desired channel loss rate.
    double _lossRate;
    // Probability used to simulate packet drops.
    double _lossProbability;

    int _totalKept;        // packets delivered over the whole run
    int _totalThrown;      // packets dropped over the whole run
    int _sumChannelBytes;  // bytes that survived the channel
    std::list<WebRtc_UWord32> _frameQueue;
    WebRtc_UWord8* _lastFrame;       // copy of the last decoded frame
    WebRtc_UWord32 _lastFrameLength;
};
|
||||
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_PACKET_LOSS_TEST_H_
|
||||
290
modules/video_coding/codecs/test_framework/performance_test.cc
Normal file
290
modules/video_coding/codecs/test_framework/performance_test.cc
Normal file
@ -0,0 +1,290 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "performance_test.h"
|
||||
#include "tick_util.h"
|
||||
#include <assert.h>
|
||||
|
||||
using namespace webrtc;
|
||||
|
||||
#define NUM_FRAMES 300
|
||||
|
||||
// Single-codec constructor, used for the per-codec sub-tests spawned by the
// multi-codec constructor below (_numCodecs == 0 marks a sub-test).
// _encodeEvents gets a one-slot array; slot 0 is assigned by the parent in
// Setup(). NOTE(review): base initializer is listed last but runs first —
// actual initialization follows declaration order, not list order.
PerformanceTest::PerformanceTest(WebRtc_UWord32 bitRate)
:
_numCodecs(0),
_tests(NULL),
_encoders(NULL),
_decoders(NULL),
_threads(NULL),
_rawImageLock(NULL),
_encodeEvents(new EventWrapper*[1]),
_stopped(true),
_encodeCompleteCallback(NULL),
_decodeCompleteCallback(NULL),
NormalAsyncTest(bitRate)
{
}
|
||||
|
||||
// Main-test constructor: allocates per-codec arrays and creates one
// single-codec sub-test plus one encode event per codec. Encoders, decoders
// and threads themselves are created later in Setup().
PerformanceTest::PerformanceTest(WebRtc_UWord32 bitRate, WebRtc_UWord8 numCodecs)
:
_numCodecs(numCodecs),
_tests(new PerformanceTest*[_numCodecs]),
_encoders(new VideoEncoder*[_numCodecs]),
_decoders(new VideoDecoder*[_numCodecs]),
_threads(new ThreadWrapper*[_numCodecs]),
_rawImageLock(RWLockWrapper::CreateRWLock()),
_encodeEvents(new EventWrapper*[_numCodecs]),
_stopped(true),
_encodeCompleteCallback(NULL),
_decodeCompleteCallback(NULL),
NormalAsyncTest(bitRate)
{
    for (int i=0; i < _numCodecs; i++)
    {
        _tests[i] = new PerformanceTest(bitRate);
        _encodeEvents[i] = EventWrapper::Create();
    }
}
|
||||
|
||||
// Frees the per-codec pointer arrays and the image lock. The objects the
// array elements point to (encoders, decoders, sub-tests, events, threads)
// are deleted in Teardown(), not here.
PerformanceTest::~PerformanceTest()
{
    if (_encoders != NULL)
    {
        delete [] _encoders;
    }
    if (_decoders != NULL)
    {
        delete [] _decoders;
    }
    if (_tests != NULL)
    {
        delete [] _tests;
    }
    if (_threads != NULL)
    {
        delete [] _threads;
    }
    if (_rawImageLock != NULL)
    {
        delete _rawImageLock;
    }
    if (_encodeEvents != NULL)
    {
        delete [] _encodeEvents;
    }
}
|
||||
|
||||
// Creates and initializes one encoder/decoder pair per codec, wires each
// into its sub-test (sharing the main test's raw-image lock and one encode
// event), and starts one worker thread per codec running RunThread().
void
PerformanceTest::Setup()
{
    _inname = "../../../../testFiles/foreman.yuv";
    NormalAsyncTest::Setup(); // Setup input and output files
    CodecSettings(352, 288, 30, _bitRate); // common to all codecs
    for (int i=0; i < _numCodecs; i++)
    {
        // CreateEncoder()/CreateDecoder() return NULL in this base class;
        // a codec-specific subclass must override them.
        _encoders[i] = CreateEncoder();
        _decoders[i] = CreateDecoder();
        if (_encoders[i] == NULL)
        {
            printf("Must create a codec specific test!\n");
            exit(EXIT_FAILURE);
        }
        if(_encoders[i]->InitEncode(&_inst, 4, 1440) < 0)
        {
            exit(EXIT_FAILURE);
        }
        if (_decoders[i]->InitDecode(&_inst, 1))
        {
            exit(EXIT_FAILURE);
        }
        _tests[i]->SetEncoder(_encoders[i]);
        _tests[i]->SetDecoder(_decoders[i]);
        // Sub-tests share the main test's lock; slot 0 of a sub-test's
        // event array is the per-codec event owned by the main test.
        _tests[i]->_rawImageLock = _rawImageLock;
        _encodeEvents[i]->Reset();
        _tests[i]->_encodeEvents[0] = _encodeEvents[i];
        _tests[i]->_inst = _inst;
        _threads[i] = ThreadWrapper::CreateThread(PerformanceTest::RunThread, _tests[i]);
        unsigned int id = 0;
        _tests[i]->_stopped = false;
        _threads[i]->Start(id);
    }
}
|
||||
|
||||
// Main test loop: reads NUM_FRAMES frames from the source file (looping at
// EOF), publishes each frame to all codec worker threads under the write
// lock, paces them at ~30 fps, then stops the workers and reports the total
// wall-clock time.
void
PerformanceTest::Perform()
{
    Setup();
    EventWrapper& sleepEvent = *EventWrapper::Create();
    const WebRtc_Word64 startTime = TickTime::MillisecondTimestamp();
    for (int i=0; i < NUM_FRAMES; i++)
    {
        {
            // Read a new frame from file
            WriteLockScoped imageLock(*_rawImageLock);
            _lengthEncFrame = 0;
            fread(_sourceBuffer, 1, _lengthSourceFrame, _sourceFile);
            if (feof(_sourceFile) != 0)
            {
                rewind(_sourceFile);
            }
            _inputVideoBuffer.VerifyAndAllocate(_inst.width*_inst.height*3/2);
            _inputVideoBuffer.CopyBuffer(_lengthSourceFrame, _sourceBuffer);
            // RTP timestamps run at 90 kHz.
            _inputVideoBuffer.SetTimeStamp((unsigned int) (_encFrameCnt * 9e4 / static_cast<float>(_inst.maxFramerate)));
            _inputVideoBuffer.SetWidth(_inst.width);
            _inputVideoBuffer.SetHeight(_inst.height);
            for (int i=0; i < _numCodecs; i++)
            {
                // Share the frame by pointer (no copy) and wake the worker.
                _tests[i]->_inputVideoBuffer.CopyPointer(_inputVideoBuffer);
                _encodeEvents[i]->Set();
            }
        }
        if (i < NUM_FRAMES - 1)
        {
            // Pace delivery at roughly 30 fps.
            sleepEvent.Wait(33);
        }
    }
    for (int i=0; i < _numCodecs; i++)
    {
        // Signal once more so a worker blocked in Wait() observes _stopped.
        _tests[i]->_stopped = true;
        _encodeEvents[i]->Set();
        _threads[i]->Stop();
    }
    const WebRtc_UWord32 totalTime =
            static_cast<WebRtc_UWord32>(TickTime::MillisecondTimestamp() - startTime);
    printf("Total time: %u\n", totalTime);
    delete &sleepEvent;
    Teardown();
}
|
||||
|
||||
// Releases callbacks, then (main test only) frees the shared input buffer
// under the write lock, and finally releases/deletes every per-codec
// encoder, decoder, sub-test, event and thread.
void PerformanceTest::Teardown()
{
    if (_encodeCompleteCallback != NULL)
    {
        delete _encodeCompleteCallback;
    }
    if (_decodeCompleteCallback != NULL)
    {
        delete _decodeCompleteCallback;
    }
    // main test only, all others have numCodecs = 0:
    if (_numCodecs > 0)
    {
        WriteLockScoped imageLock(*_rawImageLock);
        _inputVideoBuffer.Free();
        NormalAsyncTest::Teardown();
    }
    for (int i=0; i < _numCodecs; i++)
    {
        _encoders[i]->Release();
        delete _encoders[i];
        _decoders[i]->Release();
        delete _decoders[i];
        // Sub-tests only borrowed the frame pointer and the lock; detach
        // both before tearing them down so they don't free shared state.
        _tests[i]->_inputVideoBuffer.ClearPointer();
        _tests[i]->_rawImageLock = NULL;
        _tests[i]->Teardown();
        delete _tests[i];
        delete _encodeEvents[i];
        delete _threads[i];
    }
}
|
||||
|
||||
bool
|
||||
PerformanceTest::RunThread(void* obj)
|
||||
{
|
||||
PerformanceTest& test = *static_cast<PerformanceTest*>(obj);
|
||||
return test.PerformSingleTest();
|
||||
}
|
||||
|
||||
bool
|
||||
PerformanceTest::PerformSingleTest()
|
||||
{
|
||||
if (_encodeCompleteCallback == NULL)
|
||||
{
|
||||
_encodeCompleteCallback = new VideoEncodeCompleteCallback(NULL, &_frameQueue, *this);
|
||||
_encoder->RegisterEncodeCompleteCallback(_encodeCompleteCallback);
|
||||
}
|
||||
if (_decodeCompleteCallback == NULL)
|
||||
{
|
||||
_decodeCompleteCallback = new VideoDecodeCompleteCallback(NULL, *this);
|
||||
_decoder->RegisterDecodeCompleteCallback(_decodeCompleteCallback);
|
||||
}
|
||||
(*_encodeEvents)->Wait(WEBRTC_EVENT_INFINITE); // The first event is used for every single test
|
||||
CodecSpecific_InitBitrate();
|
||||
bool complete = false;
|
||||
{
|
||||
ReadLockScoped imageLock(*_rawImageLock);
|
||||
complete = Encode();
|
||||
}
|
||||
if (!_frameQueue.Empty() || complete)
|
||||
{
|
||||
while (!_frameQueue.Empty())
|
||||
{
|
||||
_frameToDecode = static_cast<FrameQueueTuple *>(_frameQueue.PopFrame());
|
||||
int lost = DoPacketLoss();
|
||||
if (lost == 2)
|
||||
{
|
||||
// Lost the whole frame, continue
|
||||
_missingFrames = true;
|
||||
delete _frameToDecode;
|
||||
_frameToDecode = NULL;
|
||||
continue;
|
||||
}
|
||||
int ret = Decode(lost);
|
||||
delete _frameToDecode;
|
||||
_frameToDecode = NULL;
|
||||
if (ret < 0)
|
||||
{
|
||||
fprintf(stderr,"\n\nError in decoder: %d\n\n", ret);
|
||||
return false;
|
||||
}
|
||||
else if (ret < 0)
|
||||
{
|
||||
fprintf(stderr, "\n\nPositive return value from decode!\n\n");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (_stopped)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Encodes the current shared input frame. Requests a key frame every 50th
// frame when _requestKeyFrame is set. Always returns false (the frame queue
// drives completion in PerformSingleTest()).
bool PerformanceTest::Encode()
{
    RawImage rawImage;
    VideoBufferToRawImage(_inputVideoBuffer, rawImage);
    VideoFrameType frameType = kDeltaFrame;
    if (_requestKeyFrame && !(_encFrameCnt%50))
    {
        frameType = kKeyFrame;
    }
    void* codecSpecificInfo = CreateEncoderSpecificInfo();
    int ret = _encoder->Encode(rawImage, codecSpecificInfo, frameType);
    if (codecSpecificInfo != NULL)
    {
        // TODO(holmer): implement virtual function for deleting this and remove warnings
        // NOTE(review): deleting through void* does not run the destructor —
        // the TODO above should be addressed.
        delete codecSpecificInfo;
        codecSpecificInfo = NULL;
    }
    assert(ret >= 0);
    return false;
}
|
||||
|
||||
// Decodes the current frame, marking it incomplete when any packet loss was
// inflicted (lossValue != 0) and flagging missing reference frames after a
// fully lost frame. Returns the decoder's return code.
int PerformanceTest::Decode(int lossValue)
{
    EncodedImage encodedImage;
    VideoEncodedBufferToEncodedImage(*(_frameToDecode->_frame), encodedImage);
    encodedImage._completeFrame = !lossValue;
    int ret = _decoder->Decode(encodedImage, _missingFrames, _frameToDecode->_codecSpecificInfo);
    _missingFrames = false;
    return ret;
}
|
||||
@ -0,0 +1,54 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_PERFORMANCE_TEST_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_PERFORMANCE_TEST_H_
|
||||
|
||||
#include "normal_async_test.h"
|
||||
#include "thread_wrapper.h"
|
||||
#include "rw_lock_wrapper.h"
|
||||
#include "event_wrapper.h"
|
||||
|
||||
// Multi-codec performance test: runs one encoder/decoder pair per codec on
// its own thread, feeding all of them the same source frames, and reports
// the total processing time. Subclasses must override CreateEncoder() /
// CreateDecoder() to supply the actual codec.
class PerformanceTest : public NormalAsyncTest
{
public:
    PerformanceTest(WebRtc_UWord32 bitRate, WebRtc_UWord8 numCodecs);
    virtual ~PerformanceTest();

    virtual void Perform();
    virtual void Print() {};

protected:
    // Single-codec constructor used internally for per-codec sub-tests.
    PerformanceTest(WebRtc_UWord32 bitRate);
    virtual void Setup();
    virtual bool Encode();
    virtual int Decode(int lossValue = 0);
    virtual void Teardown();
    // Thread entry point; obj is the sub-test instance.
    static bool RunThread(void* obj);
    // One worker iteration: wait for frame, encode, decode queued frames.
    bool PerformSingleTest();

    // Factory hooks — must be overridden by codec-specific subclasses.
    virtual webrtc::VideoEncoder* CreateEncoder() const { return NULL; };
    virtual webrtc::VideoDecoder* CreateDecoder() const { return NULL; };

    WebRtc_UWord8 _numCodecs;            // 0 marks a per-codec sub-test
    PerformanceTest** _tests;            // per-codec sub-tests
    webrtc::VideoEncoder** _encoders;
    webrtc::VideoDecoder** _decoders;
    webrtc::ThreadWrapper** _threads;
    webrtc::RWLockWrapper* _rawImageLock; // guards the shared input frame
    webrtc::EventWrapper** _encodeEvents; // per-codec "frame ready" signals
    FrameQueue _frameQueue;
    bool _stopped;
    webrtc::EncodedImageCallback* _encodeCompleteCallback;
    webrtc::DecodedImageCallback* _decodeCompleteCallback;
    FILE* _outFile;
};
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_PERFORMANCE_TEST_H_
|
||||
427
modules/video_coding/codecs/test_framework/plotBenchmark.m
Normal file
427
modules/video_coding/codecs/test_framework/plotBenchmark.m
Normal file
@ -0,0 +1,427 @@
|
||||
function plotBenchmark(fileNames, export)
%PLOTBENCHMARK Plots and exports video codec benchmarking results.
%  PLOTBENCHMARK(FILENAMES, EXPORT) parses the video codec benchmarking result
%  files given by the cell array of strings FILENAME. It plots the results and
%  optionally exports each plot to an appropriately named file.
%
%  EXPORT parameter:
%  'none'  No file exports.
%  'eps'   Exports to eps files (default).
%  'pdf'   Exports to eps files and uses the command-line utility
%          epstopdf to obtain pdf files.
%
%  Example:
%  plotBenchmark({'H264Benchmark.txt' 'LSVXBenchmark.txt'}, 'pdf')

if (nargin < 1)
    error('Too few input arguments');
elseif (nargin < 2)
    export = 'eps';
end

% Accept a single file name string by wrapping it into a cell array.
if ~iscell(fileNames)
    if ischar(fileNames)
        % one single file name as a string is ok
        if size(fileNames,1) > 1
            % this is a char matrix, not ok
            error('First argument must not be a char matrix');
        end
        % wrap in a cell array
        fileNames = {fileNames};
    else
        error('First argument must be a cell array of strings');
    end
end

if ~ischar(export)
    error('Second argument must be a string');
end

outpath = 'BenchmarkPlots';
[status, errMsg] = mkdir(outpath);
if status == 0
    error(errMsg);
end

nCases = 0;
testCases = [];
% Read each test result file
for fileIdx = 1:length(fileNames)
    if ~isstr(fileNames{fileIdx})
        error('First argument must be a cell array of strings');
    end

    fid = fopen(fileNames{fileIdx}, 'rt');
    if fid == -1
        error(['Unable to open ' fileNames{fileIdx}]);
    end

    % The first line must declare a supported file format version.
    version = '1.0';
    if ~strcmp(fgetl(fid), ['#!benchmark' version])
        fclose(fid);
        error(['Requires benchmark file format version ' version]);
    end

    % Parse results file into testCases struct
    codec = fgetl(fid);
    tline = fgetl(fid);
    while(tline ~= -1)
        nCases = nCases + 1;

        % First line of a case: "name,resolution,frameRate".
        delim = strfind(tline, ',');
        name = tline(1:delim(1)-1);
        % Drop underscored suffix from name
        underscore = strfind(name, '_');
        if ~isempty(underscore)
            name = name(1:underscore(1)-1);
        end

        resolution = tline(delim(1)+1:delim(2)-1);
        frameRate = tline(delim(2)+1:end);

        % Second line: "label,value,value,..." of bitrates.
        tline = fgetl(fid);
        delim = strfind(tline, ',');
        bitrateLabel = tline(1:delim(1)-1);
        bitrate = sscanf(tline(delim(1):end),',%f');

        % Third line: PSNR values.
        tline = fgetl(fid);
        delim = strfind(tline, ',');
        psnrLabel = tline(1:delim(1)-1);
        psnr = sscanf(tline(delim(1):end),',%f');


        % Default data for the optional lines
        speedLabel = 'Default';
        speed = 0;
        ssimLabel = 'Default';
        ssim = 0;

        tline = fgetl(fid);
        delim = strfind(tline, ',');

        % Optional lines (speed, encode/decode time, SSIM) continue until a
        % line without a comma is reached (the next case header or EOF).
        while ~isempty(delim)
            % More data
            % Check type of data
            if strncmp(lower(tline), 'speed', 5)
                % Speed data included
                speedLabel = tline(1:delim(1)-1);
                speed = sscanf(tline(delim(1):end), ',%f');

                tline = fgetl(fid);

            elseif strncmp(lower(tline), 'encode time', 11)
                % Encode and decode times included
                % TODO: take care of the data

                % pop two lines from file
                tline = fgetl(fid);
                tline = fgetl(fid);

            elseif strncmp(tline, 'SSIM', 4)
                % SSIM data included
                ssimLabel = tline(1:delim(1)-1);
                ssim = sscanf(tline(delim(1):end), ',%f');

                tline = fgetl(fid);
            end
            delim = strfind(tline, ',');
        end

        testCases = [testCases struct('codec', codec, 'name', name, 'resolution', ...
            resolution, 'frameRate', frameRate, 'bitrate', bitrate, 'psnr', psnr, ...
            'speed', speed, 'bitrateLabel', bitrateLabel, 'psnrLabel', psnrLabel, ...
            'speedLabel', speedLabel, ...
            'ssim', ssim, 'ssimLabel', ssimLabel)];

        tline = fgetl(fid);
    end

    fclose(fid);
end

% Plot PSNR curves, one figure per resolution/frame-rate combination.
i = 0;
casesPsnr = testCases;
while ~isempty(casesPsnr)
    i = i + 1;
    casesPsnr = plotOnePsnr(casesPsnr, i, export, outpath);
end

% Plot SSIM curves.
casesSSIM = testCases;
while ~isempty(casesSSIM)
    i = i + 1;
    casesSSIM = plotOneSSIM(casesSSIM, i, export, outpath);
end

% Plot speed curves; cases without speed data (speed == 0) are skipped.
casesSpeed = testCases;
while ~isempty(casesSpeed)
    if casesSpeed(1).speed == 0
        casesSpeed = casesSpeed(2:end);
    else
        i = i + 1;
        casesSpeed = plotOneSpeed(casesSpeed, i, export, outpath);
    end
end
|
||||
|
||||
|
||||
|
||||
%%%%%%%%%%%%%%%%%%
|
||||
%% SUBFUNCTIONS %%
|
||||
%%%%%%%%%%%%%%%%%%
|
||||
|
||||
% Plots one PSNR-vs-bitrate figure for all cases sharing the first case's
% resolution and frame rate; returns the cases not plotted yet.
function casesOut = plotOnePsnr(cases, num, export, outpath)
% Find matching specs
plotIdx = 1;
for i = 2:length(cases)
    if strcmp(cases(1).resolution, cases(i).resolution) & ...
            strcmp(cases(1).frameRate, cases(i).frameRate)
        plotIdx = [plotIdx i];
    end
end

% Return unplotted cases
casesOut = cases(setdiff(1:length(cases), plotIdx));
cases = cases(plotIdx);

% Prune similar results (bitrates closer than 10 kbps to their successor)
for i = 1:length(cases)
    simIndx = find(abs(cases(i).bitrate - [cases(i).bitrate(2:end) ; 0]) < 10);
    while ~isempty(simIndx)
        diffIndx = setdiff(1:length(cases(i).bitrate), simIndx);
        cases(i).psnr = cases(i).psnr(diffIndx);
        cases(i).bitrate = cases(i).bitrate(diffIndx);
        simIndx = find(abs(cases(i).bitrate - [cases(i).bitrate(2:end) ; 0]) < 10);
    end
end

% Prepare figure with axis labels and so on
hFig = figure(num);
clf;
hold on;
grid on;
axis([0 1100 20 50]);
set(gca, 'XTick', 0:200:1000);
set(gca, 'YTick', 20:10:60);
xlabel(cases(1).bitrateLabel);
ylabel(cases(1).psnrLabel);
res = cases(1).resolution;
frRate = cases(1).frameRate;
title([res ', ' frRate]);

hLines = [];
codecs = {};
sequences = {};
i = 0;
while ~isempty(cases)
    i = i + 1;
    [cases, hLine, codec, sequences] = plotOneCodec(cases, 'bitrate', 'psnr', i, sequences, 1);

    % Stored to generate the legend
    hLines = [hLines ; hLine];
    codecs = {codecs{:} codec};
end
legend(hLines, codecs, 4);
hold off;

if ~strcmp(export, 'none')
    % Export figure to an eps file
    res = stripws(res);
    frRate = stripws(frRate);
    exportName = [outpath '/psnr-' res '-' frRate];
    exportfig(hFig, exportName, 'Format', 'eps2', 'Color', 'cmyk');
end

if strcmp(export, 'pdf')
    % Use the epstopdf utility to convert to pdf
    system(['epstopdf ' exportName '.eps']);
end
|
||||
|
||||
|
||||
% Plots one SSIM-vs-bitrate figure for all cases sharing the first case's
% resolution and frame rate; returns the cases not plotted yet.
function casesOut = plotOneSSIM(cases, num, export, outpath)
% Find matching specs
plotIdx = 1;
for i = 2:length(cases)
    if strcmp(cases(1).resolution, cases(i).resolution) & ...
            strcmp(cases(1).frameRate, cases(i).frameRate)
        plotIdx = [plotIdx i];
    end
end

% Return unplotted cases
casesOut = cases(setdiff(1:length(cases), plotIdx));
cases = cases(plotIdx);

% Prune similar results (bitrates closer than 10 kbps to their successor)
for i = 1:length(cases)
    simIndx = find(abs(cases(i).bitrate - [cases(i).bitrate(2:end) ; 0]) < 10);
    while ~isempty(simIndx)
        diffIndx = setdiff(1:length(cases(i).bitrate), simIndx);
        cases(i).ssim = cases(i).ssim(diffIndx);
        cases(i).bitrate = cases(i).bitrate(diffIndx);
        simIndx = find(abs(cases(i).bitrate - [cases(i).bitrate(2:end) ; 0]) < 10);
    end
end

% Prepare figure with axis labels and so on
hFig = figure(num);
clf;
hold on;
grid on;
axis([0 1100 0.5 1]); % y-limit are set to 'auto' below
set(gca, 'XTick', 0:200:1000);
%set(gca, 'YTick', 20:10:60);
xlabel(cases(1).bitrateLabel);
ylabel(cases(1).ssimLabel);
res = cases(1).resolution;
frRate = cases(1).frameRate;
title([res ', ' frRate]);

hLines = [];
codecs = {};
sequences = {};
i = 0;
while ~isempty(cases)
    i = i + 1;
    [cases, hLine, codec, sequences] = plotOneCodec(cases, 'bitrate', 'ssim', i, sequences, 1);

    % Stored to generate the legend
    hLines = [hLines ; hLine];
    codecs = {codecs{:} codec};
end
%set(gca,'YLimMode','auto')
set(gca,'YLim',[0.5 1])
set(gca,'YScale','log')
legend(hLines, codecs, 4);
hold off;

if ~strcmp(export, 'none')
    % Export figure to an eps file
    res = stripws(res);
    frRate = stripws(frRate);
    % BUG FIX: used the 'psnr-' prefix (copy-paste from plotOnePsnr), which
    % overwrote the PSNR exports. SSIM plots get their own file names.
    exportName = [outpath '/ssim-' res '-' frRate];
    exportfig(hFig, exportName, 'Format', 'eps2', 'Color', 'cmyk');
end

if strcmp(export, 'pdf')
    % Use the epstopdf utility to convert to pdf
    system(['epstopdf ' exportName '.eps']);
end
|
||||
|
||||
|
||||
% Plots one speed-vs-PSNR figure for all cases sharing the first case's
% resolution, frame rate AND sequence name; returns the unplotted cases.
function casesOut = plotOneSpeed(cases, num, export, outpath)
% Find matching specs
plotIdx = 1;
for i = 2:length(cases)
    if strcmp(cases(1).resolution, cases(i).resolution) & ...
            strcmp(cases(1).frameRate, cases(i).frameRate) & ...
            strcmp(cases(1).name, cases(i).name)
        plotIdx = [plotIdx i];
    end
end

% Return unplotted cases
casesOut = cases(setdiff(1:length(cases), plotIdx));
cases = cases(plotIdx);

% Prune similar results (PSNR values within 0.25 dB of their successor)
for i = 1:length(cases)
    simIndx = find(abs(cases(i).psnr - [cases(i).psnr(2:end) ; 0]) < 0.25);
    while ~isempty(simIndx)
        diffIndx = setdiff(1:length(cases(i).psnr), simIndx);
        cases(i).psnr = cases(i).psnr(diffIndx);
        cases(i).speed = cases(i).speed(diffIndx);
        simIndx = find(abs(cases(i).psnr - [cases(i).psnr(2:end) ; 0]) < 0.25);
    end
end

hFig = figure(num);
clf;
hold on;
%grid on;
xlabel(cases(1).psnrLabel);
ylabel(cases(1).speedLabel);
res = cases(1).resolution;
name = cases(1).name;
frRate = cases(1).frameRate;
title([name ', ' res ', ' frRate]);

hLines = [];
codecs = {};
sequences = {};
i = 0;
while ~isempty(cases)
    i = i + 1;
    [cases, hLine, codec, sequences] = plotOneCodec(cases, 'psnr', 'speed', i, sequences, 0);

    % Stored to generate the legend
    hLines = [hLines ; hLine];
    codecs = {codecs{:} codec};
end
legend(hLines, codecs, 1);
hold off;

if ~strcmp(export, 'none')
    % Export figure to an eps file
    res = stripws(res);
    frRate = stripws(frRate);
    exportName = [outpath '/speed-' name '-' res '-' frRate];
    exportfig(hFig, exportName, 'Format', 'eps2', 'Color', 'cmyk');
end

if strcmp(export, 'pdf')
    % Use the epstopdf utility to convert to pdf
    system(['epstopdf ' exportName '.eps']);
end
|
||||
|
||||
|
||||
% Plots the xfield-vs-yfield curves for all cases matching the first case's
% codec, using the num'th line style. Optionally annotates the first point of
% each sequence with its name (once per sequence, tracked via `sequences`).
% Returns the unplotted cases, a line handle and codec name for the legend.
function [casesOut, hLine, codec, sequences] = plotOneCodec(cases, xfield, yfield, num, sequences, annotatePlot)
plotStr = {'gx-', 'bo-', 'r^-', 'kd-', 'cx-', 'go--', 'b^--'};
% Find matching codecs
plotIdx = 1;
for i = 2:length(cases)
    if strcmp(cases(1).codec, cases(i).codec)
        plotIdx = [plotIdx i];
    end
end

% Return unplotted cases
casesOut = cases(setdiff(1:length(cases), plotIdx));
cases = cases(plotIdx);

for i = 1:length(cases)
    % Plot a single case
    hLine = plot(getfield(cases(i), xfield), getfield(cases(i), yfield), plotStr{num}, ...
        'LineWidth', 1.1, 'MarkerSize', 6);
end

% hLine handle and codec are returned to construct the legend afterwards
codec = cases(1).codec;

if annotatePlot == 0
    return;
end

for i = 1:length(cases)
    % Print the codec name as a text label
    % Ensure each codec is only printed once
    sequencePlotted = 0;
    for j = 1:length(sequences)
        if strcmp(cases(i).name, sequences{j})
            sequencePlotted = 1;
            break;
        end
    end

    if sequencePlotted == 0
        text(getfield(cases(i), xfield, {1}), getfield(cases(i), yfield, {1}), ...
            ['  ' cases(i).name]);
        sequences = {sequences{:} cases(i).name};
    end
end
|
||||
|
||||
|
||||
% Strip whitespace from string
% Removes every whitespace character from STR, preserving the order of the
% remaining characters. Errors if the input is not a character string.
function str = stripws(str)
if ~isstr(str)
    error('String required');
end
% Logical indexing keeps exactly the non-whitespace positions, in order.
str = str(~isspace(str));
|
||||
534
modules/video_coding/codecs/test_framework/test.cc
Normal file
534
modules/video_coding/codecs/test_framework/test.cc
Normal file
@ -0,0 +1,534 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "test.h"
|
||||
#include "video_source.h"
|
||||
#include "vplib.h"
|
||||
#include "event_wrapper.h"
|
||||
#include "thread_wrapper.h"
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
#include <cmath>
|
||||
#include <ctime>
|
||||
#include <string.h>
|
||||
#include <cassert>
|
||||
#include <vector>
|
||||
|
||||
using namespace webrtc;
|
||||
|
||||
long filesize(const char *filename); // local function defined at end of file
|
||||
|
||||
// Parameter bundle handed to an SSIM worker thread: the reference and test
// file names, frame dimensions, the [startFrame, endFrame] slice this worker
// computes, a pointer for its partial result, and the event it signals when
// done. All pointers are non-owning.
struct SSIMcontext
{
    SSIMcontext() :
        refFileName(NULL), testFileName(NULL), width(0), height(0),
        SSIMptr(NULL), startFrame(-1), endFrame(-1), evnt(NULL) {};
    SSIMcontext(const char *ref, const char *test, int w, int h, double *Sptr,
                int start, int end, EventWrapper* ev) :
        refFileName(ref), testFileName(test), width(w), height(h),
        SSIMptr(Sptr), startFrame(start), endFrame(end), evnt(ev) {};
    const char *refFileName;   // reference (source) YUV file
    const char *testFileName;  // decoded/processed YUV file under test
    int width;
    int height;
    double *SSIMptr;           // out: this worker's SSIM sum
    int startFrame;            // first frame index of this worker's slice
    int endFrame;              // last frame index of this worker's slice
    EventWrapper* evnt;        // signaled when the worker finishes
};
|
||||
|
||||
// Constructs a test with no bitrate specified (CodecSettings() will fall
// back to a default). Seeds rand() with a fixed value so packet-loss
// simulations etc. are reproducible across runs.
Test::Test(std::string name, std::string description)
:
_name(name),
_description(description),
_bitRate(0),
_inname(""),
_outname(""),
_encodedName("")
{
    memset(&_inst, 0, sizeof(_inst));
    unsigned int seed = static_cast<unsigned int>(0);
    std::srand(seed);
}
|
||||
|
||||
// Constructs a test with an explicit target bitrate (kbps). Seeds rand()
// with a fixed value so runs are reproducible.
Test::Test(std::string name, std::string description, WebRtc_UWord32 bitRate)
:
_name(name),
_description(description),
_bitRate(bitRate),
_inname(""),
_outname(""),
_encodedName("")
{
    memset(&_inst, 0, sizeof(_inst));
    unsigned int seed = static_cast<unsigned int>(0);
    std::srand(seed);
}
|
||||
|
||||
// Prints a completion banner to stdout and writes the test summary --
// including the PSNR and SSIM between the input and output sequences --
// to the log stream supplied via SetLog().
// NOTE(review): assumes SetLog() was called; _log is dereferenced
// unconditionally here.
void
Test::Print()
{
    std::cout << _name << " completed!" << std::endl;
    (*_log) << _name << std::endl;
    (*_log) << _description << std::endl;
    (*_log) << "Input file: " << _inname << std::endl;
    (*_log) << "Output file: " << _outname << std::endl;
    double psnr = -1.0, ssim = -1.0;
    PSNRfromFiles(_inname.c_str(), _outname.c_str(), _inst.width, _inst.height, &psnr);
    // SSIM is far more expensive than PSNR, so it runs on worker threads.
    ssim = SSIMfromFilesMT(4 /* number of threads*/);

    (*_log) << "PSNR: " << psnr << std::endl;
    std::cout << "PSNR: " << psnr << std::endl << std::endl;
    (*_log) << "SSIM: " << ssim << std::endl;
    std::cout << "SSIM: " << ssim << std::endl << std::endl;
    (*_log) << std::endl;
}
|
||||
|
||||
void
|
||||
Test::Setup()
|
||||
{
|
||||
int widhei = _inst.width*_inst.height;
|
||||
_lengthSourceFrame = 3*widhei/2;
|
||||
_sourceBuffer = new unsigned char[_lengthSourceFrame];
|
||||
}
|
||||
|
||||
void
|
||||
Test::CodecSettings(int width, int height, WebRtc_UWord32 frameRate /*=30*/, WebRtc_UWord32 bitRate /*=0*/)
|
||||
{
|
||||
if (bitRate > 0)
|
||||
{
|
||||
_bitRate = bitRate;
|
||||
}
|
||||
else if (_bitRate == 0)
|
||||
{
|
||||
_bitRate = 600;
|
||||
}
|
||||
_inst.maxFramerate = (unsigned char)frameRate;
|
||||
_inst.startBitrate = (int)_bitRate;
|
||||
_inst.maxBitrate = 8000;
|
||||
_inst.width = width;
|
||||
_inst.height = height;
|
||||
}
|
||||
|
||||
void
|
||||
Test::Teardown()
|
||||
{
|
||||
delete [] _sourceBuffer;
|
||||
}
|
||||
|
||||
// Injects the encoder under test. The Test does not take ownership;
// the caller keeps the encoder alive for the duration of the test.
void
Test::SetEncoder(webrtc::VideoEncoder*encoder)
{
    _encoder = encoder;
}
|
||||
|
||||
// Injects the decoder under test. The Test does not take ownership;
// the caller keeps the decoder alive for the duration of the test.
void
Test::SetDecoder(VideoDecoder*decoder)
{
    _decoder = decoder;
}
|
||||
|
||||
// Sets the stream that Print() writes the test summary to.
// Not owned; must outlive the test.
void
Test::SetLog(std::fstream* log)
{
    _log = log;
}
|
||||
|
||||
int
|
||||
Test::PSNRfromFiles(const char *refFileName, const char *testFileName, int width, int height, double *YPSNRptr)
|
||||
{
|
||||
FILE *refFp = fopen(refFileName, "rb");
|
||||
if( refFp == NULL ) {
|
||||
// cannot open reference file
|
||||
fprintf(stderr, "Cannot open file %s\n", refFileName);
|
||||
return -1;
|
||||
}
|
||||
|
||||
FILE *testFp = fopen(testFileName, "rb");
|
||||
if( testFp == NULL ) {
|
||||
// cannot open test file
|
||||
fprintf(stderr, "Cannot open file %s\n", testFileName);
|
||||
return -2;
|
||||
}
|
||||
|
||||
double mse = 0.0;
|
||||
double mseLogSum = 0.0;
|
||||
int frames = 0;
|
||||
|
||||
int frameBytes = 3*width*height/2; // bytes in one frame I420
|
||||
unsigned char *ref = new unsigned char[frameBytes]; // space for one frame I420
|
||||
unsigned char *test = new unsigned char[frameBytes]; // space for one frame I420
|
||||
|
||||
int refBytes = (int) fread(ref, 1, frameBytes, refFp);
|
||||
int testBytes = (int) fread(test, 1, frameBytes, testFp);
|
||||
|
||||
while( refBytes == frameBytes && testBytes == frameBytes )
|
||||
{
|
||||
mse = 0.0;
|
||||
|
||||
// calculate Y sum-square-difference
|
||||
for( int k = 0; k < width * height; k++ )
|
||||
{
|
||||
mse += (test[k] - ref[k]) * (test[k] - ref[k]);
|
||||
}
|
||||
|
||||
// divide by number of pixels
|
||||
mse /= (double) (width * height);
|
||||
|
||||
// accumulate for total average
|
||||
mseLogSum += std::log10( mse );
|
||||
frames++;
|
||||
|
||||
refBytes = (int) fread(ref, 1, frameBytes, refFp);
|
||||
testBytes = (int) fread(test, 1, frameBytes, testFp);
|
||||
}
|
||||
|
||||
// ypsnrAvg = sum( 10 log (255^2 / MSE) ) / frames
|
||||
// = 20 * log(255) - 10 * mseLogSum / frames
|
||||
*YPSNRptr = 20.0 * std::log10(255.0) - 10.0 * mseLogSum / frames;
|
||||
|
||||
delete [] ref;
|
||||
delete [] test;
|
||||
|
||||
fclose(refFp);
|
||||
fclose(testFp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
int
|
||||
Test::SSIMfromFiles(const char *refFileName, const char *testFileName, int width, int height, double *SSIMptr,
|
||||
int startFrame /*= -1*/, int endFrame /*= -1*/)
|
||||
{
|
||||
FILE *refFp = fopen(refFileName, "rb");
|
||||
if( refFp == NULL ) {
|
||||
// cannot open reference file
|
||||
fprintf(stderr, "Cannot open file %s\n", refFileName);
|
||||
return -1;
|
||||
}
|
||||
|
||||
FILE *testFp = fopen(testFileName, "rb");
|
||||
if( testFp == NULL ) {
|
||||
// cannot open test file
|
||||
fprintf(stderr, "Cannot open file %s\n", testFileName);
|
||||
return -2;
|
||||
}
|
||||
|
||||
int frames = 0;
|
||||
|
||||
int frameBytes = 3*width*height/2; // bytes in one frame I420
|
||||
unsigned char *ref = new unsigned char[frameBytes]; // space for one frame I420
|
||||
unsigned char *test = new unsigned char[frameBytes]; // space for one frame I420
|
||||
|
||||
if (startFrame >= 0)
|
||||
{
|
||||
if (fseek(refFp, frameBytes * startFrame, SEEK_SET) != 0){
|
||||
fprintf(stderr, "Cannot go to frame %i in %s\n", startFrame, refFileName);
|
||||
return -1;
|
||||
}
|
||||
if (fseek(testFp, frameBytes * startFrame, SEEK_SET) != 0){
|
||||
fprintf(stderr, "Cannot go to frame %i in %s\n", startFrame, testFileName);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int refBytes = (int) fread(ref, 1, frameBytes, refFp);
|
||||
int testBytes = (int) fread(test, 1, frameBytes, testFp);
|
||||
|
||||
//
|
||||
// SSIM: variable definition, window function, initialization
|
||||
int window = 10;
|
||||
int flag_window = 0; //0 for uniform window filter, 1 for gaussian symmetric window
|
||||
float variance_window = 2.0; //variance for window function
|
||||
float ssimFilter[121]; //2d window filter: typically 11x11 = (window+1)*(window+1)
|
||||
//statistics per column of window (#columns = window+1), 0 element for avg over all columns
|
||||
float avgTest[12];
|
||||
float avgRef[12];
|
||||
float contrastTest[12];
|
||||
float contrastRef[12];
|
||||
float crossCorr[12];
|
||||
//
|
||||
//offsets for stability
|
||||
float offset1 = 0.1f;
|
||||
float offset2 = 0.1f;
|
||||
float offset3 = offset2/2;
|
||||
//
|
||||
//define window for SSIM: take uniform filter for now
|
||||
float sumfil = 0.0;
|
||||
int nn=-1;
|
||||
for(int j=-window/2;j<=window/2;j++)
|
||||
for(int i=-window/2;i<=window/2;i++)
|
||||
{
|
||||
nn+=1;
|
||||
if (flag_window == 0)
|
||||
ssimFilter[nn] = 1.0;
|
||||
else
|
||||
{
|
||||
float dist = (float)(i*i) + (float)(j*j);
|
||||
float tmp = 0.5f*dist/variance_window;
|
||||
ssimFilter[nn] = exp(-tmp);
|
||||
}
|
||||
sumfil +=ssimFilter[nn];
|
||||
}
|
||||
//normalize window
|
||||
nn=-1;
|
||||
for(int j=-window/2;j<=window/2;j++)
|
||||
for(int i=-window/2;i<=window/2;i++)
|
||||
{
|
||||
nn+=1;
|
||||
ssimFilter[nn] = ssimFilter[nn]/((float)sumfil);
|
||||
}
|
||||
//
|
||||
float ssimScene = 0.0; //avgerage SSIM for sequence
|
||||
//
|
||||
//SSIM: done with variables and defintion
|
||||
//
|
||||
|
||||
while( refBytes == frameBytes && testBytes == frameBytes &&
|
||||
!(endFrame >= 0 && frames > endFrame - startFrame))
|
||||
{
|
||||
float ssimFrame = 0.0;
|
||||
int sh = window/2+1;
|
||||
int numPixels = 0;
|
||||
for(int i=sh;i<height-sh;i++)
|
||||
for(int j=sh;j<width-sh;j++)
|
||||
{
|
||||
avgTest[0] = 0.0;
|
||||
avgRef[0] = 0.0;
|
||||
contrastTest[0] = 0.0;
|
||||
contrastRef[0] = 0.0;
|
||||
crossCorr[0] = 0.0;
|
||||
|
||||
numPixels +=1;
|
||||
|
||||
//for uniform window, only need to loop over whole window for first column pixel in image, and then shift
|
||||
if (j == sh || flag_window == 1)
|
||||
{
|
||||
//initialize statistics
|
||||
for(int k=1;k<window+2;k++)
|
||||
{
|
||||
avgTest[k] = 0.0;
|
||||
avgRef[k] = 0.0;
|
||||
contrastTest[k] = 0.0;
|
||||
contrastRef[k] = 0.0;
|
||||
crossCorr[k] = 0.0;
|
||||
}
|
||||
int nn=-1;
|
||||
//compute contrast and correlation
|
||||
for(int jj=-window/2;jj<=window/2;jj++)
|
||||
for(int ii=-window/2;ii<=window/2;ii++)
|
||||
{
|
||||
nn+=1;
|
||||
int i2 = i+ii;
|
||||
int j2 = j+jj;
|
||||
float tmp1 = (float)test[i2*width+j2];
|
||||
float tmp2 = (float)ref[i2*width+j2];
|
||||
//local average of each signal
|
||||
avgTest[jj+window/2+1] += ssimFilter[nn]*tmp1;
|
||||
avgRef[jj+window/2+1] += ssimFilter[nn]*tmp2;
|
||||
//local correlation/contrast of each signal
|
||||
contrastTest[jj+window/2+1] += ssimFilter[nn]*tmp1*tmp1;
|
||||
contrastRef[jj+window/2+1] += ssimFilter[nn]*tmp2*tmp2;
|
||||
//local cross correlation
|
||||
crossCorr[jj+window/2+1] += ssimFilter[nn]*tmp1*tmp2;
|
||||
}
|
||||
}
|
||||
//for uniform window case, can shift window horiz, then compute statistics for last column in window
|
||||
else
|
||||
{
|
||||
//shift statistics horiz.
|
||||
for(int k=1;k<window+1;k++)
|
||||
{
|
||||
avgTest[k]=avgTest[k+1];
|
||||
avgRef[k]=avgRef[k+1];
|
||||
contrastTest[k] = contrastTest[k+1];
|
||||
contrastRef[k] = contrastRef[k+1];
|
||||
crossCorr[k] = crossCorr[k+1];
|
||||
}
|
||||
//compute statistics for last column
|
||||
avgTest[window+1] = 0.0;
|
||||
avgRef[window+1] = 0.0;
|
||||
contrastTest[window+1] = 0.0;
|
||||
contrastRef[window+1] = 0.0;
|
||||
crossCorr[window+1] = 0.0;
|
||||
int nn = (window+1)*window - 1;
|
||||
int jj = window/2;
|
||||
int j2 = j + jj;
|
||||
for(int ii=-window/2;ii<=window/2;ii++)
|
||||
{
|
||||
nn+=1;
|
||||
int i2 = i+ii;
|
||||
float tmp1 = (float)test[i2*width+j2];
|
||||
float tmp2 = (float)ref[i2*width+j2];
|
||||
//local average of each signal
|
||||
avgTest[jj+window/2+1] += ssimFilter[nn]*tmp1;
|
||||
avgRef[jj+window/2+1] += ssimFilter[nn]*tmp2;
|
||||
//local correlation/contrast of each signal
|
||||
contrastTest[jj+window/2+1] += ssimFilter[nn]*tmp1*tmp1;
|
||||
contrastRef[jj+window/2+1] += ssimFilter[nn]*tmp2*tmp2;
|
||||
//local cross correlation
|
||||
crossCorr[jj+window/2+1] += ssimFilter[nn]*tmp1*tmp2;
|
||||
}
|
||||
}
|
||||
|
||||
//sum over all columns
|
||||
for(int k=1;k<window+2;k++)
|
||||
{
|
||||
avgTest[0] += avgTest[k];
|
||||
avgRef[0] += avgRef[k];
|
||||
contrastTest[0] += contrastTest[k];
|
||||
contrastRef[0] += contrastRef[k];
|
||||
crossCorr[0] += crossCorr[k];
|
||||
}
|
||||
|
||||
float tmp1 = (contrastTest[0] - avgTest[0]*avgTest[0]);
|
||||
if (tmp1 < 0.0) tmp1 = 0.0;
|
||||
contrastTest[0] = sqrt(tmp1);
|
||||
float tmp2 = (contrastRef[0] - avgRef[0]*avgRef[0]);
|
||||
if (tmp2 < 0.0) tmp2 = 0.0;
|
||||
contrastRef[0] = sqrt(tmp2);
|
||||
crossCorr[0] = crossCorr[0] - avgTest[0]*avgRef[0];
|
||||
|
||||
float ssimCorrCoeff = (crossCorr[0]+offset3)/(contrastTest[0]*contrastRef[0] + offset3);
|
||||
float ssimLuminance = (2*avgTest[0]*avgRef[0]+offset1)/(avgTest[0]*avgTest[0] + avgRef[0]*avgRef[0] + offset1);
|
||||
float ssimContrast = (2*contrastTest[0]*contrastRef[0]+offset2)/(contrastTest[0]*contrastTest[0] + contrastRef[0]*contrastRef[0] + offset2);
|
||||
|
||||
float ssimPixel = ssimCorrCoeff * ssimLuminance * ssimContrast;
|
||||
ssimFrame += ssimPixel;
|
||||
}
|
||||
ssimFrame = ssimFrame / (numPixels);
|
||||
//printf("***SSIM for frame ***%f \n",ssimFrame);
|
||||
ssimScene += ssimFrame;
|
||||
//
|
||||
//SSIM: done with SSIM computation
|
||||
//
|
||||
|
||||
frames++;
|
||||
|
||||
refBytes = (int) fread(ref, 1, frameBytes, refFp);
|
||||
testBytes = (int) fread(test, 1, frameBytes, testFp);
|
||||
|
||||
}
|
||||
|
||||
//SSIM: normalize/average for sequence
|
||||
ssimScene = ssimScene / frames;
|
||||
*SSIMptr = ssimScene;
|
||||
|
||||
delete [] ref;
|
||||
delete [] test;
|
||||
|
||||
fclose(refFp);
|
||||
fclose(testFp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Thread entry point used by SSIMfromFilesMT(): runs SSIMfromFiles() over
// the frame range described by |vctx| (a SSIMcontext), then signals the
// context's event so the driver can join. Returning false tells
// ThreadWrapper not to invoke this function again.
bool
Test::SSIMthread(void *vctx)
{
    SSIMcontext *ctx = (SSIMcontext *) vctx;
    SSIMfromFiles(ctx->refFileName, ctx->testFileName, ctx->width, ctx->height, ctx->SSIMptr, ctx->startFrame, ctx->endFrame);
    ctx->evnt->Set();
    return false;
}
|
||||
|
||||
double Test::SSIMfromFilesMT(const int numThreads)
|
||||
{
|
||||
int numFrames = filesize(_inname.c_str()) / _lengthSourceFrame;
|
||||
std::vector<int> nFramesVec(numThreads);
|
||||
std::vector<double> ssimVec(numThreads);
|
||||
int framesPerCore = (numFrames + numThreads - 1) / numThreads; // rounding up
|
||||
int i = 0;
|
||||
int nFrames;
|
||||
for (nFrames = numFrames; nFrames >= framesPerCore; nFrames -= framesPerCore)
|
||||
{
|
||||
nFramesVec[i++] = framesPerCore;
|
||||
}
|
||||
if (nFrames > 0)
|
||||
{
|
||||
assert(i == numThreads - 1);
|
||||
nFramesVec[i] = nFrames; // remainder
|
||||
}
|
||||
|
||||
int frameIx = 0;
|
||||
std::vector<EventWrapper*> eventVec(numThreads);
|
||||
std::vector<ThreadWrapper*> threadVec(numThreads);
|
||||
std::vector<SSIMcontext> ctxVec(numThreads);
|
||||
for (i = 0; i < numThreads; i++)
|
||||
{
|
||||
eventVec[i] = EventWrapper::Create();
|
||||
ctxVec[i] = SSIMcontext(_inname.c_str(), _outname.c_str(), _inst.width, _inst.height, &ssimVec[i], frameIx, frameIx + nFramesVec[i] - 1, eventVec[i]);
|
||||
threadVec[i] = ThreadWrapper::CreateThread(SSIMthread, &(ctxVec[i]), kLowPriority);
|
||||
unsigned int id;
|
||||
threadVec[i]->Start(id);
|
||||
frameIx += nFramesVec[i];
|
||||
}
|
||||
|
||||
// wait for all events
|
||||
for (i = 0; i < numThreads; i++) {
|
||||
eventVec[i]->Wait(100000 /* ms*/);
|
||||
threadVec[i]->Stop();
|
||||
delete threadVec[i];
|
||||
delete eventVec[i];
|
||||
}
|
||||
|
||||
double avgSsim = 0;
|
||||
for (i = 0; i < numThreads; i++)
|
||||
{
|
||||
avgSsim += (ssimVec[i] * nFramesVec[i]);
|
||||
}
|
||||
|
||||
avgSsim /= numFrames;
|
||||
return avgSsim;
|
||||
}
|
||||
|
||||
|
||||
double Test::ActualBitRate(int nFrames)
|
||||
{
|
||||
return 8.0 * _sumEncBytes / (nFrames / _inst.maxFramerate);
|
||||
}
|
||||
|
||||
bool Test::PacketLoss(double lossRate)
|
||||
{
|
||||
return RandUniform() < lossRate;
|
||||
}
|
||||
|
||||
// Populates a RawImage header from a TestVideoBuffer.
// Shallow copy: |image| aliases the buffer still owned by |videoBuffer|,
// so the buffer must outlive the image.
void
Test::VideoBufferToRawImage(TestVideoBuffer& videoBuffer, RawImage &image)
{
    image._buffer = videoBuffer.GetBuffer();
    image._size = videoBuffer.GetSize();
    image._length = videoBuffer.GetLength();
    image._width = videoBuffer.GetWidth();
    image._height = videoBuffer.GetHeight();
    image._timeStamp = videoBuffer.GetTimeStamp();
}
|
||||
// Populates an EncodedImage header from a TestVideoEncodedBuffer.
// Shallow copy: |image| aliases the buffer still owned by |videoBuffer|.
// _completeFrame is hard-coded true because the test framework always
// passes whole frames.
void
Test::VideoEncodedBufferToEncodedImage(TestVideoEncodedBuffer& videoBuffer, EncodedImage &image)
{
    image._buffer = videoBuffer.GetBuffer();
    image._length = videoBuffer.GetLength();
    image._size = videoBuffer.GetSize();
    image._frameType = static_cast<VideoFrameType>(videoBuffer.GetFrameType());
    image._timeStamp = videoBuffer.GetTimeStamp();
    image._encodedWidth = videoBuffer.GetCaptureWidth();
    image._encodedHeight = videoBuffer.GetCaptureHeight();
    image._completeFrame = true;
}
|
||||
|
||||
// Returns the size of |filename| in bytes, or -1 if the file cannot be
// opened or its size cannot be determined.
//
// Fix: the original never checked the fopen() result and dereferenced a
// NULL FILE* (crash) when the file did not exist.
long filesize(const char *filename)
{
    FILE *f = fopen(filename, "rb"); /* open the file in read only */
    if (f == NULL)
    {
        return -1;
    }
    long size = -1;
    if (fseek(f, 0, SEEK_END) == 0) /* seek was successful */
        size = ftell(f);
    fclose(f);
    return size;
}
|
||||
82
modules/video_coding/codecs/test_framework/test.h
Normal file
82
modules/video_coding/codecs/test_framework/test.h
Normal file
@ -0,0 +1,82 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAWEWORK_TEST_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAWEWORK_TEST_H_
|
||||
|
||||
#include "video_codec_interface.h"
|
||||
#include "video_buffer.h"
|
||||
#include <string>
|
||||
#include <fstream>
|
||||
#include <cstdlib>
|
||||
|
||||
// Base class for the codec test framework: owns the codec configuration,
// I/O buffers and file names, and provides quality metrics (PSNR/SSIM)
// plus conversion helpers between test buffers and codec image structs.
// Concrete tests implement Perform().
class Test
{
public:
    Test(std::string name, std::string description);
    Test(std::string name, std::string description, WebRtc_UWord32 bitRate);
    virtual ~Test() {};
    // Runs the test; implemented by each concrete subclass.
    virtual void Perform()=0;
    // Writes the test summary (incl. PSNR/SSIM) to stdout and the log.
    virtual void Print();
    // Injected collaborators; not owned by Test.
    void SetEncoder(webrtc::VideoEncoder *encoder);
    void SetDecoder(webrtc::VideoDecoder *decoder);
    void SetLog(std::fstream* log);

protected:
    // Allocates the source-frame buffer (call CodecSettings() first).
    virtual void Setup();
    // Fills _inst with geometry/rate settings.
    virtual void CodecSettings(int width,
                               int height,
                               WebRtc_UWord32 frameRate=30,
                               WebRtc_UWord32 bitRate=0);
    // Frees the source-frame buffer.
    virtual void Teardown();
    // Average luma PSNR between two I420 files; 0 on success, <0 on error.
    static int PSNRfromFiles(const char *refFileName,
                             const char *testFileName,
                             int width,
                             int height,
                             double *YPSNRptr);
    // Mean luma SSIM between two I420 files, optionally restricted to an
    // inclusive frame range; 0 on success, <0 on error.
    // NOTE(review): the parameters are frame indices, not byte offsets --
    // the names startByte/endByte appear to be a misnomer.
    static int SSIMfromFiles(const char *refFileName,
                             const char *testFileName,
                             int width,
                             int height,
                             double *SSIMptr,
                             int startByte = -1, int endByte = -1);
    // Multi-threaded SSIM over _inname/_outname; returns the mean SSIM.
    double SSIMfromFilesMT(int numThreads);
    // ThreadWrapper entry point; |ctx| is a SSIMcontext*.
    static bool SSIMthread(void *ctx);

    // Measured encode bit rate over nFrames frames.
    double ActualBitRate(int nFrames);
    // True (drop packet) with probability lossRate.
    static bool PacketLoss(double lossRate);
    // Uniform draw in (0, 1].
    static double RandUniform() { return (std::rand() + 1.0)/(RAND_MAX + 1.0); }
    // Shallow-copy conversions: the image aliases the buffer's storage.
    static void VideoBufferToRawImage(TestVideoBuffer& videoBuffer,
                                      webrtc::RawImage &image);
    static void VideoEncodedBufferToEncodedImage(TestVideoEncodedBuffer& videoBuffer,
                                                 webrtc::EncodedImage &image);

    webrtc::VideoEncoder* _encoder;        // Codec under test (not owned).
    webrtc::VideoDecoder* _decoder;        // Codec under test (not owned).
    WebRtc_UWord32 _bitRate;               // Target bit rate, kbps.
    unsigned int _lengthSourceFrame;       // Bytes per I420 source frame.
    unsigned char* _sourceBuffer;          // One-frame scratch buffer (owned).
    TestVideoBuffer _inputVideoBuffer;
    TestVideoEncodedBuffer _encodedVideoBuffer;
    TestVideoBuffer _decodedVideoBuffer;
    webrtc::VideoCodec _inst;              // Codec settings.
    std::fstream* _log;                    // Summary log stream (not owned).
    std::string _inname;                   // Input YUV file path.
    std::string _outname;                  // Decoded-output YUV file path.
    std::string _encodedName;              // Encoded-bitstream file path.
    int _sumEncBytes;                      // Total encoded bytes produced.

private:
    std::string _name;                     // Test name (printed in summary).
    std::string _description;              // Test description.

};
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAWEWORK_TEST_H_
|
||||
@ -0,0 +1,61 @@
|
||||
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
{
|
||||
'includes': [
|
||||
'../../../../common_settings.gypi', # Common settings
|
||||
],
|
||||
'targets': [
|
||||
{
|
||||
'target_name': 'test_framework',
|
||||
'type': '<(library)',
|
||||
|
||||
'dependencies': [
|
||||
'../../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
|
||||
'../../../../common_video/vplib/main/source/vplib.gyp:webrtc_vplib',
|
||||
],
|
||||
|
||||
'include_dirs': [
|
||||
'../interface',
|
||||
],
|
||||
|
||||
'direct_dependent_settings': {
|
||||
'include_dirs': [
|
||||
'../interface',
|
||||
],
|
||||
},
|
||||
|
||||
'sources': [
|
||||
# header files
|
||||
'benchmark.h',
|
||||
'normal_async_test.h',
|
||||
'normal_test.h',
|
||||
'packet_loss_test.h',
|
||||
'performance_test.h',
|
||||
'test.h',
|
||||
'unit_test.h',
|
||||
'video_buffer.h',
|
||||
'video_source.h',
|
||||
|
||||
# source files
|
||||
'benchmark.cc',
|
||||
'normal_async_test.cc',
|
||||
'normal_test.cc',
|
||||
'packet_loss_test.cc',
|
||||
'performance_test.cc',
|
||||
'test.cc',
|
||||
'unit_test.cc',
|
||||
'video_buffer.cc',
|
||||
'video_source.cc',
|
||||
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
# Local Variables:
|
||||
# tab-width:2
|
||||
# indent-tabs-mode:nil
|
||||
# End:
|
||||
# vim: set expandtab tabstop=2 shiftwidth=2:
|
||||
815
modules/video_coding/codecs/test_framework/unit_test.cc
Normal file
815
modules/video_coding/codecs/test_framework/unit_test.cc
Normal file
@ -0,0 +1,815 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "unit_test.h"
|
||||
#include "video_source.h"
|
||||
#include "tick_util.h"
|
||||
#include <cassert>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <math.h>
|
||||
|
||||
using namespace webrtc;
|
||||
|
||||
// Default-constructs the unit test with the fixed name/description and all
// counters zeroed and resource pointers nulled; the actual resources are
// acquired lazily in Setup().
UnitTest::UnitTest()
:
Test("UnitTest", "Unit test"),
_tests(0),
_errors(0),
_source(NULL),
_refFrame(NULL),
_refEncFrame(NULL),
_refDecFrame(NULL),
_refEncFrameLength(0),
_sourceFile(NULL),
_encodeCompleteCallback(NULL),
_decodeCompleteCallback(NULL)
{
}
|
||||
|
||||
// Constructs a named unit test (used by codec-specific subclasses); all
// counters are zeroed and resource pointers nulled until Setup() runs.
//
// Fix: the original listed the base class Test(name, description) LAST in
// the member-initializer list. Bases are always initialized before members
// regardless, so behavior was unaffected, but the misleading order drew
// -Wreorder warnings; the base now comes first, matching the default ctor.
UnitTest::UnitTest(std::string name, std::string description)
:
Test(name, description),
_tests(0),
_errors(0),
_source(NULL),
_refFrame(NULL),
_refEncFrame(NULL),
_refDecFrame(NULL),
_refEncFrameLength(0),
_sourceFile(NULL),
_encodeCompleteCallback(NULL),
_decodeCompleteCallback(NULL)
{
}
|
||||
|
||||
// Releases every resource the test may have acquired in Setup(). All
// pointers are either NULL or valid, and delete / delete[] on NULL is a
// no-op, so no per-pointer guards are needed; fclose, however, must not
// be handed a NULL stream.
UnitTest::~UnitTest()
{
    delete _encodeCompleteCallback;
    delete _decodeCompleteCallback;
    delete _source;
    delete [] _refFrame;
    delete [] _refDecFrame;
    delete [] _sourceBuffer;
    delete [] _refEncFrame;

    if (_sourceFile)
    {
        fclose(_sourceFile);
    }
}
|
||||
|
||||
// Encoder callback: copies the finished encoded frame and its metadata
// into the test's encoded-video buffer and raises the completion flag that
// UnitTest::WaitForEncodedFrame() polls. Always returns 0 (success).
WebRtc_Word32
UnitTestEncodeCompleteCallback::Encoded(EncodedImage& encodedImage,
                                        const void* codecSpecificInfo,
                                        const webrtc::RTPFragmentationHeader*
                                        fragmentation)
{
    _encodedVideoBuffer->VerifyAndAllocate(encodedImage._size);
    _encodedVideoBuffer->CopyBuffer(encodedImage._size, encodedImage._buffer);
    _encodedVideoBuffer->UpdateLength(encodedImage._length);
    _encodedVideoBuffer->SetFrameType(encodedImage._frameType);
    _encodedVideoBuffer->SetCaptureWidth(
        (WebRtc_UWord16)encodedImage._encodedWidth);
    _encodedVideoBuffer->SetCaptureHeight(
        (WebRtc_UWord16)encodedImage._encodedHeight);
    _encodedVideoBuffer->SetTimeStamp(encodedImage._timeStamp);
    _encodeComplete = true;
    _encodedFrameType = encodedImage._frameType;
    return 0;
}
|
||||
|
||||
// Decoder callback: copies the decoded frame and its metadata into the
// test's decoded-video buffer and raises the completion flag that
// UnitTest::WaitForDecodedFrame() polls. Always returns 0 (success).
WebRtc_Word32 UnitTestDecodeCompleteCallback::Decoded(RawImage& image)
{
    _decodedVideoBuffer->VerifyAndAllocate(image._length);
    _decodedVideoBuffer->CopyBuffer(image._length, image._buffer);
    _decodedVideoBuffer->SetWidth(image._width);
    _decodedVideoBuffer->SetHeight(image._height);
    _decodedVideoBuffer->SetTimeStamp(image._timeStamp);
    _decodeComplete = true;
    return 0;
}
|
||||
|
||||
bool
|
||||
UnitTestEncodeCompleteCallback::EncodeComplete()
|
||||
{
|
||||
if (_encodeComplete)
|
||||
{
|
||||
_encodeComplete = false;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Returns the frame type (key/delta) of the most recently encoded frame,
// as recorded by Encoded().
VideoFrameType
UnitTestEncodeCompleteCallback::EncodedFrameType() const
{
    return _encodedFrameType;
}
|
||||
|
||||
bool
|
||||
UnitTestDecodeCompleteCallback::DecodeComplete()
|
||||
{
|
||||
if (_decodeComplete)
|
||||
{
|
||||
_decodeComplete = false;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Busy-waits (no sleep) up to kMaxWaitEncTimeMs for the encode-complete
// callback to fire. Returns the encoded frame length in bytes, or 0 on
// timeout.
WebRtc_UWord32
UnitTest::WaitForEncodedFrame() const
{
    WebRtc_Word64 startTime = TickTime::MillisecondTimestamp();
    while (TickTime::MillisecondTimestamp() - startTime < kMaxWaitEncTimeMs)
    {
        if (_encodeCompleteCallback->EncodeComplete())
        {
            return _encodedVideoBuffer.GetLength();
        }
    }
    return 0;
}
|
||||
|
||||
// Busy-waits (no sleep) up to kMaxWaitDecTimeMs for the decode-complete
// callback to fire. Returns the decoded frame length in bytes, or 0 on
// timeout.
WebRtc_UWord32
UnitTest::WaitForDecodedFrame() const
{
    WebRtc_Word64 startTime = TickTime::MillisecondTimestamp();
    while (TickTime::MillisecondTimestamp() - startTime < kMaxWaitDecTimeMs)
    {
        if (_decodeCompleteCallback->DecodeComplete())
        {
            return _decodedVideoBuffer.GetLength();
        }
    }
    return 0;
}
|
||||
|
||||
// Default bit-rate update: forwards to the encoder's SetRates() with the
// configured frame rate (the frameRate argument is ignored here). Codec
// subclasses may override. Returns the encoder's status code.
WebRtc_UWord32
UnitTest::CodecSpecific_SetBitrate(WebRtc_UWord32 bitRate,
                                   WebRtc_UWord32 /* frameRate */)
{
    return _encoder->SetRates(bitRate, _inst.maxFramerate);
}
|
||||
|
||||
// One-time test fixture setup: creates the callbacks, opens the source
// sequence, configures the codec, and produces one reference raw frame,
// one reference encoded frame and one reference decoded frame that the
// individual checks in Perform() compare against. Exits the process on
// unrecoverable I/O or codec failures (VIDEO_TEST_EXIT_ON_ERR).
void
UnitTest::Setup()
{
    // Use _sourceFile as a check to prevent multiple Setup() calls.
    if (_sourceFile != NULL)
    {
        return;
    }

    if (_encodeCompleteCallback == NULL)
    {
        _encodeCompleteCallback =
            new UnitTestEncodeCompleteCallback(&_encodedVideoBuffer);
    }
    if (_decodeCompleteCallback == NULL)
    {
        _decodeCompleteCallback =
            new UnitTestDecodeCompleteCallback(&_decodedVideoBuffer);
    }

    _encoder->RegisterEncodeCompleteCallback(_encodeCompleteCallback);
    _decoder->RegisterDecodeCompleteCallback(_decodeCompleteCallback);

    // Fixed CIF test sequence; path is relative to the working directory.
    _source = new VideoSource("test/testFiles/foreman_cif.yuv", kCIF);

    _lengthSourceFrame = _source->GetFrameLength();
    _refFrame = new unsigned char[_lengthSourceFrame];
    _refDecFrame = new unsigned char[_lengthSourceFrame];
    _sourceBuffer = new unsigned char [_lengthSourceFrame];
    _sourceFile = fopen(_source->GetFileName().c_str(), "rb");
    VIDEO_TEST_EXIT_ON_ERR(_sourceFile != NULL);

    // Codec settings for the reference encode/decode pass.
    _inst.maxFramerate = _source->GetFrameRate();
    _bitRate = 300;
    _inst.startBitrate = 300;
    _inst.maxBitrate = 4000;
    _inst.width = _source->GetWidth();
    _inst.height = _source->GetHeight();

    // Get input frame.
    _inputVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);
    VIDEO_TEST_EXIT_ON_ERR(fread(_refFrame, 1, _lengthSourceFrame, _sourceFile)
                           == _lengthSourceFrame);
    _inputVideoBuffer.CopyBuffer(_lengthSourceFrame, _refFrame);
    rewind(_sourceFile);

    // Get a reference encoded frame.
    _encodedVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);

    RawImage image;
    VideoBufferToRawImage(_inputVideoBuffer, image);

    // Ensures our initial parameters are valid.
    VIDEO_TEST(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
    _encoder->Encode(image, NULL);
    _refEncFrameLength = WaitForEncodedFrame();
    VIDEO_TEST_EXIT_ON_ERR(_refEncFrameLength > 0);
    _refEncFrame = new unsigned char[_refEncFrameLength];
    memcpy(_refEncFrame, _encodedVideoBuffer.GetBuffer(), _refEncFrameLength);

    // Get a reference decoded frame.
    _decodedVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);
    VIDEO_TEST(_decoder->InitDecode(&_inst, 1) == WEBRTC_VIDEO_CODEC_OK);

    if (SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK)
    {
        exit(EXIT_FAILURE);
    }

    // Some codecs may need more than one frame before the decoder
    // produces output; keep feeding frames until a decode completes.
    int frameLength = 0;
    int i=0;
    while (frameLength == 0)
    {
        if (i > 0)
        {
            // Insert yet another frame
            _inputVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);
            VIDEO_TEST_EXIT_ON_ERR(fread(_refFrame, 1, _lengthSourceFrame,
                _sourceFile) == _lengthSourceFrame);
            _inputVideoBuffer.CopyBuffer(_lengthSourceFrame, _refFrame);
            _inputVideoBuffer.SetWidth(_source->GetWidth());
            _inputVideoBuffer.SetHeight(_source->GetHeight());
            VideoBufferToRawImage(_inputVideoBuffer, image);
            _encoder->Encode(image, NULL);
            VIDEO_TEST_EXIT_ON_ERR(WaitForEncodedFrame() > 0);
        }
        EncodedImage encodedImage;
        VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
        VIDEO_TEST_EXIT_ON_ERR(_decoder->Decode(encodedImage, 0, NULL)
                               == WEBRTC_VIDEO_CODEC_OK);
        frameLength = WaitForDecodedFrame();
        _encodedVideoBuffer.Reset();
        _encodedVideoBuffer.UpdateLength(0);
        i++;
    }
    rewind(_sourceFile);
    VIDEO_TEST(frameLength == _lengthSourceFrame);
    memcpy(_refDecFrame, _decodedVideoBuffer.GetBuffer(), _lengthSourceFrame);
}
|
||||
|
||||
// Releases the codecs and every resource acquired in Setup(), nulling
// each pointer so the destructor (which frees whatever is still non-NULL)
// does not double-free.
void
UnitTest::Teardown()
{
    // Use _sourceFile as a check to prevent multiple Teardown() calls.
    if (_sourceFile == NULL)
    {
        return;
    }

    _encoder->Release();
    _decoder->Release();

    fclose(_sourceFile);
    _sourceFile = NULL;
    delete [] _refFrame;
    _refFrame = NULL;
    delete [] _refEncFrame;
    _refEncFrame = NULL;
    delete [] _refDecFrame;
    _refDecFrame = NULL;
    delete [] _sourceBuffer;
    _sourceBuffer = NULL;
}
|
||||
|
||||
void
|
||||
UnitTest::Print()
|
||||
{
|
||||
printf("Unit Test\n\n%i tests completed\n", _tests);
|
||||
if (_errors > 0)
|
||||
{
|
||||
printf("%i FAILED\n\n", _errors);
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("ALL PASSED\n\n");
|
||||
}
|
||||
}
|
||||
|
||||
// Decodes the frame currently held in _encodedVideoBuffer without
// registering a test check (used where a failure is an acceptable
// outcome). Returns the decoded frame length on success, or the decoder's
// error code. The encoded buffer is reset afterwards either way.
int
UnitTest::DecodeWithoutAssert()
{
    EncodedImage encodedImage;
    VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
    int ret = _decoder->Decode(encodedImage, 0, NULL);
    int frameLength = WaitForDecodedFrame();
    _encodedVideoBuffer.Reset();
    _encodedVideoBuffer.UpdateLength(0);
    return ret == WEBRTC_VIDEO_CODEC_OK ? frameLength : ret;
}
|
||||
|
||||
// Decodes the frame currently held in _encodedVideoBuffer and asserts
// (both via assert() and the VIDEO_TEST tally) that the decode succeeds
// and yields either nothing or a full source-sized frame. An empty buffer
// is a no-op returning WEBRTC_VIDEO_CODEC_OK. Returns the decoded frame
// length on success, or the decoder's error code. The encoded buffer is
// reset afterwards.
int
UnitTest::Decode()
{
    EncodedImage encodedImage;
    VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
    if (encodedImage._length == 0)
    {
        return WEBRTC_VIDEO_CODEC_OK;
    }
    int ret = _decoder->Decode(encodedImage, 0, NULL);
    int frameLength = WaitForDecodedFrame();
    assert(ret == WEBRTC_VIDEO_CODEC_OK && (frameLength == 0 || frameLength
        == _lengthSourceFrame));
    VIDEO_TEST(ret == WEBRTC_VIDEO_CODEC_OK && (frameLength == 0 || frameLength
        == _lengthSourceFrame));
    _encodedVideoBuffer.Reset();
    _encodedVideoBuffer.UpdateLength(0);
    return ret == WEBRTC_VIDEO_CODEC_OK ? frameLength : ret;
}
|
||||
|
||||
// Test pure virtual VideoEncoder and VideoDecoder APIs.
// Runs, in order: encoder parameter validation, encoder stress tests,
// decoder parameter validation, decoder stress tests, and a timestamp
// propagation test, then hands off to RateControlTests() and Teardown().
void
UnitTest::Perform()
{
    UnitTest::Setup();
    int frameLength;
    RawImage inputImage;
    EncodedImage encodedImage;
    EventWrapper& sleepEvent = *EventWrapper::Create();

    //----- Encoder parameter tests -----

    //-- Calls before InitEncode() --
    // We want to revert the initialization done in Setup().
    VIDEO_TEST(_encoder->Release() == WEBRTC_VIDEO_CODEC_OK);
    VideoBufferToRawImage(_inputVideoBuffer, inputImage);
    // Encode/Reset on an uninitialized encoder must fail cleanly.
    VIDEO_TEST(_encoder->Encode(inputImage, NULL)
               == WEBRTC_VIDEO_CODEC_UNINITIALIZED);
    VIDEO_TEST(_encoder->Reset() == WEBRTC_VIDEO_CODEC_UNINITIALIZED);

    //-- InitEncode() errors --
    // Null pointer.
    VIDEO_TEST(_encoder->InitEncode(NULL, 1, 1440) ==
               WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    // bit rate exceeds max bit rate
    WebRtc_Word32 tmpBitRate = _inst.startBitrate;
    WebRtc_Word32 tmpMaxBitRate = _inst.maxBitrate;
    _inst.startBitrate = 4000;
    _inst.maxBitrate = 3000;
    VIDEO_TEST(_encoder->InitEncode(&_inst, 1, 1440) ==
               WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    _inst.startBitrate = tmpBitRate;
    _inst.maxBitrate = tmpMaxBitRate; //unspecified value

    // Bad framerate.
    _inst.maxFramerate = 0;
    VIDEO_TEST(_encoder->InitEncode(&_inst, 1, 1440) ==
               WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    // Seems like we should allow any framerate in range [0, 255].
    //_inst.frameRate = 100;
    //VIDEO_TEST(_encoder->InitEncode(&_inst, 1) == -1); // FAILS
    _inst.maxFramerate = 30;

    // Bad bitrate.
    _inst.startBitrate = -1;
    VIDEO_TEST(_encoder->InitEncode(&_inst, 1, 1440) ==
               WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    _inst.maxBitrate = _inst.startBitrate - 1;
    VIDEO_TEST(_encoder->InitEncode(&_inst, 1, 1440) ==
               WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    _inst.maxBitrate = 0;
    _inst.startBitrate = 300;

    // Bad maxBitRate.
    _inst.maxBitrate = 200;
    VIDEO_TEST(_encoder->InitEncode(&_inst, 1, 1440) ==
               WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    _inst.maxBitrate = 4000;

    // Bad width.
    _inst.width = 0;
    VIDEO_TEST(_encoder->InitEncode(&_inst, 1, 1440) < 0);
    // Should there be a width and height cap?
    //_inst.width = 10000;
    //VIDEO_TEST(_encoder->InitEncode(&_inst, 1) == -1);
    _inst.width = _source->GetWidth();

    // Bad height.
    _inst.height = 0;
    VIDEO_TEST(_encoder->InitEncode(&_inst, 1, 1440) < 0);
    _inst.height = _source->GetHeight();

    // Bad number of cores.
    VIDEO_TEST(_encoder->InitEncode(&_inst, -1, 1440) ==
               WEBRTC_VIDEO_CODEC_ERR_PARAMETER);

    // Valid parameters: encoder is now initialized for the tests below.
    VIDEO_TEST(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);

    //-- Encode() errors --

    // inputVideoBuffer unallocated.
    _inputVideoBuffer.Free();
    VideoBufferToRawImage(_inputVideoBuffer, inputImage);
    VIDEO_TEST(_encoder->Encode(inputImage, NULL) ==
               WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    _inputVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);
    _inputVideoBuffer.CopyBuffer(_lengthSourceFrame, _refFrame);

    //----- Encoder stress tests -----

    // Vary frame rate and I-frame request.
    VideoBufferToRawImage(_inputVideoBuffer, inputImage);
    for (int i = 1; i <= 60; i++)
    {
        // Alternate key and delta frame requests on every other iteration.
        VideoFrameType frameType = !(i % 2) ? kKeyFrame : kDeltaFrame;
        VIDEO_TEST(_encoder->Encode(inputImage, NULL, frameType) ==
                   WEBRTC_VIDEO_CODEC_OK);
        VIDEO_TEST(WaitForEncodedFrame() > 0);
        sleepEvent.Wait(10); // Allow the encoder's queue to realize it's empty.
    }

    // Init then encode.
    _encodedVideoBuffer.UpdateLength(0);
    _encodedVideoBuffer.Reset();
    VIDEO_TEST(_encoder->Encode(inputImage, NULL) == WEBRTC_VIDEO_CODEC_OK);
    VIDEO_TEST(WaitForEncodedFrame() > 0);

    VIDEO_TEST(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
    _encoder->Encode(inputImage, NULL);
    frameLength = WaitForEncodedFrame();
    VIDEO_TEST(frameLength > 0);
    // After a fresh init, the output must be bit-exact with the reference.
    VIDEO_TEST(CheckIfBitExact(_refEncFrame, _refEncFrameLength,
                               _encodedVideoBuffer.GetBuffer(), frameLength) == true);

    // Reset then encode.
    _encodedVideoBuffer.UpdateLength(0);
    _encodedVideoBuffer.Reset();
    VIDEO_TEST(_encoder->Encode(inputImage, NULL) == WEBRTC_VIDEO_CODEC_OK);
    WaitForEncodedFrame();
    VIDEO_TEST(_encoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
    VIDEO_TEST(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
    _encoder->Encode(inputImage, NULL);
    frameLength = WaitForEncodedFrame();
    VIDEO_TEST(frameLength > 0);
    VIDEO_TEST(CheckIfBitExact(_refEncFrame, _refEncFrameLength,
                               _encodedVideoBuffer.GetBuffer(), frameLength) == true);

    // Release then encode.
    _encodedVideoBuffer.UpdateLength(0);
    _encodedVideoBuffer.Reset();
    VIDEO_TEST(_encoder->Encode(inputImage, NULL) == WEBRTC_VIDEO_CODEC_OK);
    WaitForEncodedFrame();
    VIDEO_TEST(_encoder->Release() == WEBRTC_VIDEO_CODEC_OK);
    VIDEO_TEST(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
    _encoder->Encode(inputImage, NULL);
    frameLength = WaitForEncodedFrame();
    VIDEO_TEST(frameLength > 0);
    VIDEO_TEST(CheckIfBitExact(_refEncFrame, _refEncFrameLength,
                               _encodedVideoBuffer.GetBuffer(), frameLength) == true);

    //----- Decoder parameter tests -----

    //-- Calls before InitDecode() --
    // We want to revert the initialization done in Setup().
    VIDEO_TEST(_decoder->Release() == WEBRTC_VIDEO_CODEC_OK);
    VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
    VIDEO_TEST(_decoder->Decode(encodedImage, false, NULL) ==
               WEBRTC_VIDEO_CODEC_UNINITIALIZED);
    WaitForDecodedFrame();
    VIDEO_TEST(_decoder->Reset() == WEBRTC_VIDEO_CODEC_UNINITIALIZED);
    VIDEO_TEST(_decoder->InitDecode(&_inst, 1) == WEBRTC_VIDEO_CODEC_OK);

    if (SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK)
    {
        exit(EXIT_FAILURE);
    }

    //-- Decode() errors --
    // Unallocated encodedVideoBuffer.
    _encodedVideoBuffer.Free();
    //_encodedVideoBuffer.UpdateLength(10); // Buffer NULL but length > 0
    VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
    VIDEO_TEST(_decoder->Decode(encodedImage, false, NULL) ==
               WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    _encodedVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);

    //----- Decoder stress tests -----
    unsigned char* tmpBuf = new unsigned char[_lengthSourceFrame];

    // "Random" and zero data.
    // We either expect an error, or at the least, no output.
    // This relies on the codec's ability to detect an erroneous bitstream.
    /*
    VIDEO_TEST(_decoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
    VIDEO_TEST(_decoder->InitDecode(&_inst, 1) == WEBRTC_VIDEO_CODEC_OK);
    if (SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK)
    {
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < 100; i++)
    {
        VIDEO_TEST_EXIT_ON_ERR(fread(tmpBuf, 1, _refEncFrameLength, _sourceFile)
            == _refEncFrameLength);
        _encodedVideoBuffer.CopyBuffer(_refEncFrameLength, tmpBuf);
        VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
        FillDecoderSpecificInfo(encodedImage);
        int ret = _decoder->Decode(encodedImage, false, _decoderSpecificInfo);
        VIDEO_TEST(ret <= 0);
        if (ret == 0)
        {
            VIDEO_TEST(WaitForDecodedFrame() == 0);
        }

        memset(tmpBuf, 0, _refEncFrameLength);
        _encodedVideoBuffer.CopyBuffer(_refEncFrameLength, tmpBuf);
        VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
        FillDecoderSpecificInfo(encodedImage);
        ret = _decoder->Decode(encodedImage, false, _decoderSpecificInfo);
        VIDEO_TEST(ret <= 0);
        if (ret == 0)
        {
            VIDEO_TEST(WaitForDecodedFrame() == 0);
        }
    }
    */
    rewind(_sourceFile);

    _encodedVideoBuffer.UpdateLength(_refEncFrameLength);
    _encodedVideoBuffer.CopyBuffer(_refEncFrameLength, _refEncFrame);

    // Init then decode.
    VIDEO_TEST(_decoder->InitDecode(&_inst, 1) == WEBRTC_VIDEO_CODEC_OK);
    if (SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK)
    {
        exit(EXIT_FAILURE);
    }
    frameLength = 0;
    VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
    // Repeat the decode until the (possibly delayed) frame is delivered.
    while (frameLength == 0)
    {
        _decoder->Decode(encodedImage, false, NULL);
        frameLength = WaitForDecodedFrame();
    }
    VIDEO_TEST(CheckIfBitExact(_decodedVideoBuffer.GetBuffer(), frameLength,
                               _refDecFrame, _lengthSourceFrame) == true);

    // Reset then decode.
    VIDEO_TEST(_decoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
    frameLength = 0;
    VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
    while (frameLength == 0)
    {
        _decoder->Decode(encodedImage, false, NULL);
        frameLength = WaitForDecodedFrame();
    }
    VIDEO_TEST(CheckIfBitExact(_decodedVideoBuffer.GetBuffer(), frameLength,
                               _refDecFrame, _lengthSourceFrame) == true);

    // Decode with other size, reset, then decode with original size again
    // to verify that decoder is reset to a "fresh" state upon Reset().
    {
        // assert that input frame size is a factor of two, so that we can use
        // quarter size below
        VIDEO_TEST((_inst.width % 2 == 0) && (_inst.height % 2 == 0));

        VideoCodec tempInst;
        memcpy(&tempInst, &_inst, sizeof(VideoCodec));
        tempInst.width /= 2;
        tempInst.height /= 2;

        // Encode reduced (quarter) frame size
        VIDEO_TEST(_encoder->Release() == WEBRTC_VIDEO_CODEC_OK);
        VIDEO_TEST(_encoder->InitEncode(&tempInst, 1, 1440) ==
                   WEBRTC_VIDEO_CODEC_OK);
        RawImage tempInput(inputImage._buffer, inputImage._length/4,
                           inputImage._size/4);
        _encoder->Encode(tempInput, NULL);
        frameLength = WaitForEncodedFrame();
        VIDEO_TEST(frameLength > 0);

        // Reset then decode.
        VIDEO_TEST(_decoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
        frameLength = 0;
        VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
        while (frameLength == 0)
        {
            _decoder->Decode(encodedImage, false, NULL);
            frameLength = WaitForDecodedFrame();
        }

        // Encode original frame again
        VIDEO_TEST(_encoder->Release() == WEBRTC_VIDEO_CODEC_OK);
        VIDEO_TEST(_encoder->InitEncode(&_inst, 1, 1440) ==
                   WEBRTC_VIDEO_CODEC_OK);
        _encoder->Encode(inputImage, NULL);
        frameLength = WaitForEncodedFrame();
        VIDEO_TEST(frameLength > 0);

        // Reset then decode original frame again.
        VIDEO_TEST(_decoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
        frameLength = 0;
        VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
        while (frameLength == 0)
        {
            _decoder->Decode(encodedImage, false, NULL);
            frameLength = WaitForDecodedFrame();
        }

        // check that decoded frame matches with reference
        VIDEO_TEST(CheckIfBitExact(_decodedVideoBuffer.GetBuffer(), frameLength,
                                   _refDecFrame, _lengthSourceFrame) == true);

    }

    // Release then decode.
    VIDEO_TEST(_decoder->Release() == WEBRTC_VIDEO_CODEC_OK);
    VIDEO_TEST(_decoder->InitDecode(&_inst, 1) == WEBRTC_VIDEO_CODEC_OK);
    if (SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK)
    {
        exit(EXIT_FAILURE);
    }
    frameLength = 0;
    VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
    while (frameLength == 0)
    {
        _decoder->Decode(encodedImage, false, NULL);
        frameLength = WaitForDecodedFrame();
    }
    VIDEO_TEST(CheckIfBitExact(_decodedVideoBuffer.GetBuffer(), frameLength,
                               _refDecFrame, _lengthSourceFrame) == true);
    _encodedVideoBuffer.UpdateLength(0);
    _encodedVideoBuffer.Reset();

    delete [] tmpBuf;

    //----- Function tests -----
    int frames = 0;
    // Do not specify maxBitRate (as in ViE).
    _inst.maxBitrate = 0;

    //-- Timestamp propagation --
    VIDEO_TEST(_encoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
    VIDEO_TEST(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
    VIDEO_TEST(_decoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
    VIDEO_TEST(_decoder->InitDecode(&_inst, 1) == WEBRTC_VIDEO_CODEC_OK);
    if (SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK)
    {
        exit(EXIT_FAILURE);
    }

    printf("\nTimestamp propagation test...\n");
    frames = 0;
    int frameDelay = 0;
    int encTimeStamp;
    _decodedVideoBuffer.SetTimeStamp(0);
    // Feed the whole source sequence; the frame index doubles as timestamp.
    while (fread(_sourceBuffer, 1, _lengthSourceFrame, _sourceFile) ==
           _lengthSourceFrame)
    {
        _inputVideoBuffer.CopyBuffer(_lengthSourceFrame, _sourceBuffer);
        _inputVideoBuffer.SetTimeStamp(frames);
        VideoBufferToRawImage(_inputVideoBuffer, inputImage);
        VIDEO_TEST_EXIT_ON_ERR(_encoder->Encode(inputImage, NULL) ==
                               WEBRTC_VIDEO_CODEC_OK);
        frameLength = WaitForEncodedFrame();
        //VIDEO_TEST_EXIT_ON_ERR(frameLength);
        VIDEO_TEST(frameLength > 0);
        // The timestamp must survive the encode unchanged.
        encTimeStamp = _encodedVideoBuffer.GetTimeStamp();
        VIDEO_TEST(_inputVideoBuffer.GetTimeStamp() == encTimeStamp);

        frameLength = Decode();
        if (frameLength == 0)
        {
            // No output this round: the decoder is lagging one more frame.
            frameDelay++;
        }

        // Account for decoder delay when comparing decoded timestamps.
        encTimeStamp -= frameDelay;
        if (encTimeStamp < 0)
        {
            encTimeStamp = 0;
        }
        VIDEO_TEST(_decodedVideoBuffer.GetTimeStamp() == encTimeStamp);
        frames++;
        sleepEvent.Wait(33);
    }
    delete &sleepEvent;
    VIDEO_TEST_EXIT_ON_ERR(feof(_sourceFile) != 0);
    rewind(_sourceFile);

    RateControlTests();

    Teardown();
}
|
||||
|
||||
void
|
||||
UnitTest::RateControlTests()
|
||||
{
|
||||
FILE *outFile = NULL;
|
||||
std::string outFileName;
|
||||
int frames = 0;
|
||||
RawImage inputImage;
|
||||
WebRtc_UWord32 frameLength;
|
||||
EventWrapper& sleepEvent = *EventWrapper::Create();
|
||||
|
||||
// Do not specify maxBitRate (as in ViE).
|
||||
_inst.maxBitrate = 0;
|
||||
//-- Verify rate control --
|
||||
VIDEO_TEST(_encoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
|
||||
VIDEO_TEST(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
|
||||
VIDEO_TEST(_decoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
|
||||
VIDEO_TEST(_decoder->InitDecode(&_inst, 1) == WEBRTC_VIDEO_CODEC_OK);
|
||||
// add: should also be 0, and 1
|
||||
const int bitRate[] =
|
||||
{100, 200, 300, 400, 500, 600, 800, 1000, 2000, 3000, 4000, 10000};
|
||||
const int nBitrates = sizeof(bitRate)/sizeof(*bitRate);
|
||||
|
||||
printf("\nRate control test\n");
|
||||
for (int i = 0; i < nBitrates; i++)
|
||||
{
|
||||
_bitRate = bitRate[i];
|
||||
int totalBytes = 0;
|
||||
_encoder->Reset();
|
||||
_inst.startBitrate = _bitRate;
|
||||
_encoder->InitEncode(&_inst, 4, 1440);
|
||||
_decoder->Reset();
|
||||
_decoder->InitDecode(&_inst, 1);
|
||||
frames = 0;
|
||||
|
||||
if (_bitRate > _inst.maxBitrate)
|
||||
{
|
||||
CodecSpecific_SetBitrate(_bitRate, _inst.maxFramerate);
|
||||
}
|
||||
else
|
||||
{
|
||||
CodecSpecific_SetBitrate(_bitRate, _inst.maxFramerate);
|
||||
}
|
||||
|
||||
while (fread(_sourceBuffer, 1, _lengthSourceFrame, _sourceFile) ==
|
||||
_lengthSourceFrame)
|
||||
{
|
||||
_inputVideoBuffer.CopyBuffer(_lengthSourceFrame, _sourceBuffer);
|
||||
_inputVideoBuffer.SetTimeStamp(_inputVideoBuffer.GetTimeStamp() +
|
||||
static_cast<WebRtc_UWord32>(9e4 /
|
||||
static_cast<float>(_inst.maxFramerate)));
|
||||
VideoBufferToRawImage(_inputVideoBuffer, inputImage);
|
||||
VIDEO_TEST_EXIT_ON_ERR(_encoder->Encode(inputImage, NULL) ==
|
||||
WEBRTC_VIDEO_CODEC_OK);
|
||||
frameLength = WaitForEncodedFrame();
|
||||
VIDEO_TEST_EXIT_ON_ERR(frameLength > 0);
|
||||
//VIDEO_TEST(frameLength > 0);
|
||||
totalBytes += frameLength;
|
||||
frames++;
|
||||
|
||||
_encodedVideoBuffer.UpdateLength(0);
|
||||
_encodedVideoBuffer.Reset();
|
||||
|
||||
sleepEvent.Wait(10);
|
||||
}
|
||||
WebRtc_UWord32 actualBitrate =
|
||||
(totalBytes / frames * _inst.maxFramerate * 8)/1000;
|
||||
printf("Target bitrate: %d kbps, actual bitrate: %d kbps\n", _bitRate,
|
||||
actualBitrate);
|
||||
// Test for close match over reasonable range.
|
||||
if (_bitRate >= 100 && _bitRate <= 4000)
|
||||
{
|
||||
//VIDEO_TEST(fabs(actualBitrate - _bitRate) < 0.05 * _bitRate);
|
||||
VIDEO_TEST(abs(WebRtc_Word32(actualBitrate - _bitRate)) <
|
||||
0.1 * _bitRate); // for VP8
|
||||
}
|
||||
VIDEO_TEST_EXIT_ON_ERR(feof(_sourceFile) != 0);
|
||||
rewind(_sourceFile);
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
UnitTest::CheckIfBitExact(const void* ptrA, unsigned int aLengthBytes,
|
||||
const void* ptrB, unsigned int bLengthBytes)
|
||||
{
|
||||
if (aLengthBytes != bLengthBytes)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
return memcmp(ptrA, ptrB, aLengthBytes) == 0;
|
||||
}
|
||||
133
modules/video_coding/codecs/test_framework/unit_test.h
Normal file
133
modules/video_coding/codecs/test_framework/unit_test.h
Normal file
@ -0,0 +1,133 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_UNIT_TEST_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_UNIT_TEST_H_
|
||||
|
||||
#include "test.h"
|
||||
#include "event_wrapper.h"
|
||||
|
||||
// Disable "conditional expression is constant" warnings on the perfectly
// acceptable
// do { ... } while (0) constructions below.
// Refer to http://stackoverflow.com/questions/1946445/
// is-there-better-way-to-write-do-while0-construct-to-avoid-compiler-warnings
// for some discussion of the issue.
// Warning 4127 is MSVC-specific; guard the pragma so other compilers do not
// warn about (or choke on) an unknown pragma in this shared header.
#ifdef _MSC_VER
#pragma warning(disable : 4127)
#endif

// Records one test point: increments _tests and, if |expr| is false, prints
// the failing expression with its location and increments _errors.
// Execution continues after a failure.
#define VIDEO_TEST(expr) \
    do \
    { \
        _tests++; \
        if (!(expr)) \
        { \
            fprintf(stderr, "Error at line %i of %s\nAssertion failed: %s\n\n",\
                __LINE__, __FILE__, #expr); \
            _errors++; \
        } \
    } while (0)

// Like VIDEO_TEST, but a failure is fatal: the failing expression is printed
// and the process exits with EXIT_FAILURE.
#define VIDEO_TEST_EXIT_ON_ERR(expr) \
    do \
    { \
        if (!(expr)) \
        { \
            fprintf(stderr, "Error at line %i of %s\nAssertion failed: %s\n", \
                __LINE__, __FILE__, #expr); \
            fprintf(stderr, "Exiting...\n\n"); \
            exit(EXIT_FAILURE); \
        } \
    } while (0)
|
||||
|
||||
class VideoSource;
class UnitTestEncodeCompleteCallback;
class UnitTestDecodeCompleteCallback;

// Exercises the pure virtual VideoEncoder/VideoDecoder APIs: parameter
// validation, init/reset/release sequencing, stress tests, timestamp
// propagation, and rate control. Codec-specific subclasses override the
// CodecSpecific_* / SetCodecSpecificParameters hooks.
class UnitTest : public Test
{
public:
    UnitTest();
    virtual ~UnitTest();
    virtual void Perform();
    virtual void Print();

protected:
    UnitTest(std::string name, std::string description);
    // Sets the target bit rate (kbps) on the codec under test; used by
    // RateControlTests(). The frame rate argument is currently unused.
    virtual WebRtc_UWord32 CodecSpecific_SetBitrate(
        WebRtc_UWord32 bitRate,
        WebRtc_UWord32 /* frameRate */);
    virtual void Setup();
    virtual void Teardown();
    virtual void RateControlTests();
    virtual int Decode();
    virtual int DecodeWithoutAssert();
    // Optional codec-specific decoder configuration; the default is a no-op
    // returning 0 (callers treat non-WEBRTC_VIDEO_CODEC_OK as fatal).
    virtual int SetCodecSpecificParameters() {return 0;};

    // True iff both ranges have the same length and identical bytes.
    virtual bool CheckIfBitExact(const void *ptrA, unsigned int aLengthBytes,
                                 const void *ptrB, unsigned int bLengthBytes);

    // Wait for an encoded/decoded frame to arrive via the completion
    // callbacks; Perform() treats a 0 return as "no frame yet".
    WebRtc_UWord32 WaitForEncodedFrame() const;
    WebRtc_UWord32 WaitForDecodedFrame() const;

    // Counters maintained by the VIDEO_TEST macro.
    int _tests;
    int _errors;

    VideoSource* _source;
    unsigned char* _refFrame;     // Raw reference frame.
    unsigned char* _refEncFrame;  // Reference encoding of _refFrame.
    unsigned char* _refDecFrame;  // Reference decoding of _refEncFrame.
    int _refEncFrameLength;
    FILE* _sourceFile;

    UnitTestEncodeCompleteCallback* _encodeCompleteCallback;
    UnitTestDecodeCompleteCallback* _decodeCompleteCallback;
    // Maximum time to wait for encode/decode completion, in milliseconds.
    enum { kMaxWaitEncTimeMs = 100 };
    enum { kMaxWaitDecTimeMs = 25 };
};
|
||||
|
||||
class UnitTestEncodeCompleteCallback : public webrtc::EncodedImageCallback
|
||||
{
|
||||
public:
|
||||
UnitTestEncodeCompleteCallback(TestVideoEncodedBuffer* buffer,
|
||||
WebRtc_UWord32 decoderSpecificSize = 0,
|
||||
void* decoderSpecificInfo = NULL) :
|
||||
_encodedVideoBuffer(buffer),
|
||||
_decoderSpecificSize(decoderSpecificSize),
|
||||
_decoderSpecificInfo(decoderSpecificInfo),
|
||||
_encodeComplete(false) {}
|
||||
WebRtc_Word32 Encoded(webrtc::EncodedImage& encodedImage,
|
||||
const void* codecSpecificInfo,
|
||||
const webrtc::RTPFragmentationHeader*
|
||||
fragmentation = NULL);
|
||||
bool EncodeComplete();
|
||||
// Note that this only makes sense if an encode has been completed
|
||||
webrtc::VideoFrameType EncodedFrameType() const;
|
||||
private:
|
||||
TestVideoEncodedBuffer* _encodedVideoBuffer;
|
||||
void* _decoderSpecificInfo;
|
||||
WebRtc_UWord32 _decoderSpecificSize;
|
||||
bool _encodeComplete;
|
||||
webrtc::VideoFrameType _encodedFrameType;
|
||||
};
|
||||
|
||||
// Receives decoded frames from the decoder under test and stores them in
// the supplied TestVideoBuffer for the UnitTest to inspect.
class UnitTestDecodeCompleteCallback : public webrtc::DecodedImageCallback
{
public:
    // |buffer| receives each decoded frame; not owned by the callback.
    UnitTestDecodeCompleteCallback(TestVideoBuffer* buffer) :
        _decodedVideoBuffer(buffer), _decodeComplete(false) {}
    WebRtc_Word32 Decoded(webrtc::RawImage& image);
    // Reports whether a decode has completed since the flag was last
    // cleared (see the implementation for the exact reset behavior).
    bool DecodeComplete();
private:
    TestVideoBuffer* _decodedVideoBuffer;
    bool _decodeComplete;
};
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_UNIT_TEST_H_
|
||||
|
||||
319
modules/video_coding/codecs/test_framework/video_buffer.cc
Normal file
319
modules/video_coding/codecs/test_framework/video_buffer.cc
Normal file
@ -0,0 +1,319 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include <algorithm>
#include <assert.h>
#include <string.h>

#include "video_buffer.h"
|
||||
|
||||
using namespace webrtc;
|
||||
|
||||
// Constructs an empty buffer; storage is allocated later via
// VerifyAndAllocate().
TestVideoBuffer::TestVideoBuffer():
_buffer(0),
_bufferSize(0),
_bufferLength(0),
_startOffset(0),
_timeStamp(0),
_width(0),
_height(0)
{
    //
}
|
||||
|
||||
|
||||
// Releases the frame storage and zeroes the bookkeeping members.
TestVideoBuffer::~TestVideoBuffer()
{
    _timeStamp = 0;
    _startOffset = 0;
    _bufferLength = 0;
    _bufferSize = 0;

    // delete [] of a null pointer is a no-op, so no guard is needed.
    delete [] _buffer;
    _buffer = 0;
}
|
||||
|
||||
// Copy constructor: allocates storage of the same capacity and duplicates
// the frame payload (the _bufferLength bytes starting at _startOffset).
TestVideoBuffer::TestVideoBuffer(const TestVideoBuffer& rhs)
:
_buffer(0),
_bufferSize(rhs._bufferSize),
_bufferLength(rhs._bufferLength),
_startOffset(rhs._startOffset),
_timeStamp(rhs._timeStamp),
_width(rhs._width),
_height(rhs._height)
{
    // make sure that our buffer is big enough
    _buffer = new unsigned char[_bufferSize];

    // Only copy the required length. The source must also be advanced by
    // the start offset: the original copied from rhs._buffer directly,
    // which duplicates the wrong bytes whenever _startOffset is non-zero
    // (compare CopyBuffer(TestVideoBuffer&), which reads GetBuffer()).
    // Guard against a null source so memcpy is never handed NULL.
    if (rhs._buffer != NULL)
    {
        memcpy(_buffer + _startOffset, rhs._buffer + rhs._startOffset,
               _bufferLength);
    }
}
|
||||
|
||||
// Sets the frame timestamp (90 kHz units, per the header documentation).
void TestVideoBuffer::SetTimeStamp(unsigned int timeStamp)
{
    _timeStamp = timeStamp;
}

// Frame width in pixels.
unsigned int
TestVideoBuffer::GetWidth() const
{
    return _width;
}

// Frame height in pixels.
unsigned int
TestVideoBuffer::GetHeight() const
{
    return _height;
}

void
TestVideoBuffer::SetWidth(unsigned int width)
{
    _width = width;
}

void
TestVideoBuffer::SetHeight(unsigned int height)
{
    _height = height;
}
|
||||
|
||||
|
||||
void TestVideoBuffer::Free()
|
||||
{
|
||||
_timeStamp = 0;
|
||||
_startOffset = 0;
|
||||
_bufferLength = 0;
|
||||
_bufferSize = 0;
|
||||
_height = 0;
|
||||
_width = 0;
|
||||
|
||||
if(_buffer)
|
||||
{
|
||||
delete [] _buffer;
|
||||
_buffer = 0;
|
||||
}
|
||||
}
|
||||
|
||||
void TestVideoBuffer::VerifyAndAllocate(unsigned int minimumSize)
|
||||
{
|
||||
if(minimumSize > _bufferSize)
|
||||
{
|
||||
// make sure that our buffer is big enough
|
||||
unsigned char * newBufferBuffer = new unsigned char[minimumSize];
|
||||
if(_buffer)
|
||||
{
|
||||
// copy the old data
|
||||
memcpy(newBufferBuffer, _buffer, _bufferSize);
|
||||
delete [] _buffer;
|
||||
}
|
||||
_buffer = newBufferBuffer;
|
||||
_bufferSize = minimumSize;
|
||||
}
|
||||
}
|
||||
|
||||
// Moves the frame start offset to |length| bytes into the buffer, adjusting
// _bufferLength so the end of the frame stays (approximately) in place.
// Returns 0 on success, -1 if |length| exceeds the buffer size or the
// current frame length.
int TestVideoBuffer::SetOffset(unsigned int length)
{
    if (length > _bufferSize ||
        length > _bufferLength)
    {
        return -1;
    }

    unsigned int oldOffset = _startOffset;

    if(oldOffset > length)
    {
        // Offset moves toward the buffer start: the frame grows.
        unsigned int newLength = _bufferLength + (oldOffset-length);// increase by the diff
        assert(newLength <= _bufferSize);
        _bufferLength = newLength;
    }
    if(oldOffset < length)
    {
        // Offset moves forward: the frame shrinks, but only while the
        // length stays positive (a larger move leaves _bufferLength as-is).
        if(_bufferLength > (length-oldOffset))
        {
            _bufferLength -= (length-oldOffset); // decrease by the diff
        }
    }
    _startOffset = length; // update

    return 0;
}
|
||||
|
||||
// Sets the frame length; offset + length must fit within the allocation.
void TestVideoBuffer::UpdateLength(unsigned int newLength)
{
    assert(newLength +_startOffset <= _bufferSize);
    _bufferLength = newLength;
}

// Copies |length| bytes from |buffer| into the frame area (just past the
// start offset) and records the new frame length. The caller must have
// allocated enough space first (see VerifyAndAllocate()).
void TestVideoBuffer::CopyBuffer(unsigned int length, const unsigned char* buffer)
{
    assert(length+_startOffset <= _bufferSize);
    memcpy(_buffer+_startOffset, buffer, length);
    _bufferLength = length;
}
|
||||
|
||||
// Deep-copies the frame payload and all metadata (offset, timestamp,
// dimensions) from |fromVideoBuffer|. This buffer must already be large
// enough to hold the source frame.
void TestVideoBuffer::CopyBuffer(TestVideoBuffer& fromVideoBuffer)
{
    assert(fromVideoBuffer.GetLength() + fromVideoBuffer.GetStartOffset() <= _bufferSize);
    assert(fromVideoBuffer.GetSize() <= _bufferSize);

    _bufferLength = fromVideoBuffer.GetLength();
    _startOffset = fromVideoBuffer.GetStartOffset();
    _timeStamp = fromVideoBuffer.GetTimeStamp();
    _height = fromVideoBuffer.GetHeight();
    _width = fromVideoBuffer.GetWidth();

    // only copy required length
    memcpy(_buffer+_startOffset, fromVideoBuffer.GetBuffer(), fromVideoBuffer.GetLength()); // GetBuffer() includes _startOffset

}
|
||||
|
||||
// Shallow copy: aliases |fromVideoBuffer|'s storage instead of duplicating
// it, along with all metadata. Per the header, use with care and call
// ClearPointer() before this object is destroyed, since the destructor
// would otherwise delete storage it does not own.
// NOTE(review): any buffer this object already owned is overwritten here
// without being freed — callers appear expected to use this only on empty
// buffers; confirm at call sites.
void TestVideoBuffer::CopyPointer(const TestVideoBuffer& fromVideoBuffer)
{
    _bufferSize = fromVideoBuffer.GetSize();
    _bufferLength = fromVideoBuffer.GetLength();
    _startOffset = fromVideoBuffer.GetStartOffset();
    _timeStamp = fromVideoBuffer.GetTimeStamp();
    _height = fromVideoBuffer.GetHeight();
    _width = fromVideoBuffer.GetWidth();

    _buffer = fromVideoBuffer.GetBuffer();
}

// Drops the aliased pointer installed by CopyPointer() without freeing it.
void TestVideoBuffer::ClearPointer()
{
    _buffer = NULL;
}
|
||||
|
||||
// Exchanges the complete state (storage pointer and all bookkeeping) of
// this buffer with |videoBuffer|.
void TestVideoBuffer::SwapBuffers(TestVideoBuffer& videoBuffer)
{
    // Swap the raw members directly (a member function may access another
    // instance's private members). The previous implementation read the
    // other side through GetBuffer(), which returns _buffer + _startOffset;
    // with a non-zero offset that stored an interior pointer as the new
    // _buffer — doubling the offset on every later GetBuffer() call and
    // corrupting the heap when delete [] was eventually applied to it.
    std::swap(_buffer, videoBuffer._buffer);
    std::swap(_bufferSize, videoBuffer._bufferSize);
    std::swap(_bufferLength, videoBuffer._bufferLength);
    std::swap(_startOffset, videoBuffer._startOffset);
    std::swap(_timeStamp, videoBuffer._timeStamp);
    std::swap(_height, videoBuffer._height);
    std::swap(_width, videoBuffer._width);
}
|
||||
|
||||
// Installs a storage pointer and its bookkeeping values wholesale. The
// object takes ownership of |tempBuffer| (the destructor deletes _buffer).
// Used by SwapBuffers() to hand one side's state to the other.
void TestVideoBuffer::Set(unsigned char* tempBuffer,unsigned int tempSize,unsigned int tempLength, unsigned int tempOffset,unsigned int timeStamp)
{
    _buffer = tempBuffer;
    _bufferSize = tempSize;
    _bufferLength = tempLength;
    _startOffset = tempOffset;
    _timeStamp = timeStamp;
}
|
||||
|
||||
// Pointer to the start of the frame data (base storage plus start offset).
unsigned char* TestVideoBuffer::GetBuffer() const
{
    return _buffer+_startOffset;
}

// Offset (bytes) from the base allocation to the beginning of the frame.
unsigned int TestVideoBuffer::GetStartOffset() const
{
    return _startOffset;
}

// Total allocated capacity in bytes.
unsigned int TestVideoBuffer::GetSize() const
{
    return _bufferSize;
}

// Length (bytes) of the frame currently stored.
unsigned int TestVideoBuffer::GetLength() const
{
    return _bufferLength;
}

// Frame timestamp (90 kHz units, per the header documentation).
unsigned int TestVideoBuffer::GetTimeStamp() const
{
    return _timeStamp;
}
|
||||
|
||||
/**
 * TestVideoEncodedBuffer
 *
 */

// Encoded buffers start out as delta frames with an unknown (-1) frame
// rate and no capture dimensions.
TestVideoEncodedBuffer::TestVideoEncodedBuffer() :
    _captureWidth(0),
    _captureHeight(0),
    _frameRate(-1)
{
    _frameType = kDeltaFrame;
}

TestVideoEncodedBuffer::~TestVideoEncodedBuffer()
{
}
|
||||
|
||||
// Records the width of the originally captured (pre-encode) frame.
void TestVideoEncodedBuffer::SetCaptureWidth(unsigned short width)
{
    _captureWidth = width;
}

// Records the height of the originally captured (pre-encode) frame.
void TestVideoEncodedBuffer::SetCaptureHeight(unsigned short height)
{
    _captureHeight = height;
}

unsigned short TestVideoEncodedBuffer::GetCaptureWidth()
{
    return _captureWidth;
}

unsigned short TestVideoEncodedBuffer::GetCaptureHeight()
{
    return _captureHeight;
}

// Frame type (key/delta) of the stored encoded frame.
VideoFrameType TestVideoEncodedBuffer::GetFrameType()
{
    return _frameType;
}
|
||||
|
||||
void TestVideoEncodedBuffer::SetFrameType(VideoFrameType frametype)
{
    _frameType = frametype;
}

// Restores the encoded-specific members to their constructed defaults
// (zero capture dimensions, -1 frame rate, delta frame type). Note: does
// not touch the base TestVideoBuffer storage or length.
void TestVideoEncodedBuffer::Reset()
{
    _captureWidth = 0;
    _captureHeight = 0;
    _frameRate = -1;
    _frameType = kDeltaFrame;
}
|
||||
|
||||
// Frame rate associated with the encoded frame; -1 means "not set"
// (the constructor/Reset() default).
void TestVideoEncodedBuffer::SetFrameRate(float frameRate)
{
    _frameRate = frameRate;
}

float TestVideoEncodedBuffer::GetFrameRate()
{
    return _frameRate;
}
|
||||
122
modules/video_coding/codecs/test_framework/video_buffer.h
Normal file
122
modules/video_coding/codecs/test_framework/video_buffer.h
Normal file
@ -0,0 +1,122 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_VIDEO_BUFFER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_VIDEO_BUFFER_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
#include "video_image.h"
|
||||
|
||||
// Growable byte buffer for raw video frames used by the codec test
// framework. Tracks, alongside the storage itself: the frame length, a
// start offset within the allocation, a 90 kHz timestamp, and the frame
// dimensions. Copy construction is deep; assignment is disabled.
class TestVideoBuffer
{
public:
    TestVideoBuffer();

    virtual ~TestVideoBuffer();

    TestVideoBuffer(const TestVideoBuffer& rhs);

    /**
    * Verifies that current allocated buffer size is larger than or equal to the input size.
    * If the current buffer size is smaller, a new allocation is made and the old buffer data is copied to the new buffer.
    */
    void VerifyAndAllocate(unsigned int minimumSize);

    // Sets the frame length; offset + length must fit in the allocation.
    void UpdateLength(unsigned int newLength);

    // Exchanges the full state (storage and bookkeeping) of two buffers.
    void SwapBuffers(TestVideoBuffer& videoBuffer);

    // Deep-copies |length| bytes of frame data from |fromBuffer|.
    void CopyBuffer(unsigned int length, const unsigned char* fromBuffer);

    // Deep-copies frame data and metadata from another buffer.
    void CopyBuffer(TestVideoBuffer& fromVideoBuffer);

    // Use with care, and remember to call ClearPointer() when done.
    void CopyPointer(const TestVideoBuffer& fromVideoBuffer);

    // Drops an aliased pointer installed by CopyPointer() (no delete).
    void ClearPointer();

    int SetOffset(unsigned int length); // Sets offset to beginning of frame in buffer

    void Free(); // Deletes frame buffer and resets members to zero

    void SetTimeStamp(unsigned int timeStamp); // Sets timestamp of frame (90kHz)

    /**
    * Gets pointer to frame buffer
    */
    unsigned char* GetBuffer() const;

    /**
    * Gets allocated buffer size
    */
    unsigned int GetSize() const;

    /**
    * Gets length of frame
    */
    unsigned int GetLength() const;

    /**
    * Gets timestamp of frame (90kHz)
    */
    unsigned int GetTimeStamp() const;

    unsigned int GetWidth() const;
    unsigned int GetHeight() const;

    void SetWidth(unsigned int width);
    void SetHeight(unsigned int height);

private:
    // Assignment is intentionally disabled (declared, never defined).
    TestVideoBuffer& operator=(const TestVideoBuffer& inBuffer);

private:
    // Wholesale state install; used internally by SwapBuffers().
    void Set(unsigned char* buffer,unsigned int size,unsigned int length,unsigned int offset, unsigned int timeStamp);
    unsigned int GetStartOffset() const;

    unsigned char* _buffer;       // Pointer to frame buffer
    unsigned int _bufferSize;     // Allocated buffer size
    unsigned int _bufferLength;   // Length (in bytes) of frame
    unsigned int _startOffset;    // Offset (in bytes) to beginning of frame in buffer
    unsigned int _timeStamp;      // Timestamp of frame (90kHz)
    unsigned int _width;
    unsigned int _height;
};
|
||||
|
||||
// Extends TestVideoBuffer with metadata about the encoded frame: the capture
// dimensions, the frame type (key/delta), and the encode frame rate.
class TestVideoEncodedBuffer: public TestVideoBuffer
{
public:
    TestVideoEncodedBuffer();
    ~TestVideoEncodedBuffer();

    void SetCaptureWidth(unsigned short width);
    void SetCaptureHeight(unsigned short height);
    unsigned short GetCaptureWidth();
    unsigned short GetCaptureHeight();

    webrtc::VideoFrameType GetFrameType();
    void SetFrameType(webrtc::VideoFrameType frametype);

    // Resets the metadata (and inherited buffer state) for reuse.
    void Reset();

    void SetFrameRate(float frameRate);
    float GetFrameRate();

private:
    // Assignment is intentionally disabled (declared, never defined).
    TestVideoEncodedBuffer& operator=(const TestVideoEncodedBuffer& inBuffer);

private:
    unsigned short         _captureWidth;
    unsigned short         _captureHeight;
    webrtc::VideoFrameType _frameType;
    float                  _frameRate;
};
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_VIDEO_BUFFER_H_
|
||||
417
modules/video_coding/codecs/test_framework/video_source.cc
Normal file
417
modules/video_coding/codecs/test_framework/video_source.cc
Normal file
@ -0,0 +1,417 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "video_source.h"
|
||||
#include "vplib.h"
|
||||
#include <cassert>
|
||||
#include <stdio.h>
|
||||
|
||||
// Default source: CIF (352x288) I420 foreman test clip at 30 fps.
VideoSource::VideoSource()
:
_fileName("../../../../testFiles/foreman.yuv"),
_width(352),
_height(288),
_type(webrtc::kI420),
_frameRate(30)
{
}
|
||||
|
||||
// Constructs a source from a named size preset; width/height are derived
// from the VideoSize enumerator.
VideoSource::VideoSource(std::string fileName, VideoSize size,
    int frameRate /*= 30*/, webrtc::VideoType type /*= webrtc::kI420*/)
:
_fileName(fileName),
_type(type),
_frameRate(frameRate)
{
    assert(size != kUndefined && size != kNumberOfVideoSizes);
    assert(type != webrtc::kUnknown);
    assert(frameRate > 0);
    // BUG FIX: the original called GetWidthHeight() inside assert(), so in
    // NDEBUG builds the call was compiled out and _width/_height were left
    // uninitialized. Perform the call unconditionally and assert the result.
    const int ret = GetWidthHeight(size, _width, _height);
    assert(ret == 0);
    (void) ret; // avoid unused-variable warning when asserts are disabled
}
|
||||
|
||||
// Constructs a source with explicit dimensions; arguments are
// sanity-checked with asserts (debug builds only).
VideoSource::VideoSource(std::string fileName, int width, int height,
    int frameRate /*= 30*/, webrtc::VideoType type /*= webrtc::kI420*/)
:
_fileName(fileName),
_width(width),
_height(height),
_type(type),
_frameRate(frameRate)
{
    assert(width > 0);
    assert(height > 0);
    assert(type != webrtc::kUnknown);
    assert(frameRate > 0);
}
|
||||
|
||||
// Returns the VideoSize preset matching this source's dimensions,
// or kUndefined if they match no known preset.
VideoSize
VideoSource::GetSize() const
{
    return GetSize(_width, _height);
}
|
||||
|
||||
// Maps a width/height pair to its VideoSize enumerator.
// Returns kUndefined for dimensions that match no known preset.
VideoSize
VideoSource::GetSize(WebRtc_UWord16 width, WebRtc_UWord16 height)
{
    struct SizeEntry
    {
        WebRtc_UWord16 width;
        WebRtc_UWord16 height;
        VideoSize      size;
    };
    // Only the presets the original function recognized are listed here.
    static const SizeEntry kKnownSizes[] =
    {
        {  128,   96, kSQCIF   },
        {  160,  120, kQQVGA   },
        {  176,  144, kQCIF    },
        {  320,  240, kQVGA    },
        {  352,  288, kCIF     },
        {  640,  480, kVGA     },
        {  720,  480, kNTSC    },
        {  704,  576, k4CIF    },
        {  800,  600, kSVGA    },
        {  960,  720, kHD      },
        { 1024,  768, kXGA     },
        { 1440, 1080, kFullHD  },
        {  400,  240, kWQVGA   },
        {  800,  480, kWVGA    },
        { 1280,  720, kWHD     },
        { 1920, 1080, kWFullHD }
    };
    const int numEntries =
        static_cast<int>(sizeof(kKnownSizes) / sizeof(kKnownSizes[0]));
    for (int i = 0; i < numEntries; i++)
    {
        if (kKnownSizes[i].width == width && kKnownSizes[i].height == height)
        {
            return kKnownSizes[i].size;
        }
    }
    return kUndefined;
}
|
||||
|
||||
// Returns the size in bytes of one frame, as computed by vplib for the
// source's pixel format and dimensions.
unsigned int
VideoSource::GetFrameLength() const
{
    return webrtc::CalcBufferSize(_type, _width, _height);
}
|
||||
|
||||
// Returns the human-readable size string for this source's dimensions.
const char*
VideoSource::GetMySizeString() const
{
    return VideoSource::GetSizeString(GetSize());
}
|
||||
|
||||
const char*
|
||||
VideoSource::GetSizeString(VideoSize size)
|
||||
{
|
||||
switch (size)
|
||||
{
|
||||
case kSQCIF:
|
||||
return "SQCIF";
|
||||
case kQQVGA:
|
||||
return "QQVGA";
|
||||
case kQCIF:
|
||||
return "QCIF";
|
||||
case kQVGA:
|
||||
return "QVGA";
|
||||
case kCIF:
|
||||
return "CIF";
|
||||
case kVGA:
|
||||
return "VGA";
|
||||
case kNTSC:
|
||||
return "NTSC";
|
||||
case k4CIF:
|
||||
return "4CIF";
|
||||
case kSVGA:
|
||||
return "SVGA";
|
||||
case kHD:
|
||||
return "HD";
|
||||
case kXGA:
|
||||
return "XGA";
|
||||
case kFullHD:
|
||||
return "Full_HD";
|
||||
case kWQVGA:
|
||||
return "WQVGA";
|
||||
case kWHD:
|
||||
return "WHD";
|
||||
case kWFullHD:
|
||||
return "WFull_HD";
|
||||
default:
|
||||
return "Undefined";
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the directory part of the file name, without a trailing slash.
// Returns "." when the file name contains no path separator.
std::string
VideoSource::GetFilePath() const
{
    size_t slashPos = _fileName.find_last_of("/\\");
    if (slashPos == std::string::npos)
    {
        return ".";
    }

    return _fileName.substr(0, slashPos);
}
|
||||
|
||||
std::string
|
||||
VideoSource::GetName() const
|
||||
{
|
||||
// Remove path.
|
||||
size_t slashPos = _fileName.find_last_of("/\\");
|
||||
if (slashPos == std::string::npos)
|
||||
{
|
||||
slashPos = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
slashPos++;
|
||||
}
|
||||
|
||||
// Remove extension and underscored suffix if it exists.
|
||||
return _fileName.substr(slashPos, std::min(_fileName.find_last_of("_"),
|
||||
_fileName.find_last_of(".")) - slashPos);
|
||||
}
|
||||
|
||||
void
|
||||
VideoSource::Convert(const VideoSource &target, bool force /* = false */) const
|
||||
{
|
||||
// Ensure target rate is less than or equal to source
|
||||
// (i.e. we are only temporally downsampling).
|
||||
assert(target.GetFrameRate() <= _frameRate);
|
||||
// Only supports YUV420 currently.
|
||||
assert(_type == webrtc::kI420 && target.GetType() == webrtc::kI420);
|
||||
if (!force && (FileExists(target.GetFileName().c_str()) ||
|
||||
(target.GetWidth() == _width && target.GetHeight() == _height && target.GetFrameRate() == _frameRate)))
|
||||
{
|
||||
// Assume that the filename uniquely defines the content.
|
||||
// If the file already exists, it is the correct file.
|
||||
return;
|
||||
}
|
||||
FILE *inFile = NULL;
|
||||
FILE *outFile = NULL;
|
||||
|
||||
inFile = fopen(_fileName.c_str(), "rb");
|
||||
assert(inFile != NULL);
|
||||
|
||||
outFile = fopen(target.GetFileName().c_str(), "wb");
|
||||
assert(outFile != NULL);
|
||||
|
||||
FrameDropper fd;
|
||||
fd.SetFrameRate(target.GetFrameRate(), _frameRate);
|
||||
|
||||
const size_t lengthOutFrame = webrtc::CalcBufferSize(target.GetType(),
|
||||
target.GetWidth(), target.GetHeight());
|
||||
assert(lengthOutFrame > 0);
|
||||
unsigned char *outFrame = new unsigned char[lengthOutFrame];
|
||||
|
||||
const size_t lengthInFrame = webrtc::CalcBufferSize(_type, _width, _height);
|
||||
assert(lengthInFrame > 0);
|
||||
unsigned char *inFrame = new unsigned char[lengthInFrame];
|
||||
|
||||
while (fread(inFrame, 1, lengthInFrame, inFile) == lengthInFrame)
|
||||
{
|
||||
if (!fd.DropFrame())
|
||||
{
|
||||
assert(target.GetWidth() == _width &&
|
||||
target.GetHeight() == _height); // Add video interpolator here!
|
||||
fwrite(outFrame, 1, lengthOutFrame, outFile);
|
||||
}
|
||||
}
|
||||
|
||||
delete inFrame;
|
||||
delete outFrame;
|
||||
fclose(inFile);
|
||||
fclose(outFile);
|
||||
}
|
||||
|
||||
// Returns true if the named file can be opened for reading.
bool VideoSource::FileExists(const char* fileName)
{
    FILE* file = fopen(fileName, "rb");
    if (file == NULL)
    {
        return false;
    }
    fclose(file);
    return true;
}
|
||||
|
||||
|
||||
int
|
||||
VideoSource::GetWidthHeight( VideoSize size, int & width, int& height)
|
||||
{
|
||||
switch(size)
|
||||
{
|
||||
case kSQCIF:
|
||||
width = 128;
|
||||
height = 96;
|
||||
return 0;
|
||||
case kQQVGA:
|
||||
width = 160;
|
||||
height = 120;
|
||||
return 0;
|
||||
case kQCIF:
|
||||
width = 176;
|
||||
height = 144;
|
||||
return 0;
|
||||
case kCGA:
|
||||
width = 320;
|
||||
height = 200;
|
||||
return 0;
|
||||
case kQVGA:
|
||||
width = 320;
|
||||
height = 240;
|
||||
return 0;
|
||||
case kSIF:
|
||||
width = 352;
|
||||
height = 240;
|
||||
return 0;
|
||||
case kWQVGA:
|
||||
width = 400;
|
||||
height = 240;
|
||||
return 0;
|
||||
case kCIF:
|
||||
width = 352;
|
||||
height = 288;
|
||||
return 0;
|
||||
case kW288p:
|
||||
width = 512;
|
||||
height = 288;
|
||||
return 0;
|
||||
case k448p:
|
||||
width = 576;
|
||||
height = 448;
|
||||
return 0;
|
||||
case kVGA:
|
||||
width = 640;
|
||||
height = 480;
|
||||
return 0;
|
||||
case k432p:
|
||||
width = 720;
|
||||
height = 432;
|
||||
return 0;
|
||||
case kW432p:
|
||||
width = 768;
|
||||
height = 432;
|
||||
return 0;
|
||||
case k4SIF:
|
||||
width = 704;
|
||||
height = 480;
|
||||
return 0;
|
||||
case kW448p:
|
||||
width = 768;
|
||||
height = 448;
|
||||
return 0;
|
||||
case kNTSC:
|
||||
width = 720;
|
||||
height = 480;
|
||||
return 0;
|
||||
case kFW448p:
|
||||
width = 800;
|
||||
height = 448;
|
||||
return 0;
|
||||
case kWVGA:
|
||||
width = 800;
|
||||
height = 480;
|
||||
return 0;
|
||||
case k4CIF:
|
||||
width = 704;
|
||||
height = 576;
|
||||
return 0;
|
||||
case kSVGA:
|
||||
width = 800;
|
||||
height = 600;
|
||||
return 0;
|
||||
case kW544p:
|
||||
width = 960;
|
||||
height = 544;
|
||||
return 0;
|
||||
case kW576p:
|
||||
width = 1024;
|
||||
height = 576;
|
||||
return 0;
|
||||
case kHD:
|
||||
width = 960;
|
||||
height = 720;
|
||||
return 0;
|
||||
case kXGA:
|
||||
width = 1024;
|
||||
height = 768;
|
||||
return 0;
|
||||
case kFullHD:
|
||||
width = 1440;
|
||||
height = 1080;
|
||||
return 0;
|
||||
case kWHD:
|
||||
width = 1280;
|
||||
height = 720;
|
||||
return 0;
|
||||
case kWFullHD:
|
||||
width = 1920;
|
||||
height = 1080;
|
||||
return 0;
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
// Starts with no dropping (every frame rendered) and the counter at zero.
FrameDropper::FrameDropper()
:
_dropsBetweenRenders(0),
_frameCounter(0)
{
}
|
||||
|
||||
bool
|
||||
FrameDropper::DropFrame()
|
||||
{
|
||||
_frameCounter++;
|
||||
if (_frameCounter > _dropsBetweenRenders)
|
||||
{
|
||||
_frameCounter = 0;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Returns how many frames are dropped between two rendered frames.
unsigned int
FrameDropper::DropsBetweenRenders()
{
    return _dropsBetweenRenders;
}
|
||||
|
||||
void
|
||||
FrameDropper::SetFrameRate(double frameRate, double maxFrameRate)
|
||||
{
|
||||
if (frameRate >= 1.0)
|
||||
{
|
||||
_dropsBetweenRenders = static_cast<unsigned int>(maxFrameRate / frameRate + 0.5) - 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
_dropsBetweenRenders = 0;
|
||||
}
|
||||
}
|
||||
110
modules/video_coding/codecs/test_framework/video_source.h
Normal file
110
modules/video_coding/codecs/test_framework/video_source.h
Normal file
@ -0,0 +1,110 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_VIDEO_SOURCE_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_VIDEO_SOURCE_H_
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "vplib.h"
|
||||
|
||||
// Known video frame size presets, ordered by increasing pixel count.
// Each comment gives width*height = total pixels.
enum VideoSize
{
    kUndefined,
    kSQCIF,     // 128*96   = 12 288
    kQQVGA,     // 160*120  = 19 200
    kQCIF,      // 176*144  = 25 344
    kCGA,       // 320*200  = 64 000
    kQVGA,      // 320*240  = 76 800
    kSIF,       // 352*240  = 84 480
    kWQVGA,     // 400*240  = 96 000
    kCIF,       // 352*288  = 101 376
    kW288p,     // 512*288  = 147 456 (WCIF)
    k448p,      // 576*448  = 281 088
    kVGA,       // 640*480  = 307 200
    k432p,      // 720*432  = 311 040
    kW432p,     // 768*432  = 331 776
    k4SIF,      // 704*480  = 337 920
    kW448p,     // 768*448  = 344 064
    kNTSC,      // 720*480  = 345 600
    kFW448p,    // 800*448  = 358 400
    kWVGA,      // 800*480  = 384 000
    k4CIF,      // 704*576  = 405 504
    kSVGA,      // 800*600  = 480 000
    kW544p,     // 960*544  = 522 240
    kW576p,     // 1024*576 = 589 824 (W4CIF)
    kHD,        // 960*720  = 691 200
    kXGA,       // 1024*768 = 786 432
    kWHD,       // 1280*720 = 921 600
    kFullHD,    // 1440*1080 = 1 555 200
    kWFullHD,   // 1920*1080 = 2 073 600

    kNumberOfVideoSizes
};
|
||||
|
||||
// Describes a raw video test clip on disk: file name, dimensions, pixel
// format, and frame rate. Also offers helpers for mapping between sizes
// and names, and for converting a clip to another frame rate.
class VideoSource
{
public:
    VideoSource();
    VideoSource(std::string fileName, VideoSize size, int frameRate = 30,
        webrtc::VideoType type = webrtc::kI420);
    VideoSource(std::string fileName, int width, int height, int frameRate = 30,
        webrtc::VideoType type = webrtc::kI420);

    std::string GetFileName() const { return _fileName; }
    int GetWidth() const { return _width; }
    int GetHeight() const { return _height; }
    webrtc::VideoType GetType() const { return _type; }
    int GetFrameRate() const { return _frameRate; }

    // Returns the file path without a trailing slash.
    std::string GetFilePath() const;

    // Returns the filename with the path (including the leading slash) removed.
    std::string GetName() const;

    VideoSize GetSize() const;
    static VideoSize GetSize(WebRtc_UWord16 width, WebRtc_UWord16 height);
    // Size in bytes of one raw frame.
    unsigned int GetFrameLength() const;

    // Returns a human-readable size string.
    static const char* GetSizeString(VideoSize size);
    const char* GetMySizeString() const;

    // Opens the video source, converting and writing to the specified target.
    // If force is true, the conversion will be done even if the target file
    // already exists.
    void Convert(const VideoSource& target, bool force = false) const;
    static bool FileExists(const char* fileName);
private:
    static int GetWidthHeight( VideoSize size, int& width, int& height);
    std::string _fileName;       // path to the raw clip on disk
    int _width;                  // frame width in pixels
    int _height;                 // frame height in pixels
    webrtc::VideoType _type;     // pixel format (e.g. webrtc::kI420)
    int _frameRate;              // frames per second
};
|
||||
|
||||
// Simple temporal downsampler: given a target and a maximum frame rate,
// answers "should this frame be dropped?" for a stream of frames.
class FrameDropper
{
public:
    FrameDropper();
    // Returns true if the next frame should be dropped.
    bool DropFrame();
    // Number of frames dropped between two rendered frames.
    unsigned int DropsBetweenRenders();
    // Configures dropping so 'frameRate' fps are kept out of 'maxFrameRate' fps.
    void SetFrameRate(double frameRate, double maxFrameRate);

private:
    unsigned int _dropsBetweenRenders;  // drops per render cycle
    unsigned int _frameCounter;         // position within the current cycle
};
|
||||
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_VIDEO_SOURCE_H_
|
||||
|
||||
223
modules/video_coding/codecs/vp8/main/interface/vp8.h
Normal file
223
modules/video_coding/codecs/vp8/main/interface/vp8.h
Normal file
@ -0,0 +1,223 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
/*
|
||||
* vp8.h
|
||||
* WEBRTC VP8 wrapper interface
|
||||
*/
|
||||
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_H_
|
||||
|
||||
#include "video_codec_interface.h"
|
||||
|
||||
// VPX forward declaration
|
||||
typedef struct vpx_codec_ctx vpx_codec_ctx_t;
|
||||
typedef struct vpx_codec_ctx vpx_dec_ctx_t;
|
||||
typedef struct vpx_codec_enc_cfg vpx_codec_enc_cfg_t;
|
||||
typedef struct vpx_image vpx_image_t;
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
/******************************/
|
||||
/* VP8Encoder class */
|
||||
/******************************/
|
||||
// WebRTC wrapper around the libvpx VP8 encoder.
class VP8Encoder : public VideoEncoder
{
public:
    VP8Encoder();
    virtual ~VP8Encoder();

    // Free encoder memory.
    //
    // Return value         : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual WebRtc_Word32 Release();

    // Reset encoder state and prepare for a new call.
    //
    // Return value         : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    //                        <0 - Errors:
    //                          WEBRTC_VIDEO_CODEC_ERR_PARAMETER
    //                          WEBRTC_VIDEO_CODEC_ERROR
    virtual WebRtc_Word32 Reset();

    // Initialize the encoder with the information from the codecSettings
    //
    // Input:
    //          - codecSettings     : Codec settings
    //          - numberOfCores     : Number of cores available for the encoder
    //          - maxPayloadSize    : The maximum size each payload is allowed
    //                                to have. Usually MTU - overhead.
    //
    // Return value         : Set bit rate if OK
    //                        <0 - Errors:
    //                          WEBRTC_VIDEO_CODEC_ERR_PARAMETER
    //                          WEBRTC_VIDEO_CODEC_ERR_SIZE
    //                          WEBRTC_VIDEO_CODEC_LEVEL_EXCEEDED
    //                          WEBRTC_VIDEO_CODEC_MEMORY
    //                          WEBRTC_VIDEO_CODEC_ERROR
    virtual WebRtc_Word32 InitEncode(const VideoCodec* codecSettings,
                                     WebRtc_Word32 numberOfCores,
                                     WebRtc_UWord32 maxPayloadSize);

    // Encode an I420 image (as a part of a video stream). The encoded image
    // will be returned to the user through the encode complete callback.
    //
    // Input:
    //          - inputImage        : Image to be encoded
    //          - frameTypes        : Frame type to be generated by the encoder.
    //
    // Return value         : WEBRTC_VIDEO_CODEC_OK if OK
    //                        <0 - Errors:
    //                          WEBRTC_VIDEO_CODEC_ERR_PARAMETER
    //                          WEBRTC_VIDEO_CODEC_MEMORY
    //                          WEBRTC_VIDEO_CODEC_ERROR
    //                          WEBRTC_VIDEO_CODEC_TIMEOUT

    virtual WebRtc_Word32 Encode(const RawImage& inputImage,
                                 const void* codecSpecificInfo,
                                 VideoFrameType frameType);

    // Register an encode complete callback object.
    //
    // Input:
    //          - callback          : Callback object which handles encoded images.
    //
    // Return value         : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual WebRtc_Word32 RegisterEncodeCompleteCallback(EncodedImageCallback* callback);

    // Inform the encoder of the new packet loss rate in the network
    //
    //          - packetLoss        : Fraction lost
    //                                (loss rate in percent = 100 * packetLoss / 255)
    // Return value         : WEBRTC_VIDEO_CODEC_OK if OK
    //                        <0 - Errors:
    //                          WEBRTC_VIDEO_CODEC_ERROR
    //
    virtual WebRtc_Word32 SetPacketLoss(WebRtc_UWord32 packetLoss);

    // Inform the encoder about the new target bit rate.
    //
    //          - newBitRate        : New target bit rate
    //          - frameRate         : The target frame rate
    //
    // Return value         : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual WebRtc_Word32 SetRates(WebRtc_UWord32 newBitRateKbit, WebRtc_UWord32 frameRate);

    // Get version number for the codec.
    //
    // Input:
    //          - version           : Pointer to allocated char buffer.
    //          - buflen            : Length of provided char buffer.
    //
    // Output:
    //          - version           : Version number string written to char buffer.
    //
    // Return value         : >0 - Length of written string.
    //                        <0 - WEBRTC_VIDEO_CODEC_ERR_SIZE
    virtual WebRtc_Word32 Version(WebRtc_Word8 *version, WebRtc_Word32 length) const;
    static WebRtc_Word32 VersionStatic(WebRtc_Word8 *version, WebRtc_Word32 length);

private:
    EncodedImage              _encodedImage;            // reusable output buffer
    EncodedImageCallback*     _encodedCompleteCallback; // owned by the caller
    WebRtc_Word32             _width;
    WebRtc_Word32             _height;
    WebRtc_Word32             _maxBitRateKbit;          // 0 means unspecified
    int                       _maxFrameRate;
    bool                      _inited;
    WebRtc_UWord16            _pictureID;               // random start, wraps at 16 bits
    bool                      _pictureLossIndicationOn;
    bool                      _feedbackModeOn;
    bool                      _nextRefIsGolden;
    bool                      _lastAcknowledgedIsGolden;
    bool                      _haveReceivedAcknowledgement;
    WebRtc_UWord16            _pictureIDLastSentRef;
    WebRtc_UWord16            _pictureIDLastAcknowledgedRef;

    vpx_codec_ctx_t*          _encoder;  // libvpx encoder context
    vpx_codec_enc_cfg_t*      _cfg;      // libvpx encoder configuration
    vpx_image_t*              _raw;      // wrapper for the raw input image
};// end of VP8Encoder class
|
||||
|
||||
/******************************/
|
||||
/* VP8Decoder class */
|
||||
/******************************/
|
||||
// WebRTC wrapper around the libvpx VP8 decoder.
class VP8Decoder : public VideoDecoder
{
public:
    VP8Decoder();
    virtual ~VP8Decoder();

    // Initialize the decoder.
    //
    // Return value         : WEBRTC_VIDEO_CODEC_OK.
    //                        <0 - Errors:
    //                          WEBRTC_VIDEO_CODEC_ERROR
    virtual WebRtc_Word32 InitDecode(const VideoCodec* inst,
                                     WebRtc_Word32 numberOfCores);

    // Decode encoded image (as a part of a video stream). The decoded image
    // will be returned to the user through the decode complete callback.
    //
    // Input:
    //          - inputImage        : Encoded image to be decoded
    //          - missingFrames     : True if one or more frames have been lost
    //                                since the previous decode call.
    //          - codecSpecificInfo : pointer to specific codec data
    //          - renderTimeMs      : Render time in Ms
    //
    // Return value         : WEBRTC_VIDEO_CODEC_OK if OK
    //                        <0 - Errors:
    //                          WEBRTC_VIDEO_CODEC_ERROR
    //                          WEBRTC_VIDEO_CODEC_ERR_PARAMETER
    virtual WebRtc_Word32 Decode(const EncodedImage& inputImage,
                                 bool missingFrames,
                                 const void* /*codecSpecificInfo*/,
                                 WebRtc_Word64 /*renderTimeMs*/);

    // Register a decode complete callback object.
    //
    // Input:
    //          - callback          : Callback object which handles decoded images.
    //
    // Return value         : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual WebRtc_Word32 RegisterDecodeCompleteCallback(DecodedImageCallback* callback);

    // Free decoder memory.
    //
    // Return value         : WEBRTC_VIDEO_CODEC_OK if OK
    //                        <0 - Errors:
    //                          WEBRTC_VIDEO_CODEC_ERROR
    virtual WebRtc_Word32 Release();

    // Reset decoder state and prepare for a new call.
    //
    // Return value         : WEBRTC_VIDEO_CODEC_OK.
    //                        <0 - Errors:
    //                          WEBRTC_VIDEO_CODEC_UNINITIALIZED
    //                          WEBRTC_VIDEO_CODEC_ERROR
    virtual WebRtc_Word32 Reset();
    // Not supported by this decoder; always returns -1.
    virtual WebRtc_Word32 SetCodecConfigParameters(WebRtc_UWord8* /*buffer*/, WebRtc_Word32 /*size*/) { return -1; }

private:
    RawImage                  _decodedImage;           // reusable output image
    DecodedImageCallback*     _decodeCompleteCallback; // owned by the caller
    bool                      _inited;
    bool                      _feedbackModeOn;
    vpx_dec_ctx_t*            _decoder;                // libvpx decoder context

};// end of VP8Decoder class
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_H_
|
||||
806
modules/video_coding/codecs/vp8/main/source/vp8.cc
Normal file
806
modules/video_coding/codecs/vp8/main/source/vp8.cc
Normal file
@ -0,0 +1,806 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
/*
|
||||
* vp8.cc
|
||||
*
|
||||
* This file contains the WEBRTC VP8 wrapper implementation
|
||||
*
|
||||
*/
|
||||
#include "vp8.h"
|
||||
#include "tick_util.h"
|
||||
|
||||
#include "vpx/vpx_encoder.h"
|
||||
#include "vpx/vpx_decoder.h"
|
||||
#include "vpx/vp8cx.h"
|
||||
#include "vpx/vp8dx.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <time.h>
|
||||
|
||||
#include "module_common_types.h"
|
||||
|
||||
#define VP8_FREQ_HZ 90000
|
||||
//#define DEV_PIC_LOSS
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Constructs an uninitialized encoder; InitEncode() must be called before
// use. Seeds rand() so the random picture ID start differs between runs.
VP8Encoder::VP8Encoder():
_encodedImage(),
_encodedCompleteCallback(NULL),
_width(0),
_height(0),
_maxBitRateKbit(0),
// BUG FIX: _maxFrameRate was missing from the initializer list and was
// read uninitialized (e.g. in SetRates) if called before InitEncode.
_maxFrameRate(0),
_inited(false),
_pictureID(0),
_pictureLossIndicationOn(false),
_feedbackModeOn(false),
_nextRefIsGolden(true),
_lastAcknowledgedIsGolden(true),
_haveReceivedAcknowledgement(false),
_pictureIDLastSentRef(0),
_pictureIDLastAcknowledgedRef(0),
_encoder(NULL),
_cfg(NULL),
_raw(NULL)
{
    srand((WebRtc_UWord32)TickTime::MillisecondTimestamp());
}
|
||||
|
||||
// Releases all encoder resources (buffers and libvpx contexts).
VP8Encoder::~VP8Encoder()
{
    Release();
}
|
||||
|
||||
// Writes the codec version string into 'version' and returns its length,
// or WEBRTC_VIDEO_CODEC_ERR_PARAMETER if the buffer is too small.
WebRtc_Word32
VP8Encoder::VersionStatic(WebRtc_Word8* version, WebRtc_Word32 length)
{
    const WebRtc_Word8* str = "WebM/VP8 version 1.0.0\n"; // Bali
    WebRtc_Word32 verLen = (WebRtc_Word32)strlen(str);
    // BUG FIX: require room for the terminating null. The original accepted
    // verLen == length, in which case strncpy left the copy unterminated.
    if (verLen + 1 > length)
    {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    }
    strncpy(version, str, length);
    return verLen;
}
|
||||
|
||||
// Instance wrapper around VersionStatic(); see that function for contract.
WebRtc_Word32
VP8Encoder::Version(WebRtc_Word8 *version, WebRtc_Word32 length) const
{
    return VersionStatic(version, length);
}
|
||||
|
||||
// Frees the encoded-image buffer, the libvpx encoder context, its
// configuration and the raw-image wrapper, and marks the encoder
// uninitialized. Safe to call repeatedly.
WebRtc_Word32
VP8Encoder::Release()
{
    if (_encodedImage._buffer != NULL)
    {
        delete [] _encodedImage._buffer;
        _encodedImage._buffer = NULL;
    }
    if (_encoder != NULL)
    {
        // Destroy the libvpx context before freeing our wrapper object.
        if (vpx_codec_destroy(_encoder))
        {
            return WEBRTC_VIDEO_CODEC_MEMORY;
        }
        delete _encoder;
        _encoder = NULL;
    }
    if (_cfg != NULL)
    {
        delete _cfg;
        _cfg = NULL;
    }
    if (_raw != NULL)
    {
        // The image data was allocated by vpx_img_alloc in InitEncode.
        vpx_img_free(_raw);
        delete _raw;
        _raw = NULL;
    }
    _inited = false;

    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
// Re-initializes the libvpx encoder context with the current configuration,
// returning the encoder to its initial state. Requires a prior InitEncode().
WebRtc_Word32
VP8Encoder::Reset()
{
    if (!_inited)
    {
        return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    }
    // reinitialize encoder to initial state
    if (vpx_codec_enc_init(_encoder, vpx_codec_vp8_cx(), _cfg, 0))
    {
        return WEBRTC_VIDEO_CODEC_ERROR;
    }

    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
// Updates the target bit rate (capped at _maxBitRateKbit when that is set)
// and the frame rate, then pushes the new configuration to libvpx.
//
// Input:
//          - newBitRateKbit : New target bit rate in kbit/s
//          - newFrameRate   : New target frame rate (must be >= 1)
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
WebRtc_Word32
VP8Encoder::SetRates(WebRtc_UWord32 newBitRateKbit, WebRtc_UWord32 newFrameRate)
{
    if (!_inited)
    {
        return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    }
    if (_encoder->err)
    {
        return WEBRTC_VIDEO_CODEC_ERROR;
    }
    if (newFrameRate < 1)
    {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    }

    // update bit rate
    if (_maxBitRateKbit > 0 && newBitRateKbit > static_cast<WebRtc_UWord32>(_maxBitRateKbit))
    {
        newBitRateKbit = _maxBitRateKbit;
    }
    _cfg->rc_target_bitrate = newBitRateKbit; // in kbit/s

    // update frame rate
    // NOTE(review): unsigned/signed comparison (_maxFrameRate is int);
    // benign as long as both values are positive — confirm.
    if (newFrameRate != _maxFrameRate)
    {
        _maxFrameRate = static_cast<int>(newFrameRate);
        _cfg->g_timebase.num = 1;
        _cfg->g_timebase.den = _maxFrameRate;//VP8_FREQ_HZ;
    }

    // update encoder context
    if (vpx_codec_enc_config_set(_encoder, _cfg))
    {
        return WEBRTC_VIDEO_CODEC_ERROR;
    }
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
WebRtc_Word32
|
||||
VP8Encoder::InitEncode(const VideoCodec* inst,
|
||||
WebRtc_Word32 numberOfCores,
|
||||
WebRtc_UWord32 /*maxPayloadSize */)
|
||||
{
|
||||
if (inst == NULL)
|
||||
{
|
||||
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
|
||||
}
|
||||
if (inst->maxFramerate < 1)
|
||||
{
|
||||
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
|
||||
}
|
||||
if (inst->startBitrate < 0 || inst->maxBitrate < 0)
|
||||
{
|
||||
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
|
||||
}
|
||||
// allow zero to represent an unspecified maxBitRate
|
||||
if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate)
|
||||
{
|
||||
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
|
||||
}
|
||||
if (inst->width < 1 || inst->height < 1)
|
||||
{
|
||||
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
|
||||
}
|
||||
if (numberOfCores < 1)
|
||||
{
|
||||
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
|
||||
}
|
||||
#ifdef DEV_PIC_LOSS
|
||||
// we need to know if we use feedback
|
||||
_feedbackModeOn = inst->codecSpecific.VP8.feedbackModeOn;
|
||||
_pictureLossIndicationOn = inst->codecSpecific.VP8.pictureLossIndicationOn;
|
||||
#endif
|
||||
|
||||
WebRtc_Word32 retVal = Release();
|
||||
if (retVal < 0)
|
||||
{
|
||||
return retVal;
|
||||
}
|
||||
if (_encoder == NULL)
|
||||
{
|
||||
_encoder = new vpx_codec_ctx_t;
|
||||
}
|
||||
if (_cfg == NULL)
|
||||
{
|
||||
_cfg = new vpx_codec_enc_cfg_t;
|
||||
}
|
||||
if (_raw == NULL)
|
||||
{
|
||||
_raw = new vpx_image_t;
|
||||
}
|
||||
|
||||
_maxBitRateKbit = inst->maxBitrate;
|
||||
_maxFrameRate = inst->maxFramerate;
|
||||
_width = inst->width;
|
||||
_height = inst->height;
|
||||
|
||||
// random start 16 bits is enough
|
||||
_pictureID = (WebRtc_UWord16)rand();
|
||||
|
||||
// allocate memory for encoded image
|
||||
if (_encodedImage._buffer != NULL)
|
||||
{
|
||||
delete [] _encodedImage._buffer;
|
||||
}
|
||||
_encodedImage._size = (3 * inst->width * inst->height) >> 1;
|
||||
_encodedImage._buffer = new WebRtc_UWord8[_encodedImage._size];
|
||||
if (_encodedImage._buffer == NULL)
|
||||
{
|
||||
return WEBRTC_VIDEO_CODEC_MEMORY;
|
||||
}
|
||||
|
||||
vpx_img_alloc(_raw, IMG_FMT_I420, inst->width, inst->height, 1);
|
||||
// populate encoder configuration with default values
|
||||
if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), _cfg, 0))
|
||||
{
|
||||
return WEBRTC_VIDEO_CODEC_ERROR;
|
||||
}
|
||||
|
||||
_cfg->g_w = inst->width;
|
||||
_cfg->g_h = inst->height;
|
||||
if (_maxBitRateKbit > 0 && inst->startBitrate > static_cast<unsigned int>(_maxBitRateKbit))
|
||||
{
|
||||
_cfg->rc_target_bitrate = _maxBitRateKbit;
|
||||
}
|
||||
else
|
||||
{
|
||||
_cfg->rc_target_bitrate = inst->startBitrate; // in kbit/s
|
||||
}
|
||||
|
||||
// setting the time base of the codec
|
||||
_cfg->g_timebase.num = 1;
|
||||
_cfg->g_timebase.den = _maxFrameRate;
|
||||
|
||||
_cfg->g_error_resilient = 1; //enabled
|
||||
_cfg->g_lag_in_frames = 0; // 0- no frame lagging
|
||||
|
||||
_cfg->g_threads = numberOfCores;
|
||||
|
||||
// rate control settings
|
||||
_cfg->rc_dropframe_thresh = 0;
|
||||
_cfg->rc_end_usage = VPX_CBR;
|
||||
_cfg->g_pass = VPX_RC_ONE_PASS;
|
||||
_cfg->rc_resize_allowed = 0;
|
||||
_cfg->rc_min_quantizer = 4;
|
||||
_cfg->rc_max_quantizer = 56;
|
||||
_cfg->rc_undershoot_pct = 98;
|
||||
_cfg->rc_buf_initial_sz = 500;
|
||||
_cfg->rc_buf_optimal_sz = 600;
|
||||
_cfg->rc_buf_sz = 1000;
|
||||
|
||||
|
||||
#ifdef DEV_PIC_LOSS
|
||||
// this can only be off if we know we use feedback
|
||||
if (_pictureLossIndicationOn)
|
||||
{
|
||||
_cfg->kf_mode = VPX_KF_DISABLED; // don't generate key frame unless we tell you
|
||||
}
|
||||
else
|
||||
#endif
|
||||
{
|
||||
_cfg->kf_mode = VPX_KF_AUTO;
|
||||
_cfg->kf_max_dist = 300;
|
||||
}
|
||||
|
||||
// construct encoder context
|
||||
if (vpx_codec_enc_init(_encoder, vpx_codec_vp8_cx(), _cfg, 0))
|
||||
{
|
||||
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
|
||||
}
|
||||
switch (inst->codecSpecific.VP8.complexity)
|
||||
{
|
||||
case kComplexityHigh:
|
||||
{
|
||||
vpx_codec_control(_encoder, VP8E_SET_CPUUSED, -5);
|
||||
break;
|
||||
}
|
||||
case kComplexityHigher:
|
||||
{
|
||||
vpx_codec_control(_encoder, VP8E_SET_CPUUSED, -4);
|
||||
break;
|
||||
}
|
||||
case kComplexityMax:
|
||||
{
|
||||
vpx_codec_control(_encoder, VP8E_SET_CPUUSED, -3);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
vpx_codec_control(_encoder, VP8E_SET_CPUUSED, -6);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
_inited = true;
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
|
||||
WebRtc_Word32
VP8Encoder::Encode(const RawImage& inputImage,
                   const void* codecSpecificInfo,
                   VideoFrameType frameTypes)
{
    // Encode one raw I420 frame. The encoded bitstream is prepended with a
    // 1-2 byte picture ID and delivered through the registered callback.
    // Returns WEBRTC_VIDEO_CODEC_OK (also for an intentionally dropped
    // frame), or a negative error code.
    if (!_inited)
    {
        return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    }
    if (inputImage._buffer == NULL)
    {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    }
    if (_encodedCompleteCallback == NULL)
    {
        return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    }

    vpx_codec_iter_t iter = NULL;

    // Wrap the input buffer as a vpx_image_t. I420 layout: full-size Y
    // plane followed by quarter-size U and V planes.
    _raw->planes[PLANE_Y] = inputImage._buffer;
    _raw->planes[PLANE_U] = &inputImage._buffer[_height * _width];
    _raw->planes[PLANE_V] = &inputImage._buffer[_height * _width * 5 >> 2];

    int flags = 0;
    if (frameTypes == kKeyFrame)
    {
        flags |= VPX_EFLAG_FORCE_KF; // will update both golden and altref
        _encodedImage._frameType = kKeyFrame;
        _pictureIDLastSentRef = _pictureID;
    }
    else
    {
#ifdef DEV_PIC_LOSS
        if (_feedbackModeOn && codecSpecificInfo)
        {
            const CodecSpecificInfo* info = static_cast<const CodecSpecificInfo*>(codecSpecificInfo);
            if (info->codecType == kVideoCodecVP8)
            {
                // codecSpecificInfo will contain received RPSI and SLI picture IDs
                // this will help us decide on when to switch type of reference frame

                // if we receive SLI
                // force using an old golden or altref as a reference
                if (info->codecSpecific.VP8.hasReceivedSLI)
                {
                    // if this is older than my last acked ref we can ignore it
                    // info->codecSpecific.VP8.pictureIdSLI valid 6 bits => 64 frames

                    // since picture id can wrap check if in between our last sent and last acked
                    bool sendRefresh = false;
                    // check for a wrap in picture ID
                    if ((_pictureIDLastAcknowledgedRef & 0x3f) > (_pictureID & 0x3f))
                    {
                        // we have a wrap
                        if (info->codecSpecific.VP8.pictureIdSLI > (_pictureIDLastAcknowledgedRef & 0x3f) ||
                            info->codecSpecific.VP8.pictureIdSLI < (_pictureID & 0x3f))
                        {
                            sendRefresh = true;
                        }
                    }
                    else if (info->codecSpecific.VP8.pictureIdSLI > (_pictureIDLastAcknowledgedRef & 0x3f) &&
                             info->codecSpecific.VP8.pictureIdSLI < (_pictureID & 0x3f))
                    {
                        sendRefresh = true;
                    }

                    // right now we could also ignore it if it's older than our last sent ref since
                    // last sent ref only refers back to last acked
                    // _pictureIDLastSentRef;
                    if (sendRefresh)
                    {
                        flags |= VP8_EFLAG_NO_REF_LAST; // Don't reference the last frame

                        if (_haveReceivedAcknowledgement)
                        {
                            // we can't set this if we refer to a key frame
                            if (_lastAcknowledgedIsGolden)
                            {
                                flags |= VP8_EFLAG_NO_REF_ARF; // Don't reference the alternate reference frame
                            }
                            else
                            {
                                flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame
                            }
                        }
                    }
                }
                if (info->codecSpecific.VP8.hasReceivedRPSI)
                {
                    if ((info->codecSpecific.VP8.pictureIdRPSI & 0x3fff) == (_pictureIDLastSentRef & 0x3fff)) // compare 14 bits
                    {
                        // remote peer have received our last reference frame
                        // switch frame type
                        _haveReceivedAcknowledgement = true;
                        _nextRefIsGolden = !_nextRefIsGolden;
                        _pictureIDLastAcknowledgedRef = _pictureIDLastSentRef;
                    }
                }
            }
            const WebRtc_UWord16 periodX = 64; // we need a period X to decide on the distance between golden and altref
            if (_pictureID % periodX == 0)
            {
                // only required if we have had a loss
                // however we don't acknowledge a SLI so if that is lost it's no good
                flags |= VP8_EFLAG_NO_REF_LAST; // Don't reference the last frame

                if (_nextRefIsGolden)
                {
                    flags |= VP8_EFLAG_FORCE_GF;   // force a golden
                    flags |= VP8_EFLAG_NO_UPD_ARF; // don't update altref
                    if (_haveReceivedAcknowledgement)
                    {
                        // we can't set this if we refer to a key frame
                        // pw temporary as proof of concept
                        flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame
                    }
                }
                else
                {
                    flags |= VP8_EFLAG_FORCE_ARF; // force an altref
                    flags |= VP8_EFLAG_NO_UPD_GF; // Don't update golden
                    if (_haveReceivedAcknowledgement)
                    {
                        // we can't set this if we refer to a key frame
                        // pw temporary as proof of concept
                        flags |= VP8_EFLAG_NO_REF_ARF; // Don't reference the alternate reference frame
                    }
                }
                // remember our last reference frame
                _pictureIDLastSentRef = _pictureID;
            }
            else
            {
                flags |= VP8_EFLAG_NO_UPD_GF;  // don't update golden
                flags |= VP8_EFLAG_NO_UPD_ARF; // don't update altref
            }
        }
#endif
        _encodedImage._frameType = kDeltaFrame;
    }

    if (vpx_codec_encode(_encoder, _raw, _maxFrameRate * inputImage._timeStamp / VP8_FREQ_HZ, 1, flags, VPX_DL_REALTIME))
    {
        return WEBRTC_VIDEO_CODEC_ERROR;
    }

    const vpx_codec_cx_pkt_t* pkt = vpx_codec_get_cx_data(_encoder, &iter); // no lagging => 1 frame at a time
    if (pkt == NULL)
    {
        if (!_encoder->err)
        {
            // dropped frame
            return WEBRTC_VIDEO_CODEC_OK;
        }
        // BUG FIX: the original fell through to `pkt->kind` and dereferenced
        // a NULL pointer when the encoder reported an error.
        return WEBRTC_VIDEO_CODEC_ERROR;
    }
    if (pkt->kind == VPX_CODEC_CX_FRAME_PKT)
    {
        // attach Picture ID
        // we use 14 bits generating 1 or 2 bytes
        // TODO(hlundin): update to follow latest RTP spec
        WebRtc_UWord8 pictureIdSize = 2;
        // TODO(hlundin): we should refactor this so that the pictureID is
        // signaled through a codec specific struct and added in the RTP module.
        if (_pictureID > 0x7f)
        {
            // more than 7 bits
            _encodedImage._buffer[0] = 0x80 | (WebRtc_UWord8)(_pictureID >> 7);
            _encodedImage._buffer[1] = (WebRtc_UWord8)(_pictureID & 0x7f);
        }
        else
        {
            _encodedImage._buffer[0] = (WebRtc_UWord8)_pictureID;
            pictureIdSize = 1;
        }

        // BUG FIX: guard against writing past the allocated output buffer
        // (sized 3*w*h/2 at InitEncode) before copying the bitstream.
        if (pkt->data.frame.sz + pictureIdSize > _encodedImage._size)
        {
            return WEBRTC_VIDEO_CODEC_ERROR;
        }
        memcpy(_encodedImage._buffer + pictureIdSize, pkt->data.frame.buf, pkt->data.frame.sz);
        _encodedImage._length = WebRtc_UWord32(pkt->data.frame.sz) + pictureIdSize;
        _encodedImage._encodedHeight = _raw->h;
        _encodedImage._encodedWidth = _raw->w;

        // check if encoded frame is a key frame
        if (pkt->data.frame.flags & VPX_FRAME_IS_KEY)
        {
            _encodedImage._frameType = kKeyFrame;
        }

        if (_encodedImage._length > 0)
        {
            _encodedImage._timeStamp = inputImage._timeStamp;

            // Figure out where partition boundaries are located.
            RTPFragmentationHeader fragInfo;
            fragInfo.VerifyAndAllocateFragmentationHeader(2); // two partitions: 1st and 2nd

            // First partition: its size is coded in 19 bits of the first
            // three bytes of the VP8 frame header.
            fragInfo.fragmentationOffset[0] = 0;
            WebRtc_UWord8* firstByte = &_encodedImage._buffer[pictureIdSize];
            WebRtc_UWord32 tmpSize = (firstByte[2] << 16) | (firstByte[1] << 8)
                                     | firstByte[0];
            fragInfo.fragmentationLength[0] = (tmpSize >> 5) & 0x7FFFF;
            // Let the PictureID belong to the first partition.
            fragInfo.fragmentationLength[0] += pictureIdSize;
            fragInfo.fragmentationPlType[0] = 0; // not known here
            fragInfo.fragmentationTimeDiff[0] = 0;

            // Second partition: the remainder of the frame.
            fragInfo.fragmentationOffset[1] = fragInfo.fragmentationLength[0];
            fragInfo.fragmentationLength[1] = _encodedImage._length -
                fragInfo.fragmentationLength[0];
            fragInfo.fragmentationPlType[1] = 0; // not known here
            fragInfo.fragmentationTimeDiff[1] = 0;

            _encodedCompleteCallback->Encoded(_encodedImage, NULL, &fragInfo);
        }

        _pictureID++; // prepare next
        return WEBRTC_VIDEO_CODEC_OK;
    }
    return WEBRTC_VIDEO_CODEC_ERROR;
}
|
||||
|
||||
WebRtc_Word32
VP8Encoder::SetPacketLoss(WebRtc_UWord32 /*packetLoss*/)
{
    // Packet-loss feedback is not used by this encoder implementation;
    // accept the call and report success.
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
WebRtc_Word32
VP8Encoder::RegisterEncodeCompleteCallback(EncodedImageCallback* callback)
{
    // Store the sink that will receive every encoded frame; Encode()
    // refuses to run until this has been set.
    _encodedCompleteCallback = callback;
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
// Construct an uninitialized decoder; InitDecode() must be called before use.
VP8Decoder::VP8Decoder()
    : _inited(false),
      _feedbackModeOn(false),
      _decoder(NULL)
{
}
|
||||
|
||||
VP8Decoder::~VP8Decoder()
{
    // Force the full teardown path in Release(), which is a no-op when
    // _inited is false.
    _inited = true;
    Release();
}
|
||||
|
||||
WebRtc_Word32
VP8Decoder::Reset()
{
    // Re-initialize the decoder, keeping the previously configured settings.
    if (!_inited)
    {
        return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    }
    // BUG FIX: the original discarded InitDecode's return value and reported
    // success even when re-initialization failed; propagate it instead.
    return InitDecode(NULL, 1);
}
|
||||
|
||||
WebRtc_Word32
VP8Decoder::InitDecode(const VideoCodec* inst,
                       WebRtc_Word32 numberOfCores)
{
    // Initialize (or re-initialize) the libvpx VP8 decoder.
    // inst may be NULL (used by Reset()) in which case previous settings
    // are kept. Returns WEBRTC_VIDEO_CODEC_OK or a negative error code.
    vp8_postproc_cfg_t ppcfg;
    WebRtc_Word32 retVal = Release();
    if (retVal < 0)
    {
        return retVal;
    }
    if (_decoder == NULL)
    {
        _decoder = new vpx_dec_ctx_t;
    }
#ifdef DEV_PIC_LOSS
    if (inst && inst->codecType == kVideoCodecVP8)
    {
        _feedbackModeOn = inst->codecSpecific.VP8.feedbackModeOn;
    }
#endif

    vpx_codec_dec_cfg_t cfg;
    cfg.threads = numberOfCores;
    cfg.h = cfg.w = 0; // set after decode

    // BUG FIX: the configuration above was built but never handed to libvpx
    // (NULL was passed), so the requested thread count was silently ignored.
    if (vpx_codec_dec_init(_decoder, vpx_codec_vp8_dx(), &cfg, 0))
    {
        return WEBRTC_VIDEO_CODEC_MEMORY;
    }

    // Configure post-processing settings for the decoder.
    ppcfg.post_proc_flag = VP8_DEBLOCK;
    ppcfg.deblocking_level = 5; // strength of deblocking filter, valid range [0, 16]
    //ppcfg.NoiseLevel = 1; // noise intensity, valid range [0, 7]
    vpx_codec_control(_decoder, VP8_SET_POSTPROC, &ppcfg);

    _inited = true;
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
WebRtc_Word32
VP8Decoder::Decode(const EncodedImage& inputImage,
                   bool missingFrames,
                   const void* /*codecSpecificInfo*/,
                   WebRtc_Word64 /*renderTimeMs*/)
{
    // Decode one encoded frame. The input starts with a 1-8 byte varint
    // picture ID (added by VP8Encoder::Encode) which is stripped here.
    // The decoded image is delivered through the registered callback.
    if (!_inited)
    {
        return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    }
    if (inputImage._buffer == NULL)
    {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    }
    if (_decodeCompleteCallback == NULL)
    {
        return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    }
    if (inputImage._length <= 0)
    {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    }
    if (inputImage._completeFrame == false)
    {
        // future improvement
        // we can't decode this frame
        if (_feedbackModeOn)
        {
            return WEBRTC_VIDEO_CODEC_ERR_REQUEST_SLI;
        }
        else
        {
            return WEBRTC_VIDEO_CODEC_ERROR;
        }
    }
    vpx_dec_iter_t _iter = NULL;
    vpx_image_t* img;
    WebRtc_UWord64 pictureID = 0;

    // Scan the leading picture-ID bytes: 7 payload bits per byte, the high
    // bit marks continuation.
    // BUG FIX: the original tested the byte before the index bound and had
    // no bound against the input length, so a malformed header could read
    // past the end of the buffer.
    WebRtc_UWord8 numberOfBytes;
    for (numberOfBytes = 0;
         numberOfBytes < 8 &&
         numberOfBytes + 1 < inputImage._length &&
         (inputImage._buffer[numberOfBytes] & 0x80);
         numberOfBytes++)
    {
        pictureID += inputImage._buffer[numberOfBytes] & 0x7f;
        pictureID <<= 7;
    }
    pictureID += inputImage._buffer[numberOfBytes] & 0x7f;
    numberOfBytes++;
    if (numberOfBytes >= inputImage._length)
    {
        // Nothing left after the picture ID; not a decodable frame.
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    }

    // check for missing frames
    if (missingFrames)
    {
        // call decoder with zero data length to signal missing frames
        if (vpx_codec_decode(_decoder, NULL, 0, 0, VPX_DL_REALTIME))
        {
            return WEBRTC_VIDEO_CODEC_ERROR;
        }
    }

    // we remove the picture ID here
    if (vpx_codec_decode(_decoder,
                         inputImage._buffer + numberOfBytes,
                         inputImage._length - numberOfBytes,
                         0,
                         VPX_DL_REALTIME))
    {
        return WEBRTC_VIDEO_CODEC_ERROR;
    }

    int lastRefUpdates = 0;
#ifdef DEV_PIC_LOSS
    if (vpx_codec_control(_decoder, VP8D_GET_LAST_REF_UPDATES, &lastRefUpdates))
    {
        return WEBRTC_VIDEO_CODEC_ERROR;
    }
    int corrupted = 0;
    if (vpx_codec_control(_decoder, VP8D_GET_FRAME_CORRUPTED, &corrupted))
    {
        return WEBRTC_VIDEO_CODEC_ERROR;
    }
#endif

    img = vpx_codec_get_frame(_decoder, &_iter);
    // BUG FIX: the original dereferenced img without a NULL check; libvpx
    // may return NULL when no frame is available.
    if (img == NULL)
    {
        return WEBRTC_VIDEO_CODEC_ERROR;
    }

    // Allocate memory for the decoded image (I420: 3/2 bytes per pixel).
    WebRtc_UWord32 requiredSize = (3 * img->h * img->w) >> 1;
    if (_decodedImage._buffer != NULL)
    {
        delete [] _decodedImage._buffer;
        _decodedImage._buffer = NULL;
    }
    if (_decodedImage._buffer == NULL)
    {
        _decodedImage._size = requiredSize;
        _decodedImage._buffer = new WebRtc_UWord8[_decodedImage._size];
        if (_decodedImage._buffer == NULL)
        {
            return WEBRTC_VIDEO_CODEC_MEMORY;
        }
    }

    // Copy the three planes row by row, dropping libvpx's stride padding.
    WebRtc_UWord8* buf;
    WebRtc_UWord32 locCnt = 0;
    WebRtc_UWord32 plane, y;

    for (plane = 0; plane < 3; plane++)
    {
        buf = img->planes[plane];
        WebRtc_UWord32 shiftFactor = plane ? 1 : 0; // chroma is half size
        for (y = 0; y < img->d_h >> shiftFactor; y++)
        {
            memcpy(&_decodedImage._buffer[locCnt], buf, img->d_w >> shiftFactor);
            locCnt += img->d_w >> shiftFactor;
            buf += img->stride[plane];
        }
    }

    // Set image parameters
    _decodedImage._height = img->d_h;
    _decodedImage._width = img->d_w;
    _decodedImage._length = (3 * img->d_h * img->d_w) >> 1;
    _decodedImage._timeStamp = inputImage._timeStamp;
    _decodeCompleteCallback->Decoded(_decodedImage);

    // we need to communicate that we should send a RPSI with a specific picture ID

    // TODO(pw): how do we know it's a golden or alt reference frame? On2 will provide an API
    // for now I added it temporarily
    if ((lastRefUpdates & VP8_GOLD_FRAME) || (lastRefUpdates & VP8_ALTR_FRAME))
    {
        if (!missingFrames && (inputImage._completeFrame == true))
        //if (!corrupted) // TODO(pw): Can we engage this line intead of the above?
        {
            _decodeCompleteCallback->ReceivedDecodedReferenceFrame(pictureID);
        }
    }
    _decodeCompleteCallback->ReceivedDecodedFrame(pictureID);

#ifdef DEV_PIC_LOSS
    if (corrupted)
    {
        // we can decode but with artifacts
        return WEBRTC_VIDEO_CODEC_REQUEST_SLI;
    }
#endif
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
WebRtc_Word32
VP8Decoder::RegisterDecodeCompleteCallback(DecodedImageCallback* callback)
{
    // Store the sink that will receive every decoded frame; Decode()
    // refuses to run until this has been set.
    _decodeCompleteCallback = callback;
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
WebRtc_Word32
VP8Decoder::Release()
{
    // Free the decoded-image buffer and tear down the libvpx context.
    // Safe to call repeatedly.
    delete [] _decodedImage._buffer; // delete[] on NULL is a no-op
    _decodedImage._buffer = NULL;

    if (_decoder != NULL)
    {
        if (vpx_codec_destroy(_decoder))
        {
            // NOTE(review): on destroy failure the context object is leaked
            // and _inited stays true — mirrors the original behavior.
            return WEBRTC_VIDEO_CODEC_MEMORY;
        }
        delete _decoder;
        _decoder = NULL;
    }

    _inited = false;
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
}
|
||||
94
modules/video_coding/codecs/vp8/main/source/vp8.gyp
Normal file
94
modules/video_coding/codecs/vp8/main/source/vp8.gyp
Normal file
@ -0,0 +1,94 @@
|
||||
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
{
|
||||
'includes': [
|
||||
'../../../../../../common_settings.gypi', # Common settings
|
||||
],
|
||||
'targets': [
|
||||
{
|
||||
'target_name': 'webrtc_vp8',
|
||||
'type': '<(library)',
|
||||
'dependencies': [
|
||||
'../../../../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
|
||||
],
|
||||
'include_dirs': [
|
||||
'../interface',
|
||||
'../../../interface',
|
||||
'../../../../../interface',
|
||||
],
|
||||
'conditions': [
|
||||
['build_with_chromium==1', {
|
||||
'conditions': [
|
||||
['OS=="win"', {
|
||||
'dependencies': [
|
||||
# We don't want to link with the static library inside Chromium
|
||||
# on Windows. Chromium uses the ffmpeg DLL and exports the
|
||||
# necessary libvpx symbols for us.
|
||||
'../../../../../../../libvpx/libvpx.gyp:libvpx_include',
|
||||
],
|
||||
},{
|
||||
'dependencies': [
|
||||
'../../../../../../../libvpx/libvpx.gyp:libvpx',
|
||||
],
|
||||
'include_dirs': [
|
||||
'../../../../../../../libvpx/source/libvpx',
|
||||
],
|
||||
}],
|
||||
],
|
||||
},{
|
||||
'dependencies': [
|
||||
'../../../../../../../third_party/libvpx/libvpx.gyp:libvpx',
|
||||
],
|
||||
'include_dirs': [
|
||||
'../../../../../../../third_party/libvpx',
|
||||
],
|
||||
}],
|
||||
],
|
||||
'direct_dependent_settings': {
|
||||
'include_dirs': [
|
||||
'../interface',
|
||||
'../../../interface',
|
||||
],
|
||||
},
|
||||
'sources': [
|
||||
'../interface/vp8.h',
|
||||
'vp8.cc',
|
||||
],
|
||||
},
|
||||
|
||||
{
|
||||
'target_name': 'vp8_test',
|
||||
'type': 'executable',
|
||||
'dependencies': [
|
||||
'webrtc_vp8',
|
||||
'../../../../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
|
||||
'../../../test_framework/test_framework.gyp:test_framework',
|
||||
'../../../../../../common_video/vplib/main/source/vplib.gyp:webrtc_vplib'
|
||||
],
|
||||
'sources': [
|
||||
# header files
|
||||
'../test/benchmark.h',
|
||||
'../test/normal_async_test.h',
|
||||
'../test/packet_loss_test.h',
|
||||
'../test/unit_test.h',
|
||||
'../test/dual_decoder_test.h',
|
||||
|
||||
# source files
|
||||
'../test/benchmark.cc',
|
||||
'../test/normal_async_test.cc',
|
||||
'../test/packet_loss_test.cc',
|
||||
'../test/tester.cc',
|
||||
'../test/unit_test.cc',
|
||||
'../test/dual_decoder_test.cc',
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
# Local Variables:
|
||||
# tab-width:2
|
||||
# indent-tabs-mode:nil
|
||||
# End:
|
||||
# vim: set expandtab tabstop=2 shiftwidth=2:
|
||||
44
modules/video_coding/codecs/vp8/main/test/benchmark.cc
Normal file
44
modules/video_coding/codecs/vp8/main/test/benchmark.cc
Normal file
@ -0,0 +1,44 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "benchmark.h"
|
||||
#include "vp8.h"
|
||||
|
||||
using namespace webrtc;
|
||||
|
||||
// Default benchmark: canned name/description, results written to the
// default results file.
VP8Benchmark::VP8Benchmark()
    : Benchmark("VP8Benchmark", "VP8 benchmark over a range of test cases",
                "../../VP8Benchmark.txt", "VP8")
{
}
|
||||
|
||||
// Benchmark with caller-supplied name/description; default results file.
VP8Benchmark::VP8Benchmark(std::string name, std::string description)
    : Benchmark(name, description, "../../VP8Benchmark.txt", "VP8")
{
}
|
||||
|
||||
// Fully customized benchmark: name, description and results file.
VP8Benchmark::VP8Benchmark(std::string name, std::string description, std::string resultsFileName)
    : Benchmark(name, description, resultsFileName, "VP8")
{
}
|
||||
|
||||
// Factory hook for the benchmark framework; caller owns the returned encoder.
VideoEncoder*
VP8Benchmark::GetNewEncoder()
{
    return new VP8Encoder();
}
|
||||
|
||||
// Factory hook for the benchmark framework; caller owns the returned decoder.
VideoDecoder*
VP8Benchmark::GetNewDecoder()
{
    return new VP8Decoder();
}
|
||||
28
modules/video_coding/codecs/vp8/main/test/benchmark.h
Normal file
28
modules/video_coding/codecs/vp8/main/test/benchmark.h
Normal file
@ -0,0 +1,28 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_BENCHMARK_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_BENCHMARK_H_
|
||||
|
||||
#include "../../../test_framework/benchmark.h"
|
||||
|
||||
// Benchmark specialization that plugs the VP8 encoder/decoder pair into the
// generic codec benchmark framework.
class VP8Benchmark : public Benchmark
{
public:
    // Default name/description; results go to ../../VP8Benchmark.txt.
    VP8Benchmark();
    VP8Benchmark(std::string name, std::string description);
    VP8Benchmark(std::string name, std::string description, std::string resultsFileName);

protected:
    // Factory hooks used by the framework; caller owns the returned objects.
    virtual webrtc::VideoEncoder* GetNewEncoder();
    virtual webrtc::VideoDecoder* GetNewDecoder();
};
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_BENCHMARK_H_
|
||||
219
modules/video_coding/codecs/vp8/main/test/dual_decoder_test.cc
Normal file
219
modules/video_coding/codecs/vp8/main/test/dual_decoder_test.cc
Normal file
@ -0,0 +1,219 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "dual_decoder_test.h"
|
||||
|
||||
#include <assert.h>
|
||||
#include <string.h> // memcmp
|
||||
#include <time.h>
|
||||
|
||||
// Dual-decoder test at the given target bit rate; the second decoder is
// created lazily during Perform().
VP8DualDecoderTest::VP8DualDecoderTest(float bitRate)
    : VP8NormalAsyncTest(bitRate),
      _decoder2(NULL)
{
}
|
||||
|
||||
// Default dual-decoder test; the second decoder is created lazily during
// Perform().
VP8DualDecoderTest::VP8DualDecoderTest()
    // FIX: list the base class before members — C++ always initializes the
    // base first, and the original member-before-base order triggered a
    // -Wreorder warning and misrepresented the actual initialization order.
    : VP8NormalAsyncTest("VP8 Dual Decoder Test", "Tests VP8 dual decoder", 1),
      _decoder2(NULL)
{}
|
||||
|
||||
VP8DualDecoderTest::~VP8DualDecoderTest()
{
    // The clone may never have been created (Perform() makes it lazily).
    if (_decoder2 != NULL)
    {
        _decoder2->Release();
        delete _decoder2;
    }
    _decodedVideoBuffer2.Free();
}
|
||||
|
||||
// Runs the full dual-decoder scenario: encode foreman_cif.yuv, decode every
// frame with the primary decoder, and after 10 encoded frames clone the
// decoder via Copy(); Decode() then feeds both decoders and verifies their
// outputs are bit-exact. Prints/logs timing and bitrate statistics.
void
VP8DualDecoderTest::Perform()
{
    _inname = "test/testFiles/foreman_cif.yuv";
    CodecSettings(352, 288, 30, _bitRate); // CIF @ 30 fps
    Setup();
    _inputVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);
    _decodedVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);
    _decodedVideoBuffer2.VerifyAndAllocate(_lengthSourceFrame);
    // 4 cores, 1460-byte max payload (typical Ethernet MTU minus overhead).
    if(_encoder->InitEncode(&_inst, 4, 1460) < 0)
    {
        exit(EXIT_FAILURE);
    }
    _decoder->InitDecode(&_inst,1);

    FrameQueue frameQueue;
    VideoEncodeCompleteCallback encCallback(_encodedFile, &frameQueue, *this);
    DualDecoderCompleteCallback decCallback(&_decodedVideoBuffer);
    DualDecoderCompleteCallback decCallback2(&_decodedVideoBuffer2);
    _encoder->RegisterEncodeCompleteCallback(&encCallback);
    _decoder->RegisterDecodeCompleteCallback(&decCallback);
    if (SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK)
    {
        exit(EXIT_FAILURE);
    }
    // Reset all counters before the encode/decode loop.
    _totalEncodeTime = _totalDecodeTime = 0;
    _totalEncodePipeTime = _totalDecodePipeTime = 0;
    bool complete = false;
    _framecnt = 0;
    _encFrameCnt = 0;
    _decFrameCnt = 0;
    _sumEncBytes = 0;
    _lengthEncFrame = 0;
    double starttime = clock()/(double)CLOCKS_PER_SEC;
    while (!complete)
    {
        if (_encFrameCnt == 10)
        {
            // initialize second decoder and copy state
            _decoder2 = static_cast<webrtc::VP8Decoder *>(_decoder->Copy());
            assert(_decoder2 != NULL);
            _decoder2->RegisterDecodeCompleteCallback(&decCallback2);
        }
        CodecSpecific_InitBitrate();
        complete = Encode();
        if (!frameQueue.Empty() || complete)
        {
            // Drain every encoded frame the encoder has produced so far.
            while (!frameQueue.Empty())
            {
                _frameToDecode =
                    static_cast<FrameQueueTuple *>(frameQueue.PopFrame());
                int lost = DoPacketLoss();
                if (lost == 2)
                {
                    // Lost the whole frame, continue
                    _missingFrames = true;
                    delete _frameToDecode;
                    _frameToDecode = NULL;
                    continue;
                }
                int ret = Decode(lost);
                delete _frameToDecode;
                _frameToDecode = NULL;
                if (ret < 0)
                {
                    fprintf(stderr,"\n\nError in decoder: %d\n\n", ret);
                    exit(EXIT_FAILURE);
                }
                else if (ret == 0)
                {
                    _framecnt++;
                }
                else
                {
                    fprintf(stderr,
                        "\n\nPositive return value from decode!\n\n");
                }
            }
        }
    }
    double endtime = clock()/(double)CLOCKS_PER_SEC;
    double totalExecutionTime = endtime - starttime;
    printf("Total execution time: %.1f s\n", totalExecutionTime);
    _sumEncBytes = encCallback.EncodedBytes();
    double actualBitRate = ActualBitRate(_encFrameCnt) / 1000.0;
    double avgEncTime = _totalEncodeTime / _encFrameCnt;
    double avgDecTime = _totalDecodeTime / _decFrameCnt;
    printf("Actual bitrate: %f kbps\n", actualBitRate);
    printf("Average encode time: %.1f ms\n", 1000 * avgEncTime);
    printf("Average decode time: %.1f ms\n", 1000 * avgDecTime);
    printf("Average encode pipeline time: %.1f ms\n",
           1000 * _totalEncodePipeTime / _encFrameCnt);
    printf("Average decode pipeline time: %.1f ms\n",
           1000 * _totalDecodePipeTime / _decFrameCnt);
    printf("Number of encoded frames: %u\n", _encFrameCnt);
    printf("Number of decoded frames: %u\n", _decFrameCnt);
    (*_log) << "Actual bitrate: " << actualBitRate << " kbps\tTarget: " <<
        _bitRate << " kbps" << std::endl;
    (*_log) << "Average encode time: " << avgEncTime << " s" << std::endl;
    (*_log) << "Average decode time: " << avgDecTime << " s" << std::endl;
    _encoder->Release();
    _decoder->Release();
    Teardown();
}
|
||||
|
||||
|
||||
int
|
||||
VP8DualDecoderTest::Decode(int lossValue)
|
||||
{
|
||||
_sumEncBytes += _frameToDecode->_frame->GetLength();
|
||||
double starttime = 0;
|
||||
webrtc::EncodedImage encodedImage;
|
||||
VideoEncodedBufferToEncodedImage(*(_frameToDecode->_frame), encodedImage);
|
||||
encodedImage._completeFrame = !lossValue;
|
||||
_decodeCompleteTime = 0;
|
||||
_decodeTimes[encodedImage._timeStamp] = clock()/(double)CLOCKS_PER_SEC;
|
||||
int ret = _decoder->Decode(encodedImage, _missingFrames,
|
||||
_frameToDecode->_codecSpecificInfo);
|
||||
// second decoder
|
||||
if (_decoder2)
|
||||
{
|
||||
int ret2 = _decoder2->Decode(encodedImage, _missingFrames,
|
||||
_frameToDecode->_codecSpecificInfo, 0 /* dummy */);
|
||||
|
||||
// check return values
|
||||
if (ret < 0 || ret2 < 0 || ret2 != ret)
|
||||
{
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
// compare decoded images
|
||||
if (!CheckIfBitExact(_decodedVideoBuffer.GetBuffer(),
|
||||
_decodedVideoBuffer.GetLength(),
|
||||
_decodedVideoBuffer2.GetBuffer(), _decodedVideoBuffer.GetLength()))
|
||||
{
|
||||
fprintf(stderr,"\n\nClone output different from master.\n\n");
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
_missingFrames = false;
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
bool
|
||||
VP8DualDecoderTest::CheckIfBitExact(const void* ptrA, unsigned int aLengthBytes,
|
||||
const void* ptrB, unsigned int bLengthBytes)
|
||||
{
|
||||
if (aLengthBytes != bLengthBytes)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
return memcmp(ptrA, ptrB, aLengthBytes) == 0;
|
||||
}
|
||||
|
||||
WebRtc_Word32 DualDecoderCompleteCallback::Decoded(webrtc::RawImage& image)
{
    // Copy the decoded frame and its metadata into the target buffer and
    // raise the one-shot completion flag.
    TestVideoBuffer* const target = _decodedVideoBuffer;
    target->VerifyAndAllocate(image._length);
    target->CopyBuffer(image._length, image._buffer);
    target->SetWidth(image._width);
    target->SetHeight(image._height);
    target->SetTimeStamp(image._timeStamp);
    _decodeComplete = true;
    return 0;
}
|
||||
|
||||
bool DualDecoderCompleteCallback::DecodeComplete()
|
||||
{
|
||||
if (_decodeComplete)
|
||||
{
|
||||
_decodeComplete = false;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -0,0 +1,52 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_DUAL_DECODER_TEST_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_DUAL_DECODER_TEST_H_
|
||||
|
||||
#include "vp8.h"
|
||||
#include "normal_async_test.h"
|
||||
|
||||
class DualDecoderCompleteCallback;
|
||||
|
||||
// Test that creates a second VP8 decoder mid-stream via VP8Decoder::Copy()
// and verifies that the clone's output stays bit-exact with the original
// decoder's output.
class VP8DualDecoderTest : public VP8NormalAsyncTest
{
public:
    VP8DualDecoderTest(float bitRate);
    VP8DualDecoderTest();
    virtual ~VP8DualDecoderTest();
    virtual void Perform();
protected:
    VP8DualDecoderTest(std::string name, std::string description,
                       unsigned int testNo)
    : VP8NormalAsyncTest(name, description, testNo) {}
    // Decodes with both decoders (once the clone exists) and compares.
    virtual int Decode(int lossValue = 0);

    webrtc::VP8Decoder* _decoder2;        // cloned decoder; NULL until created in Perform()
    TestVideoBuffer _decodedVideoBuffer2; // output buffer for the clone
    // True iff the two buffers have identical length and content.
    static bool CheckIfBitExact(const void *ptrA, unsigned int aLengthBytes,
                                const void *ptrB, unsigned int bLengthBytes);
private:
};
|
||||
|
||||
// DecodedImageCallback that copies each decoded frame into an externally
// owned TestVideoBuffer and latches a one-shot completion flag.
class DualDecoderCompleteCallback : public webrtc::DecodedImageCallback
{
public:
    // Does not take ownership of |buffer|; it must outlive this callback.
    DualDecoderCompleteCallback(TestVideoBuffer* buffer)
    : _decodedVideoBuffer(buffer), _decodeComplete(false) {}
    WebRtc_Word32 Decoded(webrtc::RawImage& decodedImage);
    // Returns true once after each completed decode, then resets the flag.
    bool DecodeComplete();
private:
    TestVideoBuffer* _decodedVideoBuffer;
    bool _decodeComplete;
};
|
||||
|
||||
|
||||
#endif
|
||||
@ -0,0 +1,81 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "normal_async_test.h"
|
||||
|
||||
using namespace webrtc;
|
||||
|
||||
// Constructs the default VP8 async test (test number 1) with the given
// target bit rate; no RPSI feedback has been received yet.
VP8NormalAsyncTest::VP8NormalAsyncTest(WebRtc_UWord32 bitRate)
    : NormalAsyncTest("VP8 Normal Test 1", "Tests VP8 normal execution",
                      bitRate, 1),
      _hasReceivedRPSI(false)
{
}

// Same as above, but with an explicit test number.
VP8NormalAsyncTest::VP8NormalAsyncTest(WebRtc_UWord32 bitRate,
                                       unsigned int testNo)
    : NormalAsyncTest("VP8 Normal Test 1", "Tests VP8 normal execution",
                      bitRate, testNo),
      _hasReceivedRPSI(false)
{
}
|
||||
|
||||
void
|
||||
VP8NormalAsyncTest::CodecSettings(int width, int height, WebRtc_UWord32 frameRate /*=30*/, WebRtc_UWord32 bitRate /*=0*/)
|
||||
{
|
||||
if (bitRate > 0)
|
||||
{
|
||||
_bitRate = bitRate;
|
||||
|
||||
}else if (_bitRate == 0)
|
||||
{
|
||||
_bitRate = 600;
|
||||
}
|
||||
_inst.codecType = kVideoCodecVP8;
|
||||
_inst.codecSpecific.VP8.feedbackModeOn = true;
|
||||
_inst.codecSpecific.VP8.pictureLossIndicationOn = true;
|
||||
_inst.codecSpecific.VP8.complexity;
|
||||
_inst.maxFramerate = (unsigned char)frameRate;
|
||||
_inst.startBitrate = _bitRate;
|
||||
_inst.maxBitrate = 8000;
|
||||
_inst.width = width;
|
||||
_inst.height = height;
|
||||
}
|
||||
|
||||
void
|
||||
VP8NormalAsyncTest::CodecSpecific_InitBitrate()
|
||||
{
|
||||
if (_bitRate == 0)
|
||||
{
|
||||
_encoder->SetRates(600, _inst.maxFramerate);
|
||||
}else
|
||||
{
|
||||
_encoder->SetRates(_bitRate, _inst.maxFramerate);
|
||||
}
|
||||
}
|
||||
|
||||
// RPSI callback: records the picture ID of the reference frame reported as
// decoded, and raises the flag consumed by CreateEncoderSpecificInfo().
WebRtc_Word32
VP8NormalAsyncTest::ReceivedDecodedReferenceFrame(const WebRtc_UWord64 pictureId)
{
    _pictureIdRPSI = pictureId;
    _hasReceivedRPSI = true;
    return 0;
}
|
||||
|
||||
void*
|
||||
VP8NormalAsyncTest::CreateEncoderSpecificInfo() const
|
||||
{
|
||||
CodecSpecificInfo* vp8CodecSpecificInfo = new CodecSpecificInfo();
|
||||
vp8CodecSpecificInfo->codecType = kVideoCodecVP8;
|
||||
vp8CodecSpecificInfo->codecSpecific.VP8.hasReceivedRPSI = _hasReceivedRPSI;
|
||||
vp8CodecSpecificInfo->codecSpecific.VP8.pictureIdRPSI = _pictureIdRPSI;
|
||||
vp8CodecSpecificInfo->codecSpecific.VP8.hasReceivedSLI = false;
|
||||
|
||||
_hasReceivedRPSI = false;
|
||||
|
||||
return vp8CodecSpecificInfo;
|
||||
}
|
||||
@ -0,0 +1,33 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_NORMAL_ASYNC_TEST_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_NORMAL_ASYNC_TEST_H_
|
||||
|
||||
#include "../../../test_framework/normal_async_test.h"
|
||||
|
||||
// Normal asynchronous encode/decode test specialized for VP8, with support
// for RPSI (reference picture selection indication) feedback.
class VP8NormalAsyncTest : public NormalAsyncTest
{
public:
    VP8NormalAsyncTest(WebRtc_UWord32 bitRate);
    VP8NormalAsyncTest(WebRtc_UWord32 bitRate, unsigned int testNo);
    VP8NormalAsyncTest() : NormalAsyncTest("VP8 Normal Test 1", "Tests VP8 normal execution", 1) {}
protected:
    VP8NormalAsyncTest(std::string name, std::string description, unsigned int testNo) : NormalAsyncTest(name, description, testNo) {}
    // Applies the configured bit rate to the encoder (600 kbps fallback).
    virtual void CodecSpecific_InitBitrate();
    // Fills _inst with VP8 settings for the given dimensions and rates.
    virtual void CodecSettings(int width, int height, WebRtc_UWord32 frameRate=30, WebRtc_UWord32 bitRate=0);
    // Returns a heap-allocated CodecSpecificInfo; caller takes ownership.
    virtual void* CreateEncoderSpecificInfo() const;
    // RPSI callback: stores the picture ID for the next encoded frame.
    virtual WebRtc_Word32 ReceivedDecodedReferenceFrame(const WebRtc_UWord64 pictureId);
private:
    // mutable so the const CreateEncoderSpecificInfo() can reset it.
    mutable bool _hasReceivedRPSI;
    WebRtc_UWord64 _pictureIdRPSI;
};
|
||||
|
||||
#endif
|
||||
@ -0,0 +1,57 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "packet_loss_test.h"
|
||||
#include <cassert>
|
||||
|
||||
// Default-constructed test: base-class defaults for loss rate and NACK.
VP8PacketLossTest::VP8PacketLossTest()
    : PacketLossTest("VP8PacketLossTest", "Encode, remove lost packets, decode")
{
}

// Derived-class hook: forwards a custom name/description to the base.
VP8PacketLossTest::VP8PacketLossTest(std::string name, std::string description)
    : PacketLossTest(name, description)
{
}

// Test with an explicit loss rate and NACK setting.
VP8PacketLossTest::VP8PacketLossTest(double lossRate, bool useNack)
    : PacketLossTest("VP8PacketLossTest", "Encode, remove lost packets, decode",
                     lossRate, useNack)
{
}
|
||||
|
||||
// Applies the target bit rate to the encoder, reducing it when NACK is
// simulated so that the retransmission overhead fits in the channel.
void
VP8PacketLossTest::CodecSpecific_InitBitrate()
{
    assert(_bitRate > 0);
    WebRtc_UWord32 simulatedBitRate;
    // NOTE(review): _lossProbability != _lossRate is used here as the signal
    // that NACK is active (the base class presumably adjusts _lossProbability
    // when NACK is enabled) -- confirm against PacketLossTest.
    if (_lossProbability != _lossRate)
    {
        // Simulating NACK
        // Scale the rate down so payload + retransmissions fit in _bitRate.
        simulatedBitRate = (WebRtc_UWord32)(_bitRate / (1 + _lossRate));
    }
    else
    {
        simulatedBitRate = _bitRate;
    }
    _encoder->SetRates(simulatedBitRate, _inst.maxFramerate);
}
|
||||
|
||||
// Simulates byte loss on a packet of |size| bytes: returns the number of
// bytes that remain after losing |bytesToLose|, never below a 4-byte minimum.
int VP8PacketLossTest::ByteLoss(int size, unsigned char* /* pkg */, int bytesToLose)
{
    const int remaining = size - bytesToLose;
    return (remaining < 4) ? 4 : remaining;
}
|
||||
29
modules/video_coding/codecs/vp8/main/test/packet_loss_test.h
Normal file
29
modules/video_coding/codecs/vp8/main/test/packet_loss_test.h
Normal file
@ -0,0 +1,29 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_PACKET_LOSS_TEST_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_PACKET_LOSS_TEST_H_
|
||||
|
||||
#include "../../../test_framework/packet_loss_test.h"
|
||||
|
||||
// Packet-loss test specialized for VP8: encodes, removes lost packet bytes,
// and decodes what remains.
class VP8PacketLossTest : public PacketLossTest
{
public:
    VP8PacketLossTest();
    VP8PacketLossTest(double lossRate, bool useNack);

protected:
    VP8PacketLossTest(std::string name, std::string description);
    // Sets the encoder rate, compensating for simulated NACK overhead.
    virtual void CodecSpecific_InitBitrate();
    // Returns the packet length after losing bytesToLose bytes (min 4).
    virtual int ByteLoss(int size, unsigned char *pkg, int bytesToLose);

};
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_PACKET_LOSS_TEST_H_
|
||||
57
modules/video_coding/codecs/vp8/main/test/tester.cc
Normal file
57
modules/video_coding/codecs/vp8/main/test/tester.cc
Normal file
@ -0,0 +1,57 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "packet_loss_test.h"
|
||||
#include "benchmark.h"
|
||||
#include "unit_test.h"
|
||||
#include "normal_async_test.h"
|
||||
#include "dual_decoder_test.h"
|
||||
#include "vp8.h"
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
|
||||
using namespace webrtc;
|
||||
|
||||
// Registers the codec tests to run; main() takes ownership of the pointers.
// Only the unit test is enabled by default -- uncomment the others to run them.
void PopulateTests(std::vector<Test*>* tests)
{
    tests->push_back(new VP8UnitTest());
//    tests->push_back(new VP8DualDecoderTest());
//    tests->push_back(new VP8Benchmark());
//    tests->push_back(new VP8PacketLossTest());
//    tests->push_back(new VP8NormalAsyncTest());
}
|
||||
|
||||
int main()
|
||||
{
|
||||
VP8Encoder* enc;
|
||||
VP8Decoder* dec;
|
||||
std::vector<Test*> tests;
|
||||
PopulateTests(&tests);
|
||||
std::fstream log;
|
||||
log.open("../../TestLog.txt", std::fstream::out | std::fstream::app);
|
||||
std::vector<Test*>::iterator it;
|
||||
for (it = tests.begin() ; it < tests.end(); it++)
|
||||
{
|
||||
enc = new VP8Encoder();
|
||||
dec = new VP8Decoder();
|
||||
(*it)->SetEncoder(enc);
|
||||
(*it)->SetDecoder(dec);
|
||||
(*it)->SetLog(&log);
|
||||
(*it)->Perform();
|
||||
(*it)->Print();
|
||||
delete enc;
|
||||
delete dec;
|
||||
delete *it;
|
||||
}
|
||||
log.close();
|
||||
tests.pop_back();
|
||||
return 0;
|
||||
}
|
||||
168
modules/video_coding/codecs/vp8/main/test/unit_test.cc
Normal file
168
modules/video_coding/codecs/vp8/main/test/unit_test.cc
Normal file
@ -0,0 +1,168 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "unit_test.h"
|
||||
#include "../../../test_framework/video_source.h"
|
||||
#include "vp8.h"
|
||||
|
||||
#include <string.h>
|
||||
|
||||
using namespace webrtc;
|
||||
|
||||
// Default unit test instance.
VP8UnitTest::VP8UnitTest()
    : UnitTest("VP8UnitTest", "Unit test")
{
}

// Variant allowing a custom name and description, for derived tests.
VP8UnitTest::VP8UnitTest(std::string name, std::string description)
    : UnitTest(name, description)
{
}
|
||||
|
||||
// Verifies that the encoder reports a version string. The actual printout
// and the base-class report are currently disabled (commented out).
void
VP8UnitTest::Print()
{
    WebRtc_Word8 versionStr[64];

    // GetVersion tests.

    VIDEO_TEST(_encoder->Version(versionStr, sizeof(versionStr)) > 0);
//    printf("\n%s", versionStr);
//    UnitTest::Print();
}
|
||||
|
||||
// Applies |bitRate| to the encoder and returns the value SetRates() reports.
// Exits the test on a negative (error) return.
WebRtc_UWord32
VP8UnitTest::CodecSpecific_SetBitrate(WebRtc_UWord32 bitRate, WebRtc_UWord32 /*frameRate*/)
{
    // Capture the result as SIGNED before testing it. The original stored it
    // back into the unsigned |bitRate| parameter, making "bitRate >= 0" a
    // tautology and silently disabling the error check. (SetRates() returns a
    // signed status elsewhere in this file, e.g. the
    // WEBRTC_VIDEO_CODEC_UNINITIALIZED comparisons in Perform().)
    const WebRtc_Word32 ret = _encoder->SetRates(bitRate, _inst.maxFramerate);
    VIDEO_TEST_EXIT_ON_ERR(ret >= 0);
    return (WebRtc_UWord32)ret;
}
|
||||
|
||||
// Compares two encoded payloads for bit-exactness, first skipping the
// variable-length picture ID at the start of each buffer.
bool
VP8UnitTest::CheckIfBitExact(const void* ptrA, unsigned int aLengthBytes,
                             const void* ptrB, unsigned int bLengthBytes)
{
    const unsigned char* cPtrA = (const unsigned char*)ptrA;
    const unsigned char* cPtrB = (const unsigned char*)ptrB;
    // Skip picture ID before comparing
    int aSkip = PicIdLength(cPtrA);
    int bSkip = PicIdLength(cPtrB);
    // NOTE(review): the lengths passed to the base class are NOT reduced by
    // the skipped picture-ID bytes, so the comparison may run aSkip/bSkip
    // bytes past the end of each buffer -- confirm whether these should be
    // aLengthBytes - aSkip and bLengthBytes - bSkip.
    return UnitTest::CheckIfBitExact(cPtrA + aSkip, aLengthBytes,
                                     cPtrB + bSkip, bLengthBytes);
}
|
||||
|
||||
int
|
||||
VP8UnitTest::PicIdLength(const unsigned char* ptr)
|
||||
{
|
||||
WebRtc_UWord8 numberOfBytes;
|
||||
WebRtc_UWord64 pictureID = 0;
|
||||
for (numberOfBytes = 0; (ptr[numberOfBytes] & 0x80) && numberOfBytes < 8; numberOfBytes++)
|
||||
{
|
||||
pictureID += ptr[numberOfBytes] & 0x7f;
|
||||
pictureID <<= 7;
|
||||
}
|
||||
pictureID += ptr[numberOfBytes] & 0x7f;
|
||||
numberOfBytes++;
|
||||
return numberOfBytes;
|
||||
}
|
||||
|
||||
// Runs the full VP8 unit test: encoder/decoder API error handling before and
// after initialization, then the generic UnitTest encode/decode sequence.
// The call ORDER matters -- the pre-init checks must precede InitEncode/
// InitDecode, and the final InitEncode(&_inst, ...) configures the codec
// that UnitTest::Perform() exercises.
void
VP8UnitTest::Perform()
{
    Setup();
    FILE *outFile = NULL;
    std::string outFileName;
    // The base class stores generic codec interfaces; downcast to reach the
    // VP8-specific entry points.
    VP8Encoder* enc = (VP8Encoder*)_encoder;
    VP8Decoder* dec = (VP8Decoder*)_decoder;
    int frameLength = 0;  // NOTE(review): unused in the visible code.

    //----- Encoder parameter tests -----
    //-- Calls before InitEncode() --
    // Releasing an uninitialized encoder must succeed; setting rates on one
    // must report UNINITIALIZED.
    VIDEO_TEST(enc->Release() == WEBRTC_VIDEO_CODEC_OK);
    VIDEO_TEST(enc->SetRates(_bitRate, _inst.maxFramerate) == WEBRTC_VIDEO_CODEC_UNINITIALIZED);

    VIDEO_TEST(enc->SetRates(_bitRate, _inst.maxFramerate) == WEBRTC_VIDEO_CODEC_UNINITIALIZED);
//    VIDEO_TEST(enc->GetCodecConfigParameters(configParameters, sizeof(configParameters)) ==
//        WEBRTC_VIDEO_CODEC_UNINITIALIZED);

    // 1440x1080 settings: InitEncode() must accept them.
    VideoCodec codecInst;
    strncpy(codecInst.plName, "VP8", 31);
    codecInst.plType = 126;
    codecInst.maxBitrate = 0;
    codecInst.minBitrate = 0;
    codecInst.width = 1440;
    codecInst.height = 1080;
    codecInst.maxFramerate = 30;
    codecInst.startBitrate = 300;
    codecInst.codecSpecific.VP8.complexity = kComplexityNormal;
    VIDEO_TEST(enc->InitEncode(&codecInst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);

    //-- Test two problematic level settings --
    // CIF (352x288) at 30 fps: must also be accepted.
    strncpy(codecInst.plName, "VP8", 31);
    codecInst.plType = 126;
    codecInst.maxBitrate = 0;
    codecInst.minBitrate = 0;
    codecInst.width = 352;
    codecInst.height = 288;
    codecInst.maxFramerate = 30;
    codecInst.codecSpecific.VP8.complexity = kComplexityNormal;
    codecInst.startBitrate = 300;
    VIDEO_TEST(enc->InitEncode(&codecInst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);

    // Settings not correct for this profile
    // QCIF (176x144) at 15 fps; the level-exceeded expectation is disabled.
    strncpy(codecInst.plName, "VP8", 31);
    codecInst.plType = 126;
    codecInst.maxBitrate = 0;
    codecInst.minBitrate = 0;
    codecInst.width = 176;
    codecInst.height = 144;
    codecInst.maxFramerate = 15;
    codecInst.codecSpecific.VP8.complexity = kComplexityNormal;
    codecInst.startBitrate = 300;
    //VIDEO_TEST(enc->InitEncode(&codecInst, 1, 1440) == WEBRTC_VIDEO_CODEC_LEVEL_EXCEEDED);

    // Initialize with the test's own settings; used by UnitTest::Perform().
    VIDEO_TEST_EXIT_ON_ERR(enc->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);

    //-- ProcessNewBitrate() errors --
    // Bad bitrate.
    // A rate above maxBitrate is still accepted (clamped by the encoder).
    VIDEO_TEST(enc->SetRates(_inst.maxBitrate + 1, _inst.maxFramerate) == WEBRTC_VIDEO_CODEC_OK);

    // Signaling not used.

    // Bad packetloss.
    // VIDEO_TEST(enc->SetPacketLoss(300) < 0);

    //----- Decoder parameter tests -----
    //-- Calls before InitDecode() --
    VIDEO_TEST(dec->Release() == 0);
    VIDEO_TEST_EXIT_ON_ERR(dec->InitDecode(&_inst, 1) == WEBRTC_VIDEO_CODEC_OK);

    //-- SetCodecConfigParameters() errors --
    // NULL buffer, too-small size, and garbage contents must all be rejected.
    unsigned char tmpBuf[128];
    VIDEO_TEST(dec->SetCodecConfigParameters(NULL, sizeof(tmpBuf)) == -1);
    VIDEO_TEST(dec->SetCodecConfigParameters(tmpBuf, 1) == -1);
    // Garbage data.
    VIDEO_TEST(dec->SetCodecConfigParameters(tmpBuf, sizeof(tmpBuf)) == -1);

    //----- Function tests -----
    // NOTE(review): outFile is opened here but not closed in the visible
    // code -- confirm whether UnitTest::Perform()/Teardown() owns it.
    outFileName = "../../" + _source->GetName() + "-errResTest.yuv";
    outFile = fopen(outFileName.c_str(), "wb");
    VIDEO_TEST_EXIT_ON_ERR(outFile != NULL);

    UnitTest::Perform();
    Teardown();

}
|
||||
40
modules/video_coding/codecs/vp8/main/test/unit_test.h
Normal file
40
modules/video_coding/codecs/vp8/main/test/unit_test.h
Normal file
@ -0,0 +1,40 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_UNIT_TEST_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_UNIT_TEST_H_
|
||||
|
||||
#include "../../../test_framework/unit_test.h"
|
||||
|
||||
// VP8-specific unit test: exercises encoder/decoder parameter validation,
// then runs the generic UnitTest encode/decode sequence.
class VP8UnitTest : public UnitTest
{
public:
    VP8UnitTest();
    VP8UnitTest(std::string name, std::string description);
    virtual void Perform();
    virtual void Print();

protected:
    // Applies a new bit rate via the encoder; exits the test on error.
    virtual WebRtc_UWord32 CodecSpecific_SetBitrate(WebRtc_UWord32 bitRate,
                                                    WebRtc_UWord32 /*frameRate*/);
    // Compares two payloads, skipping the leading picture-ID bytes.
    virtual bool CheckIfBitExact(const void *ptrA, unsigned int aLengthBytes,
                                 const void *ptrB, unsigned int bLengthBytes);
    // Returns the byte length of the variable-length picture ID at ptr.
    static int PicIdLength(const unsigned char* ptr);
};
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// RESERVATIONS TO PASSING UNIT TEST ON VP8 CODEC //
|
||||
// Test that will not pass: //
|
||||
// 1. Check bit exact for decoded images. //
|
||||
// 2. Target bitrate - Allow a margin of 10% instead of 5% //
|
||||
// 3. Detecting errors in bit stream - NA. //
|
||||
////////////////////////////////////////////////////////////////
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_UNIT_TEST_H_
|
||||
4
modules/video_coding/main/OWNERS
Normal file
4
modules/video_coding/main/OWNERS
Normal file
@ -0,0 +1,4 @@
|
||||
holmer@google.com
|
||||
mikhal@google.com
|
||||
marpan@google.com
|
||||
hlundin@google.com
|
||||
495
modules/video_coding/main/interface/video_coding.h
Normal file
495
modules/video_coding/main/interface/video_coding.h
Normal file
@ -0,0 +1,495 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_INTERFACE_VIDEO_CODING_H_
|
||||
#define WEBRTC_MODULES_INTERFACE_VIDEO_CODING_H_
|
||||
|
||||
#include "module.h"
|
||||
#include "module_common_types.h"
|
||||
#include "video_coding_defines.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
class VideoEncoder;
|
||||
class VideoDecoder;
|
||||
|
||||
class VideoCodingModule : public Module
|
||||
{
|
||||
public:
|
||||
static VideoCodingModule* Create(const WebRtc_Word32 id);
|
||||
|
||||
static void Destroy(VideoCodingModule* module);
|
||||
|
||||
// Get number of supported codecs
|
||||
//
|
||||
// Return value : Number of supported codecs
|
||||
static WebRtc_UWord8 NumberOfCodecs();
|
||||
|
||||
// Get supported codec settings with using id
|
||||
//
|
||||
// Input:
|
||||
// - listId : Id or index of the codec to look up
|
||||
// - codec : Memory where the codec settings will be stored
|
||||
//
|
||||
// Return value : VCM_OK, on success
|
||||
// VCM_PARAMETER_ERROR if codec not supported or id too high
|
||||
static WebRtc_Word32 Codec(const WebRtc_UWord8 listId, VideoCodec* codec);
|
||||
|
||||
// Get supported codec settings using codec type
|
||||
//
|
||||
// Input:
|
||||
// - codecType : The codec type to get settings for
|
||||
// - codec : Memory where the codec settings will be stored
|
||||
//
|
||||
// Return value : VCM_OK, on success
|
||||
// VCM_PARAMETER_ERROR if codec not supported
|
||||
static WebRtc_Word32 Codec(VideoCodecType codecType, VideoCodec* codec);
|
||||
|
||||
/*
|
||||
* Sender
|
||||
*/
|
||||
|
||||
// Any encoder-related state of VCM will be initialized to the
|
||||
// same state as when the VCM was created. This will not interrupt
|
||||
// or effect decoding functionality of VCM. VCM will lose all the
|
||||
// encoding-related settings by calling this function.
|
||||
// For instance, a send codec has to be registered again.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 InitializeSender() = 0;
|
||||
|
||||
// Resets the encoder state to the same state as when the encoder
|
||||
// was created.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 ResetEncoder() = 0;
|
||||
|
||||
// Registers a codec to be used for encoding. Calling this
|
||||
// API multiple times overwrites any previously registered codecs.
|
||||
//
|
||||
// Input:
|
||||
// - sendCodec : Settings for the codec to be registered.
|
||||
// - numberOfCores : The number of cores the codec is allowed
|
||||
// to use.
|
||||
// - maxPayloadSize : The maximum size each payload is allowed
|
||||
// to have. Usually MTU - overhead.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 RegisterSendCodec(const VideoCodec* sendCodec,
|
||||
WebRtc_UWord32 numberOfCores,
|
||||
WebRtc_UWord32 maxPayloadSize) = 0;
|
||||
|
||||
// API to get the current send codec in use.
|
||||
//
|
||||
// Input:
|
||||
// - currentSendCodec : Address where the sendCodec will be written.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 SendCodec(VideoCodec* currentSendCodec) const = 0;
|
||||
|
||||
// API to get the current send codec type
|
||||
//
|
||||
// Return value : Codec type, on success.
|
||||
// kVideoCodecUnknown, on error or if no send codec is set
|
||||
virtual VideoCodecType SendCodec() const = 0;
|
||||
|
||||
// Register an external encoder object. This can not be used together with
|
||||
// external decoder callbacks.
|
||||
//
|
||||
// Input:
|
||||
// - externalEncoder : Encoder object to be used for encoding frames inserted
|
||||
// with the AddVideoFrame API.
|
||||
// - payloadType : The payload type bound which this encoder is bound to.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 RegisterExternalEncoder(VideoEncoder* externalEncoder,
|
||||
WebRtc_UWord8 payloadType,
|
||||
bool internalSource = false) = 0;
|
||||
|
||||
// API to get codec config parameters to be sent out-of-band to a receiver.
|
||||
//
|
||||
// Input:
|
||||
// - buffer : Memory where the codec config parameters should be written.
|
||||
// - size : Size of the memory available.
|
||||
//
|
||||
// Return value : Number of bytes written, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 CodecConfigParameters(WebRtc_UWord8* buffer, WebRtc_Word32 size) = 0;
|
||||
|
||||
// API to get currently configured encoder target bit rate.
|
||||
//
|
||||
// Return value : The encoder target bit rate, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_UWord32 Bitrate() const = 0;
|
||||
|
||||
// API to get currently configured encoder target frame rate.
|
||||
//
|
||||
// Return value : The encoder target frame rate, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_UWord32 FrameRate() const = 0;
|
||||
|
||||
// Sets the parameters describing the send channel. These parameters are inputs to the
|
||||
// Media Optimization inside the VCM and also specifies the target bit rate for the
|
||||
// encoder. Bit rate used by NACK should already be compensated for by the user.
|
||||
//
|
||||
// Input:
|
||||
// - availableBandWidth : Band width available for the VCM in kbit/s.
|
||||
// - lossRate : Fractions of lost packets the past second.
|
||||
// (loss rate in percent = 100 * packetLoss / 255)
|
||||
// - RTT : Current round-trip time in ms.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 SetChannelParameters(WebRtc_UWord32 availableBandWidth,
|
||||
WebRtc_UWord8 lossRate,
|
||||
WebRtc_UWord32 RTT) = 0;
|
||||
|
||||
// Sets the parameters describing the receive channel. These parameters are inputs to the
|
||||
// Media Optimization inside the VCM.
|
||||
//
|
||||
// Input:
|
||||
// - RTT : Current round-trip time in ms.
|
||||
// with the most amount available bandwidth in a conference
|
||||
// scenario
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 SetReceiveChannelParameters(WebRtc_UWord32 RTT) = 0;
|
||||
|
||||
// Register a transport callback which will be called to deliver the encoded data and
|
||||
// side information.
|
||||
//
|
||||
// Input:
|
||||
// - transport : The callback object to register.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 RegisterTransportCallback(VCMPacketizationCallback* transport) = 0;
|
||||
|
||||
// Register video output information callback which will be called to deliver information
|
||||
// about the video stream produced by the encoder, for instance the average frame rate and
|
||||
// bit rate.
|
||||
//
|
||||
// Input:
|
||||
// - outputInformation : The callback object to register.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 RegisterSendStatisticsCallback(
|
||||
VCMSendStatisticsCallback* sendStats) = 0;
|
||||
|
||||
// Register a video quality settings callback which will be called when
|
||||
// frame rate/dimensions need to be updated for video quality optimization
|
||||
//
|
||||
// Input:
|
||||
// - videoQMSettings : The callback object to register.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error
|
||||
virtual WebRtc_Word32 RegisterVideoQMCallback(VCMQMSettingsCallback* videoQMSettings) = 0;
|
||||
|
||||
// Register a video protection callback which will be called to deliver
|
||||
// the requested FEC rate and NACK status (on/off).
|
||||
//
|
||||
// Input:
|
||||
// - protection : The callback object to register.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 RegisterProtectionCallback(VCMProtectionCallback* protection) = 0;
|
||||
|
||||
// Enable or disable a video protection method.
|
||||
//
|
||||
// Input:
|
||||
// - videoProtection : The method to enable or disable.
|
||||
// - enable : True if the method should be enabled, false if
|
||||
// it should be disabled.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 SetVideoProtection(VCMVideoProtection videoProtection,
|
||||
bool enable) = 0;
|
||||
|
||||
// Add one raw video frame to the encoder. This function does all the necessary
|
||||
// processing, then decides what frame type to encode, or if the frame should be
|
||||
// dropped. If the frame should be encoded it passes the frame to the encoder
|
||||
// before it returns.
|
||||
//
|
||||
// Input:
|
||||
// - videoFrame : Video frame to encode.
|
||||
// - codecSpecificInfo : Extra codec information, e.g., pre-parsed in-band signaling.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 AddVideoFrame(const VideoFrame& videoFrame,
|
||||
const VideoContentMetrics* _contentMetrics = NULL,
|
||||
const void* codecSpecificInfo = NULL) = 0;
|
||||
|
||||
// Next frame encoded should be of the type frameType.
|
||||
//
|
||||
// Input:
|
||||
// - frameType : The frame type to encode next time a VideoFrame
|
||||
// is added to the module.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 FrameTypeRequest(FrameType frameType) = 0;
|
||||
|
||||
// Frame Dropper enable. Can be used to disable the frame dropping when the encoder
|
||||
// over-uses its bit rate. This API is designed to be used when the encoded frames
|
||||
// are supposed to be stored to an AVI file, or when the I420 codec is used and the
|
||||
// target bit rate shouldn't affect the frame rate.
|
||||
//
|
||||
// Input:
|
||||
// - enable : True to enable the setting, false to disable it.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 EnableFrameDropper(bool enable) = 0;
|
||||
|
||||
// Sent frame counters
|
||||
virtual WebRtc_Word32 SentFrameCount(VCMFrameCount& frameCount) const = 0;
|
||||
|
||||
/*
|
||||
* Receiver
|
||||
*/
|
||||
|
||||
// The receiver state of the VCM will be initialized to the
|
||||
// same state as when the VCM was created. This will not interrupt
|
||||
// or effect the send side functionality of VCM. VCM will lose all the
|
||||
// decoding-related settings by calling this function. All frames
|
||||
// inside the jitter buffer are flushed and the delay is reset.
|
||||
// For instance, a receive codec has to be registered again.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 InitializeReceiver() = 0;
|
||||
|
||||
// Register possible receive codecs, can be called multiple times for different codecs.
|
||||
// The module will automatically switch between registered codecs depending on the
|
||||
// payload type of incoming frames. The actual decoder will be created when needed.
|
||||
//
|
||||
// Input:
|
||||
// - receiveCodec : Settings for the codec to be registered.
|
||||
// - numberOfCores : Number of CPU cores that the decoder is allowed to use.
|
||||
// - requireKeyFrame : Set this to true if you don't want any delta frames
|
||||
// to be decoded until the first key frame has been decoded.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 RegisterReceiveCodec(const VideoCodec* receiveCodec,
|
||||
WebRtc_Word32 numberOfCores,
|
||||
bool requireKeyFrame = false) = 0;
|
||||
|
||||
// Register an externally defined decoder/renderer object. Can be a decoder only or a
|
||||
// decoder coupled with a renderer. Note that RegisterReceiveCodec must be called to
|
||||
// be used for decoding incoming streams.
|
||||
//
|
||||
// Input:
|
||||
// - externalDecoder : The external decoder/renderer object.
|
||||
// - payloadType : The payload type which this decoder should be
|
||||
// registered to.
|
||||
// - internalRenderTiming : True if the internal renderer (if any) of the decoder
|
||||
// object can make sure to render at a given time in ms.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 RegisterExternalDecoder(VideoDecoder* externalDecoder,
|
||||
WebRtc_UWord8 payloadType,
|
||||
bool internalRenderTiming) = 0;
|
||||
|
||||
// Register a receive callback. Will be called whenever there is a new frame ready
|
||||
// for rendering.
|
||||
//
|
||||
// Input:
|
||||
// - receiveCallback : The callback object to be used by the module when a
|
||||
// frame is ready for rendering.
|
||||
// De-register with a NULL pointer.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 RegisterReceiveCallback(VCMReceiveCallback* receiveCallback) = 0;
|
||||
|
||||
// Register a receive statistics callback which will be called to deliver information
|
||||
// about the video stream received by the receiving side of the VCM, for instance the
|
||||
// average frame rate and bit rate.
|
||||
//
|
||||
// Input:
|
||||
// - receiveStats : The callback object to register.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 RegisterReceiveStatisticsCallback(
|
||||
VCMReceiveStatisticsCallback* receiveStats) = 0;
|
||||
|
||||
// Register a frame type request callback. This callback will be called when the
|
||||
// module needs to request specific frame types from the send side.
|
||||
//
|
||||
// Input:
|
||||
// - frameTypeCallback : The callback object to be used by the module when
|
||||
// requesting a specific type of frame from the send side.
|
||||
// De-register with a NULL pointer.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 RegisterFrameTypeCallback(
|
||||
VCMFrameTypeCallback* frameTypeCallback) = 0;
|
||||
|
||||
// Register a frame storage callback. This callback will be called right before an
|
||||
// encoded frame is given to the decoder. Useful for recording the incoming video sequence.
|
||||
//
|
||||
// Input:
|
||||
// - frameStorageCallback : The callback object used by the module
|
||||
// to store a received encoded frame.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 RegisterFrameStorageCallback(
|
||||
VCMFrameStorageCallback* frameStorageCallback) = 0;
|
||||
|
||||
// Registers a callback which is called whenever the receive side of the VCM
|
||||
// encounters holes in the packet sequence and needs packets to be retransmitted.
|
||||
//
|
||||
// Input:
|
||||
// - callback : The callback to be registered in the VCM.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// <0, on error.
|
||||
virtual WebRtc_Word32 RegisterPacketRequestCallback(
|
||||
VCMPacketRequestCallback* callback) = 0;
|
||||
|
||||
// Waits for the next frame in the jitter buffer to become complete
|
||||
// (waits no longer than maxWaitTimeMs), then passes it to the decoder for decoding.
|
||||
// Should be called as often as possible to get the most out of the decoder.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 Decode(WebRtc_UWord16 maxWaitTimeMs = 200) = 0;
|
||||
|
||||
// Waits for the next frame in the dual jitter buffer to become complete
|
||||
// (waits no longer than maxWaitTimeMs), then passes it to the dual decoder
|
||||
// for decoding. This will never trigger a render callback. Should be
|
||||
// called frequently, and as long as it returns 1 it should be called again
|
||||
// as soon as possible.
|
||||
//
|
||||
// Return value : 1, if a frame was decoded
|
||||
// 0, if no frame was decoded
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 DecodeDualFrame(WebRtc_UWord16 maxWaitTimeMs = 200) = 0;
|
||||
|
||||
// Decodes a frame and sets an appropriate render time in ms relative to the system time.
|
||||
// Should be used in conjunction with VCMFrameStorageCallback.
|
||||
//
|
||||
// Input:
|
||||
// - frameFromStorage : Encoded frame read from file or received through
|
||||
// the VCMFrameStorageCallback callback.
|
||||
//
|
||||
// Return value: : VCM_OK, on success
|
||||
// < 0, on error
|
||||
virtual WebRtc_Word32 DecodeFromStorage(const EncodedVideoData& frameFromStorage) = 0;
|
||||
|
||||
// Reset the decoder state to the initial state.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 ResetDecoder() = 0;
|
||||
|
||||
// API to get the codec which is currently used for decoding by the module.
|
||||
//
|
||||
// Input:
|
||||
// - currentReceiveCodec : Settings for the codec to be registered.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 ReceiveCodec(VideoCodec* currentReceiveCodec) const = 0;
|
||||
|
||||
// API to get the codec type currently used for decoding by the module.
|
||||
//
|
||||
// Return value : codecy type, on success.
|
||||
// kVideoCodecUnknown, on error or if no receive codec is registered
|
||||
virtual VideoCodecType ReceiveCodec() const = 0;
|
||||
|
||||
// Insert a parsed packet into the receiver side of the module. Will be placed in the
|
||||
// jitter buffer waiting for the frame to become complete. Returns as soon as the packet
|
||||
// has been placed in the jitter buffer.
|
||||
//
|
||||
// Input:
|
||||
// - incomingPayload : Payload of the packet.
|
||||
// - payloadLength : Length of the payload.
|
||||
// - rtpInfo : The parsed header.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 IncomingPacket(const WebRtc_UWord8* incomingPayload,
|
||||
WebRtc_UWord32 payloadLength,
|
||||
const WebRtcRTPHeader& rtpInfo) = 0;
|
||||
|
||||
// Sets codec config parameters received out-of-band to the currently
|
||||
// selected receive codec.
|
||||
//
|
||||
// Input:
|
||||
// - payloadType : Payload type which specifies which codec to set these
|
||||
// parameters to.
|
||||
// - buffer : Codec config parameters.
|
||||
// - length : Length of the parameter data.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 SetCodecConfigParameters(WebRtc_UWord8 payloadType,
|
||||
const WebRtc_UWord8* buffer,
|
||||
WebRtc_Word32 length) = 0;
|
||||
|
||||
// Minimum playout delay (Used for lip-sync). This is the minimum delay required
|
||||
// to sync with audio. Not included in VideoCodingModule::Delay()
|
||||
// Defaults to 0 ms.
|
||||
//
|
||||
// Input:
|
||||
// - minPlayoutDelayMs : Additional delay in ms.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 SetMinimumPlayoutDelay(WebRtc_UWord32 minPlayoutDelayMs) = 0;
|
||||
|
||||
// Set the time required by the renderer to render a frame.
|
||||
//
|
||||
// Input:
|
||||
// - timeMS : The time in ms required by the renderer to render a frame.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 SetRenderDelay(WebRtc_UWord32 timeMS) = 0;
|
||||
|
||||
// The total delay desired by the VCM. Can be less than the minimum
|
||||
// delay set with SetMinimumPlayoutDelay.
|
||||
//
|
||||
// Return value : Total delay in ms, on success.
|
||||
// < 0, on error.
|
||||
virtual WebRtc_Word32 Delay() const = 0;
|
||||
|
||||
// Get the received frame counters. Keeps track of the number of each frame type
|
||||
// received since the start of the call.
|
||||
//
|
||||
// Output:
|
||||
// - frameCount : Struct to be filled with the number of frames received.
|
||||
//
|
||||
// Return value : VCM_OK, on success.
|
||||
// <0, on error.
|
||||
virtual WebRtc_Word32 ReceivedFrameCount(VCMFrameCount& frameCount) const = 0;
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_INTERFACE_VIDEO_CODING_H_
|
||||
191
modules/video_coding/main/interface/video_coding_defines.h
Normal file
191
modules/video_coding/main/interface/video_coding_defines.h
Normal file
@ -0,0 +1,191 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_INTERFACE_VIDEO_CODING_DEFINES_H_
|
||||
#define WEBRTC_MODULES_INTERFACE_VIDEO_CODING_DEFINES_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
#include "module_common_types.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Error codes
|
||||
#define VCM_REQUEST_SLI 2
|
||||
#define VCM_MISSING_CALLBACK 1
|
||||
#define VCM_OK 0
|
||||
#define VCM_GENERAL_ERROR -1
|
||||
#define VCM_LEVEL_EXCEEDED -2
|
||||
#define VCM_MEMORY -3
|
||||
#define VCM_PARAMETER_ERROR -4
|
||||
#define VCM_UNKNOWN_PAYLOAD -5
|
||||
#define VCM_CODEC_ERROR -6
|
||||
#define VCM_UNINITIALIZED -7
|
||||
#define VCM_NO_CODEC_REGISTERED -8
|
||||
#define VCM_JITTER_BUFFER_ERROR -9
|
||||
#define VCM_OLD_PACKET_ERROR -10
|
||||
#define VCM_NO_FRAME_DECODED -11
|
||||
#define VCM_ERROR_REQUEST_SLI -12
|
||||
#define VCM_NOT_IMPLEMENTED -20
|
||||
|
||||
#define VCM_H263_PAYLOAD_TYPE 34
|
||||
#define VCM_RED_PAYLOAD_TYPE 96
|
||||
#define VCM_ULPFEC_PAYLOAD_TYPE 97
|
||||
#define VCM_H263_1998_PAYLOAD_TYPE 121
|
||||
#define VCM_VP8_PAYLOAD_TYPE 120
|
||||
#define VCM_I420_PAYLOAD_TYPE 124
|
||||
|
||||
enum VCMNackProperties
|
||||
{
|
||||
kNackHistoryLength = 450
|
||||
};
|
||||
|
||||
enum VCMH263FrameDrop
|
||||
{
|
||||
kDecodePFrames,
|
||||
kDropPFrames
|
||||
};
|
||||
|
||||
enum VCMVideoProtection
|
||||
{
|
||||
kProtectionNack, // Both send-side and receive-side
|
||||
kProtectionNackSender, // Send-side only
|
||||
kProtectionNackReceiver, // Receive-side only
|
||||
kProtectionDualDecoder,
|
||||
kProtectionFEC,
|
||||
kProtectionNackFEC,
|
||||
kProtectionKeyOnLoss,
|
||||
kProtectionKeyOnKeyLoss,
|
||||
kProtectionPeriodicKeyFrames
|
||||
};
|
||||
|
||||
enum VCMTemporalDecimation
|
||||
{
|
||||
kBitrateOverUseDecimation,
|
||||
};
|
||||
|
||||
struct VCMFrameCount
|
||||
{
|
||||
WebRtc_UWord32 numKeyFrames;
|
||||
WebRtc_UWord32 numDeltaFrames;
|
||||
};
|
||||
|
||||
|
||||
// Callback class used for sending data ready to be packetized
|
||||
class VCMPacketizationCallback
|
||||
{
|
||||
public:
|
||||
virtual WebRtc_Word32 SendData(const FrameType frameType,
|
||||
const WebRtc_UWord8 payloadType,
|
||||
const WebRtc_UWord32 timeStamp,
|
||||
const WebRtc_UWord8* payloadData,
|
||||
const WebRtc_UWord32 payloadSize,
|
||||
const RTPFragmentationHeader& fragmentationHeader) = 0;
|
||||
protected:
|
||||
virtual ~VCMPacketizationCallback() {}
|
||||
};
|
||||
|
||||
// Callback class used for passing decoded frames which are ready to be rendered.
|
||||
class VCMFrameStorageCallback
|
||||
{
|
||||
public:
|
||||
virtual WebRtc_Word32 StoreReceivedFrame(const EncodedVideoData& frameToStore) = 0;
|
||||
|
||||
protected:
|
||||
virtual ~VCMFrameStorageCallback() {}
|
||||
};
|
||||
|
||||
// Callback class used for passing decoded frames which are ready to be rendered.
|
||||
class VCMReceiveCallback
|
||||
{
|
||||
public:
|
||||
virtual WebRtc_Word32 FrameToRender(VideoFrame& videoFrame) = 0;
|
||||
virtual WebRtc_Word32 ReceivedDecodedReferenceFrame(const WebRtc_UWord64 pictureId) {return -1;}
|
||||
|
||||
protected:
|
||||
virtual ~VCMReceiveCallback() {}
|
||||
};
|
||||
|
||||
// Callback class used for informing the user of the bit rate and frame rate produced by the
|
||||
// encoder.
|
||||
class VCMSendStatisticsCallback
|
||||
{
|
||||
public:
|
||||
virtual WebRtc_Word32 SendStatistics(const WebRtc_UWord32 bitRate,
|
||||
const WebRtc_UWord32 frameRate) = 0;
|
||||
|
||||
protected:
|
||||
virtual ~VCMSendStatisticsCallback() {}
|
||||
};
|
||||
|
||||
// Callback class used for informing the user of the incoming bit rate and frame rate.
|
||||
class VCMReceiveStatisticsCallback
|
||||
{
|
||||
public:
|
||||
virtual WebRtc_Word32 ReceiveStatistics(const WebRtc_UWord32 bitRate,
|
||||
const WebRtc_UWord32 frameRate) = 0;
|
||||
|
||||
protected:
|
||||
virtual ~VCMReceiveStatisticsCallback() {}
|
||||
};
|
||||
|
||||
// Callback class used for telling the user about the requested amount of bit stream protection
|
||||
// Key frame FEC rate, delta frame and whether NACK should be on or off.
|
||||
class VCMProtectionCallback
|
||||
{
|
||||
public:
|
||||
virtual WebRtc_Word32 ProtectionRequest(const WebRtc_UWord8 deltaFECRate,
|
||||
const WebRtc_UWord8 keyFECRate,
|
||||
const bool nack) = 0;
|
||||
|
||||
protected:
|
||||
virtual ~VCMProtectionCallback() {}
|
||||
};
|
||||
|
||||
// Callback class used for telling the user about what frame type needed to continue decoding.
|
||||
// Typically a key frame when the stream has been corrupted in some way.
|
||||
class VCMFrameTypeCallback
|
||||
{
|
||||
public:
|
||||
virtual WebRtc_Word32 FrameTypeRequest(const FrameType frameType) = 0;
|
||||
virtual WebRtc_Word32 SliceLossIndicationRequest(const WebRtc_UWord64 pictureId) {return -1;}
|
||||
|
||||
protected:
|
||||
virtual ~VCMFrameTypeCallback() {}
|
||||
};
|
||||
|
||||
// Callback class used for telling the user about which packet sequence numbers are currently
|
||||
// missing and need to be resent.
|
||||
class VCMPacketRequestCallback
|
||||
{
|
||||
public:
|
||||
virtual WebRtc_Word32 ResendPackets(const WebRtc_UWord16* sequenceNumbers,
|
||||
WebRtc_UWord16 length) = 0;
|
||||
|
||||
protected:
|
||||
virtual ~VCMPacketRequestCallback() {}
|
||||
};
|
||||
|
||||
// Callback used to inform the user of the the desired resolution
|
||||
// as subscribed by Media Optimization (Quality Modes)
|
||||
class VCMQMSettingsCallback
|
||||
{
|
||||
public:
|
||||
virtual WebRtc_Word32 SetVideoQMSettings(const WebRtc_UWord32 frameRate,
|
||||
const WebRtc_UWord32 width,
|
||||
const WebRtc_UWord32 height) = 0;
|
||||
|
||||
protected:
|
||||
virtual ~VCMQMSettingsCallback() {}
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_INTERFACE_VIDEO_CODING_DEFINES_H_
|
||||
799
modules/video_coding/main/source/codec_database.cc
Normal file
799
modules/video_coding/main/source/codec_database.cc
Normal file
@ -0,0 +1,799 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "codec_database.h"
|
||||
#include "../../../../engine_configurations.h"
|
||||
#include "internal_defines.h"
|
||||
#include "trace.h"
|
||||
|
||||
#if defined(_WIN32)
|
||||
// VS 2005: Don't warn for default initialized arrays. See help for more info.
|
||||
// Don't warn for strncpy being unsecure.
|
||||
// switch statement contains 'default' but no 'case' labels
|
||||
#pragma warning(disable:4351; disable:4996; disable:4065)
|
||||
#endif
|
||||
|
||||
// Supported codecs
|
||||
#ifdef VIDEOCODEC_VP8
|
||||
#include "vp8.h"
|
||||
#endif
|
||||
#ifdef VIDEOCODEC_I420
|
||||
#include "i420.h"
|
||||
#endif
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
VCMDecoderMapItem::VCMDecoderMapItem(VideoCodec* settings,
|
||||
WebRtc_UWord32 numberOfCores,
|
||||
bool requireKeyFrame)
|
||||
:
|
||||
_settings(settings),
|
||||
_numberOfCores(numberOfCores),
|
||||
_requireKeyFrame(requireKeyFrame)
|
||||
{
|
||||
}
|
||||
|
||||
VCMExtDecoderMapItem::VCMExtDecoderMapItem(VideoDecoder* externalDecoderInstance,
|
||||
WebRtc_UWord8 payloadType,
|
||||
bool internalRenderTiming)
|
||||
:
|
||||
_payloadType(payloadType),
|
||||
_externalDecoderInstance(externalDecoderInstance),
|
||||
_internalRenderTiming(internalRenderTiming)
|
||||
{
|
||||
}
|
||||
|
||||
VCMCodecDataBase::VCMCodecDataBase(WebRtc_Word32 id):
|
||||
_id(id),
|
||||
_numberOfCores(0),
|
||||
_maxPayloadSize(kDefaultPayloadSize),
|
||||
_periodicKeyFrames(false),
|
||||
_currentEncIsExternal(false),
|
||||
_sendCodec(),
|
||||
_receiveCodec(),
|
||||
_externalPayloadType(0),
|
||||
_externalEncoder(NULL),
|
||||
_internalSource(false),
|
||||
_ptrEncoder(NULL),
|
||||
_ptrDecoder(NULL),
|
||||
_currentDecIsExternal(false),
|
||||
_decMap(),
|
||||
_decExternalMap()
|
||||
{
|
||||
//
|
||||
}
|
||||
|
||||
VCMCodecDataBase::~VCMCodecDataBase()
|
||||
{
|
||||
Reset();
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMCodecDataBase::Version(WebRtc_Word8* version,
|
||||
WebRtc_UWord32& remainingBufferInBytes,
|
||||
WebRtc_UWord32& position) const
|
||||
{
|
||||
VCMGenericEncoder* encoder = NULL;
|
||||
VideoCodec settings;
|
||||
WebRtc_Word32 ret;
|
||||
for (int i = 0; i < VCMCodecDataBase::NumberOfCodecs(); i++)
|
||||
{
|
||||
ret = VCMCodecDataBase::Codec(i, &settings);
|
||||
if (ret < 0)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
encoder = CreateEncoder(settings.codecType);
|
||||
if (encoder == NULL)
|
||||
{
|
||||
return VCM_MEMORY;
|
||||
}
|
||||
ret = encoder->_encoder.Version(&version[position], remainingBufferInBytes);
|
||||
if (ret < 0)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
remainingBufferInBytes -= ret;
|
||||
position += ret;
|
||||
delete &encoder->_encoder;
|
||||
delete encoder;
|
||||
}
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMCodecDataBase::Reset()
|
||||
{
|
||||
WebRtc_Word32 ret = ResetReceiver();
|
||||
if (ret < 0)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
ret = ResetSender();
|
||||
if (ret < 0)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMCodecDataBase::ResetSender()
|
||||
{
|
||||
DeleteEncoder();
|
||||
_periodicKeyFrames = false;
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
VCMGenericEncoder*
|
||||
VCMCodecDataBase::CreateEncoder(VideoCodecType type) const
|
||||
{
|
||||
switch(type)
|
||||
{
|
||||
#ifdef VIDEOCODEC_VP8
|
||||
case kVideoCodecVP8:
|
||||
return new VCMGenericEncoder(*(new VP8Encoder));
|
||||
break;
|
||||
#endif
|
||||
#ifdef VIDEOCODEC_I420
|
||||
case kVideoCodecI420:
|
||||
return new VCMGenericEncoder(*(new I420Encoder));
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
return NULL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
VCMCodecDataBase::DeleteEncoder()
|
||||
{
|
||||
if (_ptrEncoder)
|
||||
{
|
||||
_ptrEncoder->Release();
|
||||
if (!_currentEncIsExternal)
|
||||
{
|
||||
delete &_ptrEncoder->_encoder;
|
||||
}
|
||||
delete _ptrEncoder;
|
||||
_ptrEncoder = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
WebRtc_UWord8
|
||||
VCMCodecDataBase::NumberOfCodecs()
|
||||
{
|
||||
return VCM_NUM_VIDEO_CODECS_AVAILABLE;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMCodecDataBase::Codec(WebRtc_UWord8 listId, VideoCodec *settings)
|
||||
{
|
||||
if (settings == NULL)
|
||||
{
|
||||
return VCM_PARAMETER_ERROR;
|
||||
}
|
||||
|
||||
if (listId >= VCM_NUM_VIDEO_CODECS_AVAILABLE)
|
||||
{
|
||||
return VCM_PARAMETER_ERROR;
|
||||
}
|
||||
memset(settings, 0, sizeof(VideoCodec));
|
||||
switch (listId)
|
||||
{
|
||||
#ifdef VIDEOCODEC_VP8
|
||||
case VCM_VP8_IDX:
|
||||
{
|
||||
strncpy(settings->plName, "VP8", 3);
|
||||
settings->codecType = kVideoCodecVP8;
|
||||
// 96 to 127 dynamic payload types for video codecs
|
||||
settings->plType = VCM_VP8_PAYLOAD_TYPE;
|
||||
settings->startBitrate = 100;
|
||||
settings->minBitrate = VCM_MIN_BITRATE;
|
||||
settings->maxBitrate = 0;
|
||||
settings->maxFramerate = VCM_DEFAULT_FRAME_RATE;
|
||||
settings->width = VCM_DEFAULT_CODEC_WIDTH;
|
||||
settings->height = VCM_DEFAULT_CODEC_HEIGHT;
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
#ifdef VIDEOCODEC_I420
|
||||
case VCM_I420_IDX:
|
||||
{
|
||||
strncpy(settings->plName, "I420", 4);
|
||||
settings->codecType = kVideoCodecI420;
|
||||
// 96 to 127 dynamic payload types for video codecs
|
||||
settings->plType = VCM_I420_PAYLOAD_TYPE;
|
||||
// Bitrate needed for this size and framerate
|
||||
settings->startBitrate = 3*VCM_DEFAULT_CODEC_WIDTH*
|
||||
VCM_DEFAULT_CODEC_HEIGHT*8*
|
||||
VCM_DEFAULT_FRAME_RATE/1000/2;
|
||||
settings->maxBitrate = settings->startBitrate;
|
||||
settings->maxFramerate = VCM_DEFAULT_FRAME_RATE;
|
||||
settings->width = VCM_DEFAULT_CODEC_WIDTH;
|
||||
settings->height = VCM_DEFAULT_CODEC_HEIGHT;
|
||||
settings->minBitrate = VCM_MIN_BITRATE;
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
default:
|
||||
{
|
||||
return VCM_PARAMETER_ERROR;
|
||||
}
|
||||
}
|
||||
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMCodecDataBase::Codec(VideoCodecType codecType, VideoCodec* settings)
|
||||
{
|
||||
for (int i = 0; i < VCMCodecDataBase::NumberOfCodecs(); i++)
|
||||
{
|
||||
const WebRtc_Word32 ret = VCMCodecDataBase::Codec(i, settings);
|
||||
if (ret != VCM_OK)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
if (codecType == settings->codecType)
|
||||
{
|
||||
return VCM_OK;
|
||||
}
|
||||
}
|
||||
return VCM_PARAMETER_ERROR;
|
||||
}
|
||||
|
||||
// assuming only one registered encoder - since only one used, no need for more
|
||||
WebRtc_Word32
|
||||
VCMCodecDataBase::RegisterSendCodec(const VideoCodec* sendCodec,
|
||||
WebRtc_UWord32 numberOfCores,
|
||||
WebRtc_UWord32 maxPayloadSize)
|
||||
{
|
||||
if (sendCodec == NULL)
|
||||
{
|
||||
return VCM_UNINITIALIZED;
|
||||
}
|
||||
if (maxPayloadSize == 0)
|
||||
{
|
||||
maxPayloadSize = kDefaultPayloadSize;
|
||||
}
|
||||
if (numberOfCores > 32)
|
||||
{
|
||||
return VCM_PARAMETER_ERROR;
|
||||
}
|
||||
if (strcmp(sendCodec->plName, "H263") == 0 &&
|
||||
(sendCodec->plType != 34))
|
||||
{
|
||||
return VCM_PARAMETER_ERROR;
|
||||
}
|
||||
if (sendCodec->plType <= 0)
|
||||
{
|
||||
return VCM_PARAMETER_ERROR;
|
||||
}
|
||||
// Make sure the start bit rate is sane...
|
||||
if (sendCodec->startBitrate > 1000000)
|
||||
{
|
||||
return VCM_PARAMETER_ERROR;
|
||||
}
|
||||
if (sendCodec->codecType == kVideoCodecUnknown)
|
||||
{
|
||||
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
|
||||
}
|
||||
_numberOfCores = numberOfCores;
|
||||
_maxPayloadSize = maxPayloadSize;
|
||||
|
||||
memcpy(&_sendCodec, sendCodec, sizeof(VideoCodec));
|
||||
|
||||
if (_sendCodec.maxBitrate == 0)
|
||||
{
|
||||
// max is one bit per pixel
|
||||
_sendCodec.maxBitrate = ((WebRtc_Word32)_sendCodec.height *
|
||||
(WebRtc_Word32)_sendCodec.width *
|
||||
(WebRtc_Word32)_sendCodec.maxFramerate) / 1000;
|
||||
if (_sendCodec.startBitrate > _sendCodec.maxBitrate)
|
||||
{
|
||||
// but if the customer tries to set a higher start bit rate we will increase
|
||||
// the max accordingly
|
||||
_sendCodec.maxBitrate = _sendCodec.startBitrate;
|
||||
}
|
||||
}
|
||||
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMCodecDataBase::SendCodec(VideoCodec* currentSendCodec) const
|
||||
{
|
||||
WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, VCMId(_id), "SendCodec");
|
||||
|
||||
if(_ptrEncoder == NULL)
|
||||
{
|
||||
return VCM_UNINITIALIZED;
|
||||
}
|
||||
memcpy(currentSendCodec, &_sendCodec, sizeof(VideoCodec));
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
VideoCodecType
|
||||
VCMCodecDataBase::SendCodec() const
|
||||
{
|
||||
WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, VCMId(_id),
|
||||
"SendCodec type");
|
||||
if (_ptrEncoder == NULL)
|
||||
{
|
||||
return kVideoCodecUnknown;
|
||||
}
|
||||
return _sendCodec.codecType;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMCodecDataBase::DeRegisterExternalEncoder(WebRtc_UWord8 payloadType, bool& wasSendCodec)
|
||||
{
|
||||
wasSendCodec = false;
|
||||
if (_externalPayloadType != payloadType)
|
||||
{
|
||||
return VCM_PARAMETER_ERROR;
|
||||
}
|
||||
if (_sendCodec.plType == payloadType)
|
||||
{
|
||||
//De-register as send codec if needed
|
||||
DeleteEncoder();
|
||||
memset(&_sendCodec, 0, sizeof(VideoCodec));
|
||||
_currentEncIsExternal = false;
|
||||
wasSendCodec = true;
|
||||
}
|
||||
_externalPayloadType = 0;
|
||||
_externalEncoder = NULL;
|
||||
_internalSource = false;
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMCodecDataBase::RegisterExternalEncoder(VideoEncoder* externalEncoder,
|
||||
WebRtc_UWord8 payloadType,
|
||||
bool internalSource)
|
||||
{
|
||||
// since only one encoder can be used at a given time,
|
||||
// only one external encoder can be registered/used
|
||||
_externalEncoder = externalEncoder;
|
||||
_externalPayloadType = payloadType;
|
||||
_internalSource = internalSource;
|
||||
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
VCMGenericEncoder*
|
||||
VCMCodecDataBase::SetEncoder(const VideoCodec* settings,
|
||||
VCMEncodedFrameCallback* VCMencodedFrameCallback)
|
||||
|
||||
{
|
||||
// if encoder exists, will destroy it and create new one
|
||||
DeleteEncoder();
|
||||
|
||||
if (settings->plType == _externalPayloadType)
|
||||
{
|
||||
// External encoder
|
||||
_ptrEncoder = new VCMGenericEncoder(*_externalEncoder, _internalSource);
|
||||
_currentEncIsExternal = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
_ptrEncoder = CreateEncoder(settings->codecType);
|
||||
_currentEncIsExternal = false;
|
||||
}
|
||||
|
||||
VCMencodedFrameCallback->SetPayloadType(settings->plType);
|
||||
|
||||
if (_ptrEncoder == NULL)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (_ptrEncoder->InitEncode(settings, _numberOfCores, _maxPayloadSize) < 0)
|
||||
{
|
||||
DeleteEncoder();
|
||||
return NULL;
|
||||
}
|
||||
else if (_ptrEncoder->RegisterEncodeCallback(VCMencodedFrameCallback) < 0)
|
||||
{
|
||||
DeleteEncoder();
|
||||
return NULL;
|
||||
}
|
||||
// Intentionally don't check return value since the encoder registration
|
||||
// shouldn't fail because the codec doesn't support changing the
|
||||
// periodic key frame setting.
|
||||
_ptrEncoder->SetPeriodicKeyFrames(_periodicKeyFrames);
|
||||
return _ptrEncoder;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMCodecDataBase::SetPeriodicKeyFrames(bool enable)
|
||||
{
|
||||
_periodicKeyFrames = enable;
|
||||
if (_ptrEncoder != NULL)
|
||||
{
|
||||
return _ptrEncoder->SetPeriodicKeyFrames(_periodicKeyFrames);
|
||||
}
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMCodecDataBase::RegisterReceiveCodec(const VideoCodec* receiveCodec,
|
||||
WebRtc_UWord32 numberOfCores,
|
||||
bool requireKeyFrame)
|
||||
{
|
||||
WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCoding, VCMId(_id),
|
||||
"Codec: %s, Payload type %d, Height %d, Width %d, Bitrate %d, Framerate %d.",
|
||||
receiveCodec->plName, receiveCodec->plType,
|
||||
receiveCodec->height, receiveCodec->width,
|
||||
receiveCodec->startBitrate, receiveCodec->maxFramerate);
|
||||
|
||||
// check if payload value already exists, if so - erase old and insert new
|
||||
DeRegisterReceiveCodec(receiveCodec->plType);
|
||||
if (receiveCodec->codecType == kVideoCodecUnknown)
|
||||
{
|
||||
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
|
||||
}
|
||||
VideoCodec* newReceiveCodec = new VideoCodec(*receiveCodec);
|
||||
_decMap.Insert(receiveCodec->plType,
|
||||
new VCMDecoderMapItem(newReceiveCodec, numberOfCores, requireKeyFrame));
|
||||
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
WebRtc_Word32 VCMCodecDataBase::DeRegisterReceiveCodec(WebRtc_UWord8 payloadType)
|
||||
{
|
||||
MapItem* item = _decMap.Find(payloadType);
|
||||
if (item == NULL)
|
||||
{
|
||||
return VCM_PARAMETER_ERROR;
|
||||
}
|
||||
VCMDecoderMapItem* decItem = static_cast<VCMDecoderMapItem*>(item->GetItem());
|
||||
delete decItem->_settings;
|
||||
delete decItem;
|
||||
_decMap.Erase(item);
|
||||
if (_receiveCodec.plType == payloadType)
|
||||
{
|
||||
// This codec is currently in use.
|
||||
memset(&_receiveCodec, 0, sizeof(VideoCodec));
|
||||
_currentDecIsExternal = false;
|
||||
}
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMCodecDataBase::ResetReceiver()
|
||||
{
|
||||
ReleaseDecoder(_ptrDecoder);
|
||||
_ptrDecoder = NULL;
|
||||
memset(&_receiveCodec, 0, sizeof(VideoCodec));
|
||||
MapItem* item = _decMap.First();
|
||||
while (item != NULL)
|
||||
{
|
||||
VCMDecoderMapItem* decItem = static_cast<VCMDecoderMapItem*>(item->GetItem());
|
||||
if (decItem != NULL)
|
||||
{
|
||||
if (decItem->_settings != NULL)
|
||||
{
|
||||
delete decItem->_settings;
|
||||
}
|
||||
delete decItem;
|
||||
}
|
||||
_decMap.Erase(item);
|
||||
item = _decMap.First();
|
||||
}
|
||||
item = _decExternalMap.First();
|
||||
while (item != NULL)
|
||||
{
|
||||
VCMExtDecoderMapItem* decItem = static_cast<VCMExtDecoderMapItem*>(item->GetItem());
|
||||
if (decItem != NULL)
|
||||
{
|
||||
delete decItem;
|
||||
}
|
||||
_decExternalMap.Erase(item);
|
||||
item = _decExternalMap.First();
|
||||
}
|
||||
_currentDecIsExternal = false;
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMCodecDataBase::DeRegisterExternalDecoder(WebRtc_UWord8 payloadType)
|
||||
{
|
||||
MapItem* item = _decExternalMap.Find(payloadType);
|
||||
if (item == NULL)
|
||||
{
|
||||
// Not found
|
||||
return VCM_PARAMETER_ERROR;
|
||||
}
|
||||
if (_receiveCodec.plType == payloadType)
|
||||
{
|
||||
// Release it if it was registered and in use
|
||||
ReleaseDecoder(_ptrDecoder);
|
||||
_ptrDecoder = NULL;
|
||||
}
|
||||
DeRegisterReceiveCodec(payloadType);
|
||||
VCMExtDecoderMapItem* decItem = static_cast<VCMExtDecoderMapItem*>(item->GetItem());
|
||||
delete decItem;
|
||||
_decExternalMap.Erase(item);
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
// Add the external encoder object to the list of external decoders.
|
||||
// Won't be registered as a receive codec until RegisterReceiveCodec is called.
|
||||
WebRtc_Word32
|
||||
VCMCodecDataBase::RegisterExternalDecoder(VideoDecoder* externalDecoder,
|
||||
WebRtc_UWord8 payloadType,
|
||||
bool internalRenderTiming)
|
||||
{
|
||||
// check if payload value already exists, if so - erase old and insert new
|
||||
VCMExtDecoderMapItem* extDecoder = new VCMExtDecoderMapItem(externalDecoder,
|
||||
payloadType,
|
||||
internalRenderTiming);
|
||||
if (extDecoder == NULL)
|
||||
{
|
||||
return VCM_MEMORY;
|
||||
}
|
||||
DeRegisterExternalDecoder(payloadType);
|
||||
_decExternalMap.Insert(payloadType, extDecoder);
|
||||
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
bool
|
||||
VCMCodecDataBase::DecoderRegistered() const
|
||||
{
|
||||
return (_decMap.Size() > 0);
|
||||
}
|
||||
|
||||
WebRtc_Word32
|
||||
VCMCodecDataBase::ReceiveCodec(VideoCodec* currentReceiveCodec) const
|
||||
{
|
||||
if (_ptrDecoder == NULL)
|
||||
{
|
||||
return VCM_NO_FRAME_DECODED;
|
||||
}
|
||||
memcpy(currentReceiveCodec, &_receiveCodec, sizeof(VideoCodec));
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
VideoCodecType
|
||||
VCMCodecDataBase::ReceiveCodec() const
|
||||
{
|
||||
if (_ptrDecoder == NULL)
|
||||
{
|
||||
return kVideoCodecUnknown;
|
||||
}
|
||||
return _receiveCodec.codecType;
|
||||
}
|
||||
|
||||
VCMGenericDecoder*
|
||||
VCMCodecDataBase::SetDecoder(WebRtc_UWord8 payloadType, VCMDecodedFrameCallback& callback)
|
||||
{
|
||||
if (payloadType == _receiveCodec.plType || payloadType == 0)
|
||||
{
|
||||
return _ptrDecoder;
|
||||
}
|
||||
// check for exisitng decoder, if exists - delete
|
||||
if (_ptrDecoder)
|
||||
{
|
||||
ReleaseDecoder(_ptrDecoder);
|
||||
_ptrDecoder = NULL;
|
||||
memset(&_receiveCodec, 0, sizeof(VideoCodec));
|
||||
}
|
||||
_ptrDecoder = CreateAndInitDecoder(payloadType, _receiveCodec, _currentDecIsExternal);
|
||||
if (_ptrDecoder == NULL)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
if (_ptrDecoder->RegisterDecodeCompleteCallback(&callback) < 0)
|
||||
{
|
||||
ReleaseDecoder(_ptrDecoder);
|
||||
_ptrDecoder = NULL;
|
||||
memset(&_receiveCodec, 0, sizeof(VideoCodec));
|
||||
return NULL;
|
||||
}
|
||||
return _ptrDecoder;
|
||||
}
|
||||
|
||||
// Instantiates and initializes a decoder for payloadType. A registered
// external decoder takes precedence; otherwise an internal one is created.
// On success the decoder settings are copied into newCodec and `external`
// reports which kind was created. Returns NULL on any failure.
VCMGenericDecoder*
VCMCodecDataBase::CreateAndInitDecoder(WebRtc_UWord8 payloadType,
                                       VideoCodec& newCodec,
                                       bool &external) const
{
    VCMDecoderMapItem* decoderItem = FindDecoderItem(payloadType);
    if (decoderItem == NULL)
    {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, VCMId(_id),
                     "Unknown payload type: %u", payloadType);
        return NULL;
    }
    VCMExtDecoderMapItem* externalDecItem = FindExternalDecoderItem(payloadType);
    VCMGenericDecoder* ptrDecoder = NULL;
    if (externalDecItem == NULL)
    {
        // No external decoder registered - create an internal one.
        external = false;
        ptrDecoder = CreateDecoder(decoderItem->_settings->codecType);
    }
    else
    {
        // Wrap the externally registered decoder instance.
        external = true;
        ptrDecoder = new VCMGenericDecoder(
            *externalDecItem->_externalDecoderInstance, _id, true);
    }
    if (ptrDecoder == NULL)
    {
        return NULL;
    }
    if (ptrDecoder->InitDecode(decoderItem->_settings,
                               decoderItem->_numberOfCores,
                               decoderItem->_requireKeyFrame) < 0)
    {
        ReleaseDecoder(ptrDecoder);
        return NULL;
    }
    // Push any stored out-of-band config data to the new decoder.
    SetCodecConfigParameters(*ptrDecoder, *decoderItem->_settings);
    memcpy(&newCodec, decoderItem->_settings, sizeof(VideoCodec));
    return ptrDecoder;
}
|
||||
|
||||
// Clones the active decoder. Returns NULL when no decoder is set or the
// underlying decoder does not support copying.
VCMGenericDecoder*
VCMCodecDataBase::CreateDecoderCopy() const
{
    if (_ptrDecoder == NULL)
    {
        return NULL;
    }
    VideoDecoder* clonedDecoder = _ptrDecoder->_decoder.Copy();
    if (clonedDecoder == NULL)
    {
        return NULL;
    }
    return new VCMGenericDecoder(*clonedDecoder, _id, _ptrDecoder->External());
}
|
||||
|
||||
void
|
||||
VCMCodecDataBase::CopyDecoder(const VCMGenericDecoder& decoder)
|
||||
{
|
||||
VideoDecoder* decoderCopy = decoder._decoder.Copy();
|
||||
if (decoderCopy != NULL)
|
||||
{
|
||||
ReleaseDecoder(_ptrDecoder);
|
||||
_ptrDecoder = new VCMGenericDecoder(*decoderCopy, _id, decoder.External());
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
VCMCodecDataBase::RenderTiming() const
|
||||
{
|
||||
bool renderTiming = true;
|
||||
if (_currentDecIsExternal)
|
||||
{
|
||||
VCMExtDecoderMapItem* extItem = FindExternalDecoderItem(_receiveCodec.plType);
|
||||
renderTiming = extItem->_internalRenderTiming;
|
||||
}
|
||||
return renderTiming;
|
||||
}
|
||||
|
||||
void
|
||||
VCMCodecDataBase::ReleaseDecoder(VCMGenericDecoder* decoder) const
|
||||
{
|
||||
if (decoder != NULL)
|
||||
{
|
||||
decoder->Release();
|
||||
if (!decoder->External() && &decoder->_decoder != NULL)
|
||||
{
|
||||
delete &decoder->_decoder;
|
||||
}
|
||||
delete decoder;
|
||||
}
|
||||
}
|
||||
|
||||
// Stores out-of-band codec configuration data (MPEG-4 only) for the decoder
// registered on payloadType, and forwards it to the active decoder if the
// payload matches.
// Returns VCM_PARAMETER_ERROR for invalid input or unknown payload types,
// VCM_GENERAL_ERROR for codecs without config parameters, VCM_OK otherwise.
WebRtc_Word32
VCMCodecDataBase::SetCodecConfigParameters(WebRtc_UWord8 payloadType,
                                           const WebRtc_UWord8* buffer,
                                           WebRtc_Word32 length)
{
    if (buffer == NULL || length < 0)
    {
        // Reject invalid input before it reaches memcpy below.
        return VCM_PARAMETER_ERROR;
    }
    VCMDecoderMapItem* decItem = FindDecoderItem(payloadType);
    if (decItem == NULL)
    {
        return VCM_PARAMETER_ERROR;
    }
    switch (decItem->_settings->codecType)
    {
    case kVideoCodecMPEG4:
        {
            // NOTE(review): length is assumed to fit in the fixed-size
            // configParameters array - confirm against its declared size.
            memcpy(decItem->_settings->codecSpecific.MPEG4.configParameters,
                   buffer, length);
            decItem->_settings->codecSpecific.MPEG4.configParametersSize =
                static_cast<WebRtc_UWord8>(length);
            break;
        }
    default:
        // This codec doesn't have codec config parameters.
        return VCM_GENERAL_ERROR;
    }
    if (_ptrDecoder != NULL && _receiveCodec.plType == decItem->_settings->plType)
    {
        return _ptrDecoder->SetCodecConfigParameters(buffer, length);
    }
    return VCM_OK;
}
|
||||
|
||||
// Looks up the registered receive-codec entry for payloadType.
// Returns NULL when the payload type has not been registered.
VCMDecoderMapItem*
VCMCodecDataBase::FindDecoderItem(WebRtc_UWord8 payloadType) const
{
    MapItem* item = _decMap.Find(payloadType);
    if (item == NULL)
    {
        return NULL;
    }
    return static_cast<VCMDecoderMapItem*>(item->GetItem());
}
|
||||
|
||||
// Looks up the externally registered decoder entry for payloadType.
// Returns NULL when no external decoder is registered for it.
VCMExtDecoderMapItem*
VCMCodecDataBase::FindExternalDecoderItem(WebRtc_UWord8 payloadType) const
{
    MapItem* item = _decExternalMap.Find(payloadType);
    if (item == NULL)
    {
        return NULL;
    }
    return static_cast<VCMExtDecoderMapItem*>(item->GetItem());
}
|
||||
|
||||
// Factory for internal decoders. Only codec types compiled into this build
// (VIDEOCODEC_* defines) can be created; all others yield NULL.
VCMGenericDecoder*
VCMCodecDataBase::CreateDecoder(VideoCodecType type) const
{
    switch (type)
    {
#ifdef VIDEOCODEC_VP8
    case kVideoCodecVP8:
        return new VCMGenericDecoder(*(new VP8Decoder), _id);
#endif
#ifdef VIDEOCODEC_I420
    case kVideoCodecI420:
        return new VCMGenericDecoder(*(new I420Decoder), _id);
#endif
    default:
        return NULL;
    }
}
|
||||
|
||||
void
|
||||
VCMCodecDataBase::SetCodecConfigParameters(VCMGenericDecoder& decoder,
|
||||
const VideoCodec& settings)
|
||||
{
|
||||
switch (settings.codecType)
|
||||
{
|
||||
case kVideoCodecMPEG4:
|
||||
{
|
||||
if (settings.codecSpecific.MPEG4.configParametersSize > 0)
|
||||
{
|
||||
decoder.SetCodecConfigParameters(
|
||||
settings.codecSpecific.MPEG4.configParameters,
|
||||
settings.codecSpecific.MPEG4.configParametersSize);
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
// No codec config parameters for this codec
|
||||
return;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
}
|
||||
221
modules/video_coding/main/source/codec_database.h
Normal file
221
modules/video_coding/main/source/codec_database.h
Normal file
@ -0,0 +1,221 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODEC_DATABASE_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODEC_DATABASE_H_
|
||||
|
||||
#include "video_coding.h"
|
||||
#include "video_codec_interface.h"
|
||||
#include "generic_decoder.h"
|
||||
#include "generic_encoder.h"
|
||||
#include "typedefs.h"
|
||||
#include "map_wrapper.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Shared defaults for the codec database.
enum VCMCodecDBProperties
{
    kDefaultPayloadSize = 1440  // Default maximum payload size.
};
|
||||
|
||||
class VCMDecoderMapItem {
|
||||
public:
|
||||
VCMDecoderMapItem(VideoCodec* settings,
|
||||
WebRtc_UWord32 numberOfCores,
|
||||
bool requireKeyFrame);
|
||||
|
||||
VideoCodec* _settings;
|
||||
WebRtc_UWord32 _numberOfCores;
|
||||
bool _requireKeyFrame;
|
||||
};
|
||||
|
||||
class VCMExtDecoderMapItem {
|
||||
public:
|
||||
VCMExtDecoderMapItem(VideoDecoder* externalDecoderInstance,
|
||||
WebRtc_UWord8 payloadType,
|
||||
bool internalRenderTiming);
|
||||
|
||||
WebRtc_UWord8 _payloadType;
|
||||
VideoDecoder* _externalDecoderInstance;
|
||||
bool _internalRenderTiming;
|
||||
};
|
||||
|
||||
/*******************************/
|
||||
/* VCMCodecDataBase class */
|
||||
/*******************************/
|
||||
class VCMCodecDataBase
|
||||
{
|
||||
public:
|
||||
VCMCodecDataBase(WebRtc_Word32 id);
|
||||
~VCMCodecDataBase();
|
||||
/**
|
||||
* Fills "version" with the version of all codecs supported.
|
||||
*/
|
||||
WebRtc_Word32 Version(WebRtc_Word8* version,
|
||||
WebRtc_UWord32& remainingBufferInBytes,
|
||||
WebRtc_UWord32& position) const;
|
||||
/**
|
||||
* Release codecdatabase - release all memory for both send and receive side
|
||||
*/
|
||||
WebRtc_Word32 Reset();
|
||||
/**
|
||||
* Sender Side
|
||||
*/
|
||||
/**
|
||||
* Returns the number of supported codecs (or -1 in case of error).
|
||||
*/
|
||||
static WebRtc_UWord8 NumberOfCodecs();
|
||||
/**
|
||||
* Get supported codecs with ID
|
||||
* Input Values:
|
||||
* listnr : Requested codec id number
|
||||
* codec_inst: Pointer to the struct in which the returned codec information is copied
|
||||
* Return Values: 0 if successful, otherwise
|
||||
*/
|
||||
static WebRtc_Word32 Codec(WebRtc_UWord8 listId, VideoCodec* settings);
|
||||
static WebRtc_Word32 Codec(VideoCodecType codecType, VideoCodec* settings);
|
||||
/**
|
||||
* Reset Sender side
|
||||
*/
|
||||
WebRtc_Word32 ResetSender();
|
||||
/**
|
||||
* Setting the sender side codec and initiaiting the desired codec given the VideoCodec
|
||||
* struct.
|
||||
* Return Value: 0 if the codec and the settings are supported, otherwise
|
||||
*/
|
||||
WebRtc_Word32 RegisterSendCodec(const VideoCodec* sendCodec,
|
||||
WebRtc_UWord32 numberOfCores,
|
||||
WebRtc_UWord32 maxPayloadSize);
|
||||
/**
|
||||
* Get current send side codec. Relevant for internal codecs only.
|
||||
*/
|
||||
WebRtc_Word32 SendCodec(VideoCodec* currentSendCodec) const;
|
||||
/**
|
||||
* Get current send side codec type. Relevant for internal codecs only.
|
||||
*/
|
||||
VideoCodecType SendCodec() const;
|
||||
/**
|
||||
* Register external encoder - current assumption - if one is registered then it will also
|
||||
* be used, and therefore it is also initialized
|
||||
* Return value: A pointer to the encoder on success, or null, in case of an error.
|
||||
*/
|
||||
WebRtc_Word32 DeRegisterExternalEncoder(WebRtc_UWord8 payloadType, bool& wasSendCodec);
|
||||
WebRtc_Word32 RegisterExternalEncoder(VideoEncoder* externalEncoder,
|
||||
WebRtc_UWord8 payloadType,
|
||||
bool internalSource);
|
||||
/**
|
||||
* Returns a encoder given a payloadname - to be used with internal encoders only.
|
||||
* Special cases:
|
||||
* Encoder exists - If payload matches, returns existing one, otherwise,
|
||||
* deletes existing one and creates new one.
|
||||
* No match found / Error - returns NULL.
|
||||
*/
|
||||
VCMGenericEncoder* SetEncoder(const VideoCodec* settings,
|
||||
VCMEncodedFrameCallback* VCMencodedFrameCallback);
|
||||
|
||||
WebRtc_Word32 SetPeriodicKeyFrames(bool enable);
|
||||
|
||||
bool InternalSource() const;
|
||||
|
||||
/*
|
||||
* Receiver Side
|
||||
*/
|
||||
WebRtc_Word32 ResetReceiver();
|
||||
/**
|
||||
* Register external decoder/render object
|
||||
*/
|
||||
WebRtc_Word32 DeRegisterExternalDecoder(WebRtc_UWord8 payloadType);
|
||||
WebRtc_Word32 RegisterExternalDecoder(VideoDecoder* externalDecoder,
|
||||
WebRtc_UWord8 payloadType,
|
||||
bool internalRenderTiming);
|
||||
|
||||
bool DecoderRegistered() const;
|
||||
/**
|
||||
* Register recieve codec
|
||||
*/
|
||||
WebRtc_Word32 RegisterReceiveCodec(const VideoCodec* receiveCodec,
|
||||
WebRtc_UWord32 numberOfCores,
|
||||
bool requireKeyFrame);
|
||||
WebRtc_Word32 DeRegisterReceiveCodec(WebRtc_UWord8 payloadType);
|
||||
/**
|
||||
* Get current receive side codec. Relevant for internal codecs only.
|
||||
*/
|
||||
WebRtc_Word32 ReceiveCodec(VideoCodec* currentReceiveCodec) const;
|
||||
/**
|
||||
* Get current receive side codec type. Relevant for internal codecs only.
|
||||
*/
|
||||
VideoCodecType ReceiveCodec() const;
|
||||
/**
|
||||
* Returns a decoder given which matches a payload type.
|
||||
* Special cases:
|
||||
* Decoder exists - If payload matches, returns existing one, otherwise, deletes
|
||||
* existing one, and creates new one.
|
||||
* No match found / Error - returns NULL.
|
||||
*/
|
||||
VCMGenericDecoder* SetDecoder(WebRtc_UWord8 payloadType, VCMDecodedFrameCallback& callback);
|
||||
|
||||
VCMGenericDecoder* CreateAndInitDecoder(WebRtc_UWord8 payloadType,
|
||||
VideoCodec& newCodec,
|
||||
bool &external) const;
|
||||
|
||||
VCMGenericDecoder* CreateDecoderCopy() const;
|
||||
|
||||
void ReleaseDecoder(VCMGenericDecoder* decoder) const;
|
||||
|
||||
void CopyDecoder(const VCMGenericDecoder& decoder);
|
||||
|
||||
bool RenderTiming() const;
|
||||
|
||||
WebRtc_Word32 SetCodecConfigParameters(WebRtc_UWord8 payloadType,
|
||||
const WebRtc_UWord8* buffer,
|
||||
WebRtc_Word32 length);
|
||||
|
||||
protected:
|
||||
/**
|
||||
* Create an internal Encoder given a codec type
|
||||
*/
|
||||
VCMGenericEncoder* CreateEncoder(VideoCodecType type) const;
|
||||
|
||||
void DeleteEncoder();
|
||||
/*
|
||||
* Create an internal Decoder given a codec type
|
||||
*/
|
||||
VCMGenericDecoder* CreateDecoder(VideoCodecType type) const;
|
||||
|
||||
static void SetCodecConfigParameters(VCMGenericDecoder& decoder,
|
||||
const VideoCodec& settings);
|
||||
|
||||
VCMDecoderMapItem* FindDecoderItem(WebRtc_UWord8 payloadType) const;
|
||||
|
||||
VCMExtDecoderMapItem* FindExternalDecoderItem(WebRtc_UWord8 payloadType) const;
|
||||
|
||||
private:
|
||||
WebRtc_Word32 _id;
|
||||
WebRtc_UWord32 _numberOfCores;
|
||||
WebRtc_UWord32 _maxPayloadSize;
|
||||
bool _periodicKeyFrames;
|
||||
bool _currentEncIsExternal;
|
||||
VideoCodec _sendCodec;
|
||||
VideoCodec _receiveCodec;
|
||||
WebRtc_UWord8 _externalPayloadType;
|
||||
VideoEncoder* _externalEncoder;
|
||||
bool _internalSource;
|
||||
VCMGenericEncoder* _ptrEncoder;
|
||||
VCMGenericDecoder* _ptrDecoder;
|
||||
bool _currentDecIsExternal;
|
||||
MapWrapper _decMap;
|
||||
MapWrapper _decExternalMap;
|
||||
|
||||
}; // end of VCMCodecDataBase class definition
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODEC_DATABASE_H_
|
||||
133
modules/video_coding/main/source/codec_timer.cc
Normal file
133
modules/video_coding/main/source/codec_timer.cc
Normal file
@ -0,0 +1,133 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "codec_timer.h"
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// All filter state is cleared via Reset(), which also invalidates the
// sample history.
VCMCodecTimer::VCMCodecTimer()
    : _filteredMax(0),
      _firstDecodeTime(true),
      _shortMax(0),
      _history()
{
    Reset();
}
|
||||
|
||||
// Computes the elapsed decode time, feeds it into the max filter and
// returns it.
WebRtc_Word32 VCMCodecTimer::StopTimer(WebRtc_Word64 startTimeMs, WebRtc_Word64 nowMs)
{
    const WebRtc_Word32 elapsedMs = static_cast<WebRtc_Word32>(nowMs - startTimeMs);
    MaxFilter(elapsedMs, nowMs);
    return elapsedMs;
}
|
||||
|
||||
void VCMCodecTimer::Reset()
|
||||
{
|
||||
_filteredMax = 0;
|
||||
_firstDecodeTime = true;
|
||||
_shortMax = 0;
|
||||
for (int i=0; i < MAX_HISTORY_SIZE; i++)
|
||||
{
|
||||
_history[i].shortMax = 0;
|
||||
_history[i].timeMs = -1;
|
||||
}
|
||||
}
|
||||
|
||||
// Update the max-value filter
|
||||
void VCMCodecTimer::MaxFilter(WebRtc_Word32 decodeTime, WebRtc_Word64 nowMs)
|
||||
{
|
||||
if (!_firstDecodeTime)
|
||||
{
|
||||
UpdateMaxHistory(decodeTime, nowMs);
|
||||
ProcessHistory(nowMs);
|
||||
}
|
||||
else
|
||||
{
|
||||
_firstDecodeTime = false;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
VCMCodecTimer::UpdateMaxHistory(WebRtc_Word32 decodeTime, WebRtc_Word64 now)
|
||||
{
|
||||
if (_history[0].timeMs >= 0 &&
|
||||
now - _history[0].timeMs < SHORT_FILTER_MS)
|
||||
{
|
||||
if (decodeTime > _shortMax)
|
||||
{
|
||||
_shortMax = decodeTime;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// Only add a new value to the history once a second
|
||||
if(_history[0].timeMs == -1)
|
||||
{
|
||||
// First, no shift
|
||||
_shortMax = decodeTime;
|
||||
}
|
||||
else
|
||||
{
|
||||
// Shift
|
||||
for(int i = (MAX_HISTORY_SIZE - 2); i >= 0 ; i--)
|
||||
{
|
||||
_history[i+1].shortMax = _history[i].shortMax;
|
||||
_history[i+1].timeMs = _history[i].timeMs;
|
||||
}
|
||||
}
|
||||
if (_shortMax == 0)
|
||||
{
|
||||
_shortMax = decodeTime;
|
||||
}
|
||||
|
||||
_history[0].shortMax = _shortMax;
|
||||
_history[0].timeMs = now;
|
||||
_shortMax = 0;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
VCMCodecTimer::ProcessHistory(WebRtc_Word64 nowMs)
|
||||
{
|
||||
_filteredMax = _shortMax;
|
||||
if (_history[0].timeMs == -1)
|
||||
{
|
||||
return;
|
||||
}
|
||||
for (int i=0; i < MAX_HISTORY_SIZE; i++)
|
||||
{
|
||||
if (_history[i].timeMs == -1)
|
||||
{
|
||||
break;
|
||||
}
|
||||
if (nowMs - _history[i].timeMs > MAX_HISTORY_SIZE * SHORT_FILTER_MS)
|
||||
{
|
||||
// This sample (and all samples after this) is too old
|
||||
break;
|
||||
}
|
||||
if (_history[i].shortMax > _filteredMax)
|
||||
{
|
||||
// This sample is the largest one this far into the history
|
||||
_filteredMax = _history[i].shortMax;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get the maximum observed time within a time window
|
||||
WebRtc_Word32 VCMCodecTimer::RequiredDecodeTimeMs(FrameType /*frameType*/) const
|
||||
{
|
||||
return _filteredMax;
|
||||
}
|
||||
|
||||
}
|
||||
61
modules/video_coding/main/source/codec_timer.h
Normal file
61
modules/video_coding/main/source/codec_timer.h
Normal file
@ -0,0 +1,61 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
#include "module_common_types.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// MAX_HISTORY_SIZE * SHORT_FILTER_MS defines the window size in milliseconds
|
||||
#define MAX_HISTORY_SIZE 20
|
||||
#define SHORT_FILTER_MS 1000
|
||||
|
||||
class VCMShortMaxSample
|
||||
{
|
||||
public:
|
||||
VCMShortMaxSample() : shortMax(0), timeMs(-1) {};
|
||||
|
||||
WebRtc_Word32 shortMax;
|
||||
WebRtc_Word64 timeMs;
|
||||
};
|
||||
|
||||
class VCMCodecTimer
|
||||
{
|
||||
public:
|
||||
VCMCodecTimer();
|
||||
|
||||
// Updates and returns the max filtered decode time.
|
||||
WebRtc_Word32 StopTimer(WebRtc_Word64 startTimeMs, WebRtc_Word64 nowMs);
|
||||
|
||||
// Empty the list of timers.
|
||||
void Reset();
|
||||
|
||||
// Get the required decode time in ms.
|
||||
WebRtc_Word32 RequiredDecodeTimeMs(FrameType frameType) const;
|
||||
|
||||
private:
|
||||
void UpdateMaxHistory(WebRtc_Word32 decodeTime, WebRtc_Word64 now);
|
||||
void MaxFilter(WebRtc_Word32 newTime, WebRtc_Word64 nowMs);
|
||||
void ProcessHistory(WebRtc_Word64 nowMs);
|
||||
|
||||
WebRtc_Word32 _filteredMax;
|
||||
bool _firstDecodeTime;
|
||||
WebRtc_Word32 _shortMax;
|
||||
VCMShortMaxSample _history[MAX_HISTORY_SIZE];
|
||||
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
|
||||
225
modules/video_coding/main/source/content_metrics_processing.cc
Normal file
225
modules/video_coding/main/source/content_metrics_processing.cc
Normal file
@ -0,0 +1,225 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "content_metrics_processing.h"
|
||||
#include "tick_time.h"
|
||||
#include "module_common_types.h"
|
||||
#include "video_coding_defines.h"
|
||||
|
||||
#include <math.h>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
//////////////////////////////////
|
||||
/// VCMContentMetricsProcessing //
|
||||
//////////////////////////////////
|
||||
|
||||
// The recursive-average factor defaults to 1/150, matched to 30 fps.
VCMContentMetricsProcessing::VCMContentMetricsProcessing()
    : _frameRate(0),
      _recAvgFactor(1 / 150.0f),  // matched to 30fps
      _frameCnt(0),
      _prevAvgSizeZeroMotion(0),
      _avgSizeZeroMotion(0),
      _prevAvgSpatialPredErr(0),
      _avgSpatialPredErr(0),
      _frameCntForCC(0),
      _lastCCpdateTime(0)
{
    _globalRecursiveAvg = new VideoContentMetrics();
}
|
||||
|
||||
// Releases the owned global-average metrics object.
VCMContentMetricsProcessing::~VCMContentMetricsProcessing()
{
    delete _globalRecursiveAvg;
}
|
||||
|
||||
// Restores the processor to its freshly constructed state.
// Always returns VCM_OK.
WebRtc_Word32
VCMContentMetricsProcessing::Reset()
{
    _globalRecursiveAvg->Reset();
    _frameCnt = 0;
    _frameRate = 0;
    _prevAvgSizeZeroMotion = 0;
    _avgSizeZeroMotion = 0;
    _prevAvgSpatialPredErr = 0;
    _avgSpatialPredErr = 0;
    _frameCntForCC = 0;
    // Also clear the content-change timestamp so Reset() matches the state
    // established by the constructor (it was previously left stale).
    _lastCCpdateTime = 0;

    return VCM_OK;
}
|
||||
void
|
||||
VCMContentMetricsProcessing::UpdateFrameRate(WebRtc_UWord32 frameRate)
|
||||
{
|
||||
_frameRate = frameRate;
|
||||
//Update recursive avg factor
|
||||
_recAvgFactor = (float) 1000 / ((float)(_frameRate * kQmMinIntervalMs));
|
||||
|
||||
}
|
||||
|
||||
// Folds one frame's content metrics into the running averages. A NULL
// pointer means "no metrics for this frame" and is not an error.
WebRtc_Word32
VCMContentMetricsProcessing::UpdateContentData(const VideoContentMetrics *contentMetrics)
{
    return (contentMetrics == NULL) ? VCM_OK : ProcessContent(contentMetrics);
}
|
||||
|
||||
// Returns the recursively averaged metrics, or NULL before any frame has
// been processed.
VideoContentMetrics*
VCMContentMetricsProcessing::Data()
{
    return (_frameCnt == 0) ? NULL : _globalRecursiveAvg;
}
|
||||
|
||||
// Updates the global recursive average with this frame's metrics.
// The local-window update used for content change (CC) detection - based on
// the size of the zero-motion cluster and the spatial prediction error -
// is not currently used.
WebRtc_UWord32
VCMContentMetricsProcessing::ProcessContent(const VideoContentMetrics *contentMetrics)
{
    UpdateGlobalMetric(contentMetrics);
    // Not currently used:
    // UpdateLocalMetricCC(contentMetrics->sizeZeroMotion,
    //                     contentMetrics->spatialPredErr);
    return VCM_OK;
}
|
||||
|
||||
bool
|
||||
VCMContentMetricsProcessing::ContentChangeCheck()
|
||||
{
|
||||
bool result = false;
|
||||
|
||||
// Thresholds for bitrate and content change detection
|
||||
float qmContentChangePercMotion = 0.4f;
|
||||
float qmContentChangePercSpatial = 0.4f;
|
||||
|
||||
WebRtc_Word64 now = VCMTickTime::MillisecondTimestamp();
|
||||
if ( (now - _lastCCpdateTime) < kCcMinIntervalMs)
|
||||
{
|
||||
//keep averaging
|
||||
return result;
|
||||
}
|
||||
else //check for detection and reset
|
||||
{
|
||||
//normalize
|
||||
_avgSizeZeroMotion = _avgSizeZeroMotion / (float)(_frameCntForCC);
|
||||
_prevAvgSpatialPredErr = _prevAvgSpatialPredErr / (float)(_frameCntForCC);
|
||||
|
||||
//check for content change
|
||||
float diffMotion = fabs(_avgSizeZeroMotion - _prevAvgSizeZeroMotion);
|
||||
float diffSpatial = fabs(_avgSpatialPredErr -_prevAvgSpatialPredErr);
|
||||
if ((diffMotion > (_avgSizeZeroMotion * qmContentChangePercMotion)) ||
|
||||
(diffSpatial > (_prevAvgSpatialPredErr * qmContentChangePercSpatial)))
|
||||
{
|
||||
result = true;
|
||||
}
|
||||
|
||||
//copy to previous
|
||||
_prevAvgSizeZeroMotion = _avgSizeZeroMotion;
|
||||
_prevAvgSpatialPredErr = _avgSpatialPredErr;
|
||||
|
||||
//reset
|
||||
_avgSizeZeroMotion = 0.;
|
||||
_avgSpatialPredErr = 0.;
|
||||
_frameCntForCC = 0;
|
||||
|
||||
_lastCCpdateTime = now;
|
||||
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
//update metrics for content change detection: update is uniform average over soem time window
|
||||
void VCMContentMetricsProcessing::UpdateLocalMetricCC(float motionVal, float spatialVal)
|
||||
{
|
||||
|
||||
_frameCntForCC += 1;
|
||||
_avgSizeZeroMotion += motionVal;
|
||||
_avgSpatialPredErr += spatialVal;
|
||||
|
||||
return;
|
||||
|
||||
}
|
||||
void
|
||||
VCMContentMetricsProcessing::UpdateGlobalMetric(const VideoContentMetrics *contentMetrics)
|
||||
{
|
||||
|
||||
// Threshold for size of zero motion cluster: for updating 3 metrics:
|
||||
// motion magnitude, cluster distortion, and horizontalness
|
||||
float nonZeroMvThr = 0.1f;
|
||||
|
||||
// first zero and one: take value as is (no motion search in frame zero).
|
||||
float tmpRecAvgFactor = _recAvgFactor;
|
||||
if (_frameCnt < 1)
|
||||
{
|
||||
_recAvgFactor = 1;
|
||||
}
|
||||
|
||||
_globalRecursiveAvg->motionPredErr = (1 - _recAvgFactor) * _globalRecursiveAvg->motionPredErr +
|
||||
_recAvgFactor * contentMetrics->motionPredErr;
|
||||
|
||||
_globalRecursiveAvg->sizeZeroMotion = (1 - _recAvgFactor) * _globalRecursiveAvg->sizeZeroMotion +
|
||||
_recAvgFactor * contentMetrics->sizeZeroMotion;
|
||||
|
||||
_globalRecursiveAvg->spatialPredErr = (1 - _recAvgFactor) * _globalRecursiveAvg->spatialPredErr +
|
||||
_recAvgFactor * contentMetrics->spatialPredErr;
|
||||
|
||||
_globalRecursiveAvg->spatialPredErrH = (1 - _recAvgFactor) * _globalRecursiveAvg->spatialPredErrH +
|
||||
_recAvgFactor * contentMetrics->spatialPredErrH;
|
||||
|
||||
_globalRecursiveAvg->spatialPredErrV = (1 - _recAvgFactor) * _globalRecursiveAvg->spatialPredErrV +
|
||||
_recAvgFactor * contentMetrics->spatialPredErrV;
|
||||
|
||||
//motionMag metric is derived from NFD (normalized frame difference)
|
||||
if (kNfdMetric == 1)
|
||||
{
|
||||
_globalRecursiveAvg->motionMagnitudeNZ = (1 - _recAvgFactor) * _globalRecursiveAvg->motionMagnitudeNZ +
|
||||
_recAvgFactor * contentMetrics->motionMagnitudeNZ;
|
||||
}
|
||||
|
||||
if (contentMetrics->sizeZeroMotion > nonZeroMvThr)
|
||||
{
|
||||
_globalRecursiveAvg->motionClusterDistortion = (1 - _recAvgFactor) * _globalRecursiveAvg->motionClusterDistortion +
|
||||
_recAvgFactor *contentMetrics->motionClusterDistortion;
|
||||
|
||||
_globalRecursiveAvg->motionHorizontalness = (1 - _recAvgFactor) * _globalRecursiveAvg->motionHorizontalness +
|
||||
_recAvgFactor * contentMetrics->motionHorizontalness;
|
||||
|
||||
//motionMag metric is derived from motion vectors
|
||||
if (kNfdMetric == 0)
|
||||
{
|
||||
_globalRecursiveAvg->motionMagnitudeNZ = (1 - _recAvgFactor) * _globalRecursiveAvg->motionMagnitudeNZ +
|
||||
_recAvgFactor * contentMetrics->motionMagnitudeNZ;
|
||||
}
|
||||
}
|
||||
|
||||
// update native values:
|
||||
_globalRecursiveAvg->nativeHeight = contentMetrics->nativeHeight;
|
||||
_globalRecursiveAvg->nativeWidth = contentMetrics->nativeWidth;
|
||||
_globalRecursiveAvg->nativeFrameRate = contentMetrics->nativeFrameRate;
|
||||
|
||||
if (_frameCnt < 1)
|
||||
{
|
||||
_recAvgFactor = tmpRecAvgFactor;
|
||||
}
|
||||
_frameCnt++;
|
||||
return;
|
||||
}
|
||||
|
||||
}
|
||||
@ -0,0 +1,77 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
struct VideoContentMetrics;
|
||||
|
||||
// QM interval time and minimum content-change check interval (ms).
enum { kQmMinIntervalMs = 10000 };
enum { kCcMinIntervalMs = 5000 };

// Flag selecting the NFD metric (1) vs the motion-vector metric (0).
enum { kNfdMetric = 1 };
|
||||
|
||||
/**********************************/
|
||||
/* Content Metrics Processing */
|
||||
/**********************************/
|
||||
class VCMContentMetricsProcessing
|
||||
{
|
||||
public:
|
||||
VCMContentMetricsProcessing();
|
||||
~VCMContentMetricsProcessing();
|
||||
|
||||
// Update class with latest metrics
|
||||
WebRtc_Word32 UpdateContentData(const VideoContentMetrics *contentMetrics);
|
||||
|
||||
// Check for content change detection
|
||||
bool ContentChangeCheck();
|
||||
|
||||
//Initialize to
|
||||
WebRtc_Word32 Reset();
|
||||
|
||||
// Inform class of current frame rate
|
||||
void UpdateFrameRate(WebRtc_UWord32 frameRate);
|
||||
|
||||
// Get working (avg) value
|
||||
VideoContentMetrics* Data();
|
||||
private:
|
||||
|
||||
// Compute working avg
|
||||
WebRtc_UWord32 ProcessContent(const VideoContentMetrics *contentMetrics);
|
||||
|
||||
// Computation of global metric
|
||||
void UpdateGlobalMetric(const VideoContentMetrics *contentMetrics);
|
||||
|
||||
// Compute local average of certain metrics for content change detection
|
||||
void UpdateLocalMetricCC(float motionVal, float spatialVal);
|
||||
|
||||
VideoContentMetrics* _globalRecursiveAvg;
|
||||
WebRtc_UWord32 _frameRate;
|
||||
float _recAvgFactor;
|
||||
WebRtc_UWord32 _frameCnt;
|
||||
|
||||
float _prevAvgSizeZeroMotion;
|
||||
float _avgSizeZeroMotion;
|
||||
float _prevAvgSpatialPredErr;
|
||||
float _avgSpatialPredErr;
|
||||
WebRtc_UWord32 _frameCntForCC;
|
||||
WebRtc_UWord64 _lastCCpdateTime;
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_
|
||||
201
modules/video_coding/main/source/encoded_frame.cc
Normal file
201
modules/video_coding/main/source/encoded_frame.cc
Normal file
@ -0,0 +1,201 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "encoded_frame.h"
|
||||
#include "generic_encoder.h"
|
||||
#include "jitter_buffer_common.h"
|
||||
#include "video_coding_defines.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Creates an empty frame: no payload, no render time, unknown codec.
VCMEncodedFrame::VCMEncodedFrame()
    : webrtc::EncodedImage(),
      _renderTimeMs(-1),
      _payloadType(0),
      _missingFrame(false),
      _codecSpecificInfo(NULL),
      _codecSpecificInfoLength(0),
      _codec(kVideoCodecUnknown)
{
}
|
||||
|
||||
// Deep-copies the payload of an EncodedImage. The base-class copy would
// alias rhs's buffer, so the buffer fields are cleared and the payload is
// re-allocated and copied.
VCMEncodedFrame::VCMEncodedFrame(const webrtc::EncodedImage& rhs)
    : webrtc::EncodedImage(rhs),
      _renderTimeMs(-1),
      _payloadType(0),
      _missingFrame(false),
      _codecSpecificInfo(NULL),
      _codecSpecificInfoLength(0),
      _codec(kVideoCodecUnknown)
{
    _buffer = NULL;
    // _size/_length are integers - initialize with 0, not the pointer
    // constant NULL used before.
    _size = 0;
    _length = 0;
    if (rhs._buffer != NULL)
    {
        VerifyAndAllocate(rhs._length);
        memcpy(_buffer, rhs._buffer, rhs._length);
        // Restore the payload length zeroed above; without this the copied
        // frame reported an empty payload.
        _length = rhs._length;
    }
}
|
||||
|
||||
// Copy constructor: deep-copies the payload (allocating rhs's capacity) so
// the two frames never share a buffer. Codec-specific info is not copied.
VCMEncodedFrame::VCMEncodedFrame(const VCMEncodedFrame& rhs)
    : webrtc::EncodedImage(rhs),
      _renderTimeMs(rhs._renderTimeMs),
      _payloadType(rhs._payloadType),
      _missingFrame(rhs._missingFrame),
      _codecSpecificInfo(NULL),
      _codecSpecificInfoLength(0),
      _codec(rhs._codec)
{
    _buffer = NULL;
    // _size/_length are integers - initialize with 0, not the pointer
    // constant NULL used before.
    _size = 0;
    _length = 0;
    if (rhs._buffer != NULL)
    {
        VerifyAndAllocate(rhs._size);
        memcpy(_buffer, rhs._buffer, rhs._length);
        // Restore the payload length zeroed above; without this the copied
        // frame reported an empty payload.
        _length = rhs._length;
    }
}
|
||||
|
||||
// Releases the payload buffer via Free().
VCMEncodedFrame::~VCMEncodedFrame()
{
    Free();
}
|
||||
|
||||
void VCMEncodedFrame::Free()
|
||||
{
|
||||
Reset();
|
||||
if (_buffer != NULL)
|
||||
{
|
||||
delete [] _buffer;
|
||||
_buffer = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
// Resets all metadata fields to their initial values. The payload buffer
// itself is kept (only _length is zeroed); Free() releases it.
void VCMEncodedFrame::Reset()
{
    _renderTimeMs = -1;
    _timeStamp = 0;
    _payloadType = 0;
    // Only the pointer is dropped here; the pointed-to data is not owned
    // strongly enough to be freed by this class.
    _codecSpecificInfo = NULL;
    _codecSpecificInfoLength = 0;
    _frameType = kDeltaFrame;
    _encodedWidth = 0;
    _encodedHeight = 0;
    _completeFrame = false;
    _missingFrame = false;
    _length = 0;
    _codec = kVideoCodecUnknown;
}
|
||||
|
||||
// Copies this frame's payload and metadata into an EncodedVideoData
// struct and hands it to the storage callback.
//
// Input:
//   - storeCallback : receiver of the copied frame data.
// Return value      : VCM_OK (always; allocation failures are not
//                     reported by VerifyAndAllocate here).
WebRtc_Word32
VCMEncodedFrame::Store(VCMFrameStorageCallback& storeCallback) const
{
    EncodedVideoData frameToStore;
    frameToStore.codec = _codec;
    if (_buffer != NULL)
    {
        frameToStore.VerifyAndAllocate(_length);
        memcpy(frameToStore.payloadData, _buffer, _length);
        frameToStore.payloadSize = _length;
    }
    frameToStore.completeFrame = _completeFrame;
    frameToStore.encodedWidth = _encodedWidth;
    frameToStore.encodedHeight = _encodedHeight;
    frameToStore.frameType = ConvertFrameType(_frameType);
    frameToStore.missingFrame = _missingFrame;
    frameToStore.payloadType = _payloadType;
    frameToStore.renderTimeMs = _renderTimeMs;
    frameToStore.timeStamp = _timeStamp;
    storeCallback.StoreReceivedFrame(frameToStore);
    return VCM_OK;
}
|
||||
|
||||
// Ensures the payload buffer holds at least minimumSize bytes.
// Grows the buffer (preserving existing contents) when it is too small;
// never shrinks. Updates _size on growth.
//
// Return value: 0 on success, -1 if allocation fails.
WebRtc_Word32
VCMEncodedFrame::VerifyAndAllocate(const WebRtc_UWord32 minimumSize)
{
    if (minimumSize <= _size)
    {
        // Current buffer is already large enough.
        return 0;
    }
    WebRtc_UWord8* newBuffer = new WebRtc_UWord8[minimumSize];
    if (newBuffer == NULL)
    {
        return -1;
    }
    if (_buffer != NULL)
    {
        // Preserve the old contents before switching buffers.
        memcpy(newBuffer, _buffer, _size);
        delete [] _buffer;
    }
    _buffer = newBuffer;
    _size = minimumSize;
    return 0;
}
|
||||
|
||||
// Maps a codec-level VideoFrameType onto the module-level
// webrtc::FrameType. Unrecognized values map to kVideoFrameDelta.
webrtc::FrameType VCMEncodedFrame::ConvertFrameType(VideoFrameType frameType)
{
    switch (frameType)
    {
    case kKeyFrame:
        return kVideoFrameKey;
    case kGoldenFrame:
        return kVideoFrameGolden;
    case kAltRefFrame:
        return kVideoFrameAltRef;
    case kDeltaFrame:
    default:
        return kVideoFrameDelta;
    }
}
|
||||
|
||||
// Inverse mapping of the overload above: module-level webrtc::FrameType
// to codec-level VideoFrameType. Unrecognized values map to kDeltaFrame.
VideoFrameType VCMEncodedFrame::ConvertFrameType(webrtc::FrameType frameType)
{
    switch (frameType)
    {
    case kVideoFrameKey:
        return kKeyFrame;
    case kVideoFrameGolden:
        return kGoldenFrame;
    case kVideoFrameAltRef:
        return kAltRefFrame;
    case kVideoFrameDelta:
    default:
        return kDeltaFrame;
    }
}
|
||||
|
||||
}
|
||||
112
modules/video_coding/main/source/encoded_frame.h
Normal file
112
modules/video_coding/main/source/encoded_frame.h
Normal file
@ -0,0 +1,112 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
|
||||
|
||||
#include "module_common_types.h"
|
||||
#include "common_types.h"
|
||||
#include "video_coding_defines.h"
|
||||
#include "video_image.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Wrapper around webrtc::EncodedImage that adds the per-frame metadata
// the video coding module needs (render time, payload type, codec type,
// completeness flags) plus buffer management and storage helpers.
// NOTE(review): the destructor is non-virtual even though VCMFrameBuffer
// derives from this class — confirm deletion never happens through a
// base pointer.
class VCMEncodedFrame : protected EncodedImage
{
public:
    VCMEncodedFrame();
    // Deep-copies the payload of rhs (see encoded_frame.cc).
    VCMEncodedFrame(const webrtc::EncodedImage& rhs);
    VCMEncodedFrame(const VCMEncodedFrame& rhs);

    ~VCMEncodedFrame();
    /**
    *   Deletes the frame buffer and resets members to their initial values
    */
    void Free();
    /**
    *   Set render time in milliseconds
    */
    void SetRenderTime(const WebRtc_Word64 renderTimeMs) {_renderTimeMs = renderTimeMs;}

    /**
    *   Set the encoded frame size (width/height in pixels)
    */
    void SetEncodedSize(WebRtc_UWord32 width, WebRtc_UWord32 height)
    { _encodedWidth = width; _encodedHeight = height; }
    /**
    *   Get the encoded image (view of this object as its base class)
    */
    const webrtc::EncodedImage& EncodedImage() const
    { return static_cast<const webrtc::EncodedImage&>(*this); }
    /**
    *   Get pointer to frame buffer (may be NULL when no payload is set)
    */
    const WebRtc_UWord8* Buffer() const {return _buffer;}
    /**
    *   Get frame length in bytes
    */
    WebRtc_UWord32 Length() const {return _length;}
    /**
    *   Get frame timestamp (90kHz)
    */
    WebRtc_UWord32 TimeStamp() const {return _timeStamp;}
    /**
    *   Get render time in milliseconds (-1 when unset)
    */
    WebRtc_Word64 RenderTimeMs() const {return _renderTimeMs;}
    /**
    *   Get frame type, converted to the module-level enum
    */
    webrtc::FrameType FrameType() const {return ConvertFrameType(_frameType);}
    /**
    *   True if this frame is complete, false otherwise
    */
    bool Complete() const { return _completeFrame; }
    /**
    *   True if there's a frame missing before this frame
    */
    bool MissingFrame() const { return _missingFrame; }
    /**
    *   Payload type of the encoded payload
    */
    WebRtc_UWord8 PayloadType() const { return _payloadType; }
    /**
    *   Get codec specific info (opaque pointer; not owned/freed here)
    */
    const void* CodecSpecificInfo() const {return _codecSpecificInfo;}

    // Copies this frame into storage via the callback; see .cc file.
    WebRtc_Word32 Store(VCMFrameStorageCallback& storeCallback) const;

    // Conversions between the codec-level and module-level frame-type enums.
    static webrtc::FrameType ConvertFrameType(VideoFrameType frameType);
    static VideoFrameType ConvertFrameType(webrtc::FrameType frameType);

protected:
    /**
    * Verifies that current allocated buffer size is larger than or equal to the input size.
    * If the current buffer size is smaller, a new allocation is made and the old buffer data
    * is copied to the new buffer.
    * Buffer size is updated to minimumSize.
    */
    WebRtc_Word32 VerifyAndAllocate(const WebRtc_UWord32 minimumSize);

    // Resets metadata to initial values; keeps the allocated buffer.
    void Reset();

    WebRtc_Word64                 _renderTimeMs;           // -1 when unset
    WebRtc_UWord8                 _payloadType;
    bool                          _missingFrame;
    void*                         _codecSpecificInfo;      // opaque, not owned
    WebRtc_UWord32                _codecSpecificInfoLength;
    webrtc::VideoCodecType        _codec;
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
|
||||
38728
modules/video_coding/main/source/er_tables_xor.h
Normal file
38728
modules/video_coding/main/source/er_tables_xor.h
Normal file
File diff suppressed because it is too large
Load Diff
63
modules/video_coding/main/source/event.h
Normal file
63
modules/video_coding/main/source/event.h
Normal file
@ -0,0 +1,63 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_EVENT_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_EVENT_H_
|
||||
|
||||
#include "event_wrapper.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
//#define EVENT_DEBUG
|
||||
|
||||
// Thin wrapper around a heap-allocated EventWrapper. When EVENT_DEBUG is
// defined, Wait() short-circuits to kEventTimeout without blocking.
class VCMEvent : public EventWrapper
{
public:
    VCMEvent() : _event(*EventWrapper::Create()) {};

    virtual ~VCMEvent() { delete &_event; };

    /**
    *   Release waiting threads
    */
    bool Set() { return _event.Set(); };

    bool Reset() { return _event.Reset(); };

    /**
    *   Wait for this event, up to maxTime milliseconds.
    *   With EVENT_DEBUG defined this never blocks and always reports a
    *   timeout.
    */
    EventTypeWrapper Wait(unsigned long maxTime)
    {
#ifdef EVENT_DEBUG
        return kEventTimeout;
#else
        return _event.Wait(maxTime);
#endif
    };

    /**
    *   Start a timer (periodic or one-shot, period/delay in ms)
    */
    bool StartTimer(bool periodic, unsigned long time)
    { return _event.StartTimer(periodic, time); };
    /**
    *   Stop the timer
    */
    bool StopTimer() { return _event.StopTimer(); };

private:
    EventWrapper& _event; // owned; released in the destructor
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_EVENT_H_
|
||||
60
modules/video_coding/main/source/exp_filter.cc
Normal file
60
modules/video_coding/main/source/exp_filter.cc
Normal file
@ -0,0 +1,60 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "exp_filter.h"
|
||||
|
||||
#include <math.h>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Resets the filter: installs a new factor base and marks the filter
// output as uninitialized (-1.0), so the next Apply() seeds it.
void
VCMExpFilter::Reset(float alpha)
{
    _alpha = alpha;
    _filtered = -1.0;
}
|
||||
|
||||
float
|
||||
VCMExpFilter::Apply(float exp, float sample)
|
||||
{
|
||||
if (_filtered == -1.0)
|
||||
{
|
||||
// Initialize filtered bit rates
|
||||
_filtered = sample;
|
||||
}
|
||||
else if (exp == 1.0)
|
||||
{
|
||||
_filtered = _alpha * _filtered + (1 - _alpha) * sample;
|
||||
}
|
||||
else
|
||||
{
|
||||
float alpha = pow(_alpha, exp);
|
||||
_filtered = alpha * _filtered + (1 - alpha) * sample;
|
||||
}
|
||||
if (_max != -1 && _filtered > _max)
|
||||
{
|
||||
_filtered = _max;
|
||||
}
|
||||
return _filtered;
|
||||
}
|
||||
|
||||
// Changes the filter factor base without resetting the filter state.
void
VCMExpFilter::UpdateBase(float alpha)
{
    _alpha = alpha;
}
|
||||
|
||||
// Returns the current filter output y(k) (-1.0 before the first sample).
float
VCMExpFilter::Value() const
{
    return _filtered;
}
|
||||
|
||||
}
|
||||
58
modules/video_coding/main/source/exp_filter.h
Normal file
58
modules/video_coding/main/source/exp_filter.h
Normal file
@ -0,0 +1,58 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_EXP_FILTER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_EXP_FILTER_H_
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
/**********************/
|
||||
/* ExpFilter class */
|
||||
/**********************/
|
||||
|
||||
// Exponential filter: y(k) = alpha^T * y(k-1) + (1 - alpha^T) * x(k),
// with an optional upper clamp on the output.
class VCMExpFilter
{
public:
    // max < 0 (the default) means the output is unclamped.
    VCMExpFilter(float alpha, float max = -1.0) : _alpha(alpha), _filtered(-1.0), _max(max) {}

    // Resets the filter to its initial state, and resets alpha to the given value
    //
    // Input:
    //          - alpha     : the new value of the filter factor base.
    void Reset(float alpha);

    // Applies the filter with the given exponent on the provided sample
    //
    // Input:
    //          - exp       : Exponent T in y(k) = alpha^T * y(k-1) + (1 - alpha^T) * x(k)
    //          - sample    : x(k) in the above filter equation
    float Apply(float exp, float sample);

    // Return current filtered value: y(k)
    //
    // Return value         : The current filter output
    float Value() const;

    // Change the filter factor base
    //
    // Input:
    //          - alpha     : The new filter factor base.
    void UpdateBase(float alpha);

private:
    float          _alpha;     // Filter factor base
    float          _filtered;  // Current filter output; -1.0 == uninitialized
    const float    _max;       // Output clamp; -1.0 == no clamp
}; // end of ExpFilter class
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_EXP_FILTER_H_
|
||||
6478
modules/video_coding/main/source/fec_tables_xor.h
Normal file
6478
modules/video_coding/main/source/fec_tables_xor.h
Normal file
File diff suppressed because it is too large
Load Diff
370
modules/video_coding/main/source/frame_buffer.cc
Normal file
370
modules/video_coding/main/source/frame_buffer.cc
Normal file
@ -0,0 +1,370 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "../../../../engine_configurations.h"
|
||||
#include "frame_buffer.h"
|
||||
#include "packet.h"
|
||||
|
||||
#include <cassert>
|
||||
#include <string.h>
|
||||
|
||||
#if defined(_WIN32)
|
||||
// VS 2005: Don't warn for default initialized arrays. See help for more info.
|
||||
#pragma warning(disable:4351)
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Constructor
|
||||
// Constructor: starts in the free state with no packets received.
VCMFrameBuffer::VCMFrameBuffer() :
    _state(kStateFree),
    _frameCounted(false),
    _nackCount(0),
    _latestPacketTimeMs(-1)
{
}
|
||||
|
||||
// Destructor
|
||||
// Destructor: resets all state (the base-class destructor releases the
// payload buffer).
VCMFrameBuffer::~VCMFrameBuffer()
{
    Reset();
}
|
||||
|
||||
// Copy constructor. Note the non-const parameter (kept for interface
// compatibility). The session info is default-constructed in the init
// list and then copy-assigned in the body.
VCMFrameBuffer::VCMFrameBuffer(VCMFrameBuffer& rhs)
:
VCMEncodedFrame(rhs),
_state(rhs._state),
_frameCounted(rhs._frameCounted),
_sessionInfo(),
_nackCount(rhs._nackCount),
_latestPacketTimeMs(rhs._latestPacketTimeMs)
{
    _sessionInfo = rhs._sessionInfo;
}
|
||||
|
||||
// Returns the frame type as tracked by the session info (overrides the
// base-class accessor that reads _frameType).
webrtc::FrameType
VCMFrameBuffer::FrameType() const
{
    return _sessionInfo.FrameType();
}
|
||||
|
||||
// Marks, in the session info, that the frame preceding this one was lost.
void
VCMFrameBuffer::SetPreviousFrameLoss()
{
    _sessionInfo.SetPreviousFrameLoss();
}
|
||||
|
||||
// Returns the lowest packet sequence number received for this frame.
WebRtc_Word32
VCMFrameBuffer::GetLowSeqNum()
{
    return _sessionInfo.GetLowSeqNum();
}
|
||||
|
||||
// Get highest sequence number for complete sessions
|
||||
// Returns the highest packet sequence number of this frame, but only when
// the session is complete; -1 otherwise.
WebRtc_Word32
VCMFrameBuffer::GetHighSeqNumComplete()
{
    if (!_sessionInfo.IsSessionComplete())
    {
        return -1;
    }
    return _sessionInfo.GetHighSeqNum();
}
|
||||
|
||||
// Returns the highest packet sequence number received for this frame.
WebRtc_Word32
VCMFrameBuffer::GetHighSeqNum()
{
    return _sessionInfo.GetHighSeqNum();
}
|
||||
|
||||
// True when all packets of this frame's session have been received.
bool
VCMFrameBuffer::IsSessionComplete()
{
    return _sessionInfo.IsSessionComplete();
}
|
||||
|
||||
// Insert packet
|
||||
// Inserts one received packet into this frame.
//
// Input:
//   - packet   : the received packet (payload pointer, size, seq num,
//                timestamp, codec, payload type).
//   - timeInMs : receive time; recorded as _latestPacketTimeMs.
//
// Return value: kCompleteSession when this packet completed the frame,
//   kIncomplete when more packets are needed (or frame is decoding),
//   kTimeStampError / kSizeError / kDuplicatePacket / kStateError on the
//   corresponding failures.
VCMFrameBufferEnum
VCMFrameBuffer::InsertPacket(const VCMPacket& packet, WebRtc_Word64 timeInMs)
{
    if (_state == kStateDecoding)
    {
        // Do not insert packet
        return kIncomplete;
    }

    // Sanity to check if the frame has been freed. (Too old for example)
    if(_state == kStateFree)
    {
        return kStateError;
    }

    // is this packet part of this frame
    if (TimeStamp() && (TimeStamp() != packet.timestamp))
    {
        return kTimeStampError;
    }

    // sanity checks
    if (_size + packet.sizeBytes + (packet.insertStartCode?kH264StartCodeLengthBytes:0) >
        kMaxJBFrameSizeBytes)
    {
        return kSizeError;
    }
    if (NULL == packet.dataPtr && packet.sizeBytes > 0)
    {
        // Non-empty packet without payload data is malformed.
        return kSizeError;
    }
    if(!_sessionInfo.HaveStartSeqNumber())
    {
        _sessionInfo.SetStartSeqNumber(packet.seqNum);
    }
    if (packet.dataPtr != NULL)
    {
        _payloadType = packet.payloadType;
    }

    if (kStateEmpty == _state)
    {
        // This is the first packet inserted into this frame,
        // store some info and set some initial values.
        _timeStamp = packet.timestamp;
        _codec = packet.codec;
        SetState(kStateIncomplete);
    }

    // Grow the payload buffer in kBufferIncStepSizeBytes increments when
    // this packet would not fit.
    WebRtc_UWord32 requiredSizeBytes = Length() + packet.sizeBytes + (packet.insertStartCode?kH264StartCodeLengthBytes:0);
    if (requiredSizeBytes >= _size)
    {
        const WebRtc_UWord32 increments = requiredSizeBytes / kBufferIncStepSizeBytes +
                                          (requiredSizeBytes % kBufferIncStepSizeBytes > 0);
        const WebRtc_UWord32 newSize = _size + increments * kBufferIncStepSizeBytes;
        if (newSize > kMaxJBFrameSizeBytes)
        {
            return kSizeError;
        }
        if (VerifyAndAllocate(newSize) == -1)
        {
            return kSizeError;
        }
    }
    // Session returns the number of bytes inserted, -1 on size error,
    // -2 for a duplicate packet.
    WebRtc_Word64 retVal = _sessionInfo.InsertPacket(packet, _buffer);
    if(retVal == -1)
    {
        return kSizeError;
    }
    else if (retVal == -2)
    {
        return kDuplicatePacket;
    }
    // update length
    _length = Length() + static_cast<WebRtc_UWord32>(retVal);

    _latestPacketTimeMs = timeInMs;

    if(_sessionInfo.IsSessionComplete())
    {
        return kCompleteSession;
    }
    else
    {
        // this layer is not complete
        if (_state == kStateComplete)
        {
            // we already have a complete layer
            // wait for all independent layers belonging to the same frame
            _state = kStateIncomplete;
        }
    }
    return kIncomplete;
}
|
||||
|
||||
// Returns the receive time (ms) of the most recently inserted packet,
// or -1 if no packet has been inserted.
WebRtc_Word64 VCMFrameBuffer::LatestPacketTimeMs()
{
    return _latestPacketTimeMs;
}
|
||||
|
||||
// Zero out all entries in list up to and including the (first) entry equal to _lowSeqNum
|
||||
WebRtc_Word32 VCMFrameBuffer::ZeroOutSeqNum(WebRtc_Word32* list, WebRtc_Word32 num)
|
||||
{
|
||||
if(_sessionInfo.ZeroOutSeqNum(list, num) != 0)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Increments the number of times this frame has been NACKed.
void VCMFrameBuffer::IncrementNackCount()
{
    _nackCount++;
}
|
||||
|
||||
// Returns the number of times this frame has been NACKed.
// Note: _nackCount is stored unsigned (WebRtc_UWord16) but returned as a
// signed WebRtc_Word16.
WebRtc_Word16 VCMFrameBuffer::GetNackCount() const
{
    return _nackCount;
}
|
||||
|
||||
// True when the session has received the frame's last (marker) packet.
bool VCMFrameBuffer::HaveLastPacket()
{
    return _sessionInfo.HaveLastPacket();
}
|
||||
|
||||
// Forces the session to treat the highest received packet as the last
// one. Returns whether the session is complete after the change.
bool
VCMFrameBuffer::ForceSetHaveLastPacket()
{
    _sessionInfo.ForceSetHaveLastPacket();
    return _sessionInfo.IsSessionComplete();
}
|
||||
|
||||
// Resets the frame buffer to the free state: clears session info, NACK
// count, counters and timestamps, then resets the base-class metadata.
void VCMFrameBuffer::Reset()
{
    _length = 0;
    _timeStamp = 0;

    _sessionInfo.Reset();
    _frameCounted = false;
    _payloadType = 0;
    _nackCount = 0;
    _latestPacketTimeMs = -1;
    _state = kStateFree;  // bypasses SetState() on purpose to avoid recursion
    VCMEncodedFrame::Reset();
}
|
||||
|
||||
// Makes sure the session contain a decodable stream.
|
||||
// Makes sure the session contains a decodable stream: the session may
// strip undecodable data from _buffer, and the returned number of
// removed bytes is subtracted from the frame length.
void
VCMFrameBuffer::MakeSessionDecodable()
{
    WebRtc_Word32 retVal = _sessionInfo.MakeSessionDecodable(_buffer);
    // update length
    _length -= retVal;
}
|
||||
|
||||
// Set state of frame
|
||||
// State machine for the frame buffer. Transitions are asserted against
// the allowed predecessors; entering kStateFree resets the frame, and
// entering kStateDecoding finalizes the frame for decoding.
void
VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state)
{
    if(_state == state)
    {
        return;
    }
    switch (state)
    {
    case kStateFree:
        // Reset everything
        // We can go to this state from all other states.
        // The one setting the state to free must ensure
        // that the frame is removed from the timestamp
        // ordered frame list in the jb.
        Reset();
        break;

    case kStateIncomplete:
        // we can go to this state from state kStateEmpty
        assert(_state == kStateEmpty ||
            _state == kStateDecoding);

        // Do nothing, we received a packet
        break;

    case kStateComplete:
        assert(_state == kStateEmpty ||
               _state == kStateIncomplete);

        break;

    case kStateEmpty:
        assert(_state == kStateFree);
        // Do nothing
        break;

    case kStateDecoding:
        // we can go to this state from state kStateComplete kStateIncomplete
        assert(_state == kStateComplete || _state == kStateIncomplete);
        // Transfer frame information to EncodedFrame and create any codec specific information
        RestructureFrameInformation();
        break;

    default:
        // Should never happen
        assert(!"FrameBuffer::SetState Incorrect frame buffer state as input");
        return;
    }
    // Note: Reset() above already set _state = kStateFree; this assignment
    // re-applies the requested state for all accepted transitions.
    _state = state;
}
|
||||
|
||||
// Copies the session's view of the frame (type, completeness, preceding
// loss) into the base-class members and prepares the payload for decode.
void
VCMFrameBuffer::RestructureFrameInformation()
{
    PrepareForDecode();
    _frameType = ConvertFrameType(_sessionInfo.FrameType());
    _completeFrame = _sessionInfo.IsSessionComplete();
    _missingFrame = _sessionInfo.PreviousFrameLoss();
}
|
||||
|
||||
// Populates this frame buffer from a frame previously saved to storage
// (the counterpart of VCMEncodedFrame::Store).
//
// Return value: VCM_OK on success, VCM_MEMORY if the payload buffer
//               cannot be allocated.
WebRtc_Word32
VCMFrameBuffer::ExtractFromStorage(const EncodedVideoData& frameFromStorage)
{
    _frameType = ConvertFrameType(frameFromStorage.frameType);
    _timeStamp = frameFromStorage.timeStamp;
    _payloadType = frameFromStorage.payloadType;
    _encodedWidth = frameFromStorage.encodedWidth;
    _encodedHeight = frameFromStorage.encodedHeight;
    _missingFrame = frameFromStorage.missingFrame;
    _completeFrame = frameFromStorage.completeFrame;
    _renderTimeMs = frameFromStorage.renderTimeMs;
    _codec = frameFromStorage.codec;
    if (VerifyAndAllocate(frameFromStorage.payloadSize) < 0)
    {
        return VCM_MEMORY;
    }
    memcpy(_buffer, frameFromStorage.payloadData, frameFromStorage.payloadSize);
    _length = frameFromStorage.payloadSize;
    return VCM_OK;
}
|
||||
|
||||
// Set counted status (as counted by JB or not)
|
||||
// Set counted status (as counted by the jitter buffer or not).
void VCMFrameBuffer::SetCountedFrame(bool frameCounted)
{
    _frameCounted = frameCounted;
}
|
||||
|
||||
// True if this frame has already been counted by the jitter buffer.
bool VCMFrameBuffer::GetCountedFrame()
{
    return _frameCounted;
}
|
||||
|
||||
// Get current state of frame
|
||||
// Get current state of the frame.
VCMFrameBufferStateEnum
VCMFrameBuffer::GetState() const
{
    return _state;
}
|
||||
|
||||
// Get current state of frame
|
||||
// Get current state of the frame; also outputs the frame's timestamp.
VCMFrameBufferStateEnum
VCMFrameBuffer::GetState(WebRtc_UWord32& timeStamp) const
{
    timeStamp = TimeStamp();
    return GetState();
}
|
||||
|
||||
// True if any packet of this frame was received via retransmission.
bool
VCMFrameBuffer::IsRetransmitted()
{
    return _sessionInfo.IsRetransmitted();
}
|
||||
|
||||
// Lets the session rewrite the payload for decoding (codec dependent);
// the session returns the resulting payload length.
void
VCMFrameBuffer::PrepareForDecode()
{
    _length = _sessionInfo.PrepareForDecode(_buffer, _codec);
}
|
||||
|
||||
}
|
||||
91
modules/video_coding/main/source/frame_buffer.h
Normal file
91
modules/video_coding/main/source/frame_buffer.h
Normal file
@ -0,0 +1,91 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
#include "module_common_types.h"
|
||||
|
||||
#include "encoded_frame.h"
|
||||
#include "frame_list.h"
|
||||
#include "jitter_buffer_common.h"
|
||||
#include "session_info.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Jitter-buffer frame: a VCMEncodedFrame assembled incrementally from
// incoming packets, with a state machine (free/empty/incomplete/
// complete/decoding), per-frame NACK bookkeeping and session tracking.
class VCMFrameBuffer : public VCMEncodedFrame
{
public:
    VCMFrameBuffer();
    virtual ~VCMFrameBuffer();

    // Note: parameter is a non-const reference.
    VCMFrameBuffer(VCMFrameBuffer& rhs);

    virtual void Reset();

    // Inserts one packet; returns a VCMFrameBufferEnum result code.
    VCMFrameBufferEnum InsertPacket(const VCMPacket& packet, WebRtc_Word64 timeInMs);

    // State
    // Get current state of frame
    VCMFrameBufferStateEnum GetState() const;
    // Get current state and timestamp of frame
    VCMFrameBufferStateEnum GetState(WebRtc_UWord32& timeStamp) const;
    void SetState(VCMFrameBufferStateEnum state); // Set state of frame

    bool IsRetransmitted();
    bool IsSessionComplete();
    bool HaveLastPacket();
    // Forces the last packet flag; returns session completeness afterwards.
    bool ForceSetHaveLastPacket();
    // Makes sure the session contain a decodable stream.
    void MakeSessionDecodable();

    // Sequence numbers
    // Get lowest packet sequence number in frame
    WebRtc_Word32 GetLowSeqNum();
    // Get highest packet sequence number in frame
    WebRtc_Word32 GetHighSeqNum();

    // Get highest sequence number of complete session (-1 if incomplete)
    WebRtc_Word32 GetHighSeqNumComplete();

    // Set counted status (as counted by JB or not)
    void SetCountedFrame(bool frameCounted);
    bool GetCountedFrame();

    // NACK
    // Zero out all entries in list up to and including the entry equal to _lowSeqNum
    WebRtc_Word32 ZeroOutSeqNum(WebRtc_Word32* list, WebRtc_Word32 num);
    void IncrementNackCount();
    WebRtc_Word16 GetNackCount() const;

    // Receive time (ms) of the most recent packet; -1 when empty.
    WebRtc_Word64 LatestPacketTimeMs();

    webrtc::FrameType FrameType() const;
    void SetPreviousFrameLoss();

    // Restores a frame previously saved via VCMEncodedFrame::Store.
    WebRtc_Word32 ExtractFromStorage(const EncodedVideoData& frameFromStorage);

protected:
    // Syncs session info into the base-class members before decode.
    void RestructureFrameInformation();
    void PrepareForDecode();

private:
    VCMFrameBufferStateEnum    _state;         // Current state of the frame
    bool                       _frameCounted;  // If this frame has been counted by JB
    VCMSessionInfo             _sessionInfo;   // Per-frame packet bookkeeping
    WebRtc_UWord16             _nackCount;
    WebRtc_Word64              _latestPacketTimeMs;
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_
|
||||
331
modules/video_coding/main/source/frame_dropper.cc
Normal file
331
modules/video_coding/main/source/frame_dropper.cc
Normal file
@ -0,0 +1,331 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "frame_dropper.h"
|
||||
#include "internal_defines.h"
|
||||
#include "trace.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Constructor: seeds the exponential filters with their factor bases
// (see exp_filter.h) and then resets all state.
VCMFrameDropper::VCMFrameDropper(WebRtc_Word32 vcmId)
:
_vcmId(vcmId),
_keyFrameSizeAvgKbits(0.9f),
_keyFrameRatio(0.99f),
_dropRatio(0.9f, 0.96f)   // drop ratio is clamped at 0.96
{
    Reset();
}
|
||||
|
||||
// Resets the frame dropper to its defaults: 300 kb/s target, 30 fps,
// 0.5 s leaky-bucket window, dropping disabled in effect (ratio 0) and
// normal (non-fast) mode.
void
VCMFrameDropper::Reset()
{
    _keyFrameRatio.Reset(0.99f);
    _keyFrameRatio.Apply(1.0f, 1.0f/300.0f); // 1 key frame every 10th second in 30 fps
    _keyFrameSizeAvgKbits.Reset(0.9f);
    _keyFrameCount = 0;
    _accumulator = 0.0f;
    _accumulatorMax = 150.0f; // assume 300 kb/s and 0.5 s window
    _targetBitRate = 300.0f;
    _userFrameRate = 30;
    _keyFrameSpreadFrames = 0.5f * _userFrameRate;
    _dropNext = false;
    _dropRatio.Reset(0.9f);
    _dropRatio.Apply(0.0f, 0.0f); // Initialize to 0
    _dropCount = 0;
    _windowSize = 0.5f;
    _wasBelowMax = true;
    _enabled = true;
    _fastMode = false; // start with normal (non-aggressive) mode
}
|
||||
|
||||
// Enables or disables the dropper; when disabled, Fill/Leak/UpdateNack
// and DropFrame become no-ops.
void
VCMFrameDropper::Enable(bool enable)
{
    _enabled = enable;
}
|
||||
|
||||
// Adds an encoded frame's size to the leaky bucket.
// Key frames (in normal mode) are tracked separately: their average size
// and rate are filtered, and their excess size over the average is
// compensated by spreading it over the following frames via
// _keyFrameCount (see Leak()).
//
// Input:
//   - frameSizeBytes : encoded size of the frame.
//   - deltaFrame     : false for key frames.
void
VCMFrameDropper::Fill(WebRtc_UWord32 frameSizeBytes, bool deltaFrame)
{
    if (!_enabled)
    {
        return;
    }
    float frameSizeKbits = 8.0f * static_cast<float>(frameSizeBytes) / 1000.0f;
    if (!deltaFrame && !_fastMode) // fast mode does not treat key-frames any different
    {
        _keyFrameSizeAvgKbits.Apply(1, frameSizeKbits);
        _keyFrameRatio.Apply(1.0, 1.0);
        if (frameSizeKbits > _keyFrameSizeAvgKbits.Value())
        {
            // Remove the average key frame size since we
            // compensate for key frames when adding delta
            // frames.
            frameSizeKbits -= _keyFrameSizeAvgKbits.Value();
        }
        else
        {
            // Shouldn't be negative, so zero is the lower bound.
            frameSizeKbits = 0;
        }
        if (_keyFrameRatio.Value() > 1e-5 && 1 / _keyFrameRatio.Value() < _keyFrameSpreadFrames)
        {
            // We are sending key frames more often than our upper bound for
            // how much we allow the key frame compensation to be spread
            // out in time. Therefor we must use the key frame ratio rather
            // than keyFrameSpreadFrames.
            _keyFrameCount = static_cast<WebRtc_Word32>(1 / _keyFrameRatio.Value() + 0.5);
        }
        else
        {
            // Compensate for the key frame the following frames
            _keyFrameCount = static_cast<WebRtc_Word32>(_keyFrameSpreadFrames + 0.5);
        }
    }
    else
    {
        // Decrease the keyFrameRatio
        _keyFrameRatio.Apply(1.0, 0.0);
    }
    // Change the level of the accumulator (bucket)
    _accumulator += frameSizeKbits;
}
|
||||
|
||||
// Drains the leaky bucket by one frame interval's worth of target bits
// and updates the drop ratio. Part of the per-frame budget is reserved
// for key-frame compensation while _keyFrameCount > 0.
//
// Input:
//   - inputFrameRate : current input frame rate; ignored if < 1.
void
VCMFrameDropper::Leak(WebRtc_UWord32 inputFrameRate)
{
    if (!_enabled)
    {
        return;
    }
    if (inputFrameRate < 1)
    {
        return;
    }
    if (_targetBitRate < 0.0f)
    {
        return;
    }
    _keyFrameSpreadFrames = 0.5f * inputFrameRate;
    // T is the expected bits per frame (target). If all frames were the same size,
    // we would get T bits per frame. Notice that T is also weighted to be able to
    // force a lower frame rate if wanted.
    float T = _targetBitRate / inputFrameRate;
    if (_keyFrameCount > 0)
    {
        // Perform the key frame compensation
        if (_keyFrameRatio.Value() > 0 && 1 / _keyFrameRatio.Value() < _keyFrameSpreadFrames)
        {
            T -= _keyFrameSizeAvgKbits.Value() * _keyFrameRatio.Value();
        }
        else
        {
            T -= _keyFrameSizeAvgKbits.Value() / _keyFrameSpreadFrames;
        }
        _keyFrameCount--;
    }
    _accumulator -= T;
    UpdateRatio();

}
|
||||
|
||||
// Adds retransmitted (NACKed) bytes to the bucket so retransmissions
// count against the bit budget. Bytes are converted to kbits.
void
VCMFrameDropper::UpdateNack(WebRtc_UWord32 nackBytes)
{
    if (!_enabled)
    {
        return;
    }
    _accumulator += static_cast<float>(nackBytes) * 8.0f / 1000.0f;
}
|
||||
|
||||
// Adjusts the bucket level directly by (inKbits - outKbits).
void
VCMFrameDropper::FillBucket(float inKbits, float outKbits)
{
    _accumulator += (inKbits - outKbits);
}
|
||||
|
||||
// Recomputes the drop ratio from the bucket level: overflowing the
// bucket pushes the ratio towards 1 (and requests a drop); staying below
// the max pushes it towards 0. The filter base is temporarily lowered
// when far above the max to react faster.
void
VCMFrameDropper::UpdateRatio()
{
    if (_accumulator > 1.3f * _accumulatorMax)
    {
        // Too far above accumulator max, react faster
        _dropRatio.UpdateBase(0.8f);
    }
    else
    {
        // Go back to normal reaction
        _dropRatio.UpdateBase(0.9f);
    }
    if (_accumulator > _accumulatorMax)
    {
        // We are above accumulator max, and should ideally
        // drop a frame. Increase the dropRatio and drop
        // the frame later.
        if (_wasBelowMax)
        {
            _dropNext = true;
        }
        if (_fastMode)
        {
            // always drop in aggressive mode
            _dropNext = true;
        }

        _dropRatio.Apply(1.0f, 1.0f);
        _dropRatio.UpdateBase(0.9f);
    }
    else
    {
        _dropRatio.Apply(1.0f, 0.0f);
    }
    // The bucket level never goes negative.
    if (_accumulator < 0.0f)
    {
        _accumulator = 0.0f;
    }
    _wasBelowMax = _accumulator < _accumulatorMax;
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId), "FrameDropper: dropRatio = %f accumulator = %f, accumulatorMax = %f", _dropRatio.Value(), _accumulator, _accumulatorMax);
}
|
||||
|
||||
// This function signals when to drop frames to the caller. It makes use of the dropRatio
// to smooth out the drops over time.
//
// _dropCount encodes the current run: positive while in a drops-per-keep
// regime (ratio >= 0.5), negative while in a keeps-per-drop regime
// (0 < ratio < 0.5). Switching regime flips or resets the counter.
bool
VCMFrameDropper::DropFrame()
{
    if (!_enabled)
    {
        return false;
    }
    if (_dropNext)
    {
        // A forced drop was requested by UpdateRatio(); consume the flag and
        // restart the run counter.
        _dropNext = false;
        _dropCount = 0;
    }

    if (_dropRatio.Value() >= 0.5f) // Drops per keep
    {
        // limit is the number of frames we should drop between each kept frame
        // to keep our drop ratio. limit is positive in this case.
        float denom = 1.0f - _dropRatio.Value();
        if (denom < 1e-5)
        {
            // Guard against division by ~zero when the ratio approaches 1.
            denom = (float)1e-5;
        }
        WebRtc_Word32 limit = static_cast<WebRtc_Word32>(1.0f / denom - 1.0f + 0.5f);
        if (_dropCount < 0)
        {
            // Reset the _dropCount since it was negative and should be positive.
            if (_dropRatio.Value() > 0.4f)
            {
                _dropCount = -_dropCount;
            }
            else
            {
                _dropCount = 0;
            }
        }
        if (_dropCount < limit)
        {
            // As long we are below the limit we should drop frames.
            _dropCount++;
            return true;
        }
        else
        {
            // Only when we reset _dropCount a frame should be kept.
            _dropCount = 0;
            return false;
        }
    }
    else if (_dropRatio.Value() > 0.0f && _dropRatio.Value() < 0.5f) // Keeps per drop
    {
        // limit is the number of frames we should keep between each drop
        // in order to keep the drop ratio. limit is negative in this case,
        // and the _dropCount is also negative.
        float denom = _dropRatio.Value();
        if (denom < 1e-5)
        {
            denom = (float)1e-5;
        }
        WebRtc_Word32 limit = -static_cast<WebRtc_Word32>(1.0f / denom - 1.0f + 0.5f);
        if (_dropCount > 0)
        {
            // Reset the _dropCount since we have a positive
            // _dropCount, and it should be negative.
            if (_dropRatio.Value() < 0.6f)
            {
                _dropCount = -_dropCount;
            }
            else
            {
                _dropCount = 0;
            }
        }
        if (_dropCount > limit)
        {
            if (_dropCount == 0)
            {
                // Drop frames when we reset _dropCount.
                _dropCount--;
                return true;
            }
            else
            {
                // Keep frames as long as we haven't reached limit.
                _dropCount--;
                return false;
            }
        }
        else
        {
            _dropCount = 0;
            return false;
        }
    }
    // Drop ratio is zero (or out of range): never drop.
    _dropCount = 0;
    return false;

    // A simpler version, unfiltered and quicker
    //bool dropNext = _dropNext;
    //_dropNext = false;
    //return dropNext;
}
|
||||
|
||||
void
VCMFrameDropper::SetRates(float bitRate, float userFrameRate)
{
    // Updates the target bit rate and the user-requested frame rate, and
    // rescales the bucket state so a rate drop does not instantly trigger
    // a burst of frame drops.
    // Bit rate of -1 means infinite bandwidth.
    _accumulatorMax = bitRate * _windowSize; // bitRate * windowSize (in seconds)
    if (_targetBitRate > 0.0f && bitRate < _targetBitRate && _accumulator > _accumulatorMax)
    {
        // Rescale the accumulator level if the accumulator max decreases
        _accumulator = bitRate / _targetBitRate * _accumulator;
    }
    _targetBitRate = bitRate;
    if (userFrameRate > 0.0f)
    {
        _userFrameRate = userFrameRate;
    }
}
|
||||
|
||||
float
|
||||
VCMFrameDropper::ActualFrameRate(WebRtc_UWord32 inputFrameRate) const
|
||||
{
|
||||
if (!_enabled)
|
||||
{
|
||||
return static_cast<float>(inputFrameRate);
|
||||
}
|
||||
return inputFrameRate * (1.0f - _dropRatio.Value());
|
||||
}
|
||||
|
||||
}
|
||||
94
modules/video_coding/main/source/frame_dropper.h
Normal file
94
modules/video_coding/main/source/frame_dropper.h
Normal file
@ -0,0 +1,94 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_FRAME_DROPPER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_FRAME_DROPPER_H_
|
||||
|
||||
#include "exp_filter.h"
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
/******************************/
/* VCMFrameDropper class */
/****************************/
// The Frame Dropper implements a variant of the leaky bucket algorithm
// for keeping track of when to drop frames to avoid bit rate
// over use when the encoder can't keep its bit rate.
class VCMFrameDropper
{
public:
    VCMFrameDropper(WebRtc_Word32 vcmId = 0);
    // Resets the FrameDropper to its initial state.
    // This means that the frameRateWeight is set to its
    // default value as well.
    void Reset();

    // Enables or disables frame dropping; when disabled, DropFrame()
    // always returns false and the update methods are no-ops.
    void Enable(bool enable);
    // Answers the question if it's time to drop a frame
    // if we want to reach a given frame rate. Must be
    // called for every frame.
    //
    // Return value : True if we should drop the current frame
    bool DropFrame();
    // Updates the FrameDropper with the size of the latest encoded
    // frame. The FrameDropper calculates a new drop ratio (can be
    // seen as the probability to drop a frame) and updates its
    // internal statistics.
    //
    // Input:
    // - frameSizeBytes : The size of the latest frame
    // returned from the encoder.
    // - deltaFrame : True if the encoder returned
    // a delta frame, false for a key frame.
    void Fill(WebRtc_UWord32 frameSizeBytes, bool deltaFrame);

    // Drains the bucket by one frame interval's bit budget; call once
    // per frame with the current input frame rate.
    void Leak(WebRtc_UWord32 inputFrameRate);

    // Adds NACK-retransmitted bytes to the bucket so retransmissions
    // count against the bit budget.
    void UpdateNack(WebRtc_UWord32 nackBytes);

    // Sets the target bit rate and the frame rate produced by
    // the camera.
    //
    // Input:
    // - bitRate : The target bit rate
    // - userFrameRate : The frame rate produced by the camera (fps)
    void SetRates(float bitRate, float userFrameRate);

    // Return value : The current average frame rate produced
    // if the DropFrame() function is used as
    // instruction of when to drop frames.
    float ActualFrameRate(WebRtc_UWord32 inputFrameRate) const;

private:
    // Adds (inKbits - outKbits) to the bucket accumulator.
    void FillBucket(float inKbits, float outKbits);
    // Recomputes the filtered drop ratio from the bucket level.
    void UpdateRatio();

    WebRtc_Word32 _vcmId;                   // Id used for tracing.
    VCMExpFilter _keyFrameSizeAvgKbits;     // Filtered average key frame size.
    VCMExpFilter _keyFrameRatio;            // Filtered key frame frequency.
    float _keyFrameSpreadFrames;            // Frames over which a key frame cost is amortized.
    WebRtc_Word32 _keyFrameCount;           // Key frames pending compensation in Leak().
    float _accumulator;                     // Leaky bucket level [kbits].
    float _accumulatorMax;                  // Bucket capacity [kbits].
    float _targetBitRate;                   // Target bit rate; -1 means infinite.
    bool _dropNext;                         // Force a drop on the next DropFrame().
    VCMExpFilter _dropRatio;                // Filtered probability of dropping a frame.
    WebRtc_Word32 _dropCount;               // Run counter used by DropFrame().
    float _windowSize;                      // Bucket window size in seconds.
    float _userFrameRate;                   // Frame rate requested by the user.
    bool _wasBelowMax;                      // Bucket was below max on the previous update.
    bool _enabled;
    bool _fastMode;                         // Aggressive mode: always drop when above max.
}; // end of VCMFrameDropper class
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_FRAME_DROPPER_H_
|
||||
113
modules/video_coding/main/source/frame_list.cc
Normal file
113
modules/video_coding/main/source/frame_list.cc
Normal file
@ -0,0 +1,113 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "frame_list.h"
|
||||
#include "frame_buffer.h"
|
||||
#include "jitter_buffer.h"
|
||||
#include <cstdlib>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
VCMFrameListTimestampOrderAsc::~VCMFrameListTimestampOrderAsc()
{
    // Remove and delete all remaining list items (frames themselves are
    // owned elsewhere; Flush() erases the list nodes).
    Flush();
}
|
||||
|
||||
void
VCMFrameListTimestampOrderAsc::Flush()
{
    // Repeatedly erase the head item until Erase reports failure (-1),
    // which happens when the list is empty.
    while(Erase(First()) != -1) { }
}
|
||||
|
||||
// Inserts frame in timestamp order, with the oldest timestamp first. Takes wrap arounds into account
//
// Returns 0 on success, -1 on allocation or list-insertion failure (the
// newly created list item is deleted on failure, the frame is not).
WebRtc_Word32
VCMFrameListTimestampOrderAsc::Insert(VCMFrameBuffer* frame)
{
    VCMFrameListItem* item = static_cast<VCMFrameListItem*>(First());
    VCMFrameListItem* newItem = new VCMFrameListItem(frame);
    bool inserted = false;
    if (newItem == NULL)
    {
        return -1;
    }
    while (item != NULL)
    {
        // LatestTimestamp handles 32-bit RTP timestamp wrap-around; insert
        // before the first item that is newer than the incoming frame.
        const WebRtc_UWord32 itemTimestamp = item->GetItem()->TimeStamp();
        if (VCMJitterBuffer::LatestTimestamp(itemTimestamp, frame->TimeStamp()) == itemTimestamp)
        {
            if (InsertBefore(item, newItem) < 0)
            {
                delete newItem;
                return -1;
            }
            inserted = true;
            break;
        }
        item = Next(item);
    }
    // Newest frame (or empty list): append at the tail.
    if (!inserted && ListWrapper::Insert(ListWrapper::Last(), newItem) < 0)
    {
        delete newItem;
        return -1;
    }
    return 0;
}
|
||||
|
||||
VCMFrameBuffer*
VCMFrameListTimestampOrderAsc::FirstFrame() const
{
    // Return the frame held by the head item, or NULL when the list is empty.
    VCMFrameListItem* head = First();
    return (head != NULL) ? head->GetItem() : NULL;
}
|
||||
|
||||
VCMFrameListItem*
VCMFrameListTimestampOrderAsc::FindFrameListItem(FindFrameCriteria criteria,
                                                 const void* compareWith,
                                                 VCMFrameListItem* startItem) const
{
    // Without a predicate there is nothing to match.
    if (criteria == NULL)
    {
        return NULL;
    }
    // Walk from startItem (or the head when none was supplied) until the
    // predicate accepts an item's frame.
    for (VCMFrameListItem* item = (startItem != NULL) ? startItem : First();
         item != NULL;
         item = Next(item))
    {
        if (criteria(item->GetItem(), compareWith))
        {
            return item;
        }
    }
    // No frame found
    return NULL;
}
|
||||
|
||||
VCMFrameBuffer*
VCMFrameListTimestampOrderAsc::FindFrame(FindFrameCriteria criteria,
                                         const void* compareWith,
                                         VCMFrameListItem* startItem) const
{
    // Convenience wrapper: locate the matching list item and unwrap the
    // frame it carries, or NULL when nothing matches.
    const VCMFrameListItem* match = FindFrameListItem(criteria, compareWith, startItem);
    return (match != NULL) ? match->GetItem() : NULL;
}
|
||||
|
||||
}
|
||||
|
||||
66
modules/video_coding/main/source/frame_list.h
Normal file
66
modules/video_coding/main/source/frame_list.h
Normal file
@ -0,0 +1,66 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_FRAME_LIST_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_FRAME_LIST_H_
|
||||
|
||||
#include "list_wrapper.h"
|
||||
#include "typedefs.h"
|
||||
#include <stdlib.h>
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
class VCMFrameBuffer;
|
||||
|
||||
typedef bool (*FindFrameCriteria)(VCMFrameBuffer*, const void*);
|
||||
|
||||
// A list node that carries a (non-owned) VCMFrameBuffer pointer.
class VCMFrameListItem : public ListItem
{
    friend class VCMFrameListTimestampOrderAsc;
public:
    VCMFrameListItem(const VCMFrameBuffer* ptr) : ListItem(ptr) {}
    ~VCMFrameListItem() {};

    // Returns the stored frame pointer, cast back from the generic
    // ListItem payload.
    VCMFrameBuffer* GetItem() const
    { return static_cast<VCMFrameBuffer*>(ListItem::GetItem()); }
};
|
||||
|
||||
// A doubly linked list of frames kept sorted by RTP timestamp,
// oldest first, with wrap-around-aware ordering.
class VCMFrameListTimestampOrderAsc : public ListWrapper
{
public:
    VCMFrameListTimestampOrderAsc() : ListWrapper() {};
    ~VCMFrameListTimestampOrderAsc();

    // Erases all list items; the frames themselves are not deleted.
    void Flush();

    // Inserts frame in timestamp order, with the oldest timestamp first.
    // Takes wrap arounds into account.
    WebRtc_Word32 Insert(VCMFrameBuffer* frame);
    // Frame with the oldest timestamp, or NULL when empty.
    VCMFrameBuffer* FirstFrame() const;
    // Typed wrappers around the base-class traversal methods.
    VCMFrameListItem* Next(VCMFrameListItem* item) const
    { return static_cast<VCMFrameListItem*>(ListWrapper::Next(item)); }
    VCMFrameListItem* Previous(VCMFrameListItem* item) const
    { return static_cast<VCMFrameListItem*>(ListWrapper::Previous(item)); }
    VCMFrameListItem* First() const
    { return static_cast<VCMFrameListItem*>(ListWrapper::First()); }
    VCMFrameListItem* Last() const
    { return static_cast<VCMFrameListItem*>(ListWrapper::Last()); }
    // Linear search for the first item whose frame satisfies criteria,
    // starting from startItem (or the head when NULL).
    VCMFrameListItem* FindFrameListItem(FindFrameCriteria criteria,
                                        const void* compareWith = NULL,
                                        VCMFrameListItem* startItem = NULL) const;
    // As FindFrameListItem, but returns the frame itself (or NULL).
    VCMFrameBuffer* FindFrame(FindFrameCriteria criteria,
                              const void* compareWith = NULL,
                              VCMFrameListItem* startItem = NULL) const;
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_FRAME_LIST_H_
|
||||
203
modules/video_coding/main/source/generic_decoder.cc
Normal file
203
modules/video_coding/main/source/generic_decoder.cc
Normal file
@ -0,0 +1,203 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "video_coding.h"
|
||||
#include "trace.h"
|
||||
#include "generic_decoder.h"
|
||||
#include "internal_defines.h"
|
||||
#include "tick_time.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Constructs the decode-complete callback. Owns its critical section
// (released in the destructor); the timing object is borrowed.
VCMDecodedFrameCallback::VCMDecodedFrameCallback(VCMTiming& timing)
:
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
_receiveCallback(NULL),
_timing(timing),
_timestampMap(kDecoderFrameMemoryLength),
// Fix: initialize the last-received picture ID; it was previously left
// uninitialized and read by LastReceivedPictureID() before any frame
// had been decoded.
_lastReceivedPictureID(0)
{
}
|
||||
|
||||
VCMDecodedFrameCallback::~VCMDecodedFrameCallback()
{
    // _critSect is a reference to a heap-allocated critical section created
    // in the constructor; release it here.
    delete &_critSect;
}
|
||||
|
||||
// Registers (or clears, with NULL) the user callback that receives
// rendered frames. Thread-safe via the internal critical section.
void VCMDecodedFrameCallback::SetUserReceiveCallback(VCMReceiveCallback* receiveCallback)
{
    CriticalSectionScoped cs(_critSect);
    _receiveCallback = receiveCallback;
}
|
||||
|
||||
// Called by the decoder when a frame has been decoded. Looks up the
// per-frame info stored at Decode() time, stops the decode timer and
// forwards the frame to the registered receive callback.
WebRtc_Word32 VCMDecodedFrameCallback::Decoded(RawImage& decodedImage)
{
    CriticalSectionScoped cs(_critSect);
    VCMFrameInformation* frameInfo = static_cast<VCMFrameInformation*>(_timestampMap.Pop(decodedImage._timeStamp));
    if (frameInfo == NULL)
    {
        // No timing info mapped for this timestamp; the frame cannot be
        // attributed to a Decode() call.
        return WEBRTC_VIDEO_CODEC_ERROR;
    }

    WebRtc_Word32 ret = _timing.StopDecodeTimer(decodedImage._timeStamp, frameInfo->decodeStartTimeMs, VCMTickTime::MillisecondTimestamp());

    if (_receiveCallback != NULL)
    {
        // Swap the decoded buffer into the reusable member frame (avoids a
        // copy; the decoder's buffer is exchanged, not duplicated).
        _frame.Swap(decodedImage._buffer, decodedImage._length, decodedImage._size);
        _frame.SetWidth(decodedImage._width);
        _frame.SetHeight(decodedImage._height);
        _frame.SetTimeStamp(decodedImage._timeStamp);
        _frame.SetRenderTime(frameInfo->renderTimeMs);
        // Convert raw image to video frame
        WebRtc_Word32 callbackReturn = _receiveCallback->FrameToRender(_frame);
        if (callbackReturn < 0)
        {
            return callbackReturn;
        }
    }
    if (ret < 0)
    {
        // Timer bookkeeping failed; report it only after delivery succeeded.
        return ret;
    }
    return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
WebRtc_Word32
VCMDecodedFrameCallback::ReceivedDecodedReferenceFrame(const WebRtc_UWord64 pictureId)
{
    // Forward the reference-frame notification to the registered receive
    // callback; -1 when no callback is set.
    CriticalSectionScoped cs(_critSect);
    if (_receiveCallback == NULL)
    {
        return -1;
    }
    return _receiveCallback->ReceivedDecodedReferenceFrame(pictureId);
}
|
||||
|
||||
// Records the picture ID of the most recently decoded frame.
// NOTE(review): unlike the other mutators, this writes the member without
// taking _critSect — confirm callers serialize access.
WebRtc_Word32
VCMDecodedFrameCallback::ReceivedDecodedFrame(const WebRtc_UWord64 pictureId)
{
    _lastReceivedPictureID = pictureId;
    return 0;
}
|
||||
|
||||
// Returns the picture ID recorded by the latest ReceivedDecodedFrame() call.
WebRtc_UWord64 VCMDecodedFrameCallback::LastReceivedPictureID() const
{
    return _lastReceivedPictureID;
}
|
||||
|
||||
// Associates timestamp with per-frame timing info so Decoded() can
// retrieve it when the decoder reports completion.
WebRtc_Word32 VCMDecodedFrameCallback::Map(WebRtc_UWord32 timestamp, VCMFrameInformation* frameInfo)
{
    CriticalSectionScoped cs(_critSect);
    return _timestampMap.Add(timestamp, frameInfo);
}
|
||||
|
||||
WebRtc_Word32 VCMDecodedFrameCallback::Pop(WebRtc_UWord32 timestamp)
{
    // Discard the timing info mapped to timestamp (used when a decode
    // fails); an error is reported when no entry exists.
    CriticalSectionScoped cs(_critSect);
    return (_timestampMap.Pop(timestamp) == NULL) ? VCM_GENERAL_ERROR : VCM_OK;
}
|
||||
|
||||
// Wraps a concrete VideoDecoder implementation. The decoder reference is
// borrowed; isExternal marks decoders registered from outside the module.
VCMGenericDecoder::VCMGenericDecoder(VideoDecoder& decoder, WebRtc_Word32 id, bool isExternal)
:
_id(id),
_callback(NULL),
_frameInfos(),
_nextFrameInfoIdx(0),
_decoder(decoder),
_codecType(kVideoCodecUnknown),
_isExternal(isExternal),
_requireKeyFrame(false),
_keyFrameDecoded(false)
{
}
|
||||
|
||||
VCMGenericDecoder::~VCMGenericDecoder()
{
    // The wrapped decoder is not owned and is therefore not deleted here.
}
|
||||
|
||||
WebRtc_Word32 VCMGenericDecoder::InitDecode(const VideoCodec* settings, WebRtc_Word32 numberOfCores, bool requireKeyFrame)
{
    // Record the key-frame policy and codec type, then hand the settings
    // over to the wrapped decoder implementation.
    _codecType = settings->codecType;
    _keyFrameDecoded = false;
    _requireKeyFrame = requireKeyFrame;
    return _decoder.InitDecode(settings, numberOfCores);
}
|
||||
|
||||
// Decodes one encoded frame. Registers timing info keyed by the frame's
// timestamp before decoding so the Decoded() callback can find it; on
// decoder failure the mapping is popped again.
WebRtc_Word32 VCMGenericDecoder::Decode(const VCMEncodedFrame& frame)
{
    if (_requireKeyFrame &&
        !_keyFrameDecoded &&
        frame.FrameType() != kVideoFrameKey &&
        frame.FrameType() != kVideoFrameGolden)
    {
        // Require key frame is enabled, meaning that one key frame must be decoded
        // before we can decode delta frames.
        return VCM_CODEC_ERROR;
    }
    // Store decode-start time and render time in the circular info buffer.
    _frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = VCMTickTime::MillisecondTimestamp();
    _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
    _callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);

    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_id),
               "Decoding timestamp %u",
               frame.TimeStamp());

    _nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;

    WebRtc_Word32 ret = _decoder.Decode(frame.EncodedImage(),
                                        frame.MissingFrame(),
                                        frame.CodecSpecificInfo(),
                                        frame.RenderTimeMs());

    if (ret < WEBRTC_VIDEO_CODEC_OK)
    {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, VCMId(_id), "Decoder error: %d\n", ret);
        // Remove the stale timing entry; Decoded() will never fire for it.
        _callback->Pop(frame.TimeStamp());
        return ret;
    }
    // Update the key frame decoded variable so that we know whether or not we've decoded a key frame since reset.
    _keyFrameDecoded = (frame.FrameType() == kVideoFrameKey || frame.FrameType() == kVideoFrameGolden);
    return ret;
}
|
||||
|
||||
// Frees decoder resources and clears the key-frame-decoded state.
WebRtc_Word32
VCMGenericDecoder::Release()
{
    _keyFrameDecoded = false;
    return _decoder.Release();
}
|
||||
|
||||
// Resets the wrapped decoder for a new call; a key frame will be
// required again when _requireKeyFrame is set.
WebRtc_Word32 VCMGenericDecoder::Reset()
{
    _keyFrameDecoded = false;
    return _decoder.Reset();
}
|
||||
|
||||
// Forwards out-of-band codec configuration data (e.g. from call setup)
// to the wrapped decoder.
WebRtc_Word32 VCMGenericDecoder::SetCodecConfigParameters(const WebRtc_UWord8* buffer, WebRtc_Word32 size)
{
    return _decoder.SetCodecConfigParameters(buffer, size);
}
|
||||
|
||||
// Stores the decode-complete callback and registers it with the wrapped
// decoder so Decoded() is invoked when frames finish decoding.
WebRtc_Word32 VCMGenericDecoder::RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback)
{
    _callback = callback;
    return _decoder.RegisterDecodeCompleteCallback(callback);
}
|
||||
|
||||
// True when the wrapped decoder was registered externally (not created
// by the codec database).
bool VCMGenericDecoder::External() const
{
    return _isExternal;
}
|
||||
|
||||
}
|
||||
120
modules/video_coding/main/source/generic_decoder.h
Normal file
120
modules/video_coding/main/source/generic_decoder.h
Normal file
@ -0,0 +1,120 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
|
||||
|
||||
#include "timing.h"
|
||||
#include "timestamp_map.h"
|
||||
#include "video_codec_interface.h"
|
||||
#include "encoded_frame.h"
|
||||
#include "module_common_types.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
class VCMReceiveCallback;
|
||||
|
||||
enum { kDecoderFrameMemoryLength = 10 };
|
||||
|
||||
// Per-frame bookkeeping stored at Decode() time and consumed by the
// decode-complete callback, keyed by RTP timestamp.
struct VCMFrameInformation
{
    WebRtc_Word64 renderTimeMs;      // Wall-clock time the frame should render.
    WebRtc_Word64 decodeStartTimeMs; // When decoding of this frame started.
    void* userData;                  // Opaque user data; not set by VCMGenericDecoder::Decode in this file.
};
|
||||
|
||||
// Receives decoded images from the decoder, attaches timing/render info
// (looked up via a timestamp map) and forwards frames to the user's
// VCMReceiveCallback.
class VCMDecodedFrameCallback : public DecodedImageCallback
{
public:
    VCMDecodedFrameCallback(VCMTiming& timing);
    virtual ~VCMDecodedFrameCallback();
    // Registers (or clears) the user callback receiving rendered frames.
    void SetUserReceiveCallback(VCMReceiveCallback* receiveCallback);

    // DecodedImageCallback implementation.
    virtual WebRtc_Word32 Decoded(RawImage& decodedImage);
    virtual WebRtc_Word32 ReceivedDecodedReferenceFrame(const WebRtc_UWord64 pictureId);
    virtual WebRtc_Word32 ReceivedDecodedFrame(const WebRtc_UWord64 pictureId);

    // Picture ID recorded by the latest ReceivedDecodedFrame() call.
    WebRtc_UWord64 LastReceivedPictureID() const;

    // Associates / removes per-frame timing info keyed by RTP timestamp.
    WebRtc_Word32 Map(WebRtc_UWord32 timestamp, VCMFrameInformation* frameInfo);
    WebRtc_Word32 Pop(WebRtc_UWord32 timestamp);

private:
    CriticalSectionWrapper& _critSect;   // Owned; guards members below.
    VideoFrame _frame;                   // Reusable frame buffer handed to the receive callback.
    VCMReceiveCallback* _receiveCallback;
    VCMTiming& _timing;                  // Borrowed timing tracker.
    VCMTimestampMap _timestampMap;       // timestamp -> VCMFrameInformation*.
    WebRtc_UWord64 _lastReceivedPictureID;
};
|
||||
|
||||
|
||||
// Thin wrapper around a concrete VideoDecoder that adds per-frame timing
// bookkeeping and an optional require-key-frame policy.
class VCMGenericDecoder
{
    friend class VCMCodecDataBase;
public:
    VCMGenericDecoder(VideoDecoder& decoder, WebRtc_Word32 id = 0, bool isExternal = false);
    ~VCMGenericDecoder();

    /**
    * Initialize the decoder with the information from the VideoCodec
    */
    WebRtc_Word32 InitDecode(const VideoCodec* settings,
                             WebRtc_Word32 numberOfCores,
                             bool requireKeyFrame);

    /**
    * Decode to a raw I420 frame,
    *
    * inputVideoBuffer reference to encoded video frame
    */
    WebRtc_Word32 Decode(const VCMEncodedFrame& inputFrame);

    /**
    * Free the decoder memory
    */
    WebRtc_Word32 Release();

    /**
    * Reset the decoder state, prepare for a new call
    */
    WebRtc_Word32 Reset();

    /**
    * Codec configuration data sent out-of-band, i.e. in SIP call setup
    *
    * buffer pointer to the configuration data
    * size the size of the configuration data in bytes
    */
    WebRtc_Word32 SetCodecConfigParameters(const WebRtc_UWord8* /*buffer*/,
                                           WebRtc_Word32 /*size*/);

    // Registers the callback invoked when a frame finishes decoding.
    WebRtc_Word32 RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback);

    // True when the wrapped decoder was registered externally.
    bool External() const;

protected:

    WebRtc_Word32 _id;                 // Id used for tracing.
    VCMDecodedFrameCallback* _callback;
    VCMFrameInformation _frameInfos[kDecoderFrameMemoryLength]; // Circular timing-info buffer.
    WebRtc_UWord32 _nextFrameInfoIdx;  // Next write slot in _frameInfos.
    VideoDecoder& _decoder;            // Borrowed concrete decoder.
    VideoCodecType _codecType;
    bool _isExternal;                  // Registered from outside the module.
    bool _requireKeyFrame;             // Refuse delta frames until a key frame decodes.
    bool _keyFrameDecoded;             // A key frame has decoded since init/reset.

};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
|
||||
229
modules/video_coding/main/source/generic_encoder.cc
Normal file
229
modules/video_coding/main/source/generic_encoder.cc
Normal file
@ -0,0 +1,229 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "encoded_frame.h"
|
||||
#include "generic_encoder.h"
|
||||
#include "media_optimization.h"
|
||||
#include "../../../../engine_configurations.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
//#define DEBUG_ENCODER_BIT_STREAM
|
||||
|
||||
// Wraps a concrete VideoEncoder implementation. The encoder reference is
// borrowed; internalSource marks encoders that produce their own frames
// (capture happens inside the encoder).
VCMGenericEncoder::VCMGenericEncoder(VideoEncoder& encoder, bool internalSource /*= false*/)
:
_encoder(encoder),
_codecType(kVideoCodecUnknown),
_VCMencodedFrameCallback(NULL),
_bitRate(0),
_frameRate(0),
// Fix: use the constructor argument instead of hard-coding false, which
// silently discarded the caller's internal-source setting (read later by
// InternalSource() and RegisterEncodeCallback()).
_internalSource(internalSource)
{
}
|
||||
|
||||
|
||||
VCMGenericEncoder::~VCMGenericEncoder()
{
    // The wrapped encoder is not owned and is therefore not deleted here.
}
|
||||
|
||||
// Clears the cached rate state and callback, then resets the wrapped encoder.
WebRtc_Word32
VCMGenericEncoder::Reset()
{
    _bitRate = 0;
    _frameRate = 0;
    _VCMencodedFrameCallback = NULL;
    return _encoder.Reset();
}
|
||||
|
||||
// Clears the cached rate state and callback, then releases the wrapped
// encoder's resources.
WebRtc_Word32 VCMGenericEncoder::Release()
{
    _bitRate = 0;
    _frameRate = 0;
    _VCMencodedFrameCallback = NULL;
    return _encoder.Release();
}
|
||||
|
||||
// Initializes the wrapped encoder and caches the start bit rate, max frame
// rate and codec type; propagates the codec type to the encode callback
// when one is already registered.
WebRtc_Word32
VCMGenericEncoder::InitEncode(const VideoCodec* settings, WebRtc_Word32 numberOfCores, WebRtc_UWord32 maxPayloadSize)
{
    _bitRate = settings->startBitrate;
    _frameRate = settings->maxFramerate;
    _codecType = settings->codecType;
    if (_VCMencodedFrameCallback != NULL)
    {
        _VCMencodedFrameCallback->SetCodecType(_codecType);
    }
    return _encoder.InitEncode(settings, numberOfCores, maxPayloadSize);
}
|
||||
|
||||
WebRtc_Word32
VCMGenericEncoder::Encode(const VideoFrame& inputFrame, const void* codecSpecificInfo, FrameType frameType)
{
    // Wrap the input frame's buffer in a RawImage (no copy) and forward it
    // to the wrapped encoder together with the requested frame type.
    RawImage rawImage(inputFrame.Buffer(), inputFrame.Length(), inputFrame.Size());
    rawImage._width = inputFrame.Width();
    rawImage._height = inputFrame.Height();
    rawImage._timeStamp = inputFrame.TimeStamp();
    return _encoder.Encode(rawImage, codecSpecificInfo, VCMEncodedFrame::ConvertFrameType(frameType));
}
|
||||
|
||||
// Forwards the current packet-loss estimate to the wrapped encoder.
WebRtc_Word32
VCMGenericEncoder::SetPacketLoss(WebRtc_Word32 packetLoss)
{
    return _encoder.SetPacketLoss(packetLoss);
}
|
||||
|
||||
// Updates the wrapped encoder's target bit rate and frame rate; the
// cached values are only updated when the encoder accepts the change.
WebRtc_Word32
VCMGenericEncoder::SetRates(WebRtc_UWord32 newBitRate, WebRtc_UWord32 frameRate)
{
    WebRtc_Word32 ret = _encoder.SetRates(newBitRate, frameRate);
    if (ret < 0)
    {
        return ret;
    }
    _bitRate = newBitRate;
    _frameRate = frameRate;
    return VCM_OK;
}
|
||||
|
||||
WebRtc_Word32
VCMGenericEncoder::CodecConfigParameters(WebRtc_UWord8* buffer, WebRtc_Word32 size)
{
    // Both success and error codes are propagated unchanged, so the wrapped
    // encoder's return value can be forwarded directly.
    return _encoder.CodecConfigParameters(buffer, size);
}
|
||||
|
||||
// Last accepted target bit rate (set by InitEncode/SetRates).
WebRtc_UWord32 VCMGenericEncoder::BitRate() const
{
    return _bitRate;
}
|
||||
|
||||
// Last accepted target frame rate (set by InitEncode/SetRates).
WebRtc_UWord32 VCMGenericEncoder::FrameRate() const
{
    return _frameRate;
}
|
||||
|
||||
// Enables/disables periodic key frame generation in the wrapped encoder.
WebRtc_Word32
VCMGenericEncoder::SetPeriodicKeyFrames(bool enable)
{
    return _encoder.SetPeriodicKeyFrames(enable);
}
|
||||
|
||||
// Requests a frame of the given type from an internal-source encoder by
// issuing an Encode call with an empty RawImage (no input picture).
WebRtc_Word32
VCMGenericEncoder::RequestFrame(FrameType frameType)
{
    RawImage image;
    return _encoder.Encode(image, NULL, VCMEncodedFrame::ConvertFrameType(frameType));
}
|
||||
|
||||
// Stores the encode-complete callback, pushes the current codec type and
// internal-source flag into it, and registers it with the wrapped encoder.
WebRtc_Word32
VCMGenericEncoder::RegisterEncodeCallback(VCMEncodedFrameCallback* VCMencodedFrameCallback)
{
    _VCMencodedFrameCallback = VCMencodedFrameCallback;

    _VCMencodedFrameCallback->SetCodecType(_codecType);
    _VCMencodedFrameCallback->SetInternalSource(_internalSource);
    return _encoder.RegisterEncodeCompleteCallback(_VCMencodedFrameCallback);
}
|
||||
|
||||
// True when the encoder captures its own frames (internal source).
bool
VCMGenericEncoder::InternalSource() const
{
    return _internalSource;
}
|
||||
|
||||
/***************************
|
||||
* Callback Implementation
|
||||
***************************/
|
||||
/***************************
* Callback Implementation
***************************/
// Fix: initialize _mediaOpt, _codecType and _internalSource, which were
// previously left uninitialized. Encoded() dereferences _mediaOpt and
// reads _internalSource, so an Encoded() call before SetMediaOpt()/
// SetInternalSource() hit indeterminate values.
VCMEncodedFrameCallback::VCMEncodedFrameCallback():
_sendCallback(),
_mediaOpt(NULL),
_encodedBytes(0),
_payloadType(0),
_codecType(kVideoCodecUnknown),
_internalSource(false),
_bitStreamAfterEncoder(NULL)
{
#ifdef DEBUG_ENCODER_BIT_STREAM
    // Optional debug dump of the raw encoder output bit stream.
    _bitStreamAfterEncoder = fopen("encoderBitStream.bit", "wb");
#endif
}
|
||||
|
||||
VCMEncodedFrameCallback::~VCMEncodedFrameCallback()
{
#ifdef DEBUG_ENCODER_BIT_STREAM
    // Close the debug bit stream dump opened in the constructor.
    fclose(_bitStreamAfterEncoder);
#endif
}
|
||||
|
||||
// Registers the packetization/transport callback that receives encoded data.
WebRtc_Word32
VCMEncodedFrameCallback::SetTransportCallback(VCMPacketizationCallback* transport)
{
    _sendCallback = transport;
    return VCM_OK;
}
|
||||
|
||||
// Called by the encoder when a frame has been encoded. Forwards the
// payload to the transport callback, updates media optimization
// statistics, and (for internal-source encoders) returns the drop signal.
// NOTE(review): fragmentationHeader defaults to NULL in the declaration
// but is dereferenced unconditionally below — confirm all encoders pass
// a non-NULL header.
WebRtc_Word32
VCMEncodedFrameCallback::Encoded(EncodedImage &encodedImage, const void* codecSpecificInfo,
                                 const RTPFragmentationHeader* fragmentationHeader)
{
    FrameType frameType = VCMEncodedFrame::ConvertFrameType(encodedImage._frameType);

    WebRtc_UWord32 encodedBytes = 0;
    if (_sendCallback != NULL)
    {
        encodedBytes = encodedImage._length;

        if (_bitStreamAfterEncoder != NULL)
        {
            // Debug: dump the raw encoded bit stream to file.
            fwrite(encodedImage._buffer, 1, encodedImage._length, _bitStreamAfterEncoder);
        }

        WebRtc_Word32 callbackReturn = _sendCallback->SendData(frameType,
                                                               _payloadType,
                                                               encodedImage._timeStamp,
                                                               encodedImage._buffer,
                                                               encodedBytes,
                                                               *fragmentationHeader);
        if (callbackReturn < 0)
        {
            return callbackReturn;
        }
    }
    else
    {
        // No transport registered yet; encoded data has nowhere to go.
        return VCM_UNINITIALIZED;
    }
    _encodedBytes = encodedBytes;
    _mediaOpt->UpdateWithEncodedData(_encodedBytes, frameType);
    if (_internalSource)
    {
        return _mediaOpt->DropFrame(); // Signal to encoder to drop next frame
    }

    return VCM_OK;
}
|
||||
|
||||
// Size in bytes of the most recently delivered encoded frame.
WebRtc_UWord32
VCMEncodedFrameCallback::EncodedBytes()
{
    return _encodedBytes;
}
|
||||
|
||||
// Sets the media optimization module that receives per-frame encode
// statistics (borrowed pointer; must outlive this callback).
void
VCMEncodedFrameCallback::SetMediaOpt(VCMMediaOptimization *mediaOpt)
{
    _mediaOpt = mediaOpt;
}
|
||||
|
||||
}
|
||||
139
modules/video_coding/main/source/generic_encoder.h
Normal file
139
modules/video_coding/main/source/generic_encoder.h
Normal file
@ -0,0 +1,139 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
|
||||
|
||||
#include "video_codec_interface.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
class VCMMediaOptimization;
|
||||
|
||||
/*************************************/
|
||||
/* VCMEncodeFrameCallback class */
|
||||
/***********************************/
|
||||
// Receives "encode complete" callbacks from the codec and forwards the
// encoded bitstream to a registered VCMPacketizationCallback, feeding the
// media-optimization module along the way.
class VCMEncodedFrameCallback : public EncodedImageCallback
{
public:
    VCMEncodedFrameCallback();
    virtual ~VCMEncodedFrameCallback();

    /*
    * Callback implementation - codec encode complete. Delivers the encoded
    * image to the send callback; returns VCM_OK, VCM_UNINITIALIZED (no send
    * callback registered) or a negative transport error.
    */
    WebRtc_Word32 Encoded(EncodedImage& encodedImage, const void* codecSpecificInfo = NULL,
                          const RTPFragmentationHeader* fragmentationHeader = NULL);
    /*
    * Get number of bytes in the most recently delivered encoded frame.
    */
    WebRtc_UWord32 EncodedBytes();
    /*
    * Register the transport (packetization) callback that will receive the
    * encoded buffers.
    */
    WebRtc_Word32 SetTransportCallback(VCMPacketizationCallback* transport);
    /**
    * Set the media optimization module (not owned) used for rate statistics.
    */
    void SetMediaOpt (VCMMediaOptimization* mediaOpt);

    // Simple configuration setters used by the codec database.
    void SetPayloadType(WebRtc_UWord8 payloadType) { _payloadType = payloadType; };
    void SetCodecType(VideoCodecType codecType) {_codecType = codecType;};
    void SetInternalSource(bool internalSource) { _internalSource = internalSource; };

private:
    VCMPacketizationCallback* _sendCallback;       // Transport sink; NULL until registered.
    VCMMediaOptimization*     _mediaOpt;           // Rate/media statistics (not owned).
    WebRtc_UWord32            _encodedBytes;       // Size of last delivered frame.
    WebRtc_UWord8             _payloadType;        // RTP payload type for SendData().
    VideoCodecType            _codecType;
    bool                      _internalSource;     // True if the encoder produces its own frames.
    FILE*                     _bitStreamAfterEncoder; // Optional debug dump; NULL when disabled.
};// end of VCMEncodeFrameCallback class
|
||||
|
||||
|
||||
/******************************/
|
||||
/* VCMGenericEncoder class */
|
||||
/******************************/
|
||||
// Thin wrapper owned by the VCM codec database that adapts an external
// VideoEncoder to the internal coding-module API and tracks the current
// rate settings.
class VCMGenericEncoder
{
    friend class VCMCodecDataBase;
public:
    VCMGenericEncoder(VideoEncoder& encoder, bool internalSource = false);
    ~VCMGenericEncoder();
    /**
    * Reset the encoder state, prepare for a new call.
    */
    WebRtc_Word32 Reset();
    /**
    * Free encoder memory.
    */
    WebRtc_Word32 Release();
    /**
    * Initialize the encoder with the information from the VideoCodec.
    */
    WebRtc_Word32 InitEncode(const VideoCodec* settings,
                             WebRtc_Word32 numberOfCores,
                             WebRtc_UWord32 maxPayloadSize);
    /**
    * Encode raw image.
    * inputFrame        : Frame containing raw image.
    * codecSpecificInfo : Specific codec data.
    * frameType         : The requested frame type to encode.
    */
    WebRtc_Word32 Encode(const VideoFrame& inputFrame,
                         const void* codecSpecificInfo,
                         FrameType frameType);
    /**
    * Set new target bit rate and frame rate.
    * Return Value: new bit rate if OK, otherwise <0.
    */
    WebRtc_Word32 SetRates(WebRtc_UWord32 newBitRate, WebRtc_UWord32 frameRate);
    /**
    * Set a new packet loss rate.
    */
    WebRtc_Word32 SetPacketLoss(WebRtc_Word32 packetLoss);
    // Retrieve codec configuration data (e.g. out-of-band headers) into buffer.
    WebRtc_Word32 CodecConfigParameters(WebRtc_UWord8* buffer, WebRtc_Word32 size);
    /**
    * Register the callback which will be called to deliver the encoded buffers.
    */
    WebRtc_Word32 RegisterEncodeCallback(VCMEncodedFrameCallback* VCMencodedFrameCallback);
    /**
    * Get current target encoder bit rate.
    */
    WebRtc_UWord32 BitRate() const;
    /**
    * Get current target encoder frame rate.
    */
    WebRtc_UWord32 FrameRate() const;

    WebRtc_Word32 SetPeriodicKeyFrames(bool enable);

    // Request that a frame of frameType be produced (internal-source encoders).
    WebRtc_Word32 RequestFrame(FrameType frameType);

    // True when the encoder captures its own frames (no external raw input).
    bool InternalSource() const;

private:
    VideoEncoder&            _encoder;                 // Wrapped external encoder.
    VideoCodecType           _codecType;
    VCMEncodedFrameCallback* _VCMencodedFrameCallback; // Encode-complete sink (not owned).
    WebRtc_UWord32           _bitRate;                 // Last target bit rate set.
    WebRtc_UWord32           _frameRate;               // Last target frame rate set.
    bool                     _internalSource;
}; // end of VCMGenericEncoder class
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
|
||||
120
modules/video_coding/main/source/inter_frame_delay.cc
Normal file
120
modules/video_coding/main/source/inter_frame_delay.cc
Normal file
@ -0,0 +1,120 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "inter_frame_delay.h"
|
||||
#include "tick_time.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Construct a delay estimator in its freshly-reset state; all bookkeeping
// is delegated to Reset().
VCMInterFrameDelay::VCMInterFrameDelay() { Reset(); }
|
||||
|
||||
// Resets the delay estimate
|
||||
void
|
||||
VCMInterFrameDelay::Reset()
|
||||
{
|
||||
_zeroWallClock = VCMTickTime::MillisecondTimestamp();
|
||||
_wrapArounds = 0;
|
||||
_prevWallClock = 0;
|
||||
_prevTimestamp = 0;
|
||||
_dTS = 0;
|
||||
}
|
||||
|
||||
// Calculates the delay of a frame with the given timestamp.
|
||||
// This method is called when the frame is complete.
|
||||
// Calculates the inter-frame delay of a frame with the given RTP timestamp,
// i.e. the difference between the wall-clock inter-arrival time and the
// RTP-timestamp difference of two consecutive complete frames.
//
// timestamp        : RTP timestamp of the received frame.
// delay            : [out] estimated delay in ms (0 on first frame / reorder).
// currentWallClock : current time in ms; pass -1 (default) to sample the
//                    clock internally (non-default used only by tests).
// Returns false when the frame is reordered (older than the previous one),
// true otherwise.
bool
VCMInterFrameDelay::CalculateDelay(WebRtc_UWord32 timestamp,
                                   WebRtc_Word64 *delay,
                                   WebRtc_Word64 currentWallClock /* = -1 */)
{
    if (currentWallClock <= -1)
    {
        currentWallClock = VCMTickTime::MillisecondTimestamp();
    }

    if (_prevWallClock == 0)
    {
        // First set of data, initialization, wait for next frame
        _prevWallClock = currentWallClock;
        _prevTimestamp = timestamp;
        *delay = 0;
        return true;
    }

    // Detect 32-bit RTP timestamp wrap-around before computing differences.
    WebRtc_Word32 prevWrapArounds = _wrapArounds;
    CheckForWrapArounds(timestamp);

    // This will be -1 for backward wrap arounds and +1 for forward wrap arounds
    WebRtc_Word32 wrapAroundsSincePrev = _wrapArounds - prevWrapArounds;

    // Account for reordering in jitter variance estimate in the future?
    // Note that this also captures incomplete frames which are grabbed
    // for decoding after a later frame has been complete, i.e. real
    // packet losses.
    if ((wrapAroundsSincePrev == 0 && timestamp < _prevTimestamp) || wrapAroundsSincePrev < 0)
    {
        *delay = 0;
        return false;
    }

    // Compute the compensated timestamp difference and convert it to ms and
    // round it to closest integer. The 90.0 divisor converts 90 kHz RTP
    // video-clock ticks to milliseconds.
    _dTS = static_cast<WebRtc_Word64>((timestamp + wrapAroundsSincePrev *
                (static_cast<WebRtc_Word64>(1)<<32) - _prevTimestamp) / 90.0 + 0.5);

    // frameDelay is the difference of dT and dTS -- i.e. the difference of
    // the wall clock time difference and the timestamp difference between
    // two following frames.
    *delay = static_cast<WebRtc_Word64>(currentWallClock - _prevWallClock - _dTS);

    _prevTimestamp = timestamp;
    _prevWallClock = currentWallClock;

    return true;
}
|
||||
|
||||
// Returns the current difference between incoming timestamps
|
||||
// Wrap-around-compensated difference in ms between the two most recent
// incoming timestamps; clamped to 0 if the stored difference is negative.
WebRtc_UWord32 VCMInterFrameDelay::CurrentTimeStampDiffMs() const
{
    return (_dTS < 0) ? 0 : static_cast<WebRtc_UWord32>(_dTS);
}
|
||||
|
||||
// Investigates if the timestamp clock has overflowed since the last timestamp and
|
||||
// keeps track of the number of wrap arounds since reset.
|
||||
// Investigates if the 32-bit RTP timestamp clock has wrapped since the last
// frame and keeps a signed count of wrap-arounds since Reset(). Relies on
// unsigned subtraction followed by a signed reinterpretation: a true wrap
// produces a small difference that stays positive after the cast, whereas
// plain reordering produces a large one that goes negative.
void
VCMInterFrameDelay::CheckForWrapArounds(WebRtc_UWord32 timestamp)
{
    if (timestamp < _prevTimestamp)
    {
        // This difference will probably be less than -2^31 if we have had a wrap around
        // (e.g. timestamp = 1, _previousTimestamp = 2^32 - 1). Since it is cast to a Word32,
        // it should be positive.
        if (static_cast<WebRtc_Word32>(timestamp - _prevTimestamp) > 0)
        {
            // Forward wrap around
            _wrapArounds++;
        }
    }
    // This difference will probably be less than -2^31 if we have had a backward wrap around.
    // Since it is cast to a Word32, it should be positive.
    else if (static_cast<WebRtc_Word32>(_prevTimestamp - timestamp) > 0)
    {
        // Backward wrap around
        _wrapArounds--;
    }
}
|
||||
|
||||
}
|
||||
66
modules/video_coding/main/source/inter_frame_delay.h
Normal file
66
modules/video_coding/main/source/inter_frame_delay.h
Normal file
@ -0,0 +1,66 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Estimates per-frame network delay as the difference between wall-clock
// inter-arrival time and RTP-timestamp spacing, compensating for 32-bit
// timestamp wrap-around. Feeds the jitter estimator.
class VCMInterFrameDelay
{
public:
    VCMInterFrameDelay();

    // Resets the estimate. Zeros are given as parameters.
    void Reset();

    // Calculates the delay of a frame with the given timestamp.
    // This method is called when the frame is complete.
    //
    // Input:
    //          - timestamp         : RTP timestamp of a received frame
    //          - *delay            : Pointer to memory where the result should be stored
    //          - currentWallClock  : The current time in milliseconds.
    //                                Should be -1 for normal operation, only used for testing.
    // Return value                 : true if OK, false when reordered timestamps
    bool CalculateDelay(WebRtc_UWord32 timestamp,
                        WebRtc_Word64 *delay,
                        WebRtc_Word64 currentWallClock = -1);

    // Returns the current difference between incoming timestamps
    //
    // Return value                 : Wrap-around compensated difference between incoming
    //                                timestamps.
    WebRtc_UWord32 CurrentTimeStampDiffMs() const;

private:
    // Controls if the RTP timestamp counter has had a wrap around
    // between the current and the previously received frame.
    //
    // Input:
    //          - timestamp         : RTP timestamp of the current frame.
    void CheckForWrapArounds(WebRtc_UWord32 timestamp);

    WebRtc_Word64 _zeroWallClock; // Local timestamp of the first video packet received
    WebRtc_Word32 _wrapArounds;   // Number of wrapArounds detected
    // The previous timestamp passed to the delay estimate
    WebRtc_UWord32 _prevTimestamp;
    // The previous wall clock timestamp used by the delay estimate
    WebRtc_Word64 _prevWallClock;
    // Wrap-around compensated difference between incoming timestamps
    WebRtc_Word64 _dTS;
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
|
||||
57
modules/video_coding/main/source/internal_defines.h
Normal file
57
modules/video_coding/main/source/internal_defines.h
Normal file
@ -0,0 +1,57 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Keep only the low 32 bits of x.
#define MASK_32_BITS(x) (0xFFFFFFFF & (x))

// Truncate a 64-bit (possibly signed) value to its low 32 bits, returned as
// an unsigned 32-bit integer. Used e.g. to derive RTP-sized timestamps from
// 64-bit clocks.
inline WebRtc_UWord32 MaskWord64ToUWord32(WebRtc_Word64 w64)
{
    return static_cast<WebRtc_UWord32>(MASK_32_BITS(w64));
}
|
||||
|
||||
// Max/min helper macros. The whole expansion is parenthesized so the macros
// compose safely inside larger expressions: without the outer parentheses,
// e.g. "1 + VCM_MAX(a, b)" would parse as "(1 + (a > b)) ? a : b".
// Arguments are still evaluated twice -- do not pass expressions with
// side effects.
#define VCM_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define VCM_MIN(a, b) (((a) < (b)) ? (a) : (b))
|
||||
|
||||
// Default codec settings (CIF @ 30 fps) and rate floor in kbit/s.
#define VCM_DEFAULT_CODEC_WIDTH 352
#define VCM_DEFAULT_CODEC_HEIGHT 288
#define VCM_DEFAULT_FRAME_RATE 30
#define VCM_MIN_BITRATE 30

// Helper macros for creating the static codec list. Each index expansion is
// fully parenthesized: the previous unparenthesized "X + 1" forms broke when
// the macro was used inside a larger expression (e.g.
// "2 * VCM_NUM_VIDEO_CODECS_AVAILABLE" expanded to "2 * idx + 1").
#define VCM_NO_CODEC_IDX (-1)
#ifdef VIDEOCODEC_VP8
    #define VCM_VP8_IDX (VCM_NO_CODEC_IDX + 1)
#else
    #define VCM_VP8_IDX VCM_NO_CODEC_IDX
#endif
#ifdef VIDEOCODEC_I420
    #define VCM_I420_IDX (VCM_VP8_IDX + 1)
#else
    #define VCM_I420_IDX VCM_VP8_IDX
#endif
// Total number of compiled-in codecs (one past the last valid index).
#define VCM_NUM_VIDEO_CODECS_AVAILABLE (VCM_I420_IDX + 1)

#define VCM_NO_RECEIVER_ID 0
|
||||
|
||||
// Pack a VCM instance id and a receiver id into a single 32-bit trace id:
// vcmId occupies the bits above 16, receiverId the low bits. Used as the
// module id for WEBRTC_TRACE calls.
inline WebRtc_Word32 VCMId(const WebRtc_Word32 vcmId, const WebRtc_Word32 receiverId = 0)
{
    return static_cast<WebRtc_Word32>((vcmId << 16) + receiverId);
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
|
||||
1881
modules/video_coding/main/source/jitter_buffer.cc
Normal file
1881
modules/video_coding/main/source/jitter_buffer.cc
Normal file
File diff suppressed because it is too large
Load Diff
221
modules/video_coding/main/source/jitter_buffer.h
Normal file
221
modules/video_coding/main/source/jitter_buffer.h
Normal file
@ -0,0 +1,221 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
#include "critical_section_wrapper.h"
|
||||
#include "module_common_types.h"
|
||||
#include "video_coding_defines.h"
|
||||
#include "inter_frame_delay.h"
|
||||
#include "event.h"
|
||||
#include "frame_list.h"
|
||||
#include "jitter_buffer_common.h"
|
||||
#include "jitter_estimator.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// forward declarations
|
||||
class VCMFrameBuffer;
|
||||
class VCMPacket;
|
||||
class VCMEncodedFrame;
|
||||
|
||||
// Plain data holder for one jitter measurement: a frame's RTP timestamp,
// its accumulated size in bytes, and the arrival time (ms) of its latest
// packet (-1 until a packet has been received).
class VCMJitterSample
{
public:
    VCMJitterSample() : timestamp(0), frameSize(0), latestPacketTime(-1) {}
    WebRtc_UWord32 timestamp;        // RTP timestamp of the frame being measured.
    WebRtc_UWord32 frameSize;        // Bytes received so far for the frame.
    WebRtc_Word64 latestPacketTime;  // Wall-clock ms of the newest packet; -1 if none.
};
|
||||
|
||||
// Receiver-side buffer that assembles incoming RTP packets into frames,
// reorders them, estimates jitter, and hands complete (or, on request,
// incomplete) frames to the decoder. Also produces NACK lists for
// retransmission requests.
class VCMJitterBuffer
{
public:
    VCMJitterBuffer(WebRtc_Word32 vcmId = -1,
                    WebRtc_Word32 receiverId = -1,
                    bool master = true);
    virtual ~VCMJitterBuffer();

    VCMJitterBuffer& operator=(const VCMJitterBuffer& rhs);

    // We need a start and stop to break out of the wait event
    // used in GetCompleteFrameForDecoding
    void Start();
    void Stop();
    bool Running() const;

    // Empty the Jitter buffer of all its data
    void Flush();

    // Statistics, Get received key and delta frames
    WebRtc_Word32 GetFrameStatistics(WebRtc_UWord32& receivedDeltaFrames,
                                     WebRtc_UWord32& receivedKeyFrames) const;

    // Statistics, Calculate frame and bit rates
    WebRtc_Word32 GetUpdate(WebRtc_UWord32& frameRate, WebRtc_UWord32& bitRate);

    // Wait for the first packet in the next frame to arrive, blocks for <= maxWaitTimeMS ms
    WebRtc_Word64 GetNextTimeStamp(WebRtc_UWord32 maxWaitTimeMS,
                                   FrameType& incomingFrameType,
                                   WebRtc_Word64& renderTimeMs);

    // Will the packet sequence be complete if the next frame is grabbed
    // for decoding right now? That is, have we lost a frame between the
    // last decoded frame and the next, or is the next frame missing one
    // or more packets?
    bool CompleteSequenceWithNextFrame();

    // Wait maxWaitTimeMS for a complete frame to arrive. After timeout NULL is returned.
    VCMEncodedFrame* GetCompleteFrameForDecoding(WebRtc_UWord32 maxWaitTimeMS);

    // Get a frame for decoding (even an incomplete) without delay.
    VCMEncodedFrame* GetFrameForDecoding();

    VCMEncodedFrame* GetFrameForDecodingNACK();

    // Release frame (when done with decoding)
    void ReleaseFrame(VCMEncodedFrame* frame);

    // Get frame to use for this timestamp
    WebRtc_Word32 GetFrame(const VCMPacket& packet, VCMEncodedFrame*&);
    VCMEncodedFrame* GetFrame(const VCMPacket& packet); // deprecated

    // Returns the time in ms when the latest packet was inserted into the frame.
    // Retransmitted is set to true if any of the packets belonging to the frame
    // has been retransmitted.
    WebRtc_Word64 LastPacketTime(VCMEncodedFrame* frame, bool& retransmitted) const;

    // Insert a packet into a frame
    VCMFrameBufferEnum InsertPacket(VCMEncodedFrame* frame, const VCMPacket& packet);

    // Sync
    WebRtc_UWord32 GetEstimatedJitterMS();
    void UpdateRtt(WebRtc_UWord32 rttMs);

    // NACK
    void SetNackStatus(bool enable); // Enable/disable nack
    bool GetNackStatus();            // Get nack status (enabled/disabled)
    // Get list of missing sequence numbers (size in number of elements)
    WebRtc_UWord16* GetNackList(WebRtc_UWord16& nackSize, bool& listExtended);

    WebRtc_Word64 LastDecodedTimestamp() const;
    // Return whichever of the two timestamps is newer, wrap-around aware.
    static WebRtc_UWord32 LatestTimestamp(const WebRtc_UWord32 existingTimestamp,
                                          const WebRtc_UWord32 newTimestamp);

protected:
    // Misc help functions
    // Recycle (release) frame, used if we didn't receive whole frame
    void RecycleFrame(VCMFrameBuffer* frame);
    void ReleaseFrameInternal(VCMFrameBuffer* frame);
    // Flush and reset the jitter buffer. Call under critical section.
    void FlushInternal();
    VCMFrameListItem* FindOldestSequenceNum() const;

    // Help functions for insert packet
    // Get empty frame, creates new (i.e. increases JB size) if necessary
    VCMFrameBuffer* GetEmptyFrame();
    // Recycle oldest frames up to a key frame, used if JB is completely full
    bool RecycleFramesUntilKeyFrame();
    // Update frame state (set as complete or reconstructable if conditions are met)
    void UpdateFrameState(VCMFrameBuffer* frameListItem);

    // Help functions for getting a frame
    // Find oldest complete frame, used for getting next frame to decode
    VCMFrameListItem* FindOldestCompleteContinuousFrame();

    // Check if a frame is missing the markerbit but is complete
    bool CheckForCompleteFrame(VCMFrameListItem* oldestFrameItem);

    void CleanUpOldFrames();
    void CleanUpSizeZeroFrames();

    void VerifyAndSetPreviousFrameLost(VCMFrameBuffer& frame);
    bool IsPacketRetransmitted(const VCMPacket& packet) const;
    void UpdateJitterAndDelayEstimates(VCMJitterSample& sample, bool incompleteFrame);
    void UpdateJitterAndDelayEstimates(VCMFrameBuffer& frame, bool incompleteFrame);
    void UpdateJitterAndDelayEstimates(WebRtc_Word64 latestPacketTimeMs,
                                       WebRtc_UWord32 timestamp,
                                       WebRtc_UWord32 frameSize,
                                       bool incompleteFrame);
    void UpdateOldJitterSample(const VCMPacket& packet);
    WebRtc_UWord32 GetEstimatedJitterMsInternal();

    // NACK help
    WebRtc_UWord16* CreateNackList(WebRtc_UWord16& nackSize, bool& listExtended);
    WebRtc_Word32 GetLowHighSequenceNumbers(WebRtc_Word32& lowSeqNum,
                                            WebRtc_Word32& highSeqNum) const;

    void UpdateLastDecodedWithFiller(const VCMPacket& packet);

private:
    static bool FrameEqualTimestamp(VCMFrameBuffer* frame, const void* timestamp);
    static bool CompleteKeyFrameCriteria(VCMFrameBuffer* frame, const void* notUsed);

    WebRtc_Word32 _vcmId;
    WebRtc_Word32 _receiverId;
    bool _running; // If we are running (have started) or not
    CriticalSectionWrapper& _critSect;
    bool _master;
    // Event to signal when we have a frame ready for decoder
    VCMEvent _frameEvent;
    // Event to signal when we have received a packet
    VCMEvent _packetEvent;
    WebRtc_Word32 _maxNumberOfFrames; // Number of allocated frames
    // Array of pointers to the frames in JB
    VCMFrameBuffer* _frameBuffers[kMaxNumberOfFrames];
    VCMFrameListTimestampOrderAsc _frameBuffersTSOrder;

    // timing
    // Sequence number of last frame that was given to decoder
    WebRtc_Word32 _lastDecodedSeqNum;
    // Timestamp of last frame that was given to decoder
    WebRtc_Word64 _lastDecodedTimeStamp;

    // Statistics
    // Frame counter for each type (key, delta, golden, key-delta)
    WebRtc_UWord8 _receiveStatistics[4];
    // Latest calculated frame rates of incoming stream
    WebRtc_UWord8 _incomingFrameRate;
    WebRtc_UWord32 _incomingFrameCount; // Frame counter, reset in GetUpdate
    // Real time for last _frameCount reset
    WebRtc_Word64 _timeLastIncomingFrameCount;
    WebRtc_UWord32 _incomingBitCount; // Received bits counter, reset in GetUpdate
    WebRtc_UWord32 _incomingBitRate;
    WebRtc_UWord32 _dropCount; // Frame drop counter
    // Number of frames in a row that have been too old
    WebRtc_UWord32 _numConsecutiveOldFrames;
    // Number of packets in a row that have been too old
    WebRtc_UWord32 _numConsecutiveOldPackets;
    // Filters for estimating jitter
    VCMJitterEstimator _jitterEstimate;
    // Calculates network delays used for jitter calculations
    VCMInterFrameDelay _delayEstimate;
    VCMJitterSample _waitingForCompletion;

    // NACK
    bool _usingNACK; // If we are using nack
    // Holds the internal nack list (the missing sequence numbers)
    WebRtc_Word32 _NACKSeqNumInternal[kNackHistoryLength];
    WebRtc_UWord16 _NACKSeqNum[kNackHistoryLength];
    WebRtc_UWord32 _NACKSeqNumLength;

    bool _missingMarkerBits;
    bool _firstPacket;
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_
|
||||
66
modules/video_coding/main/source/jitter_buffer_common.h
Normal file
66
modules/video_coding/main/source/jitter_buffer_common.h
Normal file
@ -0,0 +1,66 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Hard cap on frame buffers the jitter buffer may allocate.
enum { kMaxNumberOfFrames = 100 };
// Initial number of frame buffers: 6 frames are approximately 198 ms at
// default rates; we need at least one more for processing.
enum { kStartNumberOfFrames = 6 };
// Frames older than this relative to "now" are considered stale (ms).
enum { kMaxVideoDelayMs = 2000 };

// Operational limits for the jitter buffer.
enum VCMJitterBufferEnum
{
    kMaxConsecutiveOldFrames  = 60,      // Flush after this many stale frames in a row.
    kMaxConsecutiveOldPackets = 300,     // Flush after this many stale packets in a row.
    kMaxPacketsInJitterBuffer = 800,
    kBufferIncStepSizeBytes   = 30000,   // Frame-buffer growth step, >20 packets.
    kMaxJBFrameSizeBytes      = 4000000  // Sanity limit: don't go above 4 Mbyte per frame.
};

// Result codes from frame-buffer operations (negative = error).
enum VCMFrameBufferEnum
{
    kStateError      = -4,
    kTimeStampError  = -2,
    kSizeError       = -1,
    kNoError         = 0,
    kIncomplete      = 1, // Frame incomplete
    kFirstPacket     = 2, // First packet of a new frame was inserted.
    kCompleteSession = 3, // At least one layer in the frame complete.
    kDuplicatePacket = 5  // We're receiving a duplicate packet.
};

// Life-cycle states of a frame buffer inside the jitter buffer.
enum VCMFrameBufferStateEnum
{
    kStateFree,       // Unused frame in the JB
    kStateEmpty,      // Frame popped by the RTP receiver
    kStateIncomplete, // Frame that has one or more packet(s) stored
    kStateComplete,   // Frame that has all packets
    kStateDecoding    // Frame popped by the decoding thread
};

// Length of an H.264 Annex-B start code.
enum { kH264StartCodeLengthBytes = 4};

// Used to indicate if a received packet contains a complete NALU (or equivalent).
enum VCMNaluCompleteness
{
    kNaluUnset=0,    // Packet has not been filled.
    kNaluComplete=1, // Packet can be decoded as is.
    kNaluStart,      // Packet contains the beginning of a NALU.
    kNaluIncomplete, // Packet is neither beginning nor end of a NALU.
    kNaluEnd         // Packet is the end of a NALU.
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
|
||||
439
modules/video_coding/main/source/jitter_estimator.cc
Normal file
439
modules/video_coding/main/source/jitter_estimator.cc
Normal file
@ -0,0 +1,439 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "trace.h"
|
||||
#include "internal_defines.h"
|
||||
#include "jitter_estimator.h"
|
||||
#include "rtt_filter.h"
|
||||
#include "tick_time.h"
|
||||
|
||||
#include <math.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Construct a jitter estimator with its tuning constants. The filter state
// itself is initialized by Reset(). vcmId/receiverId are only used for
// trace identification.
VCMJitterEstimator::VCMJitterEstimator(WebRtc_Word32 vcmId, WebRtc_Word32 receiverId) :
_vcmId(vcmId),
_receiverId(receiverId),
_phi(0.97),                      // Forgetting factor for the frame-size filter.
_psi(0.9999),                    // Decay factor for the max-frame-size estimate.
_alphaCountMax(400),             // Caps the adaptation rate of the noise filter.
_beta(0.9994),                   // Filter factor for the post-processed estimate.
_thetaLow(0.000001),             // Lower bound for the estimated line slope.
_nackLimit(3),                   // This should be 1 if the old
                                 // retransmission estimate is used.
_nackWindowMS(15000),            // Window over which NACKs influence the estimate.
_numStdDevDelayOutlier(15),      // Delay samples beyond this many std devs are outliers.
_numStdDevFrameSizeOutlier(3),   // Frame sizes beyond this many std devs are key frames.
_noiseStdDevs(2.33),             // ~Less than 1% chance
                                 // (look up in normal distribution table)...
_noiseStdDevOffset(30.0),        // ...of getting 30 ms freezes
_rttFilter(vcmId, receiverId)
{
    Reset();
}
|
||||
|
||||
// Copy-assignment: duplicates the measurement state of another estimator
// (used to keep master/slave jitter buffers in sync).
// NOTE(review): the Kalman state _theta[] and the noise variance _varNoise
// are NOT copied here -- the target keeps its own values. Confirm this is
// intentional (both sides are Reset() to the same values at construction).
VCMJitterEstimator&
VCMJitterEstimator::operator=(const VCMJitterEstimator& rhs)
{
    if (this != &rhs)
    {
        // Covariance matrices are fixed-size arrays; copy raw bytes.
        memcpy(_thetaCov, rhs._thetaCov, sizeof(_thetaCov));
        memcpy(_Qcov, rhs._Qcov, sizeof(_Qcov));

        _vcmId = rhs._vcmId;
        _receiverId = rhs._receiverId;
        _avgFrameSize = rhs._avgFrameSize;
        _varFrameSize = rhs._varFrameSize;
        _maxFrameSize = rhs._maxFrameSize;
        _fsSum = rhs._fsSum;
        _fsCount = rhs._fsCount;
        _lastUpdateT = rhs._lastUpdateT;
        _prevEstimate = rhs._prevEstimate;
        _prevFrameSize = rhs._prevFrameSize;
        _avgNoise = rhs._avgNoise;
        _alphaCount = rhs._alphaCount;
        _filterJitterEstimate = rhs._filterJitterEstimate;
        _startupCount = rhs._startupCount;
        _latestNackTimestamp = rhs._latestNackTimestamp;
        _nackCount = rhs._nackCount;
        _rttFilter = rhs._rttFilter;
    }
    return *this;
}
|
||||
|
||||
// Resets the JitterEstimate
|
||||
// Resets the JitterEstimate: restores the Kalman filter state, noise model
// and frame-size statistics to their startup values.
void
VCMJitterEstimator::Reset()
{
    // Initial line slope: 1 / (512 kbit/s in bytes per ms) -- i.e. assume a
    // 512 kbit/s channel until measurements say otherwise.
    _theta[0] = 1/(512e3/8);
    _theta[1] = 0;
    _varNoise = 4.0;   // Initial variance of the random-jitter noise (ms^2).

    // Estimate-error covariance: small uncertainty on the slope, large on
    // the offset.
    _thetaCov[0][0] = 1e-4;
    _thetaCov[1][1] = 1e2;
    _thetaCov[0][1] = _thetaCov[1][0] = 0;
    // Process-noise covariance of the Kalman filter.
    _Qcov[0][0] = 2.5e-10;
    _Qcov[1][1] = 1e-10;
    _Qcov[0][1] = _Qcov[1][0] = 0;
    _avgFrameSize = 500;   // Startup frame-size average/max (bytes).
    _maxFrameSize = 500;
    _varFrameSize = 100;
    _lastUpdateT = -1;
    _prevEstimate = -1.0;
    _prevFrameSize = 0;
    _avgNoise = 0.0;
    _alphaCount = 1;
    _filterJitterEstimate = 0.0;
    _latestNackTimestamp = 0;
    _nackCount = 0;
    _fsSum = 0;
    _fsCount = 0;
    _startupCount = 0;
    _rttFilter.Reset();
}
|
||||
|
||||
void
|
||||
VCMJitterEstimator::ResetNackCount()
|
||||
{
|
||||
_nackCount = 0;
|
||||
}
|
||||
|
||||
// Updates the estimates with the new measurements
|
||||
// Updates the jitter estimate with a new (frame delay, frame size) sample.
//
// frameDelayMS    : inter-frame delay from VCMInterFrameDelay (ms).
// frameSizeBytes  : total size of the frame; 0 samples are ignored.
// incompleteFrame : true if the frame was decoded with packets missing.
//
// NOTE(review): the trace format uses "%d" for frameDelayMS, which is a
// WebRtc_Word64 -- a 64-bit value printed with a 32-bit specifier; confirm
// the trace layer handles this.
void
VCMJitterEstimator::UpdateEstimate(WebRtc_Word64 frameDelayMS, WebRtc_UWord32 frameSizeBytes,
                                   bool incompleteFrame /* = false */)
{
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
                 VCMId(_vcmId, _receiverId),
                 "Jitter estimate updated with: frameSize=%d frameDelayMS=%d",
                 frameSizeBytes, frameDelayMS);
    if (frameSizeBytes == 0)
    {
        return;
    }
    // Size difference to the previous frame, used as the Kalman filter input.
    int deltaFS = frameSizeBytes - _prevFrameSize;
    // During startup, accumulate sizes to seed the average frame size.
    if (_fsCount < kFsAccuStartupSamples)
    {
        _fsSum += frameSizeBytes;
        _fsCount++;
    }
    else if (_fsCount == kFsAccuStartupSamples)
    {
        // Give the frame size filter its initial value from the startup sum.
        _avgFrameSize = static_cast<double>(_fsSum) /
                        static_cast<double>(_fsCount);
        _fsCount++;
    }
    if (!incompleteFrame || frameSizeBytes > _avgFrameSize)
    {
        // Exponential moving average of frame size (forgetting factor _phi).
        double avgFrameSize = _phi * _avgFrameSize +
                              (1 - _phi) * frameSizeBytes;
        if (frameSizeBytes < _avgFrameSize + 2 * sqrt(_varFrameSize))
        {
            // Only update the average frame size if this sample wasn't a
            // key frame
            _avgFrameSize = avgFrameSize;
        }
        // Update the variance anyway since we want to capture cases where we only get
        // key frames.
        _varFrameSize = VCM_MAX(_phi * _varFrameSize + (1 - _phi) *
                                (frameSizeBytes - avgFrameSize) *
                                (frameSizeBytes - avgFrameSize), 1.0);
    }

    // Update max frameSize estimate
    _maxFrameSize = VCM_MAX(_psi * _maxFrameSize, static_cast<double>(frameSizeBytes));

    if (_prevFrameSize == 0)
    {
        // No previous frame yet: just record the size, can't form a delta.
        _prevFrameSize = frameSizeBytes;
        return;
    }
    _prevFrameSize = frameSizeBytes;

    // Only update the Kalman filter if the sample is not considered
    // an extreme outlier. Even if it is an extreme outlier from a
    // delay point of view, if the frame size also is large the
    // deviation is probably due to an incorrect line slope.
    // NOTE(review): unqualified abs() on a double can resolve to the integer
    // overload depending on included headers, truncating the deviation;
    // fabs() would be unambiguous -- confirm intended behavior.
    double deviation = DeviationFromExpectedDelay(frameDelayMS, deltaFS);

    if (abs(deviation) < _numStdDevDelayOutlier * sqrt(_varNoise) ||
        frameSizeBytes > _avgFrameSize + _numStdDevFrameSizeOutlier * sqrt(_varFrameSize))
    {
        // Update the variance of the deviation from the
        // line given by the Kalman filter
        EstimateRandomJitter(deviation, incompleteFrame);
        // Prevent updating with frames which have been congested by a large
        // frame, and therefore arrives almost at the same time as that frame.
        // This can occur when we receive a large frame (key frame) which
        // has been delayed. The next frame is of normal size (delta frame),
        // and thus deltaFS will be << 0. This removes all frame samples
        // which arrives after a key frame.
        if ((!incompleteFrame || deviation >= 0.0) &&
            static_cast<double>(deltaFS) > - 0.25 * _maxFrameSize)
        {
            // Update the Kalman filter with the new data
            KalmanEstimateChannel(frameDelayMS, deltaFS);
        }
    }
    else
    {
        // Outlier: feed the noise filter a clamped sample instead of the
        // raw deviation so a single spike can't blow up the variance.
        int nStdDev = (deviation >= 0) ? _numStdDevDelayOutlier : -_numStdDevDelayOutlier;
        EstimateRandomJitter(nStdDev * sqrt(_varNoise), incompleteFrame);
    }
    // Post process the total estimated jitter once warmed up.
    if (_startupCount >= kStartupDelaySamples)
    {
        PostProcessEstimate();
    }
    else
    {
        _startupCount++;
    }
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                 "Framesize statistics: max=%f average=%f", _maxFrameSize, _avgFrameSize);
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                 "The estimated slope is: theta=(%f, %f)", _theta[0], _theta[1]);
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                 "Random jitter: mean=%f variance=%f", _avgNoise, _varNoise);
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                 "Current jitter estimate: %f", _filterJitterEstimate);
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                 "Current max RTT: %u", _rttFilter.RttMs());
}
|
||||
|
||||
// Updates the nack/packet ratio
|
||||
void
|
||||
VCMJitterEstimator::UpdateNackEstimate(bool retransmitted, WebRtc_Word64 /*wallClockMS = -1*/)
|
||||
{
|
||||
// Simplified since it seems to be hard to be sure if a
|
||||
// packet actually has been retransmitted or not, resulting
|
||||
// in a delay which varies up and down with one RTT.
|
||||
// The solution is to wait until _nackLimit retransmitts
|
||||
// has been received, then always add an RTT to the estimate.
|
||||
if (retransmitted && _nackCount < _nackLimit)
|
||||
{
|
||||
_nackCount++;
|
||||
}
|
||||
//if (wallClockMS == -1)
|
||||
//{
|
||||
// wallClockMS = VCMTickTime::MillisecondTimestamp();
|
||||
//}
|
||||
//if (retransmitted)
|
||||
//{
|
||||
// if (_nackCount < _nackLimit)
|
||||
// {
|
||||
// _nackCount++;
|
||||
// }
|
||||
// _latestNackTimestamp = wallClockMS;
|
||||
//}
|
||||
//else if (_nackCount > 0 && wallClockMS - _latestNackTimestamp > _nackWindowMS)
|
||||
//{
|
||||
// _nackCount = 0;
|
||||
//}
|
||||
}
|
||||
|
||||
// Updates Kalman estimate of the channel
|
||||
// The caller is expected to sanity check the inputs.
|
||||
void
|
||||
VCMJitterEstimator::KalmanEstimateChannel(WebRtc_Word64 frameDelayMS,
|
||||
WebRtc_Word32 deltaFSBytes)
|
||||
{
|
||||
double Mh[2];
|
||||
double hMh_sigma;
|
||||
double kalmanGain[2];
|
||||
double measureRes;
|
||||
double t00, t01;
|
||||
|
||||
// Kalman filtering
|
||||
|
||||
// Prediction
|
||||
// M = M + Q
|
||||
_thetaCov[0][0] += _Qcov[0][0];
|
||||
_thetaCov[0][1] += _Qcov[0][1];
|
||||
_thetaCov[1][0] += _Qcov[1][0];
|
||||
_thetaCov[1][1] += _Qcov[1][1];
|
||||
|
||||
// Kalman gain
|
||||
// K = M*h'/(sigma2n + h*M*h') = M*h'/(1 + h*M*h')
|
||||
// h = [dFS 1]
|
||||
// Mh = M*h'
|
||||
// hMh_sigma = h*M*h' + R
|
||||
Mh[0] = _thetaCov[0][0] * deltaFSBytes + _thetaCov[0][1];
|
||||
Mh[1] = _thetaCov[1][0] * deltaFSBytes + _thetaCov[1][1];
|
||||
// sigma weights measurements with a small deltaFS as noisy and
|
||||
// measurements with large deltaFS as good
|
||||
if (_maxFrameSize < 1.0)
|
||||
{
|
||||
return;
|
||||
}
|
||||
double sigma = (300.0 * exp(-abs(static_cast<double>(deltaFSBytes)) /
|
||||
(1e0 * _maxFrameSize)) + 1) * sqrt(_varNoise);
|
||||
if (sigma < 1.0)
|
||||
{
|
||||
sigma = 1.0;
|
||||
}
|
||||
hMh_sigma = deltaFSBytes * Mh[0] + Mh[1] + sigma;
|
||||
if ((hMh_sigma < 1e-9 && hMh_sigma >= 0) || (hMh_sigma > -1e-9 && hMh_sigma <= 0))
|
||||
{
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
kalmanGain[0] = Mh[0] / hMh_sigma;
|
||||
kalmanGain[1] = Mh[1] / hMh_sigma;
|
||||
|
||||
// Correction
|
||||
// theta = theta + K*(dT - h*theta)
|
||||
measureRes = frameDelayMS - (deltaFSBytes * _theta[0] + _theta[1]);
|
||||
_theta[0] += kalmanGain[0] * measureRes;
|
||||
_theta[1] += kalmanGain[1] * measureRes;
|
||||
|
||||
if (_theta[0] < _thetaLow)
|
||||
{
|
||||
_theta[0] = _thetaLow;
|
||||
}
|
||||
|
||||
// M = (I - K*h)*M
|
||||
t00 = _thetaCov[0][0];
|
||||
t01 = _thetaCov[0][1];
|
||||
_thetaCov[0][0] = (1 - kalmanGain[0] * deltaFSBytes) * t00 -
|
||||
kalmanGain[0] * _thetaCov[1][0];
|
||||
_thetaCov[0][1] = (1 - kalmanGain[0] * deltaFSBytes) * t01 -
|
||||
kalmanGain[0] * _thetaCov[1][1];
|
||||
_thetaCov[1][0] = _thetaCov[1][0] * (1 - kalmanGain[1]) -
|
||||
kalmanGain[1] * deltaFSBytes * t00;
|
||||
_thetaCov[1][1] = _thetaCov[1][1] * (1 - kalmanGain[1]) -
|
||||
kalmanGain[1] * deltaFSBytes * t01;
|
||||
|
||||
// Covariance matrix, must be positive semi-definite
|
||||
assert(_thetaCov[0][0] + _thetaCov[1][1] >= 0 &&
|
||||
_thetaCov[0][0] * _thetaCov[1][1] - _thetaCov[0][1] * _thetaCov[1][0] >= 0 &&
|
||||
_thetaCov[0][0] >= 0);
|
||||
}
|
||||
|
||||
// Calculate difference in delay between a sample and the
|
||||
// expected delay estimated by the Kalman filter
|
||||
double
|
||||
VCMJitterEstimator::DeviationFromExpectedDelay(WebRtc_Word64 frameDelayMS,
|
||||
WebRtc_Word32 deltaFSBytes) const
|
||||
{
|
||||
return frameDelayMS - (_theta[0] * deltaFSBytes + _theta[1]);
|
||||
}
|
||||
|
||||
// Estimates the random jitter by calculating the variance of the
|
||||
// sample distance from the line given by theta.
|
||||
void
|
||||
VCMJitterEstimator::EstimateRandomJitter(double d_dT, bool incompleteFrame)
|
||||
{
|
||||
double alpha;
|
||||
if (_alphaCount == 0)
|
||||
{
|
||||
assert(_alphaCount > 0);
|
||||
return;
|
||||
}
|
||||
alpha = static_cast<double>(_alphaCount - 1) / static_cast<double>(_alphaCount);
|
||||
_alphaCount++;
|
||||
if (_alphaCount > _alphaCountMax)
|
||||
{
|
||||
_alphaCount = _alphaCountMax;
|
||||
}
|
||||
double avgNoise = alpha * _avgNoise + (1 - alpha) * d_dT;
|
||||
double varNoise = alpha * _varNoise +
|
||||
(1 - alpha) * (d_dT - _avgNoise) * (d_dT - _avgNoise);
|
||||
if (!incompleteFrame || varNoise > _varNoise)
|
||||
{
|
||||
_avgNoise = avgNoise;
|
||||
_varNoise = varNoise;
|
||||
}
|
||||
if (_varNoise < 1.0)
|
||||
{
|
||||
// The variance should never be zero, since we might get
|
||||
// stuck and consider all samples as outliers.
|
||||
_varNoise = 1.0;
|
||||
}
|
||||
}
|
||||
|
||||
double
|
||||
VCMJitterEstimator::NoiseThreshold() const
|
||||
{
|
||||
double noiseThreshold = _noiseStdDevs * sqrt(_varNoise) - _noiseStdDevOffset;
|
||||
if (noiseThreshold < 1.0)
|
||||
{
|
||||
noiseThreshold = 1.0;
|
||||
}
|
||||
return noiseThreshold;
|
||||
}
|
||||
|
||||
// Calculates the current jitter estimate from the filtered estimates
|
||||
double
|
||||
VCMJitterEstimator::CalculateEstimate()
|
||||
{
|
||||
double ret = _theta[0] * (_maxFrameSize - _avgFrameSize) + NoiseThreshold();
|
||||
|
||||
// A very low estimate (or negative) is neglected
|
||||
if (ret < 1.0) {
|
||||
if (_prevEstimate <= 0.01)
|
||||
{
|
||||
ret = 1.0;
|
||||
}
|
||||
else
|
||||
{
|
||||
ret = _prevEstimate;
|
||||
}
|
||||
}
|
||||
if (ret > 10000.0) // Sanity
|
||||
{
|
||||
ret = 10000.0;
|
||||
}
|
||||
_prevEstimate = ret;
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Refreshes the filtered jitter estimate that GetJitterEstimate()
// reads back, by recalculating from the current filter state.
void
VCMJitterEstimator::PostProcessEstimate()
{
    _filterJitterEstimate = CalculateEstimate();
}
|
||||
|
||||
// Feeds a new round-trip time sample into the RTT filter.
//
// Input:
//      - rttMs : round-trip time in milliseconds
void
VCMJitterEstimator::UpdateRtt(WebRtc_UWord32 rttMs)
{
    _rttFilter.Update(rttMs);
}
|
||||
|
||||
void
|
||||
VCMJitterEstimator::UpdateMaxFrameSize(WebRtc_UWord32 frameSizeBytes)
|
||||
{
|
||||
if (_maxFrameSize < frameSizeBytes)
|
||||
{
|
||||
_maxFrameSize = frameSizeBytes;
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the current filtered estimate if available,
|
||||
// otherwise tries to calculate an estimate.
|
||||
double
|
||||
VCMJitterEstimator::GetJitterEstimate()
|
||||
{
|
||||
double jitterMS = CalculateEstimate();
|
||||
if (_filterJitterEstimate > jitterMS)
|
||||
{
|
||||
jitterMS = _filterJitterEstimate;
|
||||
}
|
||||
if (_nackCount >= _nackLimit)
|
||||
{
|
||||
return jitterMS + _rttFilter.RttMs();
|
||||
}
|
||||
return jitterMS;
|
||||
}
|
||||
|
||||
}
|
||||
158
modules/video_coding/main/source/jitter_estimator.h
Normal file
158
modules/video_coding/main/source/jitter_estimator.h
Normal file
@ -0,0 +1,158 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
#include "rtt_filter.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Estimates the inter-frame jitter of a received video stream by
// fitting a Kalman-filtered line (delay vs. frame-size delta) and
// tracking the variance of deviations from that line.
class VCMJitterEstimator
{
public:
    VCMJitterEstimator(WebRtc_Word32 vcmId = 0, WebRtc_Word32 receiverId = 0);

    VCMJitterEstimator& operator=(const VCMJitterEstimator& rhs);

    // Resets the estimate to the initial state
    void Reset();
    // Resets only the retransmission (nack) counter.
    void ResetNackCount();

    // Updates the jitter estimate with the new data.
    //
    // Input:
    //          - frameDelayMS    : Delay-delta calculated by UTILDelayEstimate in milliseconds
    //          - frameSizeBytes  : Frame size of the current frame.
    //          - incompleteFrame : Flags if the frame is used to update the estimate before it
    //                              was complete. Default is false.
    void UpdateEstimate(WebRtc_Word64 frameDelayMS,
                        WebRtc_UWord32 frameSizeBytes,
                        bool incompleteFrame = false);

    // Returns the current jitter estimate in milliseconds and
    // also adds an RTT dependent term in cases of retransmission.
    //
    // Return value              : Jitter estimate in milliseconds
    double GetJitterEstimate();

    // Updates the nack counter/timer.
    //
    // Input:
    //          - retransmitted   : True for a nacked frame, false otherwise
    //          - wallClockMS     : Used for testing
    void UpdateNackEstimate(bool retransmitted, WebRtc_Word64 wallClockMS = -1);

    // Updates the RTT filter.
    //
    // Input:
    //          - rttMs           : RTT in ms
    void UpdateRtt(WebRtc_UWord32 rttMs);

    // Raises the tracked maximum frame size if frameSizeBytes exceeds it.
    void UpdateMaxFrameSize(WebRtc_UWord32 frameSizeBytes);

    // A constant describing the delay from the jitter buffer
    // to the delay on the receiving side which is not accounted
    // for by the jitter buffer nor the decoding delay estimate.
    static const WebRtc_UWord32 OPERATING_SYSTEM_JITTER = 10;

protected:
    // These are protected for better testing possibilities
    double _theta[2]; // Estimated line parameters (slope, offset)
    double _varNoise; // Variance of the time-deviation from the line

private:
    // Updates the Kalman filter for the line describing
    // the frame size dependent jitter.
    //
    // Input:
    //          - frameDelayMS    : Delay-delta calculated by UTILDelayEstimate in milliseconds
    //          - deltaFSBytes    : Frame size delta, i.e.
    //                            : frame size at time T minus frame size at time T-1
    void KalmanEstimateChannel(WebRtc_Word64 frameDelayMS, WebRtc_Word32 deltaFSBytes);

    // Updates the random jitter estimate, i.e. the variance
    // of the time deviations from the line given by the Kalman filter.
    //
    // Input:
    //          - d_dT            : The deviation from the kalman estimate
    //          - incompleteFrame : True if the frame used to update the estimate
    //                              with was incomplete
    void EstimateRandomJitter(double d_dT, bool incompleteFrame);

    // Noise contribution to the estimate (ms), floored at 1 ms.
    double NoiseThreshold() const;

    // Calculates the current jitter estimate.
    //
    // Return value              : The current jitter estimate in milliseconds
    double CalculateEstimate();

    // Post process the calculated estimate
    void PostProcessEstimate();

    // Calculates the difference in delay between a sample and the
    // expected delay estimated by the Kalman filter.
    //
    // Input:
    //          - frameDelayMS    : Delay-delta calculated by UTILDelayEstimate in milliseconds
    //          - deltaFSBytes    : Frame size delta, i.e. frame size at time
    //                              T minus frame size at time T-1
    //
    // Return value              : The difference in milliseconds
    double DeviationFromExpectedDelay(WebRtc_Word64 frameDelayMS,
                                      WebRtc_Word32 deltaFSBytes) const;

    // Constants, filter parameters
    WebRtc_Word32         _vcmId;        // For tracing only.
    WebRtc_Word32         _receiverId;   // For tracing only.
    const double          _phi;          // Frame size filter coefficient.
    const double          _psi;          // Max frame size decay factor.
    const WebRtc_UWord32  _alphaCountMax; // Saturation of the noise filter coefficient.
    const double          _beta;
    const double          _thetaLow;     // Lower bound on the estimated slope.
    const WebRtc_UWord32  _nackLimit;    // Nacks needed before adding an RTT term.
    const WebRtc_UWord32  _nackWindowMS;
    const WebRtc_Word32   _numStdDevDelayOutlier;     // Delay outlier rejection bound.
    const WebRtc_Word32   _numStdDevFrameSizeOutlier; // Frame size outlier rejection bound.
    const double          _noiseStdDevs;      // Scale factor of the noise threshold.
    const double          _noiseStdDevOffset; // Offset of the noise threshold.

    double                _thetaCov[2][2]; // Estimate covariance
    double                _Qcov[2][2];     // Process noise covariance
    double                _avgFrameSize;   // Average frame size
    double                _varFrameSize;   // Frame size variance
    double                _maxFrameSize;   // Largest frame size received (descending
                                           // with a factor _psi)
    WebRtc_UWord32        _fsSum;          // Frame size sum during startup.
    WebRtc_UWord32        _fsCount;        // Frame samples seen during startup.

    WebRtc_Word64         _lastUpdateT;
    double                _prevEstimate;   // The previously returned jitter estimate
    WebRtc_UWord32        _prevFrameSize;  // Frame size of the previous frame
    double                _avgNoise;       // Average of the random jitter
    WebRtc_UWord32        _alphaCount;
    double                _filterJitterEstimate; // The filtered sum of jitter estimates

    WebRtc_UWord32        _startupCount;   // Samples seen before post-processing starts.

    WebRtc_Word64         _latestNackTimestamp; // Timestamp in ms when the latest nack was seen
    WebRtc_UWord32        _nackCount;           // Keeps track of the number of nacks received,
                                                // but never goes above _nackLimit
    VCMRttFilter          _rttFilter;

    enum { kStartupDelaySamples = 30 };
    enum { kFsAccuStartupSamples = 5 };
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
|
||||
850
modules/video_coding/main/source/media_opt_util.cc
Normal file
850
modules/video_coding/main/source/media_opt_util.cc
Normal file
@ -0,0 +1,850 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "video_coding_defines.h"
|
||||
#include "fec_tables_xor.h"
|
||||
#include "er_tables_xor.h"
|
||||
#include "nack_fec_tables.h"
|
||||
#include "qm_select_data.h"
|
||||
#include "media_opt_util.h"
|
||||
|
||||
#include <math.h>
|
||||
#include <float.h>
|
||||
#include <limits.h>
|
||||
#include <stdio.h>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
bool
|
||||
VCMProtectionMethod::BetterThan(VCMProtectionMethod *pm)
|
||||
{
|
||||
if (pm == NULL)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
return pm->_score > _score;
|
||||
}
|
||||
|
||||
// Protection factor for the hybrid NACK/FEC method.
// Currently a stub: the actual factor is derived in
// UpdateParameters() from an embedded VCMFecMethod instance.
bool
VCMNackFecMethod::ProtectionFactor(const VCMProtectionParameters* /*parameters*/)
{

    //use FEC model with modification with RTT for now

    return true;
}
|
||||
|
||||
// Effective packet loss for the hybrid NACK/FEC method.
// Currently a stub: the actual value is derived in
// UpdateParameters() from an embedded VCMFecMethod instance.
bool
VCMNackFecMethod::EffectivePacketLoss(const VCMProtectionParameters* /*parameters*/)
{
    //use FEC model with modification with RTT for now

    return true;
}
|
||||
|
||||
|
||||
// Recomputes protection factors, effective packet loss, bit cost and
// score for the hybrid NACK/FEC method. Starts from pure-FEC settings
// and softens them as a function of RTT (NACK handles losses at low
// RTT, so less FEC/ER is needed there).
bool
VCMNackFecMethod::UpdateParameters(const VCMProtectionParameters* parameters)
{

    // Temporary helper methods used only for their computations.
    VCMFecMethod fecMethod;
    VCMNackMethod nackMethod;

    // Loss values are on a 0..255 scale; tables cover loss < plossMax.
    const WebRtc_UWord8 plossMax = 129;
    WebRtc_UWord16 rttMax = nackMethod.MaxRttNack();

    // We should reduce the NACK threshold for the NackFec protection method:
    // with FEC and ER, we should only use NACK for small RTT, to avoid delay.
    // But this parameter change should be shared with RTP and JB.
    //rttMax = (WebRtc_UWord16) 0.5*rttMax;

    // Compute the protection factor (pure FEC settings as the baseline).
    fecMethod.ProtectionFactor(parameters);

    // Compute the effective packet loss (pure FEC settings).
    fecMethod.EffectivePacketLoss(parameters);

    WebRtc_UWord8 protFactorK = fecMethod._protectionFactorK;
    WebRtc_UWord8 protFactorD = fecMethod._protectionFactorD;
    WebRtc_UWord8 effPacketLoss = fecMethod._effectivePacketLoss;
    float resPacketLoss = fecMethod._residualPacketLoss;

    WebRtc_Word16 rttIndex= (WebRtc_UWord16) parameters->rtt;
    float softnessRtt = 1.0;
    if (parameters->rtt < rttMax)
    {
        // Softening factor from table, relative to rttMax (NACK threshold).
        softnessRtt = (float)VCMNackFecTable[rttIndex]/(float)4096.0;

        // Soften ER with NACK on.
        _effectivePacketLoss = (WebRtc_UWord8)(effPacketLoss*softnessRtt);

        // Soften FEC with NACK on.
        _protectionFactorK = (WebRtc_UWord8) (protFactorK * softnessRtt);
        _protectionFactorD = (WebRtc_UWord8) (protFactorD * softnessRtt);
    }
    // NOTE(review): when rtt >= rttMax, _effectivePacketLoss is left at
    // its previous value rather than set to effPacketLoss — confirm
    // this is intentional.


    // Make sure I-frame protection is at least as large as P-frame
    // protection, and at least as high as the received loss.
    WebRtc_UWord8 packetLoss = (WebRtc_UWord8)(255* parameters->lossPr);
    _protectionFactorK = static_cast<WebRtc_UWord8>(VCM_MAX(packetLoss,VCM_MAX(_scaleProtKey*protFactorD,protFactorK)));

    // Check limit on amount of protection for I frame: 50% is max.
    if (_protectionFactorK >= plossMax) _protectionFactorK = plossMax - 1;

    // Bit cost for NackFec:

    // NACK cost is based on residual packet loss (we should only NACK
    // packets not recovered by FEC), and applies only below the RTT
    // threshold.
    _efficiency = 0.0f;
    if (parameters->rtt < rttMax)
        _efficiency = parameters->bitRate * resPacketLoss / (1.0f + resPacketLoss);

    // Add FEC overhead cost; I frames ignored for now.
    float fecRate = static_cast<float>(_protectionFactorD) / 255.0f;
    if (fecRate >= 0.0f)
        _efficiency += parameters->bitRate * fecRate;

    _score = _efficiency;

    // Protection/FEC rates obtained above are defined relative to the
    // total number of packets (source + FEC). The RTP module assumes
    // the factor is relative to the number of SOURCE packets, so
    // convert to reduce the mismatch between the suggested and actual rate.
    WebRtc_UWord8 codeRate = protFactorK;
    _protectionFactorK = fecMethod.ConvertFECRate(codeRate);
    codeRate = protFactorD;
    _protectionFactorD = fecMethod.ConvertFECRate(codeRate);

    return true;
}
|
||||
|
||||
|
||||
bool
|
||||
VCMNackMethod::EffectivePacketLoss(WebRtc_UWord8 effPacketLoss, WebRtc_UWord16 rttTime)
|
||||
{
|
||||
|
||||
WebRtc_UWord16 rttMax = MaxRttNack();
|
||||
|
||||
//For large RTT, we should rely on some Error Resilience, so we set packetLossEnc = 0
|
||||
//for RTT less than the NACK threshold
|
||||
if (rttTime < rttMax )
|
||||
effPacketLoss = 0; //may want a softer transition here
|
||||
|
||||
_effectivePacketLoss = effPacketLoss;
|
||||
|
||||
return true;
|
||||
|
||||
}
|
||||
bool
|
||||
VCMNackMethod::UpdateParameters(const VCMProtectionParameters* parameters)
|
||||
{
|
||||
|
||||
//Compute the effective packet loss for ER
|
||||
WebRtc_UWord8 effPacketLoss = (WebRtc_UWord8)(255* parameters->lossPr);
|
||||
WebRtc_UWord16 rttTime = (WebRtc_UWord16) parameters->rtt;
|
||||
EffectivePacketLoss(effPacketLoss, rttTime);
|
||||
//
|
||||
|
||||
//Compute the NACK bit cost
|
||||
_efficiency = parameters->bitRate * parameters->lossPr / (1.0f + parameters->lossPr);
|
||||
_score = _efficiency;
|
||||
if (parameters->rtt > _NACK_MAX_RTT)
|
||||
{
|
||||
_score = 0.0f;
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
// Computes the factor by which the FEC rate index for key frames is
// boosted relative to delta frames.
//
// Input:
//      - packetFrameDelta : average number of packets per delta frame
//      - packetFrameKey   : average number of packets per key frame
//
// Return value            : boost factor, at least 2
WebRtc_UWord8
VCMFecMethod::BoostCodeRateKey(WebRtc_UWord8 packetFrameDelta, WebRtc_UWord8 packetFrameKey) const
{

    // Default/minimum boost applied to I-frame protection.
    const WebRtc_UWord8 boostRateKey = 2;
    // Ratio scales the FEC protection up for I frames.
    WebRtc_UWord8 ratio = 1;

    if (packetFrameDelta > 0)
    {
        // BUG FIX: the original cast the quotient to the signed
        // WebRtc_Word8, which is implementation-defined for ratios
        // above 127; cast to the unsigned destination type instead.
        ratio = (WebRtc_UWord8)( packetFrameKey / packetFrameDelta );
    }

    ratio = VCM_MAX(boostRateKey, ratio);

    return ratio;
}
|
||||
|
||||
// Converts a protection factor defined relative to the TOTAL packet
// count (source + FEC) into one defined relative to the SOURCE packet
// count, as the RTP module expects. Result is capped at 255.
WebRtc_UWord8
VCMFecMethod::ConvertFECRate(WebRtc_UWord8 codeRateRTP) const
{
    const double converted = 0.5 + 255.0*codeRateRTP/(float)(255 - codeRateRTP);
    return static_cast<WebRtc_UWord8>(VCM_MIN(255, converted));
}
|
||||
|
||||
//AvgRecoveryFEC: average recovery from FEC, assuming random packet loss model
|
||||
//Computed offline for a range of FEC code parameters and loss rates
|
||||
float
|
||||
VCMFecMethod::AvgRecoveryFEC(const VCMProtectionParameters* parameters) const
|
||||
{
|
||||
|
||||
//Total (avg) bits available per frame: total rate over actual/sent frame rate
|
||||
//units are kbits/frame
|
||||
const WebRtc_UWord16 bitRatePerFrame = static_cast<WebRtc_UWord16>(parameters->bitRate/(parameters->frameRate));
|
||||
|
||||
//Total (avg) number of packets per frame (source and fec):
|
||||
const WebRtc_UWord8 avgTotPackets = 1 + (WebRtc_UWord8)((float)bitRatePerFrame*1000.0/(float)(8.0*_maxPayloadSize) + 0.5);
|
||||
|
||||
//parameters for tables
|
||||
const WebRtc_UWord8 codeSize = 24;
|
||||
const WebRtc_UWord8 plossMax = 129;
|
||||
const WebRtc_UWord16 maxErTableSize = 38700;
|
||||
//
|
||||
|
||||
//
|
||||
//Get index for table
|
||||
const float protectionFactor = (float)_protectionFactorD/(float)255;
|
||||
WebRtc_UWord8 fecPacketsPerFrame = (WebRtc_UWord8)(0.5 + protectionFactor*avgTotPackets);
|
||||
WebRtc_UWord8 sourcePacketsPerFrame = avgTotPackets - fecPacketsPerFrame;
|
||||
|
||||
if (fecPacketsPerFrame == 0)
|
||||
{
|
||||
return 0.0; //no protection, so avg. recov from FEC == 0
|
||||
}
|
||||
|
||||
//table defined up to codeSizexcodeSize code
|
||||
if (sourcePacketsPerFrame > codeSize)
|
||||
{
|
||||
sourcePacketsPerFrame = codeSize;
|
||||
}
|
||||
|
||||
//check: protection factor is maxed at 50%, so this should never happen
|
||||
if (sourcePacketsPerFrame < 1)
|
||||
{
|
||||
assert("average number of source packets below 1\n");
|
||||
}
|
||||
|
||||
//index for ER tables: up to codeSizexcodeSize mask
|
||||
WebRtc_UWord16 codeIndexTable[codeSize*codeSize];
|
||||
WebRtc_UWord16 k = -1;
|
||||
for(WebRtc_UWord8 i=1;i<=codeSize;i++)
|
||||
{
|
||||
for(WebRtc_UWord8 j=1;j<=i;j++)
|
||||
{
|
||||
k += 1;
|
||||
codeIndexTable[(j-1)*codeSize + i - 1] = k;
|
||||
}
|
||||
}
|
||||
|
||||
const WebRtc_UWord8 lossRate = (WebRtc_UWord8) (255.0*parameters->lossPr + 0.5f);
|
||||
|
||||
const WebRtc_UWord16 codeIndex = (fecPacketsPerFrame - 1)*codeSize + (sourcePacketsPerFrame - 1);
|
||||
const WebRtc_UWord16 indexTable = codeIndexTable[codeIndex] * plossMax + lossRate;
|
||||
|
||||
const WebRtc_UWord16 codeIndex2 = (fecPacketsPerFrame)*codeSize + (sourcePacketsPerFrame);
|
||||
WebRtc_UWord16 indexTable2 = codeIndexTable[codeIndex2] * plossMax + lossRate;
|
||||
|
||||
//checks on table index
|
||||
if (indexTable >= maxErTableSize)
|
||||
{
|
||||
assert("ER table index too large\n");
|
||||
}
|
||||
|
||||
if (indexTable2 >= maxErTableSize)
|
||||
{
|
||||
indexTable2 = indexTable;
|
||||
}
|
||||
//
|
||||
|
||||
//Get the average effective packet loss recovery from FEC
|
||||
//this is from tables, computed using random loss model
|
||||
WebRtc_UWord8 avgFecRecov1 = 0;
|
||||
WebRtc_UWord8 avgFecRecov2 = 0;
|
||||
float avgFecRecov = 0;
|
||||
|
||||
if (fecPacketsPerFrame > 0)
|
||||
{
|
||||
avgFecRecov1 = VCMAvgFECRecoveryXOR[indexTable];
|
||||
avgFecRecov2 = VCMAvgFECRecoveryXOR[indexTable2];
|
||||
}
|
||||
|
||||
//interpolate over two FEC codes
|
||||
const float weightRpl = (float)(0.5 + protectionFactor*avgTotPackets) - (float)fecPacketsPerFrame;
|
||||
avgFecRecov = (float)weightRpl * (float)avgFecRecov2 + (float)(1.0 - weightRpl) * (float)avgFecRecov1;
|
||||
|
||||
|
||||
return avgFecRecov;
|
||||
|
||||
}
|
||||
|
||||
bool
|
||||
VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters)
|
||||
{
|
||||
|
||||
//FEC PROTECTION SETTINGS: varies with packet loss and bitrate
|
||||
|
||||
const float bitRate = parameters->bitRate;
|
||||
WebRtc_UWord8 packetLoss = (WebRtc_UWord8)(255* parameters->lossPr);
|
||||
|
||||
|
||||
//Size of tables
|
||||
const WebRtc_UWord16 maxFecTableSize = 6450;
|
||||
//Parameters for range of rate and packet loss for tables
|
||||
const WebRtc_UWord8 ratePar1 = 5;
|
||||
const WebRtc_UWord8 ratePar2 = 49;
|
||||
const WebRtc_UWord8 plossMax = 129;
|
||||
|
||||
//
|
||||
//Just for testing: for the case where we randomly lose slices instead of RTP packets and use SingleMode packetization in RTP module
|
||||
//const WebRtc_UWord16 slice_size = 3000/6; //corresponds to rate=1000k with 4 cores
|
||||
|
||||
//float slice_mtu = (float)_maxPayloadSize/(float)slice_size;
|
||||
const float slice_mtu = 1.0;
|
||||
//
|
||||
|
||||
//Total (avg) bits available per frame: total rate over actual/sent frame rate
|
||||
//units are kbits/frame
|
||||
const WebRtc_UWord16 bitRatePerFrame = static_cast<WebRtc_UWord16>(slice_mtu*bitRate/(parameters->frameRate));
|
||||
|
||||
//Total (avg) number of packets per frame (source and fec):
|
||||
const WebRtc_UWord8 avgTotPackets = 1 + (WebRtc_UWord8)((float)bitRatePerFrame*1000.0/(float)(8.0*_maxPayloadSize) + 0.5);
|
||||
|
||||
//TODO(marpan): Tune model for FEC Protection.
|
||||
//Better modulation of protection with available bits/frame (or avgTotpackets) using weight factors
|
||||
//FEC Tables include this effect already, but need to tune model off-line
|
||||
float weight1 = 0.5;
|
||||
float weight2 = 0.5;
|
||||
if (avgTotPackets > 4)
|
||||
{
|
||||
weight1 = 1.0;
|
||||
weight2 = 0.;
|
||||
}
|
||||
if (avgTotPackets > 6)
|
||||
{
|
||||
weight1 = 1.5;
|
||||
weight2 = 0.;
|
||||
}
|
||||
//
|
||||
|
||||
//Fec rate parameters: for P and I frame
|
||||
WebRtc_UWord8 codeRateDelta = 0;
|
||||
WebRtc_UWord8 codeRateKey = 0;
|
||||
|
||||
|
||||
//Get index for new table: the FEC protection depends on the (avergare) available bits/frame
|
||||
//the range on the rate index corresponds to rates (bps) from 200k to 8000k, for 30fps
|
||||
WebRtc_UWord8 rateIndexTable = (WebRtc_UWord8) VCM_MAX(VCM_MIN((bitRatePerFrame-ratePar1)/ratePar1,ratePar2),0);
|
||||
|
||||
// Restrict packet loss range to 50 for now%: current tables defined only up to 50%
|
||||
if (packetLoss >= plossMax)
|
||||
{
|
||||
packetLoss = plossMax - 1;
|
||||
}
|
||||
WebRtc_UWord16 indexTable = rateIndexTable * plossMax + packetLoss;
|
||||
|
||||
//check on table index
|
||||
if (indexTable >= maxFecTableSize)
|
||||
{
|
||||
assert("FEC table index too large\n");
|
||||
}
|
||||
//
|
||||
|
||||
//For Key frame: effectively at a higher rate, so we scale/boost the rate index.
|
||||
//the boost factor may depend on several factors: ratio of packet number of I to P frames, how much protection placed on P frames, etc.
|
||||
//default is 2
|
||||
const WebRtc_UWord8 packetFrameDelta = (WebRtc_UWord8)(0.5 + parameters->packetsPerFrame);
|
||||
const WebRtc_UWord8 packetFrameKey = (WebRtc_UWord8) (0.5 + parameters->packetsPerFrameKey);
|
||||
const WebRtc_UWord8 boostKey = BoostCodeRateKey(packetFrameDelta, packetFrameKey);
|
||||
rateIndexTable = (WebRtc_UWord8) VCM_MAX(VCM_MIN(1+(boostKey*bitRatePerFrame-ratePar1)/ratePar1,ratePar2),0);
|
||||
WebRtc_UWord16 indexTableKey = rateIndexTable * plossMax + packetLoss;
|
||||
|
||||
indexTableKey = VCM_MIN(indexTableKey, maxFecTableSize);
|
||||
|
||||
codeRateDelta = VCMCodeRateXORTable[indexTable]; //protection factor for P fra
|
||||
codeRateKey = VCMCodeRateXORTable[indexTableKey]; //protection factor for I frame
|
||||
|
||||
//average with minimum protection level given by (average) total number of packets
|
||||
if (packetLoss > 0)
|
||||
{
|
||||
codeRateDelta = static_cast<WebRtc_UWord8>((weight1*(float)codeRateDelta + weight2*255.0/(float)avgTotPackets));
|
||||
}
|
||||
|
||||
//check limit on amount of protection for P frame; 50% is max
|
||||
if (codeRateDelta >= plossMax)
|
||||
{
|
||||
codeRateDelta = plossMax - 1;
|
||||
}
|
||||
|
||||
//make sure I frame protection is at least larger than P frame protection, and at least as high as received loss
|
||||
codeRateKey = static_cast<WebRtc_UWord8>(VCM_MAX(packetLoss,VCM_MAX(_scaleProtKey*codeRateDelta, codeRateKey)));
|
||||
|
||||
//check limit on amount of protection for I frame: 50% is max
|
||||
if (codeRateKey >= plossMax)
|
||||
{
|
||||
codeRateKey = plossMax - 1;
|
||||
}
|
||||
|
||||
_protectionFactorK = codeRateKey;
|
||||
_protectionFactorD = codeRateDelta;
|
||||
|
||||
// DONE WITH FEC PROTECTION SETTINGS
|
||||
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
// ER SETTINGS: computes the effective packet loss reported to the
// encoder when FEC is on. Based on RPL (residual packet loss):
// RPL = received/input packet loss - average FEC recovery.
// This is a soft setting based on the degree of FEC protection.
bool
VCMFecMethod::EffectivePacketLoss(const VCMProtectionParameters* parameters)
{

    // The input packet loss, on a 0..255 scale (may be filtered
    // upstream according to FilteredLoss).
    WebRtc_UWord8 effPacketLoss = (WebRtc_UWord8)(255*parameters->lossPr);

    // Softening/floor parameters for the ER setting.
    // scaleErRS is currently unused (reserved for an RS-code variant);
    // the XOR scale is applied below.
    float scaleErRS = 0.5;
    float scaleErXOR = 0.5;
    float minErLevel = (float) 0.025;
    // Alternative (unsoftened) settings:
    //float scaleErRS = 1.0;
    //float scaleErXOR = 1.0;
    //float minErLevel = (float) 0.0;

    float avgFecRecov = 0.;
    // Effective packet loss for ER:
    float scaleEr = scaleErXOR;
    avgFecRecov = AvgRecoveryFEC(parameters);

    // Residual packet loss, back on a 0..1 scale.
    // NOTE(review): this can go negative when the estimated FEC
    // recovery exceeds the measured loss — confirm downstream readers
    // tolerate that.
    _residualPacketLoss = (float)(effPacketLoss - avgFecRecov)/(float)255.0;


    // Effective packet loss for the encoder: measured loss minus the
    // (scaled) FEC recovery, floored at minErLevel.
    _effectivePacketLoss = 0;
    if (effPacketLoss > 0)
    {
        _effectivePacketLoss = VCM_MAX((effPacketLoss - (WebRtc_UWord8)(scaleEr*avgFecRecov)),static_cast<WebRtc_UWord8>(minErLevel*255));
    }


    // DONE WITH ER SETTING

    return true;
}
|
||||
|
||||
|
||||
// Updates this FEC method's protection factors, effective packet loss,
// efficiency (overhead bit cost) and score from the current parameters.
//
// Return value : always true.
bool
VCMFecMethod::UpdateParameters(const VCMProtectionParameters* parameters)
{
    // Compute the protection factor (_protectionFactorK/_protectionFactorD).
    ProtectionFactor(parameters);

    // Compute the effective packet loss (_effectivePacketLoss,
    // _residualPacketLoss).
    EffectivePacketLoss(parameters);

    // Compute the bit cost
    // Ignore key frames for now.
    float fecRate = static_cast<float>(_protectionFactorD) / 255.0f;
    // NOTE(review): fecRate is derived from an unsigned byte, so this
    // condition is always true; the else-branch is unreachable.
    if (fecRate >= 0.0f)
    {
        // use this formula if the fecRate (protection factor) is defined relative to number of source packets
        // this is the case for the previous tables:
        // _efficiency = parameters->bitRate * ( 1.0 - 1.0 / (1.0 + fecRate));

        // in the new tables, the fecRate is defined relative to total number of packets (total rate),
        // so overhead cost is:
        _efficiency = parameters->bitRate * fecRate;
    }
    else
    {
        _efficiency = 0.0f;
    }
    // The method's score equals its overhead cost.
    _score = _efficiency;

    // Protection/fec rates obtained above is defined relative to total number of packets (total rate: source+fec)
    // FEC in RTP module assumes protection factor is defined relative to source number of packets
    // so we should convert the factor to reduce mismatch between mediaOpt suggested rate and the actual rate
    _protectionFactorK = ConvertFECRate(_protectionFactorK);
    _protectionFactorD = ConvertFECRate(_protectionFactorD);

    return true;
}
|
||||
|
||||
// Scores the intra-frame-request method: efficiency is the expected cost of
// re-sending key frames for each lost packet.
//
// Return value : true if intra requests are recommended under the current
//                loss/RTT conditions; false otherwise (score still updated).
bool
VCMIntraReqMethod::UpdateParameters(const VCMProtectionParameters* parameters)
{
    float packetRate = parameters->packetsPerFrame * parameters->frameRate;
    // Assume that all lost packets cohere to different frames
    float lossRate = parameters->lossPr * packetRate;
    // Guard against division by (near-)zero key-frame size below.
    if (parameters->keyFrameSize <= 1e-3)
    {
        _score = FLT_MAX;
        return false;
    }
    // Expected key-frame retransmission cost per unit time.
    _efficiency = lossRate * parameters->keyFrameSize;
    _score = _efficiency;
    // Not recommended when loss is high enough that more than one key frame
    // per key-frame-duration would be requested, or the RTT is too large.
    if (parameters->lossPr >= 1.0f / parameters->keyFrameSize || parameters->rtt > _IREQ_MAX_RTT)
    {
        return false;
    }
    return true;
}
|
||||
|
||||
bool
|
||||
VCMPeriodicIntraMethod::UpdateParameters(const VCMProtectionParameters* /*parameters*/)
|
||||
{
|
||||
// Periodic I-frames. The last thing we want to use.
|
||||
_efficiency = 0.0f;
|
||||
_score = FLT_MAX;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Scores the macroblock intra-refresh method.
//
// Return value : true if MB intra refresh is recommended at the current
//                bit rate; false when the rate is below the minimum.
bool
VCMMbIntraRefreshMethod::UpdateParameters(const VCMProtectionParameters* parameters)
{
    // Assume optimal for now.
    // Overhead model: fraction lossPr/(1+lossPr) of the bit rate.
    _efficiency = parameters->bitRate * parameters->lossPr / (1.0f + parameters->lossPr);
    _score = _efficiency;
    // MB refresh only pays off above a minimum bit rate.
    if (parameters->bitRate < _MBREF_MIN_BITRATE)
    {
        return false;
    }
    return true;
}
|
||||
|
||||
// Returns the RTT threshold (ms) above which NACK is not recommended.
WebRtc_UWord16
VCMNackMethod::MaxRttNack() const
{
    return _NACK_MAX_RTT;
}
|
||||
|
||||
// Destructor: releases all registered protection methods (owned by this
// class once added via AddMethod).
VCMLossProtectionLogic::~VCMLossProtectionLogic()
{
    ClearLossProtections();
}
|
||||
|
||||
// Deletes every registered protection method, empties the list and clears
// the current selection.
void
VCMLossProtectionLogic::ClearLossProtections()
{
    ListItem *item;
    // Pop-and-delete until the list is empty.
    while ((item = _availableMethods.First()) != 0)
    {
        VCMProtectionMethod *method = static_cast<VCMProtectionMethod*>(item->GetItem());
        if (method != NULL)
        {
            delete method;
        }
        _availableMethods.PopFront();
    }
    // The selected method (if any) was owned by the list and is now gone.
    _selectedMethod = NULL;
}
|
||||
|
||||
bool
|
||||
VCMLossProtectionLogic::AddMethod(VCMProtectionMethod *newMethod)
|
||||
{
|
||||
VCMProtectionMethod *method;
|
||||
ListItem *item;
|
||||
if (newMethod == NULL)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
for (item = _availableMethods.First(); item != NULL; item = _availableMethods.Next(item))
|
||||
{
|
||||
method = static_cast<VCMProtectionMethod *>(item->GetItem());
|
||||
if (method != NULL && method->Type() == newMethod->Type())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
_availableMethods.PushBack(newMethod);
|
||||
return true;
|
||||
|
||||
}
|
||||
// Removes (and deletes) the registered method of the given type, clearing
// the current selection if it was that method.
//
// Return value : true if a method was found and removed.
bool
VCMLossProtectionLogic::RemoveMethod(VCMProtectionMethodEnum methodType)
{
    VCMProtectionMethod *method;
    ListItem *item;
    bool foundAndRemoved = false;
    for (item = _availableMethods.First(); item != NULL; item = _availableMethods.Next(item))
    {
        method = static_cast<VCMProtectionMethod *>(item->GetItem());
        if (method != NULL && method->Type() == methodType)
        {
            // Deselect before deleting to avoid a dangling _selectedMethod.
            if (_selectedMethod != NULL && _selectedMethod->Type() == method->Type())
            {
                _selectedMethod = NULL;
            }
            _availableMethods.Erase(item);
            // item was invalidated by Erase; nulling it ends the loop on the
            // next increment (Next(NULL) — assumed to return NULL; since
            // types are unique in the list at most one match exists anyway).
            item = NULL;
            delete method;
            foundAndRemoved = true;
        }
    }
    return foundAndRemoved;
}
|
||||
|
||||
// Looks up the registered protection method of the given type.
//
// Return value : the method, or NULL if no method of that type is registered.
VCMProtectionMethod*
VCMLossProtectionLogic::FindMethod(VCMProtectionMethodEnum methodType) const
{
    for (ListItem *it = _availableMethods.First(); it != NULL;
         it = _availableMethods.Next(it))
    {
        VCMProtectionMethod *candidate =
            static_cast<VCMProtectionMethod *>(it->GetItem());
        if (candidate != NULL && candidate->Type() == methodType)
        {
            return candidate;
        }
    }
    return NULL;
}
|
||||
|
||||
float
|
||||
VCMLossProtectionLogic::HighestOverhead() const
|
||||
{
|
||||
VCMProtectionMethod *method;
|
||||
ListItem *item;
|
||||
float highestOverhead = 0.0f;
|
||||
for (item = _availableMethods.First(); item != NULL; item = _availableMethods.Next(item))
|
||||
{
|
||||
method = static_cast<VCMProtectionMethod *>(item->GetItem());
|
||||
if (method != NULL && method->RequiredBitRate() > highestOverhead)
|
||||
{
|
||||
highestOverhead = method->RequiredBitRate();
|
||||
}
|
||||
}
|
||||
return highestOverhead;
|
||||
}
|
||||
|
||||
// Stores the latest round-trip time estimate (ms, per header comment).
void
VCMLossProtectionLogic::UpdateRtt(WebRtc_UWord32 rtt)
{
    _rtt = rtt;
}
|
||||
|
||||
// Stores the residual packet loss (effective loss after FEC recovery).
void
VCMLossProtectionLogic::UpdateResidualPacketLoss(float residualPacketLoss)
{
    _residualPacketLoss = residualPacketLoss;
}
|
||||
|
||||
// Stores the FEC code type (currently only kXORFec exists).
void
VCMLossProtectionLogic::UpdateFecType(VCMFecTypes fecType)
{
    _fecType = fecType;
}
|
||||
|
||||
// Feeds a new RTCP loss report (in [0,255]) into both the max-loss history
// and the exponential average, and refreshes the [0,1] loss estimate.
void
VCMLossProtectionLogic::UpdateLossPr(WebRtc_UWord8 lossPr255)
{
    // NOTE(review): the 64-bit wall clock is truncated to 32 bits here;
    // assumed benign for the intervals involved — confirm wrap handling.
    WebRtc_UWord32 now = static_cast<WebRtc_UWord32>(VCMTickTime::MillisecondTimestamp());
    UpdateMaxLossHistory(lossPr255, now);
    // Exponential filter weighted by the time since the last report.
    _lossPr255.Apply(static_cast<float>(now - _lastPrUpdateT), static_cast<float>(lossPr255));
    _lastPrUpdateT = now;
    _lossPr = _lossPr255.Value() / 255.0f;
}
|
||||
|
||||
// Maintains a history of per-window maximum loss values: within the current
// window (kLossPrShortFilterWinMs) only the running max is updated; when the
// window expires, the max is pushed into _lossPrHistory[0] and older entries
// are shifted back.
void
VCMLossProtectionLogic::UpdateMaxLossHistory(WebRtc_UWord8 lossPr255, WebRtc_Word64 now)
{
    // Still inside the current one-second window: just track the max.
    if (_lossPrHistory[0].timeMs >= 0 &&
        now - _lossPrHistory[0].timeMs < kLossPrShortFilterWinMs)
    {
        if (lossPr255 > _shortMaxLossPr255)
        {
            _shortMaxLossPr255 = lossPr255;
        }
    }
    else
    {
        // Only add a new value to the history once a second
        if(_lossPrHistory[0].timeMs == -1)
        {
            // First, no shift
            _shortMaxLossPr255 = lossPr255;
        }
        else
        {
            // Shift: age every history entry by one slot (oldest drops off).
            for(WebRtc_Word32 i = (kLossPrHistorySize - 2); i >= 0 ; i--)
            {
                _lossPrHistory[i+1].lossPr255 = _lossPrHistory[i].lossPr255;
                _lossPrHistory[i+1].timeMs = _lossPrHistory[i].timeMs;
            }
        }
        // If no max was recorded during the expired window, use the new
        // sample so the stored entry is never spuriously zero.
        if (_shortMaxLossPr255 == 0)
        {
            _shortMaxLossPr255 = lossPr255;
        }

        // Commit the window max and start a new window at 'now'.
        _lossPrHistory[0].lossPr255 = _shortMaxLossPr255;
        _lossPrHistory[0].timeMs = now;
        _shortMaxLossPr255 = 0;

    }
}
|
||||
|
||||
// Returns the maximum loss value over the in-progress window and all
// history entries that are not older than the full history span
// (kLossPrHistorySize * kLossPrShortFilterWinMs).
WebRtc_UWord8
VCMLossProtectionLogic::MaxFilteredLossPr(WebRtc_Word64 nowMs) const
{
    // Seed with the running max of the current (uncommitted) window.
    WebRtc_UWord8 maxFound = _shortMaxLossPr255;
    if (_lossPrHistory[0].timeMs == -1)
    {
        // Empty history: only the current window contributes.
        return maxFound;
    }
    for (WebRtc_Word32 i=0; i < kLossPrHistorySize; i++)
    {
        if (_lossPrHistory[i].timeMs == -1)
        {
            // Entries are filled front-to-back; -1 marks the end.
            break;
        }
        if (nowMs - _lossPrHistory[i].timeMs > kLossPrHistorySize * kLossPrShortFilterWinMs)
        {
            // This sample (and all samples after this) is too old
            break;
        }
        if (_lossPrHistory[i].lossPr255 > maxFound)
        {
            // This sample is the largest one this far into the history
            maxFound = _lossPrHistory[i].lossPr255;
        }
    }
    return maxFound;
}
|
||||
|
||||
// Returns the filtered loss probability in [0,255]: the windowed maximum
// when FEC is the selected method, otherwise the exponential average.
WebRtc_UWord8
VCMLossProtectionLogic::FilteredLoss() const
{

    //take the average received loss
    //return static_cast<WebRtc_UWord8>(_lossPr255.Value() + 0.5f);

    //take the windowed max of the received loss
    if (_selectedMethod != NULL && _selectedMethod->Type() == kFEC)
    {
        // NOTE(review): the 64-bit clock is truncated to 32 bits before
        // being widened back to 64 for MaxFilteredLossPr; this matches how
        // timestamps are stored elsewhere in this class, but wraps after
        // ~49 days — confirm intended.
        return MaxFilteredLossPr(static_cast<WebRtc_UWord32>(VCMTickTime::MillisecondTimestamp()));
    }
    else
    {
        // Round the filtered average to the nearest integer.
        return static_cast<WebRtc_UWord8>(_lossPr255.Value() + 0.5);
    }

}
|
||||
|
||||
// Overwrites the [0,1] loss estimate with an externally filtered value
// (given in the [0,255] domain).
void
VCMLossProtectionLogic::UpdateFilteredLossPr(WebRtc_UWord8 packetLossEnc)
{
    _lossPr = (float)packetLossEnc/(float)255.0;
}
|
||||
|
||||
// Stores the current target bit rate (kbit/s, per header comment).
void
VCMLossProtectionLogic::UpdateBitRate(float bitRate)
{
    _bitRate = bitRate;
}
|
||||
|
||||
// Feeds the packet count of the latest delta frame into the
// packets-per-frame exponential filter.
void
VCMLossProtectionLogic::UpdatePacketsPerFrame(float nPackets)
{
    WebRtc_UWord32 now = static_cast<WebRtc_UWord32>(VCMTickTime::MillisecondTimestamp());
    // Filter weight depends on the time since the previous update.
    _packetsPerFrame.Apply(static_cast<float>(now - _lastPacketPerFrameUpdateT), nPackets);
    _lastPacketPerFrameUpdateT = now;
}
|
||||
|
||||
// Feeds the packet count of the latest key frame into the key-frame
// packets-per-frame exponential filter.
void
VCMLossProtectionLogic::UpdatePacketsPerFrameKey(float nPackets)
{
    WebRtc_UWord32 now = static_cast<WebRtc_UWord32>(VCMTickTime::MillisecondTimestamp());
    // Filter weight depends on the time since the previous key-frame update.
    _packetsPerFrameKey.Apply(static_cast<float>(now - _lastPacketPerFrameUpdateTKey), nPackets);
    _lastPacketPerFrameUpdateTKey = now;
}
|
||||
|
||||
// Stores the size of the latest sent key frame.
void
VCMLossProtectionLogic::UpdateKeyFrameSize(float keyFrameSize)
{
    _keyFrameSize = keyFrameSize;
}
|
||||
|
||||
bool
|
||||
VCMLossProtectionLogic::UpdateMethod(VCMProtectionMethod *newMethod /*=NULL */)
|
||||
{
|
||||
_currentParameters.rtt = _rtt;
|
||||
_currentParameters.lossPr = _lossPr;
|
||||
_currentParameters.bitRate = _bitRate;
|
||||
_currentParameters.frameRate = _frameRate; //should this be named actual frame rate?
|
||||
_currentParameters.keyFrameSize = _keyFrameSize;
|
||||
_currentParameters.fecRateDelta = _fecRateDelta;
|
||||
_currentParameters.fecRateKey = _fecRateKey;
|
||||
_currentParameters.packetsPerFrame = _packetsPerFrame.Value();
|
||||
_currentParameters.packetsPerFrameKey = _packetsPerFrameKey.Value();
|
||||
_currentParameters.residualPacketLoss = _residualPacketLoss;
|
||||
_currentParameters.fecType = _fecType;
|
||||
|
||||
if (newMethod == NULL)
|
||||
{
|
||||
//_selectedMethod = _bestNotOkMethod = NULL;
|
||||
VCMProtectionMethod *method;
|
||||
ListItem *item;
|
||||
for (item = _availableMethods.First(); item != NULL; item = _availableMethods.Next(item))
|
||||
{
|
||||
method = static_cast<VCMProtectionMethod *>(item->GetItem());
|
||||
if (method != NULL)
|
||||
{
|
||||
if (method->Type() == kFEC)
|
||||
{
|
||||
_selectedMethod = method;
|
||||
}
|
||||
method->UpdateParameters(&_currentParameters);
|
||||
}
|
||||
}
|
||||
if (_selectedMethod != NULL && _selectedMethod->Type() != kFEC)
|
||||
{
|
||||
_selectedMethod = method;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
_selectedMethod = newMethod;
|
||||
_selectedMethod->UpdateParameters(&_currentParameters);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Returns the currently selected protection method (may be NULL).
VCMProtectionMethod*
VCMLossProtectionLogic::SelectedMethod() const
{
    return _selectedMethod;
}
|
||||
|
||||
void
|
||||
VCMLossProtectionLogic::Reset()
|
||||
{
|
||||
_lastPrUpdateT = static_cast<WebRtc_UWord32>(VCMTickTime::MillisecondTimestamp());
|
||||
_lastPacketPerFrameUpdateT = static_cast<WebRtc_UWord32>(VCMTickTime::MillisecondTimestamp());
|
||||
_lossPr255.Reset(0.9999f);
|
||||
_packetsPerFrame.Reset(0.9999f);
|
||||
_fecRateDelta = _fecRateKey = 0;
|
||||
for (WebRtc_Word32 i=0; i < kLossPrHistorySize; i++)
|
||||
{
|
||||
_lossPrHistory[i].lossPr255 = 0;
|
||||
_lossPrHistory[i].timeMs = -1;
|
||||
}
|
||||
_shortMaxLossPr255 = 0;
|
||||
ClearLossProtections();
|
||||
}
|
||||
|
||||
}
|
||||
372
modules/video_coding/main/source/media_opt_util.h
Normal file
372
modules/video_coding/main/source/media_opt_util.h
Normal file
@ -0,0 +1,372 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
#include "list_wrapper.h"
|
||||
#include "trace.h"
|
||||
#include "exp_filter.h"
|
||||
#include "internal_defines.h"
|
||||
#include "tick_time.h"
|
||||
|
||||
#include <cmath>
|
||||
#include <cstdlib>
|
||||
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
class ListWrapper;
|
||||
|
||||
// Size and window length of the windowed max-loss history kept by
// VCMLossProtectionLogic.
enum { kLossPrHistorySize = 30 }; // 30 time periods
enum { kLossPrShortFilterWinMs = 1000 }; // 1000 ms, total filter length is 30 000 ms

// Supported FEC code families (currently only generic XOR FEC).
enum VCMFecTypes
{
    kXORFec
};
|
||||
|
||||
// Snapshot of the channel/encoder state handed to each protection method's
// UpdateParameters() call.
// NOTE(review): packetsPerFrameKey is missing from the constructor's
// initializer list and starts uninitialized — confirm it is always written
// before first read (VCMLossProtectionLogic::UpdateMethod does set it).
struct VCMProtectionParameters
{
    VCMProtectionParameters() : rtt(0), lossPr(0), bitRate(0), packetsPerFrame(0),
        frameRate(0), keyFrameSize(0), fecRateDelta(0), fecRateKey(0),
        residualPacketLoss(0.0), fecType(kXORFec) {}

    WebRtc_UWord32 rtt;                 // round-trip time
    float lossPr;                       // packet loss probability [0,1]
    float bitRate;                      // target bit rate
    float packetsPerFrame;              // avg packets per delta frame
    float packetsPerFrameKey;           // avg packets per key frame
    float frameRate;                    // actual/sent frame rate
    float keyFrameSize;                 // size of latest key frame
    WebRtc_UWord8 fecRateDelta;         // FEC rate for delta frames [0,255]
    WebRtc_UWord8 fecRateKey;           // FEC rate for key frames [0,255]
    float residualPacketLoss;           // loss remaining after FEC recovery
    VCMFecTypes fecType;                // FEC code family
};
|
||||
|
||||
|
||||
/******************************/
|
||||
/* VCMProtectionMethod class */
|
||||
/****************************/
|
||||
|
||||
// Identifiers for the available loss-protection strategies.
enum VCMProtectionMethodEnum
{
    kNACK,
    kFEC,
    kNackFec,
    kIntraRequest, // I-frame request
    kPeriodicIntra, // I-frame refresh
    kMBIntraRefresh, // Macro block refresh
    kNone
};
|
||||
|
||||
// One entry of the windowed max-loss history: the max loss seen during a
// window, and the window's start time (-1 marks an empty slot).
class VCMLossProbabilitySample
{
public:
    VCMLossProbabilitySample() : lossPr255(0), timeMs(-1) {};

    WebRtc_UWord8 lossPr255;
    WebRtc_Word64 timeMs;
};
|
||||
|
||||
|
||||
|
||||
// Abstract base for all loss-protection strategies. Subclasses compute
// their robustness settings (protection factors, effective packet loss)
// and their cost (_efficiency/_score) in UpdateParameters().
// NOTE(review): _effectivePacketLoss is not initialized by this
// constructor — confirm every subclass sets it before it is read.
class VCMProtectionMethod
{
public:
    //friend VCMProtectionMethod;
    VCMProtectionMethod(VCMProtectionMethodEnum type) : _protectionFactorK(0),
        _protectionFactorD(0), _residualPacketLoss(0.0), _scaleProtKey(2.0),
        _maxPayloadSize(1460), _efficiency(0), _score(0), _type(type) {}
    virtual ~VCMProtectionMethod() {}

    // Updates the efficiency of the method using the parameters provided
    //
    // Input:
    //          - parameters         : Parameters used to calculate the efficiency
    //
    // Return value                  : True if this method is recommended in
    //                                 the given conditions.
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters) = 0;

    // Returns the protection type
    //
    // Return value                  : The protection type
    enum VCMProtectionMethodEnum Type() const { return _type; }

    // Evaluates if this protection method is considered
    // better than the provided method.
    //
    // Input:
    //          - pm                 : The protection method to compare with
    bool BetterThan(VCMProtectionMethod *pm);

    // Returns the bit rate required by this protection method
    // during these conditions.
    //
    // Return value                  : Required bit rate
    virtual float RequiredBitRate() { return _efficiency; }

    // Returns the effective packet loss for ER, required by this protection method
    //
    // Return value                  : Required effective packet loss
    virtual WebRtc_UWord8 RequiredPacketLossER() { return _effectivePacketLoss; }

    // Extracts the FEC protection factor for Key frame, required by this protection method
    //
    // Return value                  : Required protectionFactor for Key frame
    virtual WebRtc_UWord8 RequiredProtectionFactorK() { return _protectionFactorK; }

    // Extracts the FEC protection factor for Delta frame, required by this protection method
    //
    // Return value                  : Required protectionFactor for delta frame
    virtual WebRtc_UWord8 RequiredProtectionFactorD() { return _protectionFactorD; }

    // Robustness outputs, written by UpdateParameters() of the subclasses.
    WebRtc_UWord8 _effectivePacketLoss;
    WebRtc_UWord8 _protectionFactorK;
    WebRtc_UWord8 _protectionFactorD;
    float _residualPacketLoss;
    float _scaleProtKey;
    WebRtc_Word32 _maxPayloadSize;

protected:
    float _efficiency;  // bit-rate cost of the method
    float _score;       // ranking value (lower is better per BetterThan)

private:
    const enum VCMProtectionMethodEnum _type;

};
|
||||
|
||||
// NACK-based protection: retransmission, recommended only below an RTT
// threshold (_NACK_MAX_RTT ms).
class VCMNackMethod : public VCMProtectionMethod
{
public:
    VCMNackMethod() : VCMProtectionMethod(kNACK), _NACK_MAX_RTT(200) {}
    virtual ~VCMNackMethod() {}
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
    //get the effective packet loss for ER
    bool EffectivePacketLoss(WebRtc_UWord8 effPacketLoss, WebRtc_UWord16 rttTime);
    //get the threshold for NACK
    WebRtc_UWord16 MaxRttNack() const;
private:
    const WebRtc_UWord16 _NACK_MAX_RTT;
};
|
||||
|
||||
// FEC-based protection: computes protection factors for key/delta frames
// and the resulting residual/effective packet loss.
class VCMFecMethod : public VCMProtectionMethod
{
public:
    VCMFecMethod() : VCMProtectionMethod(kFEC) {}
    virtual ~VCMFecMethod() {}
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
    //get the effective packet loss for ER
    bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
    //get the FEC protection factors
    bool ProtectionFactor(const VCMProtectionParameters* parameters);
    //get the boost for key frame protection
    WebRtc_UWord8 BoostCodeRateKey(WebRtc_UWord8 packetFrameDelta,
                                   WebRtc_UWord8 packetFrameKey) const;
    //convert the rates: defined relative to total# packets or source# packets
    WebRtc_UWord8 ConvertFECRate(WebRtc_UWord8 codeRate) const;
    //get the average effective recovery from FEC: for random loss model
    float AvgRecoveryFEC(const VCMProtectionParameters* parameters) const;
};
|
||||
|
||||
|
||||
// Hybrid NACK + FEC protection.
class VCMNackFecMethod : public VCMProtectionMethod
{
public:
    VCMNackFecMethod() : VCMProtectionMethod(kNackFec) {}
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
    //get the effective packet loss for ER
    bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
    //get the FEC protection factors
    bool ProtectionFactor(const VCMProtectionParameters* parameters);

};
|
||||
|
||||
|
||||
// Intra-frame-request protection: recommended only below an RTT threshold.
class VCMIntraReqMethod : public VCMProtectionMethod
{
public:
    VCMIntraReqMethod() : VCMProtectionMethod(kIntraRequest), _IREQ_MAX_RTT(150) {}
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
private:
    const WebRtc_UWord32 _IREQ_MAX_RTT;
};
|
||||
|
||||
// Periodic I-frame refresh: the lowest-priority protection option.
class VCMPeriodicIntraMethod : public VCMProtectionMethod
{
public:
    VCMPeriodicIntraMethod() : VCMProtectionMethod(kPeriodicIntra) {}
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
};
|
||||
|
||||
// Macroblock intra-refresh protection: only useful above a minimum bit rate;
// reports no extra required bit rate of its own.
class VCMMbIntraRefreshMethod : public VCMProtectionMethod
{
public:
    VCMMbIntraRefreshMethod() :
        VCMProtectionMethod(kMBIntraRefresh), _MBREF_MIN_BITRATE(150) {}
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
    virtual float RequiredBitRate() { return 0.0; }
private:
    const WebRtc_UWord32 _MBREF_MIN_BITRATE;
};
|
||||
|
||||
// Owns the set of available protection methods, tracks channel/encoder
// statistics (loss, RTT, rates, packets per frame) and selects/updates the
// active protection method.
// NOTE(review): the constructor's initializer list omits _fecType,
// _lastPacketPerFrameUpdateT, _lastPacketPerFrameUpdateTKey and
// _effectivePacketLoss-related state; Reset() (called from the ctor) covers
// only some of these — confirm _fecType is always set via UpdateFecType()
// before first use.
class VCMLossProtectionLogic
{
public:
    VCMLossProtectionLogic() : _availableMethods(), _selectedMethod(NULL),
        _bestNotOkMethod(NULL), _rtt(0), _lossPr(0.0f), _bitRate(0.0f), _frameRate(0.0f),
        _keyFrameSize(0.0f), _fecRateKey(0), _fecRateDelta(0), _lastPrUpdateT(0),
        _lossPr255(0.9999f), _lossPrHistory(), _shortMaxLossPr255(0),
        _packetsPerFrame(0.9999f), _packetsPerFrameKey(0.9999f), _residualPacketLoss(0),
        _boostRateKey(2)
    { Reset(); }

    ~VCMLossProtectionLogic();

    // Deletes all registered methods and clears the selection.
    void ClearLossProtections();
    // Registers a method (ownership transferred); false on NULL/duplicate type.
    bool AddMethod(VCMProtectionMethod *newMethod);
    // Removes and deletes the method of the given type; false if not found.
    bool RemoveMethod(VCMProtectionMethodEnum methodType);
    // Looks up a registered method by type; NULL if absent.
    VCMProtectionMethod* FindMethod(VCMProtectionMethodEnum methodType) const;
    // Largest RequiredBitRate() among registered methods.
    float HighestOverhead() const;

    // Update the round-trip time
    //
    // Input:
    //          - rtt           : Round-trip time in seconds.
    void UpdateRtt(WebRtc_UWord32 rtt);

    // Update residual packet loss
    //
    // Input:
    //          - residualPacketLoss  : residual packet loss: effective loss after FEC recovery
    void UpdateResidualPacketLoss(float residualPacketLoss);

    // Update fecType
    //
    // Input:
    //          - fecType           : kXORFec for generic XOR FEC
    void UpdateFecType(VCMFecTypes fecType);

    // Update the loss probability.
    //
    // Input:
    //          - lossPr255         : The packet loss probability in the interval [0, 255],
    //                                reported by RTCP.
    void UpdateLossPr(WebRtc_UWord8 lossPr255);

    // Update the filtered packet loss.
    //
    // Input:
    //          - packetLossEnc     : The reported packet loss filtered (max window or average)
    void UpdateFilteredLossPr(WebRtc_UWord8 packetLossEnc);

    // Update the current target bit rate.
    //
    // Input:
    //          - bitRate           : The current target bit rate in kbits/s
    void UpdateBitRate(float bitRate);

    // Update the number of packets per frame estimate, for delta frames
    //
    // Input:
    //          - nPackets         : Number of packets used to send the latest frame.
    void UpdatePacketsPerFrame(float nPackets);

    // Update the number of packets per frame estimate, for key frames
    //
    // Input:
    //          - nPackets         : Number of packets used to send the latest frame.
    void UpdatePacketsPerFrameKey(float nPackets);

    // Update the keyFrameSize estimate
    //
    // Input:
    //          - keyFrameSize     : The size of the latest sent key frame.
    void UpdateKeyFrameSize(float keyFrameSize);

    // Update the frame rate
    //
    // Input:
    //          - frameRate        : The current target frame rate.
    void UpdateFrameRate(float frameRate) { _frameRate = frameRate; }

    // The amount of packet loss to cover for with FEC.
    //
    // Input:
    //          - fecRateKey      : Packet loss to cover for with FEC when sending key frames.
    //          - fecRateDelta    : Packet loss to cover for with FEC when sending delta frames.
    void UpdateFECRates(WebRtc_UWord8 fecRateKey, WebRtc_UWord8 fecRateDelta)
                       { _fecRateKey = fecRateKey; _fecRateDelta = fecRateDelta; }

    // Update the protection methods with the current VCMProtectionParameters and
    // choose the best method available. The update involves computing the robustness settings
    // for the protection method.
    //
    // Input:
    //          - newMethod         : If not NULL, this is method will be selected by force.
    //
    // Return value     : True if the selected method is recommended using these settings,
    //                    false if it's the best method, but still not recommended to be used.
    //                    E.g. if NACK is the best available, but the RTT is too large, false
    //                    will be returned.
    bool UpdateMethod(VCMProtectionMethod *newMethod = NULL);

    // Returns the method currently selected.
    //
    // Return value                 : The protection method currently selected.
    VCMProtectionMethod* SelectedMethod() const;

    // Returns the filtered loss probability in the interval [0, 255].
    //
    // Return value                 : The filtered loss probability
    WebRtc_UWord8 FilteredLoss() const;

    // Get constraint on NACK
    //
    // return value                 : RTT threshold for using NACK
    WebRtc_UWord8 GetNackThreshold() const;

    void Reset();

private:
    // Sets the available loss protection methods.
    void UpdateMaxLossHistory(WebRtc_UWord8 lossPr255, WebRtc_Word64 now);
    WebRtc_UWord8 MaxFilteredLossPr(WebRtc_Word64 nowMs) const;
    ListWrapper _availableMethods;              // owned protection methods
    VCMProtectionMethod* _selectedMethod;       // points into _availableMethods
    VCMProtectionMethod* _bestNotOkMethod;
    VCMProtectionParameters _currentParameters;
    WebRtc_UWord32 _rtt;
    float _lossPr;
    float _bitRate;
    float _frameRate;
    float _keyFrameSize;
    WebRtc_UWord8 _fecRateKey;
    WebRtc_UWord8 _fecRateDelta;
    WebRtc_UWord32 _lastPrUpdateT;
    WebRtc_UWord32 _lastPacketPerFrameUpdateT;
    WebRtc_UWord32 _lastPacketPerFrameUpdateTKey;
    VCMExpFilter _lossPr255;                    // exponential loss filter
    VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
    WebRtc_UWord8 _shortMaxLossPr255;           // running max of current window
    VCMExpFilter _packetsPerFrame;
    VCMExpFilter _packetsPerFrameKey;
    float _residualPacketLoss;
    WebRtc_UWord8 _boostRateKey;
    VCMFecTypes _fecType;
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
|
||||
687
modules/video_coding/main/source/media_optimization.cc
Normal file
687
modules/video_coding/main/source/media_optimization.cc
Normal file
@ -0,0 +1,687 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "media_optimization.h"
|
||||
#include "content_metrics_processing.h"
|
||||
#include "frame_dropper.h"
|
||||
#include "qm_select.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Constructor: zero-initializes rate/loss bookkeeping and allocates the
// helper components (frame dropper, loss-protection logic, content metrics,
// QM selector), all owned by this object and freed in the destructor.
VCMMediaOptimization::VCMMediaOptimization(WebRtc_Word32 id):
_id(id),
_maxBitRate(0),
_sendCodecType(kVideoCodecUnknown),
_codecWidth(0),
_codecHeight(0),
_userFrameRate(0),
_lossProtOverhead(0),
_packetLossEnc(0),
_fractionLost(0),
_sendStatisticsZeroEncode(0),
_maxPayloadSize(1460),
_lastBitRate(0),
_targetBitRate(0),
_enableQm(false),
_videoProtectionCallback(NULL),
_videoQMSettingsCallback(NULL),
_encodedFrameSamples(),
_avgSentBitRateBps(0.0f),
_keyFrameCnt(0),
_deltaFrameCnt(0),
_lastQMUpdateTime(0),
_lastChangeTime(0)
{
    memset(_sendStatistics, 0, sizeof(_sendStatistics));

    _frameDropper  = new VCMFrameDropper(_id);
    _lossProtLogic = new VCMLossProtectionLogic();
    _content = new VCMContentMetricsProcessing();
    _qms = new VCMQmSelect();
}
|
||||
|
||||
// Destructor: releases the owned helper components.
VCMMediaOptimization::~VCMMediaOptimization(void)
{
    // Explicitly delete the protection methods before the logic object
    // itself (its own destructor would also do this).
    _lossProtLogic->ClearLossProtections();
    delete _lossProtLogic;
    delete _frameDropper;
    delete _content;
    delete _qms;
}
|
||||
|
||||
// Resets all rate/loss/QM state to initial values.
//
// Return value : VCM_OK.
//
// Fix: _lossProtLogic->Reset() was previously invoked twice (once at the
// top and once after UpdateFrameRate); the first call was fully redundant
// since the second repeats it and UpdateFrameRate's effect survives Reset.
WebRtc_Word32
VCMMediaOptimization::Reset()
{
    _frameDropper->Reset();
    _frameDropper->SetRates(0, 0);
    _content->Reset();
    _qms->Reset();
    // Seed the loss-protection logic with the current input frame rate,
    // then reset its filters/history/timestamps.
    _lossProtLogic->UpdateFrameRate(static_cast<float>(InputFrameRate()));
    _lossProtLogic->Reset();
    _sendStatisticsZeroEncode = 0;
    _lastBitRate = 0;
    _targetBitRate = 0;
    _lossProtOverhead = 0;
    _codecWidth = 0;
    _codecHeight = 0;
    _userFrameRate = 0;
    _keyFrameCnt = 0;
    _deltaFrameCnt = 0;
    _lastQMUpdateTime = 0;
    _lastChangeTime = 0;
    // Invalidate the sent-bit-rate sample window (-1 marks empty slots).
    for (WebRtc_Word32 i = 0; i < kBitrateMaxFrameSamples; i++)
    {
        _encodedFrameSamples[i]._sizeBytes = -1;
        _encodedFrameSamples[i]._timeCompleteMs = -1;
    }
    _avgSentBitRateBps = 0.0f;
    return VCM_OK;
}
|
||||
|
||||
// Distributes the total available bit rate between source coding and loss
// protection, updates the selected protection method, notifies the
// protection callback, and configures the frame dropper.
//
// Input:
//  - bitRate          : total available channel rate.
//  - fractionLost     : in: loss reported by RTCP [0,255];
//                       out (by reference): effective loss for the encoder.
//  - roundTripTimeMs  : current RTT estimate.
// Return value        : the resulting source-coding target bit rate.
WebRtc_UWord32
VCMMediaOptimization::SetTargetRates(WebRtc_UWord32 bitRate,
                                     WebRtc_UWord8 &fractionLost,
                                     WebRtc_UWord32 roundTripTimeMs)
{
    VCMProtectionMethod *selectedMethod = _lossProtLogic->SelectedMethod();
    _lossProtLogic->UpdateBitRate(static_cast<float>(bitRate));
    _lossProtLogic->UpdateLossPr(fractionLost);
    _lossProtLogic->UpdateRtt(roundTripTimeMs);
    // NOTE(review): residual loss is seeded with the raw fractionLost
    // ([0,255] domain) here, while VCMFecMethod stores it in [0,1] —
    // confirm the intended domain.
    _lossProtLogic->UpdateResidualPacketLoss(static_cast<float>(fractionLost));

    VCMFecTypes fecType = kXORFec; // generic FEC
    _lossProtLogic->UpdateFecType(fecType);

    //Get frame rate for encoder: this is the actual/sent frame rate
    float actualFrameRate = SentFrameRate();

    // sanity
    if (actualFrameRate < 1.0)
    {
        actualFrameRate = 1.0;
    }

    // Update frame rate for the loss protection logic class: frame rate should be the actual/sent rate
    _lossProtLogic->UpdateFrameRate(actualFrameRate);

    _fractionLost = fractionLost;

    // The effective packet loss may be the received loss or filtered, i.e., average or max filter may be used.
    //We should think about which filter is appropriate for low/high bit rates, low/high loss rates, etc.
    WebRtc_UWord8 packetLossEnc = _lossProtLogic->FilteredLoss();

    //For now use the filtered loss for computing the robustness settings
    _lossProtLogic->UpdateFilteredLossPr(packetLossEnc);

    // Rate cost of the protection methods
    _lossProtOverhead = 0;

    if(selectedMethod)
    {

        //Update method will compute the robustness settings for the given protection method and the overhead cost
        //the protection method is set by the user via SetVideoProtection.
        //The robustness settings are: the effecitve packet loss for ER and the FEC protection settings
        _lossProtLogic->UpdateMethod();

        //Get the code rate for Key frames
        const WebRtc_UWord8 codeRateKeyRTP = selectedMethod->RequiredProtectionFactorK();

        //Get the code rate for Delta frames
        const WebRtc_UWord8 codeRateDeltaRTP = selectedMethod->RequiredProtectionFactorD();

        //Get the effective packet loss for ER
        packetLossEnc = selectedMethod->RequiredPacketLossER();

        // Get the bit cost of protection method
        _lossProtOverhead = static_cast<WebRtc_UWord32>(_lossProtLogic->HighestOverhead() + 0.5f);

        //NACK is on for NACK and NackFec protection method: off for FEC method
        bool nackStatus = true;
        if (selectedMethod->Type() == kFEC)
        {
            nackStatus = false;
        }

        // Push the computed FEC rates and NACK on/off state to RTP.
        if(_videoProtectionCallback)
        {
            _videoProtectionCallback->ProtectionRequest(codeRateDeltaRTP ,codeRateKeyRTP, nackStatus);
        }

    }

    // Update effective packet loss for encoder: note: fractionLost was passed as reference
    fractionLost = packetLossEnc;

    WebRtc_UWord32 nackBitRate=0;
    if(selectedMethod && _lossProtLogic->FindMethod(kNACK) != NULL)
    {
        // Make sure we don't over-use the channel momentarily. This is
        // necessary for NACK since it can be very bursty.
        // NOTE(review): the clamp uses _targetBitRate from the PREVIOUS
        // call (it is recomputed below) — confirm intended.
        nackBitRate = (_lastBitRate * fractionLost) / 255;
        if (nackBitRate > _targetBitRate)
        {
            nackBitRate = _targetBitRate;
        }
        _frameDropper->SetRates(static_cast<float>(bitRate - nackBitRate), 0);
    }
    else
    {
        _frameDropper->SetRates(static_cast<float>(bitRate - _lossProtOverhead), 0);
    }

    //This may be used for UpdateEncoderBitRate: lastBitRate is total rate, before compensation
    _lastBitRate = _targetBitRate;

    //Source coding rate: total rate - protection overhead
    _targetBitRate = bitRate - _lossProtOverhead;

    if (_enableQm)
    {
        //Update QM with rates
        _qms->UpdateRates((float)_targetBitRate, _avgSentBitRateBps,_incomingFrameRate);
        //Check for QM selection
        bool selectQM = checkStatusForQMchange();
        if (selectQM)
        {
            SelectQuality();
        }
    }

    return _targetBitRate;
}
|
||||
|
||||
|
||||
bool
|
||||
VCMMediaOptimization::DropFrame()
|
||||
{
|
||||
_frameDropper->Leak((WebRtc_UWord32)(InputFrameRate() + 0.5f)); // leak appropriate number of bytes
|
||||
return _frameDropper->DropFrame();
|
||||
}
|
||||
|
||||
WebRtc_Word32
VCMMediaOptimization::SentFrameCount(VCMFrameCount &frameCount) const
{
    // Report how many key and delta frames have been encoded so far.
    frameCount.numKeyFrames = _keyFrameCnt;
    frameCount.numDeltaFrames = _deltaFrameCnt;
    return VCM_OK;
}
|
||||
|
||||
WebRtc_Word32
VCMMediaOptimization::SetEncodingData(VideoCodecType sendCodecType, WebRtc_Word32 maxBitRate,
                                      WebRtc_UWord32 frameRate, WebRtc_UWord32 bitRate,
                                      WebRtc_UWord16 width, WebRtc_UWord16 height)
{
    // A new codec was configured: everything codec specific is reset here.
    // A change in native dimensions may be user or QM initiated; which one
    // can only be determined after the first frame has been processed.
    _lastChangeTime = VCMTickTime::MillisecondTimestamp();
    _content->Reset();
    _content->UpdateFrameRate(frameRate);

    // Cache the new encoder configuration.
    _sendCodecType = sendCodecType;
    _maxBitRate = maxBitRate;
    _targetBitRate = bitRate;
    _userFrameRate = static_cast<float>(frameRate);
    _codecWidth = width;
    _codecHeight = height;

    // Propagate the new rates to the protection logic and the frame dropper.
    _lossProtLogic->UpdateBitRate(static_cast<float>(bitRate));
    _lossProtLogic->UpdateFrameRate(static_cast<float>(frameRate));
    _frameDropper->Reset();
    _frameDropper->SetRates(static_cast<float>(bitRate), static_cast<float>(frameRate));

    // Re-initialize quality-mode selection with the new state.
    return _qms->Initialize((float)_targetBitRate, _userFrameRate, _codecWidth, _codecHeight);
}
|
||||
|
||||
WebRtc_Word32
VCMMediaOptimization::RegisterProtectionCallback(VCMProtectionCallback* protectionCallback)
{
    // Store the callback used to push protection settings (FEC factors,
    // NACK status) to the user.
    _videoProtectionCallback = protectionCallback;
    return VCM_OK;
}
|
||||
|
||||
|
||||
void
|
||||
VCMMediaOptimization::EnableFrameDropper(bool enable)
|
||||
{
|
||||
_frameDropper->Enable(enable);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
VCMMediaOptimization::EnableNack(bool enable)
|
||||
{
|
||||
// Add NACK to the list of loss protection methods
|
||||
bool updated = false;
|
||||
if (enable)
|
||||
{
|
||||
VCMProtectionMethod *nackMethod = new VCMNackMethod();
|
||||
updated = _lossProtLogic->AddMethod(nackMethod);
|
||||
if (!updated)
|
||||
{
|
||||
delete nackMethod;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
updated = _lossProtLogic->RemoveMethod(kNACK);
|
||||
}
|
||||
if (updated)
|
||||
{
|
||||
_lossProtLogic->UpdateMethod();
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
VCMMediaOptimization::IsNackEnabled()
|
||||
{
|
||||
return (_lossProtLogic->FindMethod(kFEC) != NULL);
|
||||
}
|
||||
|
||||
void
|
||||
VCMMediaOptimization::EnableFEC(bool enable)
|
||||
{
|
||||
// Add FEC to the list of loss protection methods
|
||||
bool updated = false;
|
||||
if (enable)
|
||||
{
|
||||
VCMProtectionMethod *fecMethod = new VCMFecMethod();
|
||||
updated = _lossProtLogic->AddMethod(fecMethod);
|
||||
if (!updated)
|
||||
{
|
||||
delete fecMethod;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
updated = _lossProtLogic->RemoveMethod(kFEC);
|
||||
}
|
||||
if (updated)
|
||||
{
|
||||
_lossProtLogic->UpdateMethod();
|
||||
}
|
||||
}
|
||||
void
|
||||
VCMMediaOptimization::EnableNackFEC(bool enable)
|
||||
{
|
||||
// Add NackFec to the list of loss protection methods
|
||||
bool updated = false;
|
||||
if (enable)
|
||||
{
|
||||
VCMProtectionMethod *nackfecMethod = new VCMNackFecMethod();
|
||||
updated = _lossProtLogic->AddMethod(nackfecMethod);
|
||||
if (!updated)
|
||||
{
|
||||
delete nackfecMethod;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
updated = _lossProtLogic->RemoveMethod(kNackFec);
|
||||
}
|
||||
if (updated)
|
||||
{
|
||||
_lossProtLogic->UpdateMethod();
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
VCMMediaOptimization::IsFecEnabled()
|
||||
{
|
||||
return (_lossProtLogic->FindMethod(kFEC) != NULL);
|
||||
}
|
||||
|
||||
bool
|
||||
VCMMediaOptimization::IsNackFecEnabled()
|
||||
{
|
||||
return (_lossProtLogic->FindMethod(kNackFec) != NULL);
|
||||
}
|
||||
|
||||
void
|
||||
VCMMediaOptimization::SetMtu(WebRtc_Word32 mtu)
|
||||
{
|
||||
_maxPayloadSize = mtu;
|
||||
}
|
||||
|
||||
float
|
||||
VCMMediaOptimization::SentFrameRate()
|
||||
{
|
||||
if(_frameDropper)
|
||||
{
|
||||
return _frameDropper->ActualFrameRate((WebRtc_UWord32)(InputFrameRate() + 0.5f));
|
||||
}
|
||||
|
||||
return VCM_CODEC_ERROR;
|
||||
}
|
||||
|
||||
float
|
||||
VCMMediaOptimization::SentBitRate()
|
||||
{
|
||||
UpdateBitRateEstimate(-1, VCMTickTime::MillisecondTimestamp());
|
||||
return _avgSentBitRateBps / 1000.0f;
|
||||
}
|
||||
|
||||
WebRtc_Word32
VCMMediaOptimization::MaxBitRate()
{
    // Maximum allowed bit rate, as configured via SetEncodingData().
    return _maxBitRate;
}
|
||||
|
||||
// Feed the result of one encode into the optimizer: updates the bit rate
// estimate, the frame dropper fill level, the loss-protection packet/frame
// statistics, the QM encoded-size tracking, and the key/delta counters.
//
// Inputs:
//      - encodedLength    : encoded frame size in bytes; values <= 0 only
//                           refresh the bit rate estimate.
//      - encodedFrameType : frame type produced by the encoder.
// Return value            : VCM_OK.
WebRtc_Word32
VCMMediaOptimization::UpdateWithEncodedData(WebRtc_Word32 encodedLength,
                                            FrameType encodedFrameType)
{
    // look into the ViE version - debug mode - needs also number of layers.
    UpdateBitRateEstimate(encodedLength, VCMTickTime::MillisecondTimestamp());
    if(encodedLength > 0)
    {
        // Anything that is neither a key nor a golden frame counts as delta.
        const bool deltaFrame = (encodedFrameType != kVideoFrameKey &&
                                 encodedFrameType != kVideoFrameGolden);

        _frameDropper->Fill(encodedLength, deltaFrame);
        if (_maxPayloadSize > 0 && encodedLength > 0)
        {
            // Lower bound on packets needed for this frame given the MTU.
            const float minPacketsPerFrame = encodedLength /
                                             static_cast<float>(_maxPayloadSize);
            if (deltaFrame)
            {
                _lossProtLogic->UpdatePacketsPerFrame(minPacketsPerFrame);
            }
            else
            {
                _lossProtLogic->UpdatePacketsPerFrameKey(minPacketsPerFrame);
            }

            if (_enableQm)
            {
                // update quality select with encoded length
                _qms->UpdateEncodedSize(encodedLength, encodedFrameType);
            }
        }
        if (!deltaFrame && encodedLength > 0)
        {
            // Track key frame size separately for the protection logic.
            _lossProtLogic->UpdateKeyFrameSize(static_cast<float>(encodedLength));
        }

        // updating counters
        if (deltaFrame){
            _deltaFrameCnt++;
        } else {
            _keyFrameCnt++;
        }

    }

    return VCM_OK;

}
|
||||
|
||||
// Maintains a sliding window (kBitrateAverageWinMs) of encoded-frame size
// samples and recomputes _avgSentBitRateBps [bits/s].
//
// Inputs:
//      - encodedLength : size in bytes of a newly encoded frame, or a
//                        non-positive value (SentBitRate passes -1) to
//                        refresh the estimate without adding a sample.
//      - nowMs         : current time in ms.
//
// NOTE(review): when called with encodedLength == -1, the expression
// (frameSizeSum + encodedLength) below subtracts one byte from the window
// sum - presumably negligible, but worth confirming.
void VCMMediaOptimization::UpdateBitRateEstimate(WebRtc_Word64 encodedLength,
                                                 WebRtc_Word64 nowMs)
{
    int i = kBitrateMaxFrameSamples - 1;
    WebRtc_UWord32 frameSizeSum = 0;
    WebRtc_Word64 timeOldest = -1;
    // Find an empty slot for storing the new sample and at the same time
    // accumulate the history. A _sizeBytes of -1 marks an unused slot.
    for (; i >= 0; i--)
    {
        if (_encodedFrameSamples[i]._sizeBytes == -1)
        {
            // Found empty slot
            break;
        }
        // Only frames completed inside the averaging window contribute.
        if (nowMs - _encodedFrameSamples[i]._timeCompleteMs < kBitrateAverageWinMs)
        {
            frameSizeSum += static_cast<WebRtc_UWord32>(_encodedFrameSamples[i]._sizeBytes);
            if (timeOldest == -1)
            {
                timeOldest = _encodedFrameSamples[i]._timeCompleteMs;
            }
        }
    }
    if (encodedLength > 0)
    {
        if (i < 0)
        {
            // No empty slot: age every sample by one position (the last one
            // falls off) and insert the new sample at index 0.
            for (i = kBitrateMaxFrameSamples - 2; i >= 0; i--)
            {
                _encodedFrameSamples[i + 1] = _encodedFrameSamples[i];
            }
            i++;
        }
        // Insert new sample
        _encodedFrameSamples[i]._sizeBytes = encodedLength;
        _encodedFrameSamples[i]._timeCompleteMs = nowMs;
    }
    if (timeOldest > -1)
    {
        // Update average bit rate: bytes * 8 bits * 1000 ms/s over the span
        // back to the oldest in-window sample.
        float denom = static_cast<float>(nowMs - timeOldest);
        if (denom < 1.0)
        {
            // Guard against division by zero for sub-millisecond spans.
            denom = 1.0;
        }
        _avgSentBitRateBps = (frameSizeSum + encodedLength) * 8 * 1000 / denom;
    }
    else if (encodedLength > 0)
    {
        // First sample in an empty window: rate is just the sample in bits.
        _avgSentBitRateBps = static_cast<float>(encodedLength * 8);
    }
    else
    {
        // Nothing in the window and no new sample.
        _avgSentBitRateBps = 0;
    }
}
|
||||
|
||||
|
||||
WebRtc_Word32
VCMMediaOptimization::RegisterVideoQMCallback(VCMQMSettingsCallback *videoQMSettings)
{
    _videoQMSettingsCallback = videoQMSettings;
    // QM is active exactly when a settings callback is registered.
    _enableQm = (_videoQMSettingsCallback != NULL);
    return VCM_OK;
}
|
||||
|
||||
void
|
||||
VCMMediaOptimization::updateContentData(const VideoContentMetrics *contentMetrics)
|
||||
{
|
||||
//Updating content metrics
|
||||
if (contentMetrics == NULL)
|
||||
{
|
||||
//No QM if metrics are NULL
|
||||
_enableQm = false;
|
||||
_qms->Reset();
|
||||
}
|
||||
else
|
||||
{
|
||||
_content->UpdateContentData(contentMetrics);
|
||||
}
|
||||
}
|
||||
|
||||
// Runs one quality-mode (QM) selection pass over the accumulated content
// metrics, applies any resulting spatial/temporal change via QMUpdate(),
// and resets the QM sampling state for the next period.
//
// Return value : VCM_OK, or the (negative) error from _qms->SelectQuality.
WebRtc_Word32
VCMMediaOptimization::SelectQuality()
{
    // Reset quantities for QM select
    _qms->ResetQM();

    // Select quality mode
    VCMQualityMode* qm = NULL;
    WebRtc_Word32 ret = _qms->SelectQuality(_content->Data(), &qm);
    if (ret < 0)
    {
        return ret;
    }

    // Check for updates to spatial/temporal modes
    QMUpdate(qm);

    // Reset all the rate and related frame counters quantities
    _qms->ResetRates();

    // Remember when the last QM selection ran; this rate-limits the next
    // pass (see checkStatusForQMchange()).
    _lastQMUpdateTime = VCMTickTime::MillisecondTimestamp();

    // Reset content metrics
    _content->Reset();

    return VCM_OK;
}
|
||||
|
||||
|
||||
// Check timing constraints and look for significant change in:
|
||||
// (1) scene content
|
||||
// (2) target bit rate
|
||||
|
||||
bool
|
||||
VCMMediaOptimization::checkStatusForQMchange()
|
||||
{
|
||||
|
||||
bool status = true;
|
||||
|
||||
// Check that we do not call QMSelect too often, and that we waited some time (to sample the metrics) from the event lastChangeTime
|
||||
// lastChangeTime is the time where user changed the size/rate/frame rate (via SetEncodingData)
|
||||
WebRtc_Word64 now = VCMTickTime::MillisecondTimestamp();
|
||||
if ((now - _lastQMUpdateTime) < kQmMinIntervalMs ||
|
||||
(now - _lastChangeTime) < kQmMinIntervalMs)
|
||||
{
|
||||
status = false;
|
||||
}
|
||||
|
||||
return status;
|
||||
|
||||
}
|
||||
|
||||
// Translates a selected quality mode into a concrete frame rate and frame
// dimensions, and pushes them to the VPM/user via the QM settings callback.
//
// Input:
//      - qm     : selected quality mode (spatial/temporal scale factors;
//                 1 = no change, 0 = "go back up" / restore).
// Return value : true if new settings were sent, false if no change.
//
// NOTE(review): _videoQMSettingsCallback and (in the spatial "go back up"
// branch) _content->Data() are dereferenced without NULL checks - callers
// are presumably expected to guarantee both; confirm.
bool
VCMMediaOptimization::QMUpdate(VCMQualityMode* qm)
{
    // Check for no change: all factors at 1 keep the current settings.
    if (qm->spatialHeightFact == 1 &&
        qm->spatialWidthFact == 1 &&
        qm->temporalFact == 1)
    {
        return false;
    }

    // Content metrics hold native values
    VideoContentMetrics* cm = _content->Data();

    // Temporal
    WebRtc_UWord32 frameRate = static_cast<WebRtc_UWord32>(_incomingFrameRate + 0.5f);
    // Check if go back up in temporal resolution (factor 0 = restore:
    // doubles the current incoming frame rate).
    if (qm->temporalFact == 0)
    {
        frameRate = (WebRtc_UWord32) 2 * _incomingFrameRate;
    }
    // go down in temporal resolution
    else
    {
        frameRate = (WebRtc_UWord32)(_incomingFrameRate / qm->temporalFact + 1);
    }

    // Spatial
    WebRtc_UWord32 height = _codecHeight;
    WebRtc_UWord32 width = _codecWidth;
    // Check if go back up in spatial resolution: restore native dimensions.
    if (qm->spatialHeightFact == 0 && qm->spatialWidthFact == 0)
    {
        height = cm->nativeHeight;
        width = cm->nativeWidth;
    }
    else
    {
        height = _codecHeight / qm->spatialHeightFact;
        width = _codecWidth / qm->spatialWidthFact;
    }

    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, _id,
                 "Quality Mode Update: W = %d, H = %d, FR = %f",
                 width, height, frameRate);

    // Update VPM with new target frame rate and size
    _videoQMSettingsCallback->SetVideoQMSettings(frameRate, width, height);

    return true;
}
|
||||
|
||||
|
||||
|
||||
void
|
||||
VCMMediaOptimization::UpdateIncomingFrameRate()
|
||||
{
|
||||
WebRtc_Word64 now = VCMTickTime::MillisecondTimestamp();
|
||||
if(_incomingFrameTimes[0] == 0)
|
||||
{
|
||||
// first no shift
|
||||
} else
|
||||
{
|
||||
// shift
|
||||
for(WebRtc_Word32 i = (kFrameCountHistorySize - 2); i >= 0 ; i--)
|
||||
{
|
||||
_incomingFrameTimes[i+1] = _incomingFrameTimes[i];
|
||||
}
|
||||
}
|
||||
_incomingFrameTimes[0] = now;
|
||||
ProcessIncomingFrameRate(now);
|
||||
}
|
||||
|
||||
// allowing VCM to keep track of incoming frame rate
|
||||
// Recomputes _incomingFrameRate [fps] from the timestamp history in
// _incomingFrameTimes (index 0 is the newest sample, filled by
// UpdateIncomingFrameRate). Only samples newer than kFrameHistoryWinMs
// (2 s) relative to 'now' are used.
//
// Input:
//      - now : current time in ms.
void
VCMMediaOptimization::ProcessIncomingFrameRate(WebRtc_Word64 now)
{
    WebRtc_Word32 num = 0;
    WebRtc_Word32 nrOfFrames = 0;
    // Count consecutive valid samples starting after the newest one; stop
    // at the first unset (<= 0) or too-old entry.
    for(num = 1; num < (kFrameCountHistorySize - 1); num++)
    {
        if (_incomingFrameTimes[num] <= 0 ||
            // don't use data older than 2 s
            now - _incomingFrameTimes[num] > kFrameHistoryWinMs)
        {
            break;
        } else
        {
            nrOfFrames++;
        }
    }
    if (num > 1)
    {
        // Rate = counted frames over the span back to the oldest used sample.
        const WebRtc_Word64 diff = now - _incomingFrameTimes[num-1];
        _incomingFrameRate = 1.0;
        if(diff >0)
        {
            _incomingFrameRate = nrOfFrames * 1000.0f / static_cast<float>(diff);
        }
    }
    else
    {
        // Not enough history to measure a span; use the raw count.
        _incomingFrameRate = static_cast<float>(nrOfFrames);
    }
}
|
||||
|
||||
WebRtc_UWord32
VCMMediaOptimization::InputFrameRate()
{
    // Refresh the estimate before rounding it to the nearest integer.
    ProcessIncomingFrameRate(VCMTickTime::MillisecondTimestamp());
    return static_cast<WebRtc_UWord32>(_incomingFrameRate + 0.5f);
}
|
||||
|
||||
}
|
||||
220
modules/video_coding/main/source/media_optimization.h
Normal file
220
modules/video_coding/main/source/media_optimization.h
Normal file
@ -0,0 +1,220 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPTIMIZATION_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPTIMIZATION_H_
|
||||
|
||||
#include "list_wrapper.h"
|
||||
#include "module_common_types.h"
|
||||
#include "video_coding.h"
|
||||
#include "trace.h"
|
||||
#include "media_opt_util.h"
|
||||
#include "qm_select.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
enum { kBitrateMaxFrameSamples = 60 };
|
||||
enum { kBitrateAverageWinMs = 1000 };
|
||||
|
||||
class VCMContentMetricsProcessing;
|
||||
class VCMFrameDropper;
|
||||
|
||||
// One encoded-frame sample in the sliding bit rate window kept by
// VCMMediaOptimization::UpdateBitRateEstimate().
struct VCMEncodedFrameSample
{
    // -1 in both fields marks an unused slot / no sample yet.
    VCMEncodedFrameSample() : _sizeBytes(-1), _timeCompleteMs(-1) {}

    WebRtc_Word64 _sizeBytes;       // Encoded size of the frame [bytes].
    WebRtc_Word64 _timeCompleteMs;  // Completion time of the frame [ms].
};
|
||||
|
||||
// Media optimization for the video coding module: tracks send statistics,
// drives frame dropping, loss protection (NACK/FEC/NackFec) and
// quality-mode (QM) selection for the encoder.
class VCMMediaOptimization
{
public:
    VCMMediaOptimization(WebRtc_Word32 id);
    ~VCMMediaOptimization(void);
    /*
    * Reset the Media Optimization module
    */
    WebRtc_Word32 Reset();
    /**
    * Set target Rates for the encoder given the channel parameters
    * Inputs:  bitRate - target bitRate, in the conference case this is the rate
    *                    between the sending client and the server
    *          fractionLost - packet loss in % in the network
    *          roundTripTimeMs - round trip time in milliseconds
    *          minBitRate - the bit rate of the end-point with lowest rate
    *          maxBitRate - the bit rate of the end-point with highest rate
    */
    WebRtc_UWord32 SetTargetRates(WebRtc_UWord32 bitRate,
                                  WebRtc_UWord8 &fractionLost,
                                  WebRtc_UWord32 roundTripTimeMs);

    /**
    * Inform media optimization of initial encoding state
    */
    WebRtc_Word32 SetEncodingData(VideoCodecType sendCodecType,
                                  WebRtc_Word32 maxBitRate,
                                  WebRtc_UWord32 frameRate,
                                  WebRtc_UWord32 bitRate,
                                  WebRtc_UWord16 width,
                                  WebRtc_UWord16 height);
    /**
    * Enable NACK and update error resilience parameters
    */
    void EnableNack(bool enable);
    /**
    * Returns whether or not NACK is enabled
    */
    bool IsNackEnabled();
    /**
    * Enable FEC and update error resilience parameters
    */
    void EnableFEC(bool enable);
    /**
    * Returns whether or not FEC is enabled
    */
    bool IsFecEnabled();
    /**
    * Returns whether or not NackFec is enabled
    */
    bool IsNackFecEnabled();
    /**
    * Enable NackFec and update error resilience parameters
    */
    void EnableNackFEC(bool enable);

    /**
    * Updates the max payload size
    */
    void SetMtu(WebRtc_Word32 mtu);

    /*
    * Get actual input frame rate
    */
    WebRtc_UWord32 InputFrameRate();

    /*
    * Get actual sent frame rate
    */
    float SentFrameRate();
    /*
    * Get actual sent bit rate
    */
    float SentBitRate();
    /*
    * Get maximum allowed bit rate
    */
    WebRtc_Word32 MaxBitRate();
    /*
    * Inform Media Optimization of encoding output: Length and frame type
    */
    WebRtc_Word32 UpdateWithEncodedData(WebRtc_Word32 encodedLength,
                                        FrameType encodedFrameType);
    /*
    * Register a protection callback to be used to inform the user about the
    * protection methods used
    */
    WebRtc_Word32 RegisterProtectionCallback(VCMProtectionCallback* protectionCallback);
    /*
    * Register a quality settings callback to be used to inform VPM/user about the optimal
    * quality settings (frame rate/dimension) required
    */
    WebRtc_Word32 RegisterVideoQMCallback(VCMQMSettingsCallback* videoQMSettings);
    /*
    * Enable or disable the frame dropper
    */
    void EnableFrameDropper(bool enable);

    /*
    * Returns true if the current frame should be dropped
    */
    bool DropFrame();

    /*
    * Get number of key/delta frames encoded
    */
    WebRtc_Word32 SentFrameCount(VCMFrameCount &frameCount) const;

    /*
    * update incoming frame rate value
    */
    void UpdateIncomingFrameRate();

    /**
    * Update content metric Data
    */
    void updateContentData(const VideoContentMetrics* contentMetrics);

    /**
    * Compute new Quality Mode
    */
    WebRtc_Word32 SelectQuality();

private:

    /*
    * Maintain the sliding-window average of the sent bit rate
    * (_avgSentBitRateBps); encodedLength <= 0 refreshes only
    */
    void UpdateBitRateEstimate(WebRtc_Word64 encodedLength, WebRtc_Word64 nowMs);
    /*
    * verify if QM settings differ from default, i.e. if an update is required
    * Compute actual values, as will be sent to the encoder
    */
    bool QMUpdate(VCMQualityMode* qm);
    /**
    * check if we should make a QM change
    * will return true if yes, false otherwise
    */
    bool checkStatusForQMchange();

    /*
    * Recompute _incomingFrameRate from the timestamp history
    */
    void ProcessIncomingFrameRate(WebRtc_Word64 now);

    enum { kFrameCountHistorySize = 90};  // Incoming frame time samples kept.
    enum { kFrameHistoryWinMs = 2000};    // Frame rate averaging window [ms].

    WebRtc_Word32 _id;

    WebRtc_Word32 _maxBitRate;
    VideoCodecType _sendCodecType;
    WebRtc_UWord16 _codecWidth;
    WebRtc_UWord16 _codecHeight;
    float _userFrameRate;

    VCMFrameDropper* _frameDropper;
    VCMLossProtectionLogic* _lossProtLogic;
    WebRtc_UWord32 _lossProtOverhead;  // Bit cost of the protection method.
    WebRtc_UWord8 _packetLossEnc;
    WebRtc_UWord8 _fractionLost;


    WebRtc_UWord32 _sendStatistics[4];
    WebRtc_UWord32 _sendStatisticsZeroEncode;
    WebRtc_Word32 _maxPayloadSize;     // MTU, see SetMtu().
    WebRtc_UWord32 _lastBitRate;
    WebRtc_UWord32 _targetBitRate;

    float _incomingFrameRate;
    WebRtc_Word64 _incomingFrameTimes[kFrameCountHistorySize];

    bool _enableQm;                    // True while a QM callback is set.

    VCMProtectionCallback* _videoProtectionCallback;
    VCMQMSettingsCallback* _videoQMSettingsCallback;

    VCMEncodedFrameSample _encodedFrameSamples[kBitrateMaxFrameSamples];
    float _avgSentBitRateBps;          // Sliding-window sent rate [bits/s].

    WebRtc_UWord32 _keyFrameCnt;
    WebRtc_UWord32 _deltaFrameCnt;

    VCMContentMetricsProcessing* _content;
    VCMQmSelect* _qms;

    WebRtc_Word64 _lastQMUpdateTime;
    WebRtc_Word64 _lastChangeTime; // content or user triggered


}; // end of VCMMediaOptimization class definition
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPTIMIZATION_H_
|
||||
226
modules/video_coding/main/source/nack_fec_tables.h
Normal file
226
modules/video_coding/main/source/nack_fec_tables.h
Normal file
@ -0,0 +1,226 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Table for softening the FEC rate when the hybrid NACK/FEC protection
// method is used. Values increase monotonically towards 4096 (= 2^12),
// presumably a Q12 fixed-point scale factor - confirm against the
// protection logic that indexes this 200-entry table.
const WebRtc_UWord16 VCMNackFecTable[200] = {
    27, 28, 30, 31, 33, 35, 36, 38, 40, 42,
    45, 47, 49, 52, 54, 57, 60, 63, 66, 70,
    73, 77, 81, 85, 89, 94, 98, 103, 108, 114,
    120, 126, 132, 138, 145, 152, 160, 168, 176, 185,
    194, 203, 213, 223, 234, 246, 257, 270, 283, 296,
    310, 325, 340, 356, 373, 390, 408, 427, 446, 467,
    488, 510, 532, 556, 581, 606, 632, 659, 688, 717,
    747, 778, 810, 843, 877, 912, 948, 985, 1022, 1061,
    1101, 1142, 1183, 1226, 1269, 1314, 1359, 1404, 1451, 1498,
    1546, 1594, 1643, 1693, 1743, 1793, 1843, 1894, 1945, 1996,
    2048, 2099, 2150, 2201, 2252, 2302, 2352, 2402, 2452, 2501,
    2549, 2597, 2644, 2691, 2736, 2781, 2826, 2869, 2912, 2953,
    2994, 3034, 3073, 3110, 3147, 3183, 3218, 3252, 3285, 3317,
    3348, 3378, 3407, 3436, 3463, 3489, 3514, 3539, 3563, 3585,
    3607, 3628, 3649, 3668, 3687, 3705, 3722, 3739, 3755, 3770,
    3785, 3799, 3812, 3825, 3838, 3849, 3861, 3872, 3882, 3892,
    3901, 3910, 3919, 3927, 3935, 3943, 3950, 3957, 3963, 3969,
    3975, 3981, 3987, 3992, 3997, 4001, 4006, 4010, 4014, 4018,
    4022, 4025, 4029, 4032, 4035, 4038, 4041, 4043, 4046, 4048,
    4050, 4053, 4055, 4057, 4059, 4060, 4062, 4064, 4065, 4067
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
|
||||
77
modules/video_coding/main/source/packet.cc
Normal file
77
modules/video_coding/main/source/packet.cc
Normal file
@ -0,0 +1,77 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "packet.h"
|
||||
#include "module_common_types.h"
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Construct a VCMPacket from a received RTP packet.
//
// Inputs:
//      - ptr       : payload data; only the pointer is stored (not copied).
//      - size      : payload size in bytes.
//      - rtpHeader : parsed RTP header whose fields are copied out.
VCMPacket::VCMPacket(const WebRtc_UWord8* ptr,
                     const WebRtc_UWord32 size,
                     const WebRtcRTPHeader& rtpHeader) :
    payloadType(rtpHeader.header.payloadType),
    timestamp(rtpHeader.header.timestamp),
    seqNum(rtpHeader.header.sequenceNumber),
    dataPtr(ptr),
    sizeBytes(size),
    markerBit(rtpHeader.header.markerBit),

    frameType(rtpHeader.frameType),
    codec(kVideoCodecUnknown),
    isFirstPacket(rtpHeader.type.Video.isFirstPacket),
    completeNALU(kNaluComplete),
    insertStartCode(false),
    bits(false)
{
    // Derive the VCM codec type from the RTP video header.
    CopyCodecSpecifics(rtpHeader.type.Video);
}
|
||||
|
||||
// Construct a VCMPacket directly from payload data and explicit RTP fields;
// payload type defaults to 0, frame type to delta, codec to unknown.
VCMPacket::VCMPacket(const WebRtc_UWord8* ptr, WebRtc_UWord32 size, WebRtc_UWord16 seq, WebRtc_UWord32 ts, bool mBit) :
    payloadType(0),
    timestamp(ts),
    seqNum(seq),
    dataPtr(ptr),
    sizeBytes(size),
    markerBit(mBit),

    frameType(kVideoFrameDelta),
    codec(kVideoCodecUnknown),
    isFirstPacket(false),
    completeNALU(kNaluComplete),
    insertStartCode(false),
    bits(false)
{}
|
||||
|
||||
void VCMPacket::CopyCodecSpecifics(const RTPVideoHeader& videoHeader)
|
||||
{
|
||||
RTPVideoTypeHeader codecHeader = videoHeader.codecHeader;
|
||||
switch(videoHeader.codec)
|
||||
{
|
||||
case kRTPVideoVP8:
|
||||
{
|
||||
codec = kVideoCodecVP8;
|
||||
break;
|
||||
}
|
||||
case kRTPVideoI420:
|
||||
{
|
||||
codec = kVideoCodecI420;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
codec = kVideoCodecUnknown;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
58
modules/video_coding/main/source/packet.h
Normal file
58
modules/video_coding/main/source/packet.h
Normal file
@ -0,0 +1,58 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
#include "module_common_types.h"
|
||||
#include "jitter_buffer_common.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// A single video RTP payload unit inside the VCM. The packet does not own
// dataPtr; it only references the caller's payload buffer.
class VCMPacket
{
public:
    // Construct from a parsed RTP packet (fields copied from rtpHeader).
    VCMPacket(const WebRtc_UWord8* ptr,
              const WebRtc_UWord32 size,
              const WebRtcRTPHeader& rtpHeader);
    // Construct from explicit fields; frame type defaults to delta.
    VCMPacket(const WebRtc_UWord8* ptr,
              WebRtc_UWord32 size,
              WebRtc_UWord16 seqNum,
              WebRtc_UWord32 timestamp,
              bool markerBit);

    WebRtc_UWord8 payloadType;        // RTP payload type.
    WebRtc_UWord32 timestamp;         // RTP timestamp.
    WebRtc_UWord16 seqNum;            // RTP sequence number.
    const WebRtc_UWord8* dataPtr;     // Payload data (not owned).
    WebRtc_UWord32 sizeBytes;         // Payload size in bytes.
    bool markerBit;                   // RTP marker bit.

    FrameType frameType;
    webrtc::VideoCodecType codec;     // Codec the payload belongs to.

    bool isFirstPacket; // Is this first packet in a frame.
    VCMNaluCompleteness completeNALU; // Both constructors initialize this to
                                      // kNaluComplete (the original comment
                                      // claiming kNaluIncomplete was wrong).
    bool insertStartCode; // True if a start code should be inserted before this
                          // packet.
    bool bits; // The first bits of this packets are zero and the
               // first
               // byte should be ORed with the last packet of the
               // previous frame.

protected:
    void CopyCodecSpecifics(const RTPVideoHeader& videoHeader);
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
|
||||
684
modules/video_coding/main/source/qm_select.cc
Normal file
684
modules/video_coding/main/source/qm_select.cc
Normal file
@ -0,0 +1,684 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "qm_select.h"
|
||||
#include "internal_defines.h"
|
||||
#include "qm_select_data.h"
|
||||
|
||||
#include "module_common_types.h"
|
||||
#include "video_coding_defines.h"
|
||||
#include "trace.h"
|
||||
|
||||
#include <math.h>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
VCMQmSelect::VCMQmSelect()
{
    // _qm is owned by this object and released in the destructor.
    _qm = new VCMQualityMode();
    Reset();
}
|
||||
|
||||
VCMQmSelect::~VCMQmSelect()
{
    // Release the quality mode allocated in the constructor.
    delete _qm;
}
|
||||
|
||||
void
|
||||
VCMQmSelect::ResetQM()
|
||||
{
|
||||
_motion.Reset();
|
||||
_spatial.Reset();
|
||||
_coherence.Reset();
|
||||
_stationaryMotion = 0;
|
||||
_aspectRatio = 1;
|
||||
_maxRateQM = 0;
|
||||
_imageType = 1;
|
||||
_userResolutionPref = 50; // Neutral
|
||||
_qm->Reset();
|
||||
return;
|
||||
}
|
||||
|
||||
void
|
||||
VCMQmSelect::ResetRates()
|
||||
{
|
||||
_sumEncodedBytes = 0;
|
||||
_sumTargetRate = 0;
|
||||
_sumIncomingFrameRate = 0;
|
||||
_sumFrameRateMM = 0;
|
||||
_sumSeqRateMM = 0;
|
||||
_frameCnt = 0;
|
||||
_frameCntDelta = 0;
|
||||
_lowBufferCnt = 0;
|
||||
_updateRateCnt = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
void
|
||||
VCMQmSelect::Reset()
|
||||
{
|
||||
_stateDecFactorSpatial = 1;
|
||||
_stateDecFactorTemp = 1;
|
||||
_bufferLevel = 0;
|
||||
_targetBitRate = 0;
|
||||
_incomingFrameRate = 0;
|
||||
_userFrameRate = 0;
|
||||
_perFrameBandwidth =0;
|
||||
ResetQM();
|
||||
ResetRates();
|
||||
return;
|
||||
}
|
||||
|
||||
//Initialize after reset of encoder
|
||||
// Initialize after an encoder reset. Stores the encoder rate/size settings
// and seeds the buffer level and per-frame bandwidth.
// Returns VCM_PARAMETER_ERROR on inputs that would cause divide-by-zero.
WebRtc_Word32
VCMQmSelect::Initialize(float bitRate, float userFrameRate, WebRtc_UWord32 width, WebRtc_UWord32 height)
{
    // Guard against divisions by zero below.
    if (userFrameRate == 0.0f || width == 0 || height == 0)
    {
        return VCM_PARAMETER_ERROR;
    }
    _targetBitRate = bitRate;
    _userFrameRate = userFrameRate;
    //Encoder width and height
    _width = width;
    _height = height;
    //Initial buffer level: INIT_BUFFER_LEVEL seconds worth of target rate
    _bufferLevel = INIT_BUFFER_LEVEL * _targetBitRate;
    // First-time initialization: no measured incoming frame rate yet, so
    // bootstrap from the user-specified frame rate.
    if ( _incomingFrameRate == 0 )
    {
        _perFrameBandwidth = _targetBitRate / _userFrameRate;
        _incomingFrameRate = _userFrameRate;
    }
    else
    {
        //Take average: this is due to delay in update of new frame rate in encoder:
        //userFrameRate is the new one, incomingFrameRate is the old one (based on previous ~ 1sec)
        _perFrameBandwidth = 0.5 *( _targetBitRate / _userFrameRate + _targetBitRate / _incomingFrameRate );
    }
    // Mark the selector usable; SelectQuality() rejects calls before this.
    _init = true;

    return VCM_OK;
}
|
||||
|
||||
// Store the user's temporal-vs-spatial resolution preference:
// 100 favors frame rate (temporal), 0 favors resolution (spatial),
// 50 is neutral (the selector decides).
WebRtc_Word32
VCMQmSelect::SetPreferences(WebRtc_Word8 resolPref)
{
    _userResolutionPref = resolPref;
    return VCM_OK;
}
|
||||
|
||||
//Update after every encoded frame
|
||||
void
|
||||
VCMQmSelect::UpdateEncodedSize(WebRtc_Word64 encodedSize, FrameType encodedFrameType)
|
||||
{
|
||||
//Update encoded size;
|
||||
_sumEncodedBytes += encodedSize;
|
||||
_frameCnt++;
|
||||
|
||||
//Convert to Kbps
|
||||
float encodedSizeKbits = (float)((encodedSize * 8.0) / 1000.0);
|
||||
|
||||
//Update the buffer level: per_frame_BW is updated when encoder is updated, every ~1sec
|
||||
_bufferLevel += _perFrameBandwidth - encodedSizeKbits;
|
||||
|
||||
const bool deltaFrame = (encodedFrameType != kVideoFrameKey &&
|
||||
encodedFrameType != kVideoFrameGolden);
|
||||
|
||||
//Sum the frame mismatch:
|
||||
//Mismatch here is based on difference of actual encoded frame size and per-frame bandwidth, for delta frames
|
||||
//This is a much stronger condition on rate mismatch than sumSeqRateMM
|
||||
// Note: not used in this version
|
||||
/*
|
||||
if (deltaFrame)
|
||||
{
|
||||
_frameCntDelta++;
|
||||
if (encodedSizeKbits > 0)
|
||||
_sumFrameRateMM += (float) (fabs(encodedSizeKbits - _perFrameBandwidth) / encodedSizeKbits);
|
||||
}
|
||||
*/
|
||||
|
||||
//Counter for occurrences of low buffer level
|
||||
if (_bufferLevel <= PERC_BUFFER_THR * INIT_BUFFER_LEVEL * _targetBitRate)
|
||||
{
|
||||
_lowBufferCnt++;
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
//Update after SetTargetRates in MediaOpt (every ~1sec)
|
||||
// Update after SetTargetRates in MediaOpt (every ~1sec): fold the encoder
// rates from the previous period into the running sums, then adopt the new
// target/frame rates and recompute the per-frame bandwidth.
void
VCMQmSelect::UpdateRates(float targetBitRate, float avgSentBitRate, float incomingFrameRate)
{
    // Ordering matters: the sums below must use the OLD member values
    // (the rates the encoder actually used over the previous ~1sec),
    // before they are overwritten with the new arguments further down.
    _sumTargetRate += _targetBitRate;
    _sumIncomingFrameRate += _incomingFrameRate;
    _updateRateCnt++;

    //Convert the average sent rate to kbps
    float avgSentBitRatekbps = avgSentBitRate / 1000.0f;

    //Sum the sequence rate mismatch:
    //Mismatch here is based on difference between target rate the encoder used (in previous ~1sec)
    //and the average actual encoding rate at current time.
    //THRESH_SUM_MM guards against outliers; _targetBitRate > 0 guards the division.
    if (fabs(_targetBitRate - avgSentBitRatekbps) < THRESH_SUM_MM && _targetBitRate > 0.0 )
        _sumSeqRateMM += (float) (fabs(_targetBitRate - avgSentBitRatekbps) / _targetBitRate );

    //Adopt the new target and frame rate: these are the values the encoder
    //will use for the current/next ~1sec.
    _targetBitRate = targetBitRate;
    _incomingFrameRate = incomingFrameRate;

    //Per-frame bandwidth for the next ~1sec (guarded against zero frame rate).
    _perFrameBandwidth = 0.0f;
    if (_incomingFrameRate > 0.0f)
    {
        _perFrameBandwidth = _targetBitRate / _incomingFrameRate;
    }
}
|
||||
|
||||
// Core quality-mode decision. From the supplied content metrics plus the
// accumulated rate statistics, decide whether to (a) scale back up toward
// native spatial/temporal resolution, (b) down-sample spatially (2x2) or
// temporally (by 2), or (c) leave the stream unchanged.
// On success *qm points at the internally owned VCMQualityMode.
// Returns VCM_UNINITIALIZED if Initialize() has not been called.
WebRtc_Word32
VCMQmSelect::SelectQuality(const VideoContentMetrics* contentMetrics, VCMQualityMode** qm)
{
    if (!_init)
    {
        return VCM_UNINITIALIZED;
    }
    // No metrics available: reset to default (pass-through) settings.
    if (contentMetrics == NULL)
    {
        Reset(); //default values
        *qm = _qm;
        return VCM_OK;
    }

    //Default settings: no down-sampling.
    _qm->spatialWidthFact = 1;
    _qm->spatialHeightFact = 1;
    _qm->temporalFact = 1;

    _contentMetrics = contentMetrics;

    //Update native values
    _nativeWidth = _contentMetrics->nativeWidth;
    _nativeHeight = _contentMetrics->nativeHeight;
    _nativeFrameRate = _contentMetrics->nativeFrameRate;

    //Aspect ratio: used for selection of 1x2,2x1,2x2
    _aspectRatio = (float)_width / (float)_height;

    float avgTargetRate = 0.0f;
    float avgIncomingFrameRate = 0.0f;
    float ratioBufferLow = 0.0f;
    float rateMisMatch = 0.0f;
    // Fraction of frames for which the virtual buffer was considered low.
    if (_frameCnt > 0)
    {
        ratioBufferLow = (float)_lowBufferCnt / (float)_frameCnt;
    }
    if (_updateRateCnt > 0)
    {
        //use seq-rate mismatch for now
        rateMisMatch = (float)_sumSeqRateMM / (float)_updateRateCnt;
        //rateMisMatch = (float)_sumFrameRateMM / (float)_frameCntDelta;

        //average target and incoming frame rates
        avgTargetRate = (float)_sumTargetRate / (float)_updateRateCnt;
        avgIncomingFrameRate = (float)_sumIncomingFrameRate / (float)_updateRateCnt;
    }

    //For qm selection below, may want to weight the average encoder rates with
    //the current (for next ~1sec) rate values; uniform average for now:
    float w1 = 0.5f;
    float w2 = 0.5f;
    avgTargetRate = w1 * avgTargetRate + w2 * _targetBitRate;
    avgIncomingFrameRate = w1 * avgIncomingFrameRate + w2 * _incomingFrameRate;

    //Set the maximum transitional rate and image type: for up-sampled spatial dimensions.
    //Needed to get the transRate for going back up in spatial resolution
    //(only 2x2 allowed in this version).
    SetMaxRateForQM(2 * _width, 2 * _height);
    WebRtc_UWord8 imageType2 = _imageType;
    WebRtc_UWord32 maxRateQM2 = _maxRateQM;

    //Set the maximum transitional rate and image type: for the input/encoder
    //spatial dimensions (overwrites _imageType/_maxRateQM from the call above).
    SetMaxRateForQM(_width, _height);

    //Compute metric features (fills _motion and _spatial).
    MotionNFD();
    Spatial();

    //
    //Get transitional rate from table, based on image type and content class
    //

    //Get image size class: map _imageType to 2 classes (<=4CIF vs larger).
    WebRtc_UWord8 imageClass = 1;
    if (_imageType <= 3) imageClass = 0;

    // 9 content classes from 3 motion levels x 3 spatial levels.
    WebRtc_UWord8 contentClass = 3 * _motion.level + _spatial.level;
    WebRtc_UWord8 tableIndex = imageClass * 9 + contentClass;
    float scaleTransRate = kScaleTransRateQm[tableIndex];

    // for transRate for going back up spatially
    WebRtc_UWord8 imageClass2 = 1;
    if (imageType2 <= 3) imageClass2 = 0;
    WebRtc_UWord8 tableIndex2 = imageClass2 * 9 + contentClass;
    float scaleTransRate2 = kScaleTransRateQm[tableIndex2];

    // Transitional rates; the /30 normalizes against the 30fps assumption of
    // the kMaxRateQm table.
    WebRtc_UWord32 estimatedTransRateDown = (WebRtc_UWord32) (_incomingFrameRate * scaleTransRate * _maxRateQM / 30);
    WebRtc_UWord32 estimatedTransRateUpT = (WebRtc_UWord32) (TRANS_RATE_SCALE_UP_TEMP * 2 * _incomingFrameRate * scaleTransRate * _maxRateQM / 30);
    WebRtc_UWord32 estimatedTransRateUpS = (WebRtc_UWord32) (TRANS_RATE_SCALE_UP_SPATIAL * _incomingFrameRate * scaleTransRate2 * maxRateQM2 / 30);

    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideo, -1,
                 "Content Metrics: Motion = %d , Spatial = %d, Est. Trans. BR = %d",
                 _motion.level, _spatial.level, estimatedTransRateDown);

    //
    //CHECK FOR GOING BACK UP IN RESOLUTION
    //
    bool selectedUp = false;
    //Check if native has been spatially down-sampled
    if (_stateDecFactorSpatial > 1)
    {
        //check conditions on frame_skip and rate_mismatch
        if ( (avgTargetRate > estimatedTransRateUpS) &&
            (ratioBufferLow < MAX_BUFFER_LOW) && (rateMisMatch < MAX_RATE_MM) )
        {
            //width/height scaled back up: setting 0 indicates scaling back to native
            _qm->spatialHeightFact = 0;
            _qm->spatialWidthFact = 0;
            selectedUp = true;
        }
    }
    //Check if native has been temporally down-sampled
    if (_stateDecFactorTemp > 1)
    {
        if ( (avgTargetRate > estimatedTransRateUpT) &&
            (ratioBufferLow < MAX_BUFFER_LOW) && (rateMisMatch < MAX_RATE_MM) )
        {
            //temporal scale back up: setting 0 indicates scaling back to native
            _qm->temporalFact = 0;
            selectedUp = true;
        }
    }

    //leave QM if we selected to go back up in either spatial or temporal resolution
    if (selectedUp == true)
    {
        //Update down-sampling state
        //Note: only temp reduction by 2 is allowed
        if (_qm->temporalFact == 0)
        {
            _stateDecFactorTemp = _stateDecFactorTemp / 2;
        }
        //Update down-sampling state
        //Note: only spatial reduction by 2x2 is allowed
        if (_qm->spatialHeightFact == 0 && _qm->spatialWidthFact == 0 )
        {
            _stateDecFactorSpatial = _stateDecFactorSpatial / 4;
        }
        *qm = _qm;
        return VCM_OK;
    }

    //
    //CHECK FOR RESOLUTION REDUCTION
    //
    //ST QM extraction if:
    // (1) target rate is lower than transitional rate (with safety margin), or
    // (2) frame skip is larger than threshold, or
    // (3) rate mismatch is larger than threshold
    if ( (avgTargetRate < estimatedTransRateDown ) || (ratioBufferLow > MAX_BUFFER_LOW)
        || (rateMisMatch > MAX_RATE_MM) )
    {
        // spatialFact == 4 encodes a 2x2 spatial reduction;
        // tempFact == 2 halves the frame rate.
        WebRtc_UWord8 spatialFact = 1;
        WebRtc_UWord8 tempFact = 1;

        //Get the Action:
        //Note: only consider spatial by 2x2 OR temporal reduction by 2 in this version
        if (_motion.level == kLow && _spatial.level == kLow)
        {
            spatialFact = 1;
            tempFact = 1;
        }
        else if (_motion.level == kLow && _spatial.level == kHigh)
        {
            spatialFact = 1;
            tempFact = 2;
        }
        else if (_motion.level == kLow && _spatial.level == kDefault)
        {
            spatialFact = 1;
            tempFact = 2;
        }
        else if (_motion.level == kHigh && _spatial.level == kLow)
        {
            spatialFact = 4;
            tempFact = 1;
        }
        else if (_motion.level == kHigh && _spatial.level == kHigh)
        {
            spatialFact = 1;
            tempFact = 2;
        }
        else if (_motion.level == kHigh && _spatial.level == kDefault)
        {
            spatialFact = 4;
            tempFact = 1;
        }
        else if (_motion.level == kDefault && _spatial.level == kLow)
        {
            spatialFact = 4;
            tempFact = 1;
        }
        else if (_motion.level == kDefault && _spatial.level == kHigh)
        {
            spatialFact = 1;
            tempFact = 2;
        }
        else if (_motion.level == kDefault && _spatial.level == kDefault)
        {
            spatialFact = 1;
            tempFact = 1;
        }

        switch(spatialFact)
        {
        case 4:
            _qm->spatialWidthFact = 2;
            _qm->spatialHeightFact = 2;
            break;
        case 2:
            //default is 1x2 (H)
            _qm->spatialWidthFact = 2;
            _qm->spatialHeightFact = 1;
            //Select 1x2,2x1, or back to 2x2: depends on prediction errors,
            //aspect ratio, and horizontalness of motion.
            //Note: directional selection not used in this version
            //SelectSpatialDirectionMode((float) estimatedTransRateDown);
            break;
        default:
            _qm->spatialWidthFact = 1;
            _qm->spatialHeightFact = 1;
            break;
        }
        _qm->temporalFact = tempFact;

        //Sanity check on ST QM selection: override the settings for too small
        //image size and frame rate; also limit the current down-sampling state.

        //No spatial sampling if image size is too small (QCIF)
        if ( (_width * _height) <= MIN_IMAGE_SIZE || _stateDecFactorSpatial >= MAX_SPATIAL_DOWN_FACT)
        {
            _qm->spatialWidthFact = 1;
            _qm->spatialHeightFact = 1;
        }

        //No frame rate reduction below some point: use the (average) incoming frame rate
        if ( avgIncomingFrameRate <= MIN_FRAME_RATE_QM || _stateDecFactorTemp >= MAX_TEMP_DOWN_FACT)
        {
            _qm->temporalFact = 1;
        }

        //No down-sampling if current spatial-temporal downsampling state is above threshold
        if (_stateDecFactorTemp * _stateDecFactorSpatial >= MAX_SPATIAL_TEMP_DOWN_FACT)
        {
            _qm->spatialWidthFact = 1;
            _qm->spatialHeightFact = 1;
            _qm->temporalFact = 1;
        }

        //Note: to disable spatial down-sampling
        // _qm->spatialWidthFact = 1;
        // _qm->spatialHeightFact = 1;

        //Update down-sampling states
        _stateDecFactorSpatial = _stateDecFactorSpatial * _qm->spatialWidthFact * _qm->spatialHeightFact;
        _stateDecFactorTemp = _stateDecFactorTemp * _qm->temporalFact;
    }
    else
    {
        // No reduction condition triggered: keep default (unchanged) settings.
        *qm = _qm;
        return VCM_OK;
    }
    // done with checking for resolution reduction

    *qm = _qm;
    return VCM_OK;
}
|
||||
|
||||
// Select 1x2, 2x1, or 2x2 spatial sampling based on the transitional rate,
// the per-direction spatial prediction errors, and the aspect ratio.
// Assumes the caller has already set the default 1x2 mode
// (spatialWidthFact = 2, spatialHeightFact = 1); falling through any of the
// checks below keeps that default. Always returns VCM_OK.
WebRtc_Word32
VCMQmSelect::SelectSpatialDirectionMode(float transRate)
{
    //Default is 1x2 (H)

    //For bit rates well below transitional rate, we select 2x2
    if ( _targetBitRate < transRate * RATE_RED_SPATIAL_2X2 )
    {
        _qm->spatialWidthFact = 2;
        _qm->spatialHeightFact = 2;
        return VCM_OK;
    }

    //Otherwise check prediction errors, aspect ratio, horizontalness of motion

    float spatialErr = _contentMetrics->spatialPredErr;
    float spatialErrH = _contentMetrics->spatialPredErrH;
    float spatialErrV = _contentMetrics->spatialPredErrV;

    //favor 1x2 if aspect_ratio is 16:9
    if (_aspectRatio >= 16.0f / 9.0f )
    {
        //check if 1x2 has lowest prediction error
        if (spatialErrH < spatialErr && spatialErrH < spatialErrV)
        {
            return VCM_OK;
        }
    }

    //check for 2x2 selection: favor 2x2 over 1x2 and 2x1
    if (spatialErr < spatialErrH * (1.0f + SPATIAL_ERR_2X2_VS_H) &&
        spatialErr < spatialErrV * (1.0f + SPATIAL_ERR_2X2_VS_V))
    {
        _qm->spatialWidthFact = 2;
        _qm->spatialHeightFact = 2;
        return VCM_OK;
    }

    //check for 2x1 selection:
    if (spatialErrV < spatialErrH * (1.0f - SPATIAL_ERR_V_VS_H) &&
        spatialErrV < spatialErr * (1.0f - SPATIAL_ERR_2X2_VS_V))
    {
        _qm->spatialWidthFact = 1;
        _qm->spatialHeightFact = 2;
        return VCM_OK;
    }

    //Fall through: keep the default 1x2 mode.
    return VCM_OK;
}
|
||||
|
||||
void
|
||||
VCMQmSelect::Coherence()
|
||||
{
|
||||
float horizNZ = _contentMetrics->motionHorizontalness;
|
||||
float distortionNZ = _contentMetrics->motionClusterDistortion;
|
||||
|
||||
//Coherence measure: combine horizontalness with cluster distortion
|
||||
_coherence.value = COH_MAX;
|
||||
if (distortionNZ > 0.)
|
||||
{
|
||||
_coherence.value = horizNZ / distortionNZ;
|
||||
}
|
||||
_coherence.value = VCM_MIN(COH_MAX, _coherence.value);
|
||||
|
||||
if (_coherence.value < COHERENCE_THR)
|
||||
{
|
||||
_coherence.level = kLow;
|
||||
}
|
||||
else
|
||||
{
|
||||
_coherence.level = kHigh;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void
|
||||
VCMQmSelect::MotionNFD()
|
||||
{
|
||||
_motion.value = _contentMetrics->motionMagnitudeNZ;
|
||||
|
||||
// determine motion level
|
||||
if (_motion.value < LOW_MOTION_NFD)
|
||||
{
|
||||
_motion.level = kLow;
|
||||
}
|
||||
else if (_motion.value > HIGH_MOTION_NFD)
|
||||
{
|
||||
_motion.level = kHigh;
|
||||
}
|
||||
else
|
||||
{
|
||||
_motion.level = kDefault;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void
|
||||
VCMQmSelect::Motion()
|
||||
{
|
||||
|
||||
float sizeZeroMotion = _contentMetrics->sizeZeroMotion;
|
||||
float motionMagNZ = _contentMetrics->motionMagnitudeNZ;
|
||||
|
||||
//take product of size and magnitude with equal weight for now
|
||||
_motion.value = (1.0f - sizeZeroMotion) * motionMagNZ;
|
||||
|
||||
//stabilize: motionMagNZ could be large when only few motion blocks are non-zero
|
||||
_stationaryMotion = false;
|
||||
if (sizeZeroMotion > HIGH_ZERO_MOTION_SIZE)
|
||||
{
|
||||
_motion.value = 0.0f;
|
||||
_stationaryMotion = true;
|
||||
}
|
||||
// determine motion level
|
||||
if (_motion.value < LOW_MOTION)
|
||||
{
|
||||
_motion.level = kLow;
|
||||
}
|
||||
else if (_motion.value > HIGH_MOTION)
|
||||
{
|
||||
_motion.level = kHigh;
|
||||
}
|
||||
else
|
||||
{
|
||||
_motion.level = kDefault;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Spatial texture feature: average of the three spatial prediction errors,
// quantized against (possibly HD-scaled) LOW/HIGH_TEXTURE thresholds.
void
VCMQmSelect::Spatial()
{
    float spatialErr = _contentMetrics->spatialPredErr;
    float spatialErrH = _contentMetrics->spatialPredErrH;
    float spatialErrV = _contentMetrics->spatialPredErrV;
    //Spatial measure: take average of 3 prediction errors
    _spatial.value = (spatialErr + spatialErrH + spatialErrV) / 3.0f;

    float scale = 1.0f;
    //Reduce thresholds for HD scenes (image types above 4CIF): higher
    //correlation in HD content means lower spatial prediction error.
    if (_imageType > 3)
    {
        scale = (float)SCALE_TEXTURE_HD;
    }

    if (_spatial.value > scale * HIGH_TEXTURE)
    {
        _spatial.level = kHigh;
    }
    else if (_spatial.value < scale * LOW_TEXTURE)
    {
        _spatial.level = kLow;
    }
    else
    {
        _spatial.level = kDefault;
    }
}
|
||||
|
||||
|
||||
// Classify the image size into one of 7 types and set the corresponding
// maximum transitional rate (kMaxRateQm, kbps at 30fps).
// kFrameSizeTh holds the 6 boundaries separating types 0..6, so the
// hand-rolled 7-branch ladder reduces to one scan over the table.
WebRtc_Word32
VCMQmSelect::SetMaxRateForQM(WebRtc_UWord32 width, WebRtc_UWord32 height)
{
    // Match image type
    WebRtc_UWord32 imageSize = width * height;

    // Anything at or above the last boundary is the largest type (6).
    WebRtc_UWord8 imageType = 6;
    for (WebRtc_UWord8 i = 0; i < 6; i++)
    {
        if (imageSize < kFrameSizeTh[i])
        {
            imageType = i;
            break;
        }
    }
    _imageType = imageType;

    // set max rate based on image size
    _maxRateQM = kMaxRateQm[_imageType];

    return VCM_OK;
}
|
||||
|
||||
}
|
||||
166
modules/video_coding/main/source/qm_select.h
Normal file
166
modules/video_coding/main/source/qm_select.h
Normal file
@ -0,0 +1,166 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
#include "common_types.h"
|
||||
/************************/
|
||||
/* Quality Modes */
|
||||
/**********************/
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
struct VideoContentMetrics;
|
||||
|
||||
// Spatial/temporal down-sampling factors selected by VCMQmSelect.
// A factor of 1 means no change; 2 means down-sample by two in that
// dimension; 0 signals scaling back up to the native resolution
// (see the going-back-up path in VCMQmSelect::SelectQuality).
struct VCMQualityMode
{
    VCMQualityMode():spatialWidthFact(1), spatialHeightFact(1), temporalFact(1){}
    // Restore the no-down-sampling defaults.
    void Reset()
    {
        spatialWidthFact = 1;
        spatialHeightFact = 1;
        temporalFact = 1;
    }

    WebRtc_UWord16 spatialWidthFact;   // horizontal down-sampling factor
    WebRtc_UWord16 spatialHeightFact;  // vertical down-sampling factor
    WebRtc_UWord16 temporalFact;       // frame-rate down-sampling factor
};
|
||||
|
||||
// Quantized magnitude levels for content features.
// Note: the implicit numeric values (kLow=0, kHigh=1, kDefault=2) are
// relied upon by the content-class table indexing in qm_select.cc
// (contentClass = 3 * motion.level + spatial.level) - do not reorder.
enum VCMMagValues
{
    kLow,
    kHigh,
    kDefault //default do nothing mode
};
|
||||
|
||||
// A single content feature: the raw (continuous) measure and the
// quantized low/high/default level derived from it.
struct VCMContFeature
{
    VCMContFeature(): value(0.0f), level(kDefault){}

    // Restore the neutral defaults.
    void Reset()
    {
        value = 0.0f;
        level = kDefault;
    }

    float value;         // raw feature measure
    VCMMagValues level;  // quantized level
};
|
||||
|
||||
// Quality-mode selector: decides, from per-frame content metrics and
// accumulated encoder rate statistics, whether the stream should be
// down-sampled spatially and/or temporally, or scaled back toward its
// native resolution.
class VCMQmSelect
{
public:
    VCMQmSelect();
    ~VCMQmSelect();

    // Initialize after an encoder reset; must be called before
    // SelectQuality() will operate.
    WebRtc_Word32 Initialize(float bitRate, float userFrameRate, WebRtc_UWord32 width, WebRtc_UWord32 height);

    // Allow the user to set preferences: favor frame rate/resolution.
    // 100 favors temporal, 0 favors spatial, 50 is neutral.
    WebRtc_Word32 SetPreferences(WebRtc_Word8 resolPref);

    // Extract ST QM behavior and make decision.
    // Inputs: Content Metrics per frame (averaged over time)
    //         qm: Reference to the quality modes pointer; set to the
    //             internally owned mode object on success.
    WebRtc_Word32 SelectQuality(const VideoContentMetrics* contentMetrics, VCMQualityMode** qm);

    // Update QMselect with actual bit rate (size of the latest encoded frame)
    // and frame type -> update buffer level and frame-mismatch.
    void UpdateEncodedSize(WebRtc_Word64 encodedSize, FrameType encodedFrameType);

    // Update QM with new rates from SetTargetRates (called every ~1sec).
    void UpdateRates(float targetBitRate, float avgSentRate, float incomingFrameRate);

    // Select 1x2, 2x1, or 2x2 spatial sampling mode.
    WebRtc_Word32 SelectSpatialDirectionMode(float transRate);

    // Reset content-related values prior to QMSelect.
    void ResetQM();

    // Reset rate quantities and counter values after every QMSelect call.
    void ResetRates();

    // Reset all (does not clear the initialized flag).
    void Reset();
private:

    // Compute spatial texture magnitude and level.
    void Spatial();

    // Compute motion magnitude and level.
    void Motion();

    // Compute motion magnitude and level for NFD metric.
    void MotionNFD();

    // Compute coherence magnitude and level.
    void Coherence();

    // Set the max rate and image type for QM selection.
    WebRtc_Word32 SetMaxRateForQM(WebRtc_UWord32 width, WebRtc_UWord32 height);

    // Content Data (not owned; supplied to SelectQuality).
    const VideoContentMetrics* _contentMetrics;

    // Encoder stats/rate-control metrics (rates in kbps).
    float _targetBitRate;
    float _userFrameRate;
    float _incomingFrameRate;
    float _perFrameBandwidth;       // kbits per frame
    float _bufferLevel;             // virtual buffer level, kbits
    float _sumTargetRate;
    float _sumIncomingFrameRate;
    float _sumSeqRateMM;            // accumulated sequence-rate mismatch
    float _sumFrameRateMM;          // accumulated frame-rate mismatch (unused in this version)
    WebRtc_Word64 _sumEncodedBytes;

    //Encoder and native frame sizes
    WebRtc_UWord32 _width;
    WebRtc_UWord32 _height;
    WebRtc_UWord32 _nativeWidth;
    WebRtc_UWord32 _nativeHeight;
    WebRtc_UWord8 _stateDecFactorSpatial;  // cumulative spatial down-sampling factor

    WebRtc_UWord32 _nativeFrameRate;
    WebRtc_UWord8 _stateDecFactorTemp;     // cumulative temporal down-sampling factor

    //Counters
    WebRtc_UWord32 _frameCnt;
    WebRtc_UWord32 _frameCntDelta;
    WebRtc_UWord32 _updateRateCnt;
    WebRtc_UWord32 _lowBufferCnt;

    //Content L/M/H values
    VCMContFeature _motion;
    VCMContFeature _spatial;
    VCMContFeature _coherence;
    bool _stationaryMotion;

    //aspect ratio
    float _aspectRatio;

    //Max rate to saturate the transitionalRate
    WebRtc_UWord32 _maxRateQM;
    WebRtc_UWord8 _imageType;

    //User preference for resolution or qmax change
    WebRtc_UWord8 _userResolutionPref;
    bool _init;                 // set true by Initialize()
    VCMQualityMode* _qm;        // owned; allocated in ctor, freed in dtor

};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_
|
||||
144
modules/video_coding/main/source/qm_select_data.h
Normal file
144
modules/video_coding/main/source/qm_select_data.h
Normal file
@ -0,0 +1,144 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
|
||||
|
||||
/***************************************************************
|
||||
*QMSelectData.h
|
||||
* This file includes parameters used by the Quality Modes selection process
|
||||
****************************************************************/
|
||||
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
//Initial level of buffer in secs: should correspond to wrapper settings
|
||||
#define INIT_BUFFER_LEVEL 0.5
|
||||
|
||||
//
|
||||
//PARAMETERS FOR QM SELECTION
|
||||
//
|
||||
|
||||
//Threshold of (max) buffer size below which we consider too low (underflow)
|
||||
#define PERC_BUFFER_THR 0.10
|
||||
|
||||
//Threshold on rate mismatch
|
||||
#define MAX_RATE_MM 0.5
|
||||
|
||||
//Threshold on the occurrences of low buffer levels
|
||||
#define MAX_BUFFER_LOW 0.5
|
||||
|
||||
//Factor for transitional rate for going back up in resolution
|
||||
#define TRANS_RATE_SCALE_UP_SPATIAL 1.25
|
||||
#define TRANS_RATE_SCALE_UP_TEMP 1.25
|
||||
|
||||
//Maximum possible transitional rate: (units in kbps), for 30fps.
//Indexed by _imageType (0..6) as set in VCMQmSelect::SetMaxRateForQM.
const WebRtc_UWord16 kMaxRateQm[7] = {
    100,  //QCIF
    500,  //CIF
    800,  //VGA
    1500, //4CIF
    2000, //720 HD 4:3,
    2500, //720 HD 16:9
    3000  //1080HD
};
|
||||
|
||||
//Scale for transitional rate: based on content class
// motion=L/H/D,spatial==L/H/D: for low, high, middle levels
// Indexed by imageClass * 9 + contentClass, where
// contentClass = 3 * _motion.level + _spatial.level (kLow=0, kHigh=1, kDefault=2).
// NOTE(review): by that indexing, entry 7 within each group is (D,H) and
// entry 8 is (D,D), but the per-entry labels below list them the other way
// around - confirm whether the labels or the values are swapped.
const float kScaleTransRateQm[18] = {
    //4CIF and lower
    0.25f, // L, L
    0.75f, // L, H
    0.75f, // L, D
    0.75f, // H ,L
    0.50f, // H, H
    0.50f, // H, D
    0.50f, // D, L
    0.625f, // D, D
    0.25f, // D, H

    //over 4CIF: WHD, HD
    0.25f, // L, L
    0.75f, // L, H
    0.75f, // L, D
    0.75f, // H ,L
    0.50f, // H, H
    0.50f, // H, D
    0.50f, // D, L
    0.625f, // D, D
    0.25f // D, H
};
|
||||
|
||||
//Control the total amount of down-sampling allowed
|
||||
#define MAX_SPATIAL_DOWN_FACT 4
|
||||
#define MAX_TEMP_DOWN_FACT 4
|
||||
#define MAX_SPATIAL_TEMP_DOWN_FACT 8
|
||||
|
||||
//
|
||||
//
|
||||
//
|
||||
|
||||
//PARAMETERS FOR SETTING LOW/HIGH VALUES OF METRICS:
|
||||
//
|
||||
//Threshold to determine if high amount of zero_motion
|
||||
#define HIGH_ZERO_MOTION_SIZE 0.95
|
||||
|
||||
//Thresholds for motion: motion level is derived from motion vectors: motion = size_nz*magn_nz
|
||||
#define HIGH_MOTION 0.7
|
||||
#define LOW_MOTION 0.4
|
||||
|
||||
//Thresholds for motion: motion level is from NFD
|
||||
#define HIGH_MOTION_NFD 0.075
|
||||
#define LOW_MOTION_NFD 0.04
|
||||
|
||||
//Thresholds for spatial prediction error: this is applied on the min(2x2,1x2,2x1)
|
||||
#define HIGH_TEXTURE 0.035
|
||||
#define LOW_TEXTURE 0.025
|
||||
|
||||
//Used to reduce thresholds for HD scenes: correction factor since higher
//correlation in HD scenes means lower spatial prediction error.
//Note: no trailing semicolon - an object-like macro's replacement list is
//pasted verbatim into every expansion, so a ';' here would inject a stray
//empty statement at each use site.
#define SCALE_TEXTURE_HD 0.9
|
||||
|
||||
//Thresholds for distortion and horizontalness: applied on product: horiz_nz/dist_nz
|
||||
#define COHERENCE_THR 1.0
|
||||
#define COH_MAX 10
|
||||
//
|
||||
//
|
||||
#define RATE_RED_SPATIAL_2X2 0.6 //percentage reduction in transitional bitrate where 2x2 is selected over 1x2/2x1
|
||||
#define SPATIAL_ERR_2X2_VS_H 0.1 //percentage to favor 2x2
|
||||
#define SPATIAL_ERR_2X2_VS_V 0.1 //percentage to favor 2x2 over V
|
||||
#define SPATIAL_ERR_V_VS_H 0.1 //percentage to favor H over V
|
||||
|
||||
|
||||
//Minimum image size for a spatial mode selection: no spatial down-sampling if input size <= MIN_IMAGE_SIZE
|
||||
#define MIN_IMAGE_SIZE 25344 //176*144
|
||||
|
||||
//Minimum frame rate for temporal mode: no frame rate reduction if incomingFrameRate <= MIN_FRAME_RATE
|
||||
#define MIN_FRAME_RATE_QM 8
|
||||
|
||||
//Avoid outliers in seq-rate MM
|
||||
#define THRESH_SUM_MM 1000
|
||||
|
||||
// Boundaries (in pixels, width*height) for the closest standard frame size;
// scanned by VCMQmSelect::SetMaxRateForQM to classify image types 0..6.
const WebRtc_UWord32 kFrameSizeTh[6] = {
    63360,   //between 176*144 and 352*288
    204288,  //between 352*288 and 640*480
    356352,  //between 640*480 and 704*576
    548352,  //between 704*576 and 960*720
    806400,  //between 960*720 and 1280*720
    1497600, // between 1280*720 and 1920*1080
};
|
||||
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
|
||||
472
modules/video_coding/main/source/receiver.cc
Normal file
472
modules/video_coding/main/source/receiver.cc
Normal file
@ -0,0 +1,472 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "video_coding.h"
|
||||
#include "trace.h"
|
||||
#include "encoded_frame.h"
|
||||
#include "internal_defines.h"
|
||||
#include "receiver.h"
|
||||
#include "tick_time.h"
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Constructor. Note that _critSect and _renderWaitEvent are heap-allocated
// objects held by reference; they are released via delete-through-address
// in the destructor.
VCMReceiver::VCMReceiver(VCMTiming& timing,
                         WebRtc_Word32 vcmId,
                         WebRtc_Word32 receiverId,
                         bool master)
:
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
_vcmId(vcmId),
_receiverId(receiverId),
_master(master),
_jitterBuffer(vcmId, receiverId, master),
_timing(timing),
_renderWaitEvent(*new VCMEvent()),
_nackMode(kNoNack),
_state(kPassive)  // starts passive; Initialize() may move to kReceiving
{
}
|
||||
|
||||
// Destructor: wake any waiter blocked on the render event before tearing
// down, then free the reference members allocated in the constructor.
VCMReceiver::~VCMReceiver()
{
    _renderWaitEvent.Set();
    delete &_renderWaitEvent;
    delete &_critSect;
}
|
||||
|
||||
// (Re)initialize the receiver under the lock: start the jitter buffer on
// first use or flush it on re-initialization, clear the render-wait event,
// and set the receiver state (master receives; slave stays passive with
// NACK disabled).
WebRtc_Word32
VCMReceiver::Initialize()
{
    CriticalSectionScoped cs(_critSect);

    if (_jitterBuffer.Running())
    {
        _jitterBuffer.Flush();
    }
    else
    {
        _jitterBuffer.Start();
    }

    _renderWaitEvent.Reset();

    if (_master)
    {
        _state = kReceiving;
    }
    else
    {
        _state = kPassive;
        SetNackMode(kNoNack);
    }
    return VCM_OK;
}
|
||||
|
||||
// Forward the round-trip-time estimate to the jitter buffer.
void VCMReceiver::UpdateRtt(WebRtc_UWord32 rtt)
{
    _jitterBuffer.UpdateRtt(rtt);
}
|
||||
|
||||
// Inserts one incoming RTP packet into the jitter buffer.
// frameWidth/frameHeight, when both non-zero, set the encoded size on the
// frame buffer owning the packet. Flushes the jitter buffer and resets
// timing when render-time sanity checks fail (stream change / excessive
// delay). Returns VCM_OK, a negative error, or VCM_JITTER_BUFFER_ERROR.
WebRtc_Word32
VCMReceiver::InsertPacket(const VCMPacket& packet,
                          WebRtc_UWord16 frameWidth,
                          WebRtc_UWord16 frameHeight)
{
    // Find an empty frame
    VCMEncodedFrame *buffer = NULL;
    const WebRtc_Word32 error = _jitterBuffer.GetFrame(packet, buffer);
    if (error == VCM_OLD_PACKET_ERROR)
    {
        // Too-old packets are silently dropped; not an error to the caller.
        return VCM_OK;
    }
    else if (error < 0)
    {
        return error;
    }

    {
        CriticalSectionScoped cs(_critSect);

        if (frameWidth && frameHeight)
        {
            buffer->SetEncodedSize(static_cast<WebRtc_UWord32>(frameWidth),
                                   static_cast<WebRtc_UWord32>(frameHeight));
        }

        if (_master)
        {
            // Only trace the primary receiver to make it possible
            // to parse and plot the trace file.
            WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                         "Packet seqNo %u of frame %u at %u",
                         packet.seqNum, packet.timestamp,
                         MaskWord64ToUWord32(VCMTickTime::MillisecondTimestamp()));
        }

        // NOTE(review): emptyFrame is computed but never read below; the
        // first-packet check re-evaluates buffer->Length() directly.
        const bool emptyFrame = (buffer->Length() == 0);
        const WebRtc_Word64 nowMs = VCMTickTime::MillisecondTimestamp();

        WebRtc_Word64 renderTimeMs = _timing.RenderTimeMs(packet.timestamp, nowMs);

        if(renderTimeMs < 0)
        {
            // Render time error. Assume that this is due to some change in
            // the incoming video stream and reset the JB and the timing.
            _jitterBuffer.Flush();
            _timing.Reset();
            return VCM_OK;
        }
        else if (renderTimeMs < nowMs - kMaxVideoDelayMs)
        {
            // Frame is already far too late to render.
            WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                         "This frame should have been rendered more than %u ms ago."
                         "Flushing jitter buffer and resetting timing.", kMaxVideoDelayMs);
            _jitterBuffer.Flush();
            _timing.Reset();
            return VCM_OK;
        }
        else if (_timing.TargetVideoDelay() > kMaxVideoDelayMs)
        {
            // Accumulated target delay has grown out of bounds.
            WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                         "More than %u ms target delay. Flushing jitter buffer and resetting timing.",
                         kMaxVideoDelayMs);
            _jitterBuffer.Flush();
            _timing.Reset();
            return VCM_OK;
        }

        // First packet received belonging to this frame.
        if (buffer->Length() == 0)
        {
            // NOTE(review): this inner nowMs shadows the outer one above;
            // both read the same clock, so behavior is unaffected.
            const WebRtc_Word64 nowMs = VCMTickTime::MillisecondTimestamp();
            if (_master)
            {
                // Only trace the primary receiver to make it possible to parse and plot the trace file.
                WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                             "First packet of frame %u at %u", packet.timestamp,
                             MaskWord64ToUWord32(nowMs));
            }
            // Stamp the frame with its render time; fall back to "now"
            // if the timing module cannot compute one.
            renderTimeMs = _timing.RenderTimeMs(packet.timestamp, nowMs);
            if (renderTimeMs >= 0)
            {
                buffer->SetRenderTime(renderTimeMs);
            }
            else
            {
                buffer->SetRenderTime(nowMs);
            }
        }

        // Insert packet into jitter buffer
        const VCMFrameBufferEnum ret = _jitterBuffer.InsertPacket(buffer, packet);

        if (ret < 0)
        {
            WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                         "Error inserting packet seqNo=%u, timeStamp=%u",
                         packet.seqNum, packet.timestamp);
            return VCM_JITTER_BUFFER_ERROR;
        }
    }
    return VCM_OK;
}
|
||||
|
||||
// Fetches the next frame for decoding, waiting at most maxWaitTimeMs.
// On success the frame's render time is written to nextRenderTimeMs.
// renderTiming selects the wait strategy: true delegates to the private
// FrameForDecoding() (wait until it is time to decode), false to
// FrameForRendering() (wait until it is time to render). dualReceiver,
// if non-NULL, is the secondary receiver of the dual-decoder scheme and
// has its state updated from the returned frame. Returns NULL if no
// frame is available within the wait budget.
VCMEncodedFrame*
VCMReceiver::FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs, WebRtc_Word64& nextRenderTimeMs, bool renderTiming, VCMReceiver* dualReceiver)
{
    // No need to enter the critical section here since the jitter buffer
    // is thread-safe.
    FrameType incomingFrameType = kVideoFrameDelta;
    nextRenderTimeMs = -1;
    const WebRtc_Word64 startTimeMs = VCMTickTime::MillisecondTimestamp();
    WebRtc_Word64 ret = _jitterBuffer.GetNextTimeStamp(maxWaitTimeMs,
                                                       incomingFrameType,
                                                       nextRenderTimeMs);
    if (ret < 0)
    {
        // No timestamp in jitter buffer at the moment
        return NULL;
    }
    const WebRtc_UWord32 timeStamp = static_cast<WebRtc_UWord32>(ret);

    // Update the timing
    _timing.SetRequiredDelay(_jitterBuffer.GetEstimatedJitterMS());
    _timing.UpdateCurrentDelay(timeStamp);

    // Compute the wait budget remaining after the GetNextTimeStamp()
    // call above (which may itself have blocked), clamped at zero.
    const WebRtc_Word32 tempWaitTime = maxWaitTimeMs -
        static_cast<WebRtc_Word32>(VCMTickTime::MillisecondTimestamp() - startTimeMs);
    WebRtc_UWord16 newMaxWaitTime = static_cast<WebRtc_UWord16>(VCM_MAX(tempWaitTime, 0));

    VCMEncodedFrame* frame = NULL;

    if (renderTiming)
    {
        frame = FrameForDecoding(newMaxWaitTime, nextRenderTimeMs, dualReceiver);
    }
    else
    {
        frame = FrameForRendering(newMaxWaitTime, nextRenderTimeMs, dualReceiver);
    }

    if (frame != NULL)
    {
        bool retransmitted = false;
        const WebRtc_Word64 lastPacketTimeMs =
            _jitterBuffer.LastPacketTime(frame, retransmitted);
        if (lastPacketTimeMs >= 0 && !retransmitted)
        {
            // We don't want to include timestamps which have suffered from retransmission
            // here, since we compensate with extra retransmission delay within
            // the jitter estimate.
            _timing.IncomingTimestamp(timeStamp, lastPacketTimeMs);
        }
        if (dualReceiver != NULL)
        {
            dualReceiver->UpdateState(*frame);
        }
    }
    return frame;
}
|
||||
|
||||
// Private helper: waits (up to maxWaitTimeMs) for a complete frame, and
// once decode time has arrived falls back to an incomplete frame. Before
// handing out a frame that may corrupt decoder state, a passive dual
// receiver (NACK-enabled) is seeded with a copy of this jitter buffer so
// it can recover the stream. Returns NULL if it is not yet time to
// decode.
VCMEncodedFrame*
VCMReceiver::FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs,
                              WebRtc_Word64 nextRenderTimeMs,
                              VCMReceiver* dualReceiver)
{
    // How long can we wait until we must decode the next frame
    WebRtc_UWord32 waitTimeMs = _timing.MaxWaitingTime(nextRenderTimeMs,
                                                       VCMTickTime::MillisecondTimestamp());

    // Try to get a complete frame from the jitter buffer
    VCMEncodedFrame* frame = _jitterBuffer.GetCompleteFrameForDecoding(0);

    if (frame == NULL && maxWaitTimeMs == 0 && waitTimeMs > 0)
    {
        // If we're not allowed to wait for frames to get complete we must calculate if
        // it's time to decode, and if it's not we will just return for now.
        return NULL;
    }

    if (frame == NULL)
    {
        // Wait for a complete frame
        waitTimeMs = VCM_MIN(waitTimeMs, maxWaitTimeMs);
        frame = _jitterBuffer.GetCompleteFrameForDecoding(waitTimeMs);
    }
    if (frame == NULL)
    {
        // Get an incomplete frame
        if (_timing.MaxWaitingTime(nextRenderTimeMs, VCMTickTime::MillisecondTimestamp()) > 0)
        {
            // Still time to wait for a complete frame
            return NULL;
        }

        // No time left to wait, we must decode this frame now.
        const bool dualReceiverEnabledAndPassive = dualReceiver != NULL &&
            dualReceiver->State() == kPassive &&
            dualReceiver->NackMode() == kNackInfinite;
        if (dualReceiverEnabledAndPassive && !_jitterBuffer.CompleteSequenceWithNextFrame())
        {
            // Jitter buffer state might get corrupt with this frame.
            dualReceiver->CopyJitterBufferStateFromReceiver(*this);
        }

        frame = _jitterBuffer.GetFrameForDecoding();
    }
    return frame;
}
|
||||
|
||||
// Private helper used when the renderer cannot schedule frames itself:
// blocks on the render-wait event until the frame's render time, then
// returns a complete frame if available, otherwise an incomplete one
// (seeding a passive dual receiver first, as in FrameForDecoding).
// Returns NULL if the remaining wait budget cannot cover the wait.
VCMEncodedFrame*
VCMReceiver::FrameForRendering(WebRtc_UWord16 maxWaitTimeMs,
                               WebRtc_Word64 nextRenderTimeMs,
                               VCMReceiver* dualReceiver)
{
    // How long MUST we wait until we must decode the next frame. This is different for the case
    // where we have a renderer which can render at a specified time. Here we must wait as long
    // as possible before giving the frame to the decoder, which will render the frame as soon
    // as it has been decoded.
    WebRtc_UWord32 waitTimeMs = _timing.MaxWaitingTime(nextRenderTimeMs,
                                                       VCMTickTime::MillisecondTimestamp());
    if (maxWaitTimeMs < waitTimeMs)
    {
        // If we're not allowed to wait until the frame is supposed to be rendered
        // we will have to return NULL for now.
        return NULL;
    }
    // Wait until it's time to render.
    // (The destructor Set()s this event to unblock shutdown.)
    _renderWaitEvent.Wait(waitTimeMs);

    // Get a complete frame if possible
    VCMEncodedFrame* frame = _jitterBuffer.GetCompleteFrameForDecoding(0);

    if (frame == NULL)
    {
        // Get an incomplete frame
        const bool dualReceiverEnabledAndPassive = dualReceiver != NULL &&
            dualReceiver->State() == kPassive &&
            dualReceiver->NackMode() == kNackInfinite;
        if (dualReceiverEnabledAndPassive && !_jitterBuffer.CompleteSequenceWithNextFrame())
        {
            // Jitter buffer state might get corrupt with this frame.
            dualReceiver->CopyJitterBufferStateFromReceiver(*this);
        }

        frame = _jitterBuffer.GetFrameForDecoding();
    }
    return frame;
}
|
||||
|
||||
// Returns a frame previously obtained from FrameForDecoding() to the
// jitter buffer so its buffer slot can be reused.
void
VCMReceiver::ReleaseFrame(VCMEncodedFrame* frame)
{
    _jitterBuffer.ReleaseFrame(frame);
}
|
||||
|
||||
// Reports receive statistics as estimated by the jitter buffer. On
// return bitRate is in kbit/s and frameRate in frames/s. Returns the
// jitter buffer's status code.
WebRtc_Word32
VCMReceiver::ReceiveStatistics(WebRtc_UWord32& bitRate, WebRtc_UWord32& frameRate)
{
    const WebRtc_Word32 status = _jitterBuffer.GetUpdate(frameRate, bitRate);
    bitRate /= 1000; // bits/s -> kbit/s
    return status;
}
|
||||
|
||||
// Fills frameCount with the number of delta and key frames received so
// far, as counted by the jitter buffer. Returns its status code.
WebRtc_Word32
VCMReceiver::ReceivedFrameCount(VCMFrameCount& frameCount) const
{
    return _jitterBuffer.GetFrameStatistics(frameCount.numDeltaFrames,
                                            frameCount.numKeyFrames);
}
|
||||
|
||||
void
|
||||
VCMReceiver::SetNackMode(VCMNackMode nackMode)
|
||||
{
|
||||
CriticalSectionScoped cs(_critSect);
|
||||
_nackMode = nackMode;
|
||||
switch (_nackMode)
|
||||
{
|
||||
case kNackInfinite:
|
||||
{
|
||||
_jitterBuffer.SetNackStatus(true);
|
||||
break;
|
||||
}
|
||||
case kNoNack:
|
||||
{
|
||||
_jitterBuffer.SetNackStatus(false);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!_master)
|
||||
{
|
||||
_state = kPassive; // The dual decoder defaults to passive
|
||||
}
|
||||
}
|
||||
|
||||
// Thread-safe getter for the current NACK mode.
VCMNackMode
VCMReceiver::NackMode() const
{
    CriticalSectionScoped cs(_critSect);
    return _nackMode;
}
|
||||
|
||||
// Copies the jitter buffer's list of missing sequence numbers into
// nackList. On input `size` is the capacity of nackList; on output it is
// the number of entries written (or required).
// Returns:
//   kNackOk              - list copied, size updated.
//   kNackKeyFrameRequest - loss cannot be repaired by NACK; the caller
//                          should request a key frame.
//   kNackNeedMoreMemory  - nackList too small; size set to required
//                          capacity, nothing copied.
VCMNackStatus
VCMReceiver::NackList(WebRtc_UWord16* nackList, WebRtc_UWord16& size)
{
    bool extended = false;
    WebRtc_UWord16 nackListSize = 0;
    WebRtc_UWord16* internalNackList = _jitterBuffer.GetNackList(nackListSize, extended);
    if (internalNackList == NULL && nackListSize == 0xffff)
    {
        // This combination is used to trigger key frame requests.
        size = 0;
        return kNackKeyFrameRequest;
    }
    if (nackListSize > size)
    {
        size = nackListSize;
        return kNackNeedMoreMemory;
    }
    // Only copy when there is a source list; memcpy from a NULL pointer
    // is undefined behavior even for a zero-length copy.
    if (internalNackList != NULL && nackListSize > 0)
    {
        memcpy(nackList, internalNackList, nackListSize * sizeof(WebRtc_UWord16));
    }
    size = nackListSize;
    return kNackOk;
}
|
||||
|
||||
// Decide whether we should change decoder state. This should be done if the dual decoder
|
||||
// has caught up with the decoder decoding with packet losses.
|
||||
bool
|
||||
VCMReceiver::DualDecoderCaughtUp(VCMEncodedFrame* dualFrame, VCMReceiver& dualReceiver) const
|
||||
{
|
||||
if (dualFrame == NULL)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
if (_jitterBuffer.LastDecodedTimestamp() == dualFrame->TimeStamp())
|
||||
{
|
||||
dualReceiver.UpdateState(kWaitForPrimaryDecode);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Clones the jitter buffer contents from `receiver` into this (dual)
// receiver so it can take over loss recovery, then re-enables NACK on
// the copied buffer if this receiver runs in a NACK mode.
// NOTE(review): the jitter buffer assignment happens outside _critSect;
// presumably VCMJitterBuffer's own locking makes this safe - confirm
// before changing.
void
VCMReceiver::CopyJitterBufferStateFromReceiver(const VCMReceiver& receiver)
{
    _jitterBuffer = receiver._jitterBuffer;

    {
        CriticalSectionScoped cs(_critSect);
        if (_nackMode != kNoNack)
        {
            _jitterBuffer.SetNackStatus(true);
        }
    }
}
|
||||
|
||||
// Thread-safe getter for the current receiver state.
VCMReceiverState
VCMReceiver::State() const
{
    CriticalSectionScoped cs(_critSect);
    return _state;
}
|
||||
|
||||
// Transitions the receiver state machine. Going from kPassive straight
// to kWaitForPrimaryDecode is treated as a programming error (a passive
// receiver has nothing to wait on), hence the assert.
void
VCMReceiver::UpdateState(VCMReceiverState newState)
{
    CriticalSectionScoped cs(_critSect);
    assert(!(_state == kPassive && newState == kWaitForPrimaryDecode));
    // assert(!(_state == kReceiving && newState == kPassive));
    _state = newState;
}
|
||||
|
||||
// Updates the dual receiver's state based on a frame handed to the
// primary decoder. No-op unless NACK (dual decoder mode) is enabled.
void
VCMReceiver::UpdateState(VCMEncodedFrame& frame)
{
    if (_nackMode == kNoNack)
    {
        // Dual decoder mode has not been enabled.
        return;
    }
    // Update the dual receiver state
    if (frame.Complete() && frame.FrameType() == kVideoFrameKey)
    {
        // A complete key frame resynchronizes the stream.
        UpdateState(kPassive);
    }
    if (State() == kWaitForPrimaryDecode &&
        frame.Complete() && !frame.MissingFrame())
    {
        // The primary decoder caught up on an intact frame.
        UpdateState(kPassive);
    }
    if (frame.MissingFrame() || !frame.Complete())
    {
        // State was corrupted, enable dual receiver.
        UpdateState(kReceiving);
    }
}
|
||||
|
||||
}
|
||||
102
modules/video_coding/main/source/receiver.h
Normal file
102
modules/video_coding/main/source/receiver.h
Normal file
@ -0,0 +1,102 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
|
||||
|
||||
#include "critical_section_wrapper.h"
|
||||
#include "jitter_buffer.h"
|
||||
#include "timing.h"
|
||||
#include "packet.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
class VCMEncodedFrame;
|
||||
|
||||
// Result codes for VCMReceiver::NackList().
enum VCMNackStatus
{
    kNackOk,
    kNackNeedMoreMemory,
    kNackKeyFrameRequest
};

// NACK behavior of the receiver / jitter buffer.
enum VCMNackMode
{
    kNackInfinite,
    kNoNack
};

// Receiver state machine states, used by the dual-decoder scheme.
enum VCMReceiverState
{
    kReceiving,
    kPassive,
    kWaitForPrimaryDecode
};
|
||||
|
||||
// Receive side of the video coding module: feeds RTP packets into a
// jitter buffer and hands out frames for decoding/rendering. A second,
// non-master ("dual") instance may shadow the primary one to recover
// from packet loss.
class VCMReceiver
{
public:
    VCMReceiver(VCMTiming& timing,
                WebRtc_Word32 vcmId = -1,
                WebRtc_Word32 receiverId = -1,
                bool master = true);
    ~VCMReceiver();

    WebRtc_Word32 Initialize();
    void UpdateRtt(WebRtc_UWord32 rtt);
    // Inserts a packet; frameWidth/frameHeight, when non-zero, set the
    // encoded size on the frame owning the packet.
    WebRtc_Word32 InsertPacket(const VCMPacket& packet,
                               WebRtc_UWord16 frameWidth,
                               WebRtc_UWord16 frameHeight);
    // Returns the next frame for decoding (or NULL), waiting at most
    // maxWaitTimeMs; writes the frame's render time to nextRenderTimeMs.
    VCMEncodedFrame* FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs,
                                      WebRtc_Word64& nextRenderTimeMs,
                                      bool renderTiming = true,
                                      VCMReceiver* dualReceiver = NULL);
    void ReleaseFrame(VCMEncodedFrame* frame);
    // bitRate out in kbit/s, frameRate out in frames/s.
    WebRtc_Word32 ReceiveStatistics(WebRtc_UWord32& bitRate, WebRtc_UWord32& frameRate);
    WebRtc_Word32 ReceivedFrameCount(VCMFrameCount& frameCount) const;

    // NACK
    void SetNackMode(VCMNackMode nackMode);
    VCMNackMode NackMode() const;
    VCMNackStatus NackList(WebRtc_UWord16* nackList, WebRtc_UWord16& size);

    // Dual decoder
    bool DualDecoderCaughtUp(VCMEncodedFrame* dualFrame, VCMReceiver& dualReceiver) const;
    VCMReceiverState State() const;

private:
    // Wait-until-decode-time strategy.
    VCMEncodedFrame* FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs,
                                      WebRtc_Word64 nextrenderTimeMs,
                                      VCMReceiver* dualReceiver);
    // Wait-until-render-time strategy.
    VCMEncodedFrame* FrameForRendering(WebRtc_UWord16 maxWaitTimeMs,
                                       WebRtc_Word64 nextrenderTimeMs,
                                       VCMReceiver* dualReceiver);
    void CopyJitterBufferStateFromReceiver(const VCMReceiver& receiver);
    void UpdateState(VCMReceiverState newState);
    void UpdateState(VCMEncodedFrame& frame);
    static WebRtc_Word32 GenerateReceiverId();

    CriticalSectionWrapper& _critSect;  // Guards _nackMode and _state.
    WebRtc_Word32           _vcmId;
    WebRtc_Word32           _receiverId;
    bool                    _master;    // Primary (true) vs. dual receiver.
    VCMJitterBuffer         _jitterBuffer;
    VCMTiming&              _timing;
    VCMEvent&               _renderWaitEvent;
    VCMNackMode             _nackMode;
    VCMReceiverState        _state;

    static WebRtc_Word32 _receiverIdCounter;
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
|
||||
214
modules/video_coding/main/source/rtt_filter.cc
Normal file
214
modules/video_coding/main/source/rtt_filter.cc
Normal file
@ -0,0 +1,214 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "trace.h"
|
||||
#include "internal_defines.h"
|
||||
#include "rtt_filter.h"
|
||||
|
||||
#include <cmath>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Constructs the RTT filter with its fixed tuning constants:
//   _filtFactMax     - cap on the exponential filter factor count.
//   _jumpStdDevs     - deviation (in std devs) that counts as a jump.
//   _driftStdDevs    - max-vs-avg gap (in std devs) counted as drift.
//   _detectThreshold - samples needed to confirm a jump/drift.
// vcmId/receiverId only label trace output.
VCMRttFilter::VCMRttFilter(WebRtc_Word32 vcmId, WebRtc_Word32 receiverId)
:
_vcmId(vcmId),
_receiverId(receiverId),
_filtFactMax(35),
_jumpStdDevs(2.5),
_driftStdDevs(3.5),
_detectThreshold(kMaxDriftJumpCount)
{
    Reset();
}
|
||||
|
||||
// Copies the dynamic filter state from rhs. The trace ids and the const
// tuning parameters are deliberately left untouched - only statistics
// and the jump/drift detection buffers are transferred.
VCMRttFilter&
VCMRttFilter::operator=(const VCMRttFilter& rhs)
{
    if (this != &rhs)
    {
        _gotNonZeroUpdate = rhs._gotNonZeroUpdate;
        _avgRtt = rhs._avgRtt;
        _varRtt = rhs._varRtt;
        _maxRtt = rhs._maxRtt;
        _filtFactCount = rhs._filtFactCount;
        _jumpCount = rhs._jumpCount;
        _driftCount = rhs._driftCount;
        memcpy(_jumpBuf, rhs._jumpBuf, sizeof(_jumpBuf));
        memcpy(_driftBuf, rhs._driftBuf, sizeof(_driftBuf));
    }
    return *this;
}
|
||||
|
||||
void
|
||||
VCMRttFilter::Reset()
|
||||
{
|
||||
_gotNonZeroUpdate = false;
|
||||
_avgRtt = 0;
|
||||
_varRtt = 0;
|
||||
_maxRtt = 0;
|
||||
_filtFactCount = 1;
|
||||
_jumpCount = 0;
|
||||
_driftCount = 0;
|
||||
memset(_jumpBuf, 0, kMaxDriftJumpCount);
|
||||
memset(_driftBuf, 0, kMaxDriftJumpCount);
|
||||
}
|
||||
|
||||
// Feeds a new RTT sample (ms) into the exponential filter, updating the
// running average, variance and maximum. Jump/drift detection may veto
// the long-time update, in which case avg/var are rolled back.
void
VCMRttFilter::Update(WebRtc_UWord32 rttMs)
{
    if (!_gotNonZeroUpdate)
    {
        if (rttMs == 0)
        {
            // Ignore leading zero samples; they carry no information.
            return;
        }
        _gotNonZeroUpdate = true;
    }

    // Sanity check: clamp unreasonably large samples to 3 seconds.
    if (rttMs > 3000)
    {
        rttMs = 3000;
    }

    // Filter factor ramps up with the sample count: 0, 1/2, 2/3, ...
    double filtFactor = 0;
    if (_filtFactCount > 1)
    {
        filtFactor = static_cast<double>(_filtFactCount - 1) / _filtFactCount;
    }
    _filtFactCount++;
    if (_filtFactCount > _filtFactMax)
    {
        // This prevents filtFactor from going above
        // (_filtFactMax - 1) / _filtFactMax,
        // e.g., _filtFactMax = 50 => filtFactor = 49/50 = 0.98
        _filtFactCount = _filtFactMax;
    }
    // Keep the old statistics so they can be restored if the detectors
    // decide this sample belongs to a jump/drift episode.
    double oldAvg = _avgRtt;
    double oldVar = _varRtt;
    _avgRtt = filtFactor * _avgRtt + (1 - filtFactor) * rttMs;
    _varRtt = filtFactor * _varRtt + (1 - filtFactor) *
              (rttMs - _avgRtt) * (rttMs - _avgRtt);
    _maxRtt = VCM_MAX(rttMs, _maxRtt);
    // NOTE: || short-circuits - DriftDetection() is skipped whenever
    // JumpDetection() returns false.
    if (!JumpDetection(rttMs) || !DriftDetection(rttMs))
    {
        // In some cases we don't want to update the statistics
        _avgRtt = oldAvg;
        _varRtt = oldVar;
    }
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                 "RttFilter Update: sample=%u avgRtt=%f varRtt=%f maxRtt=%u",
                 rttMs, _avgRtt, _varRtt, _maxRtt);
}
|
||||
|
||||
bool
|
||||
VCMRttFilter::JumpDetection(WebRtc_UWord32 rttMs)
|
||||
{
|
||||
double diffFromAvg = _avgRtt - rttMs;
|
||||
if (abs(diffFromAvg) > _jumpStdDevs * sqrt(_varRtt))
|
||||
{
|
||||
int diffSign = (diffFromAvg >= 0) ? 1 : -1;
|
||||
int jumpCountSign = (_jumpCount >= 0) ? 1 : -1;
|
||||
if (diffSign != jumpCountSign)
|
||||
{
|
||||
// Since the signs differ the samples currently
|
||||
// in the buffer is useless as they represent a
|
||||
// jump in a different direction.
|
||||
_jumpCount = 0;
|
||||
}
|
||||
if (abs(_jumpCount) < kMaxDriftJumpCount)
|
||||
{
|
||||
// Update the buffer used for the short time
|
||||
// statistics.
|
||||
// The sign of the diff is used for updating the counter since
|
||||
// we want to use the same buffer for keeping track of when
|
||||
// the RTT jumps down and up.
|
||||
_jumpBuf[abs(_jumpCount)] = rttMs;
|
||||
_jumpCount += diffSign;
|
||||
}
|
||||
if (abs(_jumpCount) >= _detectThreshold)
|
||||
{
|
||||
// Detected an RTT jump
|
||||
ShortRttFilter(_jumpBuf, abs(_jumpCount));
|
||||
_filtFactCount = _detectThreshold + 1;
|
||||
_jumpCount = 0;
|
||||
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
|
||||
"Detected an RTT jump");
|
||||
}
|
||||
else
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
_jumpCount = 0;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
VCMRttFilter::DriftDetection(WebRtc_UWord32 rttMs)
|
||||
{
|
||||
if (_maxRtt - _avgRtt > _driftStdDevs * sqrt(_varRtt))
|
||||
{
|
||||
if (_driftCount < kMaxDriftJumpCount)
|
||||
{
|
||||
// Update the buffer used for the short time
|
||||
// statistics.
|
||||
_driftBuf[_driftCount] = rttMs;
|
||||
_driftCount++;
|
||||
}
|
||||
if (_driftCount >= _detectThreshold)
|
||||
{
|
||||
// Detected an RTT drift
|
||||
ShortRttFilter(_driftBuf, _driftCount);
|
||||
_filtFactCount = _detectThreshold + 1;
|
||||
_driftCount = 0;
|
||||
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
|
||||
"Detected an RTT drift");
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
_driftCount = 0;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void
|
||||
VCMRttFilter::ShortRttFilter(WebRtc_UWord32* buf, WebRtc_UWord32 length)
|
||||
{
|
||||
if (length == 0)
|
||||
{
|
||||
return;
|
||||
}
|
||||
_maxRtt = 0;
|
||||
_avgRtt = 0;
|
||||
for (WebRtc_UWord32 i=0; i < length; i++)
|
||||
{
|
||||
if (buf[i] > _maxRtt)
|
||||
{
|
||||
_maxRtt = buf[i];
|
||||
}
|
||||
_avgRtt += buf[i];
|
||||
}
|
||||
_avgRtt = _avgRtt / static_cast<double>(length);
|
||||
}
|
||||
|
||||
// Returns the current RTT level in milliseconds: the tracked maximum,
// rounded to the nearest integer.
WebRtc_UWord32
VCMRttFilter::RttMs() const
{
    const double rounded = _maxRtt + 0.5;
    return static_cast<WebRtc_UWord32>(rounded);
}
|
||||
|
||||
}
|
||||
70
modules/video_coding/main/source/rtt_filter.h
Normal file
70
modules/video_coding/main/source/rtt_filter.h
Normal file
@ -0,0 +1,70 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Adaptive filter tracking the round-trip time of a stream. Keeps an
// exponential average/variance plus the observed maximum, with
// detectors that restart the statistics on sudden jumps or slow drifts.
class VCMRttFilter
{
public:
    VCMRttFilter(WebRtc_Word32 vcmId = 0, WebRtc_Word32 receiverId = 0);

    // Copies dynamic state only; ids and tuning constants are kept.
    VCMRttFilter& operator=(const VCMRttFilter& rhs);

    // Resets the filter.
    void Reset();
    // Updates the filter with a new sample.
    void Update(WebRtc_UWord32 rttMs);
    // A getter function for the current RTT level in ms.
    WebRtc_UWord32 RttMs() const;

private:
    // The size of the drift and jump memory buffers
    // and thus also the detection threshold for these
    // detectors in number of samples.
    enum { kMaxDriftJumpCount = 5 };
    // Detects RTT jumps by comparing the difference between
    // samples and average to the standard deviation.
    // Returns true if the long time statistics should be updated
    // and false otherwise
    bool JumpDetection(WebRtc_UWord32 rttMs);
    // Detects RTT drifts by comparing the difference between
    // max and average to the standard deviation.
    // Returns true if the long time statistics should be updated
    // and false otherwise
    bool DriftDetection(WebRtc_UWord32 rttMs);
    // Computes the short time average and maximum of the vector buf.
    void ShortRttFilter(WebRtc_UWord32* buf, WebRtc_UWord32 length);

    WebRtc_Word32         _vcmId;             // Trace labels only.
    WebRtc_Word32         _receiverId;
    bool                  _gotNonZeroUpdate;  // First real sample seen.
    double                _avgRtt;
    double                _varRtt;
    WebRtc_UWord32        _maxRtt;
    WebRtc_UWord32        _filtFactCount;     // Drives the filter factor ramp.
    const WebRtc_UWord32  _filtFactMax;
    const double          _jumpStdDevs;
    const double          _driftStdDevs;
    WebRtc_Word32         _jumpCount;         // Signed: direction of the jump.
    WebRtc_Word32         _driftCount;
    const WebRtc_Word32   _detectThreshold;
    WebRtc_UWord32        _jumpBuf[kMaxDriftJumpCount];
    WebRtc_UWord32        _driftBuf[kMaxDriftJumpCount];
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
|
||||
636
modules/video_coding/main/source/session_info.cc
Normal file
636
modules/video_coding/main/source/session_info.cc
Normal file
@ -0,0 +1,636 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "packet.h"
|
||||
#include "session_info.h"
|
||||
|
||||
#include <string.h>
|
||||
#include <cassert>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Constructs an empty session (one frame's worth of packets). Sequence
// number bounds start at -1 ("unset") and all per-packet bookkeeping
// arrays are cleared / marked kNaluUnset.
VCMSessionInfo::VCMSessionInfo():
    _haveFirstPacket(false),
    _markerBit(false),
    _sessionNACK(false),
    _completeSession(false),
    _frameType(kVideoFrameDelta),
    _previousFrameLoss(false),
    _lowSeqNum(-1),
    _highSeqNum(-1),
    _highestPacketIndex(0)
{
    memset(_packetSizeBytes, 0, sizeof(_packetSizeBytes));
    memset(_naluCompleteness, kNaluUnset, sizeof(_naluCompleteness));
    memset(_ORwithPrevByte, 0, sizeof(_ORwithPrevByte));
}
|
||||
|
||||
// No owned resources; trivial destructor.
VCMSessionInfo::~VCMSessionInfo()
{
}
|
||||
|
||||
// Lowest RTP sequence number seen in this session, or -1 if unset.
WebRtc_Word32 VCMSessionInfo::GetLowSeqNum() const
{
    return _lowSeqNum;
}
|
||||
|
||||
// Highest RTP sequence number seen in this session, or -1 if unset.
WebRtc_Word32 VCMSessionInfo::GetHighSeqNum() const
{
    return _highSeqNum;
}
|
||||
|
||||
void VCMSessionInfo::Reset()
|
||||
{
|
||||
_lowSeqNum = -1;
|
||||
_highSeqNum = -1;
|
||||
_markerBit = false;
|
||||
_haveFirstPacket = false;
|
||||
_completeSession = false;
|
||||
_frameType = kVideoFrameDelta;
|
||||
_previousFrameLoss = false;
|
||||
_sessionNACK = false;
|
||||
_highestPacketIndex = 0;
|
||||
memset(_packetSizeBytes, 0, sizeof(_packetSizeBytes));
|
||||
memset(_naluCompleteness, kNaluUnset, sizeof(_naluCompleteness));
|
||||
memset(_ORwithPrevByte, 0, sizeof(_ORwithPrevByte));
|
||||
}
|
||||
|
||||
// Total number of payload bytes currently stored for this session,
// summed over all packet slots up to the highest index seen.
WebRtc_UWord32 VCMSessionInfo::GetSessionLength()
{
    WebRtc_UWord32 total = 0;
    WebRtc_Word32 idx = 0;
    while (idx <= _highestPacketIndex)
    {
        total += _packetSizeBytes[idx];
        ++idx;
    }
    return total;
}
|
||||
|
||||
void
|
||||
VCMSessionInfo::SetStartSeqNumber(WebRtc_UWord16 seqNumber)
|
||||
{
|
||||
_lowSeqNum = seqNumber;
|
||||
_highSeqNum = seqNumber;
|
||||
}
|
||||
|
||||
bool
|
||||
VCMSessionInfo::HaveStartSeqNumber()
|
||||
{
|
||||
if(_lowSeqNum == -1 || _highSeqNum == -1)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Copies one packet's payload into the session's contiguous frame buffer
// at the position given by packetIndex, shifting later packets' data to
// make room. packet.bits marks a payload that must later be OR-glued to
// the previous byte (see _ORwithPrevByte); otherwise an H.264 start code
// may be prepended. Returns the number of bytes stored.
WebRtc_UWord32 VCMSessionInfo::InsertBuffer(WebRtc_UWord8* ptrStartOfLayer, WebRtc_Word32 packetIndex, const VCMPacket& packet)
{
    WebRtc_UWord32 moveLength = 0;
    WebRtc_UWord32 returnLength = 0;
    int i = 0;

    // need to calc offset before updating _packetSizeBytes
    WebRtc_UWord32 offset = 0;
    WebRtc_UWord32 packetSize = 0;

    // Store this packet length. Add length since we could have data present already (e.g. multicall case).
    if (packet.bits)
    {
        packetSize = packet.sizeBytes;
    }
    else
    {
        packetSize = packet.sizeBytes + (packet.insertStartCode?kH264StartCodeLengthBytes:0);
    }

    _packetSizeBytes[packetIndex] += packetSize;

    // count only the one in our layer
    for (i=0; i<packetIndex; ++i)
    {
        // Byte offset of this packet's slot = sum of earlier slot sizes.
        offset += _packetSizeBytes[i];
    }
    for (i=packetIndex+1; i<=_highestPacketIndex; ++i)
    {
        // Bytes belonging to later packets that must be shifted right.
        moveLength += _packetSizeBytes[i];
    }
    if (moveLength > 0)
    {
        // memmove: source and destination overlap.
        memmove((void*)(ptrStartOfLayer + offset + packetSize), ptrStartOfLayer + offset, moveLength);
    }

    if (packet.bits)
    {
        // Add the packet without ORing end and start bytes together.
        // This is done when the frame is fetched for decoding by calling
        // GlueTogether().
        _ORwithPrevByte[packetIndex] = true;
        if (packet.dataPtr != NULL)
        {
            memcpy((void*)(ptrStartOfLayer + offset), packet.dataPtr, packetSize);
        }
        returnLength = packetSize;
    }
    else
    {
        _ORwithPrevByte[packetIndex] = false;
        if (packet.dataPtr != NULL)
        {
            const unsigned char startCode[] = {0, 0, 0, 1};
            if(packet.insertStartCode)
            {
                // Prepend the Annex B start code before the payload.
                memcpy((void*)(ptrStartOfLayer + offset), startCode, kH264StartCodeLengthBytes);
            }
            memcpy((void*)(ptrStartOfLayer + offset
                + (packet.insertStartCode?kH264StartCodeLengthBytes:0)),
                packet.dataPtr,
                packet.sizeBytes);
        }
        returnLength = packetSize;
    }

    if (packet.isFirstPacket)
    {
        _haveFirstPacket = true;
    }
    if (packet.markerBit)
    {
        _markerBit = true;
    }
    // Store information about if the packet is decodable as is or not.
    _naluCompleteness[packetIndex]=packet.completeNALU;

    UpdateCompleteSession();

    return returnLength;
}
|
||||
|
||||
void VCMSessionInfo::UpdateCompleteSession()
|
||||
{
|
||||
if (_haveFirstPacket && _markerBit)
|
||||
{
|
||||
// do we have all packets in this session?
|
||||
bool completeSession = true;
|
||||
for (int i=0; i<= _highestPacketIndex; ++i)
|
||||
{
|
||||
if (_naluCompleteness[i] == kNaluUnset)
|
||||
{
|
||||
completeSession = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
_completeSession = completeSession;
|
||||
}
|
||||
}
|
||||
|
||||
// True when every packet of the frame has been received (as determined
// by UpdateCompleteSession()).
bool VCMSessionInfo::IsSessionComplete()
{
    return _completeSession;
}
|
||||
|
||||
|
||||
// Find the start and end index of the NAL unit containing packetIndex.
// startIndex is set to -1 if the start is not found, endIndex to -1 if
// the end is not found. Scans outwards from packetIndex using the
// per-packet kNaluStart/kNaluEnd/kNaluComplete markers.
void VCMSessionInfo::FindNaluBorder(WebRtc_Word32 packetIndex,WebRtc_Word32& startIndex, WebRtc_Word32& endIndex)
{

    if(_naluCompleteness[packetIndex]==kNaluStart ||
       _naluCompleteness[packetIndex]==kNaluComplete)
    {
        // This packet itself begins the NALU.
        startIndex=packetIndex;
    }
    else // Need to find the start
    {
        // Walk backwards until the previous NALU's end (or a complete
        // NALU with data) is found; the NALU then starts just after it.
        for(startIndex=packetIndex-1;startIndex>=0;--startIndex)
        {

            if( (_naluCompleteness[startIndex]==kNaluComplete && _packetSizeBytes[startIndex]>0) ||
                (_naluCompleteness[startIndex]==kNaluEnd && startIndex>0)) // Found previous NALU.
            {
                startIndex++;
                break;
            }
            if( _naluCompleteness[startIndex]==kNaluStart) // This is where the NALU start.
            {
                break;
            }
        }
        // NOTE(review): if the loop runs off the front, startIndex ends
        // at -1, which callers treat as "start not found".
    }

    if(_naluCompleteness[packetIndex]==kNaluEnd ||
       _naluCompleteness[packetIndex]==kNaluComplete)
    {
        // This packet itself ends the NALU.
        endIndex=packetIndex;
    }
    else
    {
        // Find the next NALU
        for(endIndex=packetIndex+1;endIndex<=_highestPacketIndex;++endIndex)
        {
            if((_naluCompleteness[endIndex]==kNaluComplete && _packetSizeBytes[endIndex]>0) ||
               _naluCompleteness[endIndex]==kNaluStart) // Found next NALU.
            {
                endIndex--;
                break;
            }
            if( _naluCompleteness[endIndex]==kNaluEnd) // This is where the NALU end.
            {
                break;
            }
        }
        if(endIndex>_highestPacketIndex)
            endIndex=-1;
    }
}
|
||||
|
||||
// Deletes all packets between startIndex and endIndex
//
// Zeroes the bookkeeping sizes of packets [startIndex, endIndex] and
// compacts the frame buffer at ptrStartOfLayer by moving the bytes of all
// packets after endIndex down over the deleted region.
// Returns the number of payload bytes removed.
WebRtc_UWord32 VCMSessionInfo::DeletePackets(WebRtc_UWord8* ptrStartOfLayer,WebRtc_Word32 startIndex,WebRtc_Word32 endIndex)
{
    //Get the number of bytes to delete.
    //Clear the size of these packets.
    WebRtc_UWord32 bytesToDelete=0; /// The number of bytes to delete.
    for(int j=startIndex;j<=endIndex;++j)
    {
        bytesToDelete+=_packetSizeBytes[j];
        _packetSizeBytes[j]=0;
    }
    if (bytesToDelete > 0)
    {
        // Get the offset we want to move to.
        // Byte offset of startIndex = sum of sizes of all preceding packets.
        int destOffset=0;
        for(int j=0;j<startIndex;j++)
        {
            destOffset+=_packetSizeBytes[j];
        }

        //Get the number of bytes to move
        // (everything stored after the deleted range).
        WebRtc_UWord32 numberOfBytesToMove=0;
        for (int j=endIndex+1; j<=_highestPacketIndex; ++j)
        {
            numberOfBytesToMove += _packetSizeBytes[j];
        }

        // memmove handles the overlapping source/destination regions.
        memmove((void*)(ptrStartOfLayer + destOffset),(void*)(ptrStartOfLayer + destOffset+bytesToDelete), numberOfBytesToMove);
    }

    return bytesToDelete;
}
|
||||
|
||||
// Makes the layer decodable. Ie only contain decodable NALU
// return the number of bytes deleted from the session. -1 if an error occurs
//
// Two passes: (1) for every lost packet, delete from the gap to the end of
// the enclosing NALU; (2) ensure the first remaining packet is a valid NALU
// entry point (complete NALU or NALU start), deleting it otherwise.
WebRtc_UWord32 VCMSessionInfo::MakeSessionDecodable(WebRtc_UWord8* ptrStartOfLayer)
{
    if(_lowSeqNum<0) // No packets in this session
        return 0;

    WebRtc_Word32 startIndex=0;
    WebRtc_Word32 endIndex=0;
    int packetIndex=0;
    WebRtc_UWord32 returnLength=0;
    for (packetIndex=0; packetIndex<= _highestPacketIndex; ++packetIndex)
    {
        if (_naluCompleteness[packetIndex] == kNaluUnset) // Found a lost packet
        {
            FindNaluBorder(packetIndex,startIndex,endIndex);
            if(startIndex==-1)
                startIndex=0;
            if(endIndex==-1)
                endIndex=_highestPacketIndex;

            // Deletion starts at the lost packet itself; packets of the same
            // NALU that precede the gap are left for the first-packet check.
            returnLength+=DeletePackets(ptrStartOfLayer,packetIndex,endIndex);
            packetIndex=endIndex; // Skip past the range just deleted.
        }// end lost packet
    }

    //Make sure the first packet is decodable (Either complete nalu or start of NALU)
    if(_packetSizeBytes[0]>0)
    {
        switch(_naluCompleteness[0])
        {
        case kNaluComplete: //Packet can be decoded as is.
            break;

        case kNaluStart: // Packet contain beginning of NALU- No need to do anything.
            break;
        case kNaluIncomplete: //Packet is not beginning or end of NALU
            //Need to find the end of this fua NALU and delete all packets.
            FindNaluBorder(0,startIndex,endIndex);
            if(endIndex==-1) // No end found. Delete
            {
                endIndex=_highestPacketIndex;
            }
            returnLength+=DeletePackets(ptrStartOfLayer,0,endIndex);//Delete this NALU.
            break;
        case kNaluEnd: // Packet is the end of a NALU
            //Need to delete this packet
            returnLength+=DeletePackets(ptrStartOfLayer,0,0);//Delete this NALU.
            break;
        default:
            // A non-zero packet size with an unset completeness state
            // should be impossible.
            assert(false);
        }
    }

    return returnLength;
}
|
||||
|
||||
// Clears entries in the NACK list 'list' (of length 'num') that correspond
// to packets already present in this session, by setting them to -1.
// Entries for packets still missing are left in place and mark the session
// as NACKed. Returns 0 on success, -1 on invalid arguments.
WebRtc_Word32 VCMSessionInfo::ZeroOutSeqNum(WebRtc_Word32* list, WebRtc_Word32 num)
{
    if ((NULL == list) || (num < 1))
    {
        return -1;
    }
    if (_lowSeqNum == -1)
    {
        // no packets in this frame
        return 0;
    }

    // Find end point (index of entry that equals _lowSeqNum)
    // That entry corresponds to a received packet, so clear it.
    int index = 0;
    for (; index <num; index++)
    {
        if (list[index] == _lowSeqNum)
        {
            list[index] = -1;
            break;
        }
    }

    // Zero out between first entry and end point
    // Walk the session's packet slots in parallel with the remaining list
    // entries: a filled slot means the packet arrived (clear its entry);
    // an unset slot means it is missing and the session needs a NACK.
    int i = 0;
    while ( i <= _highestPacketIndex && index < num)
    {
        if (_naluCompleteness[i] != kNaluUnset)
        {
            list[index] = -1;
        }
        else
        {
            _sessionNACK = true;
        }
        i++;
        index++;
    }
    if(!_haveFirstPacket)
    {
        // The first packet of the frame is missing; flag for retransmission.
        _sessionNACK = true;
    }
    return 0;
}
|
||||
|
||||
// Returns the highest packet index seen in this session so far.
WebRtc_Word32 VCMSessionInfo::GetHighestPacketIndex()
{
    return _highestPacketIndex;
}
|
||||
|
||||
bool VCMSessionInfo::HaveLastPacket()
|
||||
{
|
||||
return _markerBit;
|
||||
}
|
||||
|
||||
void VCMSessionInfo::ForceSetHaveLastPacket()
|
||||
{
|
||||
_markerBit = true;
|
||||
UpdateCompleteSession();
|
||||
}
|
||||
|
||||
bool VCMSessionInfo::IsRetransmitted()
|
||||
{
|
||||
return _sessionNACK;
|
||||
}
|
||||
|
||||
// Records the payload size for the packet at packetIndex.
// Out-of-range indices are rejected (asserts in debug builds).
void VCMSessionInfo::UpdatePacketSize(WebRtc_Word32 packetIndex, WebRtc_UWord32 length)
{
    const bool indexValid =
        (packetIndex >= 0) && (packetIndex < kMaxPacketsInJitterBuffer);
    if (!indexValid)
    {
        assert(!"SessionInfo::UpdatePacketSize Error: invalid packetIndex");
        return;
    }
    _packetSizeBytes[packetIndex] = length;
}
|
||||
|
||||
// Makes room for numberOfPacketIndices packet slots in front of index 0 by
// shifting the packet-size bookkeeping up and clearing the freed slots.
// Used when packets with lower sequence numbers arrive late.
// Invalid shift amounts (negative, or overflowing the jitter buffer) are
// rejected (asserts in debug builds).
void VCMSessionInfo::PrependPacketIndices(WebRtc_Word32 numberOfPacketIndices)
{
    // sanity
    if((numberOfPacketIndices + GetHighestPacketIndex() >= kMaxPacketsInJitterBuffer) || numberOfPacketIndices < 0)
    {
        //not allowed
        assert(!"SessionInfo::PrependPacketIndexes Error: invalid packetIndex");
        return;
    }
    // Works if we have new packets before packetIndex = 0
    int numOfPacketsToMove = GetHighestPacketIndex()+1;
    // BUG FIX: _packetSizeBytes is an array of WebRtc_UWord32 (see
    // session_info.h and the identical shift in InsertPacket); the previous
    // code used sizeof(WebRtc_UWord16) and therefore moved/cleared only half
    // of the bookkeeping data. Use the element size directly.
    memmove(&_packetSizeBytes[numberOfPacketIndices], &_packetSizeBytes[0],
            numOfPacketsToMove*sizeof(_packetSizeBytes[0]));
    memset(&_packetSizeBytes[0], 0,
           numberOfPacketIndices*sizeof(_packetSizeBytes[0]));
    // NOTE(review): unlike the shift in InsertPacket, _naluCompleteness and
    // _ORwithPrevByte are not shifted here — confirm callers do not rely on
    // them for the prepended range.

    _highestPacketIndex += (WebRtc_UWord16)numberOfPacketIndices;
}
|
||||
|
||||
// Resets the recorded payload size of the packet at packetIndex to zero.
// Out-of-range indices are rejected (asserts in debug builds).
void VCMSessionInfo::ClearPacketSize(WebRtc_Word32 packetIndex)
{
    const bool indexValid =
        (packetIndex >= 0) && (packetIndex < kMaxPacketsInJitterBuffer);
    if (!indexValid)
    {
        assert(!"SessionInfo::ClearPacketSize Error: invalid packetIndex");
        return;
    }
    _packetSizeBytes[packetIndex] = 0;
}
|
||||
|
||||
// Returns the recorded payload size of the packet at packetIndex,
// or 0 for an out-of-range index (asserts in debug builds).
WebRtc_UWord32 VCMSessionInfo::GetPacketSize(WebRtc_Word32 packetIndex)
{
    const bool indexValid =
        (packetIndex >= 0) && (packetIndex < kMaxPacketsInJitterBuffer);
    if (!indexValid)
    {
        assert(!"SessionInfo::GetPacketSize Error: invalid packetIndex");
        return 0;
    }
    return _packetSizeBytes[packetIndex];
}
|
||||
|
||||
// Inserts a packet into the session. Updates the low/high sequence-number
// bounds (with 16-bit wrap handling), shifts the bookkeeping arrays when a
// packet older than the current lowest arrives, and finally writes the
// payload via InsertBuffer. Returns the number of bytes inserted, or
// -1 on an invalid/overflowing index, -2 for a duplicate packet.
WebRtc_Word64
VCMSessionInfo::InsertPacket(const VCMPacket& packet, WebRtc_UWord8* ptrStartOfLayer)
{
    //not allowed
    // Inserting a start code and OR-ing with the previous byte are
    // mutually exclusive operations.
    assert(!packet.insertStartCode || !packet.bits);

    // Check if this is first packet (only valid for some codecs)
    if (packet.isFirstPacket)
    {
        // the first packet in the frame always signals the frametype
        _frameType = packet.frameType;
    }

    // Check sequence number and update highest and lowest sequence numbers received.
    // Move data if this seq num is lower than previously lowest.

    if (packet.seqNum > _highSeqNum)
    {
        // This packet's seq num is higher than previously highest seq num; normal case
        // if we have a wrap, only update with wrapped values
        if (!(_highSeqNum < 0x00ff && packet.seqNum > 0xff00))
        {
            _highSeqNum = packet.seqNum;
        }
    } else if (_highSeqNum > 0xff00 && packet.seqNum < 0x00ff)
    {
        // wrap
        _highSeqNum = packet.seqNum;
    }
    // Index of this packet relative to the first (lowest seq num) packet.
    int packetIndex = packet.seqNum - (WebRtc_UWord16)_lowSeqNum;
    if(_lowSeqNum < 0x00ff && packet.seqNum > 0xff00)
    {
        // negative wrap
        packetIndex = packet.seqNum - 0x10000 - _lowSeqNum;
    }
    if (packetIndex < 0)
    {
        if (_lowSeqNum > 0xff00 && packet.seqNum < 0x00ff)
        {
            // we have a false detect due to the wrap
            packetIndex = (0xffff - (WebRtc_UWord16)_lowSeqNum) + packet.seqNum + (WebRtc_UWord16)1;
        } else
        {
            // This packet's seq num is lower than previously lowest seq num, but no wrap
            // We need to move the data in all arrays indexed by packetIndex and insert the new
            // packet's info
            // How many packets should we leave room for (positions to shift)?
            // Example - this seq num is 3 lower than previously lowest seq num
            // Before: |--prev packet with lowest seq num--|--|...|
            // After: |--new lowest seq num--|--|--|--prev packet with lowest seq num--|--|...|

            WebRtc_UWord16 positionsToShift = (WebRtc_UWord16)_lowSeqNum - packet.seqNum;
            WebRtc_UWord16 numOfPacketsToMove = _highestPacketIndex + 1;

            // sanity, do we have room for the shift?
            if ((positionsToShift + numOfPacketsToMove) > kMaxPacketsInJitterBuffer)
            {
                return -1;
            }

            // Shift _ORwithPrevByte array
            memmove(&_ORwithPrevByte[positionsToShift],
                    &_ORwithPrevByte[0], numOfPacketsToMove*sizeof(bool));
            memset(&_ORwithPrevByte[0], false, positionsToShift*sizeof(bool));

            // Shift _packetSizeBytes array
            memmove(&_packetSizeBytes[positionsToShift],
                    &_packetSizeBytes[0], numOfPacketsToMove*sizeof(WebRtc_UWord32));
            memset(&_packetSizeBytes[0], 0, positionsToShift*sizeof(WebRtc_UWord32));

            //Shift _naluCompleteness
            memmove(&_naluCompleteness[positionsToShift],
                    &_naluCompleteness[0], numOfPacketsToMove*sizeof(WebRtc_UWord8));
            memset(&_naluCompleteness[0], kNaluUnset, positionsToShift*sizeof(WebRtc_UWord8));

            _highestPacketIndex += positionsToShift;
            _lowSeqNum = packet.seqNum;
            packetIndex = 0; // (seqNum - _lowSeqNum) = 0
        }
    } // if (_lowSeqNum > seqNum)

    // sanity
    if (packetIndex >= kMaxPacketsInJitterBuffer )
    {
        return -1;
    }
    if (packetIndex < 0 )
    {
        return -1;
    }

    // Check for duplicate packets
    if (_packetSizeBytes[packetIndex] != 0)
    {
        // We have already received a packet with this sequence number, ignore it.
        return -2;
    }

    // update highest packet index
    _highestPacketIndex = packetIndex > _highestPacketIndex ? packetIndex :_highestPacketIndex;

    return InsertBuffer(ptrStartOfLayer, packetIndex, packet);
}
|
||||
|
||||
// Performs codec-specific fix-ups on the frame buffer before decoding:
// packets flagged with _ORwithPrevByte get their first byte OR-ed into the
// previous packet's last byte (with the buffer compacted by one byte);
// for H.263, lost packets are replaced with 10 zero bytes. If nothing but
// zeros remains, the frame is dropped. Returns the resulting frame length.
WebRtc_UWord32 VCMSessionInfo::PrepareForDecode(WebRtc_UWord8* ptrStartOfLayer, VideoCodecType codec)
{
    WebRtc_UWord32 currentPacketOffset = 0;
    WebRtc_UWord32 length = GetSessionLength();
    WebRtc_UWord32 idSum = 0;
    WebRtc_UWord32 realDataBytes = 0;  // Bytes of actual (non-zeroed) payload.
    if (length == 0)
    {
        return length;
    }
    bool previousLost = false;
    for (int i=0; i <= _highestPacketIndex; i++)
    {
        if (_ORwithPrevByte[i])
        {
            if (currentPacketOffset > 0)
            {
                WebRtc_UWord8* ptrFirstByte = ptrStartOfLayer + currentPacketOffset;

                if (_packetSizeBytes[i-1] == 0 || previousLost)
                {
                    // It is be better to throw away this packet if we are missing the
                    // previous packet.
                    memset(ptrFirstByte, 0, _packetSizeBytes[i]);
                    previousLost = true;
                }
                else if (_packetSizeBytes[i] > 0) // Ignore if empty packet
                {
                    // Glue with previous byte
                    // Move everything from [this packet start + 1, end of buffer] one byte to the left
                    WebRtc_UWord8* ptrPrevByte = ptrFirstByte - 1;
                    *ptrPrevByte = (*ptrPrevByte) | (*ptrFirstByte);
                    WebRtc_UWord32 lengthToEnd = length - (currentPacketOffset + 1);
                    memmove((void*)ptrFirstByte, (void*)(ptrFirstByte + 1), lengthToEnd);
                    _packetSizeBytes[i]--;  // This packet shrank by the glued byte.
                    length--;
                    previousLost = false;
                    realDataBytes += _packetSizeBytes[i];
                }
            }
            else
            {
                // First packet in the buffer cannot be glued backwards; zero it.
                memset(ptrStartOfLayer, 0, _packetSizeBytes[i]);
                previousLost = true;
            }
        }
        else if (_packetSizeBytes[i] == 0 && codec == kVideoCodecH263)
        {
            // H.263: substitute a lost packet with 10 zero bytes so the
            // decoder keeps its place in the stream.
            WebRtc_UWord8* ptrFirstByte = ptrStartOfLayer + currentPacketOffset;
            memmove(ptrFirstByte + 10, ptrFirstByte, length - currentPacketOffset);
            memset(ptrFirstByte, 0, 10);
            _packetSizeBytes[i] = 10;
            length += _packetSizeBytes[i];
            previousLost = true;
        }
        else
        {
            realDataBytes += _packetSizeBytes[i];
            previousLost = false;
        }
        currentPacketOffset += _packetSizeBytes[i];
    }
    if (realDataBytes == 0)
    {
        // Drop the frame since all it contains are zeros
        length = 0;
        memset(_packetSizeBytes, 0, sizeof(_packetSizeBytes));
    }
    return length;
}
|
||||
|
||||
}
|
||||
94
modules/video_coding/main/source/session_info.h
Normal file
94
modules/video_coding/main/source/session_info.h
Normal file
@ -0,0 +1,94 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
#include "module_common_types.h"
|
||||
#include "packet.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Collects the packets that belong to one encoded frame ("session"),
// tracks their sequence numbers, payload sizes and NALU completeness,
// and can compact the frame buffer so it only contains decodable data.
class VCMSessionInfo
{
public:
    VCMSessionInfo();
    virtual ~VCMSessionInfo();

    VCMSessionInfo(const VCMSessionInfo& rhs);

    // Clears entries in the NACK list that correspond to packets already
    // received in this session (sets them to -1). Returns 0 on success,
    // -1 on invalid arguments.
    WebRtc_Word32 ZeroOutSeqNum(WebRtc_Word32* list, WebRtc_Word32 num);
    virtual void Reset();

    // Inserts a packet into the frame buffer at ptrStartOfLayer.
    // Returns the number of bytes inserted, or a negative value on error
    // (-1 invalid index, -2 duplicate packet).
    WebRtc_Word64 InsertPacket(const VCMPacket& packet, WebRtc_UWord8* ptrStartOfLayer);

    virtual bool IsSessionComplete();
    // Removes incomplete NALUs so that only decodable data remains.
    // Returns the number of bytes deleted from the session.
    WebRtc_UWord32 MakeSessionDecodable(WebRtc_UWord8* ptrStartOfLayer);

    WebRtc_UWord32 GetSessionLength();
    bool HaveLastPacket();
    void ForceSetHaveLastPacket();
    bool IsRetransmitted();
    webrtc::FrameType FrameType() const { return _frameType; }

    virtual WebRtc_Word32 GetHighestPacketIndex();
    virtual WebRtc_UWord32 GetPacketSize(WebRtc_Word32 packetIndex);
    virtual void ClearPacketSize(WebRtc_Word32 packetIndex);
    virtual void UpdatePacketSize(WebRtc_Word32 packetIndex, WebRtc_UWord32 length);
    // Shifts packet bookkeeping to make room for packets before index 0.
    virtual void PrependPacketIndices(WebRtc_Word32 numberOfPacketIndexes);

    void SetStartSeqNumber(WebRtc_UWord16 seqNumber);

    bool HaveStartSeqNumber();

    WebRtc_Word32 GetLowSeqNum() const;
    WebRtc_Word32 GetHighSeqNum() const;

    // Codec-specific fix-up of the frame buffer before decoding (byte
    // gluing, H.263 zero-stuffing). Returns the resulting frame length.
    WebRtc_UWord32 PrepareForDecode(WebRtc_UWord8* ptrStartOfLayer, VideoCodecType codec);

    void SetPreviousFrameLoss() { _previousFrameLoss = true; }
    bool PreviousFrameLoss() const { return _previousFrameLoss; }

protected:
    // Writes one packet's payload into the frame buffer and returns the
    // number of bytes inserted.
    WebRtc_UWord32 InsertBuffer(WebRtc_UWord8* ptrStartOfLayer,
                                WebRtc_Word32 packetIndex,
                                const VCMPacket& packet);
    // Finds the packet-index borders of the NALU containing packetIndex;
    // -1 in an out-parameter means that border was not found.
    void FindNaluBorder(WebRtc_Word32 packetIndex,
                        WebRtc_Word32& startIndex,
                        WebRtc_Word32& endIndex);
    // Deletes packets [startIndex, endIndex] and compacts the frame buffer;
    // returns the number of bytes removed.
    WebRtc_UWord32 DeletePackets(WebRtc_UWord8* ptrStartOfLayer,
                                 WebRtc_Word32 startIndex,
                                 WebRtc_Word32 endIndex);
    // Recomputes _completeSession from the received-packet bookkeeping.
    void UpdateCompleteSession();

    bool _haveFirstPacket;    // If we have inserted the first packet into this frame
    bool _markerBit;          // If we have inserted a packet with markerbit into this frame
    bool _sessionNACK;        // If this session has been NACKed by JB
    bool _completeSession;
    webrtc::FrameType _frameType;
    bool _previousFrameLoss;

    WebRtc_Word32 _lowSeqNum;      // Lowest packet sequence number in a session
    WebRtc_Word32 _highSeqNum;     // Highest packet sequence number in a session

    // Highest packet index in this frame
    WebRtc_UWord16 _highestPacketIndex;
    // Length of packet (used for reordering)
    WebRtc_UWord32 _packetSizeBytes[kMaxPacketsInJitterBuffer];
    // Completness of packets. Used for deciding if the frame is decodable.
    WebRtc_UWord8 _naluCompleteness[kMaxPacketsInJitterBuffer];
    // Per-packet flag: OR this packet's first byte into the previous byte
    // when preparing for decode.
    bool _ORwithPrevByte[kMaxPacketsInJitterBuffer];
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
|
||||
55
modules/video_coding/main/source/tick_time.h
Normal file
55
modules/video_coding/main/source/tick_time.h
Normal file
@ -0,0 +1,55 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_TICK_TIME_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_TICK_TIME_H_
|
||||
|
||||
#include "tick_util.h"
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
//#define TICK_TIME_DEBUG
|
||||
|
||||
// Wrapper around TickTime. When TICK_TIME_DEBUG is defined, the real clock
// is replaced with a manually stepped debug clock (_timeNowDebug, in ms)
// for deterministic testing; otherwise TickTime's own clock is used and
// stepping the debug clock is a programming error.
class VCMTickTime : public TickTime
{
#ifdef TICK_TIME_DEBUG
public:
    /*
    *   Get current time
    */
    // Not supported in debug-clock mode.
    static TickTime Now() { assert(false); };

    /*
    *   Get time in milli seconds
    */
    static WebRtc_Word64 MillisecondTimestamp() { return _timeNowDebug; };

    /*
    *   Get time in micro seconds
    */
    static WebRtc_Word64 MicrosecondTimestamp() { return _timeNowDebug * 1000LL; };

    // Advances the debug clock by one millisecond.
    static void IncrementDebugClock() { _timeNowDebug++; };

private:
    static WebRtc_Word64 _timeNowDebug;

#else
public:
    // The debug clock must never be stepped in a non-debug build.
    static void IncrementDebugClock() { assert(false); };
#endif
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_TICK_TIME_H_
|
||||
259
modules/video_coding/main/source/timestamp_extrapolator.cc
Normal file
259
modules/video_coding/main/source/timestamp_extrapolator.cc
Normal file
@ -0,0 +1,259 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "internal_defines.h"
|
||||
#include "timestamp_extrapolator.h"
|
||||
#include "tick_time.h"
|
||||
#include "trace.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Constructs the extrapolator and initializes the recursive least-squares
// filter state via Reset() with the current wall-clock time.
VCMTimestampExtrapolator::VCMTimestampExtrapolator(WebRtc_Word32 vcmId, WebRtc_Word32 id)
:
_rwLock(*RWLockWrapper::CreateRWLock()),  // owned; released in the destructor
_vcmId(vcmId),
_id(id),
_startMs(0),
_firstTimestamp(0),
_wrapArounds(0),
_prevTs90khz(0),
_lambda(1),                       // RLS forgetting factor
_firstAfterReset(true),
_packetCount(0),
_startUpFilterDelayInPackets(2),  // samples before the filter is trusted
_detectorAccumulatorPos(0),
_detectorAccumulatorNeg(0),
_alarmThreshold(60e3),            // CUSUM alarm level, in timestamp ticks
_accDrift(6600), // in timestamp ticks, i.e. 15 ms
_accMaxError(7000),               // per-sample error clamp for the detector
_P11(1e10)                        // initial offset variance
{
    Reset(VCMTickTime::MillisecondTimestamp());
}
|
||||
|
||||
VCMTimestampExtrapolator::~VCMTimestampExtrapolator()
{
    // _rwLock was allocated by RWLockWrapper::CreateRWLock() in the
    // constructor and stored as a reference; free it through its address.
    delete &_rwLock;
}
|
||||
|
||||
void
|
||||
VCMTimestampExtrapolator::Reset(const WebRtc_Word64 nowMs /* = -1 */)
|
||||
{
|
||||
WriteLockScoped wl(_rwLock);
|
||||
if (nowMs > -1)
|
||||
{
|
||||
_startMs = nowMs;
|
||||
}
|
||||
else
|
||||
{
|
||||
_startMs = VCMTickTime::MillisecondTimestamp();
|
||||
}
|
||||
_prevMs = _startMs;
|
||||
_firstTimestamp = 0;
|
||||
_w[0] = 90.0;
|
||||
_w[1] = 0;
|
||||
_P[0][0] = 1;
|
||||
_P[1][1] = _P11;
|
||||
_P[0][1] = _P[1][0] = 0;
|
||||
_firstAfterReset = true;
|
||||
_prevTs90khz = 0;
|
||||
_wrapArounds = 0;
|
||||
_packetCount = 0;
|
||||
_detectorAccumulatorPos = 0;
|
||||
_detectorAccumulatorNeg = 0;
|
||||
}
|
||||
|
||||
// Feeds one (local time, RTP timestamp) observation into the recursive
// least-squares filter estimating ts = w[0]*t + w[1] (+ _firstTimestamp).
// Handles timestamp wrap-around, resets after 10 s of silence, and widens
// the offset uncertainty when the CUSUM detector signals a delay change.
void
VCMTimestampExtrapolator::Update(WebRtc_Word64 tMs, WebRtc_UWord32 ts90khz, bool trace)
{

    _rwLock.AcquireLockExclusive();
    if (tMs - _prevMs > 10e3)
    {
        // Ten seconds without a complete frame.
        // Reset the extrapolator
        // Reset() takes the lock itself, so release/re-acquire around it.
        _rwLock.ReleaseLockExclusive();
        Reset();
        _rwLock.AcquireLockExclusive();
    }
    else
    {
        _prevMs = tMs;
    }

    // Remove offset to prevent badly scaled matrices
    tMs -= _startMs;

    WebRtc_Word32 prevWrapArounds = _wrapArounds;
    CheckForWrapArounds(ts90khz);
    WebRtc_Word32 wrapAroundsSincePrev = _wrapArounds - prevWrapArounds;

    // Ignore out-of-order timestamps that are not explained by a wrap.
    if (wrapAroundsSincePrev == 0 && ts90khz < _prevTs90khz)
    {
        _rwLock.ReleaseLockExclusive();
        return;
    }

    if (_firstAfterReset)
    {
        // Make an initial guess of the offset,
        // should be almost correct since tMs - _startMs
        // should about zero at this time.
        _w[1] = -_w[0] * tMs;
        _firstTimestamp = ts90khz;
        _firstAfterReset = false;
    }

    // Compensate for wraparounds by changing the line offset
    _w[1] = _w[1] - wrapAroundsSincePrev * ((static_cast<WebRtc_Word64>(1)<<32) - 1);

    // Prediction error for this observation.
    double residual = (static_cast<double>(ts90khz) - _firstTimestamp) - static_cast<double>(tMs) * _w[0] - _w[1];
    if (DelayChangeDetection(residual, trace) &&
        _packetCount >= _startUpFilterDelayInPackets)
    {
        // A sudden change of average network delay has been detected.
        // Force the filter to adjust its offset parameter by changing
        // the offset uncertainty. Don't do this during startup.
        _P[1][1] = _P11;
    }
    //T = [t(k) 1]';
    //that = T'*w;
    //K = P*T/(lambda + T'*P*T);
    double K[2];
    K[0] = _P[0][0] * tMs + _P[0][1];
    K[1] = _P[1][0] * tMs + _P[1][1];
    double TPT = _lambda + tMs * K[0] + K[1];
    K[0] /= TPT;
    K[1] /= TPT;
    //w = w + K*(ts(k) - that);
    _w[0] = _w[0] + K[0] * residual;
    _w[1] = _w[1] + K[1] * residual;
    //P = 1/lambda*(P - K*T'*P);
    double p00 = 1 / _lambda * (_P[0][0] - (K[0] * tMs * _P[0][0] + K[0] * _P[1][0]));
    double p01 = 1 / _lambda * (_P[0][1] - (K[0] * tMs * _P[0][1] + K[0] * _P[1][1]));
    _P[1][0] = 1 / _lambda * (_P[1][0] - (K[1] * tMs * _P[0][0] + K[1] * _P[1][0]));
    _P[1][1] = 1 / _lambda * (_P[1][1] - (K[1] * tMs * _P[0][1] + K[1] * _P[1][1]));
    _P[0][0] = p00;
    _P[0][1] = p01;
    if (_packetCount < _startUpFilterDelayInPackets)
    {
        _packetCount++;
    }
    if (trace)
    {
        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _id), "w[0]=%f w[1]=%f ts=%u tMs=%u", _w[0], _w[1], ts90khz, tMs);
    }
    _rwLock.ReleaseLockExclusive();
}
|
||||
|
||||
// Predicts the RTP timestamp (90 kHz) corresponding to local time tMs.
// Returns 0 before any packets are seen; uses a nominal 90 kHz clock during
// filter start-up; otherwise evaluates the estimated line.
WebRtc_UWord32
VCMTimestampExtrapolator::ExtrapolateTimestamp(WebRtc_Word64 tMs) const
{
    ReadLockScoped rl(_rwLock);
    if (_packetCount == 0)
    {
        // No observations yet; nothing to extrapolate from.
        return 0;
    }
    if (_packetCount < _startUpFilterDelayInPackets)
    {
        // Start-up: step forward from the last observation at 90 ticks/ms.
        return static_cast<WebRtc_UWord32>(90.0 * (tMs - _prevMs) + _prevTs90khz + 0.5);
    }
    // Steady state: ts = w0 * (t - start) + w1 + firstTimestamp, rounded.
    return static_cast<WebRtc_UWord32>(_w[0] * (tMs - _startMs) + _w[1] + _firstTimestamp + 0.5);
}
|
||||
|
||||
// Predicts the local wall-clock time (ms) corresponding to an RTP timestamp.
// Returns -1 before any packets are seen; uses a nominal 90 kHz clock during
// filter start-up; otherwise inverts the estimated line.
WebRtc_Word64
VCMTimestampExtrapolator::ExtrapolateLocalTime(WebRtc_UWord32 timestamp90khz) const
{
    ReadLockScoped rl(_rwLock);
    if (_packetCount == 0)
    {
        // No observations yet.
        return -1;
    }
    if (_packetCount < _startUpFilterDelayInPackets)
    {
        // Start-up: convert the timestamp delta at 90 ticks/ms.
        const double tickDelta = static_cast<double>(timestamp90khz - _prevTs90khz);
        return _prevMs + static_cast<WebRtc_Word64>(tickDelta / 90.0 + 0.5);
    }
    if (_w[0] < 1e-3)
    {
        // Degenerate slope; avoid dividing by (near) zero.
        return _startMs;
    }
    // Steady state: t = start + (ts - first - w1) / w0, rounded.
    const double timestampDiff =
        static_cast<double>(timestamp90khz) - static_cast<double>(_firstTimestamp);
    return static_cast<WebRtc_Word64>(
        static_cast<double>(_startMs) + (timestampDiff - _w[1]) / _w[0] + 0.5);
}
|
||||
|
||||
// Investigates if the timestamp clock has overflowed since the last timestamp and
// keeps track of the number of wrap arounds since reset.
void
VCMTimestampExtrapolator::CheckForWrapArounds(WebRtc_UWord32 ts90khz)
{
    if (_prevTs90khz == 0)
    {
        // First observation: just remember it, no wrap can be detected yet.
        _prevTs90khz = ts90khz;
        return;
    }
    if (ts90khz < _prevTs90khz)
    {
        // This difference will probably be less than -2^31 if we have had a wrap around
        // (e.g. timestamp = 1, _previousTimestamp = 2^32 - 1). Since it is casted to a Word32,
        // it should be positive.
        if (static_cast<WebRtc_Word32>(ts90khz - _prevTs90khz) > 0)
        {
            // Forward wrap around
            _wrapArounds++;
        }
    }
    // This difference will probably be less than -2^31 if we have had a backward wrap around.
    // Since it is casted to a Word32, it should be positive.
    else if (static_cast<WebRtc_Word32>(_prevTs90khz - ts90khz) > 0)
    {
        // Backward wrap around
        _wrapArounds--;
    }
    _prevTs90khz = ts90khz;
}
|
||||
|
||||
// CUSUM detector for sudden network-delay changes. Accumulates the (clamped,
// drift-compensated) prediction error in separate positive and negative
// accumulators; when either exceeds the alarm threshold, both are reset and
// true is returned to signal an alarm.
bool
VCMTimestampExtrapolator::DelayChangeDetection(double error, bool trace)
{
    // CUSUM detection of sudden delay changes
    // Clamp the per-sample error so one outlier cannot trigger the alarm.
    error = (error > 0) ? VCM_MIN(error, _accMaxError) : VCM_MAX(error, -_accMaxError);
    // Subtract/add the allowed drift so small steady errors do not accumulate.
    _detectorAccumulatorPos = VCM_MAX(_detectorAccumulatorPos + error - _accDrift, (double)0);
    _detectorAccumulatorNeg = VCM_MIN(_detectorAccumulatorNeg + error + _accDrift, (double)0);
    if (_detectorAccumulatorPos > _alarmThreshold || _detectorAccumulatorNeg < -_alarmThreshold)
    {
        // Alarm
        if (trace)
        {
            WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _id), "g1=%f g2=%f alarm=1", _detectorAccumulatorPos, _detectorAccumulatorNeg);
        }
        _detectorAccumulatorPos = _detectorAccumulatorNeg = 0;
        return true;
    }
    if (trace)
    {
        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _id), "g1=%f g2=%f alarm=0", _detectorAccumulatorPos, _detectorAccumulatorNeg);
    }
    return false;
}
|
||||
|
||||
}
|
||||
59
modules/video_coding/main/source/timestamp_extrapolator.h
Normal file
59
modules/video_coding/main/source/timestamp_extrapolator.h
Normal file
@ -0,0 +1,59 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_EXTRAPOLATOR_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_EXTRAPOLATOR_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
#include "rw_lock_wrapper.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// Estimates the linear relation between local wall-clock time and the RTP
// timestamp clock (90 kHz) with a recursive least-squares filter, so that
// either clock can be extrapolated from the other. Thread-safe via an
// internal read/write lock.
class VCMTimestampExtrapolator
{
public:
    VCMTimestampExtrapolator(WebRtc_Word32 vcmId = 0, WebRtc_Word32 receiverId = 0);
    ~VCMTimestampExtrapolator();
    // Feeds one (local time ms, RTP timestamp) observation into the filter.
    void Update(WebRtc_Word64 tMs, WebRtc_UWord32 ts90khz, bool trace = true);
    // Predicts the RTP timestamp at local time tMs (0 before any packets).
    WebRtc_UWord32 ExtrapolateTimestamp(WebRtc_Word64 tMs) const;
    // Predicts the local time (ms) of an RTP timestamp (-1 before any packets).
    WebRtc_Word64 ExtrapolateLocalTime(WebRtc_UWord32 timestamp90khz) const;
    // Re-initializes the filter; nowMs >= 0 sets the new time origin.
    void Reset(WebRtc_Word64 nowMs = -1);

private:
    // Tracks 32-bit timestamp wrap-arounds (forward and backward).
    void CheckForWrapArounds(WebRtc_UWord32 ts90khz);
    // CUSUM detector for sudden network-delay changes.
    bool DelayChangeDetection(double error, bool trace = true);
    RWLockWrapper& _rwLock;           // owned; deleted in the destructor
    WebRtc_Word32 _vcmId;
    WebRtc_Word32 _id;
    bool _trace;
    double _w[2];                     // filter estimate: slope, offset
    double _P[2][2];                  // estimate covariance
    WebRtc_Word64 _startMs;           // local-time origin
    WebRtc_Word64 _prevMs;
    WebRtc_UWord32 _firstTimestamp;   // timestamp origin
    WebRtc_Word32 _wrapArounds;
    WebRtc_UWord32 _prevTs90khz;
    const double _lambda;             // RLS forgetting factor
    bool _firstAfterReset;
    WebRtc_UWord32 _packetCount;
    const WebRtc_UWord32 _startUpFilterDelayInPackets;

    // CUSUM delay-change detector state and tuning constants.
    double _detectorAccumulatorPos;
    double _detectorAccumulatorNeg;
    const double _alarmThreshold;
    const double _accDrift;
    const double _accMaxError;
    const double _P11;                // initial offset variance
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_EXTRAPOLATOR_H_
|
||||
99
modules/video_coding/main/source/timestamp_map.cc
Normal file
99
modules/video_coding/main/source/timestamp_map.cc
Normal file
@ -0,0 +1,99 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "timestamp_map.h"
|
||||
#include <stdlib.h>
|
||||
#include <assert.h>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Constructor. Optional parameter specifies maximum number of
|
||||
// coexisting timers.
|
||||
VCMTimestampMap::VCMTimestampMap(WebRtc_Word32 length):
|
||||
_nextAddIx(0),
|
||||
_nextPopIx(0)
|
||||
{
|
||||
if (length <= 0)
|
||||
{
|
||||
// default
|
||||
length = 10;
|
||||
}
|
||||
|
||||
_map = new VCMTimestampDataTuple[length];
|
||||
_length = length;
|
||||
}
|
||||
|
||||
// Destructor.
|
||||
VCMTimestampMap::~VCMTimestampMap()
|
||||
{
|
||||
delete [] _map;
|
||||
}
|
||||
|
||||
// Empty the list of timers.
|
||||
void
|
||||
VCMTimestampMap::Reset()
|
||||
{
|
||||
_nextAddIx = 0;
|
||||
_nextPopIx = 0;
|
||||
}
|
||||
|
||||
// Stores a (timestamp, data) pair in the circular buffer. If the buffer
// becomes full, the oldest entry is dropped and -1 is returned; otherwise 0.
WebRtc_Word32
VCMTimestampMap::Add(WebRtc_UWord32 timestamp, void* data)
{
    VCMTimestampDataTuple& slot = _map[_nextAddIx];
    slot.timestamp = timestamp;
    slot.data = data;
    _nextAddIx = (_nextAddIx + 1) % _length;

    if (_nextAddIx == _nextPopIx)
    {
        // Circular list full; forget oldest entry
        _nextPopIx = (_nextPopIx + 1) % _length;
        return -1;
    }
    return 0;
}
|
||||
|
||||
// Retrieve and remove the data stored for `timestamp`, scanning forward from
// the read cursor. Every entry skipped over during the scan is discarded
// (its cursor position is consumed), so entries older than `timestamp` are
// forgotten as a side effect. Returns NULL when the timestamp is not found.
// NOTE(review): the early-out below assumes timestamps are monotonically
// increasing; around a 32-bit RTP timestamp wrap the `>` comparison can bail
// out prematurely — TODO confirm callers reset the map across a wrap.
void*
VCMTimestampMap::Pop(WebRtc_UWord32 timestamp)
{
    while (!IsEmpty())
    {
        if (_map[_nextPopIx].timestamp == timestamp)
        {
            // found start time for this timestamp
            void* data = _map[_nextPopIx].data;
            _map[_nextPopIx].data = NULL;
            _nextPopIx = (_nextPopIx + 1) % _length;
            return data;
        }
        else if (_map[_nextPopIx].timestamp > timestamp)
        {
            // the timestamp we are looking for is not in the list
            // (entries are stored in insertion order, presumed ascending)
            assert(_nextPopIx < _length && _nextPopIx >= 0);
            return NULL;
        }

        // not in this position, check next (and forget this position)
        _nextPopIx = (_nextPopIx + 1) % _length;
    }

    // could not find matching timestamp in list
    assert(_nextPopIx < _length && _nextPopIx >= 0);
    return NULL;
}
|
||||
|
||||
// Check if no timers are currently running
|
||||
bool
|
||||
VCMTimestampMap::IsEmpty() const
|
||||
{
|
||||
return (_nextAddIx == _nextPopIx);
|
||||
}
|
||||
|
||||
}
|
||||
52
modules/video_coding/main/source/timestamp_map.h
Normal file
52
modules/video_coding/main/source/timestamp_map.h
Normal file
@ -0,0 +1,52 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
// One entry of the map: an RTP timestamp paired with an opaque user pointer.
// The map does not own the pointed-to data.
struct VCMTimestampDataTuple
{
    WebRtc_UWord32 timestamp;
    void*          data;
};

// Fixed-capacity circular map from RTP timestamps to opaque data pointers.
// Entries are expected to be added in timestamp order and popped in order;
// when full, the oldest entry is silently dropped. Not thread-safe.
class VCMTimestampMap
{
public:
    // Constructor. Optional parameter specifies maximum number of
    // timestamps in map.
    VCMTimestampMap(const WebRtc_Word32 length = 10);

    // Destructor.
    ~VCMTimestampMap();

    // Empty the map
    void Reset();

    // Store data for timestamp; returns 0 on success, -1 when the oldest
    // entry had to be dropped to make room.
    WebRtc_Word32 Add(WebRtc_UWord32 timestamp, void* data);
    // Retrieve and remove the data for timestamp (NULL if absent); older
    // entries passed over during the search are discarded.
    void* Pop(WebRtc_UWord32 timestamp);

private:
    // True when the read cursor equals the write cursor.
    bool IsEmpty() const;

    VCMTimestampDataTuple* _map;       // circular buffer of _length entries
    WebRtc_Word32          _nextAddIx; // write cursor
    WebRtc_Word32          _nextPopIx; // read cursor
    WebRtc_Word32          _length;    // buffer capacity
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
|
||||
333
modules/video_coding/main/source/timing.cc
Normal file
333
modules/video_coding/main/source/timing.cc
Normal file
@ -0,0 +1,333 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "trace.h"
|
||||
#include "internal_defines.h"
|
||||
#include "jitter_buffer_common.h"
|
||||
#include "timing.h"
|
||||
#include "timestamp_extrapolator.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Constructor. When masterTiming is NULL this instance is the master timing
// component: it owns a newly allocated timestamp extrapolator. Otherwise it
// is a dual component that shares (borrows) the master's extrapolator, so
// the master must outlive this instance.
VCMTiming::VCMTiming(WebRtc_Word32 vcmId, WebRtc_Word32 timingId, VCMTiming* masterTiming)
:
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
_vcmId(vcmId),
_timingId(timingId),
_master(false),
_tsExtrapolator(),
_codecTimer(),
_renderDelayMs(kDefaultRenderDelayMs),
_minTotalDelayMs(0),
_requiredDelayMs(0),
_currentDelayMs(0),
_prevFrameTimestamp(0)
{
    if (masterTiming == NULL)
    {
        _master = true;
        _tsExtrapolator = new VCMTimestampExtrapolator(vcmId, timingId);
    }
    else
    {
        // Dual component: share the master's extrapolator, do not own it.
        _tsExtrapolator = masterTiming->_tsExtrapolator;
    }
}
|
||||
|
||||
// Destructor. Only the master deletes the extrapolator, since dual timing
// components merely borrow the master's instance (see constructor).
VCMTiming::~VCMTiming()
{
    if (_master)
    {
        delete _tsExtrapolator;
    }
    // The critical section was heap-allocated via the factory in the
    // constructor and is owned through a reference.
    delete &_critSect;
}
|
||||
|
||||
void
|
||||
VCMTiming::Reset(WebRtc_Word64 nowMs /* = -1 */)
|
||||
{
|
||||
CriticalSectionScoped cs(_critSect);
|
||||
if (nowMs > -1)
|
||||
{
|
||||
_tsExtrapolator->Reset(nowMs);
|
||||
}
|
||||
else
|
||||
{
|
||||
_tsExtrapolator->Reset();
|
||||
}
|
||||
_codecTimer.Reset();
|
||||
_renderDelayMs = kDefaultRenderDelayMs;
|
||||
_minTotalDelayMs = 0;
|
||||
_requiredDelayMs = 0;
|
||||
_currentDelayMs = 0;
|
||||
_prevFrameTimestamp = 0;
|
||||
}
|
||||
|
||||
void VCMTiming::ResetDecodeTime()
|
||||
{
|
||||
_codecTimer.Reset();
|
||||
}
|
||||
|
||||
void
|
||||
VCMTiming::SetRenderDelay(WebRtc_UWord32 renderDelayMs)
|
||||
{
|
||||
CriticalSectionScoped cs(_critSect);
|
||||
_renderDelayMs = renderDelayMs;
|
||||
}
|
||||
|
||||
void
|
||||
VCMTiming::SetMinimumTotalDelay(WebRtc_UWord32 minTotalDelayMs)
|
||||
{
|
||||
CriticalSectionScoped cs(_critSect);
|
||||
_minTotalDelayMs = minTotalDelayMs;
|
||||
}
|
||||
|
||||
void
|
||||
VCMTiming::SetRequiredDelay(WebRtc_UWord32 requiredDelayMs)
|
||||
{
|
||||
CriticalSectionScoped cs(_critSect);
|
||||
if (requiredDelayMs != _requiredDelayMs)
|
||||
{
|
||||
if (_master)
|
||||
{
|
||||
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
|
||||
"Desired jitter buffer level: %u ms", requiredDelayMs);
|
||||
}
|
||||
_requiredDelayMs = requiredDelayMs;
|
||||
}
|
||||
}
|
||||
|
||||
// Moves _currentDelayMs toward the target delay, rate-limited by the time
// elapsed since the previous frame (measured in 90 kHz RTP ticks between
// frameTimestamp and _prevFrameTimestamp — the /90000 converts to seconds).
void VCMTiming::UpdateCurrentDelay(WebRtc_UWord32 frameTimestamp)
{
    CriticalSectionScoped cs(_critSect);
    WebRtc_UWord32 targetDelayMs = TargetDelayInternal();

    // Make sure we try to sync with audio
    if (targetDelayMs < _minTotalDelayMs)
    {
        targetDelayMs = _minTotalDelayMs;
    }

    if (_currentDelayMs == 0)
    {
        // Not initialized, set current delay to target.
        _currentDelayMs = targetDelayMs;
    }
    else if (targetDelayMs != _currentDelayMs)
    {
        WebRtc_Word64 delayDiffMs = static_cast<WebRtc_Word64>(targetDelayMs) -
            _currentDelayMs;
        // Never change the delay with more than 100 ms every second. If we're changing the
        // delay in too large steps we will get noticable freezes. By limiting the change we
        // can increase the delay in smaller steps, which will be experienced as the video is
        // played in slow motion. When lowering the delay the video will be played at a faster
        // pace.
        WebRtc_Word64 maxChangeMs = 0;
        // Detect a 32-bit RTP timestamp wrap: new timestamp is just past zero
        // while the previous one was just below the 32-bit ceiling.
        if (frameTimestamp < 0x0000ffff && _prevFrameTimestamp > 0xffff0000)
        {
            // wrap
            maxChangeMs = kDelayMaxChangeMsPerS * (frameTimestamp +
                (static_cast<WebRtc_Word64>(1)<<32) - _prevFrameTimestamp) / 90000;
        }
        else
        {
            maxChangeMs = kDelayMaxChangeMsPerS *
                (frameTimestamp - _prevFrameTimestamp) / 90000;
        }
        if (maxChangeMs <= 0)
        {
            // Any changes less than 1 ms are truncated and
            // will be postponed. Negative change will be due
            // to reordering and should be ignored.
            // NOTE(review): both operands of the subtraction above are
            // unsigned 32-bit, so a reordered (older) timestamp presumably
            // yields a huge positive maxChangeMs rather than a negative one
            // — TODO confirm intended behavior on reordering.
            return;
        }
        else if (delayDiffMs < -maxChangeMs)
        {
            delayDiffMs = -maxChangeMs;
        }
        else if (delayDiffMs > maxChangeMs)
        {
            delayDiffMs = maxChangeMs;
        }
        _currentDelayMs = _currentDelayMs + static_cast<WebRtc_Word32>(delayDiffMs);
    }
    _prevFrameTimestamp = frameTimestamp;
}
|
||||
|
||||
// Increases _currentDelayMs when a frame was decoded later than its deadline
// (render time minus decode time minus render delay), capped at the target
// delay. Early decodes (delayedMs < 0) never decrease the delay here.
void VCMTiming::UpdateCurrentDelay(WebRtc_Word64 renderTimeMs,
                                   WebRtc_Word64 actualDecodeTimeMs)
{
    CriticalSectionScoped cs(_critSect);
    WebRtc_UWord32 targetDelayMs = TargetDelayInternal();
    // Make sure we try to sync with audio
    if (targetDelayMs < _minTotalDelayMs)
    {
        targetDelayMs = _minTotalDelayMs;
    }
    // How late the decode started relative to its deadline.
    WebRtc_Word64 delayedMs = actualDecodeTimeMs -
        (renderTimeMs - MaxDecodeTimeMs() - _renderDelayMs);
    if (delayedMs < 0)
    {
        // Decoded ahead of schedule; nothing to absorb.
        return;
    }
    else if (_currentDelayMs + delayedMs <= targetDelayMs)
    {
        _currentDelayMs += static_cast<WebRtc_UWord32>(delayedMs);
    }
    else
    {
        // Clamp to the target so lateness cannot grow the delay unboundedly.
        _currentDelayMs = targetDelayMs;
    }
}
|
||||
|
||||
// Stops the decode timer for the frame with the given timestamp and feeds
// the measured decode time into the codec timer statistics.
// Returns 0 on success, or the negative timer error code on failure.
WebRtc_Word32
VCMTiming::StopDecodeTimer(WebRtc_UWord32 timeStamp,
                           WebRtc_Word64 startTimeMs,
                           WebRtc_Word64 nowMs)
{
    CriticalSectionScoped cs(_critSect);
    // Snapshot the current decode-time estimate for tracing below.
    const WebRtc_Word32 maxDecTime = MaxDecodeTimeMs();
    WebRtc_Word32 timeDiffMs = _codecTimer.StopTimer(startTimeMs, nowMs);
    if (timeDiffMs < 0)
    {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
            "Codec timer error: %d", timeDiffMs);
        return timeDiffMs;
    }

    if (_master)
    {
        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
            "Frame decoded: timeStamp=%u decTime=%d maxDecTime=%u, at %u",
            timeStamp, timeDiffMs, maxDecTime, MaskWord64ToUWord32(nowMs));
    }
    return 0;
}
|
||||
|
||||
void
|
||||
VCMTiming::IncomingTimestamp(WebRtc_UWord32 timeStamp, WebRtc_Word64 nowMs)
|
||||
{
|
||||
CriticalSectionScoped cs(_critSect);
|
||||
_tsExtrapolator->Update(nowMs, timeStamp, _master);
|
||||
}
|
||||
|
||||
// Returns the receiver system time (ms) at which the frame with the given
// timestamp should be rendered, or a negative value if the estimate was
// rejected (see RenderTimeMsInternal). The master component traces the
// result and its delay components.
WebRtc_Word64
VCMTiming::RenderTimeMs(WebRtc_UWord32 frameTimestamp, WebRtc_Word64 nowMs) const
{
    CriticalSectionScoped cs(_critSect);
    const WebRtc_Word64 renderTimeMs = RenderTimeMsInternal(frameTimestamp, nowMs);
    if (renderTimeMs < 0)
    {
        return renderTimeMs;
    }
    if (_master)
    {
        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
            "Render frame %u at %u. Render delay %u, required delay %u,"
            " max decode time %u, min total delay %u",
            frameTimestamp, MaskWord64ToUWord32(renderTimeMs), _renderDelayMs,
            _requiredDelayMs, MaxDecodeTimeMs(),_minTotalDelayMs);
    }
    return renderTimeMs;
}
|
||||
|
||||
// Computes the render time for frameTimestamp: the extrapolated local
// completion time plus the current delay. Returns -1 when the extrapolated
// time is implausibly far (> kMaxVideoDelayMs) in the future, which resets
// nothing here but signals the caller to reject the frame. If the
// extrapolator has no estimate (-1) the current time is used instead.
// Must be called with _critSect held (callers in this file do so).
WebRtc_Word64
VCMTiming::RenderTimeMsInternal(WebRtc_UWord32 frameTimestamp, WebRtc_Word64 nowMs) const
{
    WebRtc_Word64 estimatedCompleteTimeMs =
        _tsExtrapolator->ExtrapolateLocalTime(frameTimestamp);
    if (estimatedCompleteTimeMs - nowMs > kMaxVideoDelayMs)
    {
        if (_master)
        {
            // Bug fix: the format string had no conversion specifiers, so the
            // two arguments were silently dropped from the trace output.
            WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
                "Timestamp %u arrived too early (estimate %u ms), reset statistics",
                frameTimestamp, MaskWord64ToUWord32(estimatedCompleteTimeMs));
        }
        return -1;
    }
    if (_master)
    {
        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
            "ExtrapolateLocalTime(%u)=%u ms",
            frameTimestamp, MaskWord64ToUWord32(estimatedCompleteTimeMs));
    }
    if (estimatedCompleteTimeMs == -1)
    {
        // No extrapolation available yet; fall back to "now".
        estimatedCompleteTimeMs = nowMs;
    }

    return estimatedCompleteTimeMs + _currentDelayMs;
}
|
||||
|
||||
// Must be called from inside a critical section
// Returns the codec timer's required decode time estimate (ms) for the given
// frame type, or -1 (with an error trace) if the estimate is negative.
WebRtc_Word32
VCMTiming::MaxDecodeTimeMs(FrameType frameType /*= kVideoFrameDelta*/) const
{
    const WebRtc_Word32 decodeTimeMs = _codecTimer.RequiredDecodeTimeMs(frameType);

    if (decodeTimeMs < 0)
    {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
            "Negative maximum decode time: %d", decodeTimeMs);
        return -1;
    }
    return decodeTimeMs;
}
|
||||
|
||||
// Returns how long (ms) we can wait for the frame to become complete before
// it must be handed to the decoder: the render deadline minus now, decode
// time and render delay, floored at zero.
WebRtc_UWord32
VCMTiming::MaxWaitingTime(WebRtc_Word64 renderTimeMs, WebRtc_Word64 nowMs) const
{
    CriticalSectionScoped cs(_critSect);

    const WebRtc_Word64 budgetMs = renderTimeMs - nowMs -
        MaxDecodeTimeMs() - _renderDelayMs;

    return (budgetMs > 0) ? static_cast<WebRtc_UWord32>(budgetMs) : 0;
}
|
||||
|
||||
bool
|
||||
VCMTiming::EnoughTimeToDecode(WebRtc_UWord32 availableProcessingTimeMs) const
|
||||
{
|
||||
CriticalSectionScoped cs(_critSect);
|
||||
WebRtc_Word32 maxDecodeTimeMs = MaxDecodeTimeMs();
|
||||
if (maxDecodeTimeMs < 0)
|
||||
{
|
||||
// Haven't decoded any frames yet, try decoding one to get an estimate
|
||||
// of the decode time.
|
||||
return true;
|
||||
}
|
||||
else if (maxDecodeTimeMs == 0)
|
||||
{
|
||||
// Decode time is less than 1, set to 1 for now since
|
||||
// we don't have any better precision. Count ticks later?
|
||||
maxDecodeTimeMs = 1;
|
||||
}
|
||||
return static_cast<WebRtc_Word32>(availableProcessingTimeMs) - maxDecodeTimeMs > 0;
|
||||
}
|
||||
|
||||
// Thread-safe accessor for the current target delay (required delay +
// decode time + render delay).
WebRtc_UWord32
VCMTiming::TargetVideoDelay() const {
    CriticalSectionScoped cs(_critSect);
    return TargetDelayInternal();
}
|
||||
|
||||
// Lock-free core of TargetVideoDelay(); caller must hold _critSect.
WebRtc_UWord32
VCMTiming::TargetDelayInternal() const {
    return _requiredDelayMs + MaxDecodeTimeMs() + _renderDelayMs;
}
|
||||
|
||||
}
|
||||
110
modules/video_coding/main/source/timing.h
Normal file
110
modules/video_coding/main/source/timing.h
Normal file
@ -0,0 +1,110 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_TIMING_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_TIMING_H_
|
||||
|
||||
#include "typedefs.h"
|
||||
#include "critical_section_wrapper.h"
|
||||
#include "codec_timer.h"
|
||||
|
||||
namespace webrtc
|
||||
{
|
||||
|
||||
class VCMTimestampExtrapolator;

// Receive-side timing component: tracks the required, render and decode
// delays and maps RTP timestamps to local render deadlines. All public
// methods take an internal critical section, so the class is safe to call
// from multiple threads. A "dual" instance shares the master's extrapolator.
class VCMTiming
{
public:
    // The primary timing component should be passed
    // if this is the dual timing component.
    VCMTiming(WebRtc_Word32 vcmId = 0,
              WebRtc_Word32 timingId = 0,
              VCMTiming* masterTiming = NULL);
    ~VCMTiming();

    // Resets the timing to the initial state.
    void Reset(WebRtc_Word64 nowMs = -1);
    void ResetDecodeTime();

    // The amount of time needed to render an image. Defaults to 10 ms.
    void SetRenderDelay(WebRtc_UWord32 renderDelayMs);

    // The minimum time the video must be delayed on the receiver to
    // get the desired jitter buffer level.
    void SetRequiredDelay(WebRtc_UWord32 requiredDelayMs);

    // Minimum total delay required to sync video with audio.
    void SetMinimumTotalDelay(WebRtc_UWord32 minTotalDelayMs);

    // Increases or decreases the current delay to get closer to the target delay.
    // Calculates how long it has been since the previous call to this function,
    // and increases/decreases the delay in proportion to the time difference.
    void UpdateCurrentDelay(WebRtc_UWord32 frameTimestamp);

    // Increases or decreases the current delay to get closer to the target delay.
    // Given the actual decode time in ms and the render time in ms for a frame, this
    // function calculates how late the frame is and increases the delay accordingly.
    void UpdateCurrentDelay(WebRtc_Word64 renderTimeMs, WebRtc_Word64 actualDecodeTimeMs);

    // Stops the decoder timer, should be called when the decoder returns a frame
    // or when the decoded frame callback is called.
    WebRtc_Word32 StopDecodeTimer(WebRtc_UWord32 timeStamp,
                                  WebRtc_Word64 startTimeMs,
                                  WebRtc_Word64 nowMs);

    // Used to report that a frame is passed to decoding. Updates the timestamp filter
    // which is used to map between timestamps and receiver system time.
    // NOTE(review): the definition names this parameter nowMs — the two
    // should agree; verify which name is intended.
    void IncomingTimestamp(WebRtc_UWord32 timeStamp, WebRtc_Word64 lastPacketTimeMs);

    // Returns the receiver system time when the frame with timestamp frameTimestamp
    // should be rendered, assuming that the system time currently is nowMs.
    WebRtc_Word64 RenderTimeMs(WebRtc_UWord32 frameTimestamp, WebRtc_Word64 nowMs) const;

    // Returns the maximum time in ms that we can wait for a frame to become complete
    // before we must pass it to the decoder.
    WebRtc_UWord32 MaxWaitingTime(WebRtc_Word64 renderTimeMs, WebRtc_Word64 nowMs) const;

    // Returns the current target delay which is required delay + decode time + render
    // delay.
    WebRtc_UWord32 TargetVideoDelay() const;

    // Calculates whether or not there is enough time to decode a frame given a
    // certain amount of processing time.
    bool EnoughTimeToDecode(WebRtc_UWord32 availableProcessingTimeMs) const;

    enum { kDefaultRenderDelayMs = 10 };
    enum { kDelayMaxChangeMsPerS = 100 };  // rate limit for delay adaptation

protected:
    WebRtc_Word32 MaxDecodeTimeMs(FrameType frameType = kVideoFrameDelta) const;
    WebRtc_Word64 RenderTimeMsInternal(WebRtc_UWord32 frameTimestamp,
                                       WebRtc_Word64 nowMs) const;
    WebRtc_UWord32 TargetDelayInternal() const;

private:
    CriticalSectionWrapper& _critSect;  // owned; guards all mutable state
    WebRtc_Word32 _vcmId;
    WebRtc_Word32 _timingId;
    bool _master;  // true when this instance owns _tsExtrapolator
    VCMTimestampExtrapolator* _tsExtrapolator;  // owned only if _master
    VCMCodecTimer _codecTimer;
    WebRtc_UWord32 _renderDelayMs;
    WebRtc_UWord32 _minTotalDelayMs;
    WebRtc_UWord32 _requiredDelayMs;
    WebRtc_UWord32 _currentDelayMs;
    WebRtc_UWord32 _prevFrameTimestamp;
    // NOTE(review): the two members below are not referenced by any code
    // visible in this file — confirm they are used elsewhere or remove.
    WebRtc_Word64 _startStoragePlaybackMs;
    WebRtc_Word64 _firstStoredRenderTimeMs;
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_TIMING_H_
|
||||
103
modules/video_coding/main/source/video_coding.gyp
Normal file
103
modules/video_coding/main/source/video_coding.gyp
Normal file
@ -0,0 +1,103 @@
|
||||
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# GYP build description for the WebRTC video coding module library.
{
  'includes': [
    '../../../../common_settings.gypi', # Common settings
  ],
  'targets': [
    {
      'target_name': 'webrtc_video_coding',
      'type': '<(library)',
      # Codec implementations and platform wrappers this library links in.
      'dependencies': [
        '../../codecs/i420/main/source/i420.gyp:webrtc_i420',
        '../../codecs/vp8/main/source/vp8.gyp:webrtc_vp8',
        '../../../../common_video/vplib/main/source/vplib.gyp:webrtc_vplib',
        '../../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
      ],
      'include_dirs': [
        '../interface',
        '../../../interface',
        '../../codecs/interface',
      ],
      # Exported to targets that depend on this library.
      'direct_dependent_settings': {
        'include_dirs': [
          '../interface',
          '../../codecs/interface',
        ],
      },
      'sources': [
        # interfaces
        '../interface/video_coding.h',
        '../interface/video_coding_defines.h',

        # headers
        'codec_database.h',
        'codec_timer.h',
        'content_metrics_processing.h',
        'encoded_frame.h',
        'er_tables_xor.h',
        'event.h',
        'exp_filter.h',
        'fec_tables_xor.h',
        'frame_buffer.h',
        'frame_dropper.h',
        'frame_list.h',
        'generic_decoder.h',
        'generic_encoder.h',
        'inter_frame_delay.h',
        'internal_defines.h',
        'jitter_buffer_common.h',
        'jitter_buffer.h',
        'jitter_estimator.h',
        'media_opt_util.h',
        'media_optimization.h',
        'nack_fec_tables.h',
        'packet.h',
        'qm_select_data.h',
        'qm_select.h',
        'receiver.h',
        'rtt_filter.h',
        'session_info.h',
        'tick_time.h',
        'timestamp_extrapolator.h',
        'timestamp_map.h',
        'timing.h',
        'video_coding_impl.h',

        # sources
        'codec_database.cc',
        'codec_timer.cc',
        'content_metrics_processing.cc',
        'encoded_frame.cc',
        'exp_filter.cc',
        'frame_buffer.cc',
        'frame_dropper.cc',
        'frame_list.cc',
        'generic_decoder.cc',
        'generic_encoder.cc',
        'inter_frame_delay.cc',
        'jitter_buffer.cc',
        'jitter_estimator.cc',
        'media_opt_util.cc',
        'media_optimization.cc',
        'packet.cc',
        'qm_select.cc',
        'receiver.cc',
        'rtt_filter.cc',
        'session_info.cc',
        'timestamp_extrapolator.cc',
        'timestamp_map.cc',
        'timing.cc',
        'video_coding_impl.cc',
      ], # source
    },
  ],
}
|
||||
|
||||
# Local Variables:
|
||||
# tab-width:2
|
||||
# indent-tabs-mode:nil
|
||||
# End:
|
||||
# vim: set expandtab tabstop=2 shiftwidth=2:
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user