Lint fix for webrtc/modules/video_coding PART 1!
Trying to submit all changes at once proved impossible since there were too many changes in too many files. The changes to PRESUBMIT.py will be uploaded in the last CL. (original CL: https://codereview.webrtc.org/1528503003/)

BUG=webrtc:5309
TBR=mflodman@webrtc.org

Review URL: https://codereview.webrtc.org/1541803002

Cr-Commit-Position: refs/heads/master@{#11100}
@@ -219,7 +219,8 @@ bool VCMCodecDataBase::SetSendCodec(const VideoCodec* send_codec,
// max is one bit per pixel
new_send_codec.maxBitrate = (static_cast<int>(send_codec->height) *
static_cast<int>(send_codec->width) *
static_cast<int>(send_codec->maxFramerate)) / 1000;
static_cast<int>(send_codec->maxFramerate)) /
1000;
if (send_codec->startBitrate > new_send_codec.maxBitrate) {
// But if the user tries to set a higher start bit rate we will
// increase the max accordingly.
@@ -282,8 +283,8 @@ VideoCodecType VCMCodecDataBase::SendCodec() const {
return send_codec_.codecType;
}

bool VCMCodecDataBase::DeregisterExternalEncoder(
uint8_t payload_type, bool* was_send_codec) {
bool VCMCodecDataBase::DeregisterExternalEncoder(uint8_t payload_type,
bool* was_send_codec) {
assert(was_send_codec);
*was_send_codec = false;
if (encoder_payload_type_ != payload_type) {
@@ -301,8 +302,7 @@ bool VCMCodecDataBase::DeregisterExternalEncoder(
return true;
}

void VCMCodecDataBase::RegisterExternalEncoder(
VideoEncoder* external_encoder,
void VCMCodecDataBase::RegisterExternalEncoder(VideoEncoder* external_encoder,
uint8_t payload_type,
bool internal_source) {
// Since only one encoder can be used at a given time, only one external
@@ -372,8 +372,7 @@ bool VCMCodecDataBase::RequiresEncoderReset(const VideoCodec& new_send_codec) {
++i) {
if (memcmp(&new_send_codec.simulcastStream[i],
&send_codec_.simulcastStream[i],
sizeof(new_send_codec.simulcastStream[i])) !=
0) {
sizeof(new_send_codec.simulcastStream[i])) != 0) {
return true;
}
}
@@ -429,8 +428,7 @@ bool VCMCodecDataBase::DecoderRegistered() const {
return !dec_map_.empty();
}

bool VCMCodecDataBase::RegisterReceiveCodec(
const VideoCodec* receive_codec,
bool VCMCodecDataBase::RegisterReceiveCodec(const VideoCodec* receive_codec,
int number_of_cores,
bool require_key_frame) {
if (number_of_cores < 0) {
@@ -442,14 +440,12 @@ bool VCMCodecDataBase::RegisterReceiveCodec(
return false;
}
VideoCodec* new_receive_codec = new VideoCodec(*receive_codec);
dec_map_[receive_codec->plType] = new VCMDecoderMapItem(new_receive_codec,
number_of_cores,
require_key_frame);
dec_map_[receive_codec->plType] = new VCMDecoderMapItem(
new_receive_codec, number_of_cores, require_key_frame);
return true;
}

bool VCMCodecDataBase::DeregisterReceiveCodec(
uint8_t payload_type) {
bool VCMCodecDataBase::DeregisterReceiveCodec(uint8_t payload_type) {
DecoderMap::iterator it = dec_map_.find(payload_type);
if (it == dec_map_.end()) {
return false;
@@ -497,9 +493,10 @@ VCMGenericDecoder* VCMCodecDataBase::GetDecoder(
return nullptr;
}
VCMReceiveCallback* callback = decoded_frame_callback->UserReceiveCallback();
if (callback) callback->OnIncomingPayloadType(receive_codec_.plType);
if (ptr_decoder_->RegisterDecodeCompleteCallback(decoded_frame_callback)
< 0) {
if (callback)
callback->OnIncomingPayloadType(receive_codec_.plType);
if (ptr_decoder_->RegisterDecodeCompleteCallback(decoded_frame_callback) <
0) {
ReleaseDecoder(ptr_decoder_);
ptr_decoder_ = nullptr;
memset(&receive_codec_, 0, sizeof(VideoCodec));

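The "one bit per pixel" cap reflowed above is easy to sanity-check numerically. A self-contained sketch with hypothetical example values (640x480 at 30 fps, not taken from this CL); the result is in kbps:

#include <cstdio>

int main() {
  int width = 640, height = 480, max_framerate = 30;
  // Same arithmetic as SetSendCodec above: one bit per pixel per frame.
  int max_bitrate_kbps = (height * width * max_framerate) / 1000;
  std::printf("max bitrate: %d kbps\n", max_bitrate_kbps);  // prints 9216
  return 0;
}
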
@@ -12,78 +12,54 @@

#include <assert.h>

namespace webrtc
{
namespace webrtc {

// The first kIgnoredSampleCount samples will be ignored.
static const int32_t kIgnoredSampleCount = 5;

VCMCodecTimer::VCMCodecTimer()
:
_filteredMax(0),
_ignoredSampleCount(0),
_shortMax(0),
_history()
{
: _filteredMax(0), _ignoredSampleCount(0), _shortMax(0), _history() {
Reset();
}

void VCMCodecTimer::Reset()
{
void VCMCodecTimer::Reset() {
_filteredMax = 0;
_ignoredSampleCount = 0;
_shortMax = 0;
for (int i=0; i < MAX_HISTORY_SIZE; i++)
{
for (int i = 0; i < MAX_HISTORY_SIZE; i++) {
_history[i].shortMax = 0;
_history[i].timeMs = -1;
}
}

// Update the max-value filter
void VCMCodecTimer::MaxFilter(int32_t decodeTime, int64_t nowMs)
{
if (_ignoredSampleCount >= kIgnoredSampleCount)
{
void VCMCodecTimer::MaxFilter(int32_t decodeTime, int64_t nowMs) {
if (_ignoredSampleCount >= kIgnoredSampleCount) {
UpdateMaxHistory(decodeTime, nowMs);
ProcessHistory(nowMs);
}
else
{
} else {
_ignoredSampleCount++;
}
}

void
VCMCodecTimer::UpdateMaxHistory(int32_t decodeTime, int64_t now)
{
if (_history[0].timeMs >= 0 &&
now - _history[0].timeMs < SHORT_FILTER_MS)
{
if (decodeTime > _shortMax)
{
void VCMCodecTimer::UpdateMaxHistory(int32_t decodeTime, int64_t now) {
if (_history[0].timeMs >= 0 && now - _history[0].timeMs < SHORT_FILTER_MS) {
if (decodeTime > _shortMax) {
_shortMax = decodeTime;
}
}
else
{
} else {
// Only add a new value to the history once a second
if(_history[0].timeMs == -1)
{
if (_history[0].timeMs == -1) {
// First, no shift
_shortMax = decodeTime;
}
else
{
} else {
// Shift
for(int i = (MAX_HISTORY_SIZE - 2); i >= 0 ; i--)
{
_history[i+1].shortMax = _history[i].shortMax;
_history[i+1].timeMs = _history[i].timeMs;
for (int i = (MAX_HISTORY_SIZE - 2); i >= 0; i--) {
_history[i + 1].shortMax = _history[i].shortMax;
_history[i + 1].timeMs = _history[i].timeMs;
}
}
if (_shortMax == 0)
{
if (_shortMax == 0) {
_shortMax = decodeTime;
}

@@ -93,27 +69,20 @@ VCMCodecTimer::UpdateMaxHistory(int32_t decodeTime, int64_t now)
}
}

void
VCMCodecTimer::ProcessHistory(int64_t nowMs)
{
void VCMCodecTimer::ProcessHistory(int64_t nowMs) {
_filteredMax = _shortMax;
if (_history[0].timeMs == -1)
{
if (_history[0].timeMs == -1) {
return;
}
for (int i=0; i < MAX_HISTORY_SIZE; i++)
{
if (_history[i].timeMs == -1)
{
for (int i = 0; i < MAX_HISTORY_SIZE; i++) {
if (_history[i].timeMs == -1) {
break;
}
if (nowMs - _history[i].timeMs > MAX_HISTORY_SIZE * SHORT_FILTER_MS)
{
if (nowMs - _history[i].timeMs > MAX_HISTORY_SIZE * SHORT_FILTER_MS) {
// This sample (and all samples after this) is too old
break;
}
if (_history[i].shortMax > _filteredMax)
{
if (_history[i].shortMax > _filteredMax) {
// This sample is the largest one this far into the history
_filteredMax = _history[i].shortMax;
}
@@ -121,9 +90,7 @@ VCMCodecTimer::ProcessHistory(int64_t nowMs)
}

// Get the maximum observed time within a time window
int32_t VCMCodecTimer::RequiredDecodeTimeMs(FrameType /*frameType*/) const
{
int32_t VCMCodecTimer::RequiredDecodeTimeMs(FrameType /*frameType*/) const {
return _filteredMax;
}

}
} // namespace webrtc

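For readers skimming the reformatted filter above: the logic amounts to a short-term maximum that is rolled into a bounded history roughly once per SHORT_FILTER_MS, and the reported value is the largest sample still inside the window. A condensed standalone sketch of that idea, assuming a deque-based history and omitting the warm-up that ignores the first kIgnoredSampleCount samples (this is an illustration, not the class itself):

#include <algorithm>
#include <cstdint>
#include <deque>

struct MaxWindowFilter {
  static const int kMaxHistory = 10;    // mirrors MAX_HISTORY_SIZE
  static const int64_t kSlotMs = 1000;  // mirrors SHORT_FILTER_MS
  struct Slot {
    int32_t max;
    int64_t time_ms;
  };
  std::deque<Slot> history;

  void AddSample(int32_t decode_ms, int64_t now_ms) {
    if (!history.empty() && now_ms - history.front().time_ms < kSlotMs) {
      // Still inside the current one-second slot: keep its maximum.
      history.front().max = std::max(history.front().max, decode_ms);
    } else {
      // Start a new slot; drop the oldest one when the history is full.
      history.push_front({decode_ms, now_ms});
      if (static_cast<int>(history.size()) > kMaxHistory)
        history.pop_back();
    }
  }

  int32_t FilteredMax(int64_t now_ms) const {
    int32_t result = 0;
    for (const Slot& s : history) {
      if (now_ms - s.time_ms > kMaxHistory * kSlotMs)
        break;  // this slot (and everything after it) is too old
      result = std::max(result, s.max);
    }
    return result;
  }
};
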
@@ -14,25 +14,22 @@
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"

namespace webrtc
{
namespace webrtc {

// MAX_HISTORY_SIZE * SHORT_FILTER_MS defines the window size in milliseconds
#define MAX_HISTORY_SIZE 10
#define SHORT_FILTER_MS 1000

class VCMShortMaxSample
{
public:
VCMShortMaxSample() : shortMax(0), timeMs(-1) {};
class VCMShortMaxSample {
public:
VCMShortMaxSample() : shortMax(0), timeMs(-1) {}

int32_t shortMax;
int64_t timeMs;
};

class VCMCodecTimer
{
public:
class VCMCodecTimer {
public:
VCMCodecTimer();

// Updates the max filtered decode time.
@@ -44,7 +41,7 @@ public:
// Get the required decode time in ms.
int32_t RequiredDecodeTimeMs(FrameType frameType) const;

private:
private:
void UpdateMaxHistory(int32_t decodeTime, int64_t now);
void ProcessHistory(int64_t nowMs);

@@ -53,7 +50,6 @@ private:
int32_t _ignoredSampleCount;
int32_t _shortMax;
VCMShortMaxSample _history[MAX_HISTORY_SIZE];

};

} // namespace webrtc

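Given the interface above, intended use is to feed each frame's measured decode time and then query the filtered maximum. A hypothetical sketch (the sample values and call pattern are invented for illustration; times are in milliseconds):

webrtc::VCMCodecTimer timer;
timer.MaxFilter(/*decodeTime=*/23, /*nowMs=*/1000);
timer.MaxFilter(/*decodeTime=*/31, /*nowMs=*/2000);
int32_t required_ms = timer.RequiredDecodeTimeMs(webrtc::kVideoFrameDelta);
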
@@ -106,8 +106,7 @@ namespace webrtc {
H264VideoToolboxDecoder::H264VideoToolboxDecoder()
: callback_(nullptr),
video_format_(nullptr),
decompression_session_(nullptr) {
}
decompression_session_(nullptr) {}

H264VideoToolboxDecoder::~H264VideoToolboxDecoder() {
DestroyDecompressionSession();
@@ -129,8 +128,7 @@ int H264VideoToolboxDecoder::Decode(

CMSampleBufferRef sample_buffer = nullptr;
if (!H264AnnexBBufferToCMSampleBuffer(input_image._buffer,
input_image._length,
video_format_,
input_image._length, video_format_,
&sample_buffer)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -206,11 +204,8 @@ int H264VideoToolboxDecoder::ResetDecompressionSession() {
int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
CFNumberRef pixel_format =
CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
CFTypeRef values[attributes_size] = {
kCFBooleanTrue,
io_surface_value,
pixel_format
};
CFTypeRef values[attributes_size] = {kCFBooleanTrue, io_surface_value,
pixel_format};
CFDictionaryRef attributes =
internal::CreateCFDictionary(keys, values, attributes_size);
if (io_surface_value) {

@@ -99,11 +99,7 @@ struct FrameEncodeParams {
int32_t h,
int64_t rtms,
uint32_t ts)
: callback(cb),
width(w),
height(h),
render_time_ms(rtms),
timestamp(ts) {
: callback(cb), width(w), height(h), render_time_ms(rtms), timestamp(ts) {
if (csi) {
codec_specific_info = *csi;
} else {
@@ -146,9 +142,8 @@ bool CopyVideoFrameToPixelBuffer(const webrtc::VideoFrame& frame,
int ret = libyuv::I420ToNV12(
frame.buffer(webrtc::kYPlane), frame.stride(webrtc::kYPlane),
frame.buffer(webrtc::kUPlane), frame.stride(webrtc::kUPlane),
frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane),
dst_y, dst_stride_y, dst_uv, dst_stride_uv,
frame.width(), frame.height());
frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane), dst_y,
dst_stride_y, dst_uv, dst_stride_uv, frame.width(), frame.height());
CVPixelBufferUnlockBaseAddress(pixel_buffer, 0);
if (ret) {
LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
@@ -188,10 +183,8 @@ void VTCompressionOutputCallback(void* encoder,
// TODO(tkchin): Allocate buffers through a pool.
rtc::scoped_ptr<rtc::Buffer> buffer(new rtc::Buffer());
rtc::scoped_ptr<webrtc::RTPFragmentationHeader> header;
if (!H264CMSampleBufferToAnnexBBuffer(sample_buffer,
is_keyframe,
buffer.get(),
header.accept())) {
if (!H264CMSampleBufferToAnnexBBuffer(sample_buffer, is_keyframe,
buffer.get(), header.accept())) {
return;
}
webrtc::EncodedImage frame(buffer->data(), buffer->size(), buffer->size());
@@ -215,8 +208,7 @@ void VTCompressionOutputCallback(void* encoder,
namespace webrtc {

H264VideoToolboxEncoder::H264VideoToolboxEncoder()
: callback_(nullptr), compression_session_(nullptr) {
}
: callback_(nullptr), compression_session_(nullptr) {}

H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
DestroyCompressionSession();
@@ -289,8 +281,8 @@ int H264VideoToolboxEncoder::Encode(
CMTimeMake(input_image.render_time_ms(), 1000);
CFDictionaryRef frame_properties = nullptr;
if (is_keyframe_required) {
CFTypeRef keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
CFTypeRef values[] = { kCFBooleanTrue };
CFTypeRef keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};
CFTypeRef values[] = {kCFBooleanTrue};
frame_properties = internal::CreateCFDictionary(keys, values, 1);
}
rtc::scoped_ptr<internal::FrameEncodeParams> encode_params;
@@ -359,11 +351,8 @@ int H264VideoToolboxEncoder::ResetCompressionSession() {
int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
CFNumberRef pixel_format =
CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
CFTypeRef values[attributes_size] = {
kCFBooleanTrue,
io_surface_value,
pixel_format
};
CFTypeRef values[attributes_size] = {kCFBooleanTrue, io_surface_value,
pixel_format};
CFDictionaryRef source_attributes =
internal::CreateCFDictionary(keys, values, attributes_size);
if (io_surface_value) {
@@ -376,15 +365,11 @@ int H264VideoToolboxEncoder::ResetCompressionSession() {
}
OSStatus status = VTCompressionSessionCreate(
nullptr, // use default allocator
width_,
height_,
kCMVideoCodecType_H264,
width_, height_, kCMVideoCodecType_H264,
nullptr, // use default encoder
source_attributes,
nullptr, // use default compressed data allocator
internal::VTCompressionOutputCallback,
this,
&compression_session_);
internal::VTCompressionOutputCallback, this, &compression_session_);
if (source_attributes) {
CFRelease(source_attributes);
source_attributes = nullptr;

@@ -154,8 +154,7 @@ bool H264CMSampleBufferToAnnexBBuffer(
return true;
}

bool H264AnnexBBufferToCMSampleBuffer(
const uint8_t* annexb_buffer,
bool H264AnnexBBufferToCMSampleBuffer(const uint8_t* annexb_buffer,
size_t annexb_buffer_size,
CMVideoFormatDescriptionRef video_format,
CMSampleBufferRef* out_sample_buffer) {

@@ -9,8 +9,8 @@
*
*/

#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H_

#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"

@@ -39,8 +39,7 @@ bool H264CMSampleBufferToAnnexBBuffer(
// If |is_keyframe| is true then |video_format| is ignored since the format will
// be read from the buffer. Otherwise |video_format| must be provided.
// Caller is responsible for releasing the created sample buffer.
bool H264AnnexBBufferToCMSampleBuffer(
const uint8_t* annexb_buffer,
bool H264AnnexBBufferToCMSampleBuffer(const uint8_t* annexb_buffer,
size_t annexb_buffer_size,
CMVideoFormatDescriptionRef video_format,
CMSampleBufferRef* out_sample_buffer);
@@ -97,4 +96,4 @@ class AvccBufferWriter final {
} // namespace webrtc

#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H_

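As a usage note for the declaration above: per the header comment, |video_format| may be ignored for keyframes (the format is read from the buffer itself), and the caller must release the created sample buffer. A hedged call-site sketch; annexb_data, annexb_size and video_format are placeholders, not names from this CL:

CMSampleBufferRef sample_buffer = nullptr;
if (webrtc::H264AnnexBBufferToCMSampleBuffer(annexb_data, annexb_size,
                                             video_format, &sample_buffer)) {
  // ... hand the sample buffer to the decompression session ...
  CFRelease(sample_buffer);  // caller owns the created buffer
}
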
@@ -21,20 +21,19 @@ const size_t kI420HeaderSize = 4;

namespace webrtc {

I420Encoder::I420Encoder() : _inited(false), _encodedImage(),
_encodedCompleteCallback(NULL) {
}
I420Encoder::I420Encoder()
: _inited(false), _encodedImage(), _encodedCompleteCallback(NULL) {}

I420Encoder::~I420Encoder() {
_inited = false;
delete [] _encodedImage._buffer;
delete[] _encodedImage._buffer;
}

int I420Encoder::Release() {
// Should allocate an encoded frame and then release it here, for that we
// actually need an init flag.
if (_encodedImage._buffer != NULL) {
delete [] _encodedImage._buffer;
delete[] _encodedImage._buffer;
_encodedImage._buffer = NULL;
}
_inited = false;
@@ -53,7 +52,7 @@ int I420Encoder::InitEncode(const VideoCodec* codecSettings,

// Allocating encoded memory.
if (_encodedImage._buffer != NULL) {
delete [] _encodedImage._buffer;
delete[] _encodedImage._buffer;
_encodedImage._buffer = NULL;
_encodedImage._size = 0;
}
@@ -101,18 +100,18 @@ int I420Encoder::Encode(const VideoFrame& inputImage,
kI420HeaderSize;
if (_encodedImage._size > req_length) {
// Reallocate buffer.
delete [] _encodedImage._buffer;
delete[] _encodedImage._buffer;

_encodedImage._buffer = new uint8_t[req_length];
_encodedImage._size = req_length;
}

uint8_t *buffer = _encodedImage._buffer;
uint8_t* buffer = _encodedImage._buffer;

buffer = InsertHeader(buffer, width, height);

int ret_length = ExtractBuffer(inputImage, req_length - kI420HeaderSize,
buffer);
int ret_length =
ExtractBuffer(inputImage, req_length - kI420HeaderSize, buffer);
if (ret_length < 0)
return WEBRTC_VIDEO_CODEC_MEMORY;
_encodedImage._length = ret_length + kI420HeaderSize;
@@ -121,7 +120,8 @@ int I420Encoder::Encode(const VideoFrame& inputImage,
return WEBRTC_VIDEO_CODEC_OK;
}

uint8_t* I420Encoder::InsertHeader(uint8_t *buffer, uint16_t width,
uint8_t* I420Encoder::InsertHeader(uint8_t* buffer,
uint16_t width,
uint16_t height) {
*buffer++ = static_cast<uint8_t>(width >> 8);
*buffer++ = static_cast<uint8_t>(width & 0xFF);
@@ -130,29 +130,28 @@ uint8_t* I420Encoder::InsertHeader(uint8_t *buffer, uint16_t width,
return buffer;
}

int
I420Encoder::RegisterEncodeCompleteCallback(EncodedImageCallback* callback) {
int I420Encoder::RegisterEncodeCompleteCallback(
EncodedImageCallback* callback) {
_encodedCompleteCallback = callback;
return WEBRTC_VIDEO_CODEC_OK;
}


I420Decoder::I420Decoder() : _decodedImage(), _width(0), _height(0),
_inited(false), _decodeCompleteCallback(NULL) {
}
I420Decoder::I420Decoder()
: _decodedImage(),
_width(0),
_height(0),
_inited(false),
_decodeCompleteCallback(NULL) {}

I420Decoder::~I420Decoder() {
Release();
}

int
I420Decoder::Reset() {
int I420Decoder::Reset() {
return WEBRTC_VIDEO_CODEC_OK;
}


int
I420Decoder::InitDecode(const VideoCodec* codecSettings,
int I420Decoder::InitDecode(const VideoCodec* codecSettings,
int /*numberOfCores */) {
if (codecSettings == NULL) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
@@ -165,7 +164,8 @@ I420Decoder::InitDecode(const VideoCodec* codecSettings,
return WEBRTC_VIDEO_CODEC_OK;
}

int I420Decoder::Decode(const EncodedImage& inputImage, bool /*missingFrames*/,
int I420Decoder::Decode(const EncodedImage& inputImage,
bool /*missingFrames*/,
const RTPFragmentationHeader* /*fragmentation*/,
const CodecSpecificInfo* /*codecSpecificInfo*/,
int64_t /*renderTimeMs*/) {
@@ -203,8 +203,8 @@ int I420Decoder::Decode(const EncodedImage& inputImage, bool /*missingFrames*/,
}
// Set decoded image parameters.
int half_width = (_width + 1) / 2;
_decodedImage.CreateEmptyFrame(_width, _height,
_width, half_width, half_width);
_decodedImage.CreateEmptyFrame(_width, _height, _width, half_width,
half_width);
// Converting from buffer to plane representation.
int ret = ConvertToI420(kI420, buffer, 0, 0, _width, _height, 0,
kVideoRotation_0, &_decodedImage);
@@ -218,7 +218,8 @@ int I420Decoder::Decode(const EncodedImage& inputImage, bool /*missingFrames*/,
}

const uint8_t* I420Decoder::ExtractHeader(const uint8_t* buffer,
uint16_t* width, uint16_t* height) {
uint16_t* width,
uint16_t* height) {
*width = static_cast<uint16_t>(*buffer++) << 8;
*width |= *buffer++;
*height = static_cast<uint16_t>(*buffer++) << 8;

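The header manipulated by InsertHeader/ExtractHeader above is four bytes (kI420HeaderSize): width then height, each as a big-endian 16-bit value. A standalone round-trip sketch of the same layout:

#include <cstdint>

// Pack width/height the same way I420Encoder::InsertHeader does.
void PackI420Header(uint8_t* buffer, uint16_t width, uint16_t height) {
  buffer[0] = static_cast<uint8_t>(width >> 8);
  buffer[1] = static_cast<uint8_t>(width & 0xFF);
  buffer[2] = static_cast<uint8_t>(height >> 8);
  buffer[3] = static_cast<uint8_t>(height & 0xFF);
}

// Recover them the same way I420Decoder::ExtractHeader does.
void UnpackI420Header(const uint8_t* buffer, uint16_t* width,
                      uint16_t* height) {
  *width = static_cast<uint16_t>((buffer[0] << 8) | buffer[1]);
  *height = static_cast<uint16_t>((buffer[2] << 8) | buffer[3]);
}
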
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/

#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_MAIN_INTERFACE_I420_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_MAIN_INTERFACE_I420_H_
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_INCLUDE_I420_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_INCLUDE_I420_H_

#include <vector>

@@ -24,45 +24,45 @@ class I420Encoder : public VideoEncoder {

virtual ~I420Encoder();

// Initialize the encoder with the information from the VideoCodec.
//
// Input:
// - codecSettings : Codec settings.
// - numberOfCores : Number of cores available for the encoder.
// - maxPayloadSize : The maximum size each payload is allowed
// to have. Usually MTU - overhead.
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK.
// <0 - Error
// Initialize the encoder with the information from the VideoCodec.
//
// Input:
// - codecSettings : Codec settings.
// - numberOfCores : Number of cores available for the encoder.
// - maxPayloadSize : The maximum size each payload is allowed
// to have. Usually MTU - overhead.
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK.
// <0 - Error
int InitEncode(const VideoCodec* codecSettings,
int /*numberOfCores*/,
size_t /*maxPayloadSize*/) override;

// "Encode" an I420 image (as a part of a video stream). The encoded image
// will be returned to the user via the encode complete callback.
//
// Input:
// - inputImage : Image to be encoded.
// - codecSpecificInfo : Pointer to codec specific data.
// - frameType : Frame type to be sent (Key /Delta).
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK.
// <0 - Error
// "Encode" an I420 image (as a part of a video stream). The encoded image
// will be returned to the user via the encode complete callback.
//
// Input:
// - inputImage : Image to be encoded.
// - codecSpecificInfo : Pointer to codec specific data.
// - frameType : Frame type to be sent (Key /Delta).
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK.
// <0 - Error
int Encode(const VideoFrame& inputImage,
const CodecSpecificInfo* /*codecSpecificInfo*/,
const std::vector<FrameType>* /*frame_types*/) override;

// Register an encode complete callback object.
//
// Input:
// - callback : Callback object which handles encoded images.
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
// Register an encode complete callback object.
//
// Input:
// - callback : Callback object which handles encoded images.
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;

// Free encoder memory.
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
// Free encoder memory.
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
int Release() override;

int SetRates(uint32_t /*newBitRate*/, uint32_t /*frameRate*/) override {
@@ -76,7 +76,8 @@ class I420Encoder : public VideoEncoder {
void OnDroppedFrame() override {}

private:
static uint8_t* InsertHeader(uint8_t* buffer, uint16_t width,
static uint8_t* InsertHeader(uint8_t* buffer,
uint16_t width,
uint16_t height);

bool _inited;
@@ -90,50 +91,50 @@ class I420Decoder : public VideoDecoder {

virtual ~I420Decoder();

// Initialize the decoder.
// The user must notify the codec of width and height values.
//
// Return value : WEBRTC_VIDEO_CODEC_OK.
// <0 - Errors
// Initialize the decoder.
// The user must notify the codec of width and height values.
//
// Return value : WEBRTC_VIDEO_CODEC_OK.
// <0 - Errors
int InitDecode(const VideoCodec* codecSettings,
int /*numberOfCores*/) override;

// Decode encoded image (as a part of a video stream). The decoded image
// will be returned to the user through the decode complete callback.
//
// Input:
// - inputImage : Encoded image to be decoded
// - missingFrames : True if one or more frames have been lost
// since the previous decode call.
// - codecSpecificInfo : pointer to specific codec data
// - renderTimeMs : Render time in Ms
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK
// <0 - Error
// Decode encoded image (as a part of a video stream). The decoded image
// will be returned to the user through the decode complete callback.
//
// Input:
// - inputImage : Encoded image to be decoded
// - missingFrames : True if one or more frames have been lost
// since the previous decode call.
// - codecSpecificInfo : pointer to specific codec data
// - renderTimeMs : Render time in Ms
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK
// <0 - Error
int Decode(const EncodedImage& inputImage,
bool missingFrames,
const RTPFragmentationHeader* /*fragmentation*/,
const CodecSpecificInfo* /*codecSpecificInfo*/,
int64_t /*renderTimeMs*/) override;

// Register a decode complete callback object.
//
// Input:
// - callback : Callback object which handles decoded images.
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
// Register a decode complete callback object.
//
// Input:
// - callback : Callback object which handles decoded images.
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;

// Free decoder memory.
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK.
// <0 - Error
// Free decoder memory.
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK.
// <0 - Error
int Release() override;

// Reset decoder state and prepare for a new call.
//
// Return value : WEBRTC_VIDEO_CODEC_OK.
// <0 - Error
// Reset decoder state and prepare for a new call.
//
// Return value : WEBRTC_VIDEO_CODEC_OK.
// <0 - Error
int Reset() override;

private:
@@ -150,4 +151,4 @@ class I420Decoder : public VideoDecoder {

} // namespace webrtc

#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_MAIN_INTERFACE_I420_H_
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_INCLUDE_I420_H_

@@ -8,12 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/

#ifndef WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
#define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_

#pragma message("WARNING: video_coding/codecs/interface is DEPRECATED; use video_coding/include")
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_

#pragma message("WARNING: video_coding/codecs/interface is DEPRECATED; "
"use video_coding/include")
#include <string>
#include <vector>

#include "testing/gmock/include/gmock/gmock.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
@@ -23,15 +24,17 @@ namespace webrtc {

class MockEncodedImageCallback : public EncodedImageCallback {
public:
MOCK_METHOD3(Encoded, int32_t(const EncodedImage& encodedImage,
MOCK_METHOD3(Encoded,
int32_t(const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo,
const RTPFragmentationHeader* fragmentation));
};

class MockVideoEncoder : public VideoEncoder {
public:
MOCK_CONST_METHOD2(Version, int32_t(int8_t *version, int32_t length));
MOCK_METHOD3(InitEncode, int32_t(const VideoCodec* codecSettings,
MOCK_CONST_METHOD2(Version, int32_t(int8_t* version, int32_t length));
MOCK_METHOD3(InitEncode,
int32_t(const VideoCodec* codecSettings,
int32_t numberOfCores,
size_t maxPayloadSize));
MOCK_METHOD3(Encode,
@@ -49,20 +52,20 @@ class MockVideoEncoder : public VideoEncoder {

class MockDecodedImageCallback : public DecodedImageCallback {
public:
MOCK_METHOD1(Decoded, int32_t(VideoFrame& decodedImage));
MOCK_METHOD2(Decoded, int32_t(VideoFrame& decodedImage,
int64_t decode_time_ms));
MOCK_METHOD1(Decoded, int32_t(const VideoFrame& decodedImage));
MOCK_METHOD2(Decoded,
int32_t(const VideoFrame& decodedImage, int64_t decode_time_ms));
MOCK_METHOD1(ReceivedDecodedReferenceFrame,
int32_t(const uint64_t pictureId));
MOCK_METHOD1(ReceivedDecodedFrame,
int32_t(const uint64_t pictureId));
MOCK_METHOD1(ReceivedDecodedFrame, int32_t(const uint64_t pictureId));
};

class MockVideoDecoder : public VideoDecoder {
public:
MOCK_METHOD2(InitDecode, int32_t(const VideoCodec* codecSettings,
int32_t numberOfCores));
MOCK_METHOD5(Decode, int32_t(const EncodedImage& inputImage,
MOCK_METHOD2(InitDecode,
int32_t(const VideoCodec* codecSettings, int32_t numberOfCores));
MOCK_METHOD5(Decode,
int32_t(const EncodedImage& inputImage,
bool missingFrames,
const RTPFragmentationHeader* fragmentation,
const CodecSpecificInfo* codecSpecificInfo,
@@ -76,4 +79,4 @@ class MockVideoDecoder : public VideoDecoder {

} // namespace webrtc

#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_

@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/

#ifndef WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
#define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_

#pragma message("WARNING: video_coding/codecs/interface is DEPRECATED; use video_coding/include")
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H_

#pragma message("WARNING: video_coding/codecs/interface is DEPRECATED; "
"use video_coding/include")
#include <vector>

#include "webrtc/common_types.h"
@@ -23,8 +23,7 @@
#include "webrtc/video_encoder.h"
#include "webrtc/video_frame.h"

namespace webrtc
{
namespace webrtc {

class RTPFragmentationHeader; // forward declaration

@@ -92,12 +91,11 @@ union CodecSpecificInfoUnion {
// Note: if any pointers are added to this struct or its sub-structs, it
// must be fitted with a copy-constructor. This is because it is copied
// in the copy-constructor of VCMEncodedFrame.
struct CodecSpecificInfo
{
struct CodecSpecificInfo {
VideoCodecType codecType;
CodecSpecificInfoUnion codecSpecific;
};

} // namespace webrtc

#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H_

@@ -8,10 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/

#ifndef WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
#define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H_

#pragma message("WARNING: video_coding/codecs/interface is DEPRECATED; use video_coding/include")
#pragma message("WARNING: video_coding/codecs/interface is DEPRECATED; "
"use video_coding/include")

// NOTE: in sync with video_coding_module_defines.h

@@ -31,4 +32,4 @@
#define WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE -13
#define WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT -14

#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H_

@@ -91,9 +91,9 @@ inline double PacketManipulatorImpl::RandomUniform() {
// get the same behavior as long as we're using a fixed initial seed.
critsect_->Enter();
srand(random_seed_);
random_seed_ = rand();
random_seed_ = rand(); // NOLINT (rand_r instead of rand)
critsect_->Leave();
return (random_seed_ + 1.0)/(RAND_MAX + 1.0);
return (random_seed_ + 1.0) / (RAND_MAX + 1.0);
}

const char* PacketLossModeToStr(PacketLossMode e) {
@@ -109,4 +109,4 @@ const char* PacketLossModeToStr(PacketLossMode e) {
}

} // namespace test
} // namespace webrtcc
} // namespace webrtc

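The reseeding idiom kept (and merely reflowed) above is what makes the "random" packet loss reproducible: each call reseeds rand() with the previous output, so a fixed initial seed yields a fixed sequence. A condensed sketch of the same arithmetic, with the seed passed explicitly instead of stored in a member:

#include <cstdlib>

// Deterministic uniform value in (0.0, 1.0]; *seed carries the state across
// calls, as random_seed_ does in PacketManipulatorImpl.
double NextUniform(unsigned int* seed) {
  std::srand(*seed);
  *seed = std::rand();
  return (*seed + 1.0) / (RAND_MAX + 1.0);
}
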
@@ -36,10 +36,11 @@ const char* PacketLossModeToStr(PacketLossMode e);
// scenarios caused by network interference.
struct NetworkingConfig {
NetworkingConfig()
: packet_size_in_bytes(1500), max_payload_size_in_bytes(1440),
packet_loss_mode(kUniform), packet_loss_probability(0.0),
packet_loss_burst_length(1) {
}
: packet_size_in_bytes(1500),
max_payload_size_in_bytes(1440),
packet_loss_mode(kUniform),
packet_loss_probability(0.0),
packet_loss_burst_length(1) {}

// Packet size in bytes. Default: 1500 bytes.
size_t packet_size_in_bytes;
@@ -93,9 +94,11 @@ class PacketManipulatorImpl : public PacketManipulator {
virtual ~PacketManipulatorImpl();
int ManipulatePackets(webrtc::EncodedImage* encoded_image) override;
virtual void InitializeRandomSeed(unsigned int seed);

protected:
// Returns a uniformly distributed random value between 0.0 and 1.0
virtual double RandomUniform();

private:
PacketReader* packet_reader_;
const NetworkingConfig& config_;

@@ -25,7 +25,7 @@ const double kNeverDropProbability = 0.0;
const double kAlwaysDropProbability = 1.0;
const int kBurstLength = 1;

class PacketManipulatorTest: public PacketRelatedTest {
class PacketManipulatorTest : public PacketRelatedTest {
protected:
PacketReader packet_reader_;
EncodedImage image_;
@@ -50,19 +50,15 @@ class PacketManipulatorTest: public PacketRelatedTest {

virtual ~PacketManipulatorTest() {}

void SetUp() {
PacketRelatedTest::SetUp();
}
void SetUp() { PacketRelatedTest::SetUp(); }

void TearDown() {
PacketRelatedTest::TearDown();
}
void TearDown() { PacketRelatedTest::TearDown(); }

void VerifyPacketLoss(int expected_nbr_packets_dropped,
int actual_nbr_packets_dropped,
size_t expected_packet_data_length,
uint8_t* expected_packet_data,
EncodedImage& actual_image) {
const EncodedImage& actual_image) {
EXPECT_EQ(expected_nbr_packets_dropped, actual_nbr_packets_dropped);
EXPECT_EQ(expected_packet_data_length, image_._length);
EXPECT_EQ(0, memcmp(expected_packet_data, actual_image._buffer,
@@ -77,8 +73,8 @@ TEST_F(PacketManipulatorTest, Constructor) {
TEST_F(PacketManipulatorTest, DropNone) {
PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
VerifyPacketLoss(0, nbr_packets_dropped, kPacketDataLength,
packet_data_, image_);
VerifyPacketLoss(0, nbr_packets_dropped, kPacketDataLength, packet_data_,
image_);
}

TEST_F(PacketManipulatorTest, UniformDropNoneSmallFrame) {
@@ -87,15 +83,14 @@ TEST_F(PacketManipulatorTest, UniformDropNoneSmallFrame) {
PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);

VerifyPacketLoss(0, nbr_packets_dropped, data_length,
packet_data_, image_);
VerifyPacketLoss(0, nbr_packets_dropped, data_length, packet_data_, image_);
}

TEST_F(PacketManipulatorTest, UniformDropAll) {
PacketManipulatorImpl manipulator(&packet_reader_, drop_config_, false);
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
VerifyPacketLoss(kPacketDataNumberOfPackets, nbr_packets_dropped,
0, packet_data_, image_);
VerifyPacketLoss(kPacketDataNumberOfPackets, nbr_packets_dropped, 0,
packet_data_, image_);
}

// Use our customized test class to make the second packet being lost

@@ -19,13 +19,11 @@ namespace webrtc {
namespace test {

PredictivePacketManipulator::PredictivePacketManipulator(
PacketReader* packet_reader, const NetworkingConfig& config)
: PacketManipulatorImpl(packet_reader, config, false) {
}

PredictivePacketManipulator::~PredictivePacketManipulator() {
}
PacketReader* packet_reader,
const NetworkingConfig& config)
: PacketManipulatorImpl(packet_reader, config, false) {}

PredictivePacketManipulator::~PredictivePacketManipulator() {}

void PredictivePacketManipulator::AddRandomResult(double result) {
assert(result >= 0.0 && result <= 1.0);
@@ -33,8 +31,9 @@ void PredictivePacketManipulator::AddRandomResult(double result) {
}

double PredictivePacketManipulator::RandomUniform() {
if(random_results_.size() == 0u) {
fprintf(stderr, "No more stored results, please make sure AddRandomResult()"
if (random_results_.size() == 0u) {
fprintf(stderr,
"No more stored results, please make sure AddRandomResult()"
"is called same amount of times you're going to invoke the "
"RandomUniform() function, i.e. once per packet.\n");
assert(false);
@@ -45,4 +44,4 @@ double PredictivePacketManipulator::RandomUniform() {
}

} // namespace test
} // namespace webrtcc
} // namespace webrtc

@@ -31,6 +31,7 @@ class PredictivePacketManipulator : public PacketManipulatorImpl {
// FIFO queue so they will be returned in the same order they were added.
// Result parameter must be 0.0 to 1.0.
void AddRandomResult(double result);

protected:
// Returns a uniformly distributed random value between 0.0 and 1.0
double RandomUniform() override;

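A hypothetical test usage of the class above, queuing one result per expected packet so the loss pattern is fully deterministic (packet_reader, config and encoded_image are placeholder names, not from this CL):

webrtc::test::PredictivePacketManipulator manipulator(&packet_reader, config);
manipulator.AddRandomResult(0.1);  // consumed for the first packet
manipulator.AddRandomResult(0.9);  // consumed for the second packet
int nbr_dropped = manipulator.ManipulatePackets(&encoded_image);
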
@@ -78,8 +78,7 @@ void Stats::PrintSummary() {
size_t nbr_keyframes = 0;
size_t nbr_nonkeyframes = 0;

for (FrameStatisticsIterator it = stats_.begin();
it != stats_.end(); ++it) {
for (FrameStatisticsIterator it = stats_.begin(); it != stats_.end(); ++it) {
total_encoding_time_in_us += it->encode_time_in_us;
total_decoding_time_in_us += it->decode_time_in_us;
total_encoded_frames_lengths += it->encoded_frame_length_in_bytes;
@@ -96,15 +95,13 @@ void Stats::PrintSummary() {

// ENCODING
printf("Encoding time:\n");
frame = std::min_element(stats_.begin(),
stats_.end(), LessForEncodeTime);
printf(" Min : %7d us (frame %d)\n",
frame->encode_time_in_us, frame->frame_number);
frame = std::min_element(stats_.begin(), stats_.end(), LessForEncodeTime);
printf(" Min : %7d us (frame %d)\n", frame->encode_time_in_us,
frame->frame_number);

frame = std::max_element(stats_.begin(),
stats_.end(), LessForEncodeTime);
printf(" Max : %7d us (frame %d)\n",
frame->encode_time_in_us, frame->frame_number);
frame = std::max_element(stats_.begin(), stats_.end(), LessForEncodeTime);
printf(" Max : %7d us (frame %d)\n", frame->encode_time_in_us,
frame->frame_number);

printf(" Average : %7d us\n",
static_cast<int>(total_encoding_time_in_us / stats_.size()));
@@ -123,15 +120,15 @@ void Stats::PrintSummary() {
if (decoded_frames.size() == 0) {
printf("No successfully decoded frames exist in this statistics.\n");
} else {
frame = std::min_element(decoded_frames.begin(),
decoded_frames.end(), LessForDecodeTime);
printf(" Min : %7d us (frame %d)\n",
frame->decode_time_in_us, frame->frame_number);
frame = std::min_element(decoded_frames.begin(), decoded_frames.end(),
LessForDecodeTime);
printf(" Min : %7d us (frame %d)\n", frame->decode_time_in_us,
frame->frame_number);

frame = std::max_element(decoded_frames.begin(),
decoded_frames.end(), LessForDecodeTime);
printf(" Max : %7d us (frame %d)\n",
frame->decode_time_in_us, frame->frame_number);
frame = std::max_element(decoded_frames.begin(), decoded_frames.end(),
LessForDecodeTime);
printf(" Max : %7d us (frame %d)\n", frame->decode_time_in_us,
frame->frame_number);

printf(" Average : %7d us\n",
static_cast<int>(total_decoding_time_in_us / decoded_frames.size()));
@@ -141,13 +138,11 @@ void Stats::PrintSummary() {

// SIZE
printf("Frame sizes:\n");
frame = std::min_element(stats_.begin(),
stats_.end(), LessForEncodedSize);
frame = std::min_element(stats_.begin(), stats_.end(), LessForEncodedSize);
printf(" Min : %7" PRIuS " bytes (frame %d)\n",
frame->encoded_frame_length_in_bytes, frame->frame_number);

frame = std::max_element(stats_.begin(),
stats_.end(), LessForEncodedSize);
frame = std::max_element(stats_.begin(), stats_.end(), LessForEncodedSize);
printf(" Max : %7" PRIuS " bytes (frame %d)\n",
frame->encoded_frame_length_in_bytes, frame->frame_number);

@@ -167,21 +162,17 @@ void Stats::PrintSummary() {

// BIT RATE
printf("Bit rates:\n");
frame = std::min_element(stats_.begin(),
stats_.end(), LessForBitRate);
printf(" Min bit rate: %7d kbps (frame %d)\n",
frame->bit_rate_in_kbps, frame->frame_number);
frame = std::min_element(stats_.begin(), stats_.end(), LessForBitRate);
printf(" Min bit rate: %7d kbps (frame %d)\n", frame->bit_rate_in_kbps,
frame->frame_number);

frame = std::max_element(stats_.begin(),
stats_.end(), LessForBitRate);
printf(" Max bit rate: %7d kbps (frame %d)\n",
frame->bit_rate_in_kbps, frame->frame_number);
frame = std::max_element(stats_.begin(), stats_.end(), LessForBitRate);
printf(" Max bit rate: %7d kbps (frame %d)\n", frame->bit_rate_in_kbps,
frame->frame_number);

printf("\n");
printf("Total encoding time : %7d ms.\n",
total_encoding_time_in_us / 1000);
printf("Total decoding time : %7d ms.\n",
total_decoding_time_in_us / 1000);
printf("Total encoding time : %7d ms.\n", total_encoding_time_in_us / 1000);
printf("Total decoding time : %7d ms.\n", total_decoding_time_in_us / 1000);
printf("Total processing time: %7d ms.\n",
(total_encoding_time_in_us + total_decoding_time_in_us) / 1000);
}

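All of the reflowed statements above follow one pattern: scan the per-frame statistics with std::min_element/std::max_element and a strict-weak-ordering comparator. A generic sketch with a hypothetical Frame type (the real code uses FrameStatistic and comparators such as LessForEncodeTime):

#include <algorithm>
#include <vector>

struct Frame {
  int encode_time_in_us;
  int frame_number;
};

bool LessForEncodeTime(const Frame& a, const Frame& b) {
  return a.encode_time_in_us < b.encode_time_in_us;
}

void ReportEncodeTimes(const std::vector<Frame>& stats) {
  // Cheapest and most expensive frames to encode, by the same comparator.
  auto min_it = std::min_element(stats.begin(), stats.end(), LessForEncodeTime);
  auto max_it = std::max_element(stats.begin(), stats.end(), LessForEncodeTime);
  (void)min_it;
  (void)max_it;
}
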
@@ -16,21 +16,15 @@
namespace webrtc {
namespace test {

class StatsTest: public testing::Test {
class StatsTest : public testing::Test {
protected:
StatsTest() {
}
StatsTest() {}

virtual ~StatsTest() {
}
virtual ~StatsTest() {}

void SetUp() {
stats_ = new Stats();
}
void SetUp() { stats_ = new Stats(); }

void TearDown() {
delete stats_;
}
void TearDown() { delete stats_; }

Stats* stats_;
};

@@ -93,14 +93,18 @@ bool VideoProcessorImpl::Init() {
int32_t register_result =
encoder_->RegisterEncodeCompleteCallback(encode_callback_);
if (register_result != WEBRTC_VIDEO_CODEC_OK) {
fprintf(stderr, "Failed to register encode complete callback, return code: "
"%d\n", register_result);
fprintf(stderr,
"Failed to register encode complete callback, return code: "
"%d\n",
register_result);
return false;
}
register_result = decoder_->RegisterDecodeCompleteCallback(decode_callback_);
if (register_result != WEBRTC_VIDEO_CODEC_OK) {
fprintf(stderr, "Failed to register decode complete callback, return code: "
"%d\n", register_result);
fprintf(stderr,
"Failed to register decode complete callback, return code: "
"%d\n",
register_result);
return false;
}
// Init the encoder and decoder
@@ -146,13 +150,14 @@ VideoProcessorImpl::~VideoProcessorImpl() {
delete decode_callback_;
}


void VideoProcessorImpl::SetRates(int bit_rate, int frame_rate) {
int set_rates_result = encoder_->SetRates(bit_rate, frame_rate);
assert(set_rates_result >= 0);
if (set_rates_result < 0) {
fprintf(stderr, "Failed to update encoder with new rate %d, "
"return code: %d\n", bit_rate, set_rates_result);
fprintf(stderr,
"Failed to update encoder with new rate %d, "
"return code: %d\n",
bit_rate, set_rates_result);
}
num_dropped_frames_ = 0;
num_spatial_resizes_ = 0;
@@ -175,7 +180,7 @@ int VideoProcessorImpl::NumberSpatialResizes() {
}

bool VideoProcessorImpl::ProcessFrame(int frame_number) {
assert(frame_number >=0);
assert(frame_number >= 0);
if (!initialized_) {
fprintf(stderr, "Attempting to use uninitialized VideoProcessor!\n");
return false;
@@ -186,10 +191,8 @@ bool VideoProcessorImpl::ProcessFrame(int frame_number) {
}
if (frame_reader_->ReadFrame(source_buffer_)) {
// Copy the source frame to the newly read frame data.
source_frame_.CreateFrame(source_buffer_,
config_.codec_settings->width,
config_.codec_settings->height,
kVideoRotation_0);
source_frame_.CreateFrame(source_buffer_, config_.codec_settings->width,
config_.codec_settings->height, kVideoRotation_0);

// Ensure we have a new statistics data object we can fill:
FrameStatistic& stat = stats_->NewFrame(frame_number);
@@ -224,8 +227,8 @@ bool VideoProcessorImpl::ProcessFrame(int frame_number) {

void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
// Timestamp is frame number, so this gives us #dropped frames.
int num_dropped_from_prev_encode = encoded_image._timeStamp -
prev_time_stamp_ - 1;
int num_dropped_from_prev_encode =
encoded_image._timeStamp - prev_time_stamp_ - 1;
num_dropped_frames_ += num_dropped_from_prev_encode;
prev_time_stamp_ = encoded_image._timeStamp;
if (num_dropped_from_prev_encode > 0) {
@@ -244,15 +247,16 @@ void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
TickTime encode_stop = TickTime::Now();
int frame_number = encoded_image._timeStamp;
FrameStatistic& stat = stats_->stats_[frame_number];
stat.encode_time_in_us = GetElapsedTimeMicroseconds(encode_start_,
encode_stop);
stat.encode_time_in_us =
GetElapsedTimeMicroseconds(encode_start_, encode_stop);
stat.encoding_successful = true;
stat.encoded_frame_length_in_bytes = encoded_image._length;
stat.frame_number = encoded_image._timeStamp;
stat.frame_type = encoded_image._frameType;
stat.bit_rate_in_kbps = encoded_image._length * bit_rate_factor_;
stat.total_packets = encoded_image._length /
config_.networking_config.packet_size_in_bytes + 1;
stat.total_packets =
encoded_image._length / config_.networking_config.packet_size_in_bytes +
1;

// Perform packet loss if criteria is fullfilled:
bool exclude_this_frame = false;
@@ -305,13 +309,13 @@ void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) {
int frame_number = image.timestamp();
// Report stats
FrameStatistic& stat = stats_->stats_[frame_number];
stat.decode_time_in_us = GetElapsedTimeMicroseconds(decode_start_,
decode_stop);
stat.decode_time_in_us =
GetElapsedTimeMicroseconds(decode_start_, decode_stop);
stat.decoding_successful = true;

// Check for resize action (either down or up):
if (static_cast<int>(image.width()) != last_encoder_frame_width_ ||
static_cast<int>(image.height()) != last_encoder_frame_height_ ) {
static_cast<int>(image.height()) != last_encoder_frame_height_) {
++num_spatial_resizes_;
last_encoder_frame_width_ = image.width();
last_encoder_frame_height_ = image.height();
@@ -321,10 +325,9 @@ void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) {
if (image.width() != config_.codec_settings->width ||
image.height() != config_.codec_settings->height) {
VideoFrame up_image;
int ret_val = scaler_.Set(image.width(), image.height(),
config_.codec_settings->width,
config_.codec_settings->height,
kI420, kI420, kScaleBilinear);
int ret_val = scaler_.Set(
image.width(), image.height(), config_.codec_settings->width,
config_.codec_settings->height, kI420, kI420, kScaleBilinear);
assert(ret_val >= 0);
if (ret_val < 0) {
fprintf(stderr, "Failed to set scalar for frame: %d, return code: %d\n",
@@ -366,7 +369,8 @@ void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) {
}

int VideoProcessorImpl::GetElapsedTimeMicroseconds(
const webrtc::TickTime& start, const webrtc::TickTime& stop) {
const webrtc::TickTime& start,
const webrtc::TickTime& stop) {
uint64_t encode_time = (stop - start).Microseconds();
assert(encode_time <
static_cast<unsigned int>(std::numeric_limits<int>::max()));
@@ -404,8 +408,7 @@ const char* VideoCodecTypeToStr(webrtc::VideoCodecType e) {
}

// Callbacks
int32_t
VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
int32_t VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
const EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codec_specific_info,
const webrtc::RTPFragmentationHeader* fragmentation) {

@@ -246,11 +246,10 @@ class VideoProcessorImpl : public VideoProcessor {
: public webrtc::DecodedImageCallback {
public:
explicit VideoProcessorDecodeCompleteCallback(VideoProcessorImpl* vp)
: video_processor_(vp) {
}
: video_processor_(vp) {}
int32_t Decoded(webrtc::VideoFrame& image) override;
int32_t Decoded(
webrtc::VideoFrame& image, int64_t decode_time_ms) override {
int32_t Decoded(webrtc::VideoFrame& image,
int64_t decode_time_ms) override {
RTC_NOTREACHED();
return -1;
}

@@ -81,7 +81,6 @@ struct RateControlMetrics {
int num_key_frames;
};


// Sequence used is foreman (CIF): may be better to use VGA for resize test.
const int kCIFWidth = 352;
const int kCIFHeight = 288;
@@ -101,7 +100,7 @@ const float kScaleKeyFrameSize = 0.5f;
// dropping/spatial resize, and temporal layers. The limits for the rate
// control metrics are set to be fairly conservative, so failure should only
// happen when some significant regression or breakdown occurs.
class VideoProcessorIntegrationTest: public testing::Test {
class VideoProcessorIntegrationTest : public testing::Test {
protected:
VideoEncoder* encoder_;
VideoDecoder* decoder_;
@@ -148,7 +147,6 @@ class VideoProcessorIntegrationTest: public testing::Test {
bool frame_dropper_on_;
bool spatial_resize_on_;


VideoProcessorIntegrationTest() {}
virtual ~VideoProcessorIntegrationTest() {}

@@ -165,14 +163,13 @@ class VideoProcessorIntegrationTest: public testing::Test {

// CIF is currently used for all tests below.
// Setup the TestConfig struct for processing of a clip in CIF resolution.
config_.input_filename =
webrtc::test::ResourcePath("foreman_cif", "yuv");
config_.input_filename = webrtc::test::ResourcePath("foreman_cif", "yuv");

// Generate an output filename in a safe way.
config_.output_filename = webrtc::test::TempFilename(
webrtc::test::OutputPath(), "videoprocessor_integrationtest");
config_.frame_length_in_bytes = CalcBufferSize(kI420,
kCIFWidth, kCIFHeight);
config_.frame_length_in_bytes =
CalcBufferSize(kI420, kCIFWidth, kCIFHeight);
config_.verbose = false;
// Only allow encoder/decoder to use single core, for predictability.
config_.use_single_core = true;
@@ -191,8 +188,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
case kVideoCodecVP8:
config_.codec_settings->codecSpecific.VP8.errorConcealmentOn =
error_concealment_on_;
config_.codec_settings->codecSpecific.VP8.denoisingOn =
denoising_on_;
config_.codec_settings->codecSpecific.VP8.denoisingOn = denoising_on_;
config_.codec_settings->codecSpecific.VP8.numberOfTemporalLayers =
num_temporal_layers_;
config_.codec_settings->codecSpecific.VP8.frameDroppingOn =
@@ -203,8 +199,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
kBaseKeyFrameInterval;
break;
case kVideoCodecVP9:
config_.codec_settings->codecSpecific.VP9.denoisingOn =
denoising_on_;
config_.codec_settings->codecSpecific.VP9.denoisingOn = denoising_on_;
config_.codec_settings->codecSpecific.VP9.numberOfTemporalLayers =
num_temporal_layers_;
config_.codec_settings->codecSpecific.VP9.frameDroppingOn =
@@ -218,21 +213,17 @@ class VideoProcessorIntegrationTest: public testing::Test {
assert(false);
break;
}
frame_reader_ =
new webrtc::test::FrameReaderImpl(config_.input_filename,
config_.frame_length_in_bytes);
frame_writer_ =
new webrtc::test::FrameWriterImpl(config_.output_filename,
config_.frame_length_in_bytes);
frame_reader_ = new webrtc::test::FrameReaderImpl(
config_.input_filename, config_.frame_length_in_bytes);
frame_writer_ = new webrtc::test::FrameWriterImpl(
config_.output_filename, config_.frame_length_in_bytes);
ASSERT_TRUE(frame_reader_->Init());
ASSERT_TRUE(frame_writer_->Init());

packet_manipulator_ = new webrtc::test::PacketManipulatorImpl(
&packet_reader_, config_.networking_config, config_.verbose);
processor_ = new webrtc::test::VideoProcessorImpl(encoder_, decoder_,
frame_reader_,
frame_writer_,
packet_manipulator_,
processor_ = new webrtc::test::VideoProcessorImpl(
encoder_, decoder_, frame_reader_, frame_writer_, packet_manipulator_,
config_, &stats_);
ASSERT_TRUE(processor_->Init());
}
@@ -274,14 +265,14 @@ class VideoProcessorIntegrationTest: public testing::Test {
// Update rate mismatch relative to per-frame bandwidth for delta frames.
if (frame_type == kVideoFrameDelta) {
// TODO(marpan): Should we count dropped (zero size) frames in mismatch?
sum_frame_size_mismatch_[layer_] += fabs(encoded_size_kbits -
per_frame_bandwidth_[layer_]) /
sum_frame_size_mismatch_[layer_] +=
fabs(encoded_size_kbits - per_frame_bandwidth_[layer_]) /
per_frame_bandwidth_[layer_];
} else {
float target_size = (frame_num == 1) ? target_size_key_frame_initial_ :
target_size_key_frame_;
sum_key_frame_size_mismatch_ += fabs(encoded_size_kbits - target_size) /
target_size;
float target_size = (frame_num == 1) ? target_size_key_frame_initial_
: target_size_key_frame_;
sum_key_frame_size_mismatch_ +=
fabs(encoded_size_kbits - target_size) / target_size;
num_key_frames_ += 1;
}
sum_encoded_frame_size_[layer_] += encoded_size_kbits;
@@ -292,10 +283,10 @@ class VideoProcessorIntegrationTest: public testing::Test {
num_frames_per_update_[layer_];
// Total encoding rate: from the start of the update/run to current frame.
sum_encoded_frame_size_total_ += encoded_size_kbits;
encoding_bitrate_total_ = sum_encoded_frame_size_total_ * frame_rate_ /
num_frames_total_;
perc_encoding_rate_mismatch_ = 100 * fabs(encoding_bitrate_total_ -
bit_rate_) / bit_rate_;
encoding_bitrate_total_ =
sum_encoded_frame_size_total_ * frame_rate_ / num_frames_total_;
perc_encoding_rate_mismatch_ =
100 * fabs(encoding_bitrate_total_ - bit_rate_) / bit_rate_;
if (perc_encoding_rate_mismatch_ < kPercTargetvsActualMismatch &&
!encoding_rate_within_target_) {
num_frames_to_hit_target_ = num_frames_total_;
@@ -314,34 +305,38 @@ class VideoProcessorIntegrationTest: public testing::Test {
int num_key_frames) {
int num_dropped_frames = processor_->NumberDroppedFrames();
int num_resize_actions = processor_->NumberSpatialResizes();
printf("For update #: %d,\n "

printf(
|
||||
"For update #: %d,\n "
|
||||
" Target Bitrate: %d,\n"
|
||||
" Encoding bitrate: %f,\n"
|
||||
" Frame rate: %d \n",
|
||||
update_index, bit_rate_, encoding_bitrate_total_, frame_rate_);
|
||||
printf(" Number of frames to approach target rate = %d, \n"
|
||||
printf(
|
||||
" Number of frames to approach target rate = %d, \n"
|
||||
" Number of dropped frames = %d, \n"
|
||||
" Number of spatial resizes = %d, \n",
|
||||
num_frames_to_hit_target_, num_dropped_frames, num_resize_actions);
|
||||
EXPECT_LE(perc_encoding_rate_mismatch_, max_encoding_rate_mismatch);
|
||||
if (num_key_frames_ > 0) {
|
||||
int perc_key_frame_size_mismatch = 100 * sum_key_frame_size_mismatch_ /
|
||||
num_key_frames_;
|
||||
printf(" Number of Key frames: %d \n"
|
||||
int perc_key_frame_size_mismatch =
|
||||
100 * sum_key_frame_size_mismatch_ / num_key_frames_;
|
||||
printf(
|
||||
" Number of Key frames: %d \n"
|
||||
" Key frame rate mismatch: %d \n",
|
||||
num_key_frames_, perc_key_frame_size_mismatch);
|
||||
EXPECT_LE(perc_key_frame_size_mismatch, max_key_frame_size_mismatch);
|
||||
}
|
||||
printf("\n");
|
||||
printf("Rates statistics for Layer data \n");
|
||||
for (int i = 0; i < num_temporal_layers_ ; i++) {
|
||||
for (int i = 0; i < num_temporal_layers_; i++) {
|
||||
printf("Layer #%d \n", i);
|
||||
int perc_frame_size_mismatch = 100 * sum_frame_size_mismatch_[i] /
|
||||
num_frames_per_update_[i];
|
||||
int perc_encoding_rate_mismatch = 100 * fabs(encoding_bitrate_[i] -
|
||||
bit_rate_layer_[i]) /
|
||||
int perc_frame_size_mismatch =
|
||||
100 * sum_frame_size_mismatch_[i] / num_frames_per_update_[i];
|
||||
int perc_encoding_rate_mismatch =
|
||||
100 * fabs(encoding_bitrate_[i] - bit_rate_layer_[i]) /
|
||||
bit_rate_layer_[i];
|
||||
printf(" Target Layer Bit rate: %f \n"
|
||||
printf(
|
||||
" Target Layer Bit rate: %f \n"
|
||||
" Layer frame rate: %f, \n"
|
||||
" Layer per frame bandwidth: %f, \n"
|
||||
" Layer Encoding bit rate: %f, \n"
|
||||
@ -391,20 +386,20 @@ class VideoProcessorIntegrationTest: public testing::Test {
|
||||
|
||||
// Set the bitrate and frame rate per layer, for up to 3 layers.
|
||||
void SetLayerRates() {
|
||||
assert(num_temporal_layers_<= 3);
|
||||
assert(num_temporal_layers_ <= 3);
|
||||
for (int i = 0; i < num_temporal_layers_; i++) {
|
||||
float bit_rate_ratio =
|
||||
kVp8LayerRateAlloction[num_temporal_layers_ - 1][i];
|
||||
if (i > 0) {
|
||||
float bit_rate_delta_ratio = kVp8LayerRateAlloction
|
||||
[num_temporal_layers_ - 1][i] -
|
||||
float bit_rate_delta_ratio =
|
||||
kVp8LayerRateAlloction[num_temporal_layers_ - 1][i] -
|
||||
kVp8LayerRateAlloction[num_temporal_layers_ - 1][i - 1];
|
||||
bit_rate_layer_[i] = bit_rate_ * bit_rate_delta_ratio;
|
||||
} else {
|
||||
bit_rate_layer_[i] = bit_rate_ * bit_rate_ratio;
|
||||
}
|
||||
frame_rate_layer_[i] = frame_rate_ / static_cast<float>(
|
||||
1 << (num_temporal_layers_ - 1));
|
||||
frame_rate_layer_[i] =
|
||||
frame_rate_ / static_cast<float>(1 << (num_temporal_layers_ - 1));
|
||||
}
|
||||
if (num_temporal_layers_ == 3) {
|
||||
frame_rate_layer_[2] = frame_rate_ / 2.0f;
|
||||
@ -441,8 +436,8 @@ class VideoProcessorIntegrationTest: public testing::Test {
|
||||
frame_rate_ = rate_profile.input_frame_rate[0];
|
||||
SetLayerRates();
|
||||
// Set the initial target size for key frame.
|
||||
target_size_key_frame_initial_ = 0.5 * kInitialBufferSize *
|
||||
bit_rate_layer_[0];
|
||||
target_size_key_frame_initial_ =
|
||||
0.5 * kInitialBufferSize * bit_rate_layer_[0];
|
||||
processor_->SetRates(bit_rate_, frame_rate_);
|
||||
// Process each frame, up to |num_frames|.
|
||||
int num_frames = rate_profile.num_frames;
|
||||
@ -468,8 +463,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
|
||||
if (frame_number ==
|
||||
rate_profile.frame_index_rate_update[update_index + 1]) {
|
||||
VerifyRateControl(
|
||||
update_index,
|
||||
rc_metrics[update_index].max_key_frame_size_mismatch,
|
||||
update_index, rc_metrics[update_index].max_key_frame_size_mismatch,
|
||||
rc_metrics[update_index].max_delta_frame_size_mismatch,
|
||||
rc_metrics[update_index].max_encoding_rate_mismatch,
|
||||
rc_metrics[update_index].max_time_hit_target,
|
||||
@ -481,13 +475,12 @@ class VideoProcessorIntegrationTest: public testing::Test {
|
||||
bit_rate_ = rate_profile.target_bit_rate[update_index];
|
||||
frame_rate_ = rate_profile.input_frame_rate[update_index];
|
||||
SetLayerRates();
|
||||
ResetRateControlMetrics(rate_profile.
|
||||
frame_index_rate_update[update_index + 1]);
|
||||
ResetRateControlMetrics(
|
||||
rate_profile.frame_index_rate_update[update_index + 1]);
|
||||
processor_->SetRates(bit_rate_, frame_rate_);
|
||||
}
|
||||
}
|
||||
VerifyRateControl(
|
||||
update_index,
|
||||
VerifyRateControl(update_index,
|
||||
rc_metrics[update_index].max_key_frame_size_mismatch,
|
||||
rc_metrics[update_index].max_delta_frame_size_mismatch,
|
||||
rc_metrics[update_index].max_encoding_rate_mismatch,
|
||||
@ -507,16 +500,14 @@ class VideoProcessorIntegrationTest: public testing::Test {
|
||||
|
||||
// TODO(marpan): should compute these quality metrics per SetRates update.
|
||||
webrtc::test::QualityMetricsResult psnr_result, ssim_result;
|
||||
EXPECT_EQ(0, webrtc::test::I420MetricsFromFiles(
|
||||
config_.input_filename.c_str(),
|
||||
config_.output_filename.c_str(),
|
||||
config_.codec_settings->width,
|
||||
config_.codec_settings->height,
|
||||
&psnr_result,
|
||||
&ssim_result));
|
||||
EXPECT_EQ(
|
||||
0, webrtc::test::I420MetricsFromFiles(
|
||||
config_.input_filename.c_str(), config_.output_filename.c_str(),
|
||||
config_.codec_settings->width, config_.codec_settings->height,
|
||||
&psnr_result, &ssim_result));
|
||||
printf("PSNR avg: %f, min: %f SSIM avg: %f, min: %f\n",
|
||||
psnr_result.average, psnr_result.min,
|
||||
ssim_result.average, ssim_result.min);
|
||||
psnr_result.average, psnr_result.min, ssim_result.average,
|
||||
ssim_result.min);
|
||||
stats_.PrintSummary();
|
||||
EXPECT_GT(psnr_result.average, quality_metrics.minimum_avg_psnr);
|
||||
EXPECT_GT(psnr_result.min, quality_metrics.minimum_min_psnr);
|
||||
@ -608,9 +599,7 @@ TEST_F(VideoProcessorIntegrationTest, Process0PercentPacketLossVP9) {
|
||||
// Metrics for rate control.
|
||||
RateControlMetrics rc_metrics[1];
|
||||
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0, 1);
|
||||
ProcessFramesAndVerify(quality_metrics,
|
||||
rate_profile,
|
||||
process_settings,
|
||||
ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
|
||||
rc_metrics);
|
||||
}
|
||||
|
||||
@ -632,13 +621,10 @@ TEST_F(VideoProcessorIntegrationTest, Process5PercentPacketLossVP9) {
|
||||
// Metrics for rate control.
|
||||
RateControlMetrics rc_metrics[1];
|
||||
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0, 1);
|
||||
ProcessFramesAndVerify(quality_metrics,
|
||||
rate_profile,
|
||||
process_settings,
|
||||
ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
|
||||
rc_metrics);
|
||||
}
|
||||
|
||||
|
||||
// VP9: Run with no packet loss, with varying bitrate (3 rate updates):
|
||||
// low to high to medium. Check that quality and encoder response to the new
|
||||
// target rate/per-frame bandwidth (for each rate update) is within limits.
|
||||
@ -663,9 +649,7 @@ TEST_F(VideoProcessorIntegrationTest, ProcessNoLossChangeBitRateVP9) {
|
||||
SetRateControlMetrics(rc_metrics, 0, 0, 30, 20, 20, 30, 0, 1);
|
||||
SetRateControlMetrics(rc_metrics, 1, 2, 0, 20, 20, 60, 0, 0);
|
||||
SetRateControlMetrics(rc_metrics, 2, 0, 0, 25, 20, 40, 0, 0);
|
||||
ProcessFramesAndVerify(quality_metrics,
|
||||
rate_profile,
|
||||
process_settings,
|
||||
ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
|
||||
rc_metrics);
|
||||
}
|
||||
|
||||
@ -698,9 +682,7 @@ TEST_F(VideoProcessorIntegrationTest,
|
||||
SetRateControlMetrics(rc_metrics, 0, 35, 50, 75, 15, 45, 0, 1);
|
||||
SetRateControlMetrics(rc_metrics, 1, 10, 0, 40, 10, 30, 0, 0);
|
||||
SetRateControlMetrics(rc_metrics, 2, 5, 0, 30, 5, 20, 0, 0);
|
||||
ProcessFramesAndVerify(quality_metrics,
|
||||
rate_profile,
|
||||
process_settings,
|
||||
ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
|
||||
rc_metrics);
|
||||
}
|
||||
|
||||
@ -721,9 +703,7 @@ TEST_F(VideoProcessorIntegrationTest, ProcessNoLossDenoiserOnVP9) {
|
||||
// Metrics for rate control.
|
||||
RateControlMetrics rc_metrics[1];
|
||||
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0, 1);
|
||||
ProcessFramesAndVerify(quality_metrics,
|
||||
rate_profile,
|
||||
process_settings,
|
||||
ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
|
||||
rc_metrics);
|
||||
}
|
||||
|
||||
@ -739,17 +719,15 @@ TEST_F(VideoProcessorIntegrationTest, ProcessNoLossSpatialResizeFrameDropVP9) {
|
||||
rate_profile.num_frames = kNbrFramesLong;
|
||||
// Codec/network settings.
|
||||
CodecConfigPars process_settings;
|
||||
SetCodecParameters(&process_settings, kVideoCodecVP9, 0.0f, -1,
|
||||
1, false, false, true, true);
|
||||
SetCodecParameters(&process_settings, kVideoCodecVP9, 0.0f, -1, 1, false,
|
||||
false, true, true);
|
||||
// Metrics for expected quality.
|
||||
QualityMetrics quality_metrics;
|
||||
SetQualityMetrics(&quality_metrics, 25.0, 13.0, 0.70, 0.37);
|
||||
// Metrics for rate control.
|
||||
RateControlMetrics rc_metrics[1];
|
||||
SetRateControlMetrics(rc_metrics, 0, 225, 70, 160, 15, 80, 1, 1);
|
||||
ProcessFramesAndVerify(quality_metrics,
|
||||
rate_profile,
|
||||
process_settings,
|
||||
ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
|
||||
rc_metrics);
|
||||
}
|
||||
|
||||
@ -775,9 +753,7 @@ TEST_F(VideoProcessorIntegrationTest, ProcessZeroPacketLoss) {
|
||||
// Metrics for rate control.
|
||||
RateControlMetrics rc_metrics[1];
|
||||
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0, 1);
|
||||
ProcessFramesAndVerify(quality_metrics,
|
||||
rate_profile,
|
||||
process_settings,
|
||||
ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
|
||||
rc_metrics);
|
||||
}
|
||||
|
||||
@ -799,9 +775,7 @@ TEST_F(VideoProcessorIntegrationTest, Process5PercentPacketLoss) {
|
||||
// Metrics for rate control.
|
||||
RateControlMetrics rc_metrics[1];
|
||||
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0, 1);
|
||||
ProcessFramesAndVerify(quality_metrics,
|
||||
rate_profile,
|
||||
process_settings,
|
||||
ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
|
||||
rc_metrics);
|
||||
}
|
||||
|
||||
@ -823,9 +797,7 @@ TEST_F(VideoProcessorIntegrationTest, Process10PercentPacketLoss) {
|
||||
// Metrics for rate control.
|
||||
RateControlMetrics rc_metrics[1];
|
||||
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0, 1);
|
||||
ProcessFramesAndVerify(quality_metrics,
|
||||
rate_profile,
|
||||
process_settings,
|
||||
ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
|
||||
rc_metrics);
|
||||
}
|
||||
|
||||
@ -863,9 +835,7 @@ TEST_F(VideoProcessorIntegrationTest,
|
||||
SetRateControlMetrics(rc_metrics, 0, 0, 45, 20, 10, 15, 0, 1);
|
||||
SetRateControlMetrics(rc_metrics, 1, 0, 0, 25, 20, 10, 0, 0);
|
||||
SetRateControlMetrics(rc_metrics, 2, 0, 0, 25, 15, 10, 0, 0);
|
||||
ProcessFramesAndVerify(quality_metrics,
|
||||
rate_profile,
|
||||
process_settings,
|
||||
ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
|
||||
rc_metrics);
|
||||
}
|
||||
|
||||
@ -898,9 +868,7 @@ TEST_F(VideoProcessorIntegrationTest,
|
||||
SetRateControlMetrics(rc_metrics, 0, 40, 20, 75, 15, 60, 0, 1);
|
||||
SetRateControlMetrics(rc_metrics, 1, 10, 0, 25, 10, 35, 0, 0);
|
||||
SetRateControlMetrics(rc_metrics, 2, 0, 0, 20, 10, 15, 0, 0);
|
||||
ProcessFramesAndVerify(quality_metrics,
|
||||
rate_profile,
|
||||
process_settings,
|
||||
ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
|
||||
rc_metrics);
|
||||
}
|
||||
|
||||
@ -916,17 +884,15 @@ TEST_F(VideoProcessorIntegrationTest,
|
||||
rate_profile.num_frames = kNbrFramesLong;
|
||||
// Codec/network settings.
|
||||
CodecConfigPars process_settings;
|
||||
SetCodecParameters(&process_settings, kVideoCodecVP8, 0.0f, -1,
|
||||
1, false, true, true, true);
|
||||
SetCodecParameters(&process_settings, kVideoCodecVP8, 0.0f, -1, 1, false,
|
||||
true, true, true);
|
||||
// Metrics for expected quality.
|
||||
QualityMetrics quality_metrics;
|
||||
SetQualityMetrics(&quality_metrics, 25.0, 15.0, 0.70, 0.40);
|
||||
// Metrics for rate control.
|
||||
RateControlMetrics rc_metrics[1];
|
||||
SetRateControlMetrics(rc_metrics, 0, 160, 60, 120, 20, 70, 1, 2);
|
||||
ProcessFramesAndVerify(quality_metrics,
|
||||
rate_profile,
|
||||
process_settings,
|
||||
ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
|
||||
rc_metrics);
|
||||
}
|
||||
|
||||
@ -955,9 +921,7 @@ TEST_F(VideoProcessorIntegrationTest,
|
||||
RateControlMetrics rc_metrics[2];
|
||||
SetRateControlMetrics(rc_metrics, 0, 0, 20, 30, 10, 10, 0, 1);
|
||||
SetRateControlMetrics(rc_metrics, 1, 0, 0, 30, 15, 10, 0, 0);
|
||||
ProcessFramesAndVerify(quality_metrics,
|
||||
rate_profile,
|
||||
process_settings,
|
||||
ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
|
||||
rc_metrics);
|
||||
}
|
||||
} // namespace webrtc
|
||||
|
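The SetLayerRates() hunk above carries the one non-obvious piece of arithmetic in this file: kVp8LayerRateAlloction stores cumulative bitrate fractions per temporal layer, so a layer's own share is the difference between adjacent table entries. A minimal standalone sketch of that allocation follows; the table values are the commonly cited WebRTC defaults and should be treated as illustrative, not as copied from this CL.

#include <cassert>

// Assumed cumulative allocation table, modeled on vp8_common_types.h.
static const float kVp8LayerRateAlloction[4][4] = {
    {1.0f, 0.0f, 0.0f, 0.0f},    // 1 layer
    {0.6f, 1.0f, 0.0f, 0.0f},    // 2 layers: 60% + 40%
    {0.4f, 0.6f, 1.0f, 0.0f},    // 3 layers: 40% + 20% + 40%
    {0.25f, 0.4f, 0.6f, 1.0f}};  // 4 layers

// Returns layer |i|'s own bitrate share (kbps) out of |num_layers|,
// given the total target |bitrate_kbps|.
float LayerBitrateKbps(int num_layers, int i, float bitrate_kbps) {
  assert(num_layers >= 1 && num_layers <= 4 && i >= 0 && i < num_layers);
  float cumulative = kVp8LayerRateAlloction[num_layers - 1][i];
  // The table is cumulative, so subtract the previous entry (0 for layer 0).
  float previous =
      (i > 0) ? kVp8LayerRateAlloction[num_layers - 1][i - 1] : 0.0f;
  return bitrate_kbps * (cumulative - previous);
}

For example, LayerBitrateKbps(2, 0, 500) yields 300 and LayerBitrateKbps(2, 1, 500) yields 200, which is exactly the delta computation the reformatted bit_rate_delta_ratio code performs.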
@ -29,7 +29,7 @@ namespace test {

// Very basic testing for VideoProcessor. It's mostly tested by running the
// video_quality_measurement program.
class VideoProcessorTest: public testing::Test {
class VideoProcessorTest : public testing::Test {
protected:
MockVideoEncoder encoder_mock_;
MockVideoDecoder decoder_mock_;
@ -53,44 +53,34 @@ class VideoProcessorTest: public testing::Test {
void TearDown() {}

void ExpectInit() {
EXPECT_CALL(encoder_mock_, InitEncode(_, _, _))
.Times(1);
EXPECT_CALL(encoder_mock_, InitEncode(_, _, _)).Times(1);
EXPECT_CALL(encoder_mock_, RegisterEncodeCompleteCallback(_))
.Times(AtLeast(1));
EXPECT_CALL(decoder_mock_, InitDecode(_, _))
.Times(1);
EXPECT_CALL(decoder_mock_, InitDecode(_, _)).Times(1);
EXPECT_CALL(decoder_mock_, RegisterDecodeCompleteCallback(_))
.Times(AtLeast(1));
EXPECT_CALL(frame_reader_mock_, NumberOfFrames())
.WillOnce(Return(1));
EXPECT_CALL(frame_reader_mock_, FrameLength())
.WillOnce(Return(152064));
EXPECT_CALL(frame_reader_mock_, NumberOfFrames()).WillOnce(Return(1));
EXPECT_CALL(frame_reader_mock_, FrameLength()).WillOnce(Return(152064));
}
};

TEST_F(VideoProcessorTest, Init) {
ExpectInit();
VideoProcessorImpl video_processor(&encoder_mock_, &decoder_mock_,
&frame_reader_mock_,
&frame_writer_mock_,
&packet_manipulator_mock_, config_,
&stats_);
VideoProcessorImpl video_processor(
&encoder_mock_, &decoder_mock_, &frame_reader_mock_, &frame_writer_mock_,
&packet_manipulator_mock_, config_, &stats_);
ASSERT_TRUE(video_processor.Init());
}

TEST_F(VideoProcessorTest, ProcessFrame) {
ExpectInit();
EXPECT_CALL(encoder_mock_, Encode(_, _, _))
.Times(1);
EXPECT_CALL(frame_reader_mock_, ReadFrame(_))
.WillOnce(Return(true));
EXPECT_CALL(encoder_mock_, Encode(_, _, _)).Times(1);
EXPECT_CALL(frame_reader_mock_, ReadFrame(_)).WillOnce(Return(true));
// Since we don't return any callback from the mock, the decoder will not
// be more than initialized...
VideoProcessorImpl video_processor(&encoder_mock_, &decoder_mock_,
&frame_reader_mock_,
&frame_writer_mock_,
&packet_manipulator_mock_, config_,
&stats_);
VideoProcessorImpl video_processor(
&encoder_mock_, &decoder_mock_, &frame_reader_mock_, &frame_writer_mock_,
&packet_manipulator_mock_, config_, &stats_);
ASSERT_TRUE(video_processor.Init());
video_processor.ProcessFrame(0);
}
@ -16,7 +16,7 @@
#include <sys/stat.h> // To check for directory existence.

#ifndef S_ISDIR // Not defined in stat.h on Windows.
#define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR)
#define S_ISDIR(mode) (((mode)&S_IFMT) == S_IFDIR)
#endif

#include "gflags/gflags.h"
@ -34,68 +34,102 @@
#include "webrtc/test/testsupport/packet_reader.h"

DEFINE_string(test_name, "Quality test", "The name of the test to run. ");
DEFINE_string(test_description, "", "A more detailed description about what "
DEFINE_string(test_description,
"",
"A more detailed description about what "
"the current test is about.");
DEFINE_string(input_filename, "", "Input file. "
DEFINE_string(input_filename,
"",
"Input file. "
"The source video file to be encoded and decoded. Must be in "
".yuv format");
DEFINE_int32(width, -1, "Width in pixels of the frames in the input file.");
DEFINE_int32(height, -1, "Height in pixels of the frames in the input file.");
DEFINE_int32(framerate, 30, "Frame rate of the input file, in FPS "
DEFINE_int32(framerate,
30,
"Frame rate of the input file, in FPS "
"(frames-per-second). ");
DEFINE_string(output_dir, ".", "Output directory. "
DEFINE_string(output_dir,
".",
"Output directory. "
"The directory where the output file will be put. Must already "
"exist.");
DEFINE_bool(use_single_core, false, "Force using a single core. If set to "
DEFINE_bool(use_single_core,
false,
"Force using a single core. If set to "
"true, only one core will be used for processing. Using a single "
"core is necessary to get a deterministic behavior for the"
"encoded frames - using multiple cores will produce different "
"encoded frames since multiple cores are competing to consume the "
"byte budget for each frame in parallel. If set to false, "
"the maximum detected number of cores will be used. ");
DEFINE_bool(disable_fixed_random_seed , false, "Set this flag to disable the"
DEFINE_bool(disable_fixed_random_seed,
false,
"Set this flag to disable the"
"usage of a fixed random seed for the random generator used "
"for packet loss. Disabling this will cause consecutive runs "
"loose packets at different locations, which is bad for "
"reproducibility.");
DEFINE_string(output_filename, "", "Output file. "
DEFINE_string(output_filename,
"",
"Output file. "
"The name of the output video file resulting of the processing "
"of the source file. By default this is the same name as the "
"input file with '_out' appended before the extension.");
DEFINE_int32(bitrate, 500, "Bit rate in kilobits/second.");
DEFINE_int32(keyframe_interval, 0, "Forces a keyframe every Nth frame. "
DEFINE_int32(keyframe_interval,
0,
"Forces a keyframe every Nth frame. "
"0 means the encoder decides when to insert keyframes. Note that "
"the encoder may create a keyframe in other locations in addition "
"to the interval that is set using this parameter.");
DEFINE_int32(temporal_layers, 0, "The number of temporal layers to use "
DEFINE_int32(temporal_layers,
0,
"The number of temporal layers to use "
"(VP8 specific codec setting). Must be 0-4.");
DEFINE_int32(packet_size, 1500, "Simulated network packet size in bytes (MTU). "
DEFINE_int32(packet_size,
1500,
"Simulated network packet size in bytes (MTU). "
"Used for packet loss simulation.");
DEFINE_int32(max_payload_size, 1440, "Max payload size in bytes for the "
DEFINE_int32(max_payload_size,
1440,
"Max payload size in bytes for the "
"encoder.");
DEFINE_string(packet_loss_mode, "uniform", "Packet loss mode. Two different "
DEFINE_string(packet_loss_mode,
"uniform",
"Packet loss mode. Two different "
"packet loss models are supported: uniform or burst. This "
"setting has no effect unless packet_loss_rate is >0. ");
DEFINE_double(packet_loss_probability, 0.0, "Packet loss probability. A value "
DEFINE_double(packet_loss_probability,
0.0,
"Packet loss probability. A value "
"between 0.0 and 1.0 that defines the probability of a packet "
"being lost. 0.1 means 10% and so on.");
DEFINE_int32(packet_loss_burst_length, 1, "Packet loss burst length. Defines "
DEFINE_int32(packet_loss_burst_length,
1,
"Packet loss burst length. Defines "
"how many packets will be lost in a burst when a packet has been "
"decided to be lost. Must be >=1.");
DEFINE_bool(csv, false, "CSV output. Enabling this will output all frame "
DEFINE_bool(csv,
false,
"CSV output. Enabling this will output all frame "
"statistics at the end of execution. Recommended to run combined "
"with --noverbose to avoid mixing output.");
DEFINE_bool(python, false, "Python output. Enabling this will output all frame "
DEFINE_bool(python,
false,
"Python output. Enabling this will output all frame "
"statistics as a Python script at the end of execution. "
"Recommended to run combine with --noverbose to avoid mixing "
"output.");
DEFINE_bool(verbose, true, "Verbose mode. Prints a lot of debugging info. "
DEFINE_bool(verbose,
true,
"Verbose mode. Prints a lot of debugging info. "
"Suitable for tracking progress but not for capturing output. "
"Disable with --noverbose flag.");

// Custom log method that only prints if the verbose flag is given.
// Supports all the standard printf parameters and formatting (just forwarded).
int Log(const char *format, ...) {
int Log(const char* format, ...) {
int result = 0;
if (FLAGS_verbose) {
va_list args;
@ -148,16 +182,16 @@ int HandleCommandLineFlags(webrtc::test::TestConfig* config) {
startIndex = 0;
}
FLAGS_output_filename =
FLAGS_input_filename.substr(startIndex,
FLAGS_input_filename.find_last_of(".")
- startIndex) + "_out.yuv";
FLAGS_input_filename.substr(
startIndex, FLAGS_input_filename.find_last_of(".") - startIndex) +
"_out.yuv";
}

// Verify output file can be written.
if (FLAGS_output_dir == ".") {
config->output_filename = FLAGS_output_filename;
} else {
config->output_filename = FLAGS_output_dir + "/"+ FLAGS_output_filename;
config->output_filename = FLAGS_output_dir + "/" + FLAGS_output_filename;
}
test_file = fopen(config->output_filename.c_str(), "wb");
if (test_file == NULL) {
@ -232,7 +266,8 @@ int HandleCommandLineFlags(webrtc::test::TestConfig* config) {
// Check packet loss settings
if (FLAGS_packet_loss_mode != "uniform" &&
FLAGS_packet_loss_mode != "burst") {
fprintf(stderr, "Unsupported packet loss mode, must be 'uniform' or "
fprintf(stderr,
"Unsupported packet loss mode, must be 'uniform' or "
"'burst'\n.");
return 10;
}
@ -243,16 +278,20 @@ int HandleCommandLineFlags(webrtc::test::TestConfig* config) {

if (FLAGS_packet_loss_probability < 0.0 ||
FLAGS_packet_loss_probability > 1.0) {
fprintf(stderr, "Invalid packet loss probability. Must be 0.0 - 1.0, "
"was: %f\n", FLAGS_packet_loss_probability);
fprintf(stderr,
"Invalid packet loss probability. Must be 0.0 - 1.0, "
"was: %f\n",
FLAGS_packet_loss_probability);
return 11;
}
config->networking_config.packet_loss_probability =
FLAGS_packet_loss_probability;

if (FLAGS_packet_loss_burst_length < 1) {
fprintf(stderr, "Invalid packet loss burst length, must be >=1, "
"was: %d\n", FLAGS_packet_loss_burst_length);
fprintf(stderr,
"Invalid packet loss burst length, must be >=1, "
"was: %d\n",
FLAGS_packet_loss_burst_length);
return 12;
}
config->networking_config.packet_loss_burst_length =
@ -264,10 +303,9 @@ int HandleCommandLineFlags(webrtc::test::TestConfig* config) {
void CalculateSsimVideoMetrics(webrtc::test::TestConfig* config,
webrtc::test::QualityMetricsResult* result) {
Log("Calculating SSIM...\n");
I420SSIMFromFiles(config->input_filename.c_str(),
config->output_filename.c_str(),
config->codec_settings->width,
config->codec_settings->height, result);
I420SSIMFromFiles(
config->input_filename.c_str(), config->output_filename.c_str(),
config->codec_settings->width, config->codec_settings->height, result);
Log(" Average: %3.2f\n", result->average);
Log(" Min : %3.2f (frame %d)\n", result->min, result->min_frame_number);
Log(" Max : %3.2f (frame %d)\n", result->max, result->max_frame_number);
@ -276,10 +314,9 @@ void CalculateSsimVideoMetrics(webrtc::test::TestConfig* config,
void CalculatePsnrVideoMetrics(webrtc::test::TestConfig* config,
webrtc::test::QualityMetricsResult* result) {
Log("Calculating PSNR...\n");
I420PSNRFromFiles(config->input_filename.c_str(),
config->output_filename.c_str(),
config->codec_settings->width,
config->codec_settings->height, result);
I420PSNRFromFiles(
config->input_filename.c_str(), config->output_filename.c_str(),
config->codec_settings->width, config->codec_settings->height, result);
Log(" Average: %3.2f\n", result->average);
Log(" Min : %3.2f (frame %d)\n", result->min, result->min_frame_number);
Log(" Max : %3.2f (frame %d)\n", result->max, result->max_frame_number);
@ -309,9 +346,11 @@ void PrintConfigurationSummary(const webrtc::test::TestConfig& config) {
void PrintCsvOutput(const webrtc::test::Stats& stats,
const webrtc::test::QualityMetricsResult& ssim_result,
const webrtc::test::QualityMetricsResult& psnr_result) {
Log("\nCSV output (recommended to run with --noverbose to skip the "
Log(
"\nCSV output (recommended to run with --noverbose to skip the "
"above output)\n");
printf("frame_number encoding_successful decoding_successful "
printf(
"frame_number encoding_successful decoding_successful "
"encode_return_code decode_return_code "
"encode_time_in_us decode_time_in_us "
"bit_rate_in_kbps encoded_frame_length_in_bytes frame_type "
@ -322,22 +361,13 @@ void PrintCsvOutput(const webrtc::test::Stats& stats,
const webrtc::test::FrameStatistic& f = stats.stats_[i];
const webrtc::test::FrameResult& ssim = ssim_result.frames[i];
const webrtc::test::FrameResult& psnr = psnr_result.frames[i];
printf("%4d, %d, %d, %2d, %2d, %6d, %6d, %5d, %7" PRIuS ", %d, %2d, %2"
PRIuS ", %5.3f, %5.2f\n",
f.frame_number,
f.encoding_successful,
f.decoding_successful,
f.encode_return_code,
f.decode_return_code,
f.encode_time_in_us,
f.decode_time_in_us,
f.bit_rate_in_kbps,
f.encoded_frame_length_in_bytes,
f.frame_type,
f.packets_dropped,
f.total_packets,
ssim.value,
psnr.value);
printf("%4d, %d, %d, %2d, %2d, %6d, %6d, %5d, %7" PRIuS
", %d, %2d, %2" PRIuS ", %5.3f, %5.2f\n",
f.frame_number, f.encoding_successful, f.decoding_successful,
f.encode_return_code, f.decode_return_code, f.encode_time_in_us,
f.decode_time_in_us, f.bit_rate_in_kbps,
f.encoded_frame_length_in_bytes, f.frame_type, f.packets_dropped,
f.total_packets, ssim.value, psnr.value);
}
}

@ -345,22 +375,27 @@ void PrintPythonOutput(const webrtc::test::TestConfig& config,
const webrtc::test::Stats& stats,
const webrtc::test::QualityMetricsResult& ssim_result,
const webrtc::test::QualityMetricsResult& psnr_result) {
Log("\nPython output (recommended to run with --noverbose to skip the "
Log(
"\nPython output (recommended to run with --noverbose to skip the "
"above output)\n");
printf("test_configuration = ["
printf(
"test_configuration = ["
"{'name': 'name', 'value': '%s'},\n"
"{'name': 'description', 'value': '%s'},\n"
"{'name': 'test_number', 'value': '%d'},\n"
"{'name': 'input_filename', 'value': '%s'},\n"
"{'name': 'output_filename', 'value': '%s'},\n"
"{'name': 'output_dir', 'value': '%s'},\n"
"{'name': 'packet_size_in_bytes', 'value': '%" PRIuS "'},\n"
"{'name': 'max_payload_size_in_bytes', 'value': '%" PRIuS "'},\n"
"{'name': 'packet_size_in_bytes', 'value': '%" PRIuS
"'},\n"
"{'name': 'max_payload_size_in_bytes', 'value': '%" PRIuS
"'},\n"
"{'name': 'packet_loss_mode', 'value': '%s'},\n"
"{'name': 'packet_loss_probability', 'value': '%f'},\n"
"{'name': 'packet_loss_burst_length', 'value': '%d'},\n"
"{'name': 'exclude_frame_types', 'value': '%s'},\n"
"{'name': 'frame_length_in_bytes', 'value': '%" PRIuS "'},\n"
"{'name': 'frame_length_in_bytes', 'value': '%" PRIuS
"'},\n"
"{'name': 'use_single_core', 'value': '%s'},\n"
"{'name': 'keyframe_interval;', 'value': '%d'},\n"
"{'name': 'video_codec_type', 'value': '%s'},\n"
@ -368,26 +403,21 @@ void PrintPythonOutput(const webrtc::test::TestConfig& config,
"{'name': 'height', 'value': '%d'},\n"
"{'name': 'bit_rate_in_kbps', 'value': '%d'},\n"
"]\n",
config.name.c_str(),
config.description.c_str(),
config.test_number,
config.input_filename.c_str(),
config.output_filename.c_str(),
config.output_dir.c_str(),
config.networking_config.packet_size_in_bytes,
config.name.c_str(), config.description.c_str(), config.test_number,
config.input_filename.c_str(), config.output_filename.c_str(),
config.output_dir.c_str(), config.networking_config.packet_size_in_bytes,
config.networking_config.max_payload_size_in_bytes,
PacketLossModeToStr(config.networking_config.packet_loss_mode),
config.networking_config.packet_loss_probability,
config.networking_config.packet_loss_burst_length,
ExcludeFrameTypesToStr(config.exclude_frame_types),
config.frame_length_in_bytes,
config.use_single_core ? "True " : "False",
config.frame_length_in_bytes, config.use_single_core ? "True " : "False",
config.keyframe_interval,
webrtc::test::VideoCodecTypeToStr(config.codec_settings->codecType),
config.codec_settings->width,
config.codec_settings->height,
config.codec_settings->width, config.codec_settings->height,
config.codec_settings->startBitrate);
printf("frame_data_types = {"
printf(
"frame_data_types = {"
"'frame_number': ('number', 'Frame number'),\n"
"'encoding_successful': ('boolean', 'Encoding successful?'),\n"
"'decoding_successful': ('boolean', 'Decoding successful?'),\n"
@ -409,27 +439,21 @@ void PrintPythonOutput(const webrtc::test::TestConfig& config,
const webrtc::test::FrameStatistic& f = stats.stats_[i];
const webrtc::test::FrameResult& ssim = ssim_result.frames[i];
const webrtc::test::FrameResult& psnr = psnr_result.frames[i];
printf("{'frame_number': %d, "
printf(
"{'frame_number': %d, "
"'encoding_successful': %s, 'decoding_successful': %s, "
"'encode_time': %d, 'decode_time': %d, "
"'encode_return_code': %d, 'decode_return_code': %d, "
"'bit_rate': %d, 'encoded_frame_length': %" PRIuS ", "
"'bit_rate': %d, 'encoded_frame_length': %" PRIuS
", "
"'frame_type': %s, 'packets_dropped': %d, "
"'total_packets': %" PRIuS ", 'ssim': %f, 'psnr': %f},\n",
f.frame_number,
f.encoding_successful ? "True " : "False",
f.decoding_successful ? "True " : "False",
f.encode_time_in_us,
f.decode_time_in_us,
f.encode_return_code,
f.decode_return_code,
f.bit_rate_in_kbps,
f.encoded_frame_length_in_bytes,
f.frame_number, f.encoding_successful ? "True " : "False",
f.decoding_successful ? "True " : "False", f.encode_time_in_us,
f.decode_time_in_us, f.encode_return_code, f.decode_return_code,
f.bit_rate_in_kbps, f.encoded_frame_length_in_bytes,
f.frame_type == webrtc::kVideoFrameDelta ? "'Delta'" : "'Other'",
f.packets_dropped,
f.total_packets,
ssim.value,
psnr.value);
f.packets_dropped, f.total_packets, ssim.value, psnr.value);
}
printf("]\n");
}
@ -438,9 +462,13 @@ void PrintPythonOutput(const webrtc::test::TestConfig& config,
// The input file must be in YUV format.
int main(int argc, char* argv[]) {
std::string program_name = argv[0];
std::string usage = "Quality test application for video comparisons.\n"
"Run " + program_name + " --helpshort for usage.\n"
"Example usage:\n" + program_name +
std::string usage =
"Quality test application for video comparisons.\n"
"Run " +
program_name +
" --helpshort for usage.\n"
"Example usage:\n" +
program_name +
" --input_filename=filename.yuv --width=352 --height=288\n";
google::SetUsageMessage(usage);

@ -478,10 +506,8 @@ int main(int argc, char* argv[]) {
packet_manipulator.InitializeRandomSeed(time(NULL));
}
webrtc::test::VideoProcessor* processor =
new webrtc::test::VideoProcessorImpl(encoder, decoder,
&frame_reader,
&frame_writer,
&packet_manipulator,
new webrtc::test::VideoProcessorImpl(encoder, decoder, &frame_reader,
&frame_writer, &packet_manipulator,
config, &stats);
processor->Init();

@ -41,7 +41,7 @@ int DefaultTemporalLayers::CurrentLayerId() const {
int index = pattern_idx_ % temporal_ids_length_;
assert(index >= 0);
return temporal_ids_[index];
}
}

bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
int max_bitrate_kbit,
@ -56,8 +56,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_periodicity = temporal_ids_length_;
cfg->ts_target_bitrate[0] = bitrateKbit;
cfg->ts_rate_decimator[0] = 1;
memcpy(cfg->ts_layer_id,
temporal_ids_,
memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 1;
temporal_pattern_[0] = kTemporalUpdateLastRefAll;
@ -74,8 +73,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_target_bitrate[1] = bitrateKbit;
cfg->ts_rate_decimator[0] = 2;
cfg->ts_rate_decimator[1] = 1;
memcpy(cfg->ts_layer_id,
temporal_ids_,
memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 8;
temporal_pattern_[0] = kTemporalUpdateLastAndGoldenRefAltRef;
@ -103,8 +101,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_rate_decimator[0] = 4;
cfg->ts_rate_decimator[1] = 2;
cfg->ts_rate_decimator[2] = 1;
memcpy(cfg->ts_layer_id,
temporal_ids_,
memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 8;
temporal_pattern_[0] = kTemporalUpdateLastAndGoldenRefAltRef;
@ -138,8 +135,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_rate_decimator[1] = 4;
cfg->ts_rate_decimator[2] = 2;
cfg->ts_rate_decimator[3] = 1;
memcpy(cfg->ts_layer_id,
temporal_ids_,
memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 16;
temporal_pattern_[0] = kTemporalUpdateLast;
@ -243,7 +239,7 @@ int DefaultTemporalLayers::EncodeFlags(uint32_t timestamp) {

void DefaultTemporalLayers::PopulateCodecSpecific(
bool base_layer_sync,
CodecSpecificInfoVP8 *vp8_info,
CodecSpecificInfoVP8* vp8_info,
uint32_t timestamp) {
assert(number_of_temporal_layers_ > 0);
assert(0 < temporal_ids_length_);
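For context on the libvpx fields these ConfigureBitrates() hunks keep touching: ts_target_bitrate is cumulative per layer, ts_rate_decimator divides the input frame rate for each layer, and ts_layer_id spells out the repeating layer pattern. A minimal two-layer sketch against the libvpx encoder API follows; the 60/40 split and the helper name are illustrative assumptions, not taken from this CL.

#include <cstring>

#include "vpx/vpx_encoder.h"

// Hypothetical helper: configure a 2-layer temporal structure on |cfg|.
void ConfigureTwoTemporalLayers(vpx_codec_enc_cfg_t* cfg,
                                unsigned int bitrate_kbit) {
  static const unsigned int kTemporalIds[2] = {0, 1};  // base, enhancement
  cfg->ts_number_layers = 2;
  cfg->ts_periodicity = 2;  // The layer-id pattern repeats every 2 frames.
  // Cumulative targets: base layer alone, then base + enhancement.
  cfg->ts_target_bitrate[0] = bitrate_kbit * 6 / 10;  // assumed ~60% base
  cfg->ts_target_bitrate[1] = bitrate_kbit;
  cfg->ts_rate_decimator[0] = 2;  // base layer carries half the frames
  cfg->ts_rate_decimator[1] = 1;  // both layers together: full frame rate
  memcpy(cfg->ts_layer_id, kTemporalIds, sizeof(kTemporalIds));
}

This mirrors the shape of the two-layer branch above (decimators 2 and 1, periodicity equal to the length of the layer-id pattern), which is why the lint reflow of the memcpy calls is behavior-neutral.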
@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/


#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.h"
@ -19,47 +18,36 @@
namespace webrtc {

enum {
kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF |
kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_REF_ARF,
kTemporalUpdateGoldenWithoutDependency = VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_UPD_ARF |
kTemporalUpdateGoldenWithoutDependency =
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST,
kTemporalUpdateGolden = VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_UPD_ARF |
kTemporalUpdateGolden =
VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
kTemporalUpdateAltrefWithoutDependency =
VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_LAST,
kTemporalUpdateAltrefWithoutDependency = VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_LAST,
kTemporalUpdateAltref = VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_LAST,
kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF |
kTemporalUpdateAltref = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_LAST,
kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,
kTemporalUpdateNoneNoRefAltRef = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,
kTemporalUpdateNoneNoRefAltRef = VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_UPD_GF |
kTemporalUpdateNoneNoRefGolden = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,
kTemporalUpdateNoneNoRefGolden = VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,
kTemporalUpdateGoldenWithoutDependencyRefAltRef = VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST,
kTemporalUpdateGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST,
kTemporalUpdateLastRefAltRef = VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF,
kTemporalUpdateLastAndGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF,
kTemporalUpdateGoldenWithoutDependencyRefAltRef =
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
kTemporalUpdateGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
kTemporalUpdateLastRefAltRef =
VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF,
kTemporalUpdateLastAndGoldenRefAltRef =
VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF,
};

TEST(TemporalLayersTest, 2Layers) {
@ -68,7 +56,8 @@ TEST(TemporalLayersTest, 2Layers) {
CodecSpecificInfoVP8 vp8_info;
tl.ConfigureBitrates(500, 500, 30, &cfg);

int expected_flags[16] = { kTemporalUpdateLastAndGoldenRefAltRef,
int expected_flags[16] = {
kTemporalUpdateLastAndGoldenRefAltRef,
kTemporalUpdateGoldenWithoutDependencyRefAltRef,
kTemporalUpdateLastRefAltRef,
kTemporalUpdateGoldenRefAltRef,
@ -85,12 +74,12 @@ TEST(TemporalLayersTest, 2Layers) {
kTemporalUpdateLastRefAltRef,
kTemporalUpdateNone,
};
int expected_temporal_idx[16] =
{ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 };
int expected_temporal_idx[16] = {0, 1, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 1, 0, 1};

bool expected_layer_sync[16] =
{ false, true, false, false, false, false, false, false,
false, true, false, false, false, false, false, false };
bool expected_layer_sync[16] = {false, true, false, false, false, false,
false, false, false, true, false, false,
false, false, false, false};

uint32_t timestamp = 0;
for (int i = 0; i < 16; ++i) {
@ -108,7 +97,8 @@ TEST(TemporalLayersTest, 3Layers) {
CodecSpecificInfoVP8 vp8_info;
tl.ConfigureBitrates(500, 500, 30, &cfg);

int expected_flags[16] = { kTemporalUpdateLastAndGoldenRefAltRef,
int expected_flags[16] = {
kTemporalUpdateLastAndGoldenRefAltRef,
kTemporalUpdateNoneNoRefGolden,
kTemporalUpdateGoldenWithoutDependencyRefAltRef,
kTemporalUpdateNone,
@ -125,12 +115,12 @@ TEST(TemporalLayersTest, 3Layers) {
kTemporalUpdateGoldenRefAltRef,
kTemporalUpdateNone,
};
int expected_temporal_idx[16] =
{ 0, 2, 1, 2, 0, 2, 1, 2, 0, 2, 1, 2, 0, 2, 1, 2 };
int expected_temporal_idx[16] = {0, 2, 1, 2, 0, 2, 1, 2,
0, 2, 1, 2, 0, 2, 1, 2};

bool expected_layer_sync[16] =
{ false, true, true, false, false, false, false, false,
false, true, true, false, false, false, false, false };
bool expected_layer_sync[16] = {false, true, true, false, false, false,
false, false, false, true, true, false,
false, false, false, false};

unsigned int timestamp = 0;
for (int i = 0; i < 16; ++i) {
@ -165,12 +155,12 @@ TEST(TemporalLayersTest, 4Layers) {
kTemporalUpdateAltref,
kTemporalUpdateNone,
};
int expected_temporal_idx[16] =
{ 0, 3, 2, 3, 1, 3, 2, 3, 0, 3, 2, 3, 1, 3, 2, 3 };
int expected_temporal_idx[16] = {0, 3, 2, 3, 1, 3, 2, 3,
0, 3, 2, 3, 1, 3, 2, 3};

bool expected_layer_sync[16] =
{ false, true, true, true, true, true, false, true,
false, true, false, true, false, true, false, true };
bool expected_layer_sync[16] = {false, true, true, true, true, true,
false, true, false, true, false, true,
false, true, false, true};

uint32_t timestamp = 0;
for (int i = 0; i < 16; ++i) {
@ -198,8 +188,7 @@ TEST(TemporalLayersTest, KeyFrame) {
kTemporalUpdateGoldenRefAltRef,
kTemporalUpdateNone,
};
int expected_temporal_idx[8] =
{ 0, 0, 0, 0, 0, 0, 0, 2};
int expected_temporal_idx[8] = {0, 0, 0, 0, 0, 0, 0, 2};

uint32_t timestamp = 0;
for (int i = 0; i < 7; ++i) {
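The enum rewrites above are pure formatting, but the flag algebra is easy to misread: each kTemporalUpdate* constant is a bitmask passed to the encoder per frame, and the NO_UPD/NO_REF bits control which reference buffers that frame may write or read. A hedged sketch of the base-layer case, using only flags that exist in vpx/vp8cx.h (the constant name here is an assumption for illustration):

#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"

// A base-layer frame in the two-layer pattern: it updates only the last
// reference and never reads golden/altref, so enhancement-layer frames can
// be dropped without breaking base-layer decodability.
const vpx_enc_frame_flags_t kIllustrativeBaseLayerFlags =
    VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
    VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;

That decodability-after-dropping property is what the expected_flags, expected_temporal_idx, and expected_layer_sync arrays in these tests pin down frame by frame.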
@ -21,15 +21,14 @@ class VP8Encoder : public VideoEncoder {
public:
static VP8Encoder* Create();

virtual ~VP8Encoder() {};
virtual ~VP8Encoder() {}
}; // end of VP8Encoder class


class VP8Decoder : public VideoDecoder {
public:
static VP8Decoder* Create();

virtual ~VP8Decoder() {};
virtual ~VP8Decoder() {}
}; // end of VP8Decoder class
} // namespace webrtc

@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/

#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_COMMON_TYPES_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_COMMON_TYPES_H_
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_

#include "webrtc/common_types.h"

@ -26,4 +26,4 @@ static const float
};

} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_COMMON_TYPES_H_
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
@ -23,7 +23,8 @@ namespace webrtc {
namespace {
enum {
kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF,
VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_REF_ARF,

kTemporalUpdateGolden =
VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
@ -37,13 +38,15 @@ enum {
kTemporalUpdateAltref | VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF,

kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,

kTemporalUpdateNoneNoRefAltref = kTemporalUpdateNone | VP8_EFLAG_NO_REF_ARF,

kTemporalUpdateNoneNoRefGoldenRefAltRef =
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,

kTemporalUpdateGoldenWithoutDependencyRefAltRef =
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
@ -135,10 +138,12 @@ class RealTimeTemporalLayers : public TemporalLayers {
static const int encode_flags[] = {
kTemporalUpdateLastAndGoldenRefAltRef,
kTemporalUpdateGoldenWithoutDependencyRefAltRef,
kTemporalUpdateLastRefAltRef, kTemporalUpdateGoldenRefAltRef,
kTemporalUpdateLastRefAltRef, kTemporalUpdateGoldenRefAltRef,
kTemporalUpdateLastRefAltRef, kTemporalUpdateNone
};
kTemporalUpdateLastRefAltRef,
kTemporalUpdateGoldenRefAltRef,
kTemporalUpdateLastRefAltRef,
kTemporalUpdateGoldenRefAltRef,
kTemporalUpdateLastRefAltRef,
kTemporalUpdateNone};
encode_flags_length_ = sizeof(encode_flags) / sizeof(*layer_ids);
encode_flags_ = encode_flags;

@ -155,10 +160,12 @@ class RealTimeTemporalLayers : public TemporalLayers {
static const int encode_flags[] = {
kTemporalUpdateLastAndGoldenRefAltRef,
kTemporalUpdateNoneNoRefGoldenRefAltRef,
kTemporalUpdateGoldenWithoutDependencyRefAltRef, kTemporalUpdateNone,
kTemporalUpdateLastRefAltRef, kTemporalUpdateNone,
kTemporalUpdateGoldenRefAltRef, kTemporalUpdateNone
};
kTemporalUpdateGoldenWithoutDependencyRefAltRef,
kTemporalUpdateNone,
kTemporalUpdateLastRefAltRef,
kTemporalUpdateNone,
kTemporalUpdateGoldenRefAltRef,
kTemporalUpdateNone};
encode_flags_length_ = sizeof(encode_flags) / sizeof(*layer_ids);
encode_flags_ = encode_flags;

@ -172,8 +179,8 @@ class RealTimeTemporalLayers : public TemporalLayers {
assert(false);
return false;
}
memcpy(
cfg->ts_layer_id, layer_ids_, sizeof(unsigned int) * layer_ids_length_);
memcpy(cfg->ts_layer_id, layer_ids_,
sizeof(unsigned int) * layer_ids_length_);
return true;
}

@ -25,8 +25,7 @@ ReferencePictureSelection::ReferencePictureSelection()
last_sent_ref_update_time_(0),
established_ref_picture_id_(0),
last_refresh_time_(0),
rtt_(0) {
}
rtt_(0) {}

void ReferencePictureSelection::Init() {
update_golden_next_ = true;
@ -62,7 +61,8 @@ bool ReferencePictureSelection::ReceivedSLI(uint32_t now_ts) {
return send_refresh;
}

int ReferencePictureSelection::EncodeFlags(int picture_id, bool send_refresh,
int ReferencePictureSelection::EncodeFlags(int picture_id,
bool send_refresh,
uint32_t now_ts) {
int flags = 0;
// We can't refresh the decoder until we have established the key frame.
@ -22,25 +22,19 @@ static const uint32_t kMinUpdateInterval = 10;
// Should match the values set in reference_picture_selection.h
static const int kRtt = 10;

static const int kNoPropagationGolden = VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF;
static const int kNoPropagationAltRef = VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF;
static const int kPropagateGolden = VP8_EFLAG_FORCE_GF |
VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_REF_LAST;
static const int kPropagateAltRef = VP8_EFLAG_FORCE_ARF |
VP8_EFLAG_NO_UPD_GF |
static const int kNoPropagationGolden =
VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
static const int kNoPropagationAltRef =
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
static const int kPropagateGolden = VP8_EFLAG_FORCE_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST;
static const int kPropagateAltRef = VP8_EFLAG_FORCE_ARF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_REF_LAST;
static const int kRefreshFromGolden = VP8_EFLAG_NO_REF_LAST |
VP8_EFLAG_NO_REF_ARF;
static const int kRefreshFromAltRef = VP8_EFLAG_NO_REF_LAST |
VP8_EFLAG_NO_REF_GF;

static const int kRefreshFromGolden =
VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_ARF;
static const int kRefreshFromAltRef =
VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF;

class TestRPS : public ::testing::Test {
protected:
@ -84,15 +78,15 @@ TEST_F(TestRPS, TestDecoderRefresh) {
EXPECT_EQ(rps_.ReceivedSLI(90 * time), true);
// Enough time have elapsed since the previous reference propagation, we will
// therefore get both a refresh from golden and a propagation of alt-ref.
EXPECT_EQ(rps_.EncodeFlags(5, true, 90 * time), kRefreshFromGolden |
kPropagateAltRef);
EXPECT_EQ(rps_.EncodeFlags(5, true, 90 * time),
kRefreshFromGolden | kPropagateAltRef);
rps_.ReceivedRPSI(5);
time += kRtt + 1;
// Enough time for a new refresh, but not enough time for a reference
// propagation.
EXPECT_EQ(rps_.ReceivedSLI(90 * time), true);
EXPECT_EQ(rps_.EncodeFlags(6, true, 90 * time), kRefreshFromAltRef |
kNoPropagationAltRef);
EXPECT_EQ(rps_.EncodeFlags(6, true, 90 * time),
kRefreshFromAltRef | kNoPropagationAltRef);
}

TEST_F(TestRPS, TestWrap) {
@ -11,6 +11,8 @@

#include <stdlib.h>

#include <algorithm>

#include "webrtc/base/checks.h"
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
@ -188,7 +190,7 @@ void ScreenshareLayers::FrameEncoded(unsigned int size,
}

void ScreenshareLayers::PopulateCodecSpecific(bool base_layer_sync,
CodecSpecificInfoVP8 *vp8_info,
CodecSpecificInfoVP8* vp8_info,
uint32_t timestamp) {
int64_t unwrapped_timestamp = time_wrap_handler_.Unwrap(timestamp);
if (number_of_temporal_layers_ == 1) {
@ -215,9 +215,7 @@ int SimulcastEncoderAdapter::InitEncode(const VideoCodec* inst,
|
||||
}
|
||||
|
||||
VideoEncoder* encoder = factory_->Create();
|
||||
ret = encoder->InitEncode(&stream_codec,
|
||||
number_of_cores,
|
||||
max_payload_size);
|
||||
ret = encoder->InitEncode(&stream_codec, number_of_cores, max_payload_size);
|
||||
if (ret < 0) {
|
||||
Release();
|
||||
return ret;
|
||||
@ -284,35 +282,25 @@ int SimulcastEncoderAdapter::Encode(
|
||||
// scale it to match what the encoder expects (below).
|
||||
if ((dst_width == src_width && dst_height == src_height) ||
|
||||
input_image.IsZeroSize()) {
|
||||
streaminfos_[stream_idx].encoder->Encode(input_image,
|
||||
codec_specific_info,
|
||||
streaminfos_[stream_idx].encoder->Encode(input_image, codec_specific_info,
|
||||
&stream_frame_types);
|
||||
} else {
|
||||
VideoFrame dst_frame;
|
||||
// Making sure that destination frame is of sufficient size.
|
||||
// Aligning stride values based on width.
|
||||
dst_frame.CreateEmptyFrame(dst_width, dst_height,
|
||||
dst_width, (dst_width + 1) / 2,
|
||||
(dst_width + 1) / 2);
|
||||
libyuv::I420Scale(input_image.buffer(kYPlane),
|
||||
input_image.stride(kYPlane),
|
||||
input_image.buffer(kUPlane),
|
||||
input_image.stride(kUPlane),
|
||||
input_image.buffer(kVPlane),
|
||||
input_image.stride(kVPlane),
|
||||
src_width, src_height,
|
||||
dst_frame.buffer(kYPlane),
|
||||
dst_frame.stride(kYPlane),
|
||||
dst_frame.buffer(kUPlane),
|
||||
dst_frame.stride(kUPlane),
|
||||
dst_frame.buffer(kVPlane),
|
||||
dst_frame.stride(kVPlane),
|
||||
dst_width, dst_height,
|
||||
libyuv::kFilterBilinear);
|
||||
dst_frame.CreateEmptyFrame(dst_width, dst_height, dst_width,
|
||||
(dst_width + 1) / 2, (dst_width + 1) / 2);
|
||||
libyuv::I420Scale(
|
||||
input_image.buffer(kYPlane), input_image.stride(kYPlane),
|
||||
input_image.buffer(kUPlane), input_image.stride(kUPlane),
|
||||
input_image.buffer(kVPlane), input_image.stride(kVPlane), src_width,
|
||||
src_height, dst_frame.buffer(kYPlane), dst_frame.stride(kYPlane),
|
||||
dst_frame.buffer(kUPlane), dst_frame.stride(kUPlane),
|
||||
dst_frame.buffer(kVPlane), dst_frame.stride(kVPlane), dst_width,
|
||||
dst_height, libyuv::kFilterBilinear);
|
||||
dst_frame.set_timestamp(input_image.timestamp());
|
||||
dst_frame.set_render_time_ms(input_image.render_time_ms());
|
||||
streaminfos_[stream_idx].encoder->Encode(dst_frame,
|
||||
codec_specific_info,
|
||||
streaminfos_[stream_idx].encoder->Encode(dst_frame, codec_specific_info,
|
||||
&stream_frame_types);
|
||||
}
|
||||
}
|
||||
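The reflowed call above is the standard libyuv plane-by-plane scaling entry point. A self-contained sketch of the same call, outside the adapter (buffer names are illustrative; each I420 plane carries its own stride, and chroma planes are half the luma dimensions, rounded up):

#include "libyuv/scale.h"
#include <cstdint>

// Downscale one I420 image with bilinear filtering, as the adapter does
// before feeding a lower simulcast stream.
void DownscaleI420(const uint8_t* src_y, int src_stride_y,
                   const uint8_t* src_u, int src_stride_u,
                   const uint8_t* src_v, int src_stride_v,
                   int src_width, int src_height,
                   uint8_t* dst_y, int dst_stride_y,
                   uint8_t* dst_u, int dst_stride_u,
                   uint8_t* dst_v, int dst_stride_v,
                   int dst_width, int dst_height) {
  libyuv::I420Scale(src_y, src_stride_y, src_u, src_stride_u, src_v,
                    src_stride_v, src_width, src_height, dst_y, dst_stride_y,
                    dst_u, dst_stride_u, dst_v, dst_stride_v, dst_width,
                    dst_height, libyuv::kFilterBilinear);
}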
@ -426,7 +414,8 @@ uint32_t SimulcastEncoderAdapter::GetStreamBitrate(
// current stream's |targetBitrate|, otherwise it's capped by |maxBitrate|.
if (stream_idx < codec_.numberOfSimulcastStreams - 1) {
unsigned int max_rate = codec_.simulcastStream[stream_idx].maxBitrate;
if (new_bitrate_kbit >= SumStreamTargetBitrate(stream_idx + 1, codec_) +
if (new_bitrate_kbit >=
SumStreamTargetBitrate(stream_idx + 1, codec_) +
codec_.simulcastStream[stream_idx + 1].minBitrate) {
max_rate = codec_.simulcastStream[stream_idx].targetBitrate;
}
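The rule being reflowed here is worth spelling out: a non-top stream may spend up to its maxBitrate, but only while the overall rate cannot also cover the next stream's minimum; once it can, this stream is held at targetBitrate so the next one can start. A hedged sketch of just that cap selection (SumStreamTargetBitrate and the field names are taken from the diff; this is not the full function):

// Pick the cap for one simulcast stream, mirroring the logic above.
unsigned int StreamCapKbit(const VideoCodec& codec, size_t stream_idx,
                           uint32_t new_bitrate_kbit) {
  unsigned int max_rate = codec.simulcastStream[stream_idx].maxBitrate;
  if (new_bitrate_kbit >=
      SumStreamTargetBitrate(stream_idx + 1, codec) +
          codec.simulcastStream[stream_idx + 1].minBitrate) {
    // Enough rate to bring up the next stream: hold this one at target.
    max_rate = codec.simulcastStream[stream_idx].targetBitrate;
  }
  return max_rate;
}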
@ -71,8 +71,8 @@ class SimulcastEncoderAdapter : public VP8Encoder {
send_stream(true) {}
StreamInfo(VideoEncoder* encoder,
EncodedImageCallback* callback,
unsigned short width,
unsigned short height,
uint16_t width,
uint16_t height,
bool send_stream)
: encoder(encoder),
callback(callback),
@ -83,8 +83,8 @@ class SimulcastEncoderAdapter : public VP8Encoder {
// Deleted by SimulcastEncoderAdapter::Release().
VideoEncoder* encoder;
EncodedImageCallback* callback;
unsigned short width;
unsigned short height;
uint16_t width;
uint16_t height;
bool key_frame_request;
bool send_stream;
};
@ -118,4 +118,3 @@ class SimulcastEncoderAdapter : public VP8Encoder {
} // namespace webrtc

#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_ENCODER_ADAPTER_H_

@ -27,12 +27,10 @@ static VP8Encoder* CreateTestEncoderAdapter() {
class TestSimulcastEncoderAdapter : public TestVp8Simulcast {
public:
TestSimulcastEncoderAdapter()
: TestVp8Simulcast(CreateTestEncoderAdapter(),
VP8Decoder::Create()) {}
: TestVp8Simulcast(CreateTestEncoderAdapter(), VP8Decoder::Create()) {}

protected:
virtual void SetUp() {
TestVp8Simulcast::SetUp();
}
virtual void SetUp() { TestVp8Simulcast::SetUp(); }
virtual void TearDown() {
TestVp8Simulcast::TearDown();
VP8EncoderFactoryConfig::set_use_simulcast_adapter(false);
@ -97,8 +95,7 @@ TEST_F(TestSimulcastEncoderAdapter, TestSpatioTemporalLayers321PatternEncoder) {

// TODO(ronghuawu): Enable this test when SkipEncodingUnusedStreams option is
// implemented for SimulcastEncoderAdapter.
TEST_F(TestSimulcastEncoderAdapter,
DISABLED_TestSkipEncodingUnusedStreams) {
TEST_F(TestSimulcastEncoderAdapter, DISABLED_TestSkipEncodingUnusedStreams) {
TestVp8Simulcast::TestSkipEncodingUnusedStreams();
}

@ -127,23 +124,17 @@ class MockVideoEncoder : public VideoEncoder {
return 0;
}

int32_t Release() override {
return 0;
}
int32_t Release() override { return 0; }

int32_t SetRates(uint32_t newBitRate, uint32_t frameRate) override {
return 0;
}

MOCK_METHOD2(SetChannelParameters,
int32_t(uint32_t packetLoss, int64_t rtt));
MOCK_METHOD2(SetChannelParameters, int32_t(uint32_t packetLoss, int64_t rtt));

bool SupportsNativeHandle() const override {
return supports_native_handle_;
}
bool SupportsNativeHandle() const override { return supports_native_handle_; }

virtual ~MockVideoEncoder() {
}
virtual ~MockVideoEncoder() {}

const VideoCodec& codec() const { return codec_; }

@ -200,7 +191,8 @@ class TestSimulcastEncoderAdapterFakeHelper {
EXPECT_TRUE(!factory_->encoders().empty());
for (size_t i = 0; i < factory_->encoders().size(); ++i) {
EXPECT_CALL(*factory_->encoders()[i],
SetChannelParameters(packetLoss, rtt)).Times(1);
SetChannelParameters(packetLoss, rtt))
.Times(1);
}
}
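The MOCK_METHOD2 / EXPECT_CALL pairing that keeps getting reflowed in these hunks is plain gmock. A minimal self-contained sketch of the pattern (class and test names are illustrative, not from the CL):

#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"

class MockEncoderProbe {
 public:
  // Generates a SetChannelParameters(uint32_t, int64_t) method on the mock.
  MOCK_METHOD2(SetChannelParameters, int32_t(uint32_t packet_loss, int64_t rtt));
};

TEST(MockEncoderProbeTest, ForwardsChannelParametersOnce) {
  MockEncoderProbe probe;
  // Expect exactly one call with exactly these argument values.
  EXPECT_CALL(probe, SetChannelParameters(10u, 200)).Times(1);
  probe.SetChannelParameters(10u, 200);
}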
@ -249,8 +241,7 @@ class TestSimulcastEncoderAdapterFake : public ::testing::Test,

void SetupCodec() {
TestVp8Simulcast::DefaultSettings(
&codec_,
static_cast<const int*>(kTestTemporalLayerProfile));
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
adapter_->RegisterEncodeCompleteCallback(this);
}

@ -13,18 +13,14 @@
namespace webrtc {
namespace testing {

class TestVp8Impl
: public TestVp8Simulcast {
class TestVp8Impl : public TestVp8Simulcast {
public:
TestVp8Impl()
: TestVp8Simulcast(VP8Encoder::Create(), VP8Decoder::Create()) {}

protected:
virtual void SetUp() {
TestVp8Simulcast::SetUp();
}
virtual void TearDown() {
TestVp8Simulcast::TearDown();
}
virtual void SetUp() { TestVp8Simulcast::SetUp(); }
virtual void TearDown() { TestVp8Simulcast::TearDown(); }
};

TEST_F(TestVp8Impl, TestKeyFrameRequestsOnAllStreams) {

@ -44,10 +44,8 @@ const int kMinBitrates[kNumberOfSimulcastStreams] = {50, 150, 600};
const int kTargetBitrates[kNumberOfSimulcastStreams] = {100, 450, 1000};
const int kDefaultTemporalLayerProfile[3] = {3, 3, 3};

template<typename T> void SetExpectedValues3(T value0,
T value1,
T value2,
T* expected_values) {
template <typename T>
void SetExpectedValues3(T value0, T value1, T value2, T* expected_values) {
expected_values[0] = value0;
expected_values[1] = value1;
expected_values[2] = value2;
@ -55,15 +53,14 @@ template<typename T> void SetExpectedValues3(T value0,

class Vp8TestEncodedImageCallback : public EncodedImageCallback {
public:
Vp8TestEncodedImageCallback()
: picture_id_(-1) {
Vp8TestEncodedImageCallback() : picture_id_(-1) {
memset(temporal_layer_, -1, sizeof(temporal_layer_));
memset(layer_sync_, false, sizeof(layer_sync_));
}

~Vp8TestEncodedImageCallback() {
delete [] encoded_key_frame_._buffer;
delete [] encoded_frame_._buffer;
delete[] encoded_key_frame_._buffer;
delete[] encoded_frame_._buffer;
}

virtual int32_t Encoded(const EncodedImage& encoded_image,
@ -72,22 +69,20 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
// Only store the base layer.
if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
if (encoded_image._frameType == kVideoFrameKey) {
delete [] encoded_key_frame_._buffer;
delete[] encoded_key_frame_._buffer;
encoded_key_frame_._buffer = new uint8_t[encoded_image._size];
encoded_key_frame_._size = encoded_image._size;
encoded_key_frame_._length = encoded_image._length;
encoded_key_frame_._frameType = kVideoFrameKey;
encoded_key_frame_._completeFrame = encoded_image._completeFrame;
memcpy(encoded_key_frame_._buffer,
encoded_image._buffer,
memcpy(encoded_key_frame_._buffer, encoded_image._buffer,
encoded_image._length);
} else {
delete [] encoded_frame_._buffer;
delete[] encoded_frame_._buffer;
encoded_frame_._buffer = new uint8_t[encoded_image._size];
encoded_frame_._size = encoded_image._size;
encoded_frame_._length = encoded_image._length;
memcpy(encoded_frame_._buffer,
encoded_image._buffer,
memcpy(encoded_frame_._buffer, encoded_image._buffer,
encoded_image._length);
}
}
@ -98,8 +93,10 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
codec_specific_info->codecSpecific.VP8.temporalIdx;
return 0;
}
void GetLastEncodedFrameInfo(int* picture_id, int* temporal_layer,
bool* layer_sync, int stream) {
void GetLastEncodedFrameInfo(int* picture_id,
int* temporal_layer,
bool* layer_sync,
int stream) {
*picture_id = picture_id_;
*temporal_layer = temporal_layer_[stream];
*layer_sync = layer_sync_[stream];
@ -121,9 +118,7 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {

class Vp8TestDecodedImageCallback : public DecodedImageCallback {
public:
Vp8TestDecodedImageCallback()
: decoded_frames_(0) {
}
Vp8TestDecodedImageCallback() : decoded_frames_(0) {}
int32_t Decoded(VideoFrame& decoded_image) override {
for (int i = 0; i < decoded_image.width(); ++i) {
EXPECT_NEAR(kColorY, decoded_image.buffer(kYPlane)[i], 1);
@ -141,9 +136,7 @@ class Vp8TestDecodedImageCallback : public DecodedImageCallback {
RTC_NOTREACHED();
return -1;
}
int DecodedFrames() {
return decoded_frames_;
}
int DecodedFrames() { return decoded_frames_; }

private:
int decoded_frames_;
@ -166,8 +159,7 @@ class SkipEncodingUnusedStreamsTest {
std::vector<unsigned int> configured_bitrates;
for (std::vector<TemporalLayers*>::const_iterator it =
spy_factory->spying_layers_.begin();
it != spy_factory->spying_layers_.end();
++it) {
it != spy_factory->spying_layers_.end(); ++it) {
configured_bitrates.push_back(
static_cast<SpyingTemporalLayers*>(*it)->configured_bitrate_);
}
@ -190,8 +182,8 @@ class SkipEncodingUnusedStreamsTest {
int framerate,
vpx_codec_enc_cfg_t* cfg) override {
configured_bitrate_ = bitrate_kbit;
return layers_->ConfigureBitrates(
bitrate_kbit, max_bitrate_kbit, framerate, cfg);
return layers_->ConfigureBitrates(bitrate_kbit, max_bitrate_kbit,
framerate, cfg);
}

void PopulateCodecSpecific(bool base_layer_sync,
@ -233,16 +225,15 @@ class SkipEncodingUnusedStreamsTest {
class TestVp8Simulcast : public ::testing::Test {
public:
TestVp8Simulcast(VP8Encoder* encoder, VP8Decoder* decoder)
: encoder_(encoder),
decoder_(decoder) {}
: encoder_(encoder), decoder_(decoder) {}

// Creates a VideoFrame from |plane_colors|.
static void CreateImage(VideoFrame* frame, int plane_colors[kNumOfPlanes]) {
for (int plane_num = 0; plane_num < kNumOfPlanes; ++plane_num) {
int width = (plane_num != kYPlane ? (frame->width() + 1) / 2 :
frame->width());
int height = (plane_num != kYPlane ? (frame->height() + 1) / 2 :
frame->height());
int width =
(plane_num != kYPlane ? (frame->width() + 1) / 2 : frame->width());
int height =
(plane_num != kYPlane ? (frame->height() + 1) / 2 : frame->height());
PlaneType plane_type = static_cast<PlaneType>(plane_num);
uint8_t* data = frame->buffer(plane_type);
// Setting allocated area to zero - setting only image size to
@ -272,24 +263,15 @@ class TestVp8Simulcast : public ::testing::Test {
settings->height = kDefaultHeight;
settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
ASSERT_EQ(3, kNumberOfSimulcastStreams);
ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4,
kMaxBitrates[0],
kMinBitrates[0],
kTargetBitrates[0],
&settings->simulcastStream[0],
temporal_layer_profile[0]);
ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2,
kMaxBitrates[1],
kMinBitrates[1],
kTargetBitrates[1],
&settings->simulcastStream[1],
temporal_layer_profile[1]);
ConfigureStream(kDefaultWidth, kDefaultHeight,
kMaxBitrates[2],
kMinBitrates[2],
kTargetBitrates[2],
&settings->simulcastStream[2],
temporal_layer_profile[2]);
ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
kMinBitrates[0], kTargetBitrates[0],
&settings->simulcastStream[0], temporal_layer_profile[0]);
ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, kMaxBitrates[1],
kMinBitrates[1], kTargetBitrates[1],
&settings->simulcastStream[1], temporal_layer_profile[1]);
ConfigureStream(kDefaultWidth, kDefaultHeight, kMaxBitrates[2],
kMinBitrates[2], kTargetBitrates[2],
&settings->simulcastStream[2], temporal_layer_profile[2]);
settings->codecSpecific.VP8.resilience = kResilientStream;
settings->codecSpecific.VP8.denoisingOn = true;
settings->codecSpecific.VP8.errorConcealmentOn = false;
@ -317,9 +299,7 @@ class TestVp8Simulcast : public ::testing::Test {
}

protected:
virtual void SetUp() {
SetUpCodec(kDefaultTemporalLayerProfile);
}
virtual void SetUp() { SetUpCodec(kDefaultTemporalLayerProfile); }

virtual void SetUpCodec(const int* temporal_layer_profile) {
encoder_->RegisterEncodeCompleteCallback(&encoder_callback_);
@ -328,8 +308,8 @@ class TestVp8Simulcast : public ::testing::Test {
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1));
int half_width = (kDefaultWidth + 1) / 2;
input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight,
kDefaultWidth, half_width, half_width);
input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, kDefaultWidth,
half_width, half_width);
memset(input_frame_.buffer(kYPlane), 0,
input_frame_.allocated_size(kYPlane));
memset(input_frame_.buffer(kUPlane), 0,
@ -347,28 +327,34 @@ class TestVp8Simulcast : public ::testing::Test {
ASSERT_GE(expected_video_streams, 0);
ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
if (expected_video_streams >= 1) {
EXPECT_CALL(encoder_callback_, Encoded(
EXPECT_CALL(
encoder_callback_,
Encoded(
AllOf(Field(&EncodedImage::_frameType, frame_type),
Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)), _, _)
)
Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
_, _))
.Times(1)
.WillRepeatedly(Return(0));
}
if (expected_video_streams >= 2) {
EXPECT_CALL(encoder_callback_, Encoded(
EXPECT_CALL(
encoder_callback_,
Encoded(
AllOf(Field(&EncodedImage::_frameType, frame_type),
Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)), _, _)
)
Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
_, _))
.Times(1)
.WillRepeatedly(Return(0));
}
if (expected_video_streams >= 3) {
EXPECT_CALL(encoder_callback_, Encoded(
AllOf(Field(&EncodedImage::_frameType, frame_type),
EXPECT_CALL(
encoder_callback_,
Encoded(AllOf(Field(&EncodedImage::_frameType, frame_type),
Field(&EncodedImage::_encodedWidth, kDefaultWidth),
Field(&EncodedImage::_encodedHeight, kDefaultHeight)), _, _))
Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
_, _))
.Times(1)
.WillRepeatedly(Return(0));
}
@ -482,8 +468,8 @@ class TestVp8Simulcast : public ::testing::Test {
void TestPaddingOneStreamTwoMaxedOut() {
// We are just below limit of sending third stream, so we should get
// first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
kMinBitrates[2] - 1, 30);
encoder_->SetRates(
kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 2);
@ -496,8 +482,8 @@ class TestVp8Simulcast : public ::testing::Test {

void TestSendAllStreams() {
// We have just enough to send all streams.
encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
kMinBitrates[2], 30);
encoder_->SetRates(
kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 3);
@ -510,8 +496,7 @@ class TestVp8Simulcast : public ::testing::Test {

void TestDisablingStreams() {
// We should get three media streams.
encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] +
kMaxBitrates[2], 30);
encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 3);
@ -522,8 +507,8 @@ class TestVp8Simulcast : public ::testing::Test {
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

// We should only get two streams and padding for one.
encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
kMinBitrates[2] / 2, 30);
encoder_->SetRates(
kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
ExpectStreams(kVideoFrameDelta, 2);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
@ -542,16 +527,16 @@ class TestVp8Simulcast : public ::testing::Test {
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

// We should only get two streams and padding for one.
encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
kMinBitrates[2] / 2, 30);
encoder_->SetRates(
kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
// We get a key frame because a new stream is being enabled.
ExpectStreams(kVideoFrameKey, 2);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

// We should get all three streams.
encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
kTargetBitrates[2], 30);
encoder_->SetRates(
kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
// We get a key frame because a new stream is being enabled.
ExpectStreams(kVideoFrameKey, 3);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
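To make the rate thresholds in these tests concrete, using the constants declared earlier in this file: with kTargetBitrates = {100, 450, 1000} and kMinBitrates = {50, 150, 600}, "just enough to send all streams" works out to 100 + 450 + 600 = 1150 kbps (the first two streams at target plus the third at its minimum), while the padding cases halve the last term to kMinBitrates[2] / 2 = 300 kbps, for 850 kbps total, which is below the third stream's 600 kbps minimum and therefore keeps it disabled and padded.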
@ -583,13 +568,13 @@ class TestVp8Simulcast : public ::testing::Test {
input_frame_.allocated_size(kVPlane));

// The for loop above did not set the bitrate of the highest layer.
settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
maxBitrate = 0;
settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1]
.maxBitrate = 0;
// The highest layer has to correspond to the non-simulcast resolution.
settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
width = settings_.width;
settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
height = settings_.height;
settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width =
settings_.width;
settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height =
settings_.height;
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));

// Encode one frame and verify.
@ -625,13 +610,9 @@ class TestVp8Simulcast : public ::testing::Test {
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
}

void TestSwitchingToOneStream() {
SwitchingToOneStream(1024, 768);
}
void TestSwitchingToOneStream() { SwitchingToOneStream(1024, 768); }

void TestSwitchingToOneOddStream() {
SwitchingToOneStream(1023, 769);
}
void TestSwitchingToOneOddStream() { SwitchingToOneStream(1023, 769); }

void TestRPSIEncoder() {
Vp8TestEncodedImageCallback encoder_callback;
@ -782,67 +763,55 @@ class TestVp8Simulcast : public ::testing::Test {
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams.

int expected_temporal_idx[3] = { -1, -1, -1};
int expected_temporal_idx[3] = {-1, -1, -1};
bool expected_layer_sync[3] = {false, false, false};

// First frame: #0.
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
expected_temporal_idx,
expected_layer_sync,
3);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

// Next frame: #1.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
expected_temporal_idx,
expected_layer_sync,
3);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

// Next frame: #2.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
expected_temporal_idx,
expected_layer_sync,
3);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

// Next frame: #3.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
expected_temporal_idx,
expected_layer_sync,
3);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

// Next frame: #4.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
expected_temporal_idx,
expected_layer_sync,
3);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

// Next frame: #5.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
expected_temporal_idx,
expected_layer_sync,
3);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
}

// Test the layer pattern and sync flag for various spatial-temporal patterns.
@ -863,67 +832,55 @@ class TestVp8Simulcast : public ::testing::Test {
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams.

int expected_temporal_idx[3] = { -1, -1, -1};
int expected_temporal_idx[3] = {-1, -1, -1};
bool expected_layer_sync[3] = {false, false, false};

// First frame: #0.
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
expected_temporal_idx,
expected_layer_sync,
3);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

// Next frame: #1.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
expected_temporal_idx,
expected_layer_sync,
3);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

// Next frame: #2.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
expected_temporal_idx,
expected_layer_sync,
3);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

// Next frame: #3.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
expected_temporal_idx,
expected_layer_sync,
3);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

// Next frame: #4.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
expected_temporal_idx,
expected_layer_sync,
3);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

// Next frame: #5.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
expected_temporal_idx,
expected_layer_sync,
3);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
}

void TestStrideEncodeDecode() {
@ -937,8 +894,8 @@ class TestVp8Simulcast : public ::testing::Test {
// 1. stride > width 2. stride_y != stride_uv/2
int stride_y = kDefaultWidth + 20;
int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight,
stride_y, stride_uv, stride_uv);
input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, stride_y,
stride_uv, stride_uv);
// Set color.
int plane_offset[kNumOfPlanes];
plane_offset[kYPlane] = kColorY;
@ -968,8 +925,7 @@ class TestVp8Simulcast : public ::testing::Test {
void TestSkipEncodingUnusedStreams() {
SkipEncodingUnusedStreamsTest test;
std::vector<unsigned int> configured_bitrate =
test.RunTest(encoder_.get(),
&settings_,
test.RunTest(encoder_.get(), &settings_,
1); // Target bit rate 1, to force all streams but the
// base one to be exceeding bandwidth constraints.
EXPECT_EQ(static_cast<size_t>(kNumberOfSimulcastStreams),
@ -980,8 +936,7 @@ class TestVp8Simulcast : public ::testing::Test {
int stream = 0;
for (std::vector<unsigned int>::const_iterator it =
configured_bitrate.begin();
it != configured_bitrate.end();
++it) {
it != configured_bitrate.end(); ++it) {
if (stream == 0) {
EXPECT_EQ(min_bitrate, *it);
} else {

@ -32,4 +32,3 @@ class VP8EncoderFactoryConfig {
} // namespace webrtc

#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_VP8_FACTORY_H_

@ -68,8 +68,7 @@ std::vector<int> GetStreamBitratesKbps(const VideoCodec& codec,
std::vector<int> bitrates_kbps(codec.numberOfSimulcastStreams);
// Allocate min -> target bitrates as long as we have bitrate to spend.
size_t last_active_stream = 0;
for (size_t i = 0;
i < static_cast<size_t>(codec.numberOfSimulcastStreams) &&
for (size_t i = 0; i < static_cast<size_t>(codec.numberOfSimulcastStreams) &&
bitrate_to_allocate_kbps >=
static_cast<int>(codec.simulcastStream[i].minBitrate);
++i) {
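A compact sketch of the allocation idea behind GetStreamBitratesKbps, under stated assumptions: this is a simplification, not the real function (the real loop also tracks the last active stream and spends any remainder there); the helper name is hypothetical.

#include <algorithm>
#include <cstddef>
#include <vector>

// Give each stream its minimum, then up to its target, until the budget
// runs out; streams the budget cannot reach stay at zero (disabled).
std::vector<int> AllocateMinThenTarget(const std::vector<int>& min_kbps,
                                       const std::vector<int>& target_kbps,
                                       int budget_kbps) {
  std::vector<int> out(min_kbps.size(), 0);
  for (size_t i = 0; i < min_kbps.size() && budget_kbps >= min_kbps[i]; ++i) {
    out[i] = std::min(target_kbps[i], budget_kbps);
    budget_kbps -= out[i];
  }
  return out;
}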
@ -132,7 +131,7 @@ bool ValidSimulcastResolutions(const VideoCodec& codec, int num_streams) {
return true;
}

int NumStreamsDisabled(std::vector<bool>& streams) {
int NumStreamsDisabled(const std::vector<bool>& streams) {
int num_disabled = 0;
for (bool stream : streams) {
if (!stream)
@ -183,7 +182,7 @@ int VP8EncoderImpl::Release() {

while (!encoded_images_.empty()) {
EncodedImage& image = encoded_images_.back();
delete [] image._buffer;
delete[] image._buffer;
encoded_images_.pop_back();
}
while (!encoders_.empty()) {
@ -289,10 +288,8 @@ int VP8EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
target_bitrate = tl0_bitrate;
}
configurations_[i].rc_target_bitrate = target_bitrate;
temporal_layers_[stream_idx]->ConfigureBitrates(target_bitrate,
max_bitrate,
framerate,
&configurations_[i]);
temporal_layers_[stream_idx]->ConfigureBitrates(
target_bitrate, max_bitrate, framerate, &configurations_[i]);
if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@ -334,7 +331,8 @@ void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
for (int i = 0; i < num_streams; ++i) {
// TODO(andresp): crash if layers is invalid.
int layers = codec.simulcastStream[i].numberOfTemporalLayers;
if (layers < 1) layers = 1;
if (layers < 1)
layers = 1;
temporal_layers_.push_back(tl_factory.Create(layers, rand()));
}
}
@ -379,12 +377,13 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}

int num_temporal_layers = doing_simulcast ?
inst->simulcastStream[0].numberOfTemporalLayers :
inst->codecSpecific.VP8.numberOfTemporalLayers;
int num_temporal_layers =
doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers
: inst->codecSpecific.VP8.numberOfTemporalLayers;

// TODO(andresp): crash if num temporal layers is bananas.
if (num_temporal_layers < 1) num_temporal_layers = 1;
if (num_temporal_layers < 1)
num_temporal_layers = 1;
SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst);

feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn;
@ -414,7 +413,7 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
int idx = number_of_streams - 1;
for (int i = 0; i < (number_of_streams - 1); ++i, --idx) {
int gcd = GCD(inst->simulcastStream[idx].width,
inst->simulcastStream[idx-1].width);
inst->simulcastStream[idx - 1].width);
downsampling_factors_[i].num = inst->simulcastStream[idx].width / gcd;
downsampling_factors_[i].den = inst->simulcastStream[idx - 1].width / gcd;
send_stream_[i] = false;
@ -426,20 +425,20 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
}
for (int i = 0; i < number_of_streams; ++i) {
// Random start, 16 bits is enough.
picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF;
picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF; // NOLINT
last_key_frame_picture_id_[i] = -1;
// allocate memory for encoded image
if (encoded_images_[i]._buffer != NULL) {
delete [] encoded_images_[i]._buffer;
delete[] encoded_images_[i]._buffer;
}
encoded_images_[i]._size = CalcBufferSize(kI420,
codec_.width, codec_.height);
encoded_images_[i]._size =
CalcBufferSize(kI420, codec_.width, codec_.height);
encoded_images_[i]._buffer = new uint8_t[encoded_images_[i]._size];
encoded_images_[i]._completeFrame = true;
}
// populate encoder configuration with default values
if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(),
&configurations_[0], 0)) {
if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &configurations_[0],
0)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
// setting the time base of the codec
@ -463,8 +462,8 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
break;
case kResilientFrames:
#ifdef INDEPENDENT_PARTITIONS
configurations_[0].g_error_resilient = VPX_ERROR_RESILIENT_DEFAULT |
VPX_ERROR_RESILIENT_PARTITIONS;
configurations_[0].g_error_resilient =
VPX_ERROR_RESILIENT_DEFAULT | VPX_ERROR_RESILIENT_PARTITIONS;
break;
#else
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; // Not supported
@ -540,20 +539,18 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,

// Determine number of threads based on the image size and #cores.
// TODO(fbarchard): Consider number of Simulcast layers.
configurations_[0].g_threads = NumberOfThreads(configurations_[0].g_w,
configurations_[0].g_h,
number_of_cores);
configurations_[0].g_threads = NumberOfThreads(
configurations_[0].g_w, configurations_[0].g_h, number_of_cores);

// Creating a wrapper to the image - setting image data to NULL.
// Actual pointer will be set in encode. Setting align to 1, as it
// is meaningless (no memory allocation is done here).
vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height,
1, NULL);
vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1,
NULL);

if (encoders_.size() == 1) {
configurations_[0].rc_target_bitrate = inst->startBitrate;
temporal_layers_[0]->ConfigureBitrates(inst->startBitrate,
inst->maxBitrate,
temporal_layers_[0]->ConfigureBitrates(inst->startBitrate, inst->maxBitrate,
inst->maxFramerate,
&configurations_[0]);
} else {
@ -645,20 +642,15 @@ int VP8EncoderImpl::InitAndSetControlSettings() {
flags |= VPX_CODEC_USE_OUTPUT_PARTITION;

if (encoders_.size() > 1) {
int error = vpx_codec_enc_init_multi(&encoders_[0],
vpx_codec_vp8_cx(),
&configurations_[0],
encoders_.size(),
flags,
&downsampling_factors_[0]);
int error = vpx_codec_enc_init_multi(&encoders_[0], vpx_codec_vp8_cx(),
&configurations_[0], encoders_.size(),
flags, &downsampling_factors_[0]);
if (error) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
} else {
if (vpx_codec_enc_init(&encoders_[0],
vpx_codec_vp8_cx(),
&configurations_[0],
flags)) {
if (vpx_codec_enc_init(&encoders_[0], vpx_codec_vp8_cx(),
&configurations_[0], flags)) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
}
@ -675,13 +667,13 @@ int VP8EncoderImpl::InitAndSetControlSettings() {
#else
denoiser_state = kDenoiserOnAdaptive;
#endif
vpx_codec_control(&encoders_[0], VP8E_SET_NOISE_SENSITIVITY,
codec_.codecSpecific.VP8.denoisingOn ?
denoiser_state : kDenoiserOff);
vpx_codec_control(
&encoders_[0], VP8E_SET_NOISE_SENSITIVITY,
codec_.codecSpecific.VP8.denoisingOn ? denoiser_state : kDenoiserOff);
if (encoders_.size() > 2) {
vpx_codec_control(&encoders_[1], VP8E_SET_NOISE_SENSITIVITY,
codec_.codecSpecific.VP8.denoisingOn ?
denoiser_state : kDenoiserOff);
vpx_codec_control(
&encoders_[1], VP8E_SET_NOISE_SENSITIVITY,
codec_.codecSpecific.VP8.denoisingOn ? denoiser_state : kDenoiserOff);
}
for (size_t i = 0; i < encoders_.size(); ++i) {
// Allow more screen content to be detected as static.
@ -714,7 +706,7 @@ uint32_t VP8EncoderImpl::MaxIntraTarget(uint32_t optimalBuffersize) {

// Don't go below 3 times the per frame bandwidth.
const uint32_t minIntraTh = 300;
return (targetPct < minIntraTh) ? minIntraTh: targetPct;
return (targetPct < minIntraTh) ? minIntraTh : targetPct;
}

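Only the tail of MaxIntraTarget is visible in this hunk; what is shown is a floor on the key-frame rate target, expressed (as the comment suggests) in percent of the average per-frame bandwidth, so 300 means a key frame may spend at least three times a normal frame's budget. A sketch of just that clamp, under that assumption:

#include <cstdint>

// Clamp a key-frame target (in % of per-frame bandwidth) to >= 300%,
// i.e. at least three times the per-frame budget.
uint32_t ClampIntraTargetPct(uint32_t target_pct) {
  const uint32_t kMinIntraTh = 300;
  return (target_pct < kMinIntraTh) ? kMinIntraTh : target_pct;
}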
int VP8EncoderImpl::Encode(const VideoFrame& frame,
@ -762,17 +754,17 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
for (size_t i = 1; i < encoders_.size(); ++i) {
// Scale the image down a number of times by downsampling factor
libyuv::I420Scale(
raw_images_[i-1].planes[VPX_PLANE_Y],
raw_images_[i-1].stride[VPX_PLANE_Y],
raw_images_[i-1].planes[VPX_PLANE_U],
raw_images_[i-1].stride[VPX_PLANE_U],
raw_images_[i-1].planes[VPX_PLANE_V],
raw_images_[i-1].stride[VPX_PLANE_V],
raw_images_[i-1].d_w, raw_images_[i-1].d_h,
raw_images_[i].planes[VPX_PLANE_Y], raw_images_[i].stride[VPX_PLANE_Y],
raw_images_[i].planes[VPX_PLANE_U], raw_images_[i].stride[VPX_PLANE_U],
raw_images_[i].planes[VPX_PLANE_V], raw_images_[i].stride[VPX_PLANE_V],
raw_images_[i].d_w, raw_images_[i].d_h, libyuv::kFilterBilinear);
raw_images_[i - 1].planes[VPX_PLANE_Y],
raw_images_[i - 1].stride[VPX_PLANE_Y],
raw_images_[i - 1].planes[VPX_PLANE_U],
raw_images_[i - 1].stride[VPX_PLANE_U],
raw_images_[i - 1].planes[VPX_PLANE_V],
raw_images_[i - 1].stride[VPX_PLANE_V], raw_images_[i - 1].d_w,
raw_images_[i - 1].d_h, raw_images_[i].planes[VPX_PLANE_Y],
raw_images_[i].stride[VPX_PLANE_Y], raw_images_[i].planes[VPX_PLANE_U],
raw_images_[i].stride[VPX_PLANE_U], raw_images_[i].planes[VPX_PLANE_V],
raw_images_[i].stride[VPX_PLANE_V], raw_images_[i].d_w,
raw_images_[i].d_h, libyuv::kFilterBilinear);
}
vpx_enc_frame_flags_t flags[kMaxSimulcastStreams];
for (size_t i = 0; i < encoders_.size(); ++i) {
@ -807,8 +799,8 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
if (send_key_frame) {
// Adapt the size of the key frame when in screenshare with 1 temporal
// layer.
if (encoders_.size() == 1 && codec_.mode == kScreensharing
&& codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) {
if (encoders_.size() == 1 && codec_.mode == kScreensharing &&
codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) {
const uint32_t forceKeyFrameIntraTh = 100;
vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
forceKeyFrameIntraTh);
@ -825,8 +817,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
// Handle RPSI and SLI messages and set up the appropriate encode flags.
bool sendRefresh = false;
if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) {
rps_.ReceivedRPSI(
codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
rps_.ReceivedRPSI(codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
}
if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) {
sendRefresh = rps_.ReceivedSLI(input_image.timestamp());
@ -878,8 +869,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
}

vpx_codec_control(&encoders_[i], VP8E_SET_FRAME_FLAGS, flags[stream_idx]);
vpx_codec_control(&encoders_[i],
VP8E_SET_TEMPORAL_LAYER_ID,
vpx_codec_control(&encoders_[i], VP8E_SET_TEMPORAL_LAYER_ID,
temporal_layers_[stream_idx]->CurrentLayerId());
}
// TODO(holmer): Ideally the duration should be the timestamp diff of this
@ -915,8 +905,7 @@ int VP8EncoderImpl::UpdateCodecFrameSize(const VideoFrame& input_image) {
codec_.simulcastStream[0].height = input_image.height();
}
// Update the cpu_speed setting for resolution change.
vpx_codec_control(&(encoders_[0]),
VP8E_SET_CPUUSED,
vpx_codec_control(&(encoders_[0]), VP8E_SET_CPUUSED,
SetCpuSpeed(codec_.width, codec_.height));
raw_images_[0].w = codec_.width;
raw_images_[0].h = codec_.height;
@ -949,13 +938,12 @@ void VP8EncoderImpl::PopulateCodecSpecific(
}
vp8Info->simulcastIdx = stream_idx;
vp8Info->keyIdx = kNoKeyIdx; // TODO(hlundin) populate this
vp8Info->nonReference = (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) ?
true : false;
vp8Info->nonReference =
(pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) ? true : false;
bool base_layer_sync_point = (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ||
only_predicting_from_key_frame;
temporal_layers_[stream_idx]->PopulateCodecSpecific(base_layer_sync_point,
vp8Info,
timestamp);
vp8Info, timestamp);
// Prepare next.
picture_id_[stream_idx] = (picture_id_[stream_idx] + 1) & 0x7FFF;
}
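The & 0x7FFF mask in the increment above keeps the VP8 PictureID inside its 15-bit payload field, so it wraps from 32767 back to 0 instead of overflowing into bit 15. A one-line check of that wrap (illustrative only):

#include <cassert>
#include <cstdint>

void PictureIdWrapExample() {
  uint16_t picture_id = 0x7FFF;  // Largest 15-bit PictureID.
  picture_id = (picture_id + 1) & 0x7FFF;
  assert(picture_id == 0);  // Wraps to zero; bit 15 stays clear.
}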
@ -975,18 +963,17 @@ int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
RTPFragmentationHeader frag_info;
// token_partitions_ is number of bits used.
frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_)
+ 1);
frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_) +
1);
CodecSpecificInfo codec_specific;
const vpx_codec_cx_pkt_t *pkt = NULL;
while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx],
&iter)) != NULL) {
const vpx_codec_cx_pkt_t* pkt = NULL;
while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx], &iter)) !=
NULL) {
switch (pkt->kind) {
case VPX_CODEC_CX_FRAME_PKT: {
uint32_t length = encoded_images_[encoder_idx]._length;
memcpy(&encoded_images_[encoder_idx]._buffer[length],
pkt->data.frame.buf,
pkt->data.frame.sz);
pkt->data.frame.buf, pkt->data.frame.sz);
frag_info.fragmentationOffset[part_idx] = length;
frag_info.fragmentationLength[part_idx] = pkt->data.frame.sz;
frag_info.fragmentationPlType[part_idx] = 0; // not known here
@ -1065,7 +1052,6 @@ int VP8EncoderImpl::RegisterEncodeCompleteCallback(
return WEBRTC_VIDEO_CODEC_OK;
}


VP8DecoderImpl::VP8DecoderImpl()
: decode_complete_callback_(NULL),
inited_(false),
@ -1077,8 +1063,7 @@ VP8DecoderImpl::VP8DecoderImpl()
propagation_cnt_(-1),
last_frame_width_(0),
last_frame_height_(0),
key_frame_required_(true) {
}
key_frame_required_(true) {}

VP8DecoderImpl::~VP8DecoderImpl() {
inited_ = true; // in order to do the actual release
@ -1094,8 +1079,7 @@ int VP8DecoderImpl::Reset() {
return WEBRTC_VIDEO_CODEC_OK;
}

int VP8DecoderImpl::InitDecode(const VideoCodec* inst,
int number_of_cores) {
int VP8DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
int ret_val = Release();
if (ret_val < 0) {
return ret_val;
@ -1111,7 +1095,7 @@ int VP8DecoderImpl::InitDecode(const VideoCodec* inst,
cfg.threads = 1;
cfg.h = cfg.w = 0; // set after decode

vpx_codec_flags_t flags = 0;
vpx_codec_flags_t flags = 0;
#if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64)
flags = VPX_CODEC_USE_POSTPROC;
#ifdef INDEPENDENT_PARTITIONS
@ -1244,7 +1228,7 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
if (input_image._frameType == kVideoFrameKey && input_image._buffer != NULL) {
const uint32_t bytes_to_copy = input_image._length;
if (last_keyframe_._size < bytes_to_copy) {
delete [] last_keyframe_._buffer;
delete[] last_keyframe_._buffer;
last_keyframe_._buffer = NULL;
last_keyframe_._size = 0;
}
@ -1302,7 +1286,8 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
}
if (picture_id > -1) {
if (((reference_updates & VP8_GOLD_FRAME) ||
(reference_updates & VP8_ALTR_FRAME)) && !corrupted) {
(reference_updates & VP8_ALTR_FRAME)) &&
!corrupted) {
decode_complete_callback_->ReceivedDecodedReferenceFrame(picture_id);
}
decode_complete_callback_->ReceivedDecodedFrame(picture_id);
@ -1325,14 +1310,10 @@ int VP8DecoderImpl::DecodePartitions(
const EncodedImage& input_image,
const RTPFragmentationHeader* fragmentation) {
for (int i = 0; i < fragmentation->fragmentationVectorSize; ++i) {
const uint8_t* partition = input_image._buffer +
fragmentation->fragmentationOffset[i];
const uint32_t partition_length =
fragmentation->fragmentationLength[i];
if (vpx_codec_decode(decoder_,
partition,
partition_length,
0,
const uint8_t* partition =
input_image._buffer + fragmentation->fragmentationOffset[i];
const uint32_t partition_length = fragmentation->fragmentationLength[i];
if (vpx_codec_decode(decoder_, partition, partition_length, 0,
VPX_DL_REALTIME)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@ -1356,8 +1337,7 @@ int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img,
// Allocate memory for decoded image.
VideoFrame decoded_image(buffer_pool_.CreateBuffer(img->d_w, img->d_h),
timestamp, 0, kVideoRotation_0);
libyuv::I420Copy(
img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
decoded_image.buffer(kYPlane), decoded_image.stride(kYPlane),
@ -1382,7 +1362,7 @@ int VP8DecoderImpl::RegisterDecodeCompleteCallback(

int VP8DecoderImpl::Release() {
if (last_keyframe_._buffer != NULL) {
delete [] last_keyframe_._buffer;
delete[] last_keyframe_._buffer;
last_keyframe_._buffer = NULL;
}
if (decoder_ != NULL) {
@ -1409,12 +1389,12 @@ const char* VP8DecoderImpl::ImplementationName() const {
int VP8DecoderImpl::CopyReference(VP8DecoderImpl* copy) {
// The type of frame to copy should be set in ref_frame_->frame_type
// before the call to this function.
if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_)
!= VPX_CODEC_OK) {
if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_) !=
VPX_CODEC_OK) {
return -1;
}
if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_)
!= VPX_CODEC_OK) {
if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) !=
VPX_CODEC_OK) {
return -1;
}
return 0;
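For reference, the two-step copy above follows libvpx's control API: read a reference buffer out of one decoder, then write it into another. A hedged standalone sketch of the same sequence (the helper name is hypothetical; ref->frame_type must be set before the calls, as the comment in the diff notes):

#include "vpx/vp8.h"
#include "vpx/vpx_decoder.h"

// Copy one reference buffer (last/golden/alt-ref, per ref->frame_type)
// from the src decoder into the dst decoder.
int CopyOneReference(vpx_codec_ctx_t* src, vpx_codec_ctx_t* dst,
                     vpx_ref_frame_t* ref) {
  if (vpx_codec_control(src, VP8_COPY_REFERENCE, ref) != VPX_CODEC_OK)
    return -1;
  if (vpx_codec_control(dst, VP8_SET_REFERENCE, ref) != VPX_CODEC_OK)
    return -1;
  return 0;
}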
@ -61,7 +61,8 @@ class VP8EncoderImpl : public VP8Encoder {
|
||||
const char* ImplementationName() const override;
|
||||
|
||||
private:
|
||||
void SetupTemporalLayers(int num_streams, int num_temporal_layers,
|
||||
void SetupTemporalLayers(int num_streams,
|
||||
int num_temporal_layers,
|
||||
const VideoCodec& codec);
|
||||
|
||||
// Set the cpu_speed setting for encoder based on resolution and/or platform.
|
||||
@ -169,4 +170,3 @@ class VP8DecoderImpl : public VP8Decoder {
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_VP8_IMPL_H_
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
/*
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
@ -23,8 +23,7 @@
|
||||
class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
|
||||
public:
|
||||
explicit Vp8SequenceCoderEncodeCallback(FILE* encoded_file)
|
||||
: encoded_file_(encoded_file),
|
||||
encoded_bytes_(0) {}
|
||||
: encoded_file_(encoded_file), encoded_bytes_(0) {}
|
||||
~Vp8SequenceCoderEncodeCallback();
|
||||
int Encoded(const webrtc::EncodedImage& encoded_image,
|
||||
const webrtc::CodecSpecificInfo* codecSpecificInfo,
|
||||
@ -32,6 +31,7 @@ class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
|
||||
// Returns the encoded image.
|
||||
webrtc::EncodedImage encoded_image() { return encoded_image_; }
|
||||
size_t encoded_bytes() { return encoded_bytes_; }
|
||||
|
||||
private:
|
||||
webrtc::EncodedImage encoded_image_;
|
||||
FILE* encoded_file_;
|
||||
@ -39,7 +39,7 @@ class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
|
||||
};
|
||||
|
||||
Vp8SequenceCoderEncodeCallback::~Vp8SequenceCoderEncodeCallback() {
|
||||
delete [] encoded_image_._buffer;
|
||||
delete[] encoded_image_._buffer;
|
||||
encoded_image_._buffer = NULL;
|
||||
}
|
||||
int Vp8SequenceCoderEncodeCallback::Encoded(
|
||||
@ -47,7 +47,7 @@ int Vp8SequenceCoderEncodeCallback::Encoded(
|
||||
const webrtc::CodecSpecificInfo* codecSpecificInfo,
|
||||
const webrtc::RTPFragmentationHeader* fragmentation) {
|
||||
if (encoded_image_._size < encoded_image._size) {
|
||||
delete [] encoded_image_._buffer;
|
||||
delete[] encoded_image_._buffer;
|
||||
encoded_image_._buffer = NULL;
|
||||
encoded_image_._buffer = new uint8_t[encoded_image._size];
|
||||
encoded_image_._size = encoded_image._size;
|
||||
@ -72,7 +72,7 @@ class Vp8SequenceCoderDecodeCallback : public webrtc::DecodedImageCallback {
|
||||
int32_t Decoded(webrtc::VideoFrame& frame) override;
|
||||
int32_t Decoded(webrtc::VideoFrame& frame, int64_t decode_time_ms) override {
|
||||
RTC_NOTREACHED();
|
||||
return -1;;
|
||||
return -1;
|
||||
}
|
||||
bool DecodeComplete();
|
||||
|
||||
@ -85,16 +85,16 @@ int Vp8SequenceCoderDecodeCallback::Decoded(webrtc::VideoFrame& image) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int SequenceCoder(webrtc::test::CommandLineParser& parser) {
|
||||
int width = strtol((parser.GetFlag("w")).c_str(), NULL, 10);
|
||||
int height = strtol((parser.GetFlag("h")).c_str(), NULL, 10);
|
||||
int framerate = strtol((parser.GetFlag("f")).c_str(), NULL, 10);
|
||||
int SequenceCoder(webrtc::test::CommandLineParser* parser) {
|
||||
int width = strtol((parser->GetFlag("w")).c_str(), NULL, 10);
|
||||
int height = strtol((parser->GetFlag("h")).c_str(), NULL, 10);
|
||||
int framerate = strtol((parser->GetFlag("f")).c_str(), NULL, 10);
|
||||
|
||||
if (width <= 0 || height <= 0 || framerate <= 0) {
|
||||
fprintf(stderr, "Error: Resolution cannot be <= 0!\n");
|
||||
return -1;
|
||||
}
|
||||
int target_bitrate = strtol((parser.GetFlag("b")).c_str(), NULL, 10);
|
||||
int target_bitrate = strtol((parser->GetFlag("b")).c_str(), NULL, 10);
|
||||
if (target_bitrate <= 0) {
|
||||
fprintf(stderr, "Error: Bit-rate cannot be <= 0!\n");
|
||||
return -1;
|
||||
@ -102,20 +102,20 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
|
||||
|
||||
// SetUp
|
||||
// Open input file.
|
||||
std::string encoded_file_name = parser.GetFlag("encoded_file");
|
||||
std::string encoded_file_name = parser->GetFlag("encoded_file");
|
||||
FILE* encoded_file = fopen(encoded_file_name.c_str(), "wb");
|
||||
if (encoded_file == NULL) {
|
||||
fprintf(stderr, "Error: Cannot open encoded file\n");
|
||||
return -1;
|
||||
}
|
||||
std::string input_file_name = parser.GetFlag("input_file");
|
||||
std::string input_file_name = parser->GetFlag("input_file");
|
||||
FILE* input_file = fopen(input_file_name.c_str(), "rb");
|
||||
if (input_file == NULL) {
|
||||
fprintf(stderr, "Error: Cannot open input file\n");
|
||||
return -1;
|
||||
}
|
||||
// Open output file.
|
||||
std::string output_file_name = parser.GetFlag("output_file");
|
||||
std::string output_file_name = parser->GetFlag("output_file");
|
||||
FILE* output_file = fopen(output_file_name.c_str(), "wb");
|
||||
if (output_file == NULL) {
|
||||
fprintf(stderr, "Error: Cannot open output file\n");
|
||||
@ -123,8 +123,8 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
|
||||
}
|
||||
|
||||
// Get range of frames: will encode num_frames following start_frame).
|
||||
int start_frame = strtol((parser.GetFlag("start_frame")).c_str(), NULL, 10);
|
||||
int num_frames = strtol((parser.GetFlag("num_frames")).c_str(), NULL, 10);
|
||||
int start_frame = strtol((parser->GetFlag("start_frame")).c_str(), NULL, 10);
|
||||
int num_frames = strtol((parser->GetFlag("num_frames")).c_str(), NULL, 10);
|
||||
|
||||
// Codec SetUp.
|
||||
webrtc::VideoCodec inst;
|
||||
@ -185,19 +185,21 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
|
||||
webrtc::test::QualityMetricsResult psnr_result, ssim_result;
|
||||
EXPECT_EQ(0, webrtc::test::I420MetricsFromFiles(
|
||||
input_file_name.c_str(), output_file_name.c_str(),
|
||||
inst.width, inst.height,
|
||||
&psnr_result, &ssim_result));
|
||||
inst.width, inst.height, &psnr_result, &ssim_result));
|
||||
printf("PSNR avg: %f[dB], min: %f[dB]\nSSIM avg: %f, min: %f\n",
|
||||
psnr_result.average, psnr_result.min,
|
||||
ssim_result.average, ssim_result.min);
|
||||
psnr_result.average, psnr_result.min, ssim_result.average,
|
||||
ssim_result.min);
|
||||
return frame_cnt;
|
||||
}
|
||||
|
||||
int main(int argc, char** argv) {
  std::string program_name = argv[0];
  std::string usage = "Encodes and decodes a video sequence, and writes"
  std::string usage =
      "Encodes and decodes a video sequence, and writes"
      "results to a file.\n"
      "Example usage:\n" + program_name + " functionality"
      "Example usage:\n" +
      program_name +
      " functionality"
      " --w=352 --h=288 --input_file=input.yuv --output_file=output.yuv "
      " Command line flags:\n"
      "  - width(int): The width of the input file. Default: 352\n"
@ -228,8 +230,8 @@ int main(int argc, char** argv) {
  parser.SetFlag("output_file", webrtc::test::OutputPath() + "vp8_decoded.yuv");
  parser.SetFlag("encoded_file",
                 webrtc::test::OutputPath() + "vp8_encoded.vp8");
  parser.SetFlag("input_file", webrtc::test::ResourcePath("foreman_cif",
                                                          "yuv"));
  parser.SetFlag("input_file",
                 webrtc::test::ResourcePath("foreman_cif", "yuv"));
  parser.SetFlag("help", "false");

  parser.ProcessFlags();
@ -239,5 +241,5 @@ int main(int argc, char** argv) {
  }
  parser.PrintEnteredFlags();

  return SequenceCoder(parser);
  return SequenceCoder(&parser);
}
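The signature change from CommandLineParser& to CommandLineParser* applies the Google C++ style rule this lint pass enforces: a parameter the callee may mutate is passed by pointer, so the ampersand at the call site flags the side effect. A minimal illustration under that reading; MutatesArg is a hypothetical name.

    // Hypothetical function following the same convention: taking the
    // mutable argument by pointer forces callers to write '&parser',
    // making the potential mutation visible at the call site.
    void MutatesArg(webrtc::test::CommandLineParser* parser);

    // webrtc::test::CommandLineParser parser;
    // MutatesArg(&parser);  // '&' documents the side effect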
@ -23,7 +23,6 @@ class VP9Encoder : public VideoEncoder {
  virtual ~VP9Encoder() {}
};


class VP9Decoder : public VideoDecoder {
 public:
  static VP9Decoder* Create();
@ -54,7 +54,7 @@ VP9Encoder* VP9Encoder::Create() {

void VP9EncoderImpl::EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
                                                      void* user_data) {
  VP9EncoderImpl* enc = (VP9EncoderImpl*)(user_data);
  VP9EncoderImpl* enc = static_cast<VP9EncoderImpl*>(user_data);
  enc->GetEncodedLayerFrame(pkt);
}
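The cast cleanup above is the standard trampoline pattern for C callbacks: the object pointer goes in as void* and comes back out with static_cast, which is well defined for a round trip through void* and, unlike a C-style cast, will not compile for unrelated pointer types. A self-contained sketch of the pattern; Widget and Trampoline are illustrative names.

    // Illustrative trampoline: the C API only knows void*, so the C++
    // object is recovered with static_cast before dispatching.
    struct Widget {
      void OnEvent(int code) { /* handle event */ }
    };

    extern "C" void Trampoline(int code, void* user_data) {
      static_cast<Widget*>(user_data)->OnEvent(code);
    }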
@ -88,7 +88,7 @@ VP9EncoderImpl::~VP9EncoderImpl() {

int VP9EncoderImpl::Release() {
  if (encoded_image_._buffer != NULL) {
    delete [] encoded_image_._buffer;
    delete[] encoded_image_._buffer;
    encoded_image_._buffer = NULL;
  }
  if (encoder_ != NULL) {
@ -267,10 +267,10 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
  num_temporal_layers_ = 1;

  // Random start 16 bits is enough.
  picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF;
  picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF;  // NOLINT
  // Allocate memory for encoded image
  if (encoded_image_._buffer != NULL) {
    delete [] encoded_image_._buffer;
    delete[] encoded_image_._buffer;
  }
  encoded_image_._size = CalcBufferSize(kI420, codec_.width, codec_.height);
  encoded_image_._buffer = new uint8_t[encoded_image_._size];
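The appended // NOLINT is doing real work here: cpplint's runtime/threadsafe_fn check flags rand() and suggests rand_r() instead. Since this call only randomizes a starting picture ID, suppressing the warning on the single line is the intended fix rather than swapping the function. The suppression is per-line, as this fragment illustrates:

    // NOLINT silences cpplint for this one line only; the check stays
    // active for the rest of the file.
    int noise = rand() & 0xFF;  // NOLINT (thread safety irrelevant here)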
@ -278,8 +278,8 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
  // Creating a wrapper to the image - setting image data to NULL. Actual
  // pointer will be set in encode. Setting align to 1, as it is meaningless
  // (actual memory is not allocated).
  raw_ = vpx_img_wrap(NULL, VPX_IMG_FMT_I420, codec_.width, codec_.height,
                      1, NULL);
  raw_ = vpx_img_wrap(NULL, VPX_IMG_FMT_I420, codec_.width, codec_.height, 1,
                      NULL);
  // Populate encoder configuration with default values.
  if (vpx_codec_enc_config_default(vpx_codec_vp9_cx(), config_, 0)) {
    return WEBRTC_VIDEO_CODEC_ERROR;
@ -294,8 +294,8 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
  config_->g_lag_in_frames = 0;  // 0- no frame lagging
  config_->g_threads = 1;
  // Rate control settings.
  config_->rc_dropframe_thresh = inst->codecSpecific.VP9.frameDroppingOn ?
      30 : 0;
  config_->rc_dropframe_thresh =
      inst->codecSpecific.VP9.frameDroppingOn ? 30 : 0;
  config_->rc_end_usage = VPX_CBR;
  config_->g_pass = VPX_RC_ONE_PASS;
  config_->rc_min_quantizer = 2;
@ -316,12 +316,11 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
  } else {
    config_->kf_mode = VPX_KF_DISABLED;
  }
  config_->rc_resize_allowed = inst->codecSpecific.VP9.automaticResizeOn ?
      1 : 0;
  config_->rc_resize_allowed =
      inst->codecSpecific.VP9.automaticResizeOn ? 1 : 0;
  // Determine number of threads based on the image size and #cores.
  config_->g_threads = NumberOfThreads(config_->g_w,
                                       config_->g_h,
                                       number_of_cores);
  config_->g_threads =
      NumberOfThreads(config_->g_w, config_->g_h, number_of_cores);

  cpu_speed_ = GetCpuSpeed(config_->g_w, config_->g_h);
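The reflowed NumberOfThreads call hides a heuristic worth sketching: pick the encoder thread count from the frame area, capped by available cores. The thresholds below are assumptions for illustration, not the actual WebRTC implementation.

    // Plausible sketch of a resolution/core-based thread heuristic;
    // the thresholds are illustrative assumptions.
    static int NumberOfThreadsSketch(int width, int height, int cpus) {
      if (width * height >= 1280 * 720 && cpus > 4)
        return 4;  // HD content with plenty of cores.
      if (width * height >= 640 * 480 && cpus > 2)
        return 2;
      return 1;  // Small frames: threading overhead dominates.
    }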
@ -365,7 +364,7 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }

  tl0_pic_idx_ = static_cast<uint8_t>(rand());
  tl0_pic_idx_ = static_cast<uint8_t>(rand());  // NOLINT

  return InitAndSetControlSettings(inst);
}
@ -432,8 +431,10 @@ int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
  }
  // Register callback for getting each spatial layer.
  vpx_codec_priv_output_cx_pkt_cb_pair_t cbp = {
      VP9EncoderImpl::EncoderOutputCodedPacketCallback, (void*)(this)};
  vpx_codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK, (void*)(&cbp));
      VP9EncoderImpl::EncoderOutputCodedPacketCallback,
      reinterpret_cast<void*>(this)};
  vpx_codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK,
                    reinterpret_cast<void*>(&cbp));

  // Control function to set the number of column tiles in encoding a frame, in
  // log2 unit: e.g., 0 = 1 tile column, 1 = 2 tile columns, 2 = 4 tile columns.
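The tile-column comment deserves a concrete reading: VP9E_SET_TILE_COLUMNS takes log2 of the column count, so giving each encoder thread its own tile column means passing the log2 of the thread count. A sketch under that assumption, reusing encoder_ and config_ from the surrounding code; the derivation is illustrative.

    // Derive log2 of the largest power of two not exceeding g_threads,
    // then request that many tile columns (in log2 units).
    int log2_cols = 0;
    while ((1u << (log2_cols + 1)) <= config_->g_threads)
      ++log2_cols;
    vpx_codec_control(encoder_, VP9E_SET_TILE_COLUMNS, log2_cols);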
@ -468,7 +469,7 @@ uint32_t VP9EncoderImpl::MaxIntraTarget(uint32_t optimal_buffer_size) {
      optimal_buffer_size * scale_par * codec_.maxFramerate / 10;
  // Don't go below 3 times the per frame bandwidth.
  const uint32_t min_intra_size = 300;
  return (target_pct < min_intra_size) ? min_intra_size: target_pct;
  return (target_pct < min_intra_size) ? min_intra_size : target_pct;
}

int VP9EncoderImpl::Encode(const VideoFrame& input_image,
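To make the MaxIntraTarget units concrete: optimal_buffer_size is in milliseconds and the result is a percentage of the average per-frame bandwidth. Assuming scale_par is 0.5 (its typical value here), a 600 ms buffer at 30 fps gives 600 * 0.5 * 30 / 10 = 900, i.e. a key frame may spend up to 9x the average frame budget, and the min_intra_size floor of 300 is exactly the "3 times the per frame bandwidth" named in the comment.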
@ -551,7 +552,7 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
                                           uint32_t timestamp) {
  assert(codec_specific != NULL);
  codec_specific->codecType = kVideoCodecVP9;
  CodecSpecificInfoVP9 *vp9_info = &(codec_specific->codecSpecific.VP9);
  CodecSpecificInfoVP9* vp9_info = &(codec_specific->codecSpecific.VP9);
  // TODO(asapersson): Set correct value.
  vp9_info->inter_pic_predicted =
      (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ? false : true;
@ -910,10 +911,8 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
  }
  // During decode libvpx may get and release buffers from |frame_buffer_pool_|.
  // In practice libvpx keeps a few (~3-4) buffers alive at a time.
  if (vpx_codec_decode(decoder_,
                       buffer,
                       static_cast<unsigned int>(input_image._length),
                       0,
  if (vpx_codec_decode(decoder_, buffer,
                       static_cast<unsigned int>(input_image._length), 0,
                       VPX_DL_REALTIME)) {
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
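After a successful vpx_codec_decode() call, the decoded image is not returned directly; it is pulled through libvpx's iterator API. A minimal sketch of that retrieval step, assuming the decoder_ context from the surrounding code:

    // Retrieve decoded frames; for VP9 in this realtime mode at most
    // one image is produced per decode call.
    vpx_codec_iter_t iter = NULL;
    vpx_image_t* img = vpx_codec_get_frame(decoder_, &iter);
    if (img != NULL) {
      // Wrap and deliver the frame (see ReturnFrame below).
    }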
@ -943,10 +942,10 @@ int VP9DecoderImpl::ReturnFrame(const vpx_image_t* img, uint32_t timestamp) {
  // using a WrappedI420Buffer.
  rtc::scoped_refptr<WrappedI420Buffer> img_wrapped_buffer(
      new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
          img->d_w, img->d_h,
          img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
          img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
          img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
          img->d_w, img->d_h, img->planes[VPX_PLANE_Y],
          img->stride[VPX_PLANE_Y], img->planes[VPX_PLANE_U],
          img->stride[VPX_PLANE_U], img->planes[VPX_PLANE_V],
          img->stride[VPX_PLANE_V],
          // WrappedI420Buffer's mechanism for allowing the release of its frame
          // buffer is through a callback function. This is where we should
          // release |img_buffer|.
@ -9,8 +9,10 @@
 *
 */

#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_IMPL_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_IMPL_H_
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_

#include <vector>

#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
#include "webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
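The guard rename above is not cosmetic: cpplint's build/header_guard check derives the expected macro from the header's full path, so vp9_impl.h under codecs/vp9/ must spell out both the directory and the file name. For a hypothetical header webrtc/modules/video_coding/foo/bar_baz.h the expected pattern is:

    #ifndef WEBRTC_MODULES_VIDEO_CODING_FOO_BAR_BAZ_H_
    #define WEBRTC_MODULES_VIDEO_CODING_FOO_BAR_BAZ_H_
    // ...
    #endif  // WEBRTC_MODULES_VIDEO_CODING_FOO_BAR_BAZ_H_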
@ -129,7 +131,6 @@ class VP9EncoderImpl : public VP9Encoder {
  rtc::scoped_ptr<ScreenshareLayersVP9> spatial_layer_;
};


class VP9DecoderImpl : public VP9Decoder {
 public:
  VP9DecoderImpl();
@ -165,4 +166,4 @@ class VP9DecoderImpl : public VP9Decoder {
};
}  // namespace webrtc

#endif  // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_IMPL_H_
#endif  // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_