Move src/ -> webrtc/

TBR=niklas.enbom@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/915006

git-svn-id: http://webrtc.googlecode.com/svn/trunk@2963 4adac7df-926f-26a2-2b94-8c16560cd09d
andrew@webrtc.org
2012-10-22 18:19:23 +00:00
parent 24a419c0c7
commit 14b43beb7c
1888 changed files with 23 additions and 23 deletions


@@ -0,0 +1,33 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_MOCK_MOCK_PACKET_MANIPULATOR_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_MOCK_MOCK_PACKET_MANIPULATOR_H_
#include "modules/video_coding/codecs/test/packet_manipulator.h"
#include <string>
#include "common_video/interface/video_image.h"
#include "gmock/gmock.h"
#include "typedefs.h"
namespace webrtc {
namespace test {
class MockPacketManipulator : public PacketManipulator {
public:
MOCK_METHOD1(ManipulatePackets, int(webrtc::EncodedImage* encoded_image));
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_MOCK_MOCK_PACKET_MANIPULATOR_H_
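
A minimal sketch of how this mock could be exercised in a test, assuming gmock/gtest are available as elsewhere in this directory (the test name and expectation below are hypothetical and not part of this commit; the include path is inferred from the header guard):

  #include "gmock/gmock.h"
  #include "gtest/gtest.h"
  #include "modules/video_coding/codecs/test/mock/mock_packet_manipulator.h"

  using ::testing::_;
  using ::testing::Return;

  TEST(MockPacketManipulatorExample, ReportsNoDroppedPackets) {
    webrtc::test::MockPacketManipulator manipulator;
    // Pretend that no packets are dropped for the single expected call.
    EXPECT_CALL(manipulator, ManipulatePackets(_)).WillOnce(Return(0));
    webrtc::EncodedImage image;
    EXPECT_EQ(0, manipulator.ManipulatePackets(&image));
  }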


@@ -0,0 +1,111 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/test/packet_manipulator.h"
#include <cassert>
#include <cstdio>
namespace webrtc {
namespace test {
PacketManipulatorImpl::PacketManipulatorImpl(PacketReader* packet_reader,
const NetworkingConfig& config,
bool verbose)
: packet_reader_(packet_reader),
config_(config),
active_burst_packets_(0),
critsect_(CriticalSectionWrapper::CreateCriticalSection()),
random_seed_(1),
verbose_(verbose) {
assert(packet_reader);
}
PacketManipulatorImpl::~PacketManipulatorImpl() {
delete critsect_;
}
int PacketManipulatorImpl::ManipulatePackets(
webrtc::EncodedImage* encoded_image) {
assert(encoded_image);
int nbr_packets_dropped = 0;
// There's no need to copy the image data: for an EncodedImage object,
// setting the length to a new, lower value means that everything after that
// position in the byte array is regarded as dropped.
// EncodedImage._size is the number of allocated bytes.
// EncodedImage._length is how many of them are filled with data.
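// Example: for a 3000 byte frame where only the first 1500 byte packet
// survives, setting _length to 1500 makes everything in [1500, 3000) count as
// dropped, while _size still reports the full allocation.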
int new_length = 0;
packet_reader_->InitializeReading(encoded_image->_buffer,
encoded_image->_length,
config_.packet_size_in_bytes);
WebRtc_UWord8* packet = NULL;
int nbr_bytes_to_read;
// Keep track of whether we've lost any packets, since we shall then also lose
// the remainder of the current frame:
bool packet_loss_has_occurred = false;
while ((nbr_bytes_to_read = packet_reader_->NextPacket(&packet)) > 0) {
// Check if we're currently in a packet loss burst that is not completed:
if (active_burst_packets_ > 0) {
active_burst_packets_--;
nbr_packets_dropped++;
} else if (RandomUniform() < config_.packet_loss_probability ||
packet_loss_has_occurred) {
packet_loss_has_occurred = true;
nbr_packets_dropped++;
if (config_.packet_loss_mode == kBurst) {
// Initiate a new burst
active_burst_packets_ = config_.packet_loss_burst_length - 1;
}
} else {
new_length += nbr_bytes_to_read;
}
}
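// Note that since a single loss already discards the rest of the frame (via
// packet_loss_has_occurred), the remaining burst counter mainly manifests as
// dropped packets at the start of the following frame(s) when a burst
// outlasts the current frame.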
encoded_image->_length = new_length;
if (nbr_packets_dropped > 0) {
// Must set completeFrame to false to inform the decoder about this:
encoded_image->_completeFrame = false;
if (verbose_) {
printf("Dropped %d packets for frame %d (frame length: %d)\n",
nbr_packets_dropped, encoded_image->_timeStamp,
encoded_image->_length);
}
}
return nbr_packets_dropped;
}
void PacketManipulatorImpl::InitializeRandomSeed(unsigned int seed) {
random_seed_ = seed;
}
inline double PacketManipulatorImpl::RandomUniform() {
// Use the previous result as the new seed before each rand() call. This way
// it doesn't matter if other threads call rand() in between; we'll always get
// the same behavior as long as we're using a fixed initial seed.
critsect_->Enter();
srand(random_seed_);
random_seed_ = std::rand();
critsect_->Leave();
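// The +1.0 terms map the seed range [0, RAND_MAX] onto (0.0, 1.0], so the
// comparison RandomUniform() < packet_loss_probability never triggers when
// the probability is 0.0.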
return (random_seed_ + 1.0)/(RAND_MAX + 1.0);
}
const char* PacketLossModeToStr(PacketLossMode e) {
switch (e) {
case kUniform:
return "Uniform";
case kBurst:
return "Burst";
default:
assert(false);
return "Unknown";
}
}
} // namespace test
} // namespace webrtc


@@ -0,0 +1,113 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_PACKET_MANIPULATOR_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_PACKET_MANIPULATOR_H_
#include <cstdlib>
#include "modules/video_coding/codecs/interface/video_codec_interface.h"
#include "system_wrappers/interface/critical_section_wrapper.h"
#include "testsupport/packet_reader.h"
namespace webrtc {
namespace test {
// Which mode the packet loss shall be performed according to.
enum PacketLossMode {
// Drops packets with a configured probability independently for each packet
kUniform,
// Drops packets similar to uniform but when a packet is being dropped,
// the number of lost packets in a row is equal to the configured burst
// length.
kBurst
};
// Returns a string representation of the enum value.
const char* PacketLossModeToStr(PacketLossMode e);
// Contains configurations related to networking and simulation of
// scenarios caused by network interference.
struct NetworkingConfig {
NetworkingConfig()
: packet_size_in_bytes(1500), max_payload_size_in_bytes(1440),
packet_loss_mode(kUniform), packet_loss_probability(0.0),
packet_loss_burst_length(1) {
}
// Packet size in bytes. Default: 1500 bytes.
int packet_size_in_bytes;
// Encoder specific setting of maximum size in bytes of each payload.
// Default: 1440 bytes.
int max_payload_size_in_bytes;
// Packet loss mode. Two different packet loss models are supported:
// uniform or burst. This setting has no effect unless
// packet_loss_probability is >0.
// Default: uniform.
PacketLossMode packet_loss_mode;
// Packet loss probability. A value between 0.0 and 1.0 that defines the
// probability of a packet being lost. 0.1 means 10% and so on.
// Default: 0 (no loss).
double packet_loss_probability;
// Packet loss burst length. Defines how many packets will be lost in a burst
// when a packet has been decided to be lost. Must be >=1. Default: 1.
int packet_loss_burst_length;
};
// Class for simulating packet loss on the encoded frame data.
// When a packet loss has occurred in a frame, the remaining data in that
// frame is lost (even if burst length is only a single packet).
// TODO(kjellander): Support discarding only individual packets in the frame
// when CL 172001 has been submitted. This also requires a correct
// fragmentation header to be passed to the decoder.
//
// To get a repeatable packet drop pattern, re-initialize the random seed
// using InitializeRandomSeed before each test run.
class PacketManipulator {
public:
virtual ~PacketManipulator() {}
// Manipulates the data of the encoded_image to simulate parts being lost
// during transport.
// If packets are dropped from the frame data, the completeFrame field of the
// EncodedImage will be set to false.
// Returns the number of packets being dropped.
virtual int
ManipulatePackets(webrtc::EncodedImage* encoded_image) = 0;
};
class PacketManipulatorImpl : public PacketManipulator {
public:
PacketManipulatorImpl(PacketReader* packet_reader,
const NetworkingConfig& config,
bool verbose);
virtual ~PacketManipulatorImpl();
virtual int ManipulatePackets(webrtc::EncodedImage* encoded_image);
virtual void InitializeRandomSeed(unsigned int seed);
protected:
// Returns a uniformly distributed random value between 0.0 and 1.0
virtual double RandomUniform();
private:
PacketReader* packet_reader_;
const NetworkingConfig& config_;
// Used to simulate a burst over several frames.
int active_burst_packets_;
CriticalSectionWrapper* critsect_;
unsigned int random_seed_;
bool verbose_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_PACKET_MANIPULATOR_H_
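
For reference, a minimal usage sketch of the API above (the configuration values are hypothetical; in this commit the actual wiring is done by VideoProcessorImpl and the unit tests):

  webrtc::test::PacketReader packet_reader;
  webrtc::test::NetworkingConfig config;
  config.packet_loss_mode = webrtc::test::kBurst;
  config.packet_loss_probability = 0.1;  // 10% chance of starting a burst.
  config.packet_loss_burst_length = 3;
  webrtc::test::PacketManipulatorImpl manipulator(&packet_reader, config,
                                                  false);  // Not verbose.
  manipulator.InitializeRandomSeed(42);  // Makes the drop pattern repeatable.
  // Given an EncodedImage |image| produced by an encoder:
  //   int dropped_packets = manipulator.ManipulatePackets(&image);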


@@ -0,0 +1,153 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/test/packet_manipulator.h"
#include <queue>
#include "gtest/gtest.h"
#include "modules/video_coding/codecs/interface/video_codec_interface.h"
#include "modules/video_coding/codecs/test/predictive_packet_manipulator.h"
#include "testsupport/unittest_utils.h"
#include "typedefs.h"
namespace webrtc {
namespace test {
const double kNeverDropProbability = 0.0;
const double kAlwaysDropProbability = 1.0;
const int kBurstLength = 1;
class PacketManipulatorTest: public PacketRelatedTest {
protected:
PacketReader packet_reader_;
EncodedImage image_;
NetworkingConfig drop_config_;
NetworkingConfig no_drop_config_;
PacketManipulatorTest() {
image_._buffer = packet_data_;
image_._length = kPacketDataLength;
image_._size = kPacketDataLength;
drop_config_.packet_size_in_bytes = kPacketSizeInBytes;
drop_config_.packet_loss_probability = kAlwaysDropProbability;
drop_config_.packet_loss_burst_length = kBurstLength;
drop_config_.packet_loss_mode = kUniform;
no_drop_config_.packet_size_in_bytes = kPacketSizeInBytes;
no_drop_config_.packet_loss_probability = kNeverDropProbability;
no_drop_config_.packet_loss_burst_length = kBurstLength;
no_drop_config_.packet_loss_mode = kUniform;
}
virtual ~PacketManipulatorTest() {}
void SetUp() {
PacketRelatedTest::SetUp();
}
void TearDown() {
PacketRelatedTest::TearDown();
}
void VerifyPacketLoss(int expected_nbr_packets_dropped,
int actual_nbr_packets_dropped,
int expected_packet_data_length,
WebRtc_UWord8* expected_packet_data,
EncodedImage& actual_image) {
EXPECT_EQ(expected_nbr_packets_dropped, actual_nbr_packets_dropped);
EXPECT_EQ(expected_packet_data_length, static_cast<int>(image_._length));
EXPECT_EQ(0, memcmp(expected_packet_data, actual_image._buffer,
expected_packet_data_length));
}
};
TEST_F(PacketManipulatorTest, Constructor) {
PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
}
TEST_F(PacketManipulatorTest, DropNone) {
PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
VerifyPacketLoss(0, nbr_packets_dropped, kPacketDataLength,
packet_data_, image_);
}
TEST_F(PacketManipulatorTest, UniformDropNoneSmallFrame) {
int data_length = 400; // smaller than the packet size
image_._length = data_length;
PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
VerifyPacketLoss(0, nbr_packets_dropped, data_length,
packet_data_, image_);
}
TEST_F(PacketManipulatorTest, UniformDropAll) {
PacketManipulatorImpl manipulator(&packet_reader_, drop_config_, false);
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
VerifyPacketLoss(kPacketDataNumberOfPackets, nbr_packets_dropped,
0, packet_data_, image_);
}
// Use our customized test class to make the second packet be lost.
TEST_F(PacketManipulatorTest, UniformDropSinglePacket) {
drop_config_.packet_loss_probability = 0.5;
PredictivePacketManipulator manipulator(&packet_reader_, drop_config_);
manipulator.AddRandomResult(1.0);
manipulator.AddRandomResult(0.3); // less than 0.5 will cause packet loss
manipulator.AddRandomResult(1.0);
// Execute the test target method:
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
// Since we set up the predictive packet manipulator, it will throw away the
// second packet. The third packet is also lost because once one packet has
// been lost, the remainder of the frame is also discarded (in the current
// implementation).
VerifyPacketLoss(2, nbr_packets_dropped, kPacketSizeInBytes, packet1_,
image_);
}
// Use our customized test class to make the second packet be lost.
TEST_F(PacketManipulatorTest, BurstDropNinePackets) {
// Create a longer packet data structure (10 packets)
const int kNbrPackets = 10;
const int kDataLength = kPacketSizeInBytes * kNbrPackets;
WebRtc_UWord8 data[kDataLength];
WebRtc_UWord8* data_pointer = data;
// Fill with 0s, 1s and so on to be able to easily verify which were dropped:
for (int i = 0; i < kNbrPackets; ++i) {
memset(data_pointer + i * kPacketSizeInBytes, i, kPacketSizeInBytes);
}
// Overwrite the defaults from the test fixture:
image_._buffer = data;
image_._length = kDataLength;
image_._size = kDataLength;
drop_config_.packet_loss_probability = 0.5;
drop_config_.packet_loss_burst_length = 5;
drop_config_.packet_loss_mode = kBurst;
PredictivePacketManipulator manipulator(&packet_reader_, drop_config_);
manipulator.AddRandomResult(1.0);
manipulator.AddRandomResult(0.3); // less than 0.5 will cause packet loss
for (int i = 0; i < kNbrPackets - 2; ++i) {
manipulator.AddRandomResult(1.0);
}
// Execute the test target method:
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
// Should discard every packet after the first one.
VerifyPacketLoss(9, nbr_packets_dropped, kPacketSizeInBytes, data, image_);
}
} // namespace test
} // namespace webrtc


@@ -0,0 +1,48 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/test/predictive_packet_manipulator.h"
#include <cassert>
#include <cstdio>
#include "testsupport/packet_reader.h"
namespace webrtc {
namespace test {
PredictivePacketManipulator::PredictivePacketManipulator(
PacketReader* packet_reader, const NetworkingConfig& config)
: PacketManipulatorImpl(packet_reader, config, false) {
}
PredictivePacketManipulator::~PredictivePacketManipulator() {
}
void PredictivePacketManipulator::AddRandomResult(double result) {
assert(result >= 0.0 && result <= 1.0);
random_results_.push(result);
}
double PredictivePacketManipulator::RandomUniform() {
if (random_results_.size() == 0u) {
fprintf(stderr, "No more stored results, please make sure AddRandomResult() "
"is called the same number of times as RandomUniform() will be invoked, "
"i.e. once per packet.\n");
assert(false);
}
double result = random_results_.front();
random_results_.pop();
return result;
}
} // namespace test
} // namespace webrtc


@@ -0,0 +1,45 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_PREDICTIVE_PACKET_MANIPULATOR_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_PREDICTIVE_PACKET_MANIPULATOR_H_
#include <queue>
#include "modules/video_coding/codecs/test/packet_manipulator.h"
#include "testsupport/packet_reader.h"
namespace webrtc {
namespace test {
// Predictive packet manipulator that allows for setup of the result of
// the random invocations.
class PredictivePacketManipulator : public PacketManipulatorImpl {
public:
PredictivePacketManipulator(PacketReader* packet_reader,
const NetworkingConfig& config);
virtual ~PredictivePacketManipulator();
// Adds a result. You must add at least the same number of results as the
// expected calls to the RandomUniform method. The results are added to a
// FIFO queue so they will be returned in the same order they were added.
// Result parameter must be 0.0 to 1.0.
void AddRandomResult(double result);
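// For example (mirroring packet_manipulator_unittest.cc): queueing 1.0, 0.3
// and 1.0 with a packet_loss_probability of 0.5 keeps the first packet, drops
// the second, and the base PacketManipulatorImpl then discards the rest of
// the frame.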
protected:
// Returns a uniformly distributed random value between 0.0 and 1.0
virtual double RandomUniform();
private:
std::queue<double> random_results_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_PREDICTIVE_PACKET_MANIPULATOR_H_


@@ -0,0 +1,172 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/test/stats.h"
#include <algorithm> // min_element, max_element
#include <cassert>
#include <cstdio>
namespace webrtc {
namespace test {
Stats::Stats() {}
Stats::~Stats() {}
bool LessForEncodeTime(const FrameStatistic& s1, const FrameStatistic& s2) {
return s1.encode_time_in_us < s2.encode_time_in_us;
}
bool LessForDecodeTime(const FrameStatistic& s1, const FrameStatistic& s2) {
return s1.decode_time_in_us < s2.decode_time_in_us;
}
bool LessForEncodedSize(const FrameStatistic& s1, const FrameStatistic& s2) {
return s1.encoded_frame_length_in_bytes < s2.encoded_frame_length_in_bytes;
}
bool LessForBitRate(const FrameStatistic& s1, const FrameStatistic& s2) {
return s1.bit_rate_in_kbps < s2.bit_rate_in_kbps;
}
FrameStatistic& Stats::NewFrame(int frame_number) {
assert(frame_number >= 0);
FrameStatistic stat;
stat.frame_number = frame_number;
stats_.push_back(stat);
return stats_[frame_number];
}
void Stats::PrintSummary() {
printf("Processing summary:\n");
if (stats_.size() == 0) {
printf("No frame statistics have been logged yet.\n");
return;
}
// Calculate min, max, average and total encoding time
int total_encoding_time_in_us = 0;
int total_decoding_time_in_us = 0;
int total_encoded_frames_lengths = 0;
int total_encoded_key_frames_lengths = 0;
int total_encoded_nonkey_frames_lengths = 0;
int nbr_keyframes = 0;
int nbr_nonkeyframes = 0;
for (FrameStatisticsIterator it = stats_.begin();
it != stats_.end(); ++it) {
total_encoding_time_in_us += it->encode_time_in_us;
total_decoding_time_in_us += it->decode_time_in_us;
total_encoded_frames_lengths += it->encoded_frame_length_in_bytes;
if (it->frame_type == webrtc::kKeyFrame) {
total_encoded_key_frames_lengths += it->encoded_frame_length_in_bytes;
nbr_keyframes++;
} else {
total_encoded_nonkey_frames_lengths += it->encoded_frame_length_in_bytes;
nbr_nonkeyframes++;
}
}
FrameStatisticsIterator frame;
// ENCODING
printf("Encoding time:\n");
frame = std::min_element(stats_.begin(),
stats_.end(), LessForEncodeTime);
printf(" Min : %7d us (frame %d)\n",
frame->encode_time_in_us, frame->frame_number);
frame = std::max_element(stats_.begin(),
stats_.end(), LessForEncodeTime);
printf(" Max : %7d us (frame %d)\n",
frame->encode_time_in_us, frame->frame_number);
printf(" Average : %7d us\n",
static_cast<int>(total_encoding_time_in_us / stats_.size()));
// DECODING
printf("Decoding time:\n");
// only consider frames that were successfully decoded (packet loss may cause
// failures)
std::vector<FrameStatistic> decoded_frames;
for (std::vector<FrameStatistic>::iterator it = stats_.begin();
it != stats_.end(); ++it) {
if (it->decoding_successful) {
decoded_frames.push_back(*it);
}
}
if (decoded_frames.size() == 0) {
printf("No successfully decoded frames exist in this statistics.\n");
} else {
frame = std::min_element(decoded_frames.begin(),
decoded_frames.end(), LessForDecodeTime);
printf(" Min : %7d us (frame %d)\n",
frame->decode_time_in_us, frame->frame_number);
frame = std::max_element(decoded_frames.begin(),
decoded_frames.end(), LessForDecodeTime);
printf(" Max : %7d us (frame %d)\n",
frame->decode_time_in_us, frame->frame_number);
printf(" Average : %7d us\n",
static_cast<int>(total_decoding_time_in_us / decoded_frames.size()));
printf(" Failures: %d frames failed to decode.\n",
static_cast<int>(stats_.size() - decoded_frames.size()));
}
// SIZE
printf("Frame sizes:\n");
frame = std::min_element(stats_.begin(),
stats_.end(), LessForEncodedSize);
printf(" Min : %7d bytes (frame %d)\n",
frame->encoded_frame_length_in_bytes, frame->frame_number);
frame = std::max_element(stats_.begin(),
stats_.end(), LessForEncodedSize);
printf(" Max : %7d bytes (frame %d)\n",
frame->encoded_frame_length_in_bytes, frame->frame_number);
printf(" Average : %7d bytes\n",
static_cast<int>(total_encoded_frames_lengths / stats_.size()));
if (nbr_keyframes > 0) {
printf(" Average key frame size : %7d bytes (%d keyframes)\n",
total_encoded_key_frames_lengths / nbr_keyframes,
nbr_keyframes);
}
if (nbr_nonkeyframes > 0) {
printf(" Average non-key frame size: %7d bytes (%d frames)\n",
total_encoded_nonkey_frames_lengths / nbr_nonkeyframes,
nbr_nonkeyframes);
}
// BIT RATE
printf("Bit rates:\n");
frame = std::min_element(stats_.begin(),
stats_.end(), LessForBitRate);
printf(" Min bit rate: %7d kbps (frame %d)\n",
frame->bit_rate_in_kbps, frame->frame_number);
frame = std::max_element(stats_.begin(),
stats_.end(), LessForBitRate);
printf(" Max bit rate: %7d kbps (frame %d)\n",
frame->bit_rate_in_kbps, frame->frame_number);
printf("\n");
printf("Total encoding time : %7d ms.\n",
total_encoding_time_in_us / 1000);
printf("Total decoding time : %7d ms.\n",
total_decoding_time_in_us / 1000);
printf("Total processing time: %7d ms.\n",
(total_encoding_time_in_us + total_decoding_time_in_us) / 1000);
}
} // namespace test
} // namespace webrtc


@@ -0,0 +1,76 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_STATS_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_STATS_H_
#include <vector>
#include "common_video/interface/video_image.h"
namespace webrtc {
namespace test {
// Contains statistics of a single frame that has been processed.
struct FrameStatistic {
FrameStatistic() :
encoding_successful(false), decoding_successful(false),
encode_return_code(0), decode_return_code(0),
encode_time_in_us(0), decode_time_in_us(0),
frame_number(0), packets_dropped(0), total_packets(0),
bit_rate_in_kbps(0), encoded_frame_length_in_bytes(0),
frame_type(kDeltaFrame) {
};
bool encoding_successful;
bool decoding_successful;
int encode_return_code;
int decode_return_code;
int encode_time_in_us;
int decode_time_in_us;
int frame_number;
// How many packets of the encoded frame data were discarded (if any).
int packets_dropped;
int total_packets;
// Current bit rate. Calculated from the encoded frame size divided by the
// time interval per frame.
int bit_rate_in_kbps;
// Copied from EncodedImage
int encoded_frame_length_in_bytes;
webrtc::VideoFrameType frame_type;
};
// Handles statistics from a single video processing run.
// Contains calculation methods for interesting metrics from these stats.
class Stats {
public:
typedef std::vector<FrameStatistic>::iterator FrameStatisticsIterator;
Stats();
virtual ~Stats();
// Add a new statistic data object.
// The frame number must be incrementing and start at zero, so that it can be
// used as an index into the stats_ vector.
// Returns the newly created statistic object.
FrameStatistic& NewFrame(int frame_number);
// Prints a summary of all the statistics that have been gathered during the
// processing
void PrintSummary();
std::vector<FrameStatistic> stats_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_STATS_H_
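
A brief sketch of how a processing run can feed this class (the numeric values are hypothetical; in this commit the real caller is VideoProcessorImpl):

  webrtc::test::Stats stats;
  webrtc::test::FrameStatistic& stat = stats.NewFrame(0);
  stat.encoding_successful = true;
  stat.encode_time_in_us = 12000;             // Hypothetical measurement.
  stat.encoded_frame_length_in_bytes = 1450;  // Hypothetical frame size.
  stat.frame_type = webrtc::kKeyFrame;
  stat.decoding_successful = true;
  stat.decode_time_in_us = 8000;              // Hypothetical measurement.
  stats.PrintSummary();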


@@ -0,0 +1,64 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/test/stats.h"
#include "gtest/gtest.h"
#include "typedefs.h"
namespace webrtc {
namespace test {
class StatsTest: public testing::Test {
protected:
StatsTest() {
}
virtual ~StatsTest() {
}
void SetUp() {
stats_ = new Stats();
}
void TearDown() {
delete stats_;
}
Stats* stats_;
};
// Test empty object
TEST_F(StatsTest, Uninitialized) {
EXPECT_EQ(0u, stats_->stats_.size());
stats_->PrintSummary(); // should not crash
}
// Add single frame stats and verify
TEST_F(StatsTest, AddOne) {
stats_->NewFrame(0u);
FrameStatistic* frameStat = &stats_->stats_[0];
EXPECT_EQ(0, frameStat->frame_number);
}
// Add multiple frame stats and verify
TEST_F(StatsTest, AddMany) {
int nbr_of_frames = 1000;
for (int i = 0; i < nbr_of_frames; ++i) {
FrameStatistic& frameStat = stats_->NewFrame(i);
EXPECT_EQ(i, frameStat.frame_number);
}
EXPECT_EQ(nbr_of_frames, static_cast<int>(stats_->stats_.size()));
stats_->PrintSummary(); // should not crash
}
} // namespace test
} // namespace webrtc


@@ -0,0 +1,65 @@
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'conditions': [
['include_tests==1', {
'targets': [
{
'target_name': 'video_codecs_test_framework',
'type': '<(library)',
'dependencies': [
'<(webrtc_root)/test/test.gyp:test_support',
],
'sources': [
'mock/mock_packet_manipulator.h',
'packet_manipulator.h',
'packet_manipulator.cc',
'predictive_packet_manipulator.h',
'predictive_packet_manipulator.cc',
'stats.h',
'stats.cc',
'videoprocessor.h',
'videoprocessor.cc',
],
},
{
'target_name': 'video_codecs_test_framework_unittests',
'type': 'executable',
'dependencies': [
'video_codecs_test_framework',
'webrtc_video_coding',
'<(DEPTH)/testing/gmock.gyp:gmock',
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
'<(webrtc_root)/test/test.gyp:test_support_main',
],
'sources': [
'packet_manipulator_unittest.cc',
'stats_unittest.cc',
'videoprocessor_unittest.cc',
],
},
{
'target_name': 'video_codecs_test_framework_integrationtests',
'type': 'executable',
'dependencies': [
'video_codecs_test_framework',
'webrtc_video_coding',
'<(DEPTH)/testing/gtest.gyp:gtest',
'<(webrtc_root)/test/metrics.gyp:metrics',
'<(webrtc_root)/test/test.gyp:test_support_main',
'<(webrtc_vp8_dir)/vp8.gyp:webrtc_vp8',
],
'sources': [
'videoprocessor_integrationtest.cc',
],
},
], # targets
}], # include_tests
], # conditions
}


@@ -0,0 +1,388 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/test/videoprocessor.h"
#include <cassert>
#include <cstring>
#include <limits>
#include "system_wrappers/interface/cpu_info.h"
namespace webrtc {
namespace test {
VideoProcessorImpl::VideoProcessorImpl(webrtc::VideoEncoder* encoder,
webrtc::VideoDecoder* decoder,
FrameReader* frame_reader,
FrameWriter* frame_writer,
PacketManipulator* packet_manipulator,
const TestConfig& config,
Stats* stats)
: encoder_(encoder),
decoder_(decoder),
frame_reader_(frame_reader),
frame_writer_(frame_writer),
packet_manipulator_(packet_manipulator),
config_(config),
stats_(stats),
encode_callback_(NULL),
decode_callback_(NULL),
source_buffer_(NULL),
first_key_frame_has_been_excluded_(false),
last_frame_missing_(false),
initialized_(false),
encoded_frame_size_(0),
prev_time_stamp_(0),
num_dropped_frames_(0),
num_spatial_resizes_(0),
last_encoder_frame_width_(0),
last_encoder_frame_height_(0),
scaler_() {
assert(encoder);
assert(decoder);
assert(frame_reader);
assert(frame_writer);
assert(packet_manipulator);
assert(stats);
}
bool VideoProcessorImpl::Init() {
// Calculate a factor used for bit rate calculations:
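// The factor converts an encoded frame length in bytes into a bit rate in
// kbps: e.g. with maxFramerate == 30 the factor is 0.24, so a 1500-byte frame
// corresponds to 1500 * 0.24 = 360 kbps.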
bit_rate_factor_ = config_.codec_settings->maxFramerate * 0.001 * 8; // bits
int frame_length_in_bytes = frame_reader_->FrameLength();
// Initialize data structures used by the encoder/decoder APIs
source_buffer_ = new WebRtc_UWord8[frame_length_in_bytes];
last_successful_frame_buffer_ = new WebRtc_UWord8[frame_length_in_bytes];
// Set fixed properties common for all frames:
source_frame_.SetWidth(config_.codec_settings->width);
source_frame_.SetHeight(config_.codec_settings->height);
source_frame_.VerifyAndAllocate(frame_length_in_bytes);
source_frame_.SetLength(frame_length_in_bytes);
// To keep track of spatial resize actions by encoder.
last_encoder_frame_width_ = config_.codec_settings->width;
last_encoder_frame_height_ = config_.codec_settings->height;
// Setup required callbacks for the encoder/decoder:
encode_callback_ = new VideoProcessorEncodeCompleteCallback(this);
decode_callback_ = new VideoProcessorDecodeCompleteCallback(this);
WebRtc_Word32 register_result =
encoder_->RegisterEncodeCompleteCallback(encode_callback_);
if (register_result != WEBRTC_VIDEO_CODEC_OK) {
fprintf(stderr, "Failed to register encode complete callback, return code: "
"%d\n", register_result);
return false;
}
register_result = decoder_->RegisterDecodeCompleteCallback(decode_callback_);
if (register_result != WEBRTC_VIDEO_CODEC_OK) {
fprintf(stderr, "Failed to register decode complete callback, return code: "
"%d\n", register_result);
return false;
}
// Init the encoder and decoder
WebRtc_UWord32 nbr_of_cores = 1;
if (!config_.use_single_core) {
nbr_of_cores = CpuInfo::DetectNumberOfCores();
}
WebRtc_Word32 init_result =
encoder_->InitEncode(config_.codec_settings, nbr_of_cores,
config_.networking_config.max_payload_size_in_bytes);
if (init_result != WEBRTC_VIDEO_CODEC_OK) {
fprintf(stderr, "Failed to initialize VideoEncoder, return code: %d\n",
init_result);
return false;
}
init_result = decoder_->InitDecode(config_.codec_settings, nbr_of_cores);
if (init_result != WEBRTC_VIDEO_CODEC_OK) {
fprintf(stderr, "Failed to initialize VideoDecoder, return code: %d\n",
init_result);
return false;
}
if (config_.verbose) {
printf("Video Processor:\n");
printf(" #CPU cores used : %d\n", nbr_of_cores);
printf(" Total # of frames: %d\n", frame_reader_->NumberOfFrames());
printf(" Codec settings:\n");
printf(" Start bitrate : %d kbps\n",
config_.codec_settings->startBitrate);
printf(" Width : %d\n", config_.codec_settings->width);
printf(" Height : %d\n", config_.codec_settings->height);
}
initialized_ = true;
return true;
}
VideoProcessorImpl::~VideoProcessorImpl() {
delete[] source_buffer_;
delete[] last_successful_frame_buffer_;
encoder_->RegisterEncodeCompleteCallback(NULL);
delete encode_callback_;
decoder_->RegisterDecodeCompleteCallback(NULL);
delete decode_callback_;
}
void VideoProcessorImpl::SetRates(int bit_rate, int frame_rate) {
int set_rates_result = encoder_->SetRates(bit_rate, frame_rate);
assert(set_rates_result >= 0);
if (set_rates_result < 0) {
fprintf(stderr, "Failed to update encoder with new rate %d, "
"return code: %d\n", bit_rate, set_rates_result);
}
num_dropped_frames_ = 0;
num_spatial_resizes_ = 0;
}
int VideoProcessorImpl::EncodedFrameSize() {
return encoded_frame_size_;
}
int VideoProcessorImpl::NumberDroppedFrames() {
return num_dropped_frames_;
}
int VideoProcessorImpl::NumberSpatialResizes() {
return num_spatial_resizes_;
}
bool VideoProcessorImpl::ProcessFrame(int frame_number) {
assert(frame_number >=0);
if (!initialized_) {
fprintf(stderr, "Attempting to use uninitialized VideoProcessor!\n");
return false;
}
// |prev_time_stamp_| is used for getting number of dropped frames.
if (frame_number == 0) {
prev_time_stamp_ = -1;
}
if (frame_reader_->ReadFrame(source_buffer_)) {
// Copy the source frame to the newly read frame data.
// Length is common for all frames.
source_frame_.CopyFrame(source_frame_.Length(), source_buffer_);
// Ensure we have a new statistics data object we can fill:
FrameStatistic& stat = stats_->NewFrame(frame_number);
encode_start_ = TickTime::Now();
// Use the frame number as "timestamp" to identify frames
source_frame_.SetTimeStamp(frame_number);
// Decide if we're going to force a keyframe:
std::vector<VideoFrameType> frame_types(1, kDeltaFrame);
if (config_.keyframe_interval > 0 &&
frame_number % config_.keyframe_interval == 0) {
frame_types[0] = kKeyFrame;
}
// For dropped frames, we regard them as zero size encoded frames.
encoded_frame_size_ = 0;
WebRtc_Word32 encode_result = encoder_->Encode(source_frame_, NULL,
&frame_types);
if (encode_result != WEBRTC_VIDEO_CODEC_OK) {
fprintf(stderr, "Failed to encode frame %d, return code: %d\n",
frame_number, encode_result);
}
stat.encode_return_code = encode_result;
return true;
} else {
return false; // we've reached the last frame
}
}
void VideoProcessorImpl::FrameEncoded(EncodedImage* encoded_image) {
// Timestamp is frame number, so this gives us #dropped frames.
int num_dropped_from_prev_encode = encoded_image->_timeStamp -
prev_time_stamp_ - 1;
num_dropped_frames_ += num_dropped_from_prev_encode;
prev_time_stamp_ = encoded_image->_timeStamp;
if (num_dropped_from_prev_encode > 0) {
// For dropped frames, we write out the last decoded frame to avoid getting
// out of sync for the computation of PSNR and SSIM.
for (int i = 0; i < num_dropped_from_prev_encode; i++) {
frame_writer_->WriteFrame(last_successful_frame_buffer_);
}
}
// Frame is not dropped, so update the encoded frame size
// (encoder callback is only called for non-zero length frames).
encoded_frame_size_ = encoded_image->_length;
TickTime encode_stop = TickTime::Now();
int frame_number = encoded_image->_timeStamp;
FrameStatistic& stat = stats_->stats_[frame_number];
stat.encode_time_in_us = GetElapsedTimeMicroseconds(encode_start_,
encode_stop);
stat.encoding_successful = true;
stat.encoded_frame_length_in_bytes = encoded_image->_length;
stat.frame_number = encoded_image->_timeStamp;
stat.frame_type = encoded_image->_frameType;
stat.bit_rate_in_kbps = encoded_image->_length * bit_rate_factor_;
stat.total_packets = encoded_image->_length /
config_.networking_config.packet_size_in_bytes + 1;
// Perform packet loss if the criteria are fulfilled:
bool exclude_this_frame = false;
// Only keyframes can be excluded
if (encoded_image->_frameType == kKeyFrame) {
switch (config_.exclude_frame_types) {
case kExcludeOnlyFirstKeyFrame:
if (!first_key_frame_has_been_excluded_) {
first_key_frame_has_been_excluded_ = true;
exclude_this_frame = true;
}
break;
case kExcludeAllKeyFrames:
exclude_this_frame = true;
break;
default:
assert(false);
}
}
if (!exclude_this_frame) {
stat.packets_dropped =
packet_manipulator_->ManipulatePackets(encoded_image);
}
// Keep track of whether frames are lost due to packet loss, so we can tell
// this to the decoder (in the full stack this is handled by the RTP logic).
decode_start_ = TickTime::Now();
// TODO(kjellander): Pass fragmentation header to the decoder when
// CL 172001 has been submitted and PacketManipulator supports this.
WebRtc_Word32 decode_result = decoder_->Decode(*encoded_image,
last_frame_missing_, NULL);
stat.decode_return_code = decode_result;
if (decode_result != WEBRTC_VIDEO_CODEC_OK) {
// Write the last successful frame to the output file to avoid getting it out
// of sync with the source file for SSIM and PSNR comparisons:
frame_writer_->WriteFrame(last_successful_frame_buffer_);
}
// Save the loss status so we can inform the decoder for the next frame:
last_frame_missing_ = encoded_image->_length == 0;
}
void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) {
TickTime decode_stop = TickTime::Now();
int frame_number = image.TimeStamp();
// Report stats
FrameStatistic& stat = stats_->stats_[frame_number];
stat.decode_time_in_us = GetElapsedTimeMicroseconds(decode_start_,
decode_stop);
stat.decoding_successful = true;
// Check for resize action (either down or up):
if (static_cast<int>(image.Width()) != last_encoder_frame_width_ ||
static_cast<int>(image.Height()) != last_encoder_frame_height_ ) {
++num_spatial_resizes_;
last_encoder_frame_width_ = image.Width();
last_encoder_frame_height_ = image.Height();
}
// Check if codec size is different from native/original size, and if so,
// upsample back to original size: needed for PSNR and SSIM computations.
if (image.Width() != config_.codec_settings->width ||
image.Height() != config_.codec_settings->height) {
VideoFrame up_image;
int ret_val = scaler_.Set(image.Width(), image.Height(),
config_.codec_settings->width,
config_.codec_settings->height,
kI420, kI420, kScaleBilinear);
assert(ret_val >= 0);
if (ret_val < 0) {
fprintf(stderr, "Failed to set scalar for frame: %d, return code: %d\n",
frame_number, ret_val);
}
ret_val = scaler_.Scale(image, &up_image);
assert(ret_val >= 0);
if (ret_val < 0) {
fprintf(stderr, "Failed to scale frame: %d, return code: %d\n",
frame_number, ret_val);
}
// Update our copy of the last successful frame:
memcpy(last_successful_frame_buffer_, up_image.Buffer(), up_image.Length());
bool write_success = frame_writer_->WriteFrame(up_image.Buffer());
assert(write_success);
if (!write_success) {
fprintf(stderr, "Failed to write frame %d to disk!", frame_number);
}
up_image.Free();
} else { // No resize.
// Update our copy of the last successful frame:
memcpy(last_successful_frame_buffer_, image.Buffer(), image.Length());
bool write_success = frame_writer_->WriteFrame(image.Buffer());
assert(write_success);
if (!write_success) {
fprintf(stderr, "Failed to write frame %d to disk!", frame_number);
}
}
}
int VideoProcessorImpl::GetElapsedTimeMicroseconds(
const webrtc::TickTime& start, const webrtc::TickTime& stop) {
WebRtc_UWord64 encode_time = (stop - start).Microseconds();
assert(encode_time <
static_cast<unsigned int>(std::numeric_limits<int>::max()));
return static_cast<int>(encode_time);
}
const char* ExcludeFrameTypesToStr(ExcludeFrameTypes e) {
switch (e) {
case kExcludeOnlyFirstKeyFrame:
return "ExcludeOnlyFirstKeyFrame";
case kExcludeAllKeyFrames:
return "ExcludeAllKeyFrames";
default:
assert(false);
return "Unknown";
}
}
const char* VideoCodecTypeToStr(webrtc::VideoCodecType e) {
switch (e) {
case kVideoCodecVP8:
return "VP8";
case kVideoCodecI420:
return "I420";
case kVideoCodecRED:
return "RED";
case kVideoCodecULPFEC:
return "ULPFEC";
case kVideoCodecUnknown:
return "Unknown";
default:
assert(false);
return "Unknown";
}
}
// Callbacks
WebRtc_Word32
VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codec_specific_info,
const webrtc::RTPFragmentationHeader* fragmentation) {
video_processor_->FrameEncoded(&encoded_image); // Forward to parent class.
return 0;
}
WebRtc_Word32
VideoProcessorImpl::VideoProcessorDecodeCompleteCallback::Decoded(
VideoFrame& image) {
video_processor_->FrameDecoded(image); // forward to parent class
return 0;
}
} // namespace test
} // namespace webrtc


@@ -0,0 +1,260 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_H_
#include <string>
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "common_video/libyuv/include/scaler.h"
#include "modules/interface/module_common_types.h"
#include "modules/video_coding/codecs/interface/video_codec_interface.h"
#include "modules/video_coding/codecs/test/packet_manipulator.h"
#include "modules/video_coding/codecs/test/stats.h"
#include "system_wrappers/interface/tick_util.h"
#include "testsupport/frame_reader.h"
#include "testsupport/frame_writer.h"
namespace webrtc {
namespace test {
// Defines which frame types shall be excluded from packet loss and when.
enum ExcludeFrameTypes {
// Will exclude the first keyframe in the video sequence from packet loss.
// Following keyframes will be targeted for packet loss.
kExcludeOnlyFirstKeyFrame,
// Exclude all keyframes from packet loss, no matter where in the video
// sequence they occur.
kExcludeAllKeyFrames
};
// Returns a string representation of the enum value.
const char* ExcludeFrameTypesToStr(ExcludeFrameTypes e);
// Test configuration for a test run
struct TestConfig {
TestConfig()
: name(""), description(""), test_number(0),
input_filename(""), output_filename(""), output_dir("out"),
networking_config(), exclude_frame_types(kExcludeOnlyFirstKeyFrame),
frame_length_in_bytes(-1), use_single_core(false), keyframe_interval(0),
codec_settings(NULL), verbose(true) {
};
// Name of the test. This is purely metadata and does not affect
// the test in any way.
std::string name;
// More detailed description of the test. This is purely metadata and does
// not affect the test in any way.
std::string description;
// Number of this test. Useful if multiple runs of the same test with
// different configurations shall be managed.
int test_number;
// File to process for the test. This must be a video file in the YUV format.
std::string input_filename;
// File to write to during processing for the test. Will be a video file
// in the YUV format.
std::string output_filename;
// Path to the directory where encoded files will be put
// (absolute or relative to the executable). Default: "out".
std::string output_dir;
// Configurations related to networking.
NetworkingConfig networking_config;
// Decides how the packet loss simulations shall exclude certain frames
// from packet loss. Default: kExcludeOnlyFirstKeyFrame.
ExcludeFrameTypes exclude_frame_types;
// The length of a single frame of the input video file. This value is
// calculated out of the width and height according to the video format
// specification. Must be set before processing.
int frame_length_in_bytes;
// Force the encoder and decoder to use a single core for processing.
// Using a single core is necessary to get deterministic behavior for the
// encoded frames: with multiple cores, the encoded frames will differ from
// run to run, since the cores compete for each frame's byte budget in
// parallel.
// If set to false, the maximum number of available cores will be used.
// Default: false.
bool use_single_core;
// If set to a value >0 this setting forces the encoder to create a keyframe
// every Nth frame. Note that the encoder may create a keyframe in other
// locations in addition to the interval that is set using this parameter.
// Forcing key frames may also affect encoder planning optimizations in
// a negative way, since it will suddenly be forced to produce an expensive
// key frame.
// Default: 0.
int keyframe_interval;
// The codec settings to use for the test (target bitrate, video size,
// framerate and so on). This struct must be created and filled in using
// the VideoCodingModule::Codec() method.
webrtc::VideoCodec* codec_settings;
// If printing of information to stdout shall be performed during processing.
bool verbose;
};
// Returns a string representation of the enum value.
const char* VideoCodecTypeToStr(webrtc::VideoCodecType e);
// Handles encoding/decoding of video using the VideoEncoder/VideoDecoder
// interfaces. This is done in a sequential manner in order to be able to
// measure times properly.
// The class processes a frame at the time for the configured input file.
// It maintains state of where in the source input file the processing is at.
//
// Regarding packet loss: note that keyframes are excluded (the first one or
// all of them, depending on the ExcludeFrameTypes setting). This is because if
// key frames were altered, all the following delta frames would be pretty much
// worthless. VP8 has an error-resilience feature that lets it handle packet
// loss in keyframes other than the first one, which is why only the first is
// excluded by default.
// Packet loss in such important frames is handled on a higher level in the
// Video Engine, where signaling would request a retransmit of the lost packets,
// since they're so important.
//
// Note this class is not thread safe in any way and is meant for simple testing
// purposes.
class VideoProcessor {
public:
virtual ~VideoProcessor() {}
// Performs initial calculations about frame size, sets up callbacks etc.
// Returns false if an error has occurred, in addition to printing to stderr.
virtual bool Init() = 0;
// Processes a single frame. Returns true as long as there's more frames
// available in the source clip.
// Frame number must be an integer >=0.
virtual bool ProcessFrame(int frame_number) = 0;
// Updates the encoder with the target bit rate and the frame rate.
virtual void SetRates(int bit_rate, int frame_rate) = 0;
// Return the size of the encoded frame in bytes. Dropped frames by the
// encoder are regarded as zero size.
virtual int EncodedFrameSize() = 0;
// Return the number of dropped frames.
virtual int NumberDroppedFrames() = 0;
// Return the number of spatial resizes.
virtual int NumberSpatialResizes() = 0;
};
class VideoProcessorImpl : public VideoProcessor {
public:
VideoProcessorImpl(webrtc::VideoEncoder* encoder,
webrtc::VideoDecoder* decoder,
FrameReader* frame_reader,
FrameWriter* frame_writer,
PacketManipulator* packet_manipulator,
const TestConfig& config,
Stats* stats);
virtual ~VideoProcessorImpl();
virtual bool Init();
virtual bool ProcessFrame(int frame_number);
private:
// Invoked by the callback when a frame has completed encoding.
void FrameEncoded(webrtc::EncodedImage* encodedImage);
// Invoked by the callback when a frame has completed decoding.
void FrameDecoded(const webrtc::VideoFrame& image);
// Used for getting a 32-bit integer representing time
// (checks the size is within signed 32-bit bounds before casting it)
int GetElapsedTimeMicroseconds(const webrtc::TickTime& start,
const webrtc::TickTime& stop);
// Updates the encoder with the target bit rate and the frame rate.
void SetRates(int bit_rate, int frame_rate);
// Return the size of the encoded frame in bytes.
int EncodedFrameSize();
// Return the number of dropped frames.
int NumberDroppedFrames();
// Return the number of spatial resizes.
int NumberSpatialResizes();
webrtc::VideoEncoder* encoder_;
webrtc::VideoDecoder* decoder_;
FrameReader* frame_reader_;
FrameWriter* frame_writer_;
PacketManipulator* packet_manipulator_;
const TestConfig& config_;
Stats* stats_;
EncodedImageCallback* encode_callback_;
DecodedImageCallback* decode_callback_;
// Buffer used for reading the source video file:
WebRtc_UWord8* source_buffer_;
// Keep track of the last successful frame, since we need to write that
// when decoding fails:
WebRtc_UWord8* last_successful_frame_buffer_;
webrtc::VideoFrame source_frame_;
// To keep track of if we have excluded the first key frame from packet loss:
bool first_key_frame_has_been_excluded_;
// To tell the decoder that the previous frame was dropped due to packet loss:
bool last_frame_missing_;
// If Init() has executed successfully.
bool initialized_;
int encoded_frame_size_;
int prev_time_stamp_;
int num_dropped_frames_;
int num_spatial_resizes_;
int last_encoder_frame_width_;
int last_encoder_frame_height_;
Scaler scaler_;
// Statistics
double bit_rate_factor_; // multiply frame length with this to get bit rate
webrtc::TickTime encode_start_;
webrtc::TickTime decode_start_;
// Callback class required to implement according to the VideoEncoder API.
class VideoProcessorEncodeCompleteCallback
: public webrtc::EncodedImageCallback {
public:
explicit VideoProcessorEncodeCompleteCallback(VideoProcessorImpl* vp)
: video_processor_(vp) {
}
WebRtc_Word32 Encoded(
webrtc::EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codec_specific_info = NULL,
const webrtc::RTPFragmentationHeader* fragmentation = NULL);
private:
VideoProcessorImpl* video_processor_;
};
// Callback class required to implement according to the VideoDecoder API.
class VideoProcessorDecodeCompleteCallback
: public webrtc::DecodedImageCallback {
public:
explicit VideoProcessorDecodeCompleteCallback(VideoProcessorImpl* vp)
: video_processor_(vp) {
}
WebRtc_Word32 Decoded(webrtc::VideoFrame& image);
private:
VideoProcessorImpl* video_processor_;
};
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_H_
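
A condensed sketch of the intended call sequence for the interface above, assuming encoder, decoder, frame reader/writer, packet manipulator, config and stats have been set up as in videoprocessor_integrationtest.cc below (the rate values are hypothetical):

  webrtc::test::VideoProcessor* processor =
      new webrtc::test::VideoProcessorImpl(encoder, decoder, frame_reader,
                                           frame_writer, packet_manipulator,
                                           config, &stats);
  if (processor->Init()) {
    processor->SetRates(500, 30);  // 500 kbps at 30 fps.
    int frame_number = 0;
    while (processor->ProcessFrame(frame_number)) {
      ++frame_number;
    }
  }
  stats.PrintSummary();
  delete processor;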


@@ -0,0 +1,750 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "gtest/gtest.h"
#include <math.h>
#include "modules/video_coding/codecs/interface/video_codec_interface.h"
#include "modules/video_coding/codecs/test/packet_manipulator.h"
#include "modules/video_coding/codecs/test/videoprocessor.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"
#include "modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "modules/video_coding/main/interface/video_coding.h"
#include "testsupport/fileutils.h"
#include "testsupport/frame_reader.h"
#include "testsupport/frame_writer.h"
#include "testsupport/metrics/video_metrics.h"
#include "testsupport/packet_reader.h"
#include "typedefs.h"
namespace webrtc {
// Maximum number of rate updates (i.e., calls to encoder to change bitrate
// and/or frame rate) for the current tests.
const int kMaxNumRateUpdates = 3;
const int kPercTargetvsActualMismatch = 20;
// Codec and network settings.
struct CodecConfigPars {
float packet_loss;
int num_temporal_layers;
int key_frame_interval;
bool error_concealment_on;
bool denoising_on;
bool frame_dropper_on;
bool spatial_resize_on;
};
// Quality metrics.
struct QualityMetrics {
double minimum_avg_psnr;
double minimum_min_psnr;
double minimum_avg_ssim;
double minimum_min_ssim;
};
// The sequence of bitrate and frame rate changes for the encoder, the frame
// number where the changes are made, and the total number of frames for the
// test.
struct RateProfile {
int target_bit_rate[kMaxNumRateUpdates];
int input_frame_rate[kMaxNumRateUpdates];
int frame_index_rate_update[kMaxNumRateUpdates + 1];
int num_frames;
};
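// An illustrative profile (hypothetical numbers): start at 200 kbps / 30 fps,
// switch to 400 kbps at frame 100 and process 300 frames in total. How the
// entries are consumed is defined by the test code further below.
//   RateProfile profile;
//   profile.target_bit_rate[0] = 200;  profile.input_frame_rate[0] = 30;
//   profile.target_bit_rate[1] = 400;  profile.input_frame_rate[1] = 30;
//   profile.frame_index_rate_update[0] = 0;
//   profile.frame_index_rate_update[1] = 100;
//   profile.frame_index_rate_update[2] = 300;
//   profile.num_frames = 300;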
// Metrics for the rate control. The rate mismatch metrics are defined as
// percentages. |max_time_hit_target| is defined as the number of frames,
// after a rate update is made to the encoder, for the encoder to reach within
// |kPercTargetvsActualMismatch| of the new target rate. The metrics are defined for
// each rate update sequence.
struct RateControlMetrics {
int max_num_dropped_frames;
int max_key_frame_size_mismatch;
int max_delta_frame_size_mismatch;
int max_encoding_rate_mismatch;
int max_time_hit_target;
int num_spatial_resizes;
};
// Sequence used is foreman (CIF): may be better to use VGA for resize test.
const int kCIFWidth = 352;
const int kCIFHeight = 288;
const int kNbrFramesShort = 100; // Some tests are run for shorter sequence.
const int kNbrFramesLong = 299;
// Parameters from VP8 wrapper, which control target size of key frames.
const float kInitialBufferSize = 0.5f;
const float kOptimalBufferSize = 0.6f;
const float kScaleKeyFrameSize = 0.5f;
// Integration test for video processor. Encodes+decodes a clip and
// writes it to the output directory. After completion, quality metrics
// (PSNR and SSIM) and rate control metrics are computed to verify that the
// quality and encoder response is acceptable. The rate control tests allow us
// to verify the behavior for changing bitrate, changing frame rate, frame
// dropping/spatial resize, and temporal layers. The limits for the rate
// control metrics are set to be fairly conservative, so failure should only
// happen when some significant regression or breakdown occurs.
class VideoProcessorIntegrationTest: public testing::Test {
protected:
VideoEncoder* encoder_;
VideoDecoder* decoder_;
webrtc::test::FrameReader* frame_reader_;
webrtc::test::FrameWriter* frame_writer_;
webrtc::test::PacketReader packet_reader_;
webrtc::test::PacketManipulator* packet_manipulator_;
webrtc::test::Stats stats_;
webrtc::test::TestConfig config_;
VideoCodec codec_settings_;
webrtc::test::VideoProcessor* processor_;
// Quantities defined/updated for every encoder rate update.
// Some quantities defined per temporal layer (at most 3 layers in this test).
int num_frames_per_update_[3];
float sum_frame_size_mismatch_[3];
float sum_encoded_frame_size_[3];
float encoding_bitrate_[3];
float per_frame_bandwidth_[3];
float bit_rate_layer_[3];
float frame_rate_layer_[3];
int num_frames_total_;
float sum_encoded_frame_size_total_;
float encoding_bitrate_total_;
float perc_encoding_rate_mismatch_;
int num_frames_to_hit_target_;
bool encoding_rate_within_target_;
int bit_rate_;
int frame_rate_;
int layer_;
float target_size_key_frame_initial_;
float target_size_key_frame_;
float sum_key_frame_size_mismatch_;
int num_key_frames_;
float start_bitrate_;
// Codec and network settings.
float packet_loss_;
int num_temporal_layers_;
int key_frame_interval_;
bool error_concealment_on_;
bool denoising_on_;
bool frame_dropper_on_;
bool spatial_resize_on_;
VideoProcessorIntegrationTest() {}
virtual ~VideoProcessorIntegrationTest() {}
void SetUpCodecConfig() {
encoder_ = VP8Encoder::Create();
decoder_ = VP8Decoder::Create();
// CIF is currently used for all tests below.
// Setup the TestConfig struct for processing of a clip in CIF resolution.
config_.input_filename =
webrtc::test::ResourcePath("foreman_cif", "yuv");
config_.output_filename = webrtc::test::OutputPath() +
"foreman_cif_short_video_codecs_test_framework_integrationtests.yuv";
config_.frame_length_in_bytes = 3 * kCIFWidth * kCIFHeight / 2;
config_.verbose = false;
// Only allow encoder/decoder to use single core, for predictability.
config_.use_single_core = true;
// Key frame interval and packet loss are set for each test.
config_.keyframe_interval = key_frame_interval_;
config_.networking_config.packet_loss_probability = packet_loss_;
// Get a codec configuration struct and configure it.
VideoCodingModule::Codec(kVideoCodecVP8, &codec_settings_);
config_.codec_settings = &codec_settings_;
config_.codec_settings->startBitrate = start_bitrate_;
config_.codec_settings->width = kCIFWidth;
config_.codec_settings->height = kCIFHeight;
// These features may be set depending on the test.
config_.codec_settings->codecSpecific.VP8.errorConcealmentOn =
error_concealment_on_;
config_.codec_settings->codecSpecific.VP8.denoisingOn =
denoising_on_;
config_.codec_settings->codecSpecific.VP8.numberOfTemporalLayers =
num_temporal_layers_;
config_.codec_settings->codecSpecific.VP8.frameDroppingOn =
frame_dropper_on_;
config_.codec_settings->codecSpecific.VP8.automaticResizeOn =
spatial_resize_on_;
frame_reader_ =
new webrtc::test::FrameReaderImpl(config_.input_filename,
config_.frame_length_in_bytes);
frame_writer_ =
new webrtc::test::FrameWriterImpl(config_.output_filename,
config_.frame_length_in_bytes);
ASSERT_TRUE(frame_reader_->Init());
ASSERT_TRUE(frame_writer_->Init());
packet_manipulator_ = new webrtc::test::PacketManipulatorImpl(
&packet_reader_, config_.networking_config, config_.verbose);
processor_ = new webrtc::test::VideoProcessorImpl(encoder_, decoder_,
frame_reader_,
frame_writer_,
packet_manipulator_,
config_, &stats_);
ASSERT_TRUE(processor_->Init());
}
// Reset quantities after each encoder update, update the target
// per-frame bandwidth.
void ResetRateControlMetrics(int num_frames) {
for (int i = 0; i < num_temporal_layers_; i++) {
num_frames_per_update_[i] = 0;
sum_frame_size_mismatch_[i] = 0.0f;
sum_encoded_frame_size_[i] = 0.0f;
encoding_bitrate_[i] = 0.0f;
// Update layer per-frame-bandwidth.
per_frame_bandwidth_[i] = static_cast<float>(bit_rate_layer_[i]) /
static_cast<float>(frame_rate_layer_[i]);
}
// Set the maximum size of key frames, following the setting in the VP8
// wrapper.
float max_key_size = kScaleKeyFrameSize * kOptimalBufferSize * frame_rate_;
// We don't know the exact target size of the key frames (except for the first
// one), but the minimum in libvpx is ~|3 * per_frame_bandwidth| and the
// maximum is set by |max_key_size * per_frame_bandwidth|. Take the midpoint of
// these two as the reference for the mismatch. Note that key frames always
// correspond to a base layer frame in this test.
target_size_key_frame_ = 0.5 * (3 + max_key_size) * per_frame_bandwidth_[0];
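// Example (numbers for illustration only): if |max_key_size| evaluates to 9
// and |per_frame_bandwidth_[0]| is ~6.7 kbits (e.g. 200 kbps at 30 fps), the
// reference key frame size is 0.5 * (3 + 9) * 6.7, i.e. roughly 40 kbits.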
num_frames_total_ = 0;
sum_encoded_frame_size_total_ = 0.0f;
encoding_bitrate_total_ = 0.0f;
perc_encoding_rate_mismatch_ = 0.0f;
num_frames_to_hit_target_ = num_frames;
encoding_rate_within_target_ = false;
sum_key_frame_size_mismatch_ = 0.0;
num_key_frames_ = 0;
}
// For every encoded frame, update the rate control metrics.
void UpdateRateControlMetrics(int frame_num, VideoFrameType frame_type) {
int encoded_frame_size = processor_->EncodedFrameSize();
float encoded_size_kbits = encoded_frame_size * 8.0f / 1000.0f;
// Update layer data.
// Update rate mismatch relative to per-frame bandwidth for delta frames.
if (frame_type == kDeltaFrame) {
// TODO(marpan): Should we count dropped (zero size) frames in mismatch?
sum_frame_size_mismatch_[layer_] += fabs(encoded_size_kbits -
per_frame_bandwidth_[layer_]) /
per_frame_bandwidth_[layer_];
} else {
float target_size = (frame_num == 1) ? target_size_key_frame_initial_ :
target_size_key_frame_;
sum_key_frame_size_mismatch_ += fabs(encoded_size_kbits - target_size) /
target_size;
num_key_frames_ += 1;
}
sum_encoded_frame_size_[layer_] += encoded_size_kbits;
// Encoding bitrate per layer: from the start of the update/run to the
// current frame.
encoding_bitrate_[layer_] = sum_encoded_frame_size_[layer_] *
frame_rate_layer_[layer_] /
num_frames_per_update_[layer_];
// Total encoding rate: from the start of the update/run to current frame.
sum_encoded_frame_size_total_ += encoded_size_kbits;
encoding_bitrate_total_ = sum_encoded_frame_size_total_ * frame_rate_ /
num_frames_total_;
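// Note on units: frame sizes are accumulated in kbits and the frame rates are
// in frames/sec, so the encoding bitrates computed here are in kbits/sec and
// can be compared directly against the targets in |bit_rate_| and
// |bit_rate_layer_| (given in kbps).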
perc_encoding_rate_mismatch_ = 100 * fabs(encoding_bitrate_total_ -
bit_rate_) / bit_rate_;
if (perc_encoding_rate_mismatch_ < kPercTargetvsActualMismatch &&
!encoding_rate_within_target_) {
num_frames_to_hit_target_ = num_frames_total_;
encoding_rate_within_target_ = true;
}
}
// Verify expected behavior of rate control and print out data.
void VerifyRateControl(int update_index,
int max_key_frame_size_mismatch,
int max_delta_frame_size_mismatch,
int max_encoding_rate_mismatch,
int max_time_hit_target,
int max_num_dropped_frames,
int num_spatial_resizes) {
int num_dropped_frames = processor_->NumberDroppedFrames();
int num_resize_actions = processor_->NumberSpatialResizes();
printf("For update #: %d,\n "
" Target Bitrate: %d,\n"
" Encoding bitrate: %f,\n"
" Frame rate: %d \n",
update_index, bit_rate_, encoding_bitrate_total_, frame_rate_);
printf(" Number of frames to approach target rate = %d, \n"
" Number of dropped frames = %d, \n"
" Number of spatial resizes = %d, \n",
num_frames_to_hit_target_, num_dropped_frames, num_resize_actions);
EXPECT_LE(perc_encoding_rate_mismatch_, max_encoding_rate_mismatch);
if (num_key_frames_ > 0) {
int perc_key_frame_size_mismatch = 100 * sum_key_frame_size_mismatch_ /
num_key_frames_;
printf(" Number of Key frames: %d \n"
" Key frame rate mismatch: %d \n",
num_key_frames_, perc_key_frame_size_mismatch);
EXPECT_LE(perc_key_frame_size_mismatch, max_key_frame_size_mismatch);
}
printf("\n");
printf("Rates statistics for Layer data \n");
for (int i = 0; i < num_temporal_layers_ ; i++) {
printf("Layer #%d \n", i);
int perc_frame_size_mismatch = 100 * sum_frame_size_mismatch_[i] /
num_frames_per_update_[i];
int perc_encoding_rate_mismatch = 100 * fabs(encoding_bitrate_[i] -
bit_rate_layer_[i]) /
bit_rate_layer_[i];
printf(" Target Layer Bit rate: %f \n"
" Layer frame rate: %f, \n"
" Layer per frame bandwidth: %f, \n"
" Layer Encoding bit rate: %f, \n"
" Layer Percent frame size mismatch: %d, \n"
" Layer Percent encoding rate mismatch = %d, \n"
" Number of frame processed per layer = %d \n",
bit_rate_layer_[i], frame_rate_layer_[i], per_frame_bandwidth_[i],
encoding_bitrate_[i], perc_frame_size_mismatch,
perc_encoding_rate_mismatch, num_frames_per_update_[i]);
EXPECT_LE(perc_frame_size_mismatch, max_delta_frame_size_mismatch);
EXPECT_LE(perc_encoding_rate_mismatch, max_encoding_rate_mismatch);
}
printf("\n");
EXPECT_LE(num_frames_to_hit_target_, max_time_hit_target);
EXPECT_LE(num_dropped_frames, max_num_dropped_frames);
EXPECT_EQ(num_resize_actions, num_spatial_resizes);
}
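// Note that all mismatch limits in VerifyRateControl() are integer
// percentages, and that the number of spatial resizes is checked for exact
// equality (EXPECT_EQ) while the other metrics are upper bounds (EXPECT_LE).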
// Set |layer_| to the layer index corresponding to |frame_number|, for up to
// 3 layers.
void LayerIndexForFrame(int frame_number) {
if (num_temporal_layers_ == 1) {
layer_ = 0;
} else if (num_temporal_layers_ == 2) {
// layer 0: 0 2 4 ...
// layer 1: 1 3
if (frame_number % 2 == 0) {
layer_ = 0;
} else {
layer_ = 1;
}
} else if (num_temporal_layers_ == 3) {
// layer 0: 0 4 8 ...
// layer 1: 2 6
// layer 2: 1 3 5 7
if (frame_number % 4 == 0) {
layer_ = 0;
} else if ((frame_number + 2) % 4 == 0) {
layer_ = 1;
} else if ((frame_number + 1) % 2 == 0) {
layer_ = 2;
}
} else {
assert(false); // Only up to 3 layers.
}
}
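// For example, with 3 temporal layers the first eight frames (0..7) map to
// layers 0, 2, 1, 2, 0, 2, 1, 2, i.e. a 0-2-1-2 temporal pattern.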
// Set the bitrate and frame rate per layer, for up to 3 layers.
void SetLayerRates() {
assert(num_temporal_layers_ <= 3);
for (int i = 0; i < num_temporal_layers_; i++) {
float bit_rate_ratio =
kVp8LayerRateAlloction[num_temporal_layers_ - 1][i];
if (i > 0) {
float bit_rate_delta_ratio = kVp8LayerRateAlloction
[num_temporal_layers_ - 1][i] -
kVp8LayerRateAlloction[num_temporal_layers_ - 1][i - 1];
bit_rate_layer_[i] = bit_rate_ * bit_rate_delta_ratio;
} else {
bit_rate_layer_[i] = bit_rate_ * bit_rate_ratio;
}
frame_rate_layer_[i] = frame_rate_ / static_cast<float>(
1 << (num_temporal_layers_ - 1));
}
if (num_temporal_layers_ == 3) {
frame_rate_layer_[2] = frame_rate_ / 2.0f;
}
}
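// Illustration (the actual fractions come from kVp8LayerRateAlloction): if the
// cumulative allocation for 3 layers were {0.4, 0.6, 1.0}, a 600 kbps target
// would split into 240/120/240 kbps for layers 0/1/2, with layer frame rates
// of frame_rate_/4, frame_rate_/4 and frame_rate_/2.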
VideoFrameType FrameType(int frame_number) {
if (frame_number == 0 || (key_frame_interval_ > 0 &&
frame_number % key_frame_interval_ == 0)) {
return kKeyFrame;
} else {
return kDeltaFrame;
}
}
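// E.g. with |key_frame_interval_| = 120, frames 0, 120, 240, ... are key
// frames; with |key_frame_interval_| = -1 only frame 0 is a key frame.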
void TearDown() {
delete processor_;
delete packet_manipulator_;
delete frame_writer_;
delete frame_reader_;
delete decoder_;
delete encoder_;
}
// Processes all frames in the clip and verifies the result.
void ProcessFramesAndVerify(QualityMetrics quality_metrics,
RateProfile rate_profile,
CodecConfigPars process,
RateControlMetrics* rc_metrics) {
// Codec/config settings.
start_bitrate_ = rate_profile.target_bit_rate[0];
packet_loss_ = process.packet_loss;
key_frame_interval_ = process.key_frame_interval;
num_temporal_layers_ = process.num_temporal_layers;
error_concealment_on_ = process.error_concealment_on;
denoising_on_ = process.denoising_on;
frame_dropper_on_ = process.frame_dropper_on;
spatial_resize_on_ = process.spatial_resize_on;
SetUpCodecConfig();
// Update the layers and the codec with the initial rates.
bit_rate_ = rate_profile.target_bit_rate[0];
frame_rate_ = rate_profile.input_frame_rate[0];
SetLayerRates();
// Set the initial target size for key frame.
target_size_key_frame_initial_ = 0.5 * kInitialBufferSize *
bit_rate_layer_[0];
processor_->SetRates(bit_rate_, frame_rate_);
// Process each frame, up to |num_frames|.
int num_frames = rate_profile.num_frames;
int update_index = 0;
ResetRateControlMetrics(
rate_profile.frame_index_rate_update[update_index + 1]);
int frame_number = 0;
VideoFrameType frame_type = kDeltaFrame;
while (processor_->ProcessFrame(frame_number) &&
frame_number < num_frames) {
// Get the layer index for the frame |frame_number|.
LayerIndexForFrame(frame_number);
frame_type = FrameType(frame_number);
// Counter for whole sequence run.
++frame_number;
// Counters for each rate update.
++num_frames_per_update_[layer_];
++num_frames_total_;
UpdateRateControlMetrics(frame_number, frame_type);
// If we have hit the next rate update point, verify the stats for the
// current state, then update the layers and the codec with the new rates.
if (frame_number ==
rate_profile.frame_index_rate_update[update_index + 1]) {
VerifyRateControl(
update_index,
rc_metrics[update_index].max_key_frame_size_mismatch,
rc_metrics[update_index].max_delta_frame_size_mismatch,
rc_metrics[update_index].max_encoding_rate_mismatch,
rc_metrics[update_index].max_time_hit_target,
rc_metrics[update_index].max_num_dropped_frames,
rc_metrics[update_index].num_spatial_resizes);
// Update layer rates and the codec with new rates.
++update_index;
bit_rate_ = rate_profile.target_bit_rate[update_index];
frame_rate_ = rate_profile.input_frame_rate[update_index];
SetLayerRates();
ResetRateControlMetrics(rate_profile.
frame_index_rate_update[update_index + 1]);
processor_->SetRates(bit_rate_, frame_rate_);
}
}
VerifyRateControl(
update_index,
rc_metrics[update_index].max_key_frame_size_mismatch,
rc_metrics[update_index].max_delta_frame_size_mismatch,
rc_metrics[update_index].max_encoding_rate_mismatch,
rc_metrics[update_index].max_time_hit_target,
rc_metrics[update_index].max_num_dropped_frames,
rc_metrics[update_index].num_spatial_resizes);
EXPECT_EQ(num_frames, frame_number);
EXPECT_EQ(num_frames + 1, static_cast<int>(stats_.stats_.size()));
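// The loop condition above calls ProcessFrame() once more after the last
// counted frame (the frame index check comes second), which presumably
// accounts for the extra entry expected in |stats_|.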
// Release encoder and decoder to make sure they have finished processing:
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Release());
// Close the files before we start using them for SSIM/PSNR calculations.
frame_reader_->Close();
frame_writer_->Close();
// TODO(marpan): should compute these quality metrics per SetRates update.
webrtc::test::QualityMetricsResult psnr_result, ssim_result;
EXPECT_EQ(0, webrtc::test::I420MetricsFromFiles(
config_.input_filename.c_str(),
config_.output_filename.c_str(),
config_.codec_settings->width,
config_.codec_settings->height,
&psnr_result,
&ssim_result));
printf("PSNR avg: %f, min: %f SSIM avg: %f, min: %f\n",
psnr_result.average, psnr_result.min,
ssim_result.average, ssim_result.min);
stats_.PrintSummary();
EXPECT_GT(psnr_result.average, quality_metrics.minimum_avg_psnr);
EXPECT_GT(psnr_result.min, quality_metrics.minimum_min_psnr);
EXPECT_GT(ssim_result.average, quality_metrics.minimum_avg_ssim);
EXPECT_GT(ssim_result.min, quality_metrics.minimum_min_ssim);
}
};
void SetRateProfilePars(RateProfile* rate_profile,
int update_index,
int bit_rate,
int frame_rate,
int frame_index_rate_update) {
rate_profile->target_bit_rate[update_index] = bit_rate;
rate_profile->input_frame_rate[update_index] = frame_rate;
rate_profile->frame_index_rate_update[update_index] = frame_index_rate_update;
}
void SetCodecParameters(CodecConfigPars* process_settings,
float packet_loss,
int key_frame_interval,
int num_temporal_layers,
bool error_concealment_on,
bool denoising_on,
bool frame_dropper_on,
bool spatial_resize_on) {
process_settings->packet_loss = packet_loss;
process_settings->key_frame_interval = key_frame_interval;
process_settings->num_temporal_layers = num_temporal_layers;
process_settings->error_concealment_on = error_concealment_on;
process_settings->denoising_on = denoising_on;
process_settings->frame_dropper_on = frame_dropper_on;
process_settings->spatial_resize_on = spatial_resize_on;
}
void SetQualityMetrics(QualityMetrics* quality_metrics,
double minimum_avg_psnr,
double minimum_min_psnr,
double minimum_avg_ssim,
double minimum_min_ssim) {
quality_metrics->minimum_avg_psnr = minimum_avg_psnr;
quality_metrics->minimum_min_psnr = minimum_min_psnr;
quality_metrics->minimum_avg_ssim = minimum_avg_ssim;
quality_metrics->minimum_min_ssim = minimum_min_ssim;
}
void SetRateControlMetrics(RateControlMetrics* rc_metrics,
int update_index,
int max_num_dropped_frames,
int max_key_frame_size_mismatch,
int max_delta_frame_size_mismatch,
int max_encoding_rate_mismatch,
int max_time_hit_target,
int num_spatial_resizes) {
rc_metrics[update_index].max_num_dropped_frames = max_num_dropped_frames;
rc_metrics[update_index].max_key_frame_size_mismatch =
max_key_frame_size_mismatch;
rc_metrics[update_index].max_delta_frame_size_mismatch =
max_delta_frame_size_mismatch;
rc_metrics[update_index].max_encoding_rate_mismatch =
max_encoding_rate_mismatch;
rc_metrics[update_index].max_time_hit_target = max_time_hit_target;
rc_metrics[update_index].num_spatial_resizes = num_spatial_resizes;
}
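// The tests below drive ProcessFramesAndVerify() with different rate profiles
// and codec settings. As a reminder, the positional arguments of
// SetRateControlMetrics() are: update_index, max_num_dropped_frames,
// max_key_frame_size_mismatch, max_delta_frame_size_mismatch,
// max_encoding_rate_mismatch, max_time_hit_target, num_spatial_resizes.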
// Run with no packet loss and fixed bitrate. Quality should be very high.
// One key frame (first frame only) in sequence. Setting |key_frame_interval|
// to -1 below means no periodic key frames in test.
TEST_F(VideoProcessorIntegrationTest, ProcessZeroPacketLoss) {
// Bitrate and frame rate profile.
RateProfile rate_profile;
SetRateProfilePars(&rate_profile, 0, 500, 30, 0);
rate_profile.frame_index_rate_update[1] = kNbrFramesShort + 1;
rate_profile.num_frames = kNbrFramesShort;
// Codec/network settings.
CodecConfigPars process_settings;
SetCodecParameters(&process_settings, 0.0f, -1, 1, true, true, true, false);
// Metrics for expected quality.
QualityMetrics quality_metrics;
SetQualityMetrics(&quality_metrics, 36.95, 33.0, 0.90, 0.90);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
rc_metrics);
}
// Run with 5% packet loss and fixed bitrate. Quality should be a bit lower.
// One key frame (first frame only) in sequence.
TEST_F(VideoProcessorIntegrationTest, Process5PercentPacketLoss) {
// Bitrate and frame rate profile.
RateProfile rate_profile;
SetRateProfilePars(&rate_profile, 0, 500, 30, 0);
rate_profile.frame_index_rate_update[1] = kNbrFramesShort + 1;
rate_profile.num_frames = kNbrFramesShort;
// Codec/network settings.
CodecConfigPars process_settings;
SetCodecParameters(&process_settings, 0.05f, -1, 1, true, true, true, false);
// Metrics for expected quality.
QualityMetrics quality_metrics;
SetQualityMetrics(&quality_metrics, 20.0, 16.0, 0.60, 0.40);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
rc_metrics);
}
// Run with 10% packet loss and fixed bitrate. Quality should be even lower.
// One key frame (first frame only) in sequence.
TEST_F(VideoProcessorIntegrationTest, Process10PercentPacketLoss) {
// Bitrate and frame rate profile.
RateProfile rate_profile;
SetRateProfilePars(&rate_profile, 0, 500, 30, 0);
rate_profile.frame_index_rate_update[1] = kNbrFramesShort + 1;
rate_profile.num_frames = kNbrFramesShort;
// Codec/network settings.
CodecConfigPars process_settings;
SetCodecParameters(&process_settings, 0.1f, -1, 1, true, true, true, false);
// Metrics for expected quality.
QualityMetrics quality_metrics;
SetQualityMetrics(&quality_metrics, 19.0, 16.0, 0.50, 0.35);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
rc_metrics);
}
// Run with no packet loss, with varying bitrate (3 rate updates):
// low to high to medium. Check that quality and encoder response to the new
// target rate/per-frame bandwidth (for each rate update) is within limits.
// One key frame (first frame only) in sequence.
TEST_F(VideoProcessorIntegrationTest, ProcessNoLossChangeBitRate) {
// Bitrate and frame rate profile.
RateProfile rate_profile;
SetRateProfilePars(&rate_profile, 0, 200, 30, 0);
SetRateProfilePars(&rate_profile, 1, 800, 30, 100);
SetRateProfilePars(&rate_profile, 2, 500, 30, 200);
rate_profile.frame_index_rate_update[3] = kNbrFramesLong + 1;
rate_profile.num_frames = kNbrFramesLong;
// Codec/network settings.
CodecConfigPars process_settings;
SetCodecParameters(&process_settings, 0.0f, -1, 1, true, true, true, false);
// Metrics for expected quality.
QualityMetrics quality_metrics;
SetQualityMetrics(&quality_metrics, 34.0, 32.0, 0.85, 0.80);
// Metrics for rate control.
RateControlMetrics rc_metrics[3];
SetRateControlMetrics(rc_metrics, 0, 0, 45, 20, 10, 15, 0);
SetRateControlMetrics(rc_metrics, 1, 0, 0, 25, 20, 10, 0);
SetRateControlMetrics(rc_metrics, 2, 0, 0, 25, 15, 10, 0);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
rc_metrics);
}
// Run with no packet loss, with an update (decrease) in frame rate.
// Lower frame rate means higher per-frame-bandwidth, so easier to encode.
// At the bitrate in this test, this means better rate control after the
// update(s) to lower frame rate. So expect fewer frame drops, and max values
// for the rate control metrics can be lower. One key frame (first frame only).
// Note: quality after the update should be higher, but we currently compute
// the quality metrics averaged over the whole sequence run.
TEST_F(VideoProcessorIntegrationTest, ProcessNoLossChangeFrameRateFrameDrop) {
config_.networking_config.packet_loss_probability = 0;
// Bitrate and frame rate profile.
RateProfile rate_profile;
SetRateProfilePars(&rate_profile, 0, 80, 24, 0);
SetRateProfilePars(&rate_profile, 1, 80, 15, 100);
SetRateProfilePars(&rate_profile, 2, 80, 10, 200);
rate_profile.frame_index_rate_update[3] = kNbrFramesLong + 1;
rate_profile.num_frames = kNbrFramesLong;
// Codec/network settings.
CodecConfigPars process_settings;
SetCodecParameters(&process_settings, 0.0f, -1, 1, true, true, true, false);
// Metrics for expected quality.
QualityMetrics quality_metrics;
SetQualityMetrics(&quality_metrics, 31.0, 23.0, 0.80, 0.65);
// Metrics for rate control.
RateControlMetrics rc_metrics[3];
SetRateControlMetrics(rc_metrics, 0, 40, 20, 75, 15, 60, 0);
SetRateControlMetrics(rc_metrics, 1, 10, 0, 25, 10, 35, 0);
SetRateControlMetrics(rc_metrics, 2, 0, 0, 20, 10, 15, 0);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
rc_metrics);
}
// Run with no packet loss, at low bitrate, then increase rate somewhat.
// A key frame is inserted every 120 frames. Expect some frame drops after a
// key frame, even at the higher rate. The internal spatial resizer is on, so
// expect a spatial resize down at the first key frame, and back up at the
// second key frame. Error concealment is off in this test since there is a
// memory leak with resizing and error concealment.
TEST_F(VideoProcessorIntegrationTest, ProcessNoLossSpatialResizeFrameDrop) {
config_.networking_config.packet_loss_probability = 0;
// Bitrate and frame rate profile.
RateProfile rate_profile;
SetRateProfilePars(&rate_profile, 0, 100, 30, 0);
SetRateProfilePars(&rate_profile, 1, 200, 30, 120);
SetRateProfilePars(&rate_profile, 2, 200, 30, 240);
rate_profile.frame_index_rate_update[3] = kNbrFramesLong + 1;
rate_profile.num_frames = kNbrFramesLong;
// Codec/network settings.
CodecConfigPars process_settings;
SetCodecParameters(&process_settings, 0.0f, 120, 1, false, true, true, true);
// Metrics for expected quality: expect lower quality on average due to
// up-sampling the down-sampled portion of the run when the resizer is on.
QualityMetrics quality_metrics;
SetQualityMetrics(&quality_metrics, 29.0, 20.0, 0.75, 0.60);
// Metrics for rate control.
RateControlMetrics rc_metrics[3];
SetRateControlMetrics(rc_metrics, 0, 45, 30, 75, 20, 70, 0);
SetRateControlMetrics(rc_metrics, 1, 20, 35, 30, 20, 15, 1);
SetRateControlMetrics(rc_metrics, 2, 0, 30, 30, 15, 25, 1);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
rc_metrics);
}
// Run with no packet loss, with 3 temporal layers, with a rate update in the
// middle of the sequence. The max values for the frame size mismatch and
// encoding rate mismatch are applied to each layer.
// No dropped frames in this test, and internal spatial resizer is off.
// One key frame (first frame only) in sequence, so no spatial resizing.
TEST_F(VideoProcessorIntegrationTest, ProcessNoLossTemporalLayers) {
config_.networking_config.packet_loss_probability = 0;
// Bitrate and frame rate profile.
RateProfile rate_profile;
SetRateProfilePars(&rate_profile, 0, 200, 30, 0);
SetRateProfilePars(&rate_profile, 1, 400, 30, 150);
rate_profile.frame_index_rate_update[2] = kNbrFramesLong + 1;
rate_profile.num_frames = kNbrFramesLong;
// Codec/network settings.
CodecConfigPars process_settings;
SetCodecParameters(&process_settings, 0.0f, -1, 3, true, true, true, false);
// Metrics for expected quality.
QualityMetrics quality_metrics;
SetQualityMetrics(&quality_metrics, 32.5, 30.0, 0.85, 0.80);
// Metrics for rate control.
RateControlMetrics rc_metrics[2];
SetRateControlMetrics(rc_metrics, 0, 0, 20, 30, 10, 10, 0);
SetRateControlMetrics(rc_metrics, 1, 0, 0, 30, 15, 10, 0);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
rc_metrics);
}
} // namespace webrtc

View File

@ -0,0 +1,99 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "gtest/gtest.h"
#include "gmock/gmock.h"
#include "modules/video_coding/codecs/test/mock/mock_packet_manipulator.h"
#include "modules/video_coding/codecs/test/videoprocessor.h"
#include "modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
#include "modules/video_coding/main/interface/video_coding.h"
#include "testsupport/mock/mock_frame_reader.h"
#include "testsupport/mock/mock_frame_writer.h"
#include "testsupport/packet_reader.h"
#include "testsupport/unittest_utils.h"
#include "typedefs.h"
using ::testing::_;
using ::testing::AtLeast;
using ::testing::Return;
namespace webrtc {
namespace test {
// Very basic testing for VideoProcessor. It's mostly tested by running the
// video_quality_measurement program.
class VideoProcessorTest: public testing::Test {
protected:
MockVideoEncoder encoder_mock_;
MockVideoDecoder decoder_mock_;
MockFrameReader frame_reader_mock_;
MockFrameWriter frame_writer_mock_;
MockPacketManipulator packet_manipulator_mock_;
Stats stats_;
TestConfig config_;
VideoCodec codec_settings_;
VideoProcessorTest() {}
virtual ~VideoProcessorTest() {}
void SetUp() {
// Get a codec configuration struct and configure it.
VideoCodingModule::Codec(kVideoCodecVP8, &codec_settings_);
config_.codec_settings = &codec_settings_;
config_.codec_settings->startBitrate = 100;
config_.codec_settings->width = 352;
config_.codec_settings->height = 288;
}
void TearDown() {}
void ExpectInit() {
EXPECT_CALL(encoder_mock_, InitEncode(_, _, _))
.Times(1);
EXPECT_CALL(encoder_mock_, RegisterEncodeCompleteCallback(_))
.Times(AtLeast(1));
EXPECT_CALL(decoder_mock_, InitDecode(_, _))
.Times(1);
EXPECT_CALL(decoder_mock_, RegisterDecodeCompleteCallback(_))
.Times(AtLeast(1));
EXPECT_CALL(frame_reader_mock_, NumberOfFrames())
.WillOnce(Return(1));
EXPECT_CALL(frame_reader_mock_, FrameLength())
.WillOnce(Return(150000));
}
};
TEST_F(VideoProcessorTest, Init) {
ExpectInit();
VideoProcessorImpl video_processor(&encoder_mock_, &decoder_mock_,
&frame_reader_mock_,
&frame_writer_mock_,
&packet_manipulator_mock_, config_,
&stats_);
ASSERT_TRUE(video_processor.Init());
}
TEST_F(VideoProcessorTest, ProcessFrame) {
ExpectInit();
EXPECT_CALL(encoder_mock_, Encode(_, _, _))
.Times(1);
EXPECT_CALL(frame_reader_mock_, ReadFrame(_))
.WillOnce(Return(true));
// Since the mocks do not invoke any completion callbacks, the decoder will
// not get beyond initialization...
VideoProcessorImpl video_processor(&encoder_mock_, &decoder_mock_,
&frame_reader_mock_,
&frame_writer_mock_,
&packet_manipulator_mock_, config_,
&stats_);
ASSERT_TRUE(video_processor.Init());
video_processor.ProcessFrame(0);
}
} // namespace test
} // namespace webrtc