Moving src/webrtc into src/.

In order to eliminate the WebRTC Subtree mirror in Chromium, 
WebRTC is moving the content of the src/webrtc directory up
to the src/ directory.

NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
TBR=tommi@webrtc.org

Bug: chromium:611808
Change-Id: Iac59c5b51b950f174119565bac87955a7994bc38
Reviewed-on: https://webrtc-review.googlesource.com/1560
Commit-Queue: Mirko Bonadei <mbonadei@webrtc.org>
Reviewed-by: Henrik Kjellander <kjellander@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#19845}
This commit is contained in:
Mirko Bonadei
2017-09-15 06:15:48 +02:00
committed by Commit Bot
parent 6674846b4a
commit bb547203bf
4576 changed files with 1092 additions and 1196 deletions

View File

@ -0,0 +1,55 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <pthread.h>
#include "webrtc/modules/video_coding/codecs/test/android_test_initializer.h"
#include "webrtc/rtc_base/ignore_wundef.h"
#include "webrtc/sdk/android/src/jni/classreferenceholder.h"
#include "webrtc/sdk/android/src/jni/jni_helpers.h"
// Note: this dependency is dangerous since it reaches into Chromium's base.
// There's a risk of e.g. macro clashes. This file may only be used in tests.
// Since we use Chrome's build system for creating the gtest binary, this should
// be fine.
RTC_PUSH_IGNORING_WUNDEF()
#include "base/android/jni_android.h"
RTC_POP_IGNORING_WUNDEF()
#include "webrtc/rtc_base/checks.h"
namespace webrtc {
namespace {

// Guard ensuring the JNI globals below are initialized exactly once per
// process, regardless of how many tests call InitializeAndroidObjects().
static pthread_once_t g_initialize_once = PTHREAD_ONCE_INIT;

// There can only be one JNI_OnLoad in each binary. So since this is a GTEST
// C++ runner binary, we want to initialize the same global objects we normally
// do if this had been a Java binary.
void EnsureInitializedOnce() {
  // The JVM must already be running (started by Chromium's base) before we
  // can attach and fetch a JavaVM pointer.
  RTC_CHECK(::base::android::IsVMInitialized());
  JNIEnv* jni = ::base::android::AttachCurrentThread();
  JavaVM* jvm = NULL;
  RTC_CHECK_EQ(0, jni->GetJavaVM(&jvm));

  // Mirror what WebRTC's JNI_OnLoad would have done in a Java-hosted binary.
  jint ret = jni::InitGlobalJniVariables(jvm);
  RTC_DCHECK_GE(ret, 0);

  jni::LoadGlobalClassReferenceHolder();
}

}  // namespace

// Idempotent entry point: initializes WebRTC's global JNI state on the first
// call; later calls are no-ops thanks to pthread_once.
void InitializeAndroidObjects() {
  RTC_CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
}

}  // namespace webrtc

View File

@ -0,0 +1,20 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_ANDROID_TEST_INITIALIZER_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_ANDROID_TEST_INITIALIZER_H_
namespace webrtc {

// One-time, idempotent setup of the global JNI state needed before running
// Android codec tests from a C++ (GTEST) binary. Safe to call repeatedly.
void InitializeAndroidObjects();

}  // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_ANDROID_TEST_INITIALIZER_H_

View File

@ -0,0 +1,32 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_MOCK_MOCK_PACKET_MANIPULATOR_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_MOCK_MOCK_PACKET_MANIPULATOR_H_
#include <string>
#include "webrtc/modules/video_coding/codecs/test/packet_manipulator.h"
#include "webrtc/test/gmock.h"
#include "webrtc/typedefs.h"
#include "webrtc/common_video/include/video_frame.h"
namespace webrtc {
namespace test {
// GoogleMock implementation of PacketManipulator; lets tests set expectations
// on ManipulatePackets() instead of simulating real packet loss.
class MockPacketManipulator : public PacketManipulator {
 public:
  MOCK_METHOD1(ManipulatePackets, int(webrtc::EncodedImage* encoded_image));
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_MOCK_MOCK_PACKET_MANIPULATOR_H_

View File

@ -0,0 +1,26 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_OBJC_CODEC_H264_TEST_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_OBJC_CODEC_H264_TEST_H_
#include <memory>
#include "webrtc/media/engine/webrtcvideodecoderfactory.h"
#include "webrtc/media/engine/webrtcvideoencoderfactory.h"
namespace webrtc {

// Factories wrapping the Objective-C (VideoToolbox-backed) H264 encoder and
// decoder so they can be used from C++ test code. Ownership of the returned
// factory is transferred to the caller.
std::unique_ptr<cricket::WebRtcVideoEncoderFactory> CreateObjCEncoderFactory();
std::unique_ptr<cricket::WebRtcVideoDecoderFactory> CreateObjCDecoderFactory();

}  // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_OBJC_CODEC_H264_TEST_H_

View File

@ -0,0 +1,29 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test/objc_codec_h264_test.h"
#import "WebRTC/RTCVideoCodecH264.h"
#include "webrtc/sdk/objc/Framework/Classes/VideoToolbox/objc_video_decoder_factory.h"
#include "webrtc/sdk/objc/Framework/Classes/VideoToolbox/objc_video_encoder_factory.h"
namespace webrtc {

// Wraps the ObjC RTCVideoEncoderFactoryH264 in a cricket-compatible factory.
// The returned unique_ptr owns the adapter, which retains the ObjC object.
std::unique_ptr<cricket::WebRtcVideoEncoderFactory> CreateObjCEncoderFactory() {
  return std::unique_ptr<cricket::WebRtcVideoEncoderFactory>(
      new ObjCVideoEncoderFactory([[RTCVideoEncoderFactoryH264 alloc] init]));
}

// Wraps the ObjC RTCVideoDecoderFactoryH264 in a cricket-compatible factory.
std::unique_ptr<cricket::WebRtcVideoDecoderFactory> CreateObjCDecoderFactory() {
  return std::unique_ptr<cricket::WebRtcVideoDecoderFactory>(
      new ObjCVideoDecoderFactory([[RTCVideoDecoderFactoryH264 alloc] init]));
}

}  // namespace webrtc

View File

@ -0,0 +1,107 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test/packet_manipulator.h"
#include <assert.h>
#include <stdio.h>
#include "webrtc/rtc_base/format_macros.h"
namespace webrtc {
namespace test {
// |packet_reader| must be non-null and, like |config| (held by reference),
// must outlive this manipulator. |verbose| enables per-frame drop logging.
PacketManipulatorImpl::PacketManipulatorImpl(PacketReader* packet_reader,
                                             const NetworkingConfig& config,
                                             bool verbose)
    : packet_reader_(packet_reader),
      config_(config),
      active_burst_packets_(0),
      random_seed_(1),  // Fixed default seed keeps drop patterns repeatable.
      verbose_(verbose) {
  assert(packet_reader);
}
// Simulates packet loss on |encoded_image| in place. Once one packet is
// dropped (directly or via an active burst), every remaining packet of the
// frame is discarded by truncating _length. Returns the number of packets
// dropped and clears _completeFrame when that number is > 0.
int PacketManipulatorImpl::ManipulatePackets(
    webrtc::EncodedImage* encoded_image) {
  int nbr_packets_dropped = 0;
  // There's no need to build a copy of the image data since viewing an
  // EncodedImage object, setting the length to a new lower value represents
  // that everything is dropped after that position in the byte array.
  // EncodedImage._size is the allocated bytes.
  // EncodedImage._length is how many that are filled with data.
  int new_length = 0;
  packet_reader_->InitializeReading(encoded_image->_buffer,
                                    encoded_image->_length,
                                    config_.packet_size_in_bytes);
  uint8_t* packet = NULL;
  int nbr_bytes_to_read;
  // keep track of if we've lost any packets, since then we shall lose
  // the remains of the current frame:
  bool packet_loss_has_occurred = false;
  while ((nbr_bytes_to_read = packet_reader_->NextPacket(&packet)) > 0) {
    // Check if we're currently in a packet loss burst that is not completed:
    if (active_burst_packets_ > 0) {
      active_burst_packets_--;
      nbr_packets_dropped++;
    } else if (RandomUniform() < config_.packet_loss_probability ||
               packet_loss_has_occurred) {
      packet_loss_has_occurred = true;
      nbr_packets_dropped++;
      if (config_.packet_loss_mode == kBurst) {
        // Initiate a new burst. The current packet counts as the first of
        // the burst, hence the -1.
        active_burst_packets_ = config_.packet_loss_burst_length - 1;
      }
    } else {
      // Packet survived; keep its bytes in the truncated image.
      new_length += nbr_bytes_to_read;
    }
  }
  encoded_image->_length = new_length;
  if (nbr_packets_dropped > 0) {
    // Must set completeFrame to false to inform the decoder about this:
    encoded_image->_completeFrame = false;
    if (verbose_) {
      // NOTE(review): the second %d prints _timeStamp, presumably an
      // unsigned 32-bit value — confirm %u would not be the correct
      // specifier here.
      printf("Dropped %d packets for frame %d (frame length: %" PRIuS ")\n",
             nbr_packets_dropped, encoded_image->_timeStamp,
             encoded_image->_length);
    }
  }
  return nbr_packets_dropped;
}
// Re-seeds the deterministic PRNG; call before a test run to make the drop
// pattern repeatable.
// NOTE(review): writes random_seed_ without taking critsect_ — assumed to be
// called before any concurrent ManipulatePackets() use; confirm.
void PacketManipulatorImpl::InitializeRandomSeed(unsigned int seed) {
  random_seed_ = seed;
}
// Returns a deterministic pseudo-random value derived solely from
// random_seed_, in the range (0.0, 1.0]. Guarded by critsect_ so concurrent
// callers cannot interleave the srand()/rand() pair.
inline double PacketManipulatorImpl::RandomUniform() {
  // Use the previous result as new seed before each rand() call. Doing this
  // it doesn't matter if other threads are calling rand() since we'll always
  // get the same behavior as long as we're using a fixed initial seed.
  critsect_.Enter();
  srand(random_seed_);
  random_seed_ = rand();  // NOLINT (rand_r instead of rand)
  critsect_.Leave();
  return (random_seed_ + 1.0) / (RAND_MAX + 1.0);
}
// Maps a PacketLossMode value to its human-readable name. Unknown values
// trip an assert in debug builds and yield "Unknown" otherwise.
const char* PacketLossModeToStr(PacketLossMode e) {
  if (e == kUniform) {
    return "Uniform";
  }
  if (e == kBurst) {
    return "Burst";
  }
  assert(false);
  return "Unknown";
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,115 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_PACKET_MANIPULATOR_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_PACKET_MANIPULATOR_H_
#include <stdlib.h>
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/rtc_base/criticalsection.h"
#include "webrtc/test/testsupport/packet_reader.h"
namespace webrtc {
namespace test {
// Which mode the packet loss shall be performed according to.
enum PacketLossMode {
  // Drops packets with a configured probability independently for each packet.
  kUniform,
  // Drops packets similar to uniform but when a packet is being dropped,
  // the number of lost packets in a row is equal to the configured burst
  // length.
  kBurst
};
// Returns a string representation of the enum value ("Uniform"/"Burst").
const char* PacketLossModeToStr(PacketLossMode e);
// Contains configurations related to networking and simulation of
// scenarios caused by network interference. Plain value struct; copyable.
struct NetworkingConfig {
  NetworkingConfig()
      : packet_size_in_bytes(1500),
        max_payload_size_in_bytes(1440),
        packet_loss_mode(kUniform),
        packet_loss_probability(0.0),
        packet_loss_burst_length(1) {}

  // Packet size in bytes. Default: 1500 bytes.
  size_t packet_size_in_bytes;

  // Encoder specific setting of maximum size in bytes of each payload.
  // Default: 1440 bytes.
  size_t max_payload_size_in_bytes;

  // Packet loss mode. Two different packet loss models are supported:
  // uniform or burst. This setting has no effect unless
  // packet_loss_probability is >0.
  // Default: uniform.
  PacketLossMode packet_loss_mode;

  // Packet loss probability. A value between 0.0 and 1.0 that defines the
  // probability of a packet being lost. 0.1 means 10% and so on.
  // Default: 0 (no loss).
  double packet_loss_probability;

  // Packet loss burst length. Defines how many packets will be lost in a burst
  // when a packet has been decided to be lost. Must be >=1. Default: 1.
  int packet_loss_burst_length;
};
// Class for simulating packet loss on the encoded frame data.
// When a packet loss has occurred in a frame, the remaining data in that
// frame is lost (even if burst length is only a single packet).
// TODO(kjellander): Support discarding only individual packets in the frame
// when CL 172001 has been submitted. This also requires a correct
// fragmentation header to be passed to the decoder.
//
// To get a repeatable packet drop pattern, re-initialize the random seed
// using InitializeRandomSeed before each test run.
class PacketManipulator {
 public:
  virtual ~PacketManipulator() {}

  // Manipulates the data of the encoded_image to simulate parts being lost
  // during transport.
  // If packets are dropped from frame data, the _completeFrame field will be
  // set to false.
  // Returns the number of packets being dropped.
  virtual int ManipulatePackets(webrtc::EncodedImage* encoded_image) = 0;
};
// Default implementation of PacketManipulator. Drop decisions come from a
// deterministic, seedable PRNG (see RandomUniform in the .cc file).
class PacketManipulatorImpl : public PacketManipulator {
 public:
  // |packet_reader| and |config| are borrowed and must outlive this object.
  PacketManipulatorImpl(PacketReader* packet_reader,
                        const NetworkingConfig& config,
                        bool verbose);
  ~PacketManipulatorImpl() = default;
  int ManipulatePackets(webrtc::EncodedImage* encoded_image) override;
  // Re-seeds the PRNG; call before a run for a repeatable drop pattern.
  virtual void InitializeRandomSeed(unsigned int seed);

 protected:
  // Returns a uniformly distributed random value between 0.0 and 1.0
  virtual double RandomUniform();

 private:
  PacketReader* packet_reader_;     // Not owned.
  const NetworkingConfig& config_;  // Not owned.
  // Used to simulate a burst over several frames.
  int active_burst_packets_;
  rtc::CriticalSection critsect_;   // Guards the srand()/rand() sequence.
  unsigned int random_seed_;
  bool verbose_;                    // Enables per-frame drop logging.
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_PACKET_MANIPULATOR_H_

View File

@ -0,0 +1,148 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test/packet_manipulator.h"
#include <queue>
#include "webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/test/gtest.h"
#include "webrtc/test/testsupport/unittest_utils.h"
#include "webrtc/typedefs.h"
namespace webrtc {
namespace test {
const double kNeverDropProbability = 0.0;
const double kAlwaysDropProbability = 1.0;
const int kBurstLength = 1;
class PacketManipulatorTest : public PacketRelatedTest {
protected:
PacketReader packet_reader_;
EncodedImage image_;
NetworkingConfig drop_config_;
NetworkingConfig no_drop_config_;
PacketManipulatorTest() {
image_._buffer = packet_data_;
image_._length = kPacketDataLength;
image_._size = kPacketDataLength;
drop_config_.packet_size_in_bytes = kPacketSizeInBytes;
drop_config_.packet_loss_probability = kAlwaysDropProbability;
drop_config_.packet_loss_burst_length = kBurstLength;
drop_config_.packet_loss_mode = kUniform;
no_drop_config_.packet_size_in_bytes = kPacketSizeInBytes;
no_drop_config_.packet_loss_probability = kNeverDropProbability;
no_drop_config_.packet_loss_burst_length = kBurstLength;
no_drop_config_.packet_loss_mode = kUniform;
}
virtual ~PacketManipulatorTest() {}
void SetUp() { PacketRelatedTest::SetUp(); }
void TearDown() { PacketRelatedTest::TearDown(); }
void VerifyPacketLoss(int expected_nbr_packets_dropped,
int actual_nbr_packets_dropped,
size_t expected_packet_data_length,
uint8_t* expected_packet_data,
const EncodedImage& actual_image) {
EXPECT_EQ(expected_nbr_packets_dropped, actual_nbr_packets_dropped);
EXPECT_EQ(expected_packet_data_length, image_._length);
EXPECT_EQ(0, memcmp(expected_packet_data, actual_image._buffer,
expected_packet_data_length));
}
};
// Sanity check: construction with a valid reader/config must not crash.
TEST_F(PacketManipulatorTest, Constructor) {
  PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
}

// With 0% loss probability nothing may be dropped or modified.
TEST_F(PacketManipulatorTest, DropNone) {
  PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
  int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
  VerifyPacketLoss(0, nbr_packets_dropped, kPacketDataLength, packet_data_,
                   image_);
}

// A frame smaller than a single packet must also survive intact at 0% loss.
TEST_F(PacketManipulatorTest, UniformDropNoneSmallFrame) {
  size_t data_length = 400;  // smaller than the packet size
  image_._length = data_length;
  PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
  int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
  VerifyPacketLoss(0, nbr_packets_dropped, data_length, packet_data_, image_);
}

// With 100% loss probability every packet must be dropped.
TEST_F(PacketManipulatorTest, UniformDropAll) {
  PacketManipulatorImpl manipulator(&packet_reader_, drop_config_, false);
  int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
  VerifyPacketLoss(kPacketDataNumberOfPackets, nbr_packets_dropped, 0,
                   packet_data_, image_);
}

// Use our customized test class to make the second packet being lost
TEST_F(PacketManipulatorTest, UniformDropSinglePacket) {
  drop_config_.packet_loss_probability = 0.5;
  PredictivePacketManipulator manipulator(&packet_reader_, drop_config_);
  manipulator.AddRandomResult(1.0);
  manipulator.AddRandomResult(0.3);  // less than 0.5 will cause packet loss
  manipulator.AddRandomResult(1.0);

  // Execute the test target method:
  int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);

  // Since we setup the predictive packet manipulator, it will throw away the
  // second packet. The third packet is also lost because when we have lost one,
  // the remains shall also be discarded (in the current implementation).
  VerifyPacketLoss(2, nbr_packets_dropped, kPacketSizeInBytes, packet1_,
                   image_);
}
// Use our customized test class to control exactly which packets are lost.
// In burst mode, one loss (at packet 2) plus remaining-frame discard must
// drop everything after the first packet.
TEST_F(PacketManipulatorTest, BurstDropNinePackets) {
  // Create a longer packet data structure (10 packets)
  const int kNbrPackets = 10;
  const size_t kDataLength = kPacketSizeInBytes * kNbrPackets;
  uint8_t data[kDataLength];
  uint8_t* data_pointer = data;
  // Fill with 0s, 1s and so on to be able to easily verify which were dropped:
  for (int i = 0; i < kNbrPackets; ++i) {
    memset(data_pointer + i * kPacketSizeInBytes, i, kPacketSizeInBytes);
  }
  // Overwrite the defaults from the test fixture:
  image_._buffer = data;
  image_._length = kDataLength;
  image_._size = kDataLength;

  drop_config_.packet_loss_probability = 0.5;
  drop_config_.packet_loss_burst_length = 5;
  drop_config_.packet_loss_mode = kBurst;
  PredictivePacketManipulator manipulator(&packet_reader_, drop_config_);
  manipulator.AddRandomResult(1.0);
  manipulator.AddRandomResult(0.3);  // less than 0.5 will cause packet loss
  for (int i = 0; i < kNbrPackets - 2; ++i) {
    manipulator.AddRandomResult(1.0);
  }

  // Execute the test target method:
  int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);

  // Should discard every packet after the first one.
  VerifyPacketLoss(9, nbr_packets_dropped, kPacketSizeInBytes, data, image_);
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,452 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Plots statistics from WebRTC integration test logs.
Usage: $ python plot_webrtc_test_logs.py filename.txt
"""
import numpy
import sys
import re
import matplotlib.pyplot as plt
# Log events. Each test case in the log is bracketed by these two markers.
EVENT_START = \
'RUN ] CodecSettings/VideoProcessorIntegrationTestParameterized.'
EVENT_END = 'OK ] CodecSettings/VideoProcessorIntegrationTestParameterized.'

# Metrics to plot, tuple: (name to parse in file, label to use when plotting).
BITRATE = ('Target bitrate', 'target bitrate (kbps)')
WIDTH = ('Width', 'width')
HEIGHT = ('Height', 'height')
FILENAME = ('Filename', 'clip')
CODEC_TYPE = ('Codec type', 'Codec')
ENCODER_IMPLEMENTATION_NAME = ('Encoder implementation name', 'enc name')
DECODER_IMPLEMENTATION_NAME = ('Decoder implementation name', 'dec name')
CODEC_IMPLEMENTATION_NAME = ('Codec implementation name', 'codec name')
CORES = ('# CPU cores used', 'CPU cores used')
DENOISING = ('Denoising', 'denoising')
RESILIENCE = ('Resilience', 'resilience')
ERROR_CONCEALMENT = ('Error concealment', 'error concealment')
QP = ('Average QP', 'avg QP')
PSNR = ('PSNR avg', 'PSNR (dB)')
SSIM = ('SSIM avg', 'SSIM')
ENC_BITRATE = ('Encoded bitrate', 'encoded bitrate (kbps)')
FRAMERATE = ('Frame rate', 'fps')
NUM_FRAMES = ('# processed frames', 'num frames')
NUM_DROPPED_FRAMES = ('# dropped frames', 'num dropped frames')
NUM_FRAMES_TO_TARGET = ('# frames to convergence',
                        'frames to reach target rate')
ENCODE_TIME = ('Encoding time', 'encode time (us)')
ENCODE_TIME_AVG = ('Encoding time', 'encode time (us) avg')
DECODE_TIME = ('Decoding time', 'decode time (us)')
DECODE_TIME_AVG = ('Decoding time', 'decode time (us) avg')
FRAME_SIZE = ('Frame sizes', 'frame size (bytes)')
FRAME_SIZE_AVG = ('Frame sizes', 'frame size (bytes) avg')
AVG_KEY_FRAME_SIZE = ('Average key frame size', 'avg key frame size (bytes)')
AVG_NON_KEY_FRAME_SIZE = ('Average non-key frame size',
                          'avg non-key frame size (bytes)')

# Settings.
SETTINGS = [
  WIDTH,
  HEIGHT,
  FILENAME,
  NUM_FRAMES,
  ENCODE_TIME,
  DECODE_TIME,
  FRAME_SIZE,
]

# Settings, options for x-axis.
X_SETTINGS = [
  CORES,
  FRAMERATE,
  DENOISING,
  RESILIENCE,
  ERROR_CONCEALMENT,
  BITRATE,  # TODO(asapersson): Needs to be last.
]

# Settings, options for subplots.
SUBPLOT_SETTINGS = [
  CODEC_TYPE,
  ENCODER_IMPLEMENTATION_NAME,
  DECODER_IMPLEMENTATION_NAME,
  CODEC_IMPLEMENTATION_NAME,
] + X_SETTINGS

# Results.
RESULTS = [
  PSNR,
  SSIM,
  ENC_BITRATE,
  NUM_DROPPED_FRAMES,
  NUM_FRAMES_TO_TARGET,
  ENCODE_TIME_AVG,
  DECODE_TIME_AVG,
  QP,
  AVG_KEY_FRAME_SIZE,
  AVG_NON_KEY_FRAME_SIZE,
]

# Everything the parser should look for, and the plottable result labels.
METRICS_TO_PARSE = SETTINGS + SUBPLOT_SETTINGS + RESULTS
Y_METRICS = [res[1] for res in RESULTS]

# Parameters for plotting.
FIG_SIZE_SCALE_FACTOR_X = 1.6
FIG_SIZE_SCALE_FACTOR_Y = 1.8
GRID_COLOR = [0.45, 0.45, 0.45]
def ParseSetting(filename, setting):
  """Parses setting from file.

  Scans |filename| for EVENT_START/EVENT_END bracketed test cases and
  collects the distinct values seen for |setting|.

  Args:
    filename: The name of the file.
    setting: Name of setting to parse (e.g. width).

  Returns:
    A list holding parsed settings, e.g. ['width: 128.0', 'width: 160.0'] """
  settings = []

  settings_file = open(filename)
  while True:
    line = settings_file.readline()
    if not line:
      break
    if re.search(r'%s' % EVENT_START, line):
      # Parse event.
      parsed = {}
      while True:
        line = settings_file.readline()
        if not line:
          break
        if re.search(r'%s' % EVENT_END, line):
          # Add parsed setting to list (deduplicated).
          if setting in parsed:
            s = setting + ': ' + str(parsed[setting])
            if s not in settings:
              settings.append(s)
          break
        TryFindMetric(parsed, line, settings_file)
  settings_file.close()
  return settings
def ParseMetrics(filename, setting1, setting2):
  """Parses metrics from file.

  Args:
    filename: The name of the file.
    setting1: First setting for sorting metrics (e.g. width).
    setting2: Second setting for sorting metrics (e.g. CPU cores used).

  Returns:
    A dictionary holding parsed metrics, keyed first by |setting1| and then
    by |setting2|. For example: metrics[key1][key2][measurement]

  metrics = {
  "width: 352": {
    "CPU cores used: 1.0": {
      "encode time (us)": [0.718005, 0.806925, 0.909726, 0.931835, 0.953642],
      "PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
      "bitrate (kbps)": [50, 100, 300, 500, 1000]
    },
    "CPU cores used: 2.0": {
      "encode time (us)": [0.718005, 0.806925, 0.909726, 0.931835, 0.953642],
      "PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
      "bitrate (kbps)": [50, 100, 300, 500, 1000]
    },
  },
  "width: 176": {
    "CPU cores used: 1.0": {
      "encode time (us)": [0.857897, 0.91608, 0.959173, 0.971116, 0.980961],
      "PSNR (dB)": [30.243646, 33.375592, 37.574387, 39.42184, 41.437897],
      "bitrate (kbps)": [50, 100, 300, 500, 1000]
    },
  }
  } """
  metrics = {}

  # Parse events.
  settings_file = open(filename)
  while True:
    line = settings_file.readline()
    if not line:
      break
    if re.search(r'%s' % EVENT_START, line):
      # Parse event.
      parsed = {}
      while True:
        line = settings_file.readline()
        if not line:
          break
        if re.search(r'%s' % EVENT_END, line):
          # Add parsed values to metrics, appending each label's value to
          # the per-(key1, key2) list so repeated runs accumulate.
          key1 = setting1 + ': ' + str(parsed[setting1])
          key2 = setting2 + ': ' + str(parsed[setting2])
          if key1 not in metrics:
            metrics[key1] = {}
          if key2 not in metrics[key1]:
            metrics[key1][key2] = {}
          for label in parsed:
            if label not in metrics[key1][key2]:
              metrics[key1][key2][label] = []
            metrics[key1][key2][label].append(parsed[label])
          break
        TryFindMetric(parsed, line, settings_file)
  settings_file.close()
  return metrics
def TryFindMetric(parsed, line, settings_file):
  """Looks for any known metric on |line| and records it in |parsed|.

  If a metric name matches but no inline value is found, the next three
  lines are consumed from |settings_file| and parsed as Min/Max/Average
  statistics. Only the first matching metric is processed per call.
  """
  for metric in METRICS_TO_PARSE:
    name = metric[0]
    label = metric[1]
    if re.search(r'%s' % name, line):
      found, value = GetMetric(name, line)
      if not found:
        # TODO(asapersson): Change format.
        # Try find min, max, average stats.
        found, minimum = GetMetric("Min", settings_file.readline())
        if not found:
          return
        found, maximum = GetMetric("Max", settings_file.readline())
        if not found:
          return
        found, average = GetMetric("Average", settings_file.readline())
        if not found:
          return
        parsed[label + ' min'] = minimum
        parsed[label + ' max'] = maximum
        parsed[label + ' avg'] = average
      # NOTE(review): after the min/max/avg branch above, |value| is still
      # -1 here, so parsed[label] is set to -1 — presumably only the derived
      # ' min'/' max'/' avg' labels are consumed in that case; confirm.
      parsed[label] = value
      return
def GetMetric(name, string):
  """Extracts the value following |name| in |string|.

  Tries a numeric value first (e.g. "bitrate = 98.8253"), then an
  alphanumeric one (e.g. "codec type : VP8").

  Returns:
    (True, value) on success; (False, -1) if |name| has no value in |string|.
  """
  # Float (e.g. bitrate = 98.8253).
  numeric_pattern = r'%s\s*[:=]\s*([+-]?\d+\.*\d*)' % name
  match = re.search(numeric_pattern, string)
  if match:
    return StringToFloat(match.group(1))
  # Alphanumeric characters (e.g. codec type : VP8).
  word_pattern = r'%s\s*[:=]\s*(\w+)' % name
  match = re.search(word_pattern, string)
  if match:
    return True, match.group(1)
  return False, -1
def StringToFloat(value):
  """Converts |value| to float.

  Returns:
    (True, float_value) on success; (False, -1) if |value| is not numeric,
    after logging a skip message.
  """
  try:
    value = float(value)
  except ValueError:
    # Parenthesized print is valid in both Python 2 (as a statement with a
    # parenthesized expression) and Python 3 (as a function call).
    print("Not a float, skipped %s" % value)
    return False, -1
  return True, value
def Plot(y_metric, x_metric, metrics):
  """Plots y_metric vs x_metric per key in metrics.

  Keys whose data lacks |y_metric| or whose x/y lengths mismatch are
  reported and skipped.

  For example:
    y_metric = 'PSNR (dB)'
    x_metric = 'bitrate (kbps)'
    metrics = {
      "CPU cores used: 1.0": {
        "PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
        "bitrate (kbps)": [50, 100, 300, 500, 1000]
      },
      "CPU cores used: 2.0": {
        "PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
        "bitrate (kbps)": [50, 100, 300, 500, 1000]
      },
    }
  """
  for key in sorted(metrics):
    data = metrics[key]
    if y_metric not in data:
      # Parenthesized print keeps this valid under both Python 2 and 3.
      print("Failed to find metric: %s" % y_metric)
      continue
    y = numpy.array(data[y_metric])
    x = numpy.array(data[x_metric])
    if len(y) != len(x):
      print("Length mismatch for %s, %s" % (y, x))
      continue
    label = y_metric + ' - ' + str(key)
    plt.plot(x, y, label=label, linewidth=1.5, marker='o', markersize=5,
             markeredgewidth=0.0)
def PlotFigure(settings, y_metrics, x_metric, metrics, title):
  """Plots metrics in y_metrics list. One figure is plotted and each entry
  in the list is plotted in a subplot (and sorted per settings).

  For example:
    settings = ['width: 128.0', 'width: 160.0']. Sort subplot per setting.
    y_metrics = ['PSNR (dB)', 'PSNR (dB)']. Metric to plot per subplot.
    x_metric = 'bitrate (kbps)'
  """
  plt.figure()
  plt.suptitle(title, fontsize='large', fontweight='bold')
  settings.sort()
  rows = len(settings)
  cols = 1
  pos = 1
  # One subplot per setting, stacked vertically; only the bottom subplot
  # shows x tick labels.
  while pos <= rows:
    plt.rc('grid', color=GRID_COLOR)
    ax = plt.subplot(rows, cols, pos)
    plt.grid()
    plt.setp(ax.get_xticklabels(), visible=(pos == rows), fontsize='large')
    plt.setp(ax.get_yticklabels(), fontsize='large')
    setting = settings[pos - 1]
    Plot(y_metrics[pos - 1], x_metric, metrics[setting])
    if setting.startswith(WIDTH[1]):
      plt.title(setting, fontsize='medium')
    plt.legend(fontsize='large', loc='best')
    pos += 1
  plt.xlabel(x_metric, fontsize='large')
  plt.subplots_adjust(left=0.06, right=0.98, bottom=0.05, top=0.94, hspace=0.08)
def GetTitle(filename, setting):
  """Builds a figure title from the settings parsed out of |filename|,
  omitting the dimension currently used for subplots (|setting|)."""
  title = ''
  if setting != CODEC_IMPLEMENTATION_NAME[1] and setting != CODEC_TYPE[1]:
    codec_types = ParseSetting(filename, CODEC_TYPE[1])
    for i in range(0, len(codec_types)):
      title += codec_types[i] + ', '
  if setting != CORES[1]:
    cores = ParseSetting(filename, CORES[1])
    for i in range(0, len(cores)):
      # split('.') strips the decimal part of values like "cores: 1.0".
      title += cores[i].split('.')[0] + ', '
  if setting != FRAMERATE[1]:
    framerate = ParseSetting(filename, FRAMERATE[1])
    for i in range(0, len(framerate)):
      title += framerate[i].split('.')[0] + ', '
  if (setting != CODEC_IMPLEMENTATION_NAME[1] and
      setting != ENCODER_IMPLEMENTATION_NAME[1]):
    enc_names = ParseSetting(filename, ENCODER_IMPLEMENTATION_NAME[1])
    for i in range(0, len(enc_names)):
      title += enc_names[i] + ', '
  if (setting != CODEC_IMPLEMENTATION_NAME[1] and
      setting != DECODER_IMPLEMENTATION_NAME[1]):
    dec_names = ParseSetting(filename, DECODER_IMPLEMENTATION_NAME[1])
    for i in range(0, len(dec_names)):
      title += dec_names[i] + ', '
  # Clip name (text before the first underscore) and frame counts.
  filenames = ParseSetting(filename, FILENAME[1])
  title += filenames[0].split('_')[0]
  num_frames = ParseSetting(filename, NUM_FRAMES[1])
  for i in range(0, len(num_frames)):
    title += ' (' + num_frames[i].split('.')[0] + ')'
  return title
def ToString(input_list):
  # The ('', '') sentinel matches no real metric tuple, so every entry of
  # |input_list| is included in the numbered listing.
  return ToStringWithoutMetric(input_list, ('', ''))
def ToStringWithoutMetric(input_list, metric):
  """Builds a numbered, newline-terminated listing of the plot labels in
  |input_list|, skipping the entry equal to |metric| (numbering stays
  contiguous across the skip)."""
  entries = []
  number = 1
  for candidate in input_list:
    if candidate != metric:
      entries.append('%s. %s\n' % (number, candidate[1]))
      number += 1
  return ''.join(entries)
def GetIdx(text_list):
  # Prompts with |text_list| and converts the 1-based menu answer to a
  # 0-based index (entering 0 yields -1, meaning "all").
  # NOTE: raw_input is Python 2 only; under Python 3 this needs input().
  return int(raw_input(text_list)) - 1
def main():
  """Interactively chooses what to plot from the log file named in argv[1],
  parses it, and shows the resulting matplotlib figure."""
  filename = sys.argv[1]

  # Setup.
  idx_metric = GetIdx("Choose metric:\n0. All\n%s" % ToString(RESULTS))
  if idx_metric == -1:
    # Plot all metrics. One subplot for each metric.
    # Per subplot: metric vs bitrate (per resolution).
    cores = ParseSetting(filename, CORES[1])
    setting1 = CORES[1]
    setting2 = WIDTH[1]
    sub_keys = [cores[0]] * len(Y_METRICS)
    y_metrics = Y_METRICS
    x_metric = BITRATE[1]
  else:
    resolutions = ParseSetting(filename, WIDTH[1])
    idx = GetIdx("Select metric for x-axis:\n%s" % ToString(X_SETTINGS))
    if X_SETTINGS[idx] == BITRATE:
      idx = GetIdx("Plot per:\n%s" % ToStringWithoutMetric(SUBPLOT_SETTINGS,
                                                           BITRATE))
      idx_setting = METRICS_TO_PARSE.index(SUBPLOT_SETTINGS[idx])
      # Plot one metric. One subplot for each resolution.
      # Per subplot: metric vs bitrate (per setting).
      setting1 = WIDTH[1]
      setting2 = METRICS_TO_PARSE[idx_setting][1]
      sub_keys = resolutions
      y_metrics = [RESULTS[idx_metric][1]] * len(sub_keys)
      x_metric = BITRATE[1]
    else:
      # Plot one metric. One subplot for each resolution.
      # Per subplot: metric vs setting (per bitrate).
      setting1 = WIDTH[1]
      setting2 = BITRATE[1]
      sub_keys = resolutions
      y_metrics = [RESULTS[idx_metric][1]] * len(sub_keys)
      x_metric = X_SETTINGS[idx][1]

  metrics = ParseMetrics(filename, setting1, setting2)

  # Stretch fig size.
  figsize = plt.rcParams["figure.figsize"]
  figsize[0] *= FIG_SIZE_SCALE_FACTOR_X
  figsize[1] *= FIG_SIZE_SCALE_FACTOR_Y
  plt.rcParams["figure.figsize"] = figsize

  PlotFigure(sub_keys, y_metrics, x_metric, metrics,
             GetTitle(filename, setting2))

  plt.show()


if __name__ == '__main__':
  main()

View File

@ -0,0 +1,47 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h"
#include <assert.h>
#include <stdio.h>
#include "webrtc/test/testsupport/packet_reader.h"
namespace webrtc {
namespace test {
// Forwards |packet_reader| and |config| to PacketManipulatorImpl with
// verbose output disabled; both arguments must outlive this object.
PredictivePacketManipulator::PredictivePacketManipulator(
    PacketReader* packet_reader,
    const NetworkingConfig& config)
    : PacketManipulatorImpl(packet_reader, config, false) {}

PredictivePacketManipulator::~PredictivePacketManipulator() {}
// Queues |result| (must be in [0.0, 1.0]) to be returned by the next
// RandomUniform() call; results are consumed in FIFO order.
void PredictivePacketManipulator::AddRandomResult(double result) {
  assert(result >= 0.0 && result <= 1.0);
  random_results_.push(result);
}
// Returns the next pre-seeded "random" value in FIFO order. It is a usage
// error to draw more values than were supplied via AddRandomResult(); in
// that case an explanatory message is printed and the process asserts.
double PredictivePacketManipulator::RandomUniform() {
  if (random_results_.size() == 0u) {
    // Fixed: the original adjacent string literals concatenated without a
    // space ("AddRandomResult()is called").
    fprintf(stderr,
            "No more stored results, please make sure AddRandomResult() "
            "is called same amount of times you're going to invoke the "
            "RandomUniform() function, i.e. once per packet.\n");
    assert(false);
  }
  double result = random_results_.front();
  random_results_.pop();
  return result;
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,46 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_PREDICTIVE_PACKET_MANIPULATOR_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_PREDICTIVE_PACKET_MANIPULATOR_H_
#include <queue>
#include "webrtc/modules/video_coding/codecs/test/packet_manipulator.h"
#include "webrtc/test/testsupport/packet_reader.h"
namespace webrtc {
namespace test {
// Predictive packet manipulator that allows for setup of the result of
// the random invocations.
class PredictivePacketManipulator : public PacketManipulatorImpl {
 public:
  // |packet_reader| and |config| are forwarded to PacketManipulatorImpl;
  // the reader is held by pointer and presumably not owned — confirm
  // ownership in the base class.
  PredictivePacketManipulator(PacketReader* packet_reader,
                              const NetworkingConfig& config);
  virtual ~PredictivePacketManipulator();

  // Adds a result. You must add at least the same number of results as the
  // expected calls to the RandomUniform method. The results are added to a
  // FIFO queue so they will be returned in the same order they were added.
  // Result parameter must be 0.0 to 1.0.
  void AddRandomResult(double result);

 protected:
  // Returns a uniformly distributed random value between 0.0 and 1.0
  double RandomUniform() override;

 private:
  // Pre-seeded values, consumed front-to-back by RandomUniform().
  std::queue<double> random_results_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_PREDICTIVE_PACKET_MANIPULATOR_H_

View File

@ -0,0 +1,180 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test/stats.h"
#include <stdio.h>
#include <algorithm>
#include "webrtc/rtc_base/checks.h"
#include "webrtc/rtc_base/format_macros.h"
namespace webrtc {
namespace test {
namespace {
bool LessForEncodeTime(const FrameStatistic& s1, const FrameStatistic& s2) {
RTC_DCHECK_NE(s1.frame_number, s2.frame_number);
return s1.encode_time_us < s2.encode_time_us;
}
bool LessForDecodeTime(const FrameStatistic& s1, const FrameStatistic& s2) {
RTC_DCHECK_NE(s1.frame_number, s2.frame_number);
return s1.decode_time_us < s2.decode_time_us;
}
bool LessForEncodedSize(const FrameStatistic& s1, const FrameStatistic& s2) {
RTC_DCHECK_NE(s1.frame_number, s2.frame_number);
return s1.encoded_frame_size_bytes < s2.encoded_frame_size_bytes;
}
bool LessForBitRate(const FrameStatistic& s1, const FrameStatistic& s2) {
RTC_DCHECK_NE(s1.frame_number, s2.frame_number);
return s1.bitrate_kbps < s2.bitrate_kbps;
}
} // namespace
// Appends a statistics entry for the next frame, numbered sequentially,
// and returns a pointer to it for the caller to fill in.
FrameStatistic* Stats::AddFrame() {
  // Frame counts are expected to fit in an int.
  const int frame_number = static_cast<int>(stats_.size());
  stats_.emplace_back(frame_number);
  return &stats_.back();
}
// Returns the entry for |frame_number|; out-of-range indices are fatal.
FrameStatistic* Stats::GetFrame(int frame_number) {
  RTC_CHECK_GE(frame_number, 0);
  RTC_CHECK_LT(frame_number, stats_.size());
  return stats_.data() + frame_number;
}
// Number of frames logged so far via AddFrame().
size_t Stats::size() const {
  return stats_.size();
}
// Prints aggregate statistics for all logged frames to stdout: min/max/
// average encode and decode time, frame sizes (split per key/delta frame),
// bitrate extremes, total processing time, and average QP. Prints a notice
// and returns early when no frames have been logged.
// NOTE(review): the time accumulators are plain int (microseconds); very
// long runs could overflow — confirm expected sequence lengths.
void Stats::PrintSummary() const {
  if (stats_.empty()) {
    printf("No frame statistics have been logged yet.\n");
    return;
  }

  // Calculate min, max, average and total encoding time.
  int total_encoding_time_us = 0;
  int total_decoding_time_us = 0;
  size_t total_encoded_frame_size_bytes = 0;
  size_t total_encoded_key_frame_size_bytes = 0;
  size_t total_encoded_delta_frame_size_bytes = 0;
  size_t num_key_frames = 0;
  size_t num_delta_frames = 0;

  for (const FrameStatistic& stat : stats_) {
    total_encoding_time_us += stat.encode_time_us;
    total_decoding_time_us += stat.decode_time_us;
    total_encoded_frame_size_bytes += stat.encoded_frame_size_bytes;
    if (stat.frame_type == webrtc::kVideoFrameKey) {
      total_encoded_key_frame_size_bytes += stat.encoded_frame_size_bytes;
      ++num_key_frames;
    } else {
      total_encoded_delta_frame_size_bytes += stat.encoded_frame_size_bytes;
      ++num_delta_frames;
    }
  }

  // Encoding stats.
  printf("Encoding time:\n");
  auto frame_it =
      std::min_element(stats_.begin(), stats_.end(), LessForEncodeTime);
  printf("  Min     : %7d us (frame %d)\n", frame_it->encode_time_us,
         frame_it->frame_number);
  frame_it = std::max_element(stats_.begin(), stats_.end(), LessForEncodeTime);
  printf("  Max     : %7d us (frame %d)\n", frame_it->encode_time_us,
         frame_it->frame_number);
  printf("  Average : %7d us\n",
         static_cast<int>(total_encoding_time_us / stats_.size()));

  // Decoding stats.
  printf("Decoding time:\n");
  // Only consider successfully decoded frames (packet loss may cause failures).
  std::vector<FrameStatistic> decoded_frames;
  for (const FrameStatistic& stat : stats_) {
    if (stat.decoding_successful) {
      decoded_frames.push_back(stat);
    }
  }
  if (decoded_frames.empty()) {
    printf("No successfully decoded frames exist in this statistics.\n");
  } else {
    frame_it = std::min_element(decoded_frames.begin(), decoded_frames.end(),
                                LessForDecodeTime);
    printf("  Min     : %7d us (frame %d)\n", frame_it->decode_time_us,
           frame_it->frame_number);
    frame_it = std::max_element(decoded_frames.begin(), decoded_frames.end(),
                                LessForDecodeTime);
    printf("  Max     : %7d us (frame %d)\n", frame_it->decode_time_us,
           frame_it->frame_number);
    printf("  Average : %7d us\n",
           static_cast<int>(total_decoding_time_us / decoded_frames.size()));
    printf("  Failures: %d frames failed to decode.\n",
           static_cast<int>(stats_.size() - decoded_frames.size()));
  }

  // Frame size stats.
  printf("Frame sizes:\n");
  frame_it = std::min_element(stats_.begin(), stats_.end(), LessForEncodedSize);
  printf("  Min     : %7" PRIuS " bytes (frame %d)\n",
         frame_it->encoded_frame_size_bytes, frame_it->frame_number);
  frame_it = std::max_element(stats_.begin(), stats_.end(), LessForEncodedSize);
  printf("  Max     : %7" PRIuS " bytes (frame %d)\n",
         frame_it->encoded_frame_size_bytes, frame_it->frame_number);
  printf("  Average : %7" PRIuS " bytes\n",
         total_encoded_frame_size_bytes / stats_.size());
  if (num_key_frames > 0) {
    printf("  Average key frame size    : %7" PRIuS " bytes (%" PRIuS
           " keyframes)\n",
           total_encoded_key_frame_size_bytes / num_key_frames, num_key_frames);
  }
  if (num_delta_frames > 0) {
    printf("  Average non-key frame size: %7" PRIuS " bytes (%" PRIuS
           " frames)\n",
           total_encoded_delta_frame_size_bytes / num_delta_frames,
           num_delta_frames);
  }

  // Bitrate stats.
  printf("Bitrates:\n");
  frame_it = std::min_element(stats_.begin(), stats_.end(), LessForBitRate);
  printf("  Min bitrate: %7d kbps (frame %d)\n", frame_it->bitrate_kbps,
         frame_it->frame_number);
  frame_it = std::max_element(stats_.begin(), stats_.end(), LessForBitRate);
  printf("  Max bitrate: %7d kbps (frame %d)\n", frame_it->bitrate_kbps,
         frame_it->frame_number);
  printf("\n");
  printf("Total encoding time  : %7d ms.\n", total_encoding_time_us / 1000);
  printf("Total decoding time  : %7d ms.\n", total_decoding_time_us / 1000);
  printf("Total processing time: %7d ms.\n",
         (total_encoding_time_us + total_decoding_time_us) / 1000);

  // QP stats.
  // Frames with qp < 0 (the "unknown" sentinel) are excluded from the mean.
  int total_qp = 0;
  int total_qp_count = 0;
  for (const FrameStatistic& stat : stats_) {
    if (stat.qp >= 0) {
      total_qp += stat.qp;
      ++total_qp_count;
    }
  }
  int avg_qp = (total_qp_count > 0) ? (total_qp / total_qp_count) : -1;
  printf("Average QP: %d\n", avg_qp);
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,76 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_STATS_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_STATS_H_
#include <vector>
#include "webrtc/common_types.h"
namespace webrtc {
namespace test {
// Statistics for one processed frame.
struct FrameStatistic {
  explicit FrameStatistic(int frame_number) : frame_number(frame_number) {}

  // Sequential, zero-based index assigned by Stats::AddFrame().
  const int frame_number = 0;

  // Encoding.
  // Timestamp in nanoseconds (set from rtc::TimeNanos() by the processor).
  int64_t encode_start_ns = 0;
  int encode_return_code = 0;
  bool encoding_successful = false;
  int encode_time_us = 0;
  int bitrate_kbps = 0;
  size_t encoded_frame_size_bytes = 0;
  webrtc::FrameType frame_type = kVideoFrameDelta;

  // Decoding.
  int64_t decode_start_ns = 0;
  int decode_return_code = 0;
  bool decoding_successful = false;
  int decode_time_us = 0;
  int decoded_width = 0;
  int decoded_height = 0;

  // Quantization.
  // -1 means "unknown"; Stats::PrintSummary() skips negative values.
  int qp = -1;

  // How many packets were discarded of the encoded frame data (if any).
  int packets_dropped = 0;
  size_t total_packets = 0;
  size_t manipulated_length = 0;
};
// Statistics for a sequence of processed frames. This class is not thread safe.
class Stats {
 public:
  Stats() = default;
  ~Stats() = default;

  // Creates a FrameStatistic for the next frame to be processed.
  // The returned pointer is owned by this object and is invalidated by a
  // subsequent AddFrame() (vector reallocation).
  FrameStatistic* AddFrame();

  // Returns the FrameStatistic corresponding to |frame_number|.
  FrameStatistic* GetFrame(int frame_number);

  // Number of frames added so far.
  size_t size() const;

  // TODO(brandtr): Add output as CSV.
  void PrintSummary() const;

 private:
  std::vector<FrameStatistic> stats_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_STATS_H_

View File

@ -0,0 +1,43 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test/stats.h"
#include "webrtc/test/gtest.h"
namespace webrtc {
namespace test {
// PrintSummary() must handle the no-frames case gracefully.
TEST(StatsTest, TestEmptyObject) {
  Stats stats;
  stats.PrintSummary();  // Should not crash.
}
// The first added frame gets number 0 and the container grows to one entry.
TEST(StatsTest, AddSingleFrame) {
  Stats stats;
  FrameStatistic* stat = stats.AddFrame();
  EXPECT_EQ(1u, stats.size());
  EXPECT_EQ(0, stat->frame_number);
}
// Frame numbers are assigned sequentially, and a populated Stats object can
// print its summary without crashing.
TEST(StatsTest, AddMultipleFrames) {
  Stats stats;
  const int kFrameCount = 1000;
  for (int frame = 0; frame < kFrameCount; ++frame) {
    EXPECT_EQ(frame, stats.AddFrame()->frame_number);
  }
  EXPECT_EQ(kFrameCount, static_cast<int>(stats.size()));

  stats.PrintSummary();  // Should not crash.
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,126 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test/video_codec_test.h"
#include "webrtc/api/video/i420_buffer.h"
#include "webrtc/modules/video_coding/include/video_error_codes.h"
#include "webrtc/test/frame_utils.h"
#include "webrtc/test/testsupport/fileutils.h"
static const int kEncodeTimeoutMs = 100;
static const int kDecodeTimeoutMs = 25;
// Set bitrate to get higher quality.
static const int kStartBitrate = 300;
static const int kTargetBitrate = 2000;
static const int kMaxBitrate = 4000;
static const int kWidth = 172; // Width of the input image.
static const int kHeight = 144; // Height of the input image.
static const int kMaxFramerate = 30; // Arbitrary value.
namespace webrtc {
// Invoked by the encoder under test for every encoded frame. Stores a copy
// of the frame and the codec-specific info (minus |codec_name|) under the
// lock, then signals the event WaitForEncodedFrame() blocks on.
EncodedImageCallback::Result
VideoCodecTest::FakeEncodeCompleteCallback::OnEncodedImage(
    const EncodedImage& frame,
    const CodecSpecificInfo* codec_specific_info,
    const RTPFragmentationHeader* fragmentation) {
  rtc::CritScope lock(&test_->encoded_frame_section_);
  test_->encoded_frame_.emplace(frame);
  RTC_DCHECK(codec_specific_info);
  test_->codec_specific_info_.codecType = codec_specific_info->codecType;
  // Skip |codec_name|, to avoid allocating.
  test_->codec_specific_info_.codecSpecific =
      codec_specific_info->codecSpecific;
  test_->encoded_frame_event_.Set();
  return Result(Result::OK);
}
// Invoked by the decoder under test for every decoded frame. Stores the
// frame and its QP under the lock, then signals the event that
// WaitForDecodedFrame() blocks on. |decode_time_ms| is ignored.
void VideoCodecTest::FakeDecodeCompleteCallback::Decoded(
    VideoFrame& frame,
    rtc::Optional<int32_t> decode_time_ms,
    rtc::Optional<uint8_t> qp) {
  rtc::CritScope lock(&test_->decoded_frame_section_);
  test_->decoded_frame_.emplace(frame);
  test_->decoded_qp_ = qp;
  test_->decoded_frame_event_.Set();
}
// Test fixture setup: loads a single QCIF frame from the resource file into
// |input_frame_|, creates the subclass-provided encoder/decoder, registers
// the fake callbacks and initializes both codecs.
void VideoCodecTest::SetUp() {
  // Using a QCIF image. Processing only one frame.
  // Renamed from |source_file_|: the trailing underscore wrongly suggested a
  // data member; this is a function-local handle, closed before returning.
  FILE* source_file =
      fopen(test::ResourcePath("paris_qcif", "yuv").c_str(), "rb");
  ASSERT_TRUE(source_file != nullptr);
  rtc::scoped_refptr<VideoFrameBuffer> video_frame_buffer(
      test::ReadI420Buffer(kWidth, kHeight, source_file));
  input_frame_.reset(new VideoFrame(video_frame_buffer, kVideoRotation_0, 0));
  fclose(source_file);

  encoder_.reset(CreateEncoder());
  decoder_.reset(CreateDecoder());
  encoder_->RegisterEncodeCompleteCallback(&encode_complete_callback_);
  decoder_->RegisterDecodeCompleteCallback(&decode_complete_callback_);

  InitCodecs();
}
// Blocks up to kEncodeTimeoutMs for the encode callback to deliver a frame.
// On success, moves the frame into |frame|, copies the stored codec-specific
// info into |codec_specific_info| and returns true; returns false if no
// frame was captured. Not safe with multiple concurrent waiters.
bool VideoCodecTest::WaitForEncodedFrame(
    EncodedImage* frame,
    CodecSpecificInfo* codec_specific_info) {
  bool ret = encoded_frame_event_.Wait(kEncodeTimeoutMs);
  EXPECT_TRUE(ret) << "Timed out while waiting for an encoded frame.";
  // This becomes unsafe if there are multiple threads waiting for frames.
  rtc::CritScope lock(&encoded_frame_section_);
  EXPECT_TRUE(encoded_frame_);
  if (encoded_frame_) {
    *frame = std::move(*encoded_frame_);
    encoded_frame_.reset();
    RTC_DCHECK(codec_specific_info);
    codec_specific_info->codecType = codec_specific_info_.codecType;
    codec_specific_info->codecSpecific = codec_specific_info_.codecSpecific;
    return true;
  } else {
    return false;
  }
}
// Blocks up to kDecodeTimeoutMs for the decode callback to deliver a frame.
// On success, moves the frame into |frame|, copies its QP into |qp| and
// returns true; returns false if no frame was captured. Not safe with
// multiple concurrent waiters.
bool VideoCodecTest::WaitForDecodedFrame(std::unique_ptr<VideoFrame>* frame,
                                         rtc::Optional<uint8_t>* qp) {
  bool ret = decoded_frame_event_.Wait(kDecodeTimeoutMs);
  EXPECT_TRUE(ret) << "Timed out while waiting for a decoded frame.";
  // This becomes unsafe if there are multiple threads waiting for frames.
  rtc::CritScope lock(&decoded_frame_section_);
  EXPECT_TRUE(decoded_frame_);
  if (decoded_frame_) {
    frame->reset(new VideoFrame(std::move(*decoded_frame_)));
    *qp = decoded_qp_;
    decoded_frame_.reset();
    return true;
  } else {
    return false;
  }
}
// Populates |codec_settings_| from the subclass and overrides resolution,
// frame rate and bitrates with the test constants, then initializes both
// the encoder and the decoder with those settings.
void VideoCodecTest::InitCodecs() {
  codec_settings_ = codec_settings();
  codec_settings_.width = kWidth;
  codec_settings_.height = kHeight;
  codec_settings_.maxFramerate = kMaxFramerate;
  codec_settings_.startBitrate = kStartBitrate;
  codec_settings_.targetBitrate = kTargetBitrate;
  codec_settings_.maxBitrate = kMaxBitrate;

  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
                                 0 /* max payload size (unused) */));
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            decoder_->InitDecode(&codec_settings_, 1 /* number of cores */));
}
} // namespace webrtc

View File

@ -0,0 +1,111 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_TEST_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_TEST_H_
#include <memory>
#include "webrtc/api/video_codecs/video_decoder.h"
#include "webrtc/api/video_codecs/video_encoder.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/utility/vp8_header_parser.h"
#include "webrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.h"
#include "webrtc/rtc_base/criticalsection.h"
#include "webrtc/rtc_base/event.h"
#include "webrtc/rtc_base/thread_annotations.h"
#include "webrtc/test/gtest.h"
namespace webrtc {
class VideoCodecTest : public ::testing::Test {
public:
VideoCodecTest()
: encode_complete_callback_(this),
decode_complete_callback_(this),
encoded_frame_event_(false /* manual reset */,
false /* initially signaled */),
decoded_frame_event_(false /* manual reset */,
false /* initially signaled */) {}
protected:
class FakeEncodeCompleteCallback : public webrtc::EncodedImageCallback {
public:
explicit FakeEncodeCompleteCallback(VideoCodecTest* test) : test_(test) {}
Result OnEncodedImage(const EncodedImage& frame,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation);
private:
VideoCodecTest* const test_;
};
class FakeDecodeCompleteCallback : public webrtc::DecodedImageCallback {
public:
explicit FakeDecodeCompleteCallback(VideoCodecTest* test) : test_(test) {}
int32_t Decoded(VideoFrame& frame) override {
RTC_NOTREACHED();
return -1;
}
int32_t Decoded(VideoFrame& frame, int64_t decode_time_ms) override {
RTC_NOTREACHED();
return -1;
}
void Decoded(VideoFrame& frame,
rtc::Optional<int32_t> decode_time_ms,
rtc::Optional<uint8_t> qp) override;
private:
VideoCodecTest* const test_;
};
virtual VideoEncoder* CreateEncoder() = 0;
virtual VideoDecoder* CreateDecoder() = 0;
virtual VideoCodec codec_settings() = 0;
void SetUp() override;
bool WaitForEncodedFrame(EncodedImage* frame,
CodecSpecificInfo* codec_specific_info);
bool WaitForDecodedFrame(std::unique_ptr<VideoFrame>* frame,
rtc::Optional<uint8_t>* qp);
// Populated by InitCodecs().
VideoCodec codec_settings_;
std::unique_ptr<VideoFrame> input_frame_;
std::unique_ptr<VideoEncoder> encoder_;
std::unique_ptr<VideoDecoder> decoder_;
private:
void InitCodecs();
FakeEncodeCompleteCallback encode_complete_callback_;
FakeDecodeCompleteCallback decode_complete_callback_;
rtc::Event encoded_frame_event_;
rtc::CriticalSection encoded_frame_section_;
rtc::Optional<EncodedImage> encoded_frame_
RTC_GUARDED_BY(encoded_frame_section_);
CodecSpecificInfo codec_specific_info_ RTC_GUARDED_BY(encoded_frame_section_);
rtc::Event decoded_frame_event_;
rtc::CriticalSection decoded_frame_section_;
rtc::Optional<VideoFrame> decoded_frame_
RTC_GUARDED_BY(decoded_frame_section_);
rtc::Optional<uint8_t> decoded_qp_ RTC_GUARDED_BY(decoded_frame_section_);
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_TEST_H_

View File

@ -0,0 +1,486 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test/videoprocessor.h"
#include <string.h>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "webrtc/api/video/i420_buffer.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_rate_allocator.h"
#include "webrtc/modules/video_coding/include/video_codec_initializer.h"
#include "webrtc/modules/video_coding/utility/default_video_bitrate_allocator.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/rtc_base/logging.h"
#include "webrtc/rtc_base/timeutils.h"
#include "webrtc/system_wrappers/include/cpu_info.h"
#include "webrtc/test/gtest.h"
namespace webrtc {
namespace test {
namespace {
const int kRtpClockRateHz = 90000;
// Builds the bitrate allocator matching |config|'s codec type. For VP8 a
// TemporalLayersFactory is created; its ownership moves into the allocator
// while a raw pointer is stored in the codec settings so the encoder can
// reach it.
std::unique_ptr<VideoBitrateAllocator> CreateBitrateAllocator(
    TestConfig* config) {
  std::unique_ptr<TemporalLayersFactory> tl_factory;
  if (config->codec_settings.codecType == VideoCodecType::kVideoCodecVP8) {
    tl_factory.reset(new TemporalLayersFactory());
    config->codec_settings.VP8()->tl_factory = tl_factory.get();
  }
  return std::unique_ptr<VideoBitrateAllocator>(
      VideoCodecInitializer::CreateBitrateAllocator(config->codec_settings,
                                                    std::move(tl_factory)));
}
// Dumps the relevant codec settings to stdout: the common fields first, then
// the codec-specific fields for VP8, VP9 or H264.
void PrintCodecSettings(const VideoCodec& codec_settings) {
  printf(" Codec settings:\n");
  printf("  Codec type        : %s\n",
         CodecTypeToPayloadString(codec_settings.codecType));
  printf("  Start bitrate     : %d kbps\n", codec_settings.startBitrate);
  printf("  Max bitrate       : %d kbps\n", codec_settings.maxBitrate);
  printf("  Min bitrate       : %d kbps\n", codec_settings.minBitrate);
  printf("  Width             : %d\n", codec_settings.width);
  printf("  Height            : %d\n", codec_settings.height);
  printf("  Max frame rate    : %d\n", codec_settings.maxFramerate);
  printf("  QPmax             : %d\n", codec_settings.qpMax);
  if (codec_settings.codecType == kVideoCodecVP8) {
    printf("  Complexity        : %d\n", codec_settings.VP8().complexity);
    printf("  Resilience        : %d\n", codec_settings.VP8().resilience);
    printf("  # temporal layers : %d\n",
           codec_settings.VP8().numberOfTemporalLayers);
    printf("  Denoising         : %d\n", codec_settings.VP8().denoisingOn);
    printf("  Error concealment : %d\n",
           codec_settings.VP8().errorConcealmentOn);
    printf("  Automatic resize  : %d\n",
           codec_settings.VP8().automaticResizeOn);
    printf("  Frame dropping    : %d\n", codec_settings.VP8().frameDroppingOn);
    printf("  Key frame interval: %d\n", codec_settings.VP8().keyFrameInterval);
  } else if (codec_settings.codecType == kVideoCodecVP9) {
    printf("  Complexity        : %d\n", codec_settings.VP9().complexity);
    printf("  Resilience        : %d\n", codec_settings.VP9().resilienceOn);
    printf("  # temporal layers : %d\n",
           codec_settings.VP9().numberOfTemporalLayers);
    printf("  Denoising         : %d\n", codec_settings.VP9().denoisingOn);
    printf("  Frame dropping    : %d\n", codec_settings.VP9().frameDroppingOn);
    printf("  Key frame interval: %d\n", codec_settings.VP9().keyFrameInterval);
    printf("  Adaptive QP mode  : %d\n", codec_settings.VP9().adaptiveQpMode);
    printf("  Automatic resize  : %d\n",
           codec_settings.VP9().automaticResizeOn);
    printf("  # spatial layers  : %d\n",
           codec_settings.VP9().numberOfSpatialLayers);
    printf("  Flexible mode     : %d\n", codec_settings.VP9().flexibleMode);
  } else if (codec_settings.codecType == kVideoCodecH264) {
    printf("  Frame dropping    : %d\n", codec_settings.H264().frameDroppingOn);
    printf("  Key frame interval: %d\n",
           codec_settings.H264().keyFrameInterval);
    printf("  Profile           : %d\n", codec_settings.H264().profile);
  }
}
// Cross-checks the QP value reported by the encoder against the QP parsed
// from the bitstream. Skipped for hardware encoders and for codecs other
// than VP8/VP9 (the only ones with a parser here).
void VerifyQpParser(const EncodedImage& encoded_frame,
                    const TestConfig& config) {
  if (config.hw_encoder)
    return;

  int qp;
  if (config.codec_settings.codecType == kVideoCodecVP8) {
    ASSERT_TRUE(vp8::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
  } else if (config.codec_settings.codecType == kVideoCodecVP9) {
    ASSERT_TRUE(vp9::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
  } else {
    return;
  }
  EXPECT_EQ(encoded_frame.qp_, qp) << "Encoder QP != parsed bitstream QP.";
}
// Converts a [start, stop] nanosecond interval to microseconds, checking
// (in debug builds) that the result fits in an int.
int GetElapsedTimeMicroseconds(int64_t start_ns, int64_t stop_ns) {
  const int64_t elapsed_us = (stop_ns - start_ns) / rtc::kNumNanosecsPerMicrosec;
  RTC_DCHECK_GE(elapsed_us, std::numeric_limits<int>::min());
  RTC_DCHECK_LE(elapsed_us, std::numeric_limits<int>::max());
  return static_cast<int>(elapsed_us);
}
} // namespace
// Maps an ExcludeFrameTypes value to its human-readable name; unknown
// values are a programming error and yield "Unknown".
const char* ExcludeFrameTypesToStr(ExcludeFrameTypes e) {
  if (e == kExcludeOnlyFirstKeyFrame)
    return "ExcludeOnlyFirstKeyFrame";
  if (e == kExcludeAllKeyFrames)
    return "ExcludeAllKeyFrames";
  RTC_NOTREACHED();
  return "Unknown";
}
// Wires together the codec pair, file readers/writers and the packet
// manipulator. None of the injected pointers are owned. |config| is copied;
// the bitrate allocator is built from the copy so any pointers it plants in
// the codec settings (e.g. VP8 tl_factory) stay valid for this object's
// lifetime. The -1 sentinels mean "no frame / no rate update yet".
VideoProcessor::VideoProcessor(webrtc::VideoEncoder* encoder,
                               webrtc::VideoDecoder* decoder,
                               FrameReader* analysis_frame_reader,
                               FrameWriter* analysis_frame_writer,
                               PacketManipulator* packet_manipulator,
                               const TestConfig& config,
                               Stats* stats,
                               IvfFileWriter* encoded_frame_writer,
                               FrameWriter* decoded_frame_writer)
    : initialized_(false),
      config_(config),
      encoder_(encoder),
      decoder_(decoder),
      bitrate_allocator_(CreateBitrateAllocator(&config_)),
      encode_callback_(this),
      decode_callback_(this),
      packet_manipulator_(packet_manipulator),
      analysis_frame_reader_(analysis_frame_reader),
      analysis_frame_writer_(analysis_frame_writer),
      encoded_frame_writer_(encoded_frame_writer),
      decoded_frame_writer_(decoded_frame_writer),
      last_inputed_frame_num_(-1),
      last_encoded_frame_num_(-1),
      last_decoded_frame_num_(-1),
      first_key_frame_has_been_excluded_(false),
      last_decoded_frame_buffer_(analysis_frame_reader->FrameLength()),
      stats_(stats),
      rate_update_index_(-1) {
  RTC_DCHECK(encoder);
  RTC_DCHECK(decoder);
  RTC_DCHECK(packet_manipulator);
  RTC_DCHECK(analysis_frame_reader);
  RTC_DCHECK(analysis_frame_writer);
  RTC_DCHECK(stats);
}
VideoProcessor::~VideoProcessor() = default;
// Registers the encode/decode callbacks and initializes both codecs; must be
// called exactly once before ProcessFrame(), and again only after Release().
// Optionally dumps the configuration to stdout when |verbose| is set.
void VideoProcessor::Init() {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
  RTC_DCHECK(!initialized_) << "VideoProcessor already initialized.";
  initialized_ = true;

  // Setup required callbacks for the encoder and decoder.
  RTC_CHECK_EQ(encoder_->RegisterEncodeCompleteCallback(&encode_callback_),
               WEBRTC_VIDEO_CODEC_OK)
      << "Failed to register encode complete callback";
  RTC_CHECK_EQ(decoder_->RegisterDecodeCompleteCallback(&decode_callback_),
               WEBRTC_VIDEO_CODEC_OK)
      << "Failed to register decode complete callback";

  // Initialize the encoder and decoder.
  uint32_t num_cores =
      config_.use_single_core ? 1 : CpuInfo::DetectNumberOfCores();
  RTC_CHECK_EQ(
      encoder_->InitEncode(&config_.codec_settings, num_cores,
                           config_.networking_config.max_payload_size_in_bytes),
      WEBRTC_VIDEO_CODEC_OK)
      << "Failed to initialize VideoEncoder";

  RTC_CHECK_EQ(decoder_->InitDecode(&config_.codec_settings, num_cores),
               WEBRTC_VIDEO_CODEC_OK)
      << "Failed to initialize VideoDecoder";

  if (config_.verbose) {
    printf("Video Processor:\n");
    printf("  Filename         : %s\n", config_.filename.c_str());
    printf("  Total # of frames: %d\n",
           analysis_frame_reader_->NumberOfFrames());
    printf("  # CPU cores used : %d\n", num_cores);
    const char* encoder_name = encoder_->ImplementationName();
    printf("  Encoder implementation name: %s\n", encoder_name);
    const char* decoder_name = decoder_->ImplementationName();
    printf("  Decoder implementation name: %s\n", decoder_name);
    if (strcmp(encoder_name, decoder_name) == 0) {
      printf("  Codec implementation name  : %s_%s\n",
             CodecTypeToPayloadString(config_.codec_settings.codecType),
             encoder_->ImplementationName());
    }
    PrintCodecSettings(config_.codec_settings);
    printf("\n");
  }
}
// Releases codec resources and unregisters the callbacks, returning the
// processor to the uninitialized state so Init() may be called again.
void VideoProcessor::Release() {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);

  RTC_CHECK_EQ(encoder_->Release(), WEBRTC_VIDEO_CODEC_OK);
  RTC_CHECK_EQ(decoder_->Release(), WEBRTC_VIDEO_CODEC_OK);

  encoder_->RegisterEncodeCompleteCallback(nullptr);
  decoder_->RegisterDecodeCompleteCallback(nullptr);

  initialized_ = false;
}
// Reads the next frame from the source file, stamps it with a synthetic RTP
// timestamp derived from the frame number, optionally forces a keyframe per
// |keyframe_interval|, records the encode start time and hands the frame to
// the encoder. Encode failures are logged but not fatal here.
void VideoProcessor::ProcessFrame() {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
  RTC_DCHECK(initialized_) << "VideoProcessor not initialized.";
  ++last_inputed_frame_num_;

  // Get frame from file.
  rtc::scoped_refptr<I420BufferInterface> buffer(
      analysis_frame_reader_->ReadFrame());
  RTC_CHECK(buffer) << "Tried to read too many frames from the file.";
  // Use the frame number as the basis for timestamp to identify frames. Let the
  // first timestamp be non-zero, to not make the IvfFileWriter believe that we
  // want to use capture timestamps in the IVF files.
  const uint32_t rtp_timestamp = (last_inputed_frame_num_ + 1) *
                                 kRtpClockRateHz /
                                 config_.codec_settings.maxFramerate;
  rtp_timestamp_to_frame_num_[rtp_timestamp] = last_inputed_frame_num_;
  const int64_t kNoRenderTime = 0;
  VideoFrame source_frame(buffer, rtp_timestamp, kNoRenderTime,
                          webrtc::kVideoRotation_0);

  // Decide if we are going to force a keyframe.
  std::vector<FrameType> frame_types(1, kVideoFrameDelta);
  if (config_.keyframe_interval > 0 &&
      last_inputed_frame_num_ % config_.keyframe_interval == 0) {
    frame_types[0] = kVideoFrameKey;
  }

  // Create frame statistics object used for aggregation at end of test run.
  FrameStatistic* frame_stat = stats_->AddFrame();

  // For the highest measurement accuracy of the encode time, the start/stop
  // time recordings should wrap the Encode call as tightly as possible.
  frame_stat->encode_start_ns = rtc::TimeNanos();
  frame_stat->encode_return_code =
      encoder_->Encode(source_frame, nullptr, &frame_types);

  if (frame_stat->encode_return_code != WEBRTC_VIDEO_CODEC_OK) {
    LOG(LS_WARNING) << "Failed to encode frame " << last_inputed_frame_num_
                    << ", return code: " << frame_stat->encode_return_code
                    << ".";
  }
}
// Pushes a new target bitrate (kbps) and frame rate to the encoder and opens
// a new rate-update interval for the dropped-frame / spatial-resize counters.
void VideoProcessor::SetRates(int bitrate_kbps, int framerate_fps) {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
  config_.codec_settings.maxFramerate = framerate_fps;
  // GetAllocation() takes bits per second, hence the * 1000.
  int set_rates_result = encoder_->SetRateAllocation(
      bitrate_allocator_->GetAllocation(bitrate_kbps * 1000, framerate_fps),
      framerate_fps);
  RTC_DCHECK_GE(set_rates_result, 0)
      << "Failed to update encoder with new rate " << bitrate_kbps << ".";
  ++rate_update_index_;
  num_dropped_frames_.push_back(0);
  num_spatial_resizes_.push_back(0);
}
// One entry per SetRates() call: frames dropped during that rate interval.
std::vector<int> VideoProcessor::NumberDroppedFramesPerRateUpdate() const {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
  return num_dropped_frames_;
}
// One entry per SetRates() call: spatial resizes during that rate interval.
std::vector<int> VideoProcessor::NumberSpatialResizesPerRateUpdate() const {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
  return num_spatial_resizes_;
}
void VideoProcessor::FrameEncoded(webrtc::VideoCodecType codec,
const EncodedImage& encoded_image) {
RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
// For the highest measurement accuracy of the encode time, the start/stop
// time recordings should wrap the Encode call as tightly as possible.
int64_t encode_stop_ns = rtc::TimeNanos();
// Take the opportunity to verify the QP bitstream parser.
VerifyQpParser(encoded_image, config_);
// Check for dropped frames.
const int frame_number =
rtp_timestamp_to_frame_num_[encoded_image._timeStamp];
bool last_frame_missing = false;
if (frame_number > 0) {
RTC_DCHECK_GE(last_encoded_frame_num_, 0);
int num_dropped_from_last_encode =
frame_number - last_encoded_frame_num_ - 1;
RTC_DCHECK_GE(num_dropped_from_last_encode, 0);
RTC_CHECK_GE(rate_update_index_, 0);
num_dropped_frames_[rate_update_index_] += num_dropped_from_last_encode;
if (num_dropped_from_last_encode > 0) {
// For dropped frames, we write out the last decoded frame to avoid
// getting out of sync for the computation of PSNR and SSIM.
for (int i = 0; i < num_dropped_from_last_encode; i++) {
RTC_DCHECK_EQ(last_decoded_frame_buffer_.size(),
analysis_frame_writer_->FrameLength());
RTC_CHECK(analysis_frame_writer_->WriteFrame(
last_decoded_frame_buffer_.data()));
if (decoded_frame_writer_) {
RTC_DCHECK_EQ(last_decoded_frame_buffer_.size(),
decoded_frame_writer_->FrameLength());
RTC_CHECK(decoded_frame_writer_->WriteFrame(
last_decoded_frame_buffer_.data()));
}
}
}
const FrameStatistic* last_encoded_frame_stat =
stats_->GetFrame(last_encoded_frame_num_);
last_frame_missing = (last_encoded_frame_stat->manipulated_length == 0);
}
// Ensure strict monotonicity.
RTC_CHECK_GT(frame_number, last_encoded_frame_num_);
last_encoded_frame_num_ = frame_number;
// Update frame statistics.
FrameStatistic* frame_stat = stats_->GetFrame(frame_number);
frame_stat->encode_time_us =
GetElapsedTimeMicroseconds(frame_stat->encode_start_ns, encode_stop_ns);
frame_stat->encoding_successful = true;
frame_stat->encoded_frame_size_bytes = encoded_image._length;
frame_stat->frame_type = encoded_image._frameType;
frame_stat->qp = encoded_image.qp_;
frame_stat->bitrate_kbps = static_cast<int>(
encoded_image._length * config_.codec_settings.maxFramerate * 8 / 1000);
frame_stat->total_packets =
encoded_image._length / config_.networking_config.packet_size_in_bytes +
1;
// Simulate packet loss.
bool exclude_this_frame = false;
if (encoded_image._frameType == kVideoFrameKey) {
// Only keyframes can be excluded.
switch (config_.exclude_frame_types) {
case kExcludeOnlyFirstKeyFrame:
if (!first_key_frame_has_been_excluded_) {
first_key_frame_has_been_excluded_ = true;
exclude_this_frame = true;
}
break;
case kExcludeAllKeyFrames:
exclude_this_frame = true;
break;
default:
RTC_NOTREACHED();
}
}
// Make a raw copy of the |encoded_image| buffer.
size_t copied_buffer_size = encoded_image._length +
EncodedImage::GetBufferPaddingBytes(codec);
std::unique_ptr<uint8_t[]> copied_buffer(new uint8_t[copied_buffer_size]);
memcpy(copied_buffer.get(), encoded_image._buffer, encoded_image._length);
// The image to feed to the decoder.
EncodedImage copied_image;
memcpy(&copied_image, &encoded_image, sizeof(copied_image));
copied_image._size = copied_buffer_size;
copied_image._buffer = copied_buffer.get();
if (!exclude_this_frame) {
frame_stat->packets_dropped =
packet_manipulator_->ManipulatePackets(&copied_image);
}
frame_stat->manipulated_length = copied_image._length;
// For the highest measurement accuracy of the decode time, the start/stop
// time recordings should wrap the Decode call as tightly as possible.
frame_stat->decode_start_ns = rtc::TimeNanos();
frame_stat->decode_return_code =
decoder_->Decode(copied_image, last_frame_missing, nullptr);
if (frame_stat->decode_return_code != WEBRTC_VIDEO_CODEC_OK) {
// Write the last successful frame the output file to avoid getting it out
// of sync with the source file for SSIM and PSNR comparisons.
RTC_DCHECK_EQ(last_decoded_frame_buffer_.size(),
analysis_frame_writer_->FrameLength());
RTC_CHECK(
analysis_frame_writer_->WriteFrame(last_decoded_frame_buffer_.data()));
if (decoded_frame_writer_) {
RTC_DCHECK_EQ(last_decoded_frame_buffer_.size(),
decoded_frame_writer_->FrameLength());
RTC_CHECK(
decoded_frame_writer_->WriteFrame(last_decoded_frame_buffer_.data()));
}
}
if (encoded_frame_writer_) {
RTC_CHECK(encoded_frame_writer_->WriteFrame(encoded_image, codec));
}
}
// Invoked when the decoder finishes a frame (on the VideoProcessor's task
// queue). Records decode statistics for the frame, counts codec-initiated
// spatial resizes, writes the decoded frame to the analysis (and optional
// decoded) frame writers, and caches it for re-use on later drops/failures.
void VideoProcessor::FrameDecoded(const VideoFrame& image) {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);

  // For the highest measurement accuracy of the decode time, the start/stop
  // time recordings should wrap the Decode call as tightly as possible.
  int64_t decode_stop_ns = rtc::TimeNanos();

  // Update frame statistics. The frame number is recovered from the RTP
  // timestamp recorded when the frame was fed to the encoder.
  const int frame_number = rtp_timestamp_to_frame_num_[image.timestamp()];
  FrameStatistic* frame_stat = stats_->GetFrame(frame_number);
  frame_stat->decoded_width = image.width();
  frame_stat->decoded_height = image.height();
  frame_stat->decode_time_us =
      GetElapsedTimeMicroseconds(frame_stat->decode_start_ns, decode_stop_ns);
  frame_stat->decoding_successful = true;

  // Check if the codecs have resized the frame since previously decoded frame.
  if (frame_number > 0) {
    RTC_CHECK_GE(last_decoded_frame_num_, 0);
    const FrameStatistic* last_decoded_frame_stat =
        stats_->GetFrame(last_decoded_frame_num_);
    if (static_cast<int>(image.width()) !=
            last_decoded_frame_stat->decoded_width ||
        static_cast<int>(image.height()) !=
            last_decoded_frame_stat->decoded_height) {
      RTC_CHECK_GE(rate_update_index_, 0);
      ++num_spatial_resizes_[rate_update_index_];
    }
  }
  // Ensure strict monotonicity.
  RTC_CHECK_GT(frame_number, last_decoded_frame_num_);
  last_decoded_frame_num_ = frame_number;

  // Check if frame size is different from the original size, and if so,
  // scale back to original size. This is needed for the PSNR and SSIM
  // calculations.
  size_t extracted_length;
  rtc::Buffer extracted_buffer;
  if (image.width() != config_.codec_settings.width ||
      image.height() != config_.codec_settings.height) {
    rtc::scoped_refptr<I420Buffer> scaled_buffer(I420Buffer::Create(
        config_.codec_settings.width, config_.codec_settings.height));
    // Should be the same aspect ratio, no cropping needed.
    scaled_buffer->ScaleFrom(*image.video_frame_buffer()->ToI420());

    size_t length = CalcBufferSize(VideoType::kI420, scaled_buffer->width(),
                                   scaled_buffer->height());
    extracted_buffer.SetSize(length);
    extracted_length =
        ExtractBuffer(scaled_buffer, length, extracted_buffer.data());
  } else {
    // No resize.
    size_t length =
        CalcBufferSize(VideoType::kI420, image.width(), image.height());
    extracted_buffer.SetSize(length);
    extracted_length = ExtractBuffer(image.video_frame_buffer()->ToI420(),
                                     length, extracted_buffer.data());
  }

  RTC_DCHECK_EQ(extracted_length, analysis_frame_writer_->FrameLength());
  RTC_CHECK(analysis_frame_writer_->WriteFrame(extracted_buffer.data()));
  if (decoded_frame_writer_) {
    RTC_DCHECK_EQ(extracted_length, decoded_frame_writer_->FrameLength());
    RTC_CHECK(decoded_frame_writer_->WriteFrame(extracted_buffer.data()));
  }

  // Keep the last decoded frame around: FrameEncoded() writes it out again
  // for dropped frames and decode failures, to keep the analysis file in
  // sync with the source file for PSNR/SSIM computation.
  last_decoded_frame_buffer_ = std::move(extracted_buffer);
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,313 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_H_
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "webrtc/api/video/video_frame.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_coding/codecs/test/packet_manipulator.h"
#include "webrtc/modules/video_coding/codecs/test/stats.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/utility/ivf_file_writer.h"
#include "webrtc/modules/video_coding/utility/vp8_header_parser.h"
#include "webrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.h"
#include "webrtc/rtc_base/buffer.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/rtc_base/constructormagic.h"
#include "webrtc/rtc_base/sequenced_task_checker.h"
#include "webrtc/rtc_base/task_queue.h"
#include "webrtc/test/testsupport/frame_reader.h"
#include "webrtc/test/testsupport/frame_writer.h"
namespace webrtc {
class VideoBitrateAllocator;
namespace test {
// Defines which frame types shall be excluded from packet loss and when.
// Used by the packet loss simulation in TestConfig::exclude_frame_types.
enum ExcludeFrameTypes {
  // Will exclude the first keyframe in the video sequence from packet loss.
  // Following keyframes will be targeted for packet loss.
  kExcludeOnlyFirstKeyFrame,
  // Exclude all keyframes from packet loss, no matter where in the video
  // sequence they occur.
  kExcludeAllKeyFrames
};
// Returns a string representation of the enum value.
const char* ExcludeFrameTypesToStr(ExcludeFrameTypes e);
// Test configuration for a test run.
struct TestConfig {
  // Plain name of YUV file to process without file extension.
  std::string filename;

  // File to process. This must be a video file in the YUV format.
  std::string input_filename;

  // File to write to during processing for the test. Will be a video file
  // in the YUV format.
  std::string output_filename;

  // Configurations related to networking.
  NetworkingConfig networking_config;

  // Decides how the packet loss simulations shall exclude certain frames
  // from packet loss.
  ExcludeFrameTypes exclude_frame_types = kExcludeOnlyFirstKeyFrame;

  // Force the encoder and decoder to use a single core for processing.
  // Using a single core is necessary to get a deterministic behavior for the
  // encoded frames - using multiple cores will produce different encoded frames
  // since multiple cores are competing to consume the byte budget for each
  // frame in parallel.
  // If set to false, the maximum number of available cores will be used.
  bool use_single_core = false;

  // If > 0: forces the encoder to create a keyframe every Nth frame.
  // Note that the encoder may create a keyframe in other locations in addition
  // to this setting. Forcing key frames may also affect encoder planning
  // optimizations in a negative way, since it will suddenly be forced to
  // produce an expensive key frame.
  int keyframe_interval = 0;

  // The codec settings to use for the test (target bitrate, video size,
  // framerate and so on). This struct should be filled in using the
  // VideoCodingModule::Codec() method.
  webrtc::VideoCodec codec_settings;

  // If printing of information to stdout shall be performed during processing.
  bool verbose = true;

  // Should hardware accelerated codecs be used?
  bool hw_encoder = false;
  bool hw_decoder = false;

  // Should the hardware codecs be wrapped in software fallbacks?
  bool sw_fallback_encoder = false;
  // TODO(brandtr): Add support for SW decoder fallbacks, when
  // webrtc::VideoDecoder's can be wrapped in std::unique_ptr's.
};
// Handles encoding/decoding of video using the VideoEncoder/VideoDecoder
// interfaces. This is done in a sequential manner in order to be able to
// measure times properly.
// The class processes a frame at the time for the configured input file.
// It maintains state of where in the source input file the processing is at.
//
// Regarding packet loss: Note that keyframes are excluded (first or all
// depending on the ExcludeFrameTypes setting). This is because if key frames
// would be altered, all the following delta frames would be pretty much
// worthless. VP8 has an error-resilience feature that makes it able to handle
// packet loss in key non-first keyframes, which is why only the first is
// excluded by default.
// Packet loss in such important frames is handled on a higher level in the
// Video Engine, where signaling would request a retransmit of the lost packets,
// since they're so important.
//
// Note this class is not thread safe in any way and is meant for simple testing
// purposes.
class VideoProcessor {
 public:
  VideoProcessor(webrtc::VideoEncoder* encoder,
                 webrtc::VideoDecoder* decoder,
                 FrameReader* analysis_frame_reader,
                 FrameWriter* analysis_frame_writer,
                 PacketManipulator* packet_manipulator,
                 const TestConfig& config,
                 Stats* stats,
                 IvfFileWriter* encoded_frame_writer,
                 FrameWriter* decoded_frame_writer);
  ~VideoProcessor();

  // Sets up callbacks and initializes the encoder and decoder.
  void Init();

  // Tears down callbacks and releases the encoder and decoder.
  void Release();

  // Reads a frame from the analysis frame reader and sends it to the encoder.
  // When the encode callback is received, the encoded frame is sent to the
  // decoder. The decoded frame is written to disk by the analysis frame writer.
  // Objective video quality metrics can thus be calculated after the fact.
  void ProcessFrame();

  // Updates the encoder with target rates. Must be called at least once.
  void SetRates(int bitrate_kbps, int framerate_fps);

  // Returns the number of dropped frames.
  std::vector<int> NumberDroppedFramesPerRateUpdate() const;

  // Returns the number of spatial resizes.
  std::vector<int> NumberSpatialResizesPerRateUpdate() const;

 private:
  // Adapter for the encoder callback. If the callback arrives on a thread
  // other than the task queue this adapter was constructed on, the result is
  // reposted to that queue so that all processing stays sequential.
  class VideoProcessorEncodeCompleteCallback
      : public webrtc::EncodedImageCallback {
   public:
    explicit VideoProcessorEncodeCompleteCallback(
        VideoProcessor* video_processor)
        : video_processor_(video_processor),
          task_queue_(rtc::TaskQueue::Current()) {}

    Result OnEncodedImage(
        const webrtc::EncodedImage& encoded_image,
        const webrtc::CodecSpecificInfo* codec_specific_info,
        const webrtc::RTPFragmentationHeader* fragmentation) override {
      RTC_CHECK(codec_specific_info);

      if (task_queue_ && !task_queue_->IsCurrent()) {
        // Repost on the construction task queue. The task deep-copies the
        // encoded bitstream, since |encoded_image| does not outlive this call.
        task_queue_->PostTask(
            std::unique_ptr<rtc::QueuedTask>(new EncodeCallbackTask(
                video_processor_, encoded_image, codec_specific_info)));
        return Result(Result::OK, 0);
      }
      video_processor_->FrameEncoded(codec_specific_info->codecType,
                                     encoded_image);
      return Result(Result::OK, 0);
    }

   private:
    // Task that carries an encoded frame across threads. It owns a copy of
    // the bitstream in |buffer_| and repoints the shallow-copied
    // EncodedImage at that copy.
    class EncodeCallbackTask : public rtc::QueuedTask {
     public:
      EncodeCallbackTask(VideoProcessor* video_processor,
                         const webrtc::EncodedImage& encoded_image,
                         const webrtc::CodecSpecificInfo* codec_specific_info)
          : video_processor_(video_processor),
            buffer_(encoded_image._buffer, encoded_image._length),
            encoded_image_(encoded_image),
            codec_specific_info_(*codec_specific_info) {
        // Make the copied image point into the owned buffer copy instead of
        // the encoder-owned memory.
        encoded_image_._buffer = buffer_.data();
      }

      bool Run() override {
        video_processor_->FrameEncoded(codec_specific_info_.codecType,
                                       encoded_image_);
        return true;
      }

     private:
      VideoProcessor* const video_processor_;
      rtc::Buffer buffer_;
      webrtc::EncodedImage encoded_image_;
      const webrtc::CodecSpecificInfo codec_specific_info_;
    };

    VideoProcessor* const video_processor_;
    rtc::TaskQueue* const task_queue_;
  };

  // Adapter for the decoder callback; reposts to the construction task queue
  // when needed, analogous to the encode adapter above.
  class VideoProcessorDecodeCompleteCallback
      : public webrtc::DecodedImageCallback {
   public:
    explicit VideoProcessorDecodeCompleteCallback(
        VideoProcessor* video_processor)
        : video_processor_(video_processor),
          task_queue_(rtc::TaskQueue::Current()) {}

    int32_t Decoded(webrtc::VideoFrame& image) override {
      if (task_queue_ && !task_queue_->IsCurrent()) {
        // |image| is captured by value so it stays alive until the posted
        // task has run.
        task_queue_->PostTask(
            [this, image]() { video_processor_->FrameDecoded(image); });
        return 0;
      }
      video_processor_->FrameDecoded(image);
      return 0;
    }

    int32_t Decoded(webrtc::VideoFrame& image,
                    int64_t decode_time_ms) override {
      return Decoded(image);
    }

    void Decoded(webrtc::VideoFrame& image,
                 rtc::Optional<int32_t> decode_time_ms,
                 rtc::Optional<uint8_t> qp) override {
      Decoded(image);
    }

   private:
    VideoProcessor* const video_processor_;
    rtc::TaskQueue* const task_queue_;
  };

  // Invoked by the callback adapter when a frame has completed encoding.
  void FrameEncoded(webrtc::VideoCodecType codec,
                    const webrtc::EncodedImage& encodedImage);

  // Invoked by the callback adapter when a frame has completed decoding.
  void FrameDecoded(const webrtc::VideoFrame& image);

  bool initialized_ RTC_GUARDED_BY(sequence_checker_);
  TestConfig config_ RTC_GUARDED_BY(sequence_checker_);

  // Codecs under test (raw, non-owning pointers; lifetime is managed by the
  // creator of this VideoProcessor).
  webrtc::VideoEncoder* const encoder_;
  webrtc::VideoDecoder* const decoder_;
  const std::unique_ptr<VideoBitrateAllocator> bitrate_allocator_;

  // Adapters for the codec callbacks.
  VideoProcessorEncodeCompleteCallback encode_callback_;
  VideoProcessorDecodeCompleteCallback decode_callback_;

  // Fake network.
  PacketManipulator* const packet_manipulator_;

  // These (mandatory) file manipulators are used for, e.g., objective PSNR and
  // SSIM calculations at the end of a test run.
  FrameReader* const analysis_frame_reader_;
  FrameWriter* const analysis_frame_writer_;

  // These (optional) file writers are used to persistently store the encoded
  // and decoded bitstreams. The purpose is to give the experimenter an option
  // to subjectively evaluate the quality of the processing. Each frame writer
  // is enabled by being non-null.
  IvfFileWriter* const encoded_frame_writer_;
  FrameWriter* const decoded_frame_writer_;

  // Keep track of inputed/encoded/decoded frames, so we can detect frame drops.
  int last_inputed_frame_num_ RTC_GUARDED_BY(sequence_checker_);
  int last_encoded_frame_num_ RTC_GUARDED_BY(sequence_checker_);
  int last_decoded_frame_num_ RTC_GUARDED_BY(sequence_checker_);

  // Store an RTP timestamp -> frame number map, since the timestamps are
  // based off of the frame rate, which can change mid-test.
  std::map<uint32_t, int> rtp_timestamp_to_frame_num_
      RTC_GUARDED_BY(sequence_checker_);

  // Keep track of if we have excluded the first key frame from packet loss.
  bool first_key_frame_has_been_excluded_ RTC_GUARDED_BY(sequence_checker_);

  // Keep track of the last successfully decoded frame, since we write that
  // frame to disk when decoding fails.
  rtc::Buffer last_decoded_frame_buffer_ RTC_GUARDED_BY(sequence_checker_);

  // Statistics.
  Stats* stats_;
  std::vector<int> num_dropped_frames_ RTC_GUARDED_BY(sequence_checker_);
  std::vector<int> num_spatial_resizes_ RTC_GUARDED_BY(sequence_checker_);
  int rate_update_index_ RTC_GUARDED_BY(sequence_checker_);

  rtc::SequencedTaskChecker sequence_checker_;

  RTC_DISALLOW_COPY_AND_ASSIGN(VideoProcessor);
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_H_

View File

@ -0,0 +1,657 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.h"
#include <utility>
#if defined(WEBRTC_ANDROID)
#include "webrtc/modules/video_coding/codecs/test/android_test_initializer.h"
#include "webrtc/sdk/android/src/jni/androidmediadecoder_jni.h"
#include "webrtc/sdk/android/src/jni/androidmediaencoder_jni.h"
#elif defined(WEBRTC_IOS)
#include "webrtc/modules/video_coding/codecs/test/objc_codec_h264_test.h"
#endif
#include "webrtc/media/engine/internaldecoderfactory.h"
#include "webrtc/media/engine/internalencoderfactory.h"
#include "webrtc/media/engine/videoencodersoftwarefallbackwrapper.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/include/video_coding.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/rtc_base/event.h"
#include "webrtc/rtc_base/file.h"
#include "webrtc/rtc_base/logging.h"
#include "webrtc/rtc_base/ptr_util.h"
#include "webrtc/system_wrappers/include/sleep.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/metrics/video_metrics.h"
#include "webrtc/test/video_codec_settings.h"
namespace webrtc {
namespace test {
namespace {
const int kPercTargetvsActualMismatch = 20;
const int kBaseKeyFrameInterval = 3000;
// Parameters from VP8 wrapper, which control target size of key frames.
const float kInitialBufferSize = 0.5f;
const float kOptimalBufferSize = 0.6f;
const float kScaleKeyFrameSize = 0.5f;
// Verifies that the measured PSNR/SSIM averages and minima exceed the given
// thresholds. Uses EXPECT_* (non-fatal), so all four checks are always run.
void VerifyQuality(const QualityMetricsResult& psnr_result,
                   const QualityMetricsResult& ssim_result,
                   const QualityThresholds& quality_thresholds) {
  EXPECT_GT(psnr_result.average, quality_thresholds.min_avg_psnr);
  EXPECT_GT(psnr_result.min, quality_thresholds.min_min_psnr);
  EXPECT_GT(ssim_result.average, quality_thresholds.min_avg_ssim);
  EXPECT_GT(ssim_result.min, quality_thresholds.min_min_ssim);
}
// Returns the configured number of temporal layers for VP8/VP9, or 1 for
// codec types without a temporal-layer setting.
int NumberOfTemporalLayers(const VideoCodec& codec_settings) {
  switch (codec_settings.codecType) {
    case kVideoCodecVP8:
      return codec_settings.VP8().numberOfTemporalLayers;
    case kVideoCodecVP9:
      return codec_settings.VP9().numberOfTemporalLayers;
    default:
      return 1;
  }
}
} // namespace
// On Android, initializes the global objects needed by the HW codecs (see
// android_test_initializer.h); a no-op on other platforms.
VideoProcessorIntegrationTest::VideoProcessorIntegrationTest() {
#if defined(WEBRTC_ANDROID)
  InitializeAndroidObjects();
#endif
}

VideoProcessorIntegrationTest::~VideoProcessorIntegrationTest() = default;
// Fills |config->codec_settings| with the webrtc::test defaults for
// |codec_type| and then applies the given per-codec overrides. Options not
// supported by a codec type are ignored for that codec.
void VideoProcessorIntegrationTest::SetCodecSettings(TestConfig* config,
                                                     VideoCodecType codec_type,
                                                     int num_temporal_layers,
                                                     bool error_concealment_on,
                                                     bool denoising_on,
                                                     bool frame_dropper_on,
                                                     bool spatial_resize_on,
                                                     bool resilience_on,
                                                     int width,
                                                     int height) {
  webrtc::test::CodecSettings(codec_type, &config->codec_settings);

  // TODO(brandtr): Move the setting of |width| and |height| to the tests, and
  // DCHECK that they are set before initializing the codec instead.
  config->codec_settings.width = width;
  config->codec_settings.height = height;

  switch (config->codec_settings.codecType) {
    case kVideoCodecVP8:
      config->codec_settings.VP8()->resilience =
          resilience_on ? kResilientStream : kResilienceOff;
      config->codec_settings.VP8()->numberOfTemporalLayers =
          num_temporal_layers;
      config->codec_settings.VP8()->denoisingOn = denoising_on;
      config->codec_settings.VP8()->errorConcealmentOn = error_concealment_on;
      config->codec_settings.VP8()->automaticResizeOn = spatial_resize_on;
      config->codec_settings.VP8()->frameDroppingOn = frame_dropper_on;
      config->codec_settings.VP8()->keyFrameInterval = kBaseKeyFrameInterval;
      break;
    case kVideoCodecVP9:
      config->codec_settings.VP9()->resilienceOn = resilience_on;
      config->codec_settings.VP9()->numberOfTemporalLayers =
          num_temporal_layers;
      config->codec_settings.VP9()->denoisingOn = denoising_on;
      config->codec_settings.VP9()->frameDroppingOn = frame_dropper_on;
      config->codec_settings.VP9()->keyFrameInterval = kBaseKeyFrameInterval;
      config->codec_settings.VP9()->automaticResizeOn = spatial_resize_on;
      break;
    case kVideoCodecH264:
      // H264 only exposes frame dropping and keyframe interval here.
      config->codec_settings.H264()->frameDroppingOn = frame_dropper_on;
      config->codec_settings.H264()->keyFrameInterval = kBaseKeyFrameInterval;
      break;
    default:
      RTC_NOTREACHED();
      break;
  }
}
// Writes one entry of |rate_profile|: from frame |frame_index_rate_update|
// onwards, the target bitrate and input framerate given by |bitrate_kbps| and
// |framerate_fps| apply.
void VideoProcessorIntegrationTest::SetRateProfile(
    RateProfile* rate_profile,
    int rate_update_index,
    int bitrate_kbps,
    int framerate_fps,
    int frame_index_rate_update) {
  RateProfile& profile = *rate_profile;
  profile.target_bit_rate[rate_update_index] = bitrate_kbps;
  profile.input_frame_rate[rate_update_index] = framerate_fps;
  profile.frame_index_rate_update[rate_update_index] = frame_index_rate_update;
}
void VideoProcessorIntegrationTest::AddRateControlThresholds(
int max_num_dropped_frames,
int max_key_frame_size_mismatch,
int max_delta_frame_size_mismatch,
int max_encoding_rate_mismatch,
int max_time_hit_target,
int num_spatial_resizes,
int num_key_frames,
std::vector<RateControlThresholds>* rc_thresholds) {
RTC_DCHECK(rc_thresholds);
rc_thresholds->emplace_back();
RateControlThresholds* rc_threshold = &rc_thresholds->back();
rc_threshold->max_num_dropped_frames = max_num_dropped_frames;
rc_threshold->max_key_frame_size_mismatch = max_key_frame_size_mismatch;
rc_threshold->max_delta_frame_size_mismatch = max_delta_frame_size_mismatch;
rc_threshold->max_encoding_rate_mismatch = max_encoding_rate_mismatch;
rc_threshold->max_time_hit_target = max_time_hit_target;
rc_threshold->num_spatial_resizes = num_spatial_resizes;
rc_threshold->num_key_frames = num_key_frames;
}
// Processes all frames in the clip and verifies the result.
void VideoProcessorIntegrationTest::ProcessFramesAndMaybeVerify(
    const RateProfile& rate_profile,
    const std::vector<RateControlThresholds>* rc_thresholds,
    const QualityThresholds* quality_thresholds,
    const VisualizationParams* visualization_params) {
  // The Android HW codec needs to be run on a task queue, so we simply always
  // run the test on a task queue.
  rtc::TaskQueue task_queue("VidProc TQ");
  rtc::Event sync_event(false, false);

  SetUpAndInitObjects(&task_queue, rate_profile.target_bit_rate[0],
                      rate_profile.input_frame_rate[0], visualization_params);

  // Set initial rates. Note that |rate_update_index| is captured by value so
  // the lambda reads a stable snapshot even if it runs later.
  int rate_update_index = 0;
  task_queue.PostTask([this, &rate_profile, rate_update_index] {
    processor_->SetRates(rate_profile.target_bit_rate[rate_update_index],
                         rate_profile.input_frame_rate[rate_update_index]);
  });

  // Process all frames.
  int frame_number = 0;
  const int num_frames = rate_profile.num_frames;
  RTC_DCHECK_GE(num_frames, 1);
  while (frame_number < num_frames) {
    // In order to not overwhelm the OpenMAX buffers in the Android
    // MediaCodec API, we roughly pace the frames here. The downside
    // of this is that the encode run will be done in real-time.
#if defined(WEBRTC_ANDROID)
    if (config_.hw_encoder || config_.hw_decoder) {
      SleepMs(rtc::kNumMillisecsPerSec /
              rate_profile.input_frame_rate[rate_update_index]);
    }
#endif

    task_queue.PostTask([this] { processor_->ProcessFrame(); });
    ++frame_number;

    // Apply the next rate update when its frame index is reached.
    if (frame_number ==
        rate_profile.frame_index_rate_update[rate_update_index + 1]) {
      ++rate_update_index;

      task_queue.PostTask([this, &rate_profile, rate_update_index] {
        processor_->SetRates(rate_profile.target_bit_rate[rate_update_index],
                             rate_profile.input_frame_rate[rate_update_index]);
      });
    }
  }

  // Give the VideoProcessor pipeline some time to process the last frame,
  // and then release the codecs.
  if (config_.hw_encoder || config_.hw_decoder) {
    SleepMs(1 * rtc::kNumMillisecsPerSec);
  }
  ReleaseAndCloseObjects(&task_queue);

  // Calculate and print rate control statistics. The drop/resize counters
  // must be read on the task queue, hence the sync event.
  rate_update_index = 0;
  frame_number = 0;
  ResetRateControlMetrics(rate_update_index, rate_profile);
  std::vector<int> num_dropped_frames;
  std::vector<int> num_resize_actions;
  sync_event.Reset();
  task_queue.PostTask(
      [this, &num_dropped_frames, &num_resize_actions, &sync_event]() {
        num_dropped_frames = processor_->NumberDroppedFramesPerRateUpdate();
        num_resize_actions = processor_->NumberSpatialResizesPerRateUpdate();
        sync_event.Set();
      });
  sync_event.Wait(rtc::Event::kForever);

  // Replay the per-frame stats per rate update interval and verify each
  // interval against its thresholds.
  while (frame_number < num_frames) {
    UpdateRateControlMetrics(frame_number);

    ++frame_number;

    if (frame_number ==
        rate_profile.frame_index_rate_update[rate_update_index + 1]) {
      PrintAndMaybeVerifyRateControlMetrics(rate_update_index, rc_thresholds,
                                            num_dropped_frames,
                                            num_resize_actions);
      ++rate_update_index;
      ResetRateControlMetrics(rate_update_index, rate_profile);
    }
  }
  // Verify the final (still open) rate update interval.
  PrintAndMaybeVerifyRateControlMetrics(rate_update_index, rc_thresholds,
                                        num_dropped_frames, num_resize_actions);

  // Calculate and print other statistics.
  EXPECT_EQ(num_frames, static_cast<int>(stats_.size()));
  stats_.PrintSummary();

  // Calculate and print image quality statistics.
  // TODO(marpan): Should compute these quality metrics per SetRates update.
  QualityMetricsResult psnr_result, ssim_result;
  EXPECT_EQ(0, I420MetricsFromFiles(config_.input_filename.c_str(),
                                    config_.output_filename.c_str(),
                                    config_.codec_settings.width,
                                    config_.codec_settings.height, &psnr_result,
                                    &ssim_result));
  if (quality_thresholds) {
    VerifyQuality(psnr_result, ssim_result, *quality_thresholds);
  }
  printf("PSNR avg: %f, min: %f\nSSIM avg: %f, min: %f\n", psnr_result.average,
         psnr_result.min, ssim_result.average, ssim_result.min);
  printf("\n");

  // Remove analysis file.
  if (remove(config_.output_filename.c_str()) < 0) {
    fprintf(stderr, "Failed to remove temporary file!\n");
  }
}
// Creates the encoder and decoder under test according to |config_|.
// HW codecs are only supported on Android and iOS; SW codecs come from the
// internal factories. Note that |decoder_| is a raw pointer owned by
// |decoder_factory_| and must later be destroyed through that factory.
void VideoProcessorIntegrationTest::CreateEncoderAndDecoder() {
  std::unique_ptr<cricket::WebRtcVideoEncoderFactory> encoder_factory;
  if (config_.hw_encoder) {
#if defined(WEBRTC_ANDROID)
    encoder_factory.reset(new jni::MediaCodecVideoEncoderFactory());
#elif defined(WEBRTC_IOS)
    EXPECT_EQ(kVideoCodecH264, config_.codec_settings.codecType)
        << "iOS HW codecs only support H264.";
    encoder_factory = CreateObjCEncoderFactory();
#else
    RTC_NOTREACHED() << "Only support HW encoder on Android and iOS.";
#endif
  } else {
    encoder_factory.reset(new cricket::InternalEncoderFactory());
  }

  if (config_.hw_decoder) {
#if defined(WEBRTC_ANDROID)
    decoder_factory_.reset(new jni::MediaCodecVideoDecoderFactory());
#elif defined(WEBRTC_IOS)
    EXPECT_EQ(kVideoCodecH264, config_.codec_settings.codecType)
        << "iOS HW codecs only support H264.";
    decoder_factory_ = CreateObjCDecoderFactory();
#else
    RTC_NOTREACHED() << "Only support HW decoder on Android and iOS.";
#endif
  } else {
    decoder_factory_.reset(new cricket::InternalDecoderFactory());
  }

  // The codec type only determines the payload name; encoder/decoder creation
  // is identical for all supported codecs, so select the name here and create
  // the codec objects once below (avoids triplicating the creation code).
  cricket::VideoCodec codec;
  switch (config_.codec_settings.codecType) {
    case kVideoCodecVP8:
      codec = cricket::VideoCodec(cricket::kVp8CodecName);
      break;
    case kVideoCodecVP9:
      codec = cricket::VideoCodec(cricket::kVp9CodecName);
      break;
    case kVideoCodecH264:
      // TODO(brandtr): Generalize so that we support multiple profiles here.
      codec = cricket::VideoCodec(cricket::kH264CodecName);
      break;
    default:
      // Unsupported codec type; |codec| stays empty and creation below is
      // expected to fail, tripping the EXPECT_TRUEs.
      RTC_NOTREACHED();
      break;
  }
  cricket::VideoDecoderParams decoder_params;  // Empty.
  encoder_.reset(encoder_factory->CreateVideoEncoder(codec));
  decoder_ =
      decoder_factory_->CreateVideoDecoderWithParams(codec, decoder_params);

  if (config_.sw_fallback_encoder) {
    encoder_ = rtc::MakeUnique<VideoEncoderSoftwareFallbackWrapper>(
        codec, std::move(encoder_));
  }

  EXPECT_TRUE(encoder_) << "Encoder not successfully created.";
  EXPECT_TRUE(decoder_) << "Decoder not successfully created.";
}
// Destroys the codecs created by CreateEncoderAndDecoder().
void VideoProcessorIntegrationTest::DestroyEncoderAndDecoder() {
  encoder_.reset();
  // |decoder_| is a raw pointer owned by |decoder_factory_|, so it is
  // released through the factory rather than deleted directly.
  decoder_factory_->DestroyVideoDecoder(decoder_);
}
// Creates the codecs, analysis/visualization file objects, and the
// VideoProcessor itself. The VideoProcessor is constructed and initialized on
// |task_queue| (blocking until done), since that is where it will run.
void VideoProcessorIntegrationTest::SetUpAndInitObjects(
    rtc::TaskQueue* task_queue,
    const int initial_bitrate_kbps,
    const int initial_framerate_fps,
    const VisualizationParams* visualization_params) {
  CreateEncoderAndDecoder();

  // Create file objects for quality analysis.
  analysis_frame_reader_.reset(new YuvFrameReaderImpl(
      config_.input_filename, config_.codec_settings.width,
      config_.codec_settings.height));
  analysis_frame_writer_.reset(new YuvFrameWriterImpl(
      config_.output_filename, config_.codec_settings.width,
      config_.codec_settings.height));
  EXPECT_TRUE(analysis_frame_reader_->Init());
  EXPECT_TRUE(analysis_frame_writer_->Init());

  // Optionally persist the encoded (.ivf) and decoded (.y4m) streams for
  // subjective inspection.
  if (visualization_params) {
    const std::string codec_name =
        CodecTypeToPayloadString(config_.codec_settings.codecType);
    const std::string implementation_type = config_.hw_encoder ? "hw" : "sw";
    // clang-format off
    const std::string output_filename_base =
        OutputPath() + config_.filename + "-" +
        codec_name + "-" + implementation_type + "-" +
        std::to_string(initial_bitrate_kbps);
    // clang-format on
    if (visualization_params->save_encoded_ivf) {
      rtc::File post_encode_file =
          rtc::File::Create(output_filename_base + ".ivf");
      encoded_frame_writer_ =
          IvfFileWriter::Wrap(std::move(post_encode_file), 0);
    }
    if (visualization_params->save_decoded_y4m) {
      decoded_frame_writer_.reset(new Y4mFrameWriterImpl(
          output_filename_base + ".y4m", config_.codec_settings.width,
          config_.codec_settings.height, initial_framerate_fps));
      EXPECT_TRUE(decoded_frame_writer_->Init());
    }
  }

  packet_manipulator_.reset(new PacketManipulatorImpl(
      &packet_reader_, config_.networking_config, config_.verbose));

  // Seed the codec settings with the initial rates of the rate profile.
  config_.codec_settings.minBitrate = 0;
  config_.codec_settings.startBitrate = initial_bitrate_kbps;
  config_.codec_settings.maxFramerate = initial_framerate_fps;

  rtc::Event sync_event(false, false);
  task_queue->PostTask([this, &sync_event]() {
    // TODO(brandtr): std::move |encoder_| and |decoder_| into the
    // VideoProcessor when we are able to store |decoder_| in a
    // std::unique_ptr. That is, when https://codereview.webrtc.org/3009973002
    // has been relanded.
    processor_ = rtc::MakeUnique<VideoProcessor>(
        encoder_.get(), decoder_, analysis_frame_reader_.get(),
        analysis_frame_writer_.get(), packet_manipulator_.get(), config_,
        &stats_, encoded_frame_writer_.get(), decoded_frame_writer_.get());
    processor_->Init();
    sync_event.Set();
  });
  sync_event.Wait(rtc::Event::kForever);
}
// Tears down the processing pipeline in the required order: release the
// VideoProcessor on its task queue, then destroy the codecs, then close the
// analysis and visualization files.
void VideoProcessorIntegrationTest::ReleaseAndCloseObjects(
    rtc::TaskQueue* task_queue) {
  rtc::Event sync_event(false, false);
  task_queue->PostTask([this, &sync_event]() {
    processor_->Release();
    sync_event.Set();
  });
  sync_event.Wait(rtc::Event::kForever);

  // The VideoProcessor must be ::Release()'d before we destroy the codecs.
  DestroyEncoderAndDecoder();

  // Close the analysis files before we use them for SSIM/PSNR calculations.
  analysis_frame_reader_->Close();
  analysis_frame_writer_->Close();

  // Close visualization files.
  if (encoded_frame_writer_) {
    EXPECT_TRUE(encoded_frame_writer_->Close());
  }
  if (decoded_frame_writer_) {
    decoded_frame_writer_->Close();
  }
}
// For every encoded frame, update the rate control metrics.
void VideoProcessorIntegrationTest::UpdateRateControlMetrics(int frame_number) {
  RTC_CHECK_GE(frame_number, 0);

  const int tl_idx = TemporalLayerIndexForFrame(frame_number);
  ++num_frames_per_update_[tl_idx];
  ++num_frames_total_;

  const FrameStatistic* frame_stat = stats_.GetFrame(frame_number);
  FrameType frame_type = frame_stat->frame_type;
  float encoded_size_kbits =
      frame_stat->encoded_frame_size_bytes * 8.0f / 1000.0f;

  // Update layer data.
  // Update rate mismatch relative to per-frame bandwidth for delta frames.
  if (frame_type == kVideoFrameDelta) {
    // TODO(marpan): Should we count dropped (zero size) frames in mismatch?
    sum_frame_size_mismatch_[tl_idx] +=
        fabs(encoded_size_kbits - per_frame_bandwidth_[tl_idx]) /
        per_frame_bandwidth_[tl_idx];
  } else {
    // Key frames are compared against their own target size; the very first
    // frame uses the initial key frame target.
    float target_size = (frame_number == 0) ? target_size_key_frame_initial_
                                            : target_size_key_frame_;
    sum_key_frame_size_mismatch_ +=
        fabs(encoded_size_kbits - target_size) / target_size;
    num_key_frames_ += 1;
  }
  sum_encoded_frame_size_[tl_idx] += encoded_size_kbits;
  // Encoding bit rate per temporal layer: from the start of the update/run
  // to the current frame.
  encoding_bitrate_[tl_idx] = sum_encoded_frame_size_[tl_idx] *
                              framerate_layer_[tl_idx] /
                              num_frames_per_update_[tl_idx];

  // Total encoding rate: from the start of the update/run to current frame.
  sum_encoded_frame_size_total_ += encoded_size_kbits;
  encoding_bitrate_total_ =
      sum_encoded_frame_size_total_ * framerate_ / num_frames_total_;
  perc_encoding_rate_mismatch_ =
      100 * fabs(encoding_bitrate_total_ - bitrate_kbps_) / bitrate_kbps_;
  // Record how many frames it took for the encoding rate to first come
  // within the allowed mismatch of the target.
  if (perc_encoding_rate_mismatch_ < kPercTargetvsActualMismatch &&
      !encoding_rate_within_target_) {
    num_frames_to_hit_target_ = num_frames_total_;
    encoding_rate_within_target_ = true;
  }
}
// Verify expected behavior of rate control and print out data.
// Prints the accumulated rate-control statistics for the interval
// |rate_update_index|, and, when |rc_thresholds| is non-null, asserts each
// metric against the corresponding RateControlThresholds entry.
void VideoProcessorIntegrationTest::PrintAndMaybeVerifyRateControlMetrics(
    int rate_update_index,
    const std::vector<RateControlThresholds>* rc_thresholds,
    const std::vector<int>& num_dropped_frames,
    const std::vector<int>& num_resize_actions) {
  printf(
      "Rate update #%d:\n"
      " Target bitrate : %d\n"
      " Encoded bitrate : %f\n"
      " Frame rate : %d\n",
      rate_update_index, bitrate_kbps_, encoding_bitrate_total_, framerate_);
  printf(
      " # processed frames : %d\n"
      " # frames to convergence: %d\n"
      " # dropped frames : %d\n"
      " # spatial resizes : %d\n",
      num_frames_total_, num_frames_to_hit_target_,
      num_dropped_frames[rate_update_index],
      num_resize_actions[rate_update_index]);
  // Null when verification is disabled (print-only mode).
  const RateControlThresholds* rc_threshold = nullptr;
  if (rc_thresholds) {
    rc_threshold = &(*rc_thresholds)[rate_update_index];
    EXPECT_LE(perc_encoding_rate_mismatch_,
              rc_threshold->max_encoding_rate_mismatch);
  }
  // Key-frame statistics are only meaningful if at least one key frame was
  // encoded in this interval.
  if (num_key_frames_ > 0) {
    int perc_key_frame_size_mismatch =
        100 * sum_key_frame_size_mismatch_ / num_key_frames_;
    printf(
        " # key frames : %d\n"
        " Key frame rate mismatch: %d\n",
        num_key_frames_, perc_key_frame_size_mismatch);
    if (rc_threshold) {
      EXPECT_LE(perc_key_frame_size_mismatch,
                rc_threshold->max_key_frame_size_mismatch);
    }
  }
  const int num_temporal_layers =
      NumberOfTemporalLayers(config_.codec_settings);
  // Per-temporal-layer statistics: frame-size and encoding-rate mismatch are
  // expressed as integer percentages.
  for (int i = 0; i < num_temporal_layers; i++) {
    int perc_frame_size_mismatch =
        100 * sum_frame_size_mismatch_[i] / num_frames_per_update_[i];
    int perc_encoding_rate_mismatch =
        100 * fabs(encoding_bitrate_[i] - bitrate_layer_[i]) /
        bitrate_layer_[i];
    printf(
        " Temporal layer #%d:\n"
        " Target layer bitrate : %f\n"
        " Layer frame rate : %f\n"
        " Layer per frame bandwidth : %f\n"
        " Layer encoding bitrate : %f\n"
        " Layer percent frame size mismatch : %d\n"
        " Layer percent encoding rate mismatch: %d\n"
        " # frames processed per layer : %d\n",
        i, bitrate_layer_[i], framerate_layer_[i], per_frame_bandwidth_[i],
        encoding_bitrate_[i], perc_frame_size_mismatch,
        perc_encoding_rate_mismatch, num_frames_per_update_[i]);
    if (rc_threshold) {
      EXPECT_LE(perc_frame_size_mismatch,
                rc_threshold->max_delta_frame_size_mismatch);
      EXPECT_LE(perc_encoding_rate_mismatch,
                rc_threshold->max_encoding_rate_mismatch);
    }
  }
  printf("\n");
  // Interval-wide expectations: convergence time and dropped frames are upper
  // bounds; resize and key-frame counts must match exactly.
  if (rc_threshold) {
    EXPECT_LE(num_frames_to_hit_target_, rc_threshold->max_time_hit_target);
    EXPECT_LE(num_dropped_frames[rate_update_index],
              rc_threshold->max_num_dropped_frames);
    EXPECT_EQ(rc_threshold->num_spatial_resizes,
              num_resize_actions[rate_update_index]);
    EXPECT_EQ(rc_threshold->num_key_frames, num_key_frames_);
  }
}
// Temporal layer index corresponding to frame number, for up to 3 layers.
// Returns -1 when the index cannot be determined.
int VideoProcessorIntegrationTest::TemporalLayerIndexForFrame(
    int frame_number) const {
  const int num_temporal_layers =
      NumberOfTemporalLayers(config_.codec_settings);
  if (num_temporal_layers == 1) {
    // Single layer: every frame belongs to layer 0.
    return 0;
  }
  if (num_temporal_layers == 2) {
    // temporal layer 0: 0 2 4 ...
    // temporal layer 1: 1 3 ...
    return (frame_number % 2 == 0) ? 0 : 1;
  }
  if (num_temporal_layers == 3) {
    // temporal layer 0: 0 4 8 ...
    // temporal layer 1: 2 6 ...
    // temporal layer 2: 1 3 5 7 ...
    if (frame_number % 4 == 0)
      return 0;
    if ((frame_number + 2) % 4 == 0)
      return 1;
    if ((frame_number + 1) % 2 == 0)
      return 2;
    return -1;
  }
  RTC_NOTREACHED();
  return -1;
}
// Reset quantities before each encoder rate update.
// Installs the new target bitrate/framerate from |rate_profile| for interval
// |rate_update_index|, derives the per-temporal-layer rate allocation, and
// zeroes all accumulators used by UpdateRateControlMetrics().
void VideoProcessorIntegrationTest::ResetRateControlMetrics(
    int rate_update_index,
    const RateProfile& rate_profile) {
  // Set new rates.
  bitrate_kbps_ = rate_profile.target_bit_rate[rate_update_index];
  framerate_ = rate_profile.input_frame_rate[rate_update_index];
  const int num_temporal_layers =
      NumberOfTemporalLayers(config_.codec_settings);
  RTC_DCHECK_LE(num_temporal_layers, kMaxNumTemporalLayers);
  for (int i = 0; i < num_temporal_layers; i++) {
    // kVp8LayerRateAlloction holds cumulative rate fractions; layer 0 uses
    // its entry directly, higher layers use the delta from the previous
    // cumulative fraction. (The original code computed the table entry
    // unconditionally and again inside the i > 0 branch; fold into one read.)
    float bit_rate_ratio = kVp8LayerRateAlloction[num_temporal_layers - 1][i];
    if (i > 0) {
      bit_rate_ratio -= kVp8LayerRateAlloction[num_temporal_layers - 1][i - 1];
    }
    bitrate_layer_[i] = bitrate_kbps_ * bit_rate_ratio;
    // Base-layer frame rate; for 3 layers, layer 2 is corrected below.
    framerate_layer_[i] =
        framerate_ / static_cast<float>(1 << (num_temporal_layers - 1));
  }
  if (num_temporal_layers == 3) {
    framerate_layer_[2] = framerate_ / 2.0f;
  }
  if (rate_update_index == 0) {
    target_size_key_frame_initial_ =
        0.5 * kInitialBufferSize * bitrate_layer_[0];
  }
  // Reset rate control metrics.
  for (int i = 0; i < num_temporal_layers; i++) {
    num_frames_per_update_[i] = 0;
    sum_frame_size_mismatch_[i] = 0.0f;
    sum_encoded_frame_size_[i] = 0.0f;
    encoding_bitrate_[i] = 0.0f;
    // Update layer per-frame-bandwidth.
    per_frame_bandwidth_[i] = static_cast<float>(bitrate_layer_[i]) /
                              static_cast<float>(framerate_layer_[i]);
  }
  // Set maximum size of key frames, following setting in the VP8 wrapper.
  float max_key_size = kScaleKeyFrameSize * kOptimalBufferSize * framerate_;
  // We don't know exact target size of the key frames (except for first one),
  // but the minimum in libvpx is ~|3 * per_frame_bandwidth| and maximum is
  // set by |max_key_size_ * per_frame_bandwidth|. Take middle point/average
  // as reference for mismatch. Note key frames always correspond to base
  // layer frame in this test.
  target_size_key_frame_ = 0.5 * (3 + max_key_size) * per_frame_bandwidth_[0];
  num_frames_total_ = 0;
  sum_encoded_frame_size_total_ = 0.0f;
  encoding_bitrate_total_ = 0.0f;
  perc_encoding_rate_mismatch_ = 0.0f;
  // Worst case for convergence: the full length of this rate interval.
  num_frames_to_hit_target_ =
      rate_profile.frame_index_rate_update[rate_update_index + 1];
  encoding_rate_within_target_ = false;
  sum_key_frame_size_mismatch_ = 0.0;
  num_key_frames_ = 0;
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,192 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_INTEGRATIONTEST_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_INTEGRATIONTEST_H_
#include <memory>
#include <string>
#include <vector>
#include "webrtc/common_types.h"
#include "webrtc/media/engine/webrtcvideodecoderfactory.h"
#include "webrtc/media/engine/webrtcvideoencoderfactory.h"
#include "webrtc/modules/video_coding/codecs/test/packet_manipulator.h"
#include "webrtc/modules/video_coding/codecs/test/stats.h"
#include "webrtc/modules/video_coding/codecs/test/videoprocessor.h"
#include "webrtc/modules/video_coding/utility/ivf_file_writer.h"
#include "webrtc/test/gtest.h"
#include "webrtc/test/testsupport/frame_reader.h"
#include "webrtc/test/testsupport/frame_writer.h"
#include "webrtc/test/testsupport/packet_reader.h"
namespace webrtc {
namespace test {
// The sequence of bit rate and frame rate changes for the encoder, the frame
// number where the changes are made, and the total number of frames for the
// test.
struct RateProfile {
  // Maximum number of rate-update points a single test may define.
  static const int kMaxNumRateUpdates = 3;
  // Target bitrate (kbps) for each rate-update interval.
  int target_bit_rate[kMaxNumRateUpdates];
  // Input frame rate (fps) for each rate-update interval.
  int input_frame_rate[kMaxNumRateUpdates];
  // Frame index at which each rate update takes effect; the extra trailing
  // entry marks one past the last frame of the final interval.
  int frame_index_rate_update[kMaxNumRateUpdates + 1];
  // Total number of frames to process in the test.
  int num_frames;
};
// Thresholds for the rate control metrics. The rate mismatch thresholds are
// defined as percentages. |max_time_hit_target| is defined as number of frames,
// after a rate update is made to the encoder, for the encoder to reach within
// |kPercTargetvsActualMismatch| of new target rate. The thresholds are defined
// for each rate update sequence.
struct RateControlThresholds {
  // Max number of dropped frames allowed in the rate-update interval.
  int max_num_dropped_frames;
  // Max percent mismatch between actual and target key frame size.
  int max_key_frame_size_mismatch;
  // Max percent mismatch between actual and target delta frame size.
  int max_delta_frame_size_mismatch;
  // Max percent mismatch between encoded bitrate and target bitrate.
  int max_encoding_rate_mismatch;
  // Max number of frames allowed for convergence to the new target rate.
  int max_time_hit_target;
  // Exact number of spatial resize actions expected in the interval.
  int num_spatial_resizes;
  // Exact number of key frames expected in the interval.
  int num_key_frames;
};
// Thresholds for the quality metrics. Each member is a lower bound the
// measured average/minimum PSNR and SSIM of the processed sequence must meet.
struct QualityThresholds {
  QualityThresholds(double avg_psnr_lower_bound,
                    double min_psnr_lower_bound,
                    double avg_ssim_lower_bound,
                    double min_ssim_lower_bound)
      : min_avg_psnr(avg_psnr_lower_bound),
        min_min_psnr(min_psnr_lower_bound),
        min_avg_ssim(avg_ssim_lower_bound),
        min_min_ssim(min_ssim_lower_bound) {}
  double min_avg_psnr;   // Lower bound on the sequence-average PSNR.
  double min_min_psnr;   // Lower bound on the worst single-frame PSNR.
  double min_avg_ssim;   // Lower bound on the sequence-average SSIM.
  double min_min_ssim;   // Lower bound on the worst single-frame SSIM.
};
// Should video files be saved persistently to disk for post-run visualization?
struct VisualizationParams {
  bool save_encoded_ivf;  // Save the encoded bitstream in IVF format.
  bool save_decoded_y4m;  // Save the decoded sequence in Y4M format.
};
// Integration test for video processor. Encodes+decodes a clip and
// writes it to the output directory. After completion, quality metrics
// (PSNR and SSIM) and rate control metrics are computed and compared to given
// thresholds, to verify that the quality and encoder response is acceptable.
// The rate control tests allow us to verify the behavior for changing bit rate,
// changing frame rate, frame dropping/spatial resize, and temporal layers.
// The thresholds for the rate control metrics are set to be fairly
// conservative, so failure should only happen when some significant regression
// or breakdown occurs.
class VideoProcessorIntegrationTest : public testing::Test {
 protected:
  VideoProcessorIntegrationTest();
  ~VideoProcessorIntegrationTest() override;

  // Fills in |config|'s codec settings for the given codec type, temporal
  // layer count, feature flags, and resolution.
  static void SetCodecSettings(TestConfig* config,
                               VideoCodecType codec_type,
                               int num_temporal_layers,
                               bool error_concealment_on,
                               bool denoising_on,
                               bool frame_dropper_on,
                               bool spatial_resize_on,
                               bool resilience_on,
                               int width,
                               int height);
  // Sets one rate-update point (bitrate/framerate/start frame) in
  // |rate_profile| at |rate_update_index|.
  static void SetRateProfile(RateProfile* rate_profile,
                             int rate_update_index,
                             int bitrate_kbps,
                             int framerate_fps,
                             int frame_index_rate_update);
  // Appends one RateControlThresholds entry to |rc_thresholds|; call once per
  // rate-update interval, in order.
  static void AddRateControlThresholds(
      int max_num_dropped_frames,
      int max_key_frame_size_mismatch,
      int max_delta_frame_size_mismatch,
      int max_encoding_rate_mismatch,
      int max_time_hit_target,
      int num_spatial_resizes,
      int num_key_frames,
      std::vector<RateControlThresholds>* rc_thresholds);
  // Runs the encode/decode loop over the whole clip. Null threshold pointers
  // disable the corresponding verification (print-only).
  void ProcessFramesAndMaybeVerify(
      const RateProfile& rate_profile,
      const std::vector<RateControlThresholds>* rc_thresholds,
      const QualityThresholds* quality_thresholds,
      const VisualizationParams* visualization_params);

  // Config. Subclass fixtures customize this in their constructors.
  TestConfig config_;

 private:
  static const int kMaxNumTemporalLayers = 3;

  void CreateEncoderAndDecoder();
  void DestroyEncoderAndDecoder();
  // Creates the processor and helper objects on |task_queue| and blocks until
  // initialization completes.
  void SetUpAndInitObjects(rtc::TaskQueue* task_queue,
                           const int initial_bitrate_kbps,
                           const int initial_framerate_fps,
                           const VisualizationParams* visualization_params);
  // Releases the processor on |task_queue|, destroys codecs, closes files.
  void ReleaseAndCloseObjects(rtc::TaskQueue* task_queue);
  // Accumulates rate-control statistics for one encoded frame.
  void UpdateRateControlMetrics(int frame_number);
  // Prints, and optionally EXPECTs, the accumulated rate-control statistics.
  void PrintAndMaybeVerifyRateControlMetrics(
      int rate_update_index,
      const std::vector<RateControlThresholds>* rc_thresholds,
      const std::vector<int>& num_dropped_frames,
      const std::vector<int>& num_resize_actions);
  // Maps a frame number to its temporal layer (up to 3 layers).
  int TemporalLayerIndexForFrame(int frame_number) const;
  // Resets accumulators and installs new targets before a rate update.
  void ResetRateControlMetrics(int rate_update_index,
                               const RateProfile& rate_profile);

  // Codecs. |decoder_| is owned by |decoder_factory_| (raw pointer here).
  std::unique_ptr<VideoEncoder> encoder_;
  std::unique_ptr<cricket::WebRtcVideoDecoderFactory> decoder_factory_;
  VideoDecoder* decoder_;

  // Helper objects.
  std::unique_ptr<FrameReader> analysis_frame_reader_;
  std::unique_ptr<FrameWriter> analysis_frame_writer_;
  // Visualization writers; null unless VisualizationParams requests them.
  std::unique_ptr<IvfFileWriter> encoded_frame_writer_;
  std::unique_ptr<FrameWriter> decoded_frame_writer_;
  PacketReader packet_reader_;
  std::unique_ptr<PacketManipulator> packet_manipulator_;
  Stats stats_;
  std::unique_ptr<VideoProcessor> processor_;

  // Quantities defined/updated for every encoder rate update.
  // Per-temporal-layer accumulators, indexed by layer.
  int num_frames_per_update_[kMaxNumTemporalLayers];
  float sum_frame_size_mismatch_[kMaxNumTemporalLayers];
  float sum_encoded_frame_size_[kMaxNumTemporalLayers];
  float encoding_bitrate_[kMaxNumTemporalLayers];
  float per_frame_bandwidth_[kMaxNumTemporalLayers];
  float bitrate_layer_[kMaxNumTemporalLayers];
  float framerate_layer_[kMaxNumTemporalLayers];
  // Sequence-wide accumulators for the current rate interval.
  int num_frames_total_;
  float sum_encoded_frame_size_total_;
  float encoding_bitrate_total_;
  float perc_encoding_rate_mismatch_;
  int num_frames_to_hit_target_;
  bool encoding_rate_within_target_;
  // Current targets for this rate interval.
  int bitrate_kbps_;
  int framerate_;
  // Key-frame size targets and mismatch accumulators.
  float target_size_key_frame_initial_;
  float target_size_key_frame_;
  float sum_key_frame_size_mismatch_;
  int num_key_frames_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_INTEGRATIONTEST_H_

View File

@ -0,0 +1,379 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.h"
#include <vector>
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
namespace test {
namespace {
// Codec settings.
const bool kResilienceOn = true;
// CIF resolution of the input clip.
const int kCifWidth = 352;
const int kCifHeight = 288;
#if !defined(WEBRTC_IOS)
// Shorter sequence used by the fixed-rate tests (not built on iOS).
const int kNumFramesShort = 100;
#endif
// Longer sequence used by the rate/framerate-change tests.
const int kNumFramesLong = 300;
// Passed where no persistent visualization output is wanted.
const std::nullptr_t kNoVisualizationParams = nullptr;
}  // namespace
// Fixture for the libvpx (VP8/VP9) software-codec tests. Configures the
// common foreman_cif input clip; individual tests override loss and codec
// settings as needed.
class VideoProcessorIntegrationTestLibvpx
    : public VideoProcessorIntegrationTest {
 protected:
  VideoProcessorIntegrationTestLibvpx() {
    config_.filename = "foreman_cif";
    config_.input_filename = ResourcePath(config_.filename, "yuv");
    config_.output_filename =
        TempFilename(OutputPath(), "videoprocessor_integrationtest_libvpx");
    // No simulated packet loss by default; tests override per-case.
    config_.networking_config.packet_loss_probability = 0.0;
    // Only allow encoder/decoder to use single core, for predictability.
    config_.use_single_core = true;
    config_.verbose = false;
    // Software codecs only in this fixture.
    config_.hw_encoder = false;
    config_.hw_decoder = false;
  }
};
// Fails on iOS. See webrtc:4755.
#if !defined(WEBRTC_IOS)
#if !defined(RTC_DISABLE_VP9)
// VP9, zero packet loss, fixed bitrate: quality should be very high, with a
// single key frame (the first frame only) in the sequence.
TEST_F(VideoProcessorIntegrationTestLibvpx, Process0PercentPacketLossVP9) {
  SetCodecSettings(&config_, kVideoCodecVP9, 1, false, false, true, false,
                   kResilienceOn, kCifWidth, kCifHeight);

  RateProfile profile;
  SetRateProfile(&profile, 0, 500, 30, 0);
  profile.frame_index_rate_update[1] = kNumFramesShort + 1;
  profile.num_frames = kNumFramesShort;

  std::vector<RateControlThresholds> rc_limits;
  AddRateControlThresholds(0, 40, 20, 10, 20, 0, 1, &rc_limits);

  QualityThresholds quality(37.0, 36.0, 0.93, 0.92);

  ProcessFramesAndMaybeVerify(profile, &rc_limits, &quality,
                              kNoVisualizationParams);
}
// VP9, 5% packet loss, fixed bitrate: quality should be somewhat lower than
// the loss-free run; still a single key frame (the first frame only).
TEST_F(VideoProcessorIntegrationTestLibvpx, Process5PercentPacketLossVP9) {
  config_.networking_config.packet_loss_probability = 0.05f;
  SetCodecSettings(&config_, kVideoCodecVP9, 1, false, false, true, false,
                   kResilienceOn, kCifWidth, kCifHeight);

  RateProfile profile;
  SetRateProfile(&profile, 0, 500, 30, 0);
  profile.frame_index_rate_update[1] = kNumFramesShort + 1;
  profile.num_frames = kNumFramesShort;

  std::vector<RateControlThresholds> rc_limits;
  AddRateControlThresholds(0, 40, 20, 10, 20, 0, 1, &rc_limits);

  QualityThresholds quality(17.0, 14.0, 0.45, 0.36);

  ProcessFramesAndMaybeVerify(profile, &rc_limits, &quality,
                              kNoVisualizationParams);
}
// VP9, no packet loss, varying bitrate (3 rate updates): low -> high ->
// medium. Checks that quality and the encoder's response to each new target
// rate/per-frame bandwidth stay within limits. Single key frame (first frame).
TEST_F(VideoProcessorIntegrationTestLibvpx, ProcessNoLossChangeBitRateVP9) {
  SetCodecSettings(&config_, kVideoCodecVP9, 1, false, false, true, false,
                   kResilienceOn, kCifWidth, kCifHeight);

  RateProfile profile;
  SetRateProfile(&profile, 0, 200, 30, 0);
  SetRateProfile(&profile, 1, 700, 30, 100);
  SetRateProfile(&profile, 2, 500, 30, 200);
  profile.frame_index_rate_update[3] = kNumFramesLong + 1;
  profile.num_frames = kNumFramesLong;

  std::vector<RateControlThresholds> rc_limits;
  AddRateControlThresholds(0, 30, 20, 20, 35, 0, 1, &rc_limits);
  AddRateControlThresholds(2, 0, 20, 20, 60, 0, 0, &rc_limits);
  AddRateControlThresholds(0, 0, 25, 20, 40, 0, 0, &rc_limits);

  QualityThresholds quality(35.5, 30.0, 0.90, 0.85);

  ProcessFramesAndMaybeVerify(profile, &rc_limits, &quality,
                              kNoVisualizationParams);
}
// VP9, no packet loss, decreasing frame rate across updates. Lower frame rate
// means higher per-frame bandwidth, so encoding gets easier; at this low
// bitrate that yields better rate control after each update — fewer frame
// drops and tighter rate-control limits. Single key frame (first frame).
// Note: quality after an update should be higher, but quality metrics are
// currently averaged over the whole sequence run.
TEST_F(VideoProcessorIntegrationTestLibvpx,
       ProcessNoLossChangeFrameRateFrameDropVP9) {
  SetCodecSettings(&config_, kVideoCodecVP9, 1, false, false, true, false,
                   kResilienceOn, kCifWidth, kCifHeight);

  RateProfile profile;
  SetRateProfile(&profile, 0, 100, 24, 0);
  SetRateProfile(&profile, 1, 100, 15, 100);
  SetRateProfile(&profile, 2, 100, 10, 200);
  profile.frame_index_rate_update[3] = kNumFramesLong + 1;
  profile.num_frames = kNumFramesLong;

  std::vector<RateControlThresholds> rc_limits;
  AddRateControlThresholds(45, 50, 95, 15, 45, 0, 1, &rc_limits);
  AddRateControlThresholds(20, 0, 50, 10, 30, 0, 0, &rc_limits);
  AddRateControlThresholds(5, 0, 30, 5, 25, 0, 0, &rc_limits);

  QualityThresholds quality(31.5, 18.0, 0.80, 0.43);

  ProcessFramesAndMaybeVerify(profile, &rc_limits, &quality,
                              kNoVisualizationParams);
}
// VP9, no packet loss, denoiser enabled. Single key frame (first frame).
TEST_F(VideoProcessorIntegrationTestLibvpx, ProcessNoLossDenoiserOnVP9) {
  SetCodecSettings(&config_, kVideoCodecVP9, 1, false, true, true, false,
                   kResilienceOn, kCifWidth, kCifHeight);

  RateProfile profile;
  SetRateProfile(&profile, 0, 500, 30, 0);
  profile.frame_index_rate_update[1] = kNumFramesShort + 1;
  profile.num_frames = kNumFramesShort;

  std::vector<RateControlThresholds> rc_limits;
  AddRateControlThresholds(0, 40, 20, 10, 20, 0, 1, &rc_limits);

  QualityThresholds quality(36.8, 35.8, 0.92, 0.91);

  ProcessFramesAndMaybeVerify(profile, &rc_limits, &quality,
                              kNoVisualizationParams);
}
// VP9, no packet loss, very low bitrate with spatial_resize enabled: expect
// exactly one resize during the sequence. The resize happens on a delta
// frame; still only one key frame (the first frame).
TEST_F(VideoProcessorIntegrationTestLibvpx,
       DISABLED_ProcessNoLossSpatialResizeFrameDropVP9) {
  SetCodecSettings(&config_, kVideoCodecVP9, 1, false, false, true, true,
                   kResilienceOn, kCifWidth, kCifHeight);

  RateProfile profile;
  SetRateProfile(&profile, 0, 50, 30, 0);
  profile.frame_index_rate_update[1] = kNumFramesLong + 1;
  profile.num_frames = kNumFramesLong;

  std::vector<RateControlThresholds> rc_limits;
  AddRateControlThresholds(228, 70, 160, 15, 80, 1, 1, &rc_limits);

  QualityThresholds quality(24.0, 13.0, 0.65, 0.37);

  ProcessFramesAndMaybeVerify(profile, &rc_limits, &quality,
                              kNoVisualizationParams);
}
// TODO(marpan): Add temporal layer test for VP9, once changes are in
// vp9 wrapper for this.
#endif // !defined(RTC_DISABLE_VP9)
// VP8, zero packet loss, fixed bitrate: quality should be very high, with a
// single key frame (the first frame only). A |key_frame_interval| of -1
// means no periodic key frames in the test.
TEST_F(VideoProcessorIntegrationTestLibvpx, ProcessZeroPacketLoss) {
  SetCodecSettings(&config_, kVideoCodecVP8, 1, false, true, true, false,
                   kResilienceOn, kCifWidth, kCifHeight);

  RateProfile profile;
  SetRateProfile(&profile, 0, 500, 30, 0);
  profile.frame_index_rate_update[1] = kNumFramesShort + 1;
  profile.num_frames = kNumFramesShort;

  std::vector<RateControlThresholds> rc_limits;
  AddRateControlThresholds(0, 40, 20, 10, 15, 0, 1, &rc_limits);

  QualityThresholds quality(34.95, 33.0, 0.90, 0.89);

  ProcessFramesAndMaybeVerify(profile, &rc_limits, &quality,
                              kNoVisualizationParams);
}
// VP8, 5% packet loss, fixed bitrate: quality should be a bit lower than the
// loss-free run; still a single key frame (the first frame only).
TEST_F(VideoProcessorIntegrationTestLibvpx, Process5PercentPacketLoss) {
  config_.networking_config.packet_loss_probability = 0.05f;
  SetCodecSettings(&config_, kVideoCodecVP8, 1, false, true, true, false,
                   kResilienceOn, kCifWidth, kCifHeight);

  RateProfile profile;
  SetRateProfile(&profile, 0, 500, 30, 0);
  profile.frame_index_rate_update[1] = kNumFramesShort + 1;
  profile.num_frames = kNumFramesShort;

  std::vector<RateControlThresholds> rc_limits;
  AddRateControlThresholds(0, 40, 20, 10, 15, 0, 1, &rc_limits);

  QualityThresholds quality(20.0, 16.0, 0.60, 0.40);

  ProcessFramesAndMaybeVerify(profile, &rc_limits, &quality,
                              kNoVisualizationParams);
}
// VP8, 10% packet loss, fixed bitrate: quality should be lower still; a
// single key frame (the first frame only).
TEST_F(VideoProcessorIntegrationTestLibvpx, Process10PercentPacketLoss) {
  config_.networking_config.packet_loss_probability = 0.1f;
  SetCodecSettings(&config_, kVideoCodecVP8, 1, false, true, true, false,
                   kResilienceOn, kCifWidth, kCifHeight);

  RateProfile profile;
  SetRateProfile(&profile, 0, 500, 30, 0);
  profile.frame_index_rate_update[1] = kNumFramesShort + 1;
  profile.num_frames = kNumFramesShort;

  std::vector<RateControlThresholds> rc_limits;
  AddRateControlThresholds(0, 40, 20, 10, 15, 0, 1, &rc_limits);

  QualityThresholds quality(19.0, 16.0, 0.50, 0.35);

  ProcessFramesAndMaybeVerify(profile, &rc_limits, &quality,
                              kNoVisualizationParams);
}
#endif // !defined(WEBRTC_IOS)
// The tests below are currently disabled for Android. For ARM, the encoder
// uses |cpu_speed| = 12, as opposed to default |cpu_speed| <= 6 for x86,
// which leads to significantly different quality. The quality and rate control
// settings in the tests below are defined for encoder speed setting
// |cpu_speed| <= ~6. A number of settings would need to be significantly
// modified for the |cpu_speed| = 12 case. For now, keep the tests below
// disabled on Android. Some quality parameter in the above test has been
// adjusted to also pass for |cpu_speed| <= 12.
// VP8: Run with no packet loss, with varying bitrate (3 rate updates):
// low to high to medium. Check that quality and encoder response to the new
// target rate/per-frame bandwidth (for each rate update) is within limits.
// One key frame (first frame only) in sequence.
// Too slow to finish before timeout on iOS. See webrtc:4755.
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
#define MAYBE_ProcessNoLossChangeBitRateVP8 \
DISABLED_ProcessNoLossChangeBitRateVP8
#else
#define MAYBE_ProcessNoLossChangeBitRateVP8 ProcessNoLossChangeBitRateVP8
#endif
// VP8, no packet loss, varying bitrate (3 rate updates): low -> high ->
// medium. Verifies quality and the encoder's response to each new target
// rate/per-frame bandwidth. Single key frame (first frame only).
TEST_F(VideoProcessorIntegrationTestLibvpx,
       MAYBE_ProcessNoLossChangeBitRateVP8) {
  SetCodecSettings(&config_, kVideoCodecVP8, 1, false, true, true, false,
                   kResilienceOn, kCifWidth, kCifHeight);

  RateProfile profile;
  SetRateProfile(&profile, 0, 200, 30, 0);
  SetRateProfile(&profile, 1, 800, 30, 100);
  SetRateProfile(&profile, 2, 500, 30, 200);
  profile.frame_index_rate_update[3] = kNumFramesLong + 1;
  profile.num_frames = kNumFramesLong;

  std::vector<RateControlThresholds> rc_limits;
  AddRateControlThresholds(0, 45, 20, 10, 15, 0, 1, &rc_limits);
  AddRateControlThresholds(0, 0, 25, 20, 10, 0, 0, &rc_limits);
  AddRateControlThresholds(0, 0, 25, 15, 10, 0, 0, &rc_limits);

  QualityThresholds quality(34.0, 32.0, 0.85, 0.80);

  ProcessFramesAndMaybeVerify(profile, &rc_limits, &quality,
                              kNoVisualizationParams);
}
// VP8: Run with no packet loss, with an update (decrease) in frame rate.
// Lower frame rate means higher per-frame-bandwidth, so easier to encode.
// At the bitrate in this test, this means better rate control after the
// update(s) to lower frame rate. So expect less frame drops, and max values
// for the rate control metrics can be lower. One key frame (first frame only).
// Note: quality after update should be higher but we currently compute quality
// metrics averaged over whole sequence run.
// Too slow to finish before timeout on iOS. See webrtc:4755.
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
#define MAYBE_ProcessNoLossChangeFrameRateFrameDropVP8 \
DISABLED_ProcessNoLossChangeFrameRateFrameDropVP8
#else
#define MAYBE_ProcessNoLossChangeFrameRateFrameDropVP8 \
ProcessNoLossChangeFrameRateFrameDropVP8
#endif
// VP8, no packet loss, decreasing frame rate across updates. Lower frame
// rate means higher per-frame bandwidth, so easier encoding and better rate
// control after each update — expect fewer frame drops and tighter limits.
// Single key frame (first frame only).
TEST_F(VideoProcessorIntegrationTestLibvpx,
       MAYBE_ProcessNoLossChangeFrameRateFrameDropVP8) {
  SetCodecSettings(&config_, kVideoCodecVP8, 1, false, true, true, false,
                   kResilienceOn, kCifWidth, kCifHeight);

  RateProfile profile;
  SetRateProfile(&profile, 0, 80, 24, 0);
  SetRateProfile(&profile, 1, 80, 15, 100);
  SetRateProfile(&profile, 2, 80, 10, 200);
  profile.frame_index_rate_update[3] = kNumFramesLong + 1;
  profile.num_frames = kNumFramesLong;

  std::vector<RateControlThresholds> rc_limits;
  AddRateControlThresholds(40, 20, 75, 15, 60, 0, 1, &rc_limits);
  AddRateControlThresholds(10, 0, 25, 10, 35, 0, 0, &rc_limits);
  AddRateControlThresholds(0, 0, 20, 10, 15, 0, 0, &rc_limits);

  QualityThresholds quality(31.0, 22.0, 0.80, 0.65);

  ProcessFramesAndMaybeVerify(profile, &rc_limits, &quality,
                              kNoVisualizationParams);
}
// VP8: Run with no packet loss, with 3 temporal layers, with a rate update in
// the middle of the sequence. The max values for the frame size mismatch and
// encoding rate mismatch are applied to each layer.
// No dropped frames in this test, and internal spatial resizer is off.
// One key frame (first frame only) in sequence, so no spatial resizing.
// Too slow to finish before timeout on iOS. See webrtc:4755.
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
#define MAYBE_ProcessNoLossTemporalLayersVP8 \
DISABLED_ProcessNoLossTemporalLayersVP8
#else
#define MAYBE_ProcessNoLossTemporalLayersVP8 ProcessNoLossTemporalLayersVP8
#endif
// VP8, no packet loss, 3 temporal layers, one rate update mid-sequence. The
// frame-size and encoding-rate mismatch limits apply per layer. No dropped
// frames expected; internal spatial resizer off; single key frame (first
// frame only), so no spatial resizing.
TEST_F(VideoProcessorIntegrationTestLibvpx,
       MAYBE_ProcessNoLossTemporalLayersVP8) {
  SetCodecSettings(&config_, kVideoCodecVP8, 3, false, true, true, false,
                   kResilienceOn, kCifWidth, kCifHeight);

  RateProfile profile;
  SetRateProfile(&profile, 0, 200, 30, 0);
  SetRateProfile(&profile, 1, 400, 30, 150);
  profile.frame_index_rate_update[2] = kNumFramesLong + 1;
  profile.num_frames = kNumFramesLong;

  std::vector<RateControlThresholds> rc_limits;
  AddRateControlThresholds(0, 20, 30, 10, 10, 0, 1, &rc_limits);
  AddRateControlThresholds(0, 0, 30, 15, 10, 0, 0, &rc_limits);

  QualityThresholds quality(32.5, 30.0, 0.85, 0.80);

  ProcessFramesAndMaybeVerify(profile, &rc_limits, &quality,
                              kNoVisualizationParams);
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,98 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.h"
#include <vector>
#include "webrtc/test/field_trial.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
namespace test {
#if defined(WEBRTC_ANDROID)
namespace {
// Number of frames to process from the foreman clips.
const int kForemanNumFrames = 300;
// Passed where no persistent visualization output is wanted.
const std::nullptr_t kNoVisualizationParams = nullptr;
}  // namespace
// Fixture for tests that exercise the Android MediaCodec hardware codecs
// through the VideoProcessor pipeline.
class VideoProcessorIntegrationTestMediaCodec
    : public VideoProcessorIntegrationTest {
 protected:
  VideoProcessorIntegrationTestMediaCodec() {
    config_.filename = "foreman_cif";
    config_.input_filename = ResourcePath(config_.filename, "yuv");
    config_.output_filename =
        TempFilename(OutputPath(), "videoprocessor_integrationtest_mediacodec");
    config_.verbose = false;
    // Use the platform's hardware encoder/decoder.
    config_.hw_encoder = true;
    config_.hw_decoder = true;
  }
};
// VP8 via MediaCodec: foreman CIF at a fixed 500 kbps / 30 fps.
TEST_F(VideoProcessorIntegrationTestMediaCodec, ForemanCif500kbpsVp8) {
  SetCodecSettings(&config_, kVideoCodecVP8, 1, false, false, false, false,
                   false, 352, 288);

  RateProfile profile;
  SetRateProfile(&profile, 0, 500, 30, 0);  // Start below |low_kbps|.
  profile.frame_index_rate_update[1] = kForemanNumFrames + 1;
  profile.num_frames = kForemanNumFrames;

  // The thresholds below may have to be tweaked to let even poor MediaCodec
  // implementations pass. If this test fails on the bots, disable it and
  // ping brandtr@.
  std::vector<RateControlThresholds> rc_limits;
  AddRateControlThresholds(5, 95, 20, 10, 10, 0, 1, &rc_limits);

  QualityThresholds quality(30.0, 15.0, 0.90, 0.40);

  ProcessFramesAndMaybeVerify(profile, &rc_limits, &quality,
                              kNoVisualizationParams);
}
// VP8 with forced software fallback: the field trial makes the encoder fall
// back to software below 150 kbps and switch back above 175 kbps; the rate
// profile drives the encoder through both transitions.
TEST_F(VideoProcessorIntegrationTestMediaCodec,
       Foreman240p100kbpsVp8WithForcedSwFallback) {
  ScopedFieldTrials override_field_trials(
      "WebRTC-VP8-Forced-Fallback-Encoder/Enabled-150,175,10000,1/");

  config_.filename = "foreman_320x240";
  config_.input_filename = ResourcePath(config_.filename, "yuv");
  config_.sw_fallback_encoder = true;
  SetCodecSettings(&config_, kVideoCodecVP8, 1, false, false, false, false,
                   false, 320, 240);

  RateProfile profile;
  SetRateProfile(&profile, 0, 100, 10, 0);    // Start below |low_kbps|.
  SetRateProfile(&profile, 1, 100, 10, 80);   // Fallback in this bucket.
  SetRateProfile(&profile, 2, 200, 10, 200);  // Switch back here.
  profile.frame_index_rate_update[3] = kForemanNumFrames + 1;
  profile.num_frames = kForemanNumFrames;

  // The thresholds below may have to be tweaked to let even poor MediaCodec
  // implementations pass. If this test fails on the bots, disable it and
  // ping brandtr@.
  std::vector<RateControlThresholds> rc_limits;
  AddRateControlThresholds(0, 50, 75, 70, 10, 0, 1, &rc_limits);
  AddRateControlThresholds(0, 50, 25, 12, 60, 0, 1, &rc_limits);
  AddRateControlThresholds(0, 65, 15, 5, 5, 0, 1, &rc_limits);

  QualityThresholds quality(33.0, 30.0, 0.90, 0.85);

  ProcessFramesAndMaybeVerify(profile, &rc_limits, &quality,
                              kNoVisualizationParams);
}
#endif // defined(WEBRTC_ANDROID)
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,77 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.h"
#include <vector>
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
namespace test {
#if defined(WEBRTC_USE_H264)
namespace {
// Codec settings.
const bool kResilienceOn = true;
// Foreman CIF input resolution.
const int kCifWidth = 352;
const int kCifHeight = 288;
// Number of frames processed per test run.
const int kNumFrames = 100;
// Passed where a VisualizationParams* is expected, to disable dumping of
// encoded/decoded video to file.
const std::nullptr_t kNoVisualizationParams = nullptr;
}  // namespace
// Fixture for running the OpenH264 software encoder/decoder through the
// VideoProcessor harness on foreman_cif, single-core for predictability.
class VideoProcessorIntegrationTestOpenH264
    : public VideoProcessorIntegrationTest {
 protected:
  VideoProcessorIntegrationTestOpenH264() {
    config_.filename = "foreman_cif";
    config_.input_filename = ResourcePath(config_.filename, "yuv");
    // Label temp output with this test's name (was mislabeled
    // "..._libvpx", copy-pasted from the libvpx fixture).
    config_.output_filename =
        TempFilename(OutputPath(), "videoprocessor_integrationtest_openh264");
    config_.networking_config.packet_loss_probability = 0.0;
    // Only allow encoder/decoder to use single core, for predictability.
    config_.use_single_core = true;
    config_.verbose = false;
    // Exercise the software OpenH264 codec, not platform HW codecs.
    config_.hw_encoder = false;
    config_.hw_decoder = false;
  }
};
// H264: no packet loss, fixed bitrate; quality should be very high.
// Note(hbos): The PacketManipulatorImpl code used to simulate packet loss in
// these unittests appears to drop "packets" in a way that is not compatible
// with H264, so no lossy H264 variants have been added.
TEST_F(VideoProcessorIntegrationTestOpenH264, Process0PercentPacketLossH264) {
  SetCodecSettings(&config_, kVideoCodecH264, 1, false, false, true, false,
                   kResilienceOn, kCifWidth, kCifHeight);
  // Single 500 kbps / 30 fps rate bucket covering the whole clip.
  RateProfile profile;
  SetRateProfile(&profile, 0, 500, 30, 0);
  profile.num_frames = kNumFrames;
  profile.frame_index_rate_update[1] = kNumFrames + 1;
  QualityThresholds quality(35.0, 25.0, 0.93, 0.70);
  std::vector<RateControlThresholds> rc_limits;
  AddRateControlThresholds(2, 60, 20, 10, 20, 0, 1, &rc_limits);
  ProcessFramesAndMaybeVerify(profile, &rc_limits, &quality,
                              kNoVisualizationParams);
}
#endif // defined(WEBRTC_USE_H264)
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,116 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
namespace test {
namespace {
// Loop variables: each array axis is combined via ::testing::Combine below.
const int kBitrates[] = {500};  // kbps.
const VideoCodecType kVideoCodecType[] = {kVideoCodecVP8};
const bool kHwCodec[] = {false};
// Codec settings.
const bool kResilienceOn = false;
const int kNumTemporalLayers = 1;
const bool kDenoisingOn = false;
const bool kErrorConcealmentOn = false;
const bool kSpatialResizeOn = false;
const bool kFrameDropperOn = false;
// Test settings.
const bool kUseSingleCore = false;
const VisualizationParams kVisualizationParams = {
    false,  // save_encoded_ivf
    false,  // save_decoded_y4m
};
// Number of frames processed per RunTest() invocation.
const int kNumFrames = 30;
}  // namespace
// Tests for plotting statistics from logs. Parameter tuple is
// (bitrate kbps, codec type, use HW codec).
class VideoProcessorIntegrationTestParameterized
    : public VideoProcessorIntegrationTest,
      public ::testing::WithParamInterface<
          ::testing::tuple<int, VideoCodecType, bool>> {
 protected:
  VideoProcessorIntegrationTestParameterized()
      : bitrate_(::testing::get<0>(GetParam())),
        codec_type_(::testing::get<1>(GetParam())),
        hw_codec_(::testing::get<2>(GetParam())) {}
  ~VideoProcessorIntegrationTestParameterized() override = default;
  // Processes |filename| once at the given resolution/framerate with the
  // fixture's bitrate/codec/HW parameters. No pass/fail thresholds are
  // applied; frames may be dumped per kVisualizationParams.
  void RunTest(int width,
               int height,
               int framerate,
               const std::string& filename) {
    config_.filename = filename;
    config_.input_filename = ResourcePath(filename, "yuv");
    config_.output_filename =
        TempFilename(OutputPath(), "plot_videoprocessor_integrationtest");
    config_.use_single_core = kUseSingleCore;
    // Verbose logging is the point: output feeds offline plotting.
    config_.verbose = true;
    config_.hw_encoder = hw_codec_;
    config_.hw_decoder = hw_codec_;
    SetCodecSettings(&config_, codec_type_, kNumTemporalLayers,
                     kErrorConcealmentOn, kDenoisingOn, kFrameDropperOn,
                     kSpatialResizeOn, kResilienceOn, width, height);
    // Single rate bucket covering the whole clip.
    RateProfile rate_profile;
    SetRateProfile(&rate_profile,
                   0,  // update_index
                   bitrate_, framerate,
                   0);  // frame_index_rate_update
    rate_profile.frame_index_rate_update[1] = kNumFrames + 1;
    rate_profile.num_frames = kNumFrames;
    // Null thresholds: process and log only, no verification.
    ProcessFramesAndMaybeVerify(rate_profile, nullptr, nullptr,
                                &kVisualizationParams);
  }
  const int bitrate_;  // Target bitrate in kbps.
  const VideoCodecType codec_type_;
  const bool hw_codec_;  // True to use platform hardware codecs.
};
// Instantiate the full grid: every combination of bitrate x codec x HW flag.
INSTANTIATE_TEST_CASE_P(CodecSettings,
                        VideoProcessorIntegrationTestParameterized,
                        ::testing::Combine(::testing::ValuesIn(kBitrates),
                                           ::testing::ValuesIn(kVideoCodecType),
                                           ::testing::ValuesIn(kHwCodec)));
// One test per foreman clip resolution; all run at 30 fps.
TEST_P(VideoProcessorIntegrationTestParameterized, Process_128x96_30fps) {
  RunTest(128, 96, 30, "foreman_128x96");
}
TEST_P(VideoProcessorIntegrationTestParameterized, Process_160x120_30fps) {
  RunTest(160, 120, 30, "foreman_160x120");
}
TEST_P(VideoProcessorIntegrationTestParameterized, Process_176x144_30fps) {
  RunTest(176, 144, 30, "foreman_176x144");
}
TEST_P(VideoProcessorIntegrationTestParameterized, Process_320x240_30fps) {
  RunTest(320, 240, 30, "foreman_320x240");
}
TEST_P(VideoProcessorIntegrationTestParameterized, Process_352x288_30fps) {
  RunTest(352, 288, 30, "foreman_cif");
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,188 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <memory>
#include "webrtc/api/video/i420_buffer.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/video_coding/codecs/test/mock/mock_packet_manipulator.h"
#include "webrtc/modules/video_coding/codecs/test/videoprocessor.h"
#include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
#include "webrtc/modules/video_coding/include/video_coding.h"
#include "webrtc/rtc_base/ptr_util.h"
#include "webrtc/test/gmock.h"
#include "webrtc/test/gtest.h"
#include "webrtc/test/testsupport/mock/mock_frame_reader.h"
#include "webrtc/test/testsupport/mock/mock_frame_writer.h"
#include "webrtc/test/testsupport/packet_reader.h"
#include "webrtc/test/testsupport/unittest_utils.h"
#include "webrtc/test/video_codec_settings.h"
#include "webrtc/typedefs.h"
using ::testing::_;
using ::testing::AtLeast;
using ::testing::ElementsAre;
using ::testing::Property;
using ::testing::Return;
namespace webrtc {
namespace test {
namespace {
// CIF frame dimensions used for the mocked frames.
const int kWidth = 352;
const int kHeight = 288;
const int kFrameSize = kWidth * kHeight * 3 / 2;  // I420.
// Frame count reported by the mocked frame reader.
const int kNumFrames = 2;
}  // namespace
// Fixture wiring a VideoProcessor to mocked encoder, decoder, frame
// reader/writer and packet manipulator, so its interactions can be verified
// without real codecs or file I/O.
class VideoProcessorTest : public testing::Test {
 protected:
  VideoProcessorTest() {
    // Get a codec configuration struct and configure it.
    webrtc::test::CodecSettings(kVideoCodecVP8, &config_.codec_settings);
    config_.codec_settings.width = kWidth;
    config_.codec_settings.height = kHeight;
    // The processor queries frame count and size from the reader, so stub
    // these before construction.
    EXPECT_CALL(frame_reader_mock_, NumberOfFrames())
        .WillRepeatedly(Return(kNumFrames));
    EXPECT_CALL(frame_reader_mock_, FrameLength())
        .WillRepeatedly(Return(kFrameSize));
    video_processor_ = rtc::MakeUnique<VideoProcessor>(
        &encoder_mock_, &decoder_mock_, &frame_reader_mock_,
        &frame_writer_mock_, &packet_manipulator_mock_, config_, &stats_,
        nullptr /* encoded_frame_writer */, nullptr /* decoded_frame_writer */);
  }
  // Expectations for Init(): each codec initialized and its complete-callback
  // registered exactly once.
  void ExpectInit() {
    EXPECT_CALL(encoder_mock_, InitEncode(_, _, _)).Times(1);
    EXPECT_CALL(encoder_mock_, RegisterEncodeCompleteCallback(_)).Times(1);
    EXPECT_CALL(decoder_mock_, InitDecode(_, _)).Times(1);
    EXPECT_CALL(decoder_mock_, RegisterDecodeCompleteCallback(_)).Times(1);
  }
  // Expectations for Release(): each codec released once; callbacks are
  // (re-)registered during release as well.
  void ExpectRelease() {
    EXPECT_CALL(encoder_mock_, Release()).Times(1);
    EXPECT_CALL(encoder_mock_, RegisterEncodeCompleteCallback(_)).Times(1);
    EXPECT_CALL(decoder_mock_, Release()).Times(1);
    EXPECT_CALL(decoder_mock_, RegisterDecodeCompleteCallback(_)).Times(1);
  }
  TestConfig config_;
  MockVideoEncoder encoder_mock_;
  MockVideoDecoder decoder_mock_;
  MockFrameReader frame_reader_mock_;
  MockFrameWriter frame_writer_mock_;
  MockPacketManipulator packet_manipulator_mock_;
  Stats stats_;
  std::unique_ptr<VideoProcessor> video_processor_;
};
// Init() and Release() each trigger exactly the expected codec calls.
TEST_F(VideoProcessorTest, InitRelease) {
  ExpectInit();
  video_processor_->Init();
  ExpectRelease();
  video_processor_->Release();
}
// With a fixed framerate, each processed frame's RTP timestamp advances by
// one 90 kHz frame interval.
TEST_F(VideoProcessorTest, ProcessFrames_FixedFramerate) {
  ExpectInit();
  video_processor_->Init();

  const int kBitrateKbps = 456;
  const int kFramerateFps = 31;
  video_processor_->SetRates(kBitrateKbps, kFramerateFps);

  EXPECT_CALL(frame_reader_mock_, ReadFrame())
      .WillRepeatedly(Return(I420Buffer::Create(kWidth, kHeight)));

  // Expected timestamp of frame i is i * 90000 / fps.
  for (int frame_number = 1; frame_number <= 2; ++frame_number) {
    EXPECT_CALL(encoder_mock_,
                Encode(Property(&VideoFrame::timestamp,
                                frame_number * 90000 / kFramerateFps),
                       _, _))
        .Times(1);
    video_processor_->ProcessFrame();
  }

  ExpectRelease();
  video_processor_->Release();
}
// When SetRates() changes the framerate mid-stream, the next frame's
// timestamp advances by the new 90 kHz frame interval.
TEST_F(VideoProcessorTest, ProcessFrames_VariableFramerate) {
  ExpectInit();
  video_processor_->Init();
  const int kBitrateKbps = 456;
  const int kStartFramerateFps = 27;
  video_processor_->SetRates(kBitrateKbps, kStartFramerateFps);
  EXPECT_CALL(frame_reader_mock_, ReadFrame())
      .WillRepeatedly(Return(I420Buffer::Create(kWidth, kHeight)));
  // First frame: one frame interval at the initial framerate.
  EXPECT_CALL(encoder_mock_, Encode(Property(&VideoFrame::timestamp,
                                             1 * 90000 / kStartFramerateFps),
                                    _, _))
      .Times(1);
  video_processor_->ProcessFrame();
  // Lower the framerate; the second frame uses the new interval.
  const int kNewFramerateFps = 13;
  video_processor_->SetRates(kBitrateKbps, kNewFramerateFps);
  EXPECT_CALL(encoder_mock_, Encode(Property(&VideoFrame::timestamp,
                                             2 * 90000 / kNewFramerateFps),
                                    _, _))
      .Times(1);
  video_processor_->ProcessFrame();
  ExpectRelease();
  video_processor_->Release();
}
// SetRates() forwards the target bitrate/framerate to the encoder and opens a
// new per-rate-update bucket in the dropped-frame and spatial-resize stats.
TEST_F(VideoProcessorTest, SetRates) {
  ExpectInit();
  video_processor_->Init();
  const int kBitrateKbps = 123;
  const int kFramerateFps = 17;
  EXPECT_CALL(encoder_mock_,
              SetRateAllocation(
                  Property(&BitrateAllocation::get_sum_kbps, kBitrateKbps),
                  kFramerateFps))
      .Times(1);
  video_processor_->SetRates(kBitrateKbps, kFramerateFps);
  // One stats bucket so far, with no drops or resizes recorded.
  EXPECT_THAT(video_processor_->NumberDroppedFramesPerRateUpdate(),
              ElementsAre(0));
  EXPECT_THAT(video_processor_->NumberSpatialResizesPerRateUpdate(),
              ElementsAre(0));
  // A second rate update appends a second bucket.
  const int kNewBitrateKbps = 456;
  const int kNewFramerateFps = 34;
  EXPECT_CALL(encoder_mock_,
              SetRateAllocation(
                  Property(&BitrateAllocation::get_sum_kbps, kNewBitrateKbps),
                  kNewFramerateFps))
      .Times(1);
  video_processor_->SetRates(kNewBitrateKbps, kNewFramerateFps);
  EXPECT_THAT(video_processor_->NumberDroppedFramesPerRateUpdate(),
              ElementsAre(0, 0));
  EXPECT_THAT(video_processor_->NumberSpatialResizesPerRateUpdate(),
              ElementsAre(0, 0));
  ExpectRelease();
  video_processor_->Release();
}
} // namespace test
} // namespace webrtc