Revert "Implement H264 simulcast support and generalize SimulcastEncoderAdapter use for H264 & VP8."

This reverts commit 07efe436c9002e139845f62486e3ee4e29f0d85b.

Reason for revert: Breaks downstream project.

The cricket::GetSimulcastConfig method signature has been updated.
I think you can get away with a default value for temporal_layers_supported (and then you can remove it after a few days, once downstream projects have been updated).
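A minimal sketch of that suggestion, assuming the new parameter is trailing. Only temporal_layers_supported is named in this note; the other parameters here are illustrative placeholders, not the actual signature:

// Hypothetical sketch: a defaulted trailing parameter would keep
// downstream callers of cricket::GetSimulcastConfig compiling until
// they are updated. Everything except temporal_layers_supported is a
// placeholder.
std::vector<webrtc::VideoStream> GetSimulcastConfig(
    size_t max_streams,
    int width,
    int height,
    int max_qp,
    int max_framerate,
    bool is_screenshare,
    bool temporal_layers_supported = true);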


Original change's description:
> Implement H264 simulcast support and generalize SimulcastEncoderAdapter use for H264 & VP8.
> 
> * Move SimulcastEncoderAdapter out under modules/video_coding
> * Move SimulcastRateAllocator back out to modules/video_coding/utility
> * Move TemporalLayers and ScreenshareLayers to modules/video_coding/utility
> * Move any VP8 specific code - such as temporal layer bitrate budgeting -
>   under codec type dependent conditionals.
> * Plumb the simulcast index for H264 in the codec specific and RTP format data structures.
> 
> Bug: webrtc:5840
> Change-Id: Ieced8a00e38f273c1a6cfd0f5431a87d07b8f44e
> Reviewed-on: https://webrtc-review.googlesource.com/64100
> Commit-Queue: Harald Alvestrand <hta@webrtc.org>
> Reviewed-by: Stefan Holmer <stefan@webrtc.org>
> Reviewed-by: Erik Språng <sprang@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#23705}
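For context on the last bullet of the original description, a rough sketch of the field the reverted change added to the H264 codec-specific info. The field name matches the test callback in this diff (codecSpecific.H264.simulcast_idx); the type and surrounding struct layout are assumptions, mirroring VP8's simulcastIdx:

// Hypothetical sketch, not the actual struct definition.
struct CodecSpecificInfoH264 {
  // ... existing members elided ...
  uint8_t simulcast_idx;  // Index of the simulcast stream this frame
                          // was encoded for; plumbed into the RTP
                          // format data structures by the change.
};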

TBR=sprang@webrtc.org,stefan@webrtc.org,mflodman@webrtc.org,hta@webrtc.org,sergio.garcia.murillo@gmail.com,titovartem@webrtc.org,agouaillard@gmail.com

Change-Id: Ic9d3b1eeaf195bb5ec2063954421f5e77866d663
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: webrtc:5840
Reviewed-on: https://webrtc-review.googlesource.com/84760
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Commit-Queue: Mirko Bonadei <mbonadei@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23710}
Author: Mirko Bonadei
Date: 2018-06-21 13:41:01 +00:00
Committed by: Commit Bot
Parent: f341f3feb5
Commit: 6f440ed5b5
51 changed files with 530 additions and 918 deletions

modules/video_coding/utility/simulcast_rate_allocator.cc
@@ -1,244 +0,0 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/utility/simulcast_rate_allocator.h"
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
#include "common_types.h" // NOLINT(build/include)
#include "rtc_base/checks.h"
namespace webrtc {
SimulcastRateAllocator::SimulcastRateAllocator(const VideoCodec& codec)
: codec_(codec) {}
VideoBitrateAllocation SimulcastRateAllocator::GetAllocation(
uint32_t total_bitrate_bps,
uint32_t framerate) {
VideoBitrateAllocation allocated_bitrates_bps;
DistributeAllocationToSimulcastLayers(total_bitrate_bps,
&allocated_bitrates_bps);
DistributeAllocationToTemporalLayers(framerate, &allocated_bitrates_bps);
return allocated_bitrates_bps;
}
void SimulcastRateAllocator::DistributeAllocationToSimulcastLayers(
uint32_t total_bitrate_bps,
VideoBitrateAllocation* allocated_bitrates_bps) const {
uint32_t left_to_allocate = total_bitrate_bps;
if (codec_.maxBitrate && codec_.maxBitrate * 1000 < left_to_allocate)
left_to_allocate = codec_.maxBitrate * 1000;
if (codec_.numberOfSimulcastStreams == 0) {
// No simulcast, just set the target as this has been capped already.
if (codec_.active) {
allocated_bitrates_bps->SetBitrate(
0, 0, std::max(codec_.minBitrate * 1000, left_to_allocate));
}
return;
}
// Find the first active layer. We don't allocate to inactive layers.
size_t active_layer = 0;
for (; active_layer < codec_.numberOfSimulcastStreams; ++active_layer) {
if (codec_.simulcastStream[active_layer].active) {
// Found the first active layer.
break;
}
}
// All streams could be inactive, and nothing more to do.
if (active_layer == codec_.numberOfSimulcastStreams) {
return;
}
// Always allocate enough bitrate for the minimum bitrate of the first
// active layer. Suspending below min bitrate is controlled outside the
// codec implementation and is not overridden by this.
left_to_allocate = std::max(
codec_.simulcastStream[active_layer].minBitrate * 1000, left_to_allocate);
// Begin by allocating bitrate to simulcast streams, putting all bitrate in
// temporal layer 0. We'll then distribute this bitrate, across potential
// temporal layers, when stream allocation is done.
size_t top_active_layer = active_layer;
// Allocate up to the target bitrate for each active simulcast layer.
for (; active_layer < codec_.numberOfSimulcastStreams; ++active_layer) {
const SimulcastStream& stream = codec_.simulcastStream[active_layer];
if (!stream.active) {
continue;
}
// If we can't allocate to the current layer we can't allocate to higher
// layers because they require a higher minimum bitrate.
if (left_to_allocate < stream.minBitrate * 1000) {
break;
}
// We are allocating to this layer so it is the current active allocation.
top_active_layer = active_layer;
uint32_t allocation =
std::min(left_to_allocate, stream.targetBitrate * 1000);
allocated_bitrates_bps->SetBitrate(active_layer, 0, allocation);
RTC_DCHECK_LE(allocation, left_to_allocate);
left_to_allocate -= allocation;
}
// Next, try allocate remaining bitrate, up to max bitrate, in top active
// stream.
// TODO(sprang): Allocate up to max bitrate for all layers once we have a
// better idea of possible performance implications.
if (left_to_allocate > 0) {
const SimulcastStream& stream = codec_.simulcastStream[top_active_layer];
uint32_t bitrate_bps =
allocated_bitrates_bps->GetSpatialLayerSum(top_active_layer);
uint32_t allocation =
std::min(left_to_allocate, stream.maxBitrate * 1000 - bitrate_bps);
bitrate_bps += allocation;
RTC_DCHECK_LE(allocation, left_to_allocate);
left_to_allocate -= allocation;
allocated_bitrates_bps->SetBitrate(top_active_layer, 0, bitrate_bps);
}
}
void SimulcastRateAllocator::DistributeAllocationToTemporalLayers(
uint32_t framerate,
VideoBitrateAllocation* allocated_bitrates_bps) const {
const int num_spatial_streams =
std::max(1, static_cast<int>(codec_.numberOfSimulcastStreams));
// Finally, distribute the bitrate for the simulcast streams across the
// available temporal layers.
for (int simulcast_id = 0; simulcast_id < num_spatial_streams;
++simulcast_id) {
uint32_t target_bitrate_kbps =
allocated_bitrates_bps->GetBitrate(simulcast_id, 0) / 1000;
if (target_bitrate_kbps == 0) {
continue;
}
const uint32_t expected_allocated_bitrate_kbps = target_bitrate_kbps;
RTC_DCHECK_EQ(
target_bitrate_kbps,
allocated_bitrates_bps->GetSpatialLayerSum(simulcast_id) / 1000);
const int num_temporal_streams = NumTemporalStreams(simulcast_id);
uint32_t max_bitrate_kbps;
// Legacy temporal-layered only screenshare, or simulcast screenshare
// with legacy mode for simulcast stream 0.
const bool conference_screenshare_mode =
codec_.mode == VideoCodecMode::kScreensharing &&
codec_.targetBitrate > 0 &&
((num_spatial_streams == 1 && num_temporal_streams == 2) || // Legacy.
(num_spatial_streams > 1 && simulcast_id == 0)); // Simulcast.
if (conference_screenshare_mode) {
// TODO(holmer): This is a "temporary" hack for screensharing, where we
// interpret the startBitrate as the encoder target bitrate. This is
// to allow for a different max bitrate, so if the codec can't meet
// the target we still allow it to overshoot up to the max before dropping
// frames. This hack should be improved.
int tl0_bitrate = std::min(codec_.targetBitrate, target_bitrate_kbps);
max_bitrate_kbps = std::min(codec_.maxBitrate, target_bitrate_kbps);
target_bitrate_kbps = tl0_bitrate;
} else if (num_spatial_streams == 1) {
max_bitrate_kbps = codec_.maxBitrate;
} else {
max_bitrate_kbps = codec_.simulcastStream[simulcast_id].maxBitrate;
}
std::vector<uint32_t> tl_allocation;
if (num_temporal_streams == 1) {
tl_allocation.push_back(target_bitrate_kbps);
} else {
if (conference_screenshare_mode) {
tl_allocation = ScreenshareTemporalLayerAllocation(
target_bitrate_kbps, max_bitrate_kbps, framerate, simulcast_id);
} else {
tl_allocation = DefaultTemporalLayerAllocation(
target_bitrate_kbps, max_bitrate_kbps, framerate, simulcast_id);
}
}
RTC_DCHECK_GT(tl_allocation.size(), 0);
RTC_DCHECK_LE(tl_allocation.size(), num_temporal_streams);
uint64_t tl_allocation_sum_kbps = 0;
for (size_t tl_index = 0; tl_index < tl_allocation.size(); ++tl_index) {
uint32_t layer_rate_kbps = tl_allocation[tl_index];
if (layer_rate_kbps > 0) {
allocated_bitrates_bps->SetBitrate(simulcast_id, tl_index,
layer_rate_kbps * 1000);
}
tl_allocation_sum_kbps += layer_rate_kbps;
}
RTC_DCHECK_LE(tl_allocation_sum_kbps, expected_allocated_bitrate_kbps);
}
}
std::vector<uint32_t> SimulcastRateAllocator::DefaultTemporalLayerAllocation(
int bitrate_kbps,
int max_bitrate_kbps,
int framerate,
int simulcast_id) const {
const size_t num_temporal_layers = NumTemporalStreams(simulcast_id);
std::vector<uint32_t> bitrates;
for (size_t i = 0; i < num_temporal_layers; ++i) {
float layer_bitrate =
bitrate_kbps * kLayerRateAllocation[num_temporal_layers - 1][i];
bitrates.push_back(static_cast<uint32_t>(layer_bitrate + 0.5));
}
// Allocation table is of aggregates, transform to individual rates.
uint32_t sum = 0;
for (size_t i = 0; i < num_temporal_layers; ++i) {
uint32_t layer_bitrate = bitrates[i];
RTC_DCHECK_LE(sum, bitrates[i]);
bitrates[i] -= sum;
sum = layer_bitrate;
if (sum >= static_cast<uint32_t>(bitrate_kbps)) {
// Sum adds up; any subsequent layers will be 0.
bitrates.resize(i + 1);
break;
}
}
return bitrates;
}
std::vector<uint32_t>
SimulcastRateAllocator::ScreenshareTemporalLayerAllocation(
int bitrate_kbps,
int max_bitrate_kbps,
int framerate,
int simulcast_id) const {
if (simulcast_id > 0) {
return DefaultTemporalLayerAllocation(bitrate_kbps, max_bitrate_kbps,
framerate, simulcast_id);
}
std::vector<uint32_t> allocation;
allocation.push_back(bitrate_kbps);
if (max_bitrate_kbps > bitrate_kbps)
allocation.push_back(max_bitrate_kbps - bitrate_kbps);
return allocation;
}
const VideoCodec& webrtc::SimulcastRateAllocator::GetCodec() const {
return codec_;
}
int SimulcastRateAllocator::NumTemporalStreams(size_t simulcast_id) const {
return std::max<uint8_t>(
1,
codec_.codecType == kVideoCodecVP8 && codec_.numberOfSimulcastStreams == 0
? codec_.VP8().numberOfTemporalLayers
: codec_.simulcastStream[simulcast_id].numberOfTemporalLayers);
}
} // namespace webrtc
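Sketch, not part of the diff: a worked example of the two-pass allocation above, using the per-stream limits from the test fixture later in this commit (min/target/max = 50/100/150, 150/450/600 and 600/1000/1200 kbps). MakeThreeStreamCodec() is a hypothetical helper that fills a VideoCodec with those three streams:

TEST(SimulcastRateAllocatorSketch, TopsUpHighestActiveStream) {
  webrtc::VideoCodec codec = MakeThreeStreamCodec();  // Hypothetical helper.
  webrtc::SimulcastRateAllocator allocator(codec);
  webrtc::VideoBitrateAllocation allocation =
      allocator.GetAllocation(1100 * 1000, 30);
  // Pass 1: streams 0 and 1 reach their targets (100 and 450 kbps);
  // stream 2 cannot meet its 600 kbps minimum and gets nothing.
  // Pass 2: the remaining 550 kbps tops up the highest active stream
  // (stream 1) to its 600 kbps max; the rest is left unallocated.
  EXPECT_EQ(100u * 1000, allocation.GetSpatialLayerSum(0));
  EXPECT_EQ(600u * 1000, allocation.GetSpatialLayerSum(1));
  EXPECT_EQ(0u, allocation.GetSpatialLayerSum(2));
}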

modules/video_coding/utility/simulcast_rate_allocator.h
@@ -1,70 +0,0 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_UTILITY_SIMULCAST_RATE_ALLOCATOR_H_
#define MODULES_VIDEO_CODING_UTILITY_SIMULCAST_RATE_ALLOCATOR_H_
#include <stdint.h>
#include <map>
#include <memory>
#include <vector>
#include "api/video_codecs/video_encoder.h"
#include "common_types.h" // NOLINT(build/include)
#include "common_video/include/video_bitrate_allocator.h"
#include "rtc_base/constructormagic.h"
namespace webrtc {
// Ratio allocation between temporal streams:
// Values as required for the VP8 codec (accumulating).
static const float
kLayerRateAllocation[kMaxSimulcastStreams][kMaxTemporalStreams] = {
{1.0f, 1.0f, 1.0f, 1.0f}, // 1 layer
{0.6f, 1.0f, 1.0f, 1.0f}, // 2 layers {60%, 40%}
{0.4f, 0.6f, 1.0f, 1.0f}, // 3 layers {40%, 20%, 40%}
{0.25f, 0.4f, 0.6f, 1.0f} // 4 layers {25%, 15%, 20%, 40%}
};
class SimulcastRateAllocator : public VideoBitrateAllocator {
public:
explicit SimulcastRateAllocator(const VideoCodec& codec);
VideoBitrateAllocation GetAllocation(uint32_t total_bitrate_bps,
uint32_t framerate) override;
const VideoCodec& GetCodec() const;
private:
void DistributeAllocationToSimulcastLayers(
uint32_t total_bitrate_bps,
VideoBitrateAllocation* allocated_bitrates_bps) const;
void DistributeAllocationToTemporalLayers(
uint32_t framerate,
VideoBitrateAllocation* allocated_bitrates_bps) const;
std::vector<uint32_t> DefaultTemporalLayerAllocation(int bitrate_kbps,
int max_bitrate_kbps,
int framerate,
int simulcast_id) const;
std::vector<uint32_t> ScreenshareTemporalLayerAllocation(
int bitrate_kbps,
int max_bitrate_kbps,
int framerate,
int simulcast_id) const;
int NumTemporalStreams(size_t simulcast_id) const;
const VideoCodec codec_;
RTC_DISALLOW_COPY_AND_ASSIGN(SimulcastRateAllocator);
};
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_UTILITY_SIMULCAST_RATE_ALLOCATOR_H_
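The table above stores accumulating shares, which DefaultTemporalLayerAllocation converts into per-layer rates. A standalone sketch of that transform for the 3-layer row, assuming no other constraints apply:

#include <cstdint>
#include <vector>

// Sketch, not part of the diff. kRates mirrors the 3-layer row of
// kLayerRateAllocation above.
std::vector<uint32_t> SplitAcrossThreeTemporalLayers(uint32_t bitrate_kbps) {
  const float kRates[3] = {0.4f, 0.6f, 1.0f};  // Accumulating shares.
  std::vector<uint32_t> bitrates;
  for (int i = 0; i < 3; ++i)
    bitrates.push_back(static_cast<uint32_t>(bitrate_kbps * kRates[i] + 0.5f));
  // The table stores aggregates; subtract the running sum to get
  // individual layer rates, e.g. 1000 kbps -> {400, 200, 400}.
  uint32_t sum = 0;
  for (int i = 0; i < 3; ++i) {
    uint32_t aggregate = bitrates[i];
    bitrates[i] -= sum;
    sum = aggregate;
  }
  return bitrates;
}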

@@ -8,15 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/utility/simulcast_rate_allocator.h"
#include "modules/video_coding/codecs/vp8/simulcast_rate_allocator.h"
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "modules/video_coding/codecs/vp8/temporal_layers.h"
#include "test/gmock.h"
#include "test/gtest.h"

modules/video_coding/utility/simulcast_test_fixture_impl.cc
@@ -1,829 +0,0 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/utility/simulcast_test_fixture_impl.h"
#include <algorithm>
#include <map>
#include <memory>
#include <vector>
#include "api/video_codecs/sdp_video_format.h"
#include "common_video/include/video_frame.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/video_coding/include/video_coding_defines.h"
#include "rtc_base/checks.h"
#include "test/gtest.h"
using ::testing::_;
using ::testing::AllOf;
using ::testing::Field;
using ::testing::Return;
namespace webrtc {
namespace test {
namespace {
const int kDefaultWidth = 1280;
const int kDefaultHeight = 720;
const int kNumberOfSimulcastStreams = 3;
const int kColorY = 66;
const int kColorU = 22;
const int kColorV = 33;
const int kMaxBitrates[kNumberOfSimulcastStreams] = {150, 600, 1200};
const int kMinBitrates[kNumberOfSimulcastStreams] = {50, 150, 600};
const int kTargetBitrates[kNumberOfSimulcastStreams] = {100, 450, 1000};
const int kDefaultTemporalLayerProfile[3] = {3, 3, 3};
const int kNoTemporalLayerProfile[3] = {0, 0, 0};
template <typename T>
void SetExpectedValues3(T value0, T value1, T value2, T* expected_values) {
expected_values[0] = value0;
expected_values[1] = value1;
expected_values[2] = value2;
}
enum PlaneType {
kYPlane = 0,
kUPlane = 1,
kVPlane = 2,
kNumOfPlanes = 3,
};
} // namespace
class SimulcastTestFixtureImpl::TestEncodedImageCallback
: public EncodedImageCallback {
public:
TestEncodedImageCallback() {
memset(temporal_layer_, -1, sizeof(temporal_layer_));
memset(layer_sync_, false, sizeof(layer_sync_));
}
~TestEncodedImageCallback() {
delete[] encoded_key_frame_._buffer;
delete[] encoded_frame_._buffer;
}
virtual Result OnEncodedImage(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
uint16_t simulcast_idx = 0;
bool is_vp8 = (codec_specific_info->codecType == kVideoCodecVP8);
if (is_vp8) {
simulcast_idx = codec_specific_info->codecSpecific.VP8.simulcastIdx;
} else {
simulcast_idx = codec_specific_info->codecSpecific.H264.simulcast_idx;
}
// Only store the base layer.
if (simulcast_idx == 0) {
if (encoded_image._frameType == kVideoFrameKey) {
delete[] encoded_key_frame_._buffer;
encoded_key_frame_._buffer = new uint8_t[encoded_image._size];
encoded_key_frame_._size = encoded_image._size;
encoded_key_frame_._length = encoded_image._length;
encoded_key_frame_._frameType = kVideoFrameKey;
encoded_key_frame_._completeFrame = encoded_image._completeFrame;
memcpy(encoded_key_frame_._buffer, encoded_image._buffer,
encoded_image._length);
} else {
delete[] encoded_frame_._buffer;
encoded_frame_._buffer = new uint8_t[encoded_image._size];
encoded_frame_._size = encoded_image._size;
encoded_frame_._length = encoded_image._length;
memcpy(encoded_frame_._buffer, encoded_image._buffer,
encoded_image._length);
}
}
if (is_vp8) {
layer_sync_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
codec_specific_info->codecSpecific.VP8.layerSync;
temporal_layer_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
codec_specific_info->codecSpecific.VP8.temporalIdx;
}
return Result(Result::OK, encoded_image._timeStamp);
}
// This method only makes sense for VP8.
void GetLastEncodedFrameInfo(int* temporal_layer,
bool* layer_sync,
int stream) {
*temporal_layer = temporal_layer_[stream];
*layer_sync = layer_sync_[stream];
}
void GetLastEncodedKeyFrame(EncodedImage* encoded_key_frame) {
*encoded_key_frame = encoded_key_frame_;
}
void GetLastEncodedFrame(EncodedImage* encoded_frame) {
*encoded_frame = encoded_frame_;
}
private:
EncodedImage encoded_key_frame_;
EncodedImage encoded_frame_;
int temporal_layer_[kNumberOfSimulcastStreams];
bool layer_sync_[kNumberOfSimulcastStreams];
};
class SimulcastTestFixtureImpl::TestDecodedImageCallback
: public DecodedImageCallback {
public:
TestDecodedImageCallback() : decoded_frames_(0) {}
int32_t Decoded(VideoFrame& decoded_image) override {
rtc::scoped_refptr<I420BufferInterface> i420_buffer =
decoded_image.video_frame_buffer()->ToI420();
for (int i = 0; i < decoded_image.width(); ++i) {
EXPECT_NEAR(kColorY, i420_buffer->DataY()[i], 1);
}
// TODO(mikhal): Verify the difference between U,V and the original.
for (int i = 0; i < i420_buffer->ChromaWidth(); ++i) {
EXPECT_NEAR(kColorU, i420_buffer->DataU()[i], 4);
EXPECT_NEAR(kColorV, i420_buffer->DataV()[i], 4);
}
decoded_frames_++;
return 0;
}
int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
RTC_NOTREACHED();
return -1;
}
void Decoded(VideoFrame& decoded_image,
absl::optional<int32_t> decode_time_ms,
absl::optional<uint8_t> qp) override {
Decoded(decoded_image);
}
int DecodedFrames() { return decoded_frames_; }
private:
int decoded_frames_;
};
namespace {
void SetPlane(uint8_t* data, uint8_t value, int width, int height, int stride) {
for (int i = 0; i < height; i++, data += stride) {
// Setting allocated area to zero - setting only image size to
// requested values - will make it easier to distinguish between image
// size and frame size (accounting for stride).
memset(data, value, width);
memset(data + width, 0, stride - width);
}
}
// Fills in an I420Buffer from |plane_colors|.
void CreateImage(const rtc::scoped_refptr<I420Buffer>& buffer,
int plane_colors[kNumOfPlanes]) {
SetPlane(buffer->MutableDataY(), plane_colors[0], buffer->width(),
buffer->height(), buffer->StrideY());
SetPlane(buffer->MutableDataU(), plane_colors[1], buffer->ChromaWidth(),
buffer->ChromaHeight(), buffer->StrideU());
SetPlane(buffer->MutableDataV(), plane_colors[2], buffer->ChromaWidth(),
buffer->ChromaHeight(), buffer->StrideV());
}
void ConfigureStream(int width,
int height,
int max_bitrate,
int min_bitrate,
int target_bitrate,
SimulcastStream* stream,
int num_temporal_layers) {
assert(stream);
stream->width = width;
stream->height = height;
stream->maxBitrate = max_bitrate;
stream->minBitrate = min_bitrate;
stream->targetBitrate = target_bitrate;
if (num_temporal_layers >= 0) {
stream->numberOfTemporalLayers = num_temporal_layers;
}
stream->qpMax = 45;
stream->active = true;
}
} // namespace
void SimulcastTestFixtureImpl::DefaultSettings(
VideoCodec* settings,
const int* temporal_layer_profile,
VideoCodecType codec_type) {
RTC_CHECK(settings);
memset(settings, 0, sizeof(VideoCodec));
settings->codecType = codec_type;
// 96 to 127 dynamic payload types for video codecs
settings->plType = 120;
settings->startBitrate = 300;
settings->minBitrate = 30;
settings->maxBitrate = 0;
settings->maxFramerate = 30;
settings->width = kDefaultWidth;
settings->height = kDefaultHeight;
settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
settings->active = true;
ASSERT_EQ(3, kNumberOfSimulcastStreams);
settings->timing_frame_thresholds = {kDefaultTimingFramesDelayMs,
kDefaultOutlierFrameSizePercent};
ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
kMinBitrates[0], kTargetBitrates[0],
&settings->simulcastStream[0], temporal_layer_profile[0]);
ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, kMaxBitrates[1],
kMinBitrates[1], kTargetBitrates[1],
&settings->simulcastStream[1], temporal_layer_profile[1]);
ConfigureStream(kDefaultWidth, kDefaultHeight, kMaxBitrates[2],
kMinBitrates[2], kTargetBitrates[2],
&settings->simulcastStream[2], temporal_layer_profile[2]);
if (codec_type == kVideoCodecVP8) {
settings->VP8()->denoisingOn = true;
settings->VP8()->automaticResizeOn = false;
settings->VP8()->frameDroppingOn = true;
settings->VP8()->keyFrameInterval = 3000;
} else {
settings->H264()->frameDroppingOn = true;
settings->H264()->keyFrameInterval = 3000;
}
}
SimulcastTestFixtureImpl::SimulcastTestFixtureImpl(
std::unique_ptr<VideoEncoderFactory> encoder_factory,
std::unique_ptr<VideoDecoderFactory> decoder_factory,
SdpVideoFormat video_format)
: codec_type_(PayloadStringToCodecType(video_format.name)) {
encoder_ = encoder_factory->CreateVideoEncoder(video_format);
decoder_ = decoder_factory->CreateVideoDecoder(video_format);
SetUpCodec(codec_type_ == kVideoCodecVP8 ? kDefaultTemporalLayerProfile
: kNoTemporalLayerProfile);
}
SimulcastTestFixtureImpl::~SimulcastTestFixtureImpl() {
encoder_->Release();
decoder_->Release();
}
void SimulcastTestFixtureImpl::SetUpCodec(const int* temporal_layer_profile) {
encoder_->RegisterEncodeCompleteCallback(&encoder_callback_);
decoder_->RegisterDecodeCompleteCallback(&decoder_callback_);
DefaultSettings(&settings_, temporal_layer_profile, codec_type_);
SetUpRateAllocator();
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1));
input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight);
input_buffer_->InitializeData();
input_frame_.reset(new VideoFrame(input_buffer_, webrtc::kVideoRotation_0,
0 /* timestamp_us */));
}
void SimulcastTestFixtureImpl::SetUpRateAllocator() {
rate_allocator_.reset(new SimulcastRateAllocator(settings_));
}
void SimulcastTestFixtureImpl::SetRates(uint32_t bitrate_kbps, uint32_t fps) {
encoder_->SetRateAllocation(
rate_allocator_->GetAllocation(bitrate_kbps * 1000, fps), fps);
}
void SimulcastTestFixtureImpl::RunActiveStreamsTest(
const std::vector<bool> active_streams) {
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
UpdateActiveStreams(active_streams);
// Set sufficient bitrate for all streams so we can test active without
// bitrate being an issue.
SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
ExpectStreams(kVideoFrameKey, active_streams);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
ExpectStreams(kVideoFrameDelta, active_streams);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
void SimulcastTestFixtureImpl::UpdateActiveStreams(
const std::vector<bool> active_streams) {
ASSERT_EQ(static_cast<int>(active_streams.size()), kNumberOfSimulcastStreams);
for (size_t i = 0; i < active_streams.size(); ++i) {
settings_.simulcastStream[i].active = active_streams[i];
}
// Re initialize the allocator and encoder with the new settings.
// TODO(bugs.webrtc.org/8807): Currently, we do a full "hard"
// reconfiguration of the allocator and encoder. When the video bitrate
// allocator has support for updating active streams without a
// reinitialization, we can just call that here instead.
SetUpRateAllocator();
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
}
void SimulcastTestFixtureImpl::ExpectStreams(
FrameType frame_type,
const std::vector<bool> expected_streams_active) {
ASSERT_EQ(static_cast<int>(expected_streams_active.size()),
kNumberOfSimulcastStreams);
if (expected_streams_active[0]) {
EXPECT_CALL(
encoder_callback_,
OnEncodedImage(
AllOf(Field(&EncodedImage::_frameType, frame_type),
Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
_, _))
.Times(1)
.WillRepeatedly(Return(
EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
}
if (expected_streams_active[1]) {
EXPECT_CALL(
encoder_callback_,
OnEncodedImage(
AllOf(Field(&EncodedImage::_frameType, frame_type),
Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
_, _))
.Times(1)
.WillRepeatedly(Return(
EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
}
if (expected_streams_active[2]) {
EXPECT_CALL(encoder_callback_,
OnEncodedImage(
AllOf(Field(&EncodedImage::_frameType, frame_type),
Field(&EncodedImage::_encodedWidth, kDefaultWidth),
Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
_, _))
.Times(1)
.WillRepeatedly(Return(
EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
}
}
void SimulcastTestFixtureImpl::ExpectStreams(FrameType frame_type,
int expected_video_streams) {
ASSERT_GE(expected_video_streams, 0);
ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
std::vector<bool> expected_streams_active(kNumberOfSimulcastStreams, false);
for (int i = 0; i < expected_video_streams; ++i) {
expected_streams_active[i] = true;
}
ExpectStreams(frame_type, expected_streams_active);
}
void SimulcastTestFixtureImpl::VerifyTemporalIdxAndSyncForAllSpatialLayers(
TestEncodedImageCallback* encoder_callback,
const int* expected_temporal_idx,
const bool* expected_layer_sync,
int num_spatial_layers) {
int temporal_layer = -1;
bool layer_sync = false;
for (int i = 0; i < num_spatial_layers; i++) {
encoder_callback->GetLastEncodedFrameInfo(&temporal_layer, &layer_sync, i);
EXPECT_EQ(expected_temporal_idx[i], temporal_layer);
EXPECT_EQ(expected_layer_sync[i], layer_sync);
}
}
// We currently expect all active streams to generate a key frame even though
// a key frame was only requested for some of them.
void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() {
SetRates(kMaxBitrates[2], 30); // To get all three streams.
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
frame_types[0] = kVideoFrameKey;
ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
frame_types[1] = kVideoFrameKey;
ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
frame_types[2] = kVideoFrameKey;
ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
void SimulcastTestFixtureImpl::TestPaddingAllStreams() {
// We should always encode the base layer.
SetRates(kMinBitrates[0] - 1, 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 1);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
ExpectStreams(kVideoFrameDelta, 1);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
void SimulcastTestFixtureImpl::TestPaddingTwoStreams() {
// We have just enough to get only the first stream and padding for two.
SetRates(kMinBitrates[0], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 1);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
ExpectStreams(kVideoFrameDelta, 1);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
// We are just below limit of sending second stream, so we should get
// the first stream maxed out (at |maxBitrate|), and padding for two.
SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 1);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
ExpectStreams(kVideoFrameDelta, 1);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
void SimulcastTestFixtureImpl::TestPaddingOneStream() {
// We have just enough to send two streams, so padding for one stream.
SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 2);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
ExpectStreams(kVideoFrameDelta, 2);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
// We are just below limit of sending third stream, so we should get
// first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 2);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
ExpectStreams(kVideoFrameDelta, 2);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
void SimulcastTestFixtureImpl::TestSendAllStreams() {
// We have just enough to send all streams.
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 3);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
ExpectStreams(kVideoFrameDelta, 3);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
void SimulcastTestFixtureImpl::TestDisablingStreams() {
// We should get three media streams.
SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 3);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
ExpectStreams(kVideoFrameDelta, 3);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
// We should only get two streams and padding for one.
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
ExpectStreams(kVideoFrameDelta, 2);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
// We should only get the first stream and padding for two.
SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
ExpectStreams(kVideoFrameDelta, 1);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
// We don't have enough bitrate for the thumbnail stream, but we should get
// it anyway with current configuration.
SetRates(kTargetBitrates[0] - 1, 30);
ExpectStreams(kVideoFrameDelta, 1);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
// We should only get two streams and padding for one.
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
// We get a key frame because a new stream is being enabled.
ExpectStreams(kVideoFrameKey, 2);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
// We should get all three streams.
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
// We get a key frame because a new stream is being enabled.
ExpectStreams(kVideoFrameKey, 3);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
void SimulcastTestFixtureImpl::TestActiveStreams() {
// All streams on.
RunActiveStreamsTest({true, true, true});
// All streams off.
RunActiveStreamsTest({false, false, false});
// Low stream off.
RunActiveStreamsTest({false, true, true});
// Middle stream off.
RunActiveStreamsTest({true, false, true});
// High stream off.
RunActiveStreamsTest({true, true, false});
// Only low stream turned on.
RunActiveStreamsTest({true, false, false});
// Only middle stream turned on.
RunActiveStreamsTest({false, true, false});
// Only high stream turned on.
RunActiveStreamsTest({false, false, true});
}
void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) {
const int* temporal_layer_profile = nullptr;
// Disable all streams except the last and set the bitrate of the last to
// 100 kbps. This verifies the way GTP switches to screenshare mode.
if (codec_type_ == kVideoCodecVP8) {
settings_.VP8()->numberOfTemporalLayers = 1;
temporal_layer_profile = kDefaultTemporalLayerProfile;
} else {
temporal_layer_profile = kNoTemporalLayerProfile;
}
settings_.maxBitrate = 100;
settings_.startBitrate = 100;
settings_.width = width;
settings_.height = height;
for (int i = 0; i < settings_.numberOfSimulcastStreams - 1; ++i) {
settings_.simulcastStream[i].maxBitrate = 0;
settings_.simulcastStream[i].width = settings_.width;
settings_.simulcastStream[i].height = settings_.height;
settings_.simulcastStream[i].numberOfTemporalLayers = 1;
}
// Setting input image to new resolution.
input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
input_buffer_->InitializeData();
input_frame_.reset(new VideoFrame(input_buffer_, webrtc::kVideoRotation_0,
0 /* timestamp_us */));
// The for loop above did not set the bitrate of the highest layer.
settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].maxBitrate =
0;
// The highest layer has to correspond to the non-simulcast resolution.
settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width =
settings_.width;
settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height =
settings_.height;
SetUpRateAllocator();
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
// Encode one frame and verify.
SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
EXPECT_CALL(
encoder_callback_,
OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
Field(&EncodedImage::_encodedWidth, width),
Field(&EncodedImage::_encodedHeight, height)),
_, _))
.Times(1)
.WillRepeatedly(Return(
EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
// Switch back.
DefaultSettings(&settings_, temporal_layer_profile, codec_type_);
// Start at the lowest bitrate for enabling base stream.
settings_.startBitrate = kMinBitrates[0];
SetUpRateAllocator();
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
SetRates(settings_.startBitrate, 30);
ExpectStreams(kVideoFrameKey, 1);
// Resize |input_frame_| to the new resolution.
input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
input_buffer_->InitializeData();
input_frame_.reset(new VideoFrame(input_buffer_, webrtc::kVideoRotation_0,
0 /* timestamp_us */));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
void SimulcastTestFixtureImpl::TestSwitchingToOneStream() {
SwitchingToOneStream(1024, 768);
}
void SimulcastTestFixtureImpl::TestSwitchingToOneOddStream() {
SwitchingToOneStream(1023, 769);
}
void SimulcastTestFixtureImpl::TestSwitchingToOneSmallStream() {
SwitchingToOneStream(4, 4);
}
// Test the layer pattern and sync flag for various spatial-temporal patterns.
// 3-3-3 pattern: 3 temporal layers for all spatial streams, so same
// temporal_layer id and layer_sync is expected for all streams.
void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
EXPECT_EQ(codec_type_, kVideoCodecVP8);
TestEncodedImageCallback encoder_callback;
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
SetRates(kMaxBitrates[2], 30); // To get all three streams.
int expected_temporal_idx[3] = {-1, -1, -1};
bool expected_layer_sync[3] = {false, false, false};
// First frame: #0.
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #1.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #2.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #3.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #4.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #5.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
}
// Test the layer pattern and sync flag for various spatial-temporal patterns.
// 3-2-1 pattern: 3 temporal layers for lowest resolution, 2 for middle, and
// 1 temporal layer for highest resolution.
// For this profile, we expect the temporal index pattern to be:
// 1st stream: 0, 2, 1, 2, ....
// 2nd stream: 0, 1, 0, 1, ...
// 3rd stream: -1, -1, -1, -1, ....
// Regarding the 3rd stream, note that a stream/encoder with 1 temporal layer
// should always have temporal layer idx set to kNoTemporalIdx = -1.
// Since CodecSpecificInfoVP8.temporalIdx is uint8_t, this will wrap to 255.
// TODO(marpan): Although this seems safe for now, we should fix this.
void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
EXPECT_EQ(codec_type_, kVideoCodecVP8);
int temporal_layer_profile[3] = {3, 2, 1};
SetUpCodec(temporal_layer_profile);
TestEncodedImageCallback encoder_callback;
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
SetRates(kMaxBitrates[2], 30); // To get all three streams.
int expected_temporal_idx[3] = {-1, -1, -1};
bool expected_layer_sync[3] = {false, false, false};
// First frame: #0.
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #1.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #2.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #3.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #4.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #5.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
}
void SimulcastTestFixtureImpl::TestStrideEncodeDecode() {
TestEncodedImageCallback encoder_callback;
TestDecodedImageCallback decoder_callback;
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
decoder_->RegisterDecodeCompleteCallback(&decoder_callback);
SetRates(kMaxBitrates[2], 30); // To get all three streams.
// Setting two (possibly) problematic use cases for stride:
// 1. stride > width 2. stride_y != stride_uv/2
int stride_y = kDefaultWidth + 20;
int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight, stride_y,
stride_uv, stride_uv);
input_frame_.reset(new VideoFrame(input_buffer_, webrtc::kVideoRotation_0,
0 /* timestamp_us */));
// Set color.
int plane_offset[kNumOfPlanes];
plane_offset[kYPlane] = kColorY;
plane_offset[kUPlane] = kColorU;
plane_offset[kVPlane] = kColorV;
CreateImage(input_buffer_, plane_offset);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
// Change color.
plane_offset[kYPlane] += 1;
plane_offset[kUPlane] += 1;
plane_offset[kVPlane] += 1;
CreateImage(input_buffer_, plane_offset);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
EncodedImage encoded_frame;
// Only encoding one frame - so will be a key frame.
encoder_callback.GetLastEncodedKeyFrame(&encoded_frame);
EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, NULL, 0));
encoder_callback.GetLastEncodedFrame(&encoded_frame);
decoder_->Decode(encoded_frame, false, NULL, 0);
EXPECT_EQ(2, decoder_callback.DecodedFrames());
}
} // namespace test
} // namespace webrtc

modules/video_coding/utility/simulcast_test_fixture_impl.h
@@ -1,91 +0,0 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_UTILITY_SIMULCAST_TEST_FIXTURE_IMPL_H_
#define MODULES_VIDEO_CODING_UTILITY_SIMULCAST_TEST_FIXTURE_IMPL_H_
#include <memory>
#include <vector>
#include "api/test/simulcast_test_fixture.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_frame.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "common_types.h" // NOLINT(build/include)
#include "modules/video_coding/utility/simulcast_rate_allocator.h"
#include "modules/video_coding/include/mock/mock_video_codec_interface.h"
namespace webrtc {
namespace test {
class SimulcastTestFixtureImpl final : public SimulcastTestFixture {
public:
SimulcastTestFixtureImpl(
std::unique_ptr<VideoEncoderFactory> encoder_factory,
std::unique_ptr<VideoDecoderFactory> decoder_factory,
SdpVideoFormat video_format);
~SimulcastTestFixtureImpl() final;
// Implements SimulcastTestFixture.
void TestKeyFrameRequestsOnAllStreams() override;
void TestPaddingAllStreams() override;
void TestPaddingTwoStreams() override;
void TestPaddingTwoStreamsOneMaxedOut() override;
void TestPaddingOneStream() override;
void TestPaddingOneStreamTwoMaxedOut() override;
void TestSendAllStreams() override;
void TestDisablingStreams() override;
void TestActiveStreams() override;
void TestSwitchingToOneStream() override;
void TestSwitchingToOneOddStream() override;
void TestSwitchingToOneSmallStream() override;
void TestSpatioTemporalLayers333PatternEncoder() override;
void TestSpatioTemporalLayers321PatternEncoder() override;
void TestStrideEncodeDecode() override;
static void DefaultSettings(VideoCodec* settings,
const int* temporal_layer_profile,
VideoCodecType codec_type);
private:
class TestEncodedImageCallback;
class TestDecodedImageCallback;
void SetUpCodec(const int* temporal_layer_profile);
void SetUpRateAllocator();
void SetRates(uint32_t bitrate_kbps, uint32_t fps);
void RunActiveStreamsTest(const std::vector<bool> active_streams);
void UpdateActiveStreams(const std::vector<bool> active_streams);
void ExpectStreams(FrameType frame_type,
const std::vector<bool> expected_streams_active);
void ExpectStreams(FrameType frame_type, int expected_video_streams);
void VerifyTemporalIdxAndSyncForAllSpatialLayers(
TestEncodedImageCallback* encoder_callback,
const int* expected_temporal_idx,
const bool* expected_layer_sync,
int num_spatial_layers);
void SwitchingToOneStream(int width, int height);
std::unique_ptr<VideoEncoder> encoder_;
MockEncodedImageCallback encoder_callback_;
std::unique_ptr<VideoDecoder> decoder_;
MockDecodedImageCallback decoder_callback_;
VideoCodec settings_;
rtc::scoped_refptr<I420Buffer> input_buffer_;
std::unique_ptr<VideoFrame> input_frame_;
std::unique_ptr<SimulcastRateAllocator> rate_allocator_;
VideoCodecType codec_type_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_UTILITY_SIMULCAST_TEST_FIXTURE_IMPL_H_
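A sketch of how a codec-specific test might drive this fixture. The two factory helpers are hypothetical; real tests would pass actual VP8 or H264 encoder/decoder factories:

TEST(SimulcastTestFixtureSketch, RunsVp8SimulcastTests) {
  webrtc::test::SimulcastTestFixtureImpl fixture(
      CreateTestVp8EncoderFactory(),  // std::unique_ptr<VideoEncoderFactory>
      CreateTestVp8DecoderFactory(),  // std::unique_ptr<VideoDecoderFactory>
      webrtc::SdpVideoFormat("VP8"));
  fixture.TestKeyFrameRequestsOnAllStreams();
  fixture.TestActiveStreams();
}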

modules/video_coding/utility/simulcast_utility.cc
@@ -1,65 +0,0 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/utility/simulcast_utility.h"
namespace webrtc {
uint32_t SimulcastUtility::SumStreamMaxBitrate(int streams,
const VideoCodec& codec) {
uint32_t bitrate_sum = 0;
for (int i = 0; i < streams; ++i) {
bitrate_sum += codec.simulcastStream[i].maxBitrate;
}
return bitrate_sum;
}
int SimulcastUtility::NumberOfSimulcastStreams(const VideoCodec& codec) {
int streams =
codec.numberOfSimulcastStreams < 1 ? 1 : codec.numberOfSimulcastStreams;
uint32_t simulcast_max_bitrate = SumStreamMaxBitrate(streams, codec);
if (simulcast_max_bitrate == 0) {
streams = 1;
}
return streams;
}
bool SimulcastUtility::ValidSimulcastResolutions(const VideoCodec& codec,
int num_streams) {
if (codec.width != codec.simulcastStream[num_streams - 1].width ||
codec.height != codec.simulcastStream[num_streams - 1].height) {
return false;
}
for (int i = 0; i < num_streams; ++i) {
if (codec.width * codec.simulcastStream[i].height !=
codec.height * codec.simulcastStream[i].width) {
return false;
}
}
for (int i = 1; i < num_streams; ++i) {
if (codec.simulcastStream[i].width !=
codec.simulcastStream[i - 1].width * 2) {
return false;
}
}
return true;
}
bool SimulcastUtility::ValidSimulcastTemporalLayers(const VideoCodec& codec,
int num_streams) {
for (int i = 0; i < num_streams - 1; ++i) {
if (codec.simulcastStream[i].numberOfTemporalLayers !=
codec.simulcastStream[i + 1].numberOfTemporalLayers)
return false;
}
return true;
}
} // namespace webrtc
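Sketch, not part of the diff: the checks above accept the classic quarter/half/full ladder, where every stream shares the input's aspect ratio and each width doubles the previous one:

// Minimal sketch with assumed field values.
bool LadderIsValid() {
  webrtc::VideoCodec codec = {};
  codec.width = 1280;
  codec.height = 720;
  codec.numberOfSimulcastStreams = 3;
  const int kWidths[3] = {320, 640, 1280};
  const int kHeights[3] = {180, 360, 720};
  for (int i = 0; i < 3; ++i) {
    codec.simulcastStream[i].width = kWidths[i];
    codec.simulcastStream[i].height = kHeights[i];
  }
  // True: 1280*180 == 720*320 etc. (aspect ratio), 640 == 2*320 and
  // 1280 == 2*640 (scaling), and stream 2 matches the codec size.
  return webrtc::SimulcastUtility::ValidSimulcastResolutions(codec, 3);
}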

modules/video_coding/utility/simulcast_utility.h
@@ -1,30 +0,0 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_UTILITY_SIMULCAST_UTILITY_H_
#define MODULES_VIDEO_CODING_UTILITY_SIMULCAST_UTILITY_H_
#include "api/video_codecs/video_encoder.h"
namespace webrtc {
class SimulcastUtility {
public:
static uint32_t SumStreamMaxBitrate(int streams, const VideoCodec& codec);
static int NumberOfSimulcastStreams(const VideoCodec& codec);
static bool ValidSimulcastResolutions(const VideoCodec& codec,
int num_streams);
static bool ValidSimulcastTemporalLayers(const VideoCodec& codec,
int num_streams);
};
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_UTILITY_SIMULCAST_UTILITY_H_