Revert of Reuse allocated encoders in SimulcastEncoderAdapter. (patchset #15 id:320001 of https://codereview.webrtc.org/2830793005/ )

Reason for revert:
Breaks Chrome tests.

Original issue's description:
> Reuse allocated encoders in SimulcastEncoderAdapter.
>
> Prior to this change, the SimulcastEncoderAdapter would destroy and recreate
> its encoders whenever it was reinitialized. After this change, the
> SimulcastEncoderAdapter caches the already allocated encoders and reuses
> them after reinitialization.
>
> This change will help in reducing the number of PictureID "jumps" that have
> been seen around encoder reinitialization.
>
> TESTED=AppRTCMobile, Chrome desktop, and internal app, with forced encoder reinits every 30 frames and https://codereview.webrtc.org/2833493003/ applied.
> BUG=webrtc:7475
>
> Review-Url: https://codereview.webrtc.org/2830793005
> Cr-Commit-Position: refs/heads/master@{#18215}
> Committed: 0b8bfb9d98

TBR=stefan@webrtc.org,noahric@chromium.org,glaznev@webrtc.org,sprang@webrtc.org
# Skipping CQ checks because the original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=webrtc:7475

Review-Url: https://codereview.webrtc.org/2893003002
Cr-Commit-Position: refs/heads/master@{#18216}
Author: brandtr
Date: 2017-05-19 10:33:03 -07:00
Committed by: Commit bot
Parent: 0b8bfb9d98
Commit: 56e119e2e8
7 changed files with 72 additions and 620 deletions
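
The reverted CL follows a simple encoder-pooling pattern: rather than destroying encoder instances in Release(), it parks them on a stack (stored_encoders_ in the diff below) so the next InitEncode() can pick them up again, keeping per-encoder state such as the VP8 PictureID continuous across reinitializations. Below is a minimal sketch of that pattern, using hypothetical names (FakeEncoder, EncoderPool) rather than the actual WebRTC classes.

#include <memory>
#include <stack>
#include <utility>

// Hypothetical stand-in for an encoder instance created by a factory.
class FakeEncoder {
 public:
  void InitEncode() { /* (re)configure the instance for new settings */ }
  void Release() { /* stop encoding; internal state (e.g. PictureID) survives */ }
};

// Pool mirroring the stored_encoders_ idea from the reverted CL.
class EncoderPool {
 public:
  // Reuse a previously released encoder if one is available, otherwise
  // allocate a fresh instance.
  std::unique_ptr<FakeEncoder> Acquire() {
    if (!stored_.empty()) {
      std::unique_ptr<FakeEncoder> encoder = std::move(stored_.top());
      stored_.pop();
      return encoder;
    }
    return std::make_unique<FakeEncoder>();
  }

  // Park the encoder instead of destroying it, so a later Acquire() can
  // hand it out again.
  void Store(std::unique_ptr<FakeEncoder> encoder) {
    encoder->Release();
    stored_.push(std::move(encoder));
  }

 private:
  std::stack<std::unique_ptr<FakeEncoder>> stored_;
};

In the reverted code, Release() plays the role of Store() and InitEncode() the role of Acquire(); pooled encoders that are no longer needed are freed by DestroyStoredEncoders().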

webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc

@@ -62,7 +62,7 @@ bool ValidSimulcastResolutions(const webrtc::VideoCodec& codec,
}
int VerifyCodec(const webrtc::VideoCodec* inst) {
if (inst == nullptr) {
if (inst == NULL) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (inst->maxFramerate < 1) {
@@ -127,49 +127,36 @@ class TemporalLayersFactoryAdapter : public webrtc::TemporalLayersFactory {
namespace webrtc {
SimulcastEncoderAdapter::SimulcastEncoderAdapter(VideoEncoderFactory* factory)
: inited_(0),
factory_(factory),
: factory_(factory),
encoded_complete_callback_(nullptr),
implementation_name_("SimulcastEncoderAdapter") {
// The adapter is typically created on the worker thread, but operated on
// the encoder task queue.
encoder_queue_.Detach();
memset(&codec_, 0, sizeof(webrtc::VideoCodec));
}
SimulcastEncoderAdapter::~SimulcastEncoderAdapter() {
RTC_DCHECK(!Initialized());
DestroyStoredEncoders();
Release();
}
int SimulcastEncoderAdapter::Release() {
RTC_DCHECK_CALLED_SEQUENTIALLY(&encoder_queue_);
// TODO(pbos): Keep the last encoder instance but call ::Release() on it, then
// re-use this instance in ::InitEncode(). This means that changing
// resolutions doesn't require reallocation of the first encoder, but only
// reinitialization, which makes sense. Then Destroy this instance instead in
// ~SimulcastEncoderAdapter().
while (!streaminfos_.empty()) {
VideoEncoder* encoder = streaminfos_.back().encoder;
EncodedImageCallback* callback = streaminfos_.back().callback;
encoder->Release();
// Even though it seems very unlikely, there are no guarantees that the
// encoder will not call back after being Release()'d. Therefore, we disable
// the callbacks here.
encoder->RegisterEncodeCompleteCallback(nullptr);
streaminfos_.pop_back(); // Deletes callback adapter.
stored_encoders_.push(encoder);
factory_->Destroy(encoder);
delete callback;
streaminfos_.pop_back();
}
// It's legal to move the encoder to another queue now.
encoder_queue_.Detach();
rtc::AtomicOps::ReleaseStore(&inited_, 0);
return WEBRTC_VIDEO_CODEC_OK;
}
int SimulcastEncoderAdapter::InitEncode(const VideoCodec* inst,
int number_of_cores,
size_t max_payload_size) {
RTC_DCHECK_CALLED_SEQUENTIALLY(&encoder_queue_);
if (number_of_cores < 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
@@ -185,7 +172,6 @@ int SimulcastEncoderAdapter::InitEncode(const VideoCodec* inst,
}
int number_of_streams = NumberOfStreams(*inst);
RTC_DCHECK_LE(number_of_streams, kMaxSimulcastStreams);
const bool doing_simulcast = (number_of_streams > 1);
if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) {
@@ -216,7 +202,7 @@ int SimulcastEncoderAdapter::InitEncode(const VideoCodec* inst,
start_bitrate_kbps =
std::max(codec_.simulcastStream[i].minBitrate, start_bitrate_kbps);
bool highest_resolution_stream = (i == (number_of_streams - 1));
PopulateStreamCodec(codec_, i, start_bitrate_kbps,
PopulateStreamCodec(&codec_, i, start_bitrate_kbps,
highest_resolution_stream, &stream_codec);
}
TemporalLayersFactoryAdapter tl_factory_adapter(i,
@@ -228,17 +214,7 @@ int SimulcastEncoderAdapter::InitEncode(const VideoCodec* inst,
stream_codec.qpMax = kDefaultMaxQp;
}
// If an existing encoder instance exists, reuse it.
// TODO(brandtr): Set initial RTP state (e.g., picture_id/tl0_pic_idx) here,
// when we start storing that state outside the encoder wrappers.
VideoEncoder* encoder;
if (!stored_encoders_.empty()) {
encoder = stored_encoders_.top();
stored_encoders_.pop();
} else {
encoder = factory_->Create();
}
VideoEncoder* encoder = factory_->Create();
ret = encoder->InitEncode(&stream_codec, number_of_cores, max_payload_size);
if (ret < 0) {
// Explicitly destroy the current encoder; because we haven't registered a
@@ -247,30 +223,21 @@ int SimulcastEncoderAdapter::InitEncode(const VideoCodec* inst,
Release();
return ret;
}
std::unique_ptr<EncodedImageCallback> callback(
new AdapterEncodedImageCallback(this, i));
encoder->RegisterEncodeCompleteCallback(callback.get());
streaminfos_.emplace_back(encoder, std::move(callback), stream_codec.width,
stream_codec.height, start_bitrate_kbps > 0);
if (i != 0) {
EncodedImageCallback* callback = new AdapterEncodedImageCallback(this, i);
encoder->RegisterEncodeCompleteCallback(callback);
streaminfos_.push_back(StreamInfo(encoder, callback, stream_codec.width,
stream_codec.height,
start_bitrate_kbps > 0));
if (i != 0)
implementation_name += ", ";
}
implementation_name += streaminfos_[i].encoder->ImplementationName();
}
if (doing_simulcast) {
implementation_name_ =
"SimulcastEncoderAdapter (" + implementation_name + ")";
} else {
implementation_name_ = implementation_name;
}
// To save memory, don't store encoders that we don't use.
DestroyStoredEncoders();
rtc::AtomicOps::ReleaseStore(&inited_, 1);
return WEBRTC_VIDEO_CODEC_OK;
}
@@ -278,12 +245,10 @@ int SimulcastEncoderAdapter::Encode(
const VideoFrame& input_image,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
RTC_DCHECK_CALLED_SEQUENTIALLY(&encoder_queue_);
if (!Initialized()) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (encoded_complete_callback_ == nullptr) {
if (encoded_complete_callback_ == NULL) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
@@ -310,9 +275,8 @@ int SimulcastEncoderAdapter::Encode(
int src_height = input_image.height();
for (size_t stream_idx = 0; stream_idx < streaminfos_.size(); ++stream_idx) {
// Don't encode frames in resolutions that we don't intend to send.
if (!streaminfos_[stream_idx].send_stream) {
if (!streaminfos_[stream_idx].send_stream)
continue;
}
std::vector<FrameType> stream_frame_types;
if (send_key_frame) {
@@ -374,14 +338,12 @@ int SimulcastEncoderAdapter::Encode(
int SimulcastEncoderAdapter::RegisterEncodeCompleteCallback(
EncodedImageCallback* callback) {
RTC_DCHECK_CALLED_SEQUENTIALLY(&encoder_queue_);
encoded_complete_callback_ = callback;
return WEBRTC_VIDEO_CODEC_OK;
}
int SimulcastEncoderAdapter::SetChannelParameters(uint32_t packet_loss,
int64_t rtt) {
RTC_DCHECK_CALLED_SEQUENTIALLY(&encoder_queue_);
for (size_t stream_idx = 0; stream_idx < streaminfos_.size(); ++stream_idx) {
streaminfos_[stream_idx].encoder->SetChannelParameters(packet_loss, rtt);
}
@@ -390,26 +352,20 @@ int SimulcastEncoderAdapter::SetChannelParameters(uint32_t packet_loss,
int SimulcastEncoderAdapter::SetRateAllocation(const BitrateAllocation& bitrate,
uint32_t new_framerate) {
RTC_DCHECK_CALLED_SEQUENTIALLY(&encoder_queue_);
if (!Initialized()) {
if (!Initialized())
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (new_framerate < 1) {
if (new_framerate < 1)
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (codec_.maxBitrate > 0 && bitrate.get_sum_kbps() > codec_.maxBitrate) {
if (codec_.maxBitrate > 0 && bitrate.get_sum_kbps() > codec_.maxBitrate)
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (bitrate.get_sum_bps() > 0) {
// Make sure the bitrate fits the configured min bitrates. 0 is a special
// value that means paused, though, so leave it alone.
if (bitrate.get_sum_kbps() < codec_.minBitrate) {
if (bitrate.get_sum_kbps() < codec_.minBitrate)
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (codec_.numberOfSimulcastStreams > 0 &&
bitrate.get_sum_kbps() < codec_.simulcastStream[0].minBitrate) {
@@ -432,9 +388,8 @@ int SimulcastEncoderAdapter::SetRateAllocation(const BitrateAllocation& bitrate,
// Slice the temporal layers out of the full allocation and pass it on to
// the encoder handling the current simulcast stream.
BitrateAllocation stream_allocation;
for (int i = 0; i < kMaxTemporalStreams; ++i) {
for (int i = 0; i < kMaxTemporalStreams; ++i)
stream_allocation.SetBitrate(0, i, bitrate.GetBitrate(stream_idx, i));
}
streaminfos_[stream_idx].encoder->SetRateAllocation(stream_allocation,
new_framerate);
}
@@ -442,8 +397,6 @@ int SimulcastEncoderAdapter::SetRateAllocation(const BitrateAllocation& bitrate,
return WEBRTC_VIDEO_CODEC_OK;
}
// TODO(brandtr): Add task checker to this member function, when all encoder
// callbacks are coming in on the encoder queue.
EncodedImageCallback::Result SimulcastEncoderAdapter::OnEncodedImage(
size_t stream_idx,
const EncodedImage& encodedImage,
@@ -459,25 +412,24 @@ EncodedImageCallback::Result SimulcastEncoderAdapter::OnEncodedImage(
}
void SimulcastEncoderAdapter::PopulateStreamCodec(
const webrtc::VideoCodec& inst,
const webrtc::VideoCodec* inst,
int stream_index,
uint32_t start_bitrate_kbps,
bool highest_resolution_stream,
webrtc::VideoCodec* stream_codec) {
*stream_codec = inst;
*stream_codec = *inst;
// Stream specific settings.
stream_codec->VP8()->numberOfTemporalLayers =
inst.simulcastStream[stream_index].numberOfTemporalLayers;
inst->simulcastStream[stream_index].numberOfTemporalLayers;
stream_codec->numberOfSimulcastStreams = 0;
stream_codec->width = inst.simulcastStream[stream_index].width;
stream_codec->height = inst.simulcastStream[stream_index].height;
stream_codec->maxBitrate = inst.simulcastStream[stream_index].maxBitrate;
stream_codec->minBitrate = inst.simulcastStream[stream_index].minBitrate;
stream_codec->qpMax = inst.simulcastStream[stream_index].qpMax;
stream_codec->width = inst->simulcastStream[stream_index].width;
stream_codec->height = inst->simulcastStream[stream_index].height;
stream_codec->maxBitrate = inst->simulcastStream[stream_index].maxBitrate;
stream_codec->minBitrate = inst->simulcastStream[stream_index].minBitrate;
stream_codec->qpMax = inst->simulcastStream[stream_index].qpMax;
// Settings that are based on stream/resolution.
const bool lowest_resolution_stream = (stream_index == 0);
if (lowest_resolution_stream) {
if (stream_index == 0) {
// Settings for lowest spatial resolutions.
stream_codec->qpMax = kLowestResMaxQp;
}
@@ -497,42 +449,28 @@ void SimulcastEncoderAdapter::PopulateStreamCodec(
}
bool SimulcastEncoderAdapter::Initialized() const {
return rtc::AtomicOps::AcquireLoad(&inited_) == 1;
}
void SimulcastEncoderAdapter::DestroyStoredEncoders() {
while (!stored_encoders_.empty()) {
VideoEncoder* encoder = stored_encoders_.top();
factory_->Destroy(encoder);
stored_encoders_.pop();
}
return !streaminfos_.empty();
}
bool SimulcastEncoderAdapter::SupportsNativeHandle() const {
RTC_DCHECK_CALLED_SEQUENTIALLY(&encoder_queue_);
// We should not be calling this method before streaminfos_ are configured.
RTC_DCHECK(!streaminfos_.empty());
for (const auto& streaminfo : streaminfos_) {
if (!streaminfo.encoder->SupportsNativeHandle()) {
if (!streaminfo.encoder->SupportsNativeHandle())
return false;
}
}
return true;
}
VideoEncoder::ScalingSettings SimulcastEncoderAdapter::GetScalingSettings()
const {
// TODO(brandtr): Investigate why the sequence checker below fails on mac.
// RTC_DCHECK_CALLED_SEQUENTIALLY(&encoder_queue_);
// Turn off quality scaling for simulcast.
if (!Initialized() || NumberOfStreams(codec_) != 1) {
if (!Initialized() || NumberOfStreams(codec_) != 1)
return VideoEncoder::ScalingSettings(false);
}
return streaminfos_[0].encoder->GetScalingSettings();
}
const char* SimulcastEncoderAdapter::ImplementationName() const {
RTC_DCHECK_CALLED_SEQUENTIALLY(&encoder_queue_);
return implementation_name_.c_str();
}
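
The AdapterEncodedImageCallback instances created in InitEncode() above are per-stream wrappers: each one remembers its simulcast stream index and forwards encoded images to the adapter's OnEncodedImage(), which tags the frame with that index (the simulcastIdx used by downstream consumers such as the PayloadRouter). The wrapper's definition is outside the hunks shown; the sketch below illustrates the idea against the signatures visible in this file and should be read as an assumption, not the actual class body.

// Sketch only; the real AdapterEncodedImageCallback may differ in detail.
class PerStreamCallbackSketch : public webrtc::EncodedImageCallback {
 public:
  PerStreamCallbackSketch(webrtc::SimulcastEncoderAdapter* adapter,
                          size_t stream_idx)
      : adapter_(adapter), stream_idx_(stream_idx) {}

  Result OnEncodedImage(
      const webrtc::EncodedImage& encoded_image,
      const webrtc::CodecSpecificInfo* codec_specific_info,
      const webrtc::RTPFragmentationHeader* fragmentation) override {
    // Forward together with the stream index so the adapter can report the
    // correct simulcast index for this encoded frame.
    return adapter_->OnEncodedImage(stream_idx_, encoded_image,
                                    codec_specific_info, fragmentation);
  }

 private:
  webrtc::SimulcastEncoderAdapter* const adapter_;
  const size_t stream_idx_;
};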

webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h

@@ -13,21 +13,15 @@
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_ENCODER_ADAPTER_H_
#include <memory>
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "webrtc/base/atomicops.h"
#include "webrtc/base/sequenced_task_checker.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
namespace webrtc {
class SimulcastRateAllocator;
// TODO(brandtr): Remove this class and replace its use with a
// WebRtcVideoEncoderFactory.
class VideoEncoderFactory {
public:
virtual VideoEncoder* Create() = 0;
@@ -37,16 +31,14 @@ class VideoEncoderFactory {
// SimulcastEncoderAdapter implements simulcast support by creating multiple
// webrtc::VideoEncoder instances with the given VideoEncoderFactory.
// The object is created and destroyed on the worker thread, but all public
// interfaces should be called from the encoder task queue.
// All the public interfaces are expected to be called from the same thread,
// e.g the encoder thread.
class SimulcastEncoderAdapter : public VP8Encoder {
public:
// TODO(brandtr): Make it clear that the ownership of |factory| is transferred
// by only accepting a std::unique_ptr<VideoEncoderFactory> here.
explicit SimulcastEncoderAdapter(VideoEncoderFactory* factory);
virtual ~SimulcastEncoderAdapter();
// Implements VideoEncoder.
// Implements VideoEncoder
int Release() override;
int InitEncode(const VideoCodec* inst,
int number_of_cores,
@@ -75,50 +67,47 @@ class SimulcastEncoderAdapter : public VP8Encoder {
private:
struct StreamInfo {
StreamInfo()
: encoder(NULL),
callback(NULL),
width(0),
height(0),
key_frame_request(false),
send_stream(true) {}
StreamInfo(VideoEncoder* encoder,
std::unique_ptr<EncodedImageCallback> callback,
EncodedImageCallback* callback,
uint16_t width,
uint16_t height,
bool send_stream)
: encoder(encoder),
callback(std::move(callback)),
callback(callback),
width(width),
height(height),
key_frame_request(false),
send_stream(send_stream) {}
// Deleted by SimulcastEncoderAdapter::DestroyStoredEncoders().
// Deleted by SimulcastEncoderAdapter::Release().
VideoEncoder* encoder;
std::unique_ptr<EncodedImageCallback> callback;
EncodedImageCallback* callback;
uint16_t width;
uint16_t height;
bool key_frame_request;
bool send_stream;
};
// Populate the codec settings for each simulcast stream.
static void PopulateStreamCodec(const webrtc::VideoCodec& inst,
int stream_index,
uint32_t start_bitrate_kbps,
bool highest_resolution_stream,
webrtc::VideoCodec* stream_codec);
// Populate the codec settings for each stream.
void PopulateStreamCodec(const webrtc::VideoCodec* inst,
int stream_index,
uint32_t start_bitrate_kbps,
bool highest_resolution_stream,
webrtc::VideoCodec* stream_codec);
bool Initialized() const;
void DestroyStoredEncoders();
volatile int inited_; // Accessed atomically.
const std::unique_ptr<VideoEncoderFactory> factory_;
std::unique_ptr<VideoEncoderFactory> factory_;
VideoCodec codec_;
std::vector<StreamInfo> streaminfos_;
EncodedImageCallback* encoded_complete_callback_;
std::string implementation_name_;
// Used for checking the single-threaded access of the encoder interface.
rtc::SequencedTaskChecker encoder_queue_;
// Store encoders in between calls to Release and InitEncode, so they don't
// have to be recreated. Remaining encoders are destroyed by the destructor.
std::stack<VideoEncoder*> stored_encoders_;
};
} // namespace webrtc
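
The VideoEncoderFactory interface above (Create(), plus the Destroy() call used from simulcast_encoder_adapter.cc) is what callers pass to the adapter's constructor, and the factory_ member shows that the adapter takes ownership via std::unique_ptr. A minimal concrete factory might look like the sketch below, assuming the VP8Encoder::Create() entry point from the vp8.h header included above; the names and ownership details are illustrative assumptions, not part of this change.

// Illustrative sketch only.
class Vp8EncoderFactorySketch : public webrtc::VideoEncoderFactory {
 public:
  webrtc::VideoEncoder* Create() override {
    // Assumed: VP8Encoder::Create() returns a heap-allocated encoder that is
    // later handed back to Destroy().
    return webrtc::VP8Encoder::Create();
  }
  void Destroy(webrtc::VideoEncoder* encoder) override { delete encoder; }
};

// Usage (sketch): the adapter takes ownership of the factory and creates one
// underlying encoder per simulcast stream through it in InitEncode().
//   webrtc::SimulcastEncoderAdapter adapter(new Vp8EncoderFactorySketch());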

webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc

@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include <array>
#include <memory>
#include <vector>
@@ -118,7 +117,7 @@ class MockVideoEncoder : public VideoEncoder {
return 0;
}
MOCK_METHOD0(Release, int32_t());
int32_t Release() /* override */ { return 0; }
int32_t SetRateAllocation(const BitrateAllocation& bitrate_allocation,
uint32_t framerate) {
@@ -143,7 +142,7 @@ class MockVideoEncoder : public VideoEncoder {
image._encodedHeight = height;
CodecSpecificInfo codec_specific_info;
memset(&codec_specific_info, 0, sizeof(codec_specific_info));
callback_->OnEncodedImage(image, &codec_specific_info, nullptr);
callback_->OnEncodedImage(image, &codec_specific_info, NULL);
}
void set_supports_native_handle(bool enabled) {
@@ -244,11 +243,7 @@ class TestSimulcastEncoderAdapterFake : public ::testing::Test,
last_encoded_image_width_(-1),
last_encoded_image_height_(-1),
last_encoded_image_simulcast_index_(-1) {}
virtual ~TestSimulcastEncoderAdapterFake() {
if (adapter_) {
adapter_->Release();
}
}
virtual ~TestSimulcastEncoderAdapterFake() {}
Result OnEncodedImage(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
@@ -372,17 +367,6 @@ TEST_F(TestSimulcastEncoderAdapterFake, InitEncode) {
VerifyCodecSettings();
}
TEST_F(TestSimulcastEncoderAdapterFake, ReleaseWithoutInitEncode) {
EXPECT_EQ(0, adapter_->Release());
}
TEST_F(TestSimulcastEncoderAdapterFake, Reinit) {
SetupCodec();
EXPECT_EQ(0, adapter_->Release());
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
}
TEST_F(TestSimulcastEncoderAdapterFake, SetChannelParameters) {
SetupCodec();
const uint32_t packetLoss = 5;
@@ -402,9 +386,8 @@ TEST_F(TestSimulcastEncoderAdapterFake, EncodedCallbackForDifferentEncoders) {
// resolutions, to test that the adapter forwards on the correct resolution
// and simulcast index values, going only off the encoder that generates the
// image.
std::vector<MockVideoEncoder*> encoders = helper_->factory()->encoders();
ASSERT_EQ(3u, encoders.size());
encoders[0]->SendEncodedImage(1152, 704);
EXPECT_EQ(3u, helper_->factory()->encoders().size());
helper_->factory()->encoders()[0]->SendEncodedImage(1152, 704);
int width;
int height;
int simulcast_index;
@@ -413,224 +396,19 @@ TEST_F(TestSimulcastEncoderAdapterFake, EncodedCallbackForDifferentEncoders) {
EXPECT_EQ(704, height);
EXPECT_EQ(0, simulcast_index);
encoders[1]->SendEncodedImage(300, 620);
helper_->factory()->encoders()[1]->SendEncodedImage(300, 620);
EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
EXPECT_EQ(300, width);
EXPECT_EQ(620, height);
EXPECT_EQ(1, simulcast_index);
encoders[2]->SendEncodedImage(120, 240);
helper_->factory()->encoders()[2]->SendEncodedImage(120, 240);
EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
EXPECT_EQ(120, width);
EXPECT_EQ(240, height);
EXPECT_EQ(2, simulcast_index);
}
// This test verifies that the underlying encoders are reused, when the adapter
// is reinited with different number of simulcast streams. It further checks
// that the allocated encoders are reused in the same order as before, starting
// with the lowest stream.
TEST_F(TestSimulcastEncoderAdapterFake, ReusesEncodersInOrder) {
// Set up common settings for three streams.
TestVp8Simulcast::DefaultSettings(
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
rate_allocator_.reset(new SimulcastRateAllocator(codec_, nullptr));
tl_factory_.SetListener(rate_allocator_.get());
codec_.VP8()->tl_factory = &tl_factory_;
adapter_->RegisterEncodeCompleteCallback(this);
// Input data.
rtc::scoped_refptr<VideoFrameBuffer> buffer(I420Buffer::Create(1280, 720));
VideoFrame input_frame(buffer, 100, 1000, kVideoRotation_180);
std::vector<FrameType> frame_types;
// Encode with three streams.
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
VerifyCodecSettings();
std::vector<MockVideoEncoder*> original_encoders =
helper_->factory()->encoders();
ASSERT_EQ(3u, original_encoders.size());
EXPECT_CALL(*original_encoders[0], Encode(_, _, _))
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(*original_encoders[1], Encode(_, _, _))
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(*original_encoders[2], Encode(_, _, _))
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
frame_types.resize(3, kVideoFrameKey);
EXPECT_EQ(0, adapter_->Encode(input_frame, nullptr, &frame_types));
EXPECT_CALL(*original_encoders[0], Release())
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(*original_encoders[1], Release())
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(*original_encoders[2], Release())
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_EQ(0, adapter_->Release());
// Encode with two streams.
codec_.width /= 2;
codec_.height /= 2;
codec_.numberOfSimulcastStreams = 2;
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
std::vector<MockVideoEncoder*> new_encoders = helper_->factory()->encoders();
ASSERT_EQ(2u, new_encoders.size());
ASSERT_EQ(original_encoders[0], new_encoders[0]);
EXPECT_CALL(*original_encoders[0], Encode(_, _, _))
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
ASSERT_EQ(original_encoders[1], new_encoders[1]);
EXPECT_CALL(*original_encoders[1], Encode(_, _, _))
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
frame_types.resize(2, kVideoFrameKey);
EXPECT_EQ(0, adapter_->Encode(input_frame, nullptr, &frame_types));
EXPECT_CALL(*original_encoders[0], Release())
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(*original_encoders[1], Release())
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_EQ(0, adapter_->Release());
// Encode with single stream.
codec_.width /= 2;
codec_.height /= 2;
codec_.numberOfSimulcastStreams = 1;
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
new_encoders = helper_->factory()->encoders();
ASSERT_EQ(1u, new_encoders.size());
ASSERT_EQ(original_encoders[0], new_encoders[0]);
EXPECT_CALL(*original_encoders[0], Encode(_, _, _))
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
frame_types.resize(1, kVideoFrameKey);
EXPECT_EQ(0, adapter_->Encode(input_frame, nullptr, &frame_types));
EXPECT_CALL(*original_encoders[0], Release())
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_EQ(0, adapter_->Release());
// Encode with three streams, again.
codec_.width *= 4;
codec_.height *= 4;
codec_.numberOfSimulcastStreams = 3;
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
new_encoders = helper_->factory()->encoders();
ASSERT_EQ(3u, new_encoders.size());
// The first encoder is reused.
ASSERT_EQ(original_encoders[0], new_encoders[0]);
EXPECT_CALL(*original_encoders[0], Encode(_, _, _))
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
// The second and third encoders are new.
EXPECT_CALL(*new_encoders[1], Encode(_, _, _))
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(*new_encoders[2], Encode(_, _, _))
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
frame_types.resize(3, kVideoFrameKey);
EXPECT_EQ(0, adapter_->Encode(input_frame, nullptr, &frame_types));
EXPECT_CALL(*original_encoders[0], Release())
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(*new_encoders[1], Release())
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(*new_encoders[2], Release())
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_EQ(0, adapter_->Release());
}
TEST_F(TestSimulcastEncoderAdapterFake, DoesNotLeakEncoders) {
SetupCodec();
VerifyCodecSettings();
EXPECT_EQ(3u, helper_->factory()->encoders().size());
// The adapter should destroy all encoders it has allocated. Since
// |helper_->factory()| is owned by |adapter_|, however, we need to rely on
// lsan to find leaks here.
EXPECT_EQ(0, adapter_->Release());
adapter_.reset();
}
// This test verifies that an adapter reinit with the same codec settings as
// before does not change the underlying encoder codec settings.
TEST_F(TestSimulcastEncoderAdapterFake, ReinitDoesNotReorderEncoderSettings) {
SetupCodec();
VerifyCodecSettings();
// Capture current codec settings.
std::vector<MockVideoEncoder*> encoders = helper_->factory()->encoders();
ASSERT_EQ(3u, encoders.size());
std::array<VideoCodec, 3> codecs_before;
for (int i = 0; i < 3; ++i) {
codecs_before[i] = encoders[i]->codec();
}
// Reinitialize and verify that the new codec settings are the same.
EXPECT_EQ(0, adapter_->Release());
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
for (int i = 0; i < 3; ++i) {
const VideoCodec& codec_before = codecs_before[i];
const VideoCodec& codec_after = encoders[i]->codec();
// webrtc::VideoCodec does not implement operator==.
EXPECT_EQ(codec_before.codecType, codec_after.codecType);
EXPECT_EQ(codec_before.plType, codec_after.plType);
EXPECT_EQ(codec_before.width, codec_after.width);
EXPECT_EQ(codec_before.height, codec_after.height);
EXPECT_EQ(codec_before.startBitrate, codec_after.startBitrate);
EXPECT_EQ(codec_before.maxBitrate, codec_after.maxBitrate);
EXPECT_EQ(codec_before.minBitrate, codec_after.minBitrate);
EXPECT_EQ(codec_before.targetBitrate, codec_after.targetBitrate);
EXPECT_EQ(codec_before.maxFramerate, codec_after.maxFramerate);
EXPECT_EQ(codec_before.qpMax, codec_after.qpMax);
EXPECT_EQ(codec_before.numberOfSimulcastStreams,
codec_after.numberOfSimulcastStreams);
EXPECT_EQ(codec_before.mode, codec_after.mode);
EXPECT_EQ(codec_before.expect_encode_from_texture,
codec_after.expect_encode_from_texture);
}
}
// This test is similar to the one above, except that it tests the simulcastIdx
// from the CodecSpecificInfo that is connected to an encoded frame. The
// PayloadRouter demuxes the incoming encoded frames on different RTP modules
// using the simulcastIdx, so it's important that there is no corresponding
// encoder reordering in between adapter reinits as this would lead to PictureID
// discontinuities.
TEST_F(TestSimulcastEncoderAdapterFake, ReinitDoesNotReorderFrameSimulcastIdx) {
SetupCodec();
adapter_->SetRateAllocation(rate_allocator_->GetAllocation(1200, 30), 30);
VerifyCodecSettings();
// Send frames on all streams.
std::vector<MockVideoEncoder*> encoders = helper_->factory()->encoders();
ASSERT_EQ(3u, encoders.size());
encoders[0]->SendEncodedImage(1152, 704);
int width;
int height;
int simulcast_index;
EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
EXPECT_EQ(0, simulcast_index);
encoders[1]->SendEncodedImage(300, 620);
EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
EXPECT_EQ(1, simulcast_index);
encoders[2]->SendEncodedImage(120, 240);
EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
EXPECT_EQ(2, simulcast_index);
// Reinitialize.
EXPECT_EQ(0, adapter_->Release());
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
adapter_->SetRateAllocation(rate_allocator_->GetAllocation(1200, 30), 30);
// Verify that the same encoder sends out frames on the same simulcast index.
encoders[0]->SendEncodedImage(1152, 704);
EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
EXPECT_EQ(0, simulcast_index);
encoders[1]->SendEncodedImage(300, 620);
EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
EXPECT_EQ(1, simulcast_index);
encoders[2]->SendEncodedImage(120, 240);
EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
EXPECT_EQ(2, simulcast_index);
}
TEST_F(TestSimulcastEncoderAdapterFake, SupportsNativeHandleForSingleStreams) {
TestVp8Simulcast::DefaultSettings(
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
@@ -689,7 +467,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, SupportsImplementationName) {
adapter_->ImplementationName());
// Single streams should not expose "SimulcastEncoderAdapter" in name.
EXPECT_EQ(0, adapter_->Release());
adapter_->Release();
codec_.numberOfSimulcastStreams = 1;
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
adapter_->RegisterEncodeCompleteCallback(this);
@@ -750,7 +528,7 @@ TEST_F(TestSimulcastEncoderAdapterFake,
for (MockVideoEncoder* encoder : helper_->factory()->encoders())
EXPECT_CALL(*encoder, Encode(::testing::Ref(input_frame), _, _)).Times(1);
std::vector<FrameType> frame_types(3, kVideoFrameKey);
EXPECT_EQ(0, adapter_->Encode(input_frame, nullptr, &frame_types));
EXPECT_EQ(0, adapter_->Encode(input_frame, NULL, &frame_types));
}
TEST_F(TestSimulcastEncoderAdapterFake, TestFailureReturnCodesFromEncodeCalls) {