Reland "Inform VideoEncoder of negotiated capabilities"

This is a reland of 11dfff0878c949f2e19d95a0ddc209cdad94b3b4

Now that I am sure that WebRTC code no longer calls the obsolete
versions, I simply remove the NOT_REACHED and have the old versions
call the new one, so as not to trip up downstream projects.
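
As a rough sketch of that forwarding (this is not the literal
api/video_codecs/video_encoder.h change, which is not among the hunks
below; the Settings and Capabilities constructors are taken from the
updated call sites in this CL):

  // Sketch only: the old three-argument InitEncode() forwards to the new
  // overload instead of hitting NOT_REACHED. The bool passed to
  // Capabilities is assumed to be false by default, as in the updated
  // tests in this CL.
  int32_t VideoEncoder::InitEncode(const VideoCodec* codec_settings,
                                   int32_t number_of_cores,
                                   size_t max_payload_size) {
    const Capabilities capabilities(false);
    return InitEncode(
        codec_settings,
        Settings(capabilities, number_of_cores, max_payload_size));
  }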

Original change's description:
> Inform VideoEncoder of negotiated capabilities
>
> After this CL lands, an announcement will be made to
> discuss-webrtc about the deprecation of one version
> of InitEncode().
>
> Bug: webrtc:10720
> Change-Id: Ib992af0272bbb16ae16ef7e69491f365702d179e
> Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/140884
> Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
> Reviewed-by: Sami Kalliomäki <sakal@webrtc.org>
> Reviewed-by: Erik Språng <sprang@webrtc.org>
> Commit-Queue: Elad Alon <eladalon@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#28224}

TBR=sakal@webrtc.org,kwiberg@webrtc.org,sprang@webrtc.org

Bug: webrtc:10720
Change-Id: I46c69e45c190805c07f7e51acbe277d7eebd1600
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/141412
Commit-Queue: Elad Alon <eladalon@webrtc.org>
Reviewed-by: Elad Alon <eladalon@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#28236}
Elad Alon authored on 2019-06-11 14:57:57 +02:00, committed by Commit Bot
parent 95e0a607f6
commit 370f93a34a
59 changed files with 444 additions and 319 deletions
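
For orientation before the per-file hunks: the caller-side migration, as it
appears throughout the updated tests below, looks roughly like this. The
helper name and the constant values are illustrative, not taken from any
particular file in this CL.

  #include "api/video_codecs/video_codec.h"
  #include "api/video_codecs/video_encoder.h"

  namespace webrtc {
  // Illustrative only: callers now bundle the core count and max payload
  // size together with the negotiated Capabilities in a Settings object,
  // instead of passing them as separate InitEncode() arguments.
  int32_t InitEncoderForTest(VideoEncoder* encoder,
                             VideoCodec* codec_settings) {
    const VideoEncoder::Capabilities kCapabilities(false);
    const VideoEncoder::Settings kSettings(kCapabilities,
                                           /*number_of_cores=*/1,
                                           /*max_payload_size=*/1200);
    return encoder->InitEncode(codec_settings, kSettings);
  }
  }  // namespace webrtc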


@ -191,8 +191,7 @@ H264EncoderImpl::~H264EncoderImpl() {
}
int32_t H264EncoderImpl::InitEncode(const VideoCodec* inst,
int32_t number_of_cores,
size_t max_payload_size) {
const VideoEncoder::Settings& settings) {
ReportInit();
if (!inst || inst->codecType != kVideoCodecH264) {
ReportError();
@ -226,8 +225,8 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* inst,
pictures_.resize(number_of_streams);
configurations_.resize(number_of_streams);
number_of_cores_ = number_of_cores;
max_payload_size_ = max_payload_size;
number_of_cores_ = settings.number_of_cores;
max_payload_size_ = settings.max_payload_size;
codec_ = *inst;
// Code expects simulcastStream resolutions to be correct, make sure they are


@ -25,6 +25,7 @@
#include <vector>
#include "api/video/i420_buffer.h"
#include "api/video_codecs/video_encoder.h"
#include "common_video/h264/h264_bitstream_parser.h"
#include "modules/video_coding/codecs/h264/include/h264.h"
#include "modules/video_coding/utility/quality_scaler.h"
@ -56,7 +57,7 @@ class H264EncoderImpl : public H264Encoder {
explicit H264EncoderImpl(const cricket::VideoCodec& codec);
~H264EncoderImpl() override;
// |max_payload_size| is ignored.
// |settings.max_payload_size| is ignored.
// The following members of |codec_settings| are used. The rest are ignored.
// - codecType (must be kVideoCodecH264)
// - targetBitrate
@ -64,8 +65,7 @@ class H264EncoderImpl : public H264Encoder {
// - width
// - height
int32_t InitEncode(const VideoCodec* codec_settings,
int32_t number_of_cores,
size_t max_payload_size) override;
const VideoEncoder::Settings& settings) override;
int32_t Release() override;
int32_t RegisterEncodeCompleteCallback(


@ -11,6 +11,7 @@
#include "modules/video_coding/codecs/h264/h264_encoder_impl.h"
#include "api/video_codecs/video_encoder.h"
#include "test/gtest.h"
namespace webrtc {
@ -20,6 +21,11 @@ namespace {
const int kMaxPayloadSize = 1024;
const int kNumCores = 1;
const VideoEncoder::Capabilities kCapabilities(false);
const VideoEncoder::Settings kSettings(kCapabilities,
kNumCores,
kMaxPayloadSize);
void SetDefaultSettings(VideoCodec* codec_settings) {
codec_settings->codecType = kVideoCodecH264;
codec_settings->maxFramerate = 60;
@ -37,7 +43,7 @@ TEST(H264EncoderImplTest, CanInitializeWithDefaultParameters) {
VideoCodec codec_settings;
SetDefaultSettings(&codec_settings);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder.InitEncode(&codec_settings, kNumCores, kMaxPayloadSize));
encoder.InitEncode(&codec_settings, kSettings));
EXPECT_EQ(H264PacketizationMode::NonInterleaved,
encoder.PacketizationModeForTesting());
}
@ -49,7 +55,7 @@ TEST(H264EncoderImplTest, CanInitializeWithNonInterleavedModeExplicitly) {
VideoCodec codec_settings;
SetDefaultSettings(&codec_settings);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder.InitEncode(&codec_settings, kNumCores, kMaxPayloadSize));
encoder.InitEncode(&codec_settings, kSettings));
EXPECT_EQ(H264PacketizationMode::NonInterleaved,
encoder.PacketizationModeForTesting());
}
@ -61,7 +67,7 @@ TEST(H264EncoderImplTest, CanInitializeWithSingleNalUnitModeExplicitly) {
VideoCodec codec_settings;
SetDefaultSettings(&codec_settings);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder.InitEncode(&codec_settings, kNumCores, kMaxPayloadSize));
encoder.InitEncode(&codec_settings, kSettings));
EXPECT_EQ(H264PacketizationMode::SingleNalUnit,
encoder.PacketizationModeForTesting());
}
@ -73,7 +79,7 @@ TEST(H264EncoderImplTest, CanInitializeWithRemovedParameter) {
VideoCodec codec_settings;
SetDefaultSettings(&codec_settings);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder.InitEncode(&codec_settings, kNumCores, kMaxPayloadSize));
encoder.InitEncode(&codec_settings, kSettings));
EXPECT_EQ(H264PacketizationMode::SingleNalUnit,
encoder.PacketizationModeForTesting());
}


@ -40,8 +40,7 @@ class MultiplexEncoderAdapter : public VideoEncoder {
// Implements VideoEncoder
int InitEncode(const VideoCodec* inst,
int number_of_cores,
size_t max_payload_size) override;
const VideoEncoder::Settings& settings) override;
int Encode(const VideoFrame& input_image,
const std::vector<VideoFrameType>* frame_types) override;
int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;


@ -13,6 +13,7 @@
#include <cstring>
#include "api/video/encoded_image.h"
#include "api/video_codecs/video_encoder.h"
#include "common_video/include/video_frame_buffer.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/include/module_common_types.h"
@ -60,9 +61,9 @@ MultiplexEncoderAdapter::~MultiplexEncoderAdapter() {
Release();
}
int MultiplexEncoderAdapter::InitEncode(const VideoCodec* inst,
int number_of_cores,
size_t max_payload_size) {
int MultiplexEncoderAdapter::InitEncode(
const VideoCodec* inst,
const VideoEncoder::Settings& settings) {
const size_t buffer_size =
CalcBufferSize(VideoType::kI420, inst->width, inst->height);
multiplex_dummy_planes_.resize(buffer_size);
@ -71,23 +72,23 @@ int MultiplexEncoderAdapter::InitEncode(const VideoCodec* inst,
0x80);
RTC_DCHECK_EQ(kVideoCodecMultiplex, inst->codecType);
VideoCodec settings = *inst;
settings.codecType = PayloadStringToCodecType(associated_format_.name);
VideoCodec video_codec = *inst;
video_codec.codecType = PayloadStringToCodecType(associated_format_.name);
// Take over the key frame interval at adapter level, because we have to
// sync the key frames for both sub-encoders.
switch (settings.codecType) {
switch (video_codec.codecType) {
case kVideoCodecVP8:
key_frame_interval_ = settings.VP8()->keyFrameInterval;
settings.VP8()->keyFrameInterval = 0;
key_frame_interval_ = video_codec.VP8()->keyFrameInterval;
video_codec.VP8()->keyFrameInterval = 0;
break;
case kVideoCodecVP9:
key_frame_interval_ = settings.VP9()->keyFrameInterval;
settings.VP9()->keyFrameInterval = 0;
key_frame_interval_ = video_codec.VP9()->keyFrameInterval;
video_codec.VP9()->keyFrameInterval = 0;
break;
case kVideoCodecH264:
key_frame_interval_ = settings.H264()->keyFrameInterval;
settings.H264()->keyFrameInterval = 0;
key_frame_interval_ = video_codec.H264()->keyFrameInterval;
video_codec.H264()->keyFrameInterval = 0;
break;
default:
break;
@ -101,8 +102,7 @@ int MultiplexEncoderAdapter::InitEncode(const VideoCodec* inst,
for (size_t i = 0; i < kAlphaCodecStreams; ++i) {
std::unique_ptr<VideoEncoder> encoder =
factory_->CreateVideoEncoder(associated_format_);
const int rv =
encoder->InitEncode(&settings, number_of_cores, max_payload_size);
const int rv = encoder->InitEncode(&video_codec, settings);
if (rv) {
RTC_LOG(LS_ERROR) << "Failed to create multiplex codec index " << i;
return rv;


@ -10,6 +10,7 @@
#include <utility>
#include "api/video_codecs/video_encoder.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "modules/video_coding/codecs/test/video_codec_unittest.h"
#include "modules/video_coding/include/video_error_codes.h"
@ -25,6 +26,9 @@ static const int kHeight = 144; // Height of the input image.
static const int kMaxFramerate = 30; // Arbitrary value.
namespace webrtc {
namespace {
const VideoEncoder::Capabilities kCapabilities(false);
}
EncodedImageCallback::Result
VideoCodecUnitTest::FakeEncodeCompleteCallback::OnEncodedImage(
@ -78,8 +82,10 @@ void VideoCodecUnitTest::SetUp() {
decoder_->RegisterDecodeCompleteCallback(&decode_complete_callback_);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(
&codec_settings_,
VideoEncoder::Settings(kCapabilities, 1 /* number of cores */,
0 /* max payload size (unused) */)));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->InitDecode(&codec_settings_, 1 /* number of cores */));
}


@ -28,6 +28,7 @@ namespace webrtc {
namespace test {
namespace {
const VideoEncoder::Capabilities kCapabilities(false);
int32_t InitEncoder(VideoCodecType codec_type, VideoEncoder* encoder) {
VideoCodec codec;
@ -36,8 +37,9 @@ int32_t InitEncoder(VideoCodecType codec_type, VideoEncoder* encoder) {
codec.height = 480;
codec.maxFramerate = 30;
RTC_CHECK(encoder);
return encoder->InitEncode(&codec, 1 /* number_of_cores */,
1200 /* max_payload_size */);
return encoder->InitEncode(
&codec, VideoEncoder::Settings(kCapabilities, 1 /* number_of_cores */,
1200 /* max_payload_size */));
}
int32_t InitDecoder(VideoCodecType codec_type, VideoDecoder* decoder) {


@ -24,6 +24,7 @@
#include "api/video/video_frame_buffer.h"
#include "api/video/video_rotation.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
#include "common_video/h264/h264_common.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
@ -45,6 +46,8 @@ namespace {
const int kMsToRtpTimestamp = kVideoPayloadTypeFrequency / 1000;
const int kMaxBufferedInputFrames = 20;
const VideoEncoder::Capabilities kCapabilities(false);
size_t GetMaxNaluSizeBytes(const EncodedImage& encoded_frame,
const VideoCodecTestFixture::Config& config) {
if (config.codec_settings.codecType != kVideoCodecH264)
@ -207,9 +210,11 @@ VideoProcessor::VideoProcessor(webrtc::VideoEncoder* encoder,
WEBRTC_VIDEO_CODEC_OK);
// Initialize codecs so that they are ready to receive frames.
RTC_CHECK_EQ(encoder_->InitEncode(&config_.codec_settings,
static_cast<int>(config_.NumberOfCores()),
config_.max_payload_size_bytes),
RTC_CHECK_EQ(encoder_->InitEncode(
&config_.codec_settings,
VideoEncoder::Settings(
kCapabilities, static_cast<int>(config_.NumberOfCores()),
config_.max_payload_size_bytes)),
WEBRTC_VIDEO_CODEC_OK);
for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {


@ -66,7 +66,7 @@ class VideoProcessorTest : public ::testing::Test {
}
void ExpectInit() {
EXPECT_CALL(encoder_mock_, InitEncode(_, _, _)).Times(1);
EXPECT_CALL(encoder_mock_, InitEncode(_, _)).Times(1);
EXPECT_CALL(encoder_mock_, RegisterEncodeCompleteCallback(_)).Times(1);
EXPECT_CALL(*decoder_mock_, InitDecode(_, _)).Times(1);
EXPECT_CALL(*decoder_mock_, RegisterDecodeCompleteCallback(_)).Times(1);


@ -450,9 +450,10 @@ void LibvpxVp8Encoder::SetStreamState(bool send_stream, int stream_idx) {
send_stream_[stream_idx] = send_stream;
}
// TODO(eladalon): s/inst/codec_settings/g.
// TODO(bugs.webrtc.org/10720): Pass |capabilities| to frame buffer controller.
int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst,
int number_of_cores,
size_t /*maxPayloadSize */) {
const VideoEncoder::Settings& settings) {
if (inst == NULL) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
@ -466,7 +467,7 @@ int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst,
if (inst->width < 1 || inst->height < 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (number_of_cores < 1) {
if (settings.number_of_cores < 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (inst->VP8().automaticResizeOn && inst->numberOfSimulcastStreams > 1) {
@ -492,7 +493,7 @@ int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst,
}
RTC_DCHECK(frame_buffer_controller_);
number_of_cores_ = number_of_cores;
number_of_cores_ = settings.number_of_cores;
timestamp_ = 0;
codec_ = *inst;
@ -611,7 +612,7 @@ int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst,
// Determine number of threads based on the image size and #cores.
// TODO(fbarchard): Consider number of Simulcast layers.
vpx_configs_[0].g_threads = NumberOfThreads(
vpx_configs_[0].g_w, vpx_configs_[0].g_h, number_of_cores);
vpx_configs_[0].g_w, vpx_configs_[0].g_h, settings.number_of_cores);
// Creating a wrapper to the image - setting image data to NULL.
// Actual pointer will be set in encode. Setting align to 1, as it


@ -46,8 +46,7 @@ class LibvpxVp8Encoder : public VideoEncoder {
int Release() override;
int InitEncode(const VideoCodec* codec_settings,
int number_of_cores,
size_t max_payload_size) override;
const VideoEncoder::Settings& settings) override;
int Encode(const VideoFrame& input_image,
const std::vector<VideoFrameType>* frame_types) override;


@ -14,6 +14,7 @@
#include "api/test/mock_video_decoder.h"
#include "api/test/mock_video_encoder.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/vp8_temporal_layers.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "common_video/test/utilities.h"
@ -51,6 +52,11 @@ constexpr int kDefaultMinPixelsPerFrame = 320 * 180;
constexpr int kWidth = 172;
constexpr int kHeight = 144;
constexpr float kFramerateFps = 30;
const VideoEncoder::Capabilities kCapabilities(false);
const VideoEncoder::Settings kSettings(kCapabilities,
kNumCores,
kMaxPayloadSize);
} // namespace
class TestVp8Impl : public VideoCodecUnitTest {
@ -117,7 +123,8 @@ TEST_F(TestVp8Impl, SetRates) {
auto* const vpx = new NiceMock<MockLibvpxVp8Interface>();
LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder.InitEncode(&codec_settings_, 1, 1000));
encoder.InitEncode(&codec_settings_,
VideoEncoder::Settings(kCapabilities, 1, 1000)));
const uint32_t kBitrateBps = 300000;
VideoBitrateAllocation bitrate_allocation;
@ -143,7 +150,8 @@ TEST_F(TestVp8Impl, DynamicSetRates) {
auto* const vpx = new NiceMock<MockLibvpxVp8Interface>();
LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder.InitEncode(&codec_settings_, 1, 1000));
encoder.InitEncode(&codec_settings_,
VideoEncoder::Settings(kCapabilities, 1, 1000)));
const uint32_t kBitrateBps = 300000;
VideoEncoder::RateControlParameters rate_settings;
@ -199,7 +207,7 @@ TEST_F(TestVp8Impl, DynamicSetRates) {
TEST_F(TestVp8Impl, EncodeFrameAndRelease) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
encoder_->InitEncode(&codec_settings_, kSettings));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
@ -257,7 +265,7 @@ TEST_F(TestVp8Impl, ChecksSimulcastSettings) {
codec_settings_.simulcastStream[1] = {kWidth / 2, kHeight / 2, 30, 3,
4000, 3000, 2000, 80};
EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
encoder_->InitEncode(&codec_settings_, kSettings));
codec_settings_.numberOfSimulcastStreams = 3;
// Resolutions are not in ascending order.
codec_settings_.simulcastStream[0] = {
@ -267,7 +275,7 @@ TEST_F(TestVp8Impl, ChecksSimulcastSettings) {
codec_settings_.simulcastStream[2] = {kWidth, kHeight, 30, 1,
4000, 3000, 2000, 80};
EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
encoder_->InitEncode(&codec_settings_, kSettings));
// Resolutions are not in ascending order.
codec_settings_.simulcastStream[0] = {kWidth, kHeight, kFramerateFps, 1,
4000, 3000, 2000, 80};
@ -276,7 +284,7 @@ TEST_F(TestVp8Impl, ChecksSimulcastSettings) {
codec_settings_.simulcastStream[2] = {
kWidth - 1, kHeight - 1, kFramerateFps, 1, 4000, 3000, 2000, 80};
EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
encoder_->InitEncode(&codec_settings_, kSettings));
// Temporal layers do not match.
codec_settings_.simulcastStream[0] = {
kWidth / 4, kHeight / 4, kFramerateFps, 1, 4000, 3000, 2000, 80};
@ -285,7 +293,7 @@ TEST_F(TestVp8Impl, ChecksSimulcastSettings) {
codec_settings_.simulcastStream[2] = {kWidth, kHeight, kFramerateFps, 3,
4000, 3000, 2000, 80};
EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
encoder_->InitEncode(&codec_settings_, kSettings));
// Resolutions do not match codec config.
codec_settings_.simulcastStream[0] = {
kWidth / 4 + 1, kHeight / 4 + 1, kFramerateFps, 1, 4000, 3000, 2000, 80};
@ -294,7 +302,7 @@ TEST_F(TestVp8Impl, ChecksSimulcastSettings) {
codec_settings_.simulcastStream[2] = {
kWidth + 4, kHeight + 4, kFramerateFps, 1, 4000, 3000, 2000, 80};
EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
encoder_->InitEncode(&codec_settings_, kSettings));
// Everything fine: scaling by 2, top resolution matches video, temporal
// settings are the same for all layers.
codec_settings_.simulcastStream[0] = {
@ -304,7 +312,7 @@ TEST_F(TestVp8Impl, ChecksSimulcastSettings) {
codec_settings_.simulcastStream[2] = {kWidth, kHeight, kFramerateFps, 1,
4000, 3000, 2000, 80};
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
encoder_->InitEncode(&codec_settings_, kSettings));
// Everything fine: custom scaling, top resolution matches video, temporal
// settings are the same for all layers.
codec_settings_.simulcastStream[0] = {
@ -314,7 +322,7 @@ TEST_F(TestVp8Impl, ChecksSimulcastSettings) {
codec_settings_.simulcastStream[2] = {kWidth, kHeight, kFramerateFps, 1,
4000, 3000, 2000, 80};
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
encoder_->InitEncode(&codec_settings_, kSettings));
}
#if defined(WEBRTC_ANDROID)
@ -378,7 +386,7 @@ TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) {
TEST_F(TestVp8Impl, EncoderWith2TemporalLayers) {
codec_settings_.VP8()->numberOfTemporalLayers = 2;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
encoder_->InitEncode(&codec_settings_, kSettings));
// Temporal layer 0.
EncodedImage encoded_frame;
@ -399,7 +407,7 @@ TEST_F(TestVp8Impl, ScalingDisabledIfAutomaticResizeOff) {
codec_settings_.VP8()->frameDroppingOn = true;
codec_settings_.VP8()->automaticResizeOn = false;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
encoder_->InitEncode(&codec_settings_, kSettings));
VideoEncoder::ScalingSettings settings =
encoder_->GetEncoderInfo().scaling_settings;
@ -410,7 +418,7 @@ TEST_F(TestVp8Impl, ScalingEnabledIfAutomaticResizeOn) {
codec_settings_.VP8()->frameDroppingOn = true;
codec_settings_.VP8()->automaticResizeOn = true;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
encoder_->InitEncode(&codec_settings_, kSettings));
VideoEncoder::ScalingSettings settings =
encoder_->GetEncoderInfo().scaling_settings;
@ -442,7 +450,7 @@ TEST_F(TestVp8Impl, DontDropKeyframes) {
/* num_squares = */ absl::optional<int>(300));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
encoder_->InitEncode(&codec_settings_, kSettings));
VideoBitrateAllocation bitrate_allocation;
// Bitrate only enough for TL0.
@ -480,7 +488,8 @@ TEST_F(TestVp8Impl, KeepsTimestampOnReencode) {
return img;
}));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder.InitEncode(&codec_settings_, 1, 1000));
encoder.InitEncode(&codec_settings_,
VideoEncoder::Settings(kCapabilities, 1, 1000)));
MockEncodedImageCallback callback;
encoder.RegisterEncodeCompleteCallback(&callback);
@ -512,7 +521,7 @@ TEST_F(TestVp8Impl, GetEncoderInfoFpsAllocationTwoTemporalLayers) {
codec_settings_.simulcastStream[0].maxBitrate = 100;
codec_settings_.simulcastStream[0].numberOfTemporalLayers = 2;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
encoder_->InitEncode(&codec_settings_, kSettings));
FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 2);
@ -530,7 +539,7 @@ TEST_F(TestVp8Impl, GetEncoderInfoFpsAllocationThreeTemporalLayers) {
codec_settings_.simulcastStream[0].maxBitrate = 100;
codec_settings_.simulcastStream[0].numberOfTemporalLayers = 3;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
encoder_->InitEncode(&codec_settings_, kSettings));
FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 4);
@ -553,7 +562,7 @@ TEST_F(TestVp8Impl, GetEncoderInfoFpsAllocationScreenshareLayers) {
kLegacyScreenshareTl1BitrateKbps;
codec_settings_.simulcastStream[0].numberOfTemporalLayers = 2;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
encoder_->InitEncode(&codec_settings_, kSettings));
// Expect empty vector, since this mode doesn't have a fixed framerate.
FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
@ -581,7 +590,7 @@ TEST_F(TestVp8Impl, GetEncoderInfoFpsAllocationSimulcastVideo) {
}
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
encoder_->InitEncode(&codec_settings_, kSettings));
FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 4);


@ -10,6 +10,7 @@
#include "api/video/color_space.h"
#include "api/video/i420_buffer.h"
#include "api/video_codecs/video_encoder.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "media/base/vp9_profile.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
@ -31,6 +32,11 @@ using FramerateFractions =
namespace {
const size_t kWidth = 1280;
const size_t kHeight = 720;
const VideoEncoder::Capabilities kCapabilities(false);
const VideoEncoder::Settings kSettings(kCapabilities,
/*number_of_cores=*/1,
/*max_payload_size=*/0);
} // namespace
class TestVp9Impl : public VideoCodecUnitTest {
@ -199,8 +205,7 @@ TEST_F(TestVp9Impl, EncoderWith2TemporalLayers) {
// Tl0PidIdx is only used in non-flexible mode.
codec_settings_.VP9()->flexibleMode = false;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
// Temporal layer 0.
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
@ -229,8 +234,7 @@ TEST_F(TestVp9Impl, EncoderWith2TemporalLayers) {
TEST_F(TestVp9Impl, EncoderWith2SpatialLayers) {
codec_settings_.VP9()->numberOfSpatialLayers = 2;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
SetWaitForEncodedFramesThreshold(2);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
@ -273,8 +277,7 @@ TEST_F(TestVp9Impl, EncoderExplicitLayering) {
codec_settings_.spatialLayers[1].maxFramerate = codec_settings_.maxFramerate;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
// Ensure it fails if scaling factors in horz/vert dimentions are different.
codec_settings_.spatialLayers[0].width = codec_settings_.width;
@ -282,8 +285,7 @@ TEST_F(TestVp9Impl, EncoderExplicitLayering) {
codec_settings_.spatialLayers[1].width = codec_settings_.width;
codec_settings_.spatialLayers[1].height = codec_settings_.height;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_PARAMETER,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
// Ensure it fails if scaling factor is not power of two.
codec_settings_.spatialLayers[0].width = codec_settings_.width / 3;
@ -291,8 +293,7 @@ TEST_F(TestVp9Impl, EncoderExplicitLayering) {
codec_settings_.spatialLayers[1].width = codec_settings_.width;
codec_settings_.spatialLayers[1].height = codec_settings_.height;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_PARAMETER,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
}
TEST_F(TestVp9Impl, EnableDisableSpatialLayers) {
@ -309,8 +310,7 @@ TEST_F(TestVp9Impl, EnableDisableSpatialLayers) {
codec_settings_.VP9()->frameDroppingOn = true;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
VideoBitrateAllocation bitrate_allocation;
for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
@ -357,8 +357,7 @@ TEST_F(TestVp9Impl, EndOfPicture) {
ConfigureSvc(num_spatial_layers);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
// Encode both base and upper layers. Check that end-of-superframe flag is
// set on upper layer frame but not on base layer frame.
@ -385,8 +384,7 @@ TEST_F(TestVp9Impl, EndOfPicture) {
encoder_->SetRates(VideoEncoder::RateControlParameters(
bitrate_allocation, codec_settings_.maxFramerate));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
SetWaitForEncodedFramesThreshold(1);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
@ -415,8 +413,7 @@ TEST_F(TestVp9Impl, InterLayerPred) {
for (const InterLayerPredMode inter_layer_pred : inter_layer_pred_modes) {
codec_settings_.VP9()->interLayerPred = inter_layer_pred;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
encoder_->SetRates(VideoEncoder::RateControlParameters(
bitrate_allocation, codec_settings_.maxFramerate));
@ -485,8 +482,7 @@ TEST_F(TestVp9Impl,
for (const InterLayerPredMode inter_layer_pred : inter_layer_pred_modes) {
codec_settings_.VP9()->interLayerPred = inter_layer_pred;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
VideoBitrateAllocation bitrate_allocation;
for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
@ -544,8 +540,7 @@ TEST_F(TestVp9Impl,
for (const InterLayerPredMode inter_layer_pred : inter_layer_pred_modes) {
codec_settings_.VP9()->interLayerPred = inter_layer_pred;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
VideoBitrateAllocation bitrate_allocation;
for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
@ -595,8 +590,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerInTheSameGof) {
codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
VideoBitrateAllocation bitrate_allocation;
@ -670,8 +664,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerAccrossGof) {
codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
VideoBitrateAllocation bitrate_allocation;
@ -761,8 +754,7 @@ TEST_F(TestVp9Impl, EnablingNewLayerIsDelayedInScreenshareAndAddsSsInfo) {
codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
codec_settings_.VP9()->flexibleMode = true;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
// Enable all but the last layer.
VideoBitrateAllocation bitrate_allocation;
@ -830,8 +822,7 @@ TEST_F(TestVp9Impl, ScreenshareFrameDropping) {
codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
codec_settings_.VP9()->flexibleMode = true;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
// Enable all but the last layer.
VideoBitrateAllocation bitrate_allocation;
@ -924,8 +915,7 @@ TEST_F(TestVp9Impl, RemovingLayerIsNotDelayedInScreenshareAndAddsSsInfo) {
codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
codec_settings_.VP9()->flexibleMode = true;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
// All layers are enabled from the start.
VideoBitrateAllocation bitrate_allocation;
@ -1007,8 +997,7 @@ TEST_F(TestVp9Impl, DisableNewLayerInVideoDelaysSsInfoTillTL0) {
codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOnKeyPic;
codec_settings_.VP9()->flexibleMode = false;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
// Enable all the layers.
VideoBitrateAllocation bitrate_allocation;
@ -1069,8 +1058,7 @@ TEST_F(TestVp9Impl,
codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
VideoBitrateAllocation bitrate_allocation;
bitrate_allocation.SetBitrate(
@ -1089,8 +1077,7 @@ TEST_F(TestVp9Impl,
TEST_F(TestVp9Impl, ScalabilityStructureIsAvailableInFlexibleMode) {
codec_settings_.VP9()->flexibleMode = true;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr));
@ -1123,8 +1110,7 @@ TEST_F(TestVp9Impl, EncoderInfoFpsAllocation) {
}
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 4);
@ -1161,8 +1147,7 @@ TEST_F(TestVp9Impl, EncoderInfoFpsAllocationFlexibleMode) {
}
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
// No temporal layers allowed when spatial layers have different fps targets.
FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
@ -1195,8 +1180,7 @@ TEST_P(TestVp9ImplWithLayering, FlexibleMode) {
codec_settings_.VP9()->numberOfSpatialLayers = num_spatial_layers_;
codec_settings_.VP9()->numberOfTemporalLayers = num_temporal_layers_;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
GofInfoVP9 gof;
if (num_temporal_layers_ == 1) {
@ -1234,8 +1218,7 @@ TEST_P(TestVp9ImplWithLayering, ExternalRefControl) {
codec_settings_.VP9()->numberOfSpatialLayers = num_spatial_layers_;
codec_settings_.VP9()->numberOfTemporalLayers = num_temporal_layers_;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
GofInfoVP9 gof;
if (num_temporal_layers_ == 1) {
@ -1291,8 +1274,7 @@ TEST_F(TestVp9ImplFrameDropping, PreEncodeFrameDropping) {
codec_settings_.maxFramerate = static_cast<uint32_t>(expected_framerate_fps);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
VideoFrame* input_frame = NextInputFrame();
for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
@ -1341,8 +1323,7 @@ TEST_F(TestVp9ImplFrameDropping, DifferentFrameratePerSpatialLayer) {
}
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));
encoder_->InitEncode(&codec_settings_, kSettings));
encoder_->SetRates(VideoEncoder::RateControlParameters(
bitrate_allocation, codec_settings_.maxFramerate));


@ -401,9 +401,9 @@ void VP9EncoderImpl::SetRates(const RateControlParameters& parameters) {
return;
}
// TODO(eladalon): s/inst/codec_settings/g.
int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
int number_of_cores,
size_t /*max_payload_size*/) {
const Settings& settings) {
if (inst == nullptr) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
@ -417,7 +417,7 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
if (inst->width < 1 || inst->height < 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (number_of_cores < 1) {
if (settings.number_of_cores < 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (inst->VP9().numberOfTemporalLayers > 3) {
@ -526,7 +526,7 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
config_->rc_resize_allowed = inst->VP9().automaticResizeOn ? 1 : 0;
// Determine number of threads based on the image size and #cores.
config_->g_threads =
NumberOfThreads(config_->g_w, config_->g_h, number_of_cores);
NumberOfThreads(config_->g_w, config_->g_h, settings.number_of_cores);
cpu_speed_ = GetCpuSpeed(config_->g_w, config_->g_h);


@ -21,6 +21,7 @@
#include "modules/video_coding/codecs/vp9/include/vp9.h"
#include "api/video_codecs/video_encoder.h"
#include "media/base/vp9_profile.h"
#include "modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
#include "modules/video_coding/utility/framerate_controller.h"
@ -40,8 +41,7 @@ class VP9EncoderImpl : public VP9Encoder {
int Release() override;
int InitEncode(const VideoCodec* codec_settings,
int number_of_cores,
size_t max_payload_size) override;
const Settings& settings) override;
int Encode(const VideoFrame& input_image,
const std::vector<VideoFrameType>* frame_types) override;